path: root/collections-debian-merged/ansible_collections/community/general
Diffstat (paths relative to collections-debian-merged/ansible_collections/community/general/)
-rw-r--r--  .azure-pipelines/README.md | 3
-rw-r--r--  .azure-pipelines/azure-pipelines.yml | 329
-rwxr-xr-x  .azure-pipelines/scripts/aggregate-coverage.sh | 20
-rwxr-xr-x  .azure-pipelines/scripts/combine-coverage.py | 60
-rwxr-xr-x  .azure-pipelines/scripts/process-results.sh | 24
-rwxr-xr-x  .azure-pipelines/scripts/publish-codecov.sh | 27
-rwxr-xr-x  .azure-pipelines/scripts/report-coverage.sh | 15
-rwxr-xr-x  .azure-pipelines/scripts/run-tests.sh | 34
-rwxr-xr-x  .azure-pipelines/scripts/time-command.py | 25
-rw-r--r--  .azure-pipelines/templates/coverage.yml | 39
-rw-r--r--  .azure-pipelines/templates/matrix.yml | 55
-rw-r--r--  .azure-pipelines/templates/test.yml | 45
-rw-r--r--  .github/BOTMETA.yml | 1126
-rw-r--r--  .github/patchback.yml | 5
-rw-r--r--  .github/settings.yml | 6
-rw-r--r--  .github/workflows/codeql-analysis.yml | 49
-rw-r--r--  .gitignore | 387
-rw-r--r--  CHANGELOG.rst | 1108
-rw-r--r--  COPYING | 675
-rw-r--r--  FILES.json | 28117
-rw-r--r--  MANIFEST.json | 34
-rw-r--r--  README.md | 98
-rw-r--r--  changelogs/.gitignore | 1
-rw-r--r--  changelogs/changelog.yaml | 1952
-rw-r--r--  changelogs/config.yaml | 29
-rw-r--r--  changelogs/fragments/.keep | 0
-rw-r--r--  meta/runtime.yml | 755
-rw-r--r--  plugins/action/__init__.py | 0
-rw-r--r--  plugins/action/iptables_state.py | 198
-rw-r--r--  plugins/action/shutdown.py | 211
-rw-r--r--  plugins/action/system/iptables_state.py | 198
-rw-r--r--  plugins/action/system/shutdown.py | 211
-rw-r--r--  plugins/become/__init__.py | 0
-rw-r--r--  plugins/become/doas.py | 126
-rw-r--r--  plugins/become/dzdo.py | 95
-rw-r--r--  plugins/become/ksu.py | 120
-rw-r--r--  plugins/become/machinectl.py | 88
-rw-r--r--  plugins/become/pbrun.py | 104
-rw-r--r--  plugins/become/pfexec.py | 104
-rw-r--r--  plugins/become/pmrun.py | 77
-rw-r--r--  plugins/become/sesu.py | 91
-rw-r--r--  plugins/cache/__init__.py | 0
-rw-r--r--  plugins/cache/memcached.py | 248
-rw-r--r--  plugins/cache/pickle.py | 67
-rw-r--r--  plugins/cache/redis.py | 233
-rw-r--r--  plugins/cache/yaml.py | 64
-rw-r--r--  plugins/callback/__init__.py | 0
-rw-r--r--  plugins/callback/actionable.py | 61
-rw-r--r--  plugins/callback/cgroup_memory_recap.py | 117
-rw-r--r--  plugins/callback/context_demo.py | 53
-rw-r--r--  plugins/callback/counter_enabled.py | 248
-rw-r--r--  plugins/callback/dense.py | 499
-rw-r--r--  plugins/callback/diy.py | 1420
-rw-r--r--  plugins/callback/full_skip.py | 76
-rw-r--r--  plugins/callback/hipchat.py | 228
-rw-r--r--  plugins/callback/jabber.py | 118
-rw-r--r--  plugins/callback/log_plays.py | 123
-rw-r--r--  plugins/callback/logdna.py | 208
-rw-r--r--  plugins/callback/logentries.py | 330
-rw-r--r--  plugins/callback/logstash.py | 248
-rw-r--r--  plugins/callback/mail.py | 227
-rw-r--r--  plugins/callback/nrdp.py | 188
-rw-r--r--  plugins/callback/null.py | 30
-rw-r--r--  plugins/callback/osx_say.py | 114
-rw-r--r--  plugins/callback/say.py | 114
-rw-r--r--  plugins/callback/selective.py | 276
-rw-r--r--  plugins/callback/slack.py | 251
-rw-r--r--  plugins/callback/splunk.py | 253
-rw-r--r--  plugins/callback/stderr.py | 71
-rw-r--r--  plugins/callback/sumologic.py | 201
-rw-r--r--  plugins/callback/syslog_json.py | 107
-rw-r--r--  plugins/callback/unixy.py | 246
-rw-r--r--  plugins/callback/yaml.py | 130
-rw-r--r--  plugins/connection/__init__.py | 0
-rw-r--r--  plugins/connection/chroot.py | 206
-rw-r--r--  plugins/connection/docker.py | 366
-rw-r--r--  plugins/connection/funcd.py | 102
-rw-r--r--  plugins/connection/iocage.py | 82
-rw-r--r--  plugins/connection/jail.py | 201
-rw-r--r--  plugins/connection/lxc.py | 228
-rw-r--r--  plugins/connection/lxd.py | 125
-rw-r--r--  plugins/connection/oc.py | 173
-rw-r--r--  plugins/connection/qubes.py | 159
-rw-r--r--  plugins/connection/saltstack.py | 106
-rw-r--r--  plugins/connection/zone.py | 200
-rw-r--r--  plugins/doc_fragments/__init__.py | 0
-rw-r--r--  plugins/doc_fragments/_gcp.py | 62
-rw-r--r--  plugins/doc_fragments/_netapp.py | 138
-rw-r--r--  plugins/doc_fragments/alicloud.py | 108
-rw-r--r--  plugins/doc_fragments/auth_basic.py | 31
-rw-r--r--  plugins/doc_fragments/dimensiondata.py | 50
-rw-r--r--  plugins/doc_fragments/dimensiondata_wait.py | 36
-rw-r--r--  plugins/doc_fragments/docker.py | 136
-rw-r--r--  plugins/doc_fragments/emc.py | 45
-rw-r--r--  plugins/doc_fragments/hetzner.py | 23
-rw-r--r--  plugins/doc_fragments/hpe3par.py | 35
-rw-r--r--  plugins/doc_fragments/hwc.py | 65
-rw-r--r--  plugins/doc_fragments/ibm_storage.py | 37
-rw-r--r--  plugins/doc_fragments/influxdb.py | 82
-rw-r--r--  plugins/doc_fragments/ipa.py | 75
-rw-r--r--  plugins/doc_fragments/keycloak.py | 61
-rw-r--r--  plugins/doc_fragments/kubevirt_common_options.py | 133
-rw-r--r--  plugins/doc_fragments/kubevirt_vm_options.py | 103
-rw-r--r--  plugins/doc_fragments/ldap.py | 47
-rw-r--r--  plugins/doc_fragments/lxca_common.py | 43
-rw-r--r--  plugins/doc_fragments/manageiq.py | 52
-rw-r--r--  plugins/doc_fragments/nios.py | 85
-rw-r--r--  plugins/doc_fragments/nomad.py | 51
-rw-r--r--  plugins/doc_fragments/oneview.py | 59
-rw-r--r--  plugins/doc_fragments/online.py | 44
-rw-r--r--  plugins/doc_fragments/opennebula.py | 44
-rw-r--r--  plugins/doc_fragments/openswitch.py | 84
-rw-r--r--  plugins/doc_fragments/oracle.py | 82
-rw-r--r--  plugins/doc_fragments/oracle_creatable_resource.py | 23
-rw-r--r--  plugins/doc_fragments/oracle_display_name_option.py | 15
-rw-r--r--  plugins/doc_fragments/oracle_name_option.py | 15
-rw-r--r--  plugins/doc_fragments/oracle_tags.py | 21
-rw-r--r--  plugins/doc_fragments/oracle_wait_options.py | 25
-rw-r--r--  plugins/doc_fragments/ovirt_facts.py | 59
-rw-r--r--  plugins/doc_fragments/postgres.py | 62
-rw-r--r--  plugins/doc_fragments/proxmox.py | 45
-rw-r--r--  plugins/doc_fragments/purestorage.py | 62
-rw-r--r--  plugins/doc_fragments/rackspace.py | 117
-rw-r--r--  plugins/doc_fragments/scaleway.py | 50
-rw-r--r--  plugins/doc_fragments/utm.py | 54
-rw-r--r--  plugins/doc_fragments/vexata.py | 52
-rw-r--r--  plugins/doc_fragments/xenserver.py | 40
-rw-r--r--  plugins/filter/__init__.py | 0
-rw-r--r--  plugins/filter/dict_kv.py | 70
-rw-r--r--  plugins/filter/jc.py | 94
-rw-r--r--  plugins/filter/json_query.py | 56
-rw-r--r--  plugins/filter/random_mac.py | 73
-rw-r--r--  plugins/filter/time.py | 143
-rw-r--r--  plugins/inventory/__init__.py | 0
-rw-r--r--  plugins/inventory/cobbler.py | 279
-rw-r--r--  plugins/inventory/docker_machine.py | 272
-rw-r--r--  plugins/inventory/docker_swarm.py | 255
-rw-r--r--  plugins/inventory/gitlab_runners.py | 140
-rw-r--r--  plugins/inventory/kubevirt.py | 256
-rw-r--r--  plugins/inventory/linode.py | 211
-rw-r--r--  plugins/inventory/nmap.py | 168
-rw-r--r--  plugins/inventory/online.py | 260
-rw-r--r--  plugins/inventory/proxmox.py | 350
-rw-r--r--  plugins/inventory/scaleway.py | 281
-rw-r--r--  plugins/inventory/stackpath_compute.py | 281
-rw-r--r--  plugins/inventory/virtualbox.py | 283
-rw-r--r--  plugins/lookup/__init__.py | 0
-rw-r--r--  plugins/lookup/cartesian.py | 77
-rw-r--r--  plugins/lookup/chef_databag.py | 104
-rw-r--r--  plugins/lookup/consul_kv.py | 191
-rw-r--r--  plugins/lookup/credstash.py | 125
-rw-r--r--  plugins/lookup/cyberarkpassword.py | 182
-rw-r--r--  plugins/lookup/dig.py | 356
-rw-r--r--  plugins/lookup/dnstxt.py | 96
-rw-r--r--  plugins/lookup/dsv.py | 140
-rw-r--r--  plugins/lookup/etcd.py | 180
-rw-r--r--  plugins/lookup/etcd3.py | 229
-rw-r--r--  plugins/lookup/filetree.py | 218
-rw-r--r--  plugins/lookup/flattened.py | 84
-rw-r--r--  plugins/lookup/gcp_storage_file.py | 156
-rw-r--r--  plugins/lookup/hashi_vault.py | 650
-rw-r--r--  plugins/lookup/hiera.py | 90
-rw-r--r--  plugins/lookup/keyring.py | 67
-rw-r--r--  plugins/lookup/lastpass.py | 99
-rw-r--r--  plugins/lookup/lmdb_kv.py | 120
-rw-r--r--  plugins/lookup/manifold.py | 278
-rw-r--r--  plugins/lookup/nios.py | 121
-rw-r--r--  plugins/lookup/nios_next_ip.py | 100
-rw-r--r--  plugins/lookup/nios_next_network.py | 113
-rw-r--r--  plugins/lookup/onepassword.py | 277
-rw-r--r--  plugins/lookup/onepassword_raw.py | 92
-rw-r--r--  plugins/lookup/passwordstore.py | 302
-rw-r--r--  plugins/lookup/redis.py | 117
-rw-r--r--  plugins/lookup/shelvefile.py | 90
-rw-r--r--  plugins/lookup/tss.py | 139
-rw-r--r--  plugins/module_utils/__init__.py | 0
-rw-r--r--  plugins/module_utils/_mount.py | 90
-rw-r--r--  plugins/module_utils/_netapp.py | 747
-rw-r--r--  plugins/module_utils/_ovirt.py | 871
-rw-r--r--  plugins/module_utils/alicloud_ecs.py | 286
-rw-r--r--  plugins/module_utils/cloud.py | 208
-rw-r--r--  plugins/module_utils/compat/__init__.py | 0
-rw-r--r--  plugins/module_utils/compat/ipaddress.py | 2580
-rw-r--r--  plugins/module_utils/database.py | 189
-rw-r--r--  plugins/module_utils/dimensiondata.py | 330
-rw-r--r--  plugins/module_utils/docker/__init__.py | 0
-rw-r--r--  plugins/module_utils/docker/common.py | 1022
-rw-r--r--  plugins/module_utils/docker/swarm.py | 280
-rw-r--r--  plugins/module_utils/gcdns.py | 39
-rw-r--r--  plugins/module_utils/gce.py | 39
-rw-r--r--  plugins/module_utils/gcp.py | 799
-rw-r--r--  plugins/module_utils/gitlab.py | 106
-rw-r--r--  plugins/module_utils/heroku.py | 41
-rw-r--r--  plugins/module_utils/hetzner.py | 171
-rw-r--r--  plugins/module_utils/hwc_utils.py | 441
-rw-r--r--  plugins/module_utils/ibm_sa_utils.py | 94
-rw-r--r--  plugins/module_utils/identity/__init__.py | 0
-rw-r--r--  plugins/module_utils/identity/keycloak/__init__.py | 0
-rw-r--r--  plugins/module_utils/identity/keycloak/keycloak.py | 482
-rw-r--r--  plugins/module_utils/influxdb.py | 92
-rw-r--r--  plugins/module_utils/ipa.py | 213
-rw-r--r--  plugins/module_utils/known_hosts.py | 180
-rw-r--r--  plugins/module_utils/kubevirt.py | 465
-rw-r--r--  plugins/module_utils/ldap.py | 78
-rw-r--r--  plugins/module_utils/linode.py | 21
-rw-r--r--  plugins/module_utils/lxd.py | 129
-rw-r--r--  plugins/module_utils/manageiq.py | 156
-rw-r--r--  plugins/module_utils/memset.py | 137
-rw-r--r--  plugins/module_utils/module_helper.py | 302
-rw-r--r--  plugins/module_utils/net_tools/__init__.py | 0
-rw-r--r--  plugins/module_utils/net_tools/nios/__init__.py | 0
-rw-r--r--  plugins/module_utils/net_tools/nios/api.py | 590
-rw-r--r--  plugins/module_utils/oneandone.py | 263
-rw-r--r--  plugins/module_utils/oneview.py | 485
-rw-r--r--  plugins/module_utils/online.py | 121
-rw-r--r--  plugins/module_utils/opennebula.py | 310
-rw-r--r--  plugins/module_utils/oracle/__init__.py | 0
-rw-r--r--  plugins/module_utils/oracle/oci_utils.py | 1962
-rw-r--r--  plugins/module_utils/postgres.py | 314
-rw-r--r--  plugins/module_utils/proxmox.py | 86
-rw-r--r--  plugins/module_utils/pure.py | 112
-rw-r--r--  plugins/module_utils/rax.py | 315
-rw-r--r--  plugins/module_utils/redfish_utils.py | 2694
-rw-r--r--  plugins/module_utils/redhat.py | 270
-rw-r--r--  plugins/module_utils/remote_management/__init__.py | 0
-rw-r--r--  plugins/module_utils/remote_management/dellemc/__init__.py | 0
-rw-r--r--  plugins/module_utils/remote_management/dellemc/dellemc_idrac.py | 56
-rw-r--r--  plugins/module_utils/remote_management/dellemc/ome.py | 163
-rw-r--r--  plugins/module_utils/remote_management/lxca/__init__.py | 0
-rw-r--r--  plugins/module_utils/remote_management/lxca/common.py | 78
-rw-r--r--  plugins/module_utils/saslprep.py | 178
-rw-r--r--  plugins/module_utils/scaleway.py | 195
-rw-r--r--  plugins/module_utils/source_control/__init__.py | 0
-rw-r--r--  plugins/module_utils/source_control/bitbucket.py | 92
-rw-r--r--  plugins/module_utils/storage/__init__.py | 0
-rw-r--r--  plugins/module_utils/storage/emc/__init__.py | 0
-rw-r--r--  plugins/module_utils/storage/emc/emc_vnx.py | 20
-rw-r--r--  plugins/module_utils/storage/hpe3par/__init__.py | 0
-rw-r--r--  plugins/module_utils/storage/hpe3par/hpe3par.py | 94
-rw-r--r--  plugins/module_utils/univention_umc.py | 278
-rw-r--r--  plugins/module_utils/utm_utils.py | 216
-rw-r--r--  plugins/module_utils/vexata.py | 97
-rw-r--r--  plugins/module_utils/xenserver.py | 862
-rw-r--r--  plugins/modules/__init__.py | 0
-rw-r--r--  plugins/modules/aerospike_migrations.py | 521
-rw-r--r--  plugins/modules/airbrake_deployment.py | 204
-rw-r--r--  plugins/modules/aix_devices.py | 369
-rw-r--r--  plugins/modules/aix_filesystem.py | 567
-rw-r--r--  plugins/modules/aix_inittab.py | 247
-rw-r--r--  plugins/modules/aix_lvg.py | 363
-rw-r--r--  plugins/modules/aix_lvol.py | 337
-rw-r--r--  plugins/modules/ali_instance.py | 1013
-rw-r--r--  plugins/modules/ali_instance_facts.py | 440
-rw-r--r--  plugins/modules/ali_instance_info.py | 440
-rw-r--r--  plugins/modules/alternatives.py | 159
-rw-r--r--  plugins/modules/apache2_mod_proxy.py | 450
-rw-r--r--  plugins/modules/apache2_module.py | 266
-rw-r--r--  plugins/modules/apk.py | 357
-rw-r--r--  plugins/modules/apt_repo.py | 146
-rw-r--r--  plugins/modules/apt_rpm.py | 183
-rw-r--r--  plugins/modules/archive.py | 572
-rw-r--r--  plugins/modules/atomic_container.py | 208
-rw-r--r--  plugins/modules/atomic_host.py | 101
-rw-r--r--  plugins/modules/atomic_image.py | 169
-rw-r--r--  plugins/modules/awall.py | 153
-rw-r--r--  plugins/modules/beadm.py | 431
-rw-r--r--  plugins/modules/bearychat.py | 182
-rw-r--r--  plugins/modules/bigpanda.py | 222
-rw-r--r--  plugins/modules/bitbucket_access_key.py | 278
-rw-r--r--  plugins/modules/bitbucket_pipeline_key_pair.py | 206
-rw-r--r--  plugins/modules/bitbucket_pipeline_known_host.py | 303
-rw-r--r--  plugins/modules/bitbucket_pipeline_variable.py | 277
-rw-r--r--  plugins/modules/bower.py | 228
-rw-r--r--  plugins/modules/bundler.py | 202
-rw-r--r--  plugins/modules/bzr.py | 190
-rw-r--r--  plugins/modules/campfire.py | 154
-rw-r--r--  plugins/modules/capabilities.py | 180
-rw-r--r--  plugins/modules/catapult.py | 154
-rw-r--r--  plugins/modules/circonus_annotation.py | 234
-rw-r--r--  plugins/modules/cisco_spark.py | 191
-rw-r--r--  plugins/modules/cisco_webex.py | 191
-rw-r--r--  plugins/modules/clc_aa_policy.py | 349
-rw-r--r--  plugins/modules/clc_alert_policy.py | 526
-rw-r--r--  plugins/modules/clc_blueprint_package.py | 299
-rw-r--r--  plugins/modules/clc_firewall_policy.py | 584
-rw-r--r--  plugins/modules/clc_group.py | 512
-rw-r--r--  plugins/modules/clc_loadbalancer.py | 935
-rw-r--r--  plugins/modules/clc_modify_server.py | 965
-rw-r--r--  plugins/modules/clc_publicip.py | 357
-rw-r--r--  plugins/modules/clc_server.py | 1557
-rw-r--r--  plugins/modules/clc_server_snapshot.py | 409
-rw-r--r--  plugins/modules/cloud/alicloud/ali_instance.py | 1013
-rw-r--r--  plugins/modules/cloud/alicloud/ali_instance_facts.py | 440
-rw-r--r--  plugins/modules/cloud/alicloud/ali_instance_info.py | 440
-rw-r--r--  plugins/modules/cloud/atomic/atomic_container.py | 208
-rw-r--r--  plugins/modules/cloud/atomic/atomic_host.py | 101
-rw-r--r--  plugins/modules/cloud/atomic/atomic_image.py | 169
-rw-r--r--  plugins/modules/cloud/centurylink/clc_aa_policy.py | 349
-rw-r--r--  plugins/modules/cloud/centurylink/clc_alert_policy.py | 526
-rw-r--r--  plugins/modules/cloud/centurylink/clc_blueprint_package.py | 299
-rw-r--r--  plugins/modules/cloud/centurylink/clc_firewall_policy.py | 584
-rw-r--r--  plugins/modules/cloud/centurylink/clc_group.py | 512
-rw-r--r--  plugins/modules/cloud/centurylink/clc_loadbalancer.py | 935
-rw-r--r--  plugins/modules/cloud/centurylink/clc_modify_server.py | 965
-rw-r--r--  plugins/modules/cloud/centurylink/clc_publicip.py | 357
-rw-r--r--  plugins/modules/cloud/centurylink/clc_server.py | 1557
-rw-r--r--  plugins/modules/cloud/centurylink/clc_server_snapshot.py | 409
-rw-r--r--  plugins/modules/cloud/dimensiondata/dimensiondata_network.py | 296
-rw-r--r--  plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py | 568
-rw-r--r--  plugins/modules/cloud/docker/docker_compose.py | 1155
-rw-r--r--  plugins/modules/cloud/docker/docker_config.py | 299
-rw-r--r--  plugins/modules/cloud/docker/docker_container.py | 3563
-rw-r--r--  plugins/modules/cloud/docker/docker_container_info.py | 145
-rw-r--r--  plugins/modules/cloud/docker/docker_host_info.py | 343
-rw-r--r--  plugins/modules/cloud/docker/docker_image.py | 1021
-rw-r--r--  plugins/modules/cloud/docker/docker_image_facts.py | 270
-rw-r--r--  plugins/modules/cloud/docker/docker_image_info.py | 270
-rw-r--r--  plugins/modules/cloud/docker/docker_login.py | 485
-rw-r--r--  plugins/modules/cloud/docker/docker_network.py | 717
-rw-r--r--  plugins/modules/cloud/docker/docker_network_info.py | 141
-rw-r--r--  plugins/modules/cloud/docker/docker_node.py | 294
-rw-r--r--  plugins/modules/cloud/docker/docker_node_info.py | 156
-rw-r--r--  plugins/modules/cloud/docker/docker_prune.py | 265
-rw-r--r--  plugins/modules/cloud/docker/docker_secret.py | 302
-rw-r--r--  plugins/modules/cloud/docker/docker_service.py | 1155
-rw-r--r--  plugins/modules/cloud/docker/docker_stack.py | 308
-rw-r--r--  plugins/modules/cloud/docker/docker_stack_info.py | 84
-rw-r--r--  plugins/modules/cloud/docker/docker_stack_task_info.py | 95
-rw-r--r--  plugins/modules/cloud/docker/docker_swarm.py | 675
-rw-r--r--  plugins/modules/cloud/docker/docker_swarm_info.py | 384
-rw-r--r--  plugins/modules/cloud/docker/docker_swarm_service.py | 3004
-rw-r--r--  plugins/modules/cloud/docker/docker_swarm_service_info.py | 115
-rw-r--r--  plugins/modules/cloud/docker/docker_volume.py | 332
-rw-r--r--  plugins/modules/cloud/docker/docker_volume_info.py | 128
-rw-r--r--  plugins/modules/cloud/google/gc_storage.py | 497
-rw-r--r--  plugins/modules/cloud/google/gcdns_record.py | 780
-rw-r--r--  plugins/modules/cloud/google/gcdns_zone.py | 372
-rw-r--r--  plugins/modules/cloud/google/gce.py | 753
-rw-r--r--  plugins/modules/cloud/google/gce_eip.py | 247
-rw-r--r--  plugins/modules/cloud/google/gce_img.py | 211
-rw-r--r--  plugins/modules/cloud/google/gce_instance_template.py | 605
-rw-r--r--  plugins/modules/cloud/google/gce_labels.py | 350
-rw-r--r--  plugins/modules/cloud/google/gce_lb.py | 310
-rw-r--r--  plugins/modules/cloud/google/gce_mig.py | 904
-rw-r--r--  plugins/modules/cloud/google/gce_net.py | 511
-rw-r--r--  plugins/modules/cloud/google/gce_pd.py | 293
-rw-r--r--  plugins/modules/cloud/google/gce_snapshot.py | 225
-rw-r--r--  plugins/modules/cloud/google/gce_tag.py | 218
-rw-r--r--  plugins/modules/cloud/google/gcp_backend_service.py | 420
-rw-r--r--  plugins/modules/cloud/google/gcp_forwarding_rule.py | 385
-rw-r--r--  plugins/modules/cloud/google/gcp_healthcheck.py | 457
-rw-r--r--  plugins/modules/cloud/google/gcp_target_proxy.py | 320
-rw-r--r--  plugins/modules/cloud/google/gcp_url_map.py | 535
-rw-r--r--  plugins/modules/cloud/google/gcpubsub.py | 349
-rw-r--r--  plugins/modules/cloud/google/gcpubsub_facts.py | 164
-rw-r--r--  plugins/modules/cloud/google/gcpubsub_info.py | 164
-rw-r--r--  plugins/modules/cloud/google/gcspanner.py | 304
-rw-r--r--  plugins/modules/cloud/heroku/heroku_collaborator.py | 128
-rw-r--r--  plugins/modules/cloud/huawei/hwc_ecs_instance.py | 2135
-rw-r--r--  plugins/modules/cloud/huawei/hwc_evs_disk.py | 1210
-rw-r--r--  plugins/modules/cloud/huawei/hwc_network_vpc.py | 493
-rw-r--r--  plugins/modules/cloud/huawei/hwc_smn_topic.py | 338
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_eip.py | 877
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py | 691
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_port.py | 1160
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_private_ip.py | 354
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_route.py | 437
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_security_group.py | 645
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py | 570
-rw-r--r--  plugins/modules/cloud/huawei/hwc_vpc_subnet.py | 734
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py | 184
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_preset.py | 154
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_pvc.py | 457
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_rs.py | 211
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_template.py | 385
-rw-r--r--  plugins/modules/cloud/kubevirt/kubevirt_vm.py | 469
-rw-r--r--  plugins/modules/cloud/linode/linode.py | 690
-rw-r--r--  plugins/modules/cloud/linode/linode_v4.py | 309
-rw-r--r--  plugins/modules/cloud/lxc/lxc_container.py | 1760
-rw-r--r--  plugins/modules/cloud/lxd/lxd_container.py | 710
-rw-r--r--  plugins/modules/cloud/lxd/lxd_profile.py | 404
-rw-r--r--  plugins/modules/cloud/memset/memset_dns_reload.py | 183
-rw-r--r--  plugins/modules/cloud/memset/memset_memstore_facts.py | 172
-rw-r--r--  plugins/modules/cloud/memset/memset_memstore_info.py | 172
-rw-r--r--  plugins/modules/cloud/memset/memset_server_facts.py | 297
-rw-r--r--  plugins/modules/cloud/memset/memset_server_info.py | 297
-rw-r--r--  plugins/modules/cloud/memset/memset_zone.py | 311
-rw-r--r--  plugins/modules/cloud/memset/memset_zone_domain.py | 266
-rw-r--r--  plugins/modules/cloud/memset/memset_zone_record.py | 380
-rw-r--r--  plugins/modules/cloud/misc/cloud_init_data_facts.py | 129
-rw-r--r--  plugins/modules/cloud/misc/helm.py | 216
-rw-r--r--  plugins/modules/cloud/misc/ovirt.py | 503
-rw-r--r--  plugins/modules/cloud/misc/proxmox.py | 735
-rw-r--r--  plugins/modules/cloud/misc/proxmox_domain_info.py | 133
-rw-r--r--  plugins/modules/cloud/misc/proxmox_group_info.py | 143
-rw-r--r--  plugins/modules/cloud/misc/proxmox_kvm.py | 1449
-rw-r--r--  plugins/modules/cloud/misc/proxmox_template.py | 306
-rw-r--r--  plugins/modules/cloud/misc/proxmox_user_info.py | 256
-rw-r--r--  plugins/modules/cloud/misc/rhevm.py | 1516
-rw-r--r--  plugins/modules/cloud/misc/serverless.py | 232
-rw-r--r--  plugins/modules/cloud/misc/terraform.py | 413
-rw-r--r--  plugins/modules/cloud/misc/xenserver_facts.py | 202
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_firewall_policy.py | 573
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_load_balancer.py | 677
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py | 1026
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_private_network.py | 454
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_public_ip.py | 341
-rw-r--r--  plugins/modules/cloud/oneandone/oneandone_server.py | 705
-rw-r--r--  plugins/modules/cloud/online/online_server_facts.py | 175
-rw-r--r--  plugins/modules/cloud/online/online_server_info.py | 175
-rw-r--r--  plugins/modules/cloud/online/online_user_facts.py | 76
-rw-r--r--  plugins/modules/cloud/online/online_user_info.py | 76
-rw-r--r--  plugins/modules/cloud/opennebula/one_host.py | 283
-rw-r--r--  plugins/modules/cloud/opennebula/one_image.py | 426
-rw-r--r--  plugins/modules/cloud/opennebula/one_image_facts.py | 293
-rw-r--r--  plugins/modules/cloud/opennebula/one_image_info.py | 293
-rw-r--r--  plugins/modules/cloud/opennebula/one_service.py | 768
-rw-r--r--  plugins/modules/cloud/opennebula/one_vm.py | 1599
-rw-r--r--  plugins/modules/cloud/oracle/oci_vcn.py | 221
-rw-r--r--  plugins/modules/cloud/ovh/ovh_ip_failover.py | 261
-rw-r--r--  plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py | 311
-rw-r--r--  plugins/modules/cloud/ovh/ovh_monthly_billing.py | 157
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py | 196
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_api_facts.py | 98
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_cluster_facts.py | 125
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py | 108
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_disk_facts.py | 125
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_event_facts.py | 170
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py | 165
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_group_facts.py | 123
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_host_facts.py | 149
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py | 187
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_network_facts.py | 125
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_nic_facts.py | 143
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_permission_facts.py | 166
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_quota_facts.py | 143
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py | 140
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py | 137
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py | 126
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py | 142
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py | 142
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_tag_facts.py | 176
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_template_facts.py | 124
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_user_facts.py | 123
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_vm_facts.py | 166
-rw-r--r--  plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py | 123
-rw-r--r--  plugins/modules/cloud/packet/packet_device.py | 651
-rw-r--r--  plugins/modules/cloud/packet/packet_ip_subnet.py | 326
-rw-r--r--  plugins/modules/cloud/packet/packet_project.py | 244
-rw-r--r--  plugins/modules/cloud/packet/packet_sshkey.py | 261
-rw-r--r--  plugins/modules/cloud/packet/packet_volume.py | 321
-rw-r--r--  plugins/modules/cloud/packet/packet_volume_attachment.py | 299
-rw-r--r--  plugins/modules/cloud/profitbricks/profitbricks.py | 654
-rw-r--r--  plugins/modules/cloud/profitbricks/profitbricks_datacenter.py | 257
-rw-r--r--  plugins/modules/cloud/profitbricks/profitbricks_nic.py | 288
-rw-r--r--  plugins/modules/cloud/profitbricks/profitbricks_volume.py | 425
-rw-r--r--  plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py | 258
-rw-r--r--  plugins/modules/cloud/pubnub/pubnub_blocks.py | 626
-rw-r--r--  plugins/modules/cloud/rackspace/rax.py | 897
-rw-r--r--  plugins/modules/cloud/rackspace/rax_cbs.py | 226
-rw-r--r--  plugins/modules/cloud/rackspace/rax_cbs_attachments.py | 218
-rw-r--r--  plugins/modules/cloud/rackspace/rax_cdb.py | 258
-rw-r--r--  plugins/modules/cloud/rackspace/rax_cdb_database.py | 171
-rw-r--r--  plugins/modules/cloud/rackspace/rax_cdb_user.py | 218
-rw-r--r--  plugins/modules/cloud/rackspace/rax_clb.py | 311
-rw-r--r--  plugins/modules/cloud/rackspace/rax_clb_nodes.py | 282
-rw-r--r--  plugins/modules/cloud/rackspace/rax_clb_ssl.py | 281
-rw-r--r--  plugins/modules/cloud/rackspace/rax_dns.py | 172
-rw-r--r--  plugins/modules/cloud/rackspace/rax_dns_record.py | 352
-rw-r--r--  plugins/modules/cloud/rackspace/rax_facts.py | 142
-rw-r--r--  plugins/modules/cloud/rackspace/rax_files.py | 392
-rw-r--r--  plugins/modules/cloud/rackspace/rax_files_objects.py | 608
-rw-r--r--  plugins/modules/cloud/rackspace/rax_identity.py | 102
-rw-r--r--  plugins/modules/cloud/rackspace/rax_keypair.py | 171
-rw-r--r--  plugins/modules/cloud/rackspace/rax_meta.py | 173
-rw-r--r--  plugins/modules/cloud/rackspace/rax_mon_alarm.py | 227
-rw-r--r--  plugins/modules/cloud/rackspace/rax_mon_check.py | 319
-rw-r--r--  plugins/modules/cloud/rackspace/rax_mon_entity.py | 191
-rw-r--r--  plugins/modules/cloud/rackspace/rax_mon_notification.py | 174
-rw-r--r--  plugins/modules/cloud/rackspace/rax_mon_notification_plan.py | 180
-rw-r--r--  plugins/modules/cloud/rackspace/rax_network.py | 138
-rw-r--r--  plugins/modules/cloud/rackspace/rax_queue.py | 139
-rw-r--r--  plugins/modules/cloud/rackspace/rax_scaling_group.py | 438
-rw-r--r--  plugins/modules/cloud/rackspace/rax_scaling_policy.py | 286
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_compute.py | 671
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_database_backup.py | 371
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_image_facts.py | 125
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_image_info.py | 126
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_ip.py | 261
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_ip_facts.py | 108
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_ip_info.py | 108
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_lb.py | 356
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_organization_facts.py | 104
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_organization_info.py | 104
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_security_group.py | 238
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_security_group_facts.py | 112
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_security_group_info.py | 112
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_security_group_rule.py | 263
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_server_facts.py | 195
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_server_info.py | 195
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py | 113
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_snapshot_info.py | 113
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_sshkey.py | 172
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_user_data.py | 171
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_volume.py | 176
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_volume_facts.py | 108
-rw-r--r--  plugins/modules/cloud/scaleway/scaleway_volume_info.py | 108
-rw-r--r--  plugins/modules/cloud/smartos/imgadm.py | 311
-rw-r--r--  plugins/modules/cloud/smartos/nictagadm.py | 234
-rw-r--r--  plugins/modules/cloud/smartos/smartos_image_facts.py | 124
-rw-r--r--  plugins/modules/cloud/smartos/smartos_image_info.py | 124
-rw-r--r--  plugins/modules/cloud/smartos/vmadm.py | 796
-rw-r--r--  plugins/modules/cloud/softlayer/sl_vm.py | 428
-rw-r--r--  plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py | 1543
-rw-r--r--  plugins/modules/cloud/univention/udm_dns_record.py | 185
-rw-r--r--  plugins/modules/cloud/univention/udm_dns_zone.py | 231
-rw-r--r--  plugins/modules/cloud/univention/udm_group.py | 177
-rw-r--r--  plugins/modules/cloud/univention/udm_share.py | 576
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py542
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py190
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py188
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py130
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py1933
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py129
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py878
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py603
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py657
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py326
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py276
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py345
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py251
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py140
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py339
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py267
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py603
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py657
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py326
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py276
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py423
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py354
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py521
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py168
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py221
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py667
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py443
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py589
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py1030
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py363
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py453
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py745
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py1171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py682
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py452
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py627
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py447
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py611
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py541
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py993
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py335
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py524
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py568
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py347
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py1155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py3563
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py145
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py343
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py1021
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py294
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py265
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py1155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py95
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py675
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py384
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py3004
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py332
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py572
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py334
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py958
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py496
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py312
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py497
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py372
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py753
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py605
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py350
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py904
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py511
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py385
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py457
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py320
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py535
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py273
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py330
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py193
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py280
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py387
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py374
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py348
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py563
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py199
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py604
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py116
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py479
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py216
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py509
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py212
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py971
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py875
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py203
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py274
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py2135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py1210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py493
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py338
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py877
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py691
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py1160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py354
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py437
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py645
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py570
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py734
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py118
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py107
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py136
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py355
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py400
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py249
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py879
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py364
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py327
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py427
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py565
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py334
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py292
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py399
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py144
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py355
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py302
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py400
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py249
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py105
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py194
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py649
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py342
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py303
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py401
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py367
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py531
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py615
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py879
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py364
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py184
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py457
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py385
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py469
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py514
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py284
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py318
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py690
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py309
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py243
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py79
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py328
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py566
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py1760
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py710
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py307
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py386
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py648
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py344
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py928
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py289
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py557
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py712
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py266
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py340
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py204
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py404
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py340
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py1304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py146
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py332
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py215
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py465
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py149
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py248
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py246
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py227
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py437
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py610
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py1304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py878
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py479
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py509
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py565
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py144
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py105
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py342
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py284
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py318
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py79
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py283
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py519
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py361
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py1115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py469
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py459
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py146
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py283
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py519
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py361
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py1115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py345
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py537
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py182
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py191
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py191
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py116
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py212
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py303
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py386
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py248
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py185
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py91
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py487
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py189
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py469
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py221
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py168
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py47
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py310
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py283
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py426
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py768
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py1599
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py573
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py677
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py1026
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py454
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py341
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py705
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py165
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py165
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py215
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py175
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py175
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py375
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py653
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py395
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py261
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py503
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py98
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py165
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py123
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py149
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py187
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py140
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py176
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py123
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py123
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py267
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py712
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py147
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py394
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py357
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py146
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py312
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py971
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py875
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py292
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py307
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py653
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py481
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py388
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py539
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py754
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py930
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py245
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py205
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py644
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py206
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py313
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py219
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py561
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py402
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py651
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py326
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py244
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py261
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py321
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py481
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py317
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py866
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py797
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py147
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py388
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py539
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py667
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py443
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py589
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py1030
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py363
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py453
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py745
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py1171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py682
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py452
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py293
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py627
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py447
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py717
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py611
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py541
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py993
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py335
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py654
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py288
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py425
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py735
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py133
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py1449
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py306
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py626
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py754
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py330
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py858
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py652
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py185
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py897
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py311
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py282
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py281
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py352
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py392
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py608
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py102
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py227
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py319
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py191
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py174
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py138
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py438
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py286
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py756
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py335
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py466
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py466
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py930
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py140
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py339
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py615
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py203
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py427
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py194
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py349
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py648
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py344
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py928
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py289
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py557
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py165
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py165
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py110
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py215
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py327
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py756
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py335
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py466
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py466
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py277
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py1516
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py245
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py221
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py91
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py671
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py371
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py261
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py356
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py104
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py104
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py238
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py112
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py112
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py195
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py195
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py113
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py113
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py171
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py176
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py292
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py306
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py232
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py384
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py244
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py428
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py487
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py205
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py256
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py459
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py644
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py206
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py303
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py277
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py190
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py273
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py330
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py193
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py280
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py304
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py387
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py374
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py348
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py563
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py332
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py1543
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py215
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py277
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py465
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py170
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py199
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py604
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py118
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py107
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py136
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py246
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py301
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py227
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py437
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py610
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py384
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py244
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py858
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py652
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py209
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py262
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py206
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py313
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py189
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py175
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py369
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py567
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py363
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py337
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py159
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py431
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py423
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py354
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py370
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py496
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py399
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py649
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py401
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py514
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py243
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py234
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py328
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py566
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py537
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py47
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py375
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py395
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py317
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py866
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py797
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py330
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py172
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py292
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py306
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py485
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py175
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py905
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py594
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py866
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py426
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py313
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py905
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py173
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py185
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py231
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py177
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py576
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py542
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py594
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py149
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py219
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py362
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py866
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py291
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py308
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py373
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py209
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py796
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py131
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py450
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py266
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py524
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py347
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py274
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py178
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py367
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py258
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py780
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py531
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py255
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py225
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py160
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py362
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py241
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py257
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py313
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py190
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py188
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py130
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py198
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py202
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py1933
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py270
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py426
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py958
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py394
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py262
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py263
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py259
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py251
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py561
-rw-r--r--collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py402
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.ini48
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.py224
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/apache-libcloud.py336
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.ini20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.py580
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.ini23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.py962
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.ini39
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.py248
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.ini40
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.py470
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.ini24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.py305
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.ini57
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.py429
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.ini54
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.py527
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.py892
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.yml74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/fleet.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.ini200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.py651
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/freeipa.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.ini76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.py524
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.yaml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/jail.py27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/landscape.py117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/libcloud.ini15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.ini18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.py338
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxc_inventory.py60
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.ini13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.py93
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt.ini17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt_dynamic_inventory.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.ini41
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.py163
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.ini10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.py95
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.py346
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.yaml22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/openshift.py89
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/openvz.py74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.ini35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.py279
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt4.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.ini53
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.py496
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/proxmox.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rackhd.py86
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.ini66
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.py460
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rhv.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.ini35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.py286
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.ini37
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.py220
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/serf.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/softlayer.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.ini16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.py226
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/ssh_config.py121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.py180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/vagrant.py123
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/vbox.py107
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/inventory/zone.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/vault/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.ini10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.py595
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring-client.py134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring.py87
-rw-r--r--collections-debian-merged/ansible_collections/community/general/shippable.yml44
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/.gitignore1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml71
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml53
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml231
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml368
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml96
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml457
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml4
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml60
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/aliases1
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test.sh10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases3
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/aliases2
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/test_connection.inventory6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases1
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases1
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases2
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases2
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml87
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/test_docker_config.yml139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/files/env-file2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/main.yml43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/comparisons.yml463
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/compatibility.yml118
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/image-ids.yml141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml445
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/network.yml747
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/options.yml3816
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/ports.yml286
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/start-stop.yml455
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/tasks/main.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/test_host_info.yml296
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/test.yml49
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/basic.yml78
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/docker_image.yml228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/old-options.yml51
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/options.yml337
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/Dockerfile3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/EtcHostsDockerfile3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/MyDockerfile5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/StagedDockerfile7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/tasks/main.yml59
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/test.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/tests/docker_login.yml139
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/main.yml38
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/basic.yml134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/ipam.yml398
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/options.yml240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/overlay.yml61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/substring.yml37
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/tasks/main.yml76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/aliases12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/main.yml37
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/test_node.yml840
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/test_node_info.yml88
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/tasks/main.yml68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/test_secrets.yml124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/test_stack.yml113
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_base.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/vars/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml75
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/vars/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/vars/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/aliases13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/cleanup.yml35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/main.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/run-test.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/basic.yml158
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options.yml1158
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml90
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml190
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/main.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml158
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml113
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml601
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml450
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml1878
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml214
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml339
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml411
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml408
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/vars/main.yml54
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml83
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/run-test.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/tests/basic.yml177
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/tasks/main.yml74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml39
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml287
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml380
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/aliases8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml31
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml94
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml87
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml40
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml98
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml97
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases3
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases3
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml64
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml104
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml193
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml62
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml180
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml25
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml25
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml25
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml37
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml41
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml585
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml45
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml584
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml73
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml250
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml83
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml44
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml48
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml85
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml315
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml81
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml186
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml138
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml155
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml87
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml148
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml140
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/aliases11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/docker-machine20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml50
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/runme.sh68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/aliases12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml58
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml35
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/runme.sh22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/constraints.txt1
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py70
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/runme.sh80
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/server.py164
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/test.out61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml316
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml299
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml199
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg56
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg56
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml154
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso bin 0 -> 374784 bytes
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml66
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml48
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml52
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12 bin 0 -> 2532 bytes
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml60
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/aliases8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml236
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml112
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml62
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/run-test.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml90
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml94
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml2
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_private.pem27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_public.pem9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token.jwt1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token_invalid.jwt1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_setup.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_test.yml44
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml45
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_setup.yml18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_test.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml188
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml88
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/install_dependencies.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml9
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/runme.sh23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml12
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml58
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml62
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml90
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml121
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml148
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml230
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/aliases9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py50
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml78
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml60
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j213
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/nios_a_record_idempotence.yml77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/nios_aaaa_record_idempotence.yml77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/nios_cname_record_idempotence.yml77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/nios_dns_view_idempotence.yml58
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/nios_host_record_idempotence.yml126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/nios_mx_record_idempotence.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/nios_naptr_record_idempotence.yml91
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/defaults/main.yaml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/nios_network_idempotence.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/nios_network_view_idempotence.yml58
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/nios_ptr_record_idempotence.yml83
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/nios_srv_record_idempotence.yml98
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/nios_txt_record_idempotence.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/defaults/main.yaml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/nios_zone_idempotence.yml68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl396
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml106
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml40
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml69
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml149
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz bin 0 -> 2950 bytes
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml253
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/files/obtainpid.sh3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml69
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml116
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml278
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/defaults/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/main.yml36
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml366
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml235
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/main.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml208
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml364
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml377
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/defaults/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml177
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/setup_publication.yml61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/main.yml25
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml199
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml231
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/default.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/defaults/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml390
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml1073
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/defaults/main.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml183
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/defaults/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml75
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/defaults/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/main.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml50
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml1533
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml407
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml102
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml120
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml436
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml534
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/defaults/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml331
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml78
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml730
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml375
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml735
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/defaults/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml672
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml899
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml245
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/defaults/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml775
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_password.yml429
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml222
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nios_tests/tasks/main.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nuage_tests/tasks/main.yml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_tests/tasks/main.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml111
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml145
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/aliases6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml47
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml201
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml71
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml147
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml387
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml233
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml444
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml219
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml134
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml44
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml82
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/aliases3
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/foreman.sh10
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/runme.sh50
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml114
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml174
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml48
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml51
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml51
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml51
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml69
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/library/current_container_facts.py101
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Debian.yml45
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Fedora.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-7.yml45
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-8.yml34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Suse.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Debian.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Fedora.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-7.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-8.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Suse.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/default.yml0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.conf46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml97
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/vars/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml117
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md138
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xzbin0 -> 15496 bytes
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml24
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml68
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml66
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/defaults/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/handlers/main.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/main.yml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j228
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml105
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem20
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml102
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j211
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j216
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/aliases1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml89
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml52
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml59
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf42
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/inventory1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml71
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/runme.sh15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/aliases5
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml607
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/aliases12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml39
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml402
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml150
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml81
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml53
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml1
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml67
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml42
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml179
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/aliases3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml15
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml14
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml73
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml31
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml237
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml32
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml52
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml53
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml28
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml57
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml46
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml81
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/aliases4
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml3
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml132
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j29
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml525
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j218
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml2
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml26
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/requirements.yml10
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json7
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.10.txt562
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.11.txt562
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.9.txt492
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/compat/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/compat/builtins.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/compat/mock.py122
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/compat/unittest.py38
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/loader.py102
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/path.py8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/procenv.py76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/vault_helper.py27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/mock/yaml_helper.py126
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/conftest.py37
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/helper.py19
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py94
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py85
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py81
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py36
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_docker.py67
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py40
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py41
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py207
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py200
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py57
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py187
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py536
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py321
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py43
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py52
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py72
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/test_common.py518
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_auth.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_utils.py361
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py166
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py38
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/test_api.py251
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/test_postgres.py325
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py79
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_hetzner.py268
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py116
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_kubevirt.py56
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py84
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py55
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py47
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py69
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py25
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py118
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json73
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json707
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json87
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json771
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json75
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json420
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py73
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py182
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py413
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py220
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py175
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py50
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_container.py22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_network.py31
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_swarm_service.py510
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_volume.py36
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gce_tag.py66
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_url_map.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/kubevirt_fixtures.py74
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_rs.py80
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_vm.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/conftest.py85
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode.py16
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py324
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/test_terraform.py22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py33
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py69
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/common.py11
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/conftest.py75
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py31
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/test_redis_info.py76
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/postgresql/test_postgresql_ext.py35
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/messaging/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py151
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_icinga2_feature.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_monit.py157
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty.py128
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py44
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_change.py82
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/fixtures/nios_result.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py159
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py159
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py133
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py127
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_fixed_address.py201
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py152
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_member.py162
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_module.py88
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py137
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py147
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network.py248
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py156
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py125
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_ptr_record.py184
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_zone.py287
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip.py219
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip_info.py71
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall.py1193
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall_info.py240
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_nmcli.py1210
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_campfire.py93
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_slack.py201
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_gem.py142
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_maven_artifact.py70
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_npm.py70
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/conftest.py34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_apk.py36
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew.py22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_macports.py34
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_pkgin.py143
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py1221
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_channel.py144
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_register.py289
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py141
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py196
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py99
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py103
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/conftest.py27
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py205
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py30
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py77
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py135
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py391
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py103
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py169
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py60
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py167
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py63
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py260
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py62
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py186
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py120
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py242
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py71
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py342
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py197
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py192
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py295
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/gitlab.py581
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py108
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py112
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py103
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py124
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py95
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py181
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py247
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv412
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv612
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu13
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json21
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt17
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv47
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv66
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu7
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json18
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up62
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv461
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv661
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt9
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt8
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json109
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family12
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp6
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com61
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py320
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_java_keystore.py297
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_pamd.py376
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_parted.py345
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_solaris_zone.py115
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_sysupgrade.py67
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_ufw.py438
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_xfconf.py367
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/utils.py52
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/__init__.py0
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py22
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py153
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/unit/requirements.txt23
-rw-r--r--collections-debian-merged/ansible_collections/community/general/tests/utils/constraints.txt54
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/aix.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/check_matrix.py120
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/cloud.sh19
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/freebsd.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/linux.sh18
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/macos.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/osx.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/remote.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/rhel.sh22
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/sanity.sh42
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/shippable.sh223
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.py16
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.sh5
-rwxr-xr-xcollections-debian-merged/ansible_collections/community/general/tests/utils/shippable/units.sh27
3129 files changed, 641542 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/README.md b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/README.md
new file mode 100644
index 00000000..385e70ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/README.md
@@ -0,0 +1,3 @@
+## Azure Pipelines Configuration
+
+Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
new file mode 100644
index 00000000..f708f60e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
@@ -0,0 +1,329 @@
+trigger:
+ batch: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+pr:
+ autoCancel: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+schedules:
+ - cron: 0 9 * * *
+ displayName: Nightly
+ always: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+variables:
+ - name: checkoutPath
+ value: ansible_collections/community/general
+ - name: coverageBranches
+ value: main
+ - name: pipelinesCoverage
+ value: coverage
+ - name: entryPoint
+ value: tests/utils/shippable/shippable.sh
+ - name: fetchDepth
+ value: 0
+
+resources:
+ containers:
+ - container: default
+ image: quay.io/ansible/azure-pipelines-test-container:1.7.1
+
+pool: Standard
+
+stages:
+### Sanity
+ - stage: Sanity_devel
+ displayName: Sanity devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: devel/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+ - test: extra
+ - stage: Sanity_2_10
+ displayName: Sanity 2.10
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.10/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+ - stage: Sanity_2_9
+ displayName: Sanity 2.9
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.9/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+### Units
+ - stage: Units_devel
+ displayName: Units devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: devel/units/{0}/1
+ targets:
+ - test: 2.6
+ - test: 2.7
+ - test: 3.5
+ - test: 3.6
+ - test: 3.7
+ - test: 3.8
+ - test: 3.9
+ - stage: Units_2_10
+ displayName: Units 2.10
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.10/units/{0}/1
+ targets:
+ - test: 2.6
+ - test: 2.7
+ - test: 3.5
+ - test: 3.6
+ - test: 3.7
+ - test: 3.8
+ - test: 3.9
+ - stage: Units_2_9
+ displayName: Units 2.9
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.9/units/{0}/1
+ targets:
+ - test: 2.6
+ - test: 2.7
+ - test: 3.5
+ - test: 3.6
+ - test: 3.7
+ - test: 3.8
+
+## Remote
+ - stage: Remote_devel
+ displayName: Remote devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: OS X 10.11
+ test: osx/10.11
+ - name: macOS 10.15
+ test: macos/10.15
+ - name: RHEL 7.8
+ test: rhel/7.8
+ - name: RHEL 8.2
+ test: rhel/8.2
+ - name: FreeBSD 11.1
+ test: freebsd/11.1
+ - name: FreeBSD 12.1
+ test: freebsd/12.1
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Remote_2_10
+ displayName: Remote 2.10
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.10/{0}
+ targets:
+ - name: OS X 10.11
+ test: osx/10.11
+ - name: RHEL 8.2
+ test: rhel/8.2
+ - name: FreeBSD 12.1
+ test: freebsd/12.1
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Remote_2_9
+ displayName: Remote 2.9
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.9/{0}
+ targets:
+ - name: RHEL 8.2
+ test: rhel/8.2
+ #- name: FreeBSD 12.0
+ # test: freebsd/12.0
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+
+### Docker
+ - stage: Docker_devel
+ displayName: Docker devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux/{0}
+ targets:
+ - name: CentOS 6
+ test: centos6
+ - name: CentOS 7
+ test: centos7
+ - name: CentOS 8
+ test: centos8
+ - name: Fedora 32
+ test: fedora32
+ - name: Fedora 33
+ test: fedora33
+ - name: openSUSE 15 py2
+ test: opensuse15py2
+ - name: openSUSE 15 py3
+ test: opensuse15
+ - name: Ubuntu 16.04
+ test: ubuntu1604
+ - name: Ubuntu 18.04
+ test: ubuntu1804
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Docker_2_10
+ displayName: Docker 2.10
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.10/linux/{0}
+ targets:
+ #- name: CentOS 8
+ # test: centos8
+ - name: Fedora 32
+ test: fedora32
+ - name: openSUSE 15 py3
+ test: opensuse15
+ - name: Ubuntu 18.04
+ test: ubuntu1804
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Docker_2_9
+ displayName: Docker 2.9
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.9/linux/{0}
+ targets:
+ #- name: CentOS 8
+ # test: centos8
+ #- name: Fedora 31
+ # test: fedora31
+ #- name: openSUSE 15 py3
+ # test: opensuse15
+ - name: Ubuntu 18.04
+ test: ubuntu1804
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+
+### Cloud
+ - stage: Cloud_devel
+ displayName: Cloud devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: devel/cloud/{0}/1
+ targets:
+ - test: 2.7
+ - test: 3.6
+ - stage: Cloud_2_10
+ displayName: Cloud 2.10
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.10/cloud/{0}/1
+ targets:
+ - test: 3.6
+ - stage: Cloud_2_9
+ displayName: Cloud 2.9
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.9/cloud/{0}/1
+ targets:
+ - test: 2.7
+ - stage: Summary
+ condition: succeededOrFailed()
+ dependsOn:
+ - Sanity_devel
+ - Sanity_2_9
+ - Sanity_2_10
+ - Units_devel
+ - Units_2_9
+ - Units_2_10
+ - Remote_devel
+ - Remote_2_9
+ - Remote_2_10
+ - Docker_devel
+ - Docker_2_9
+ - Docker_2_10
+ - Cloud_devel
+ - Cloud_2_9
+ - Cloud_2_10
+ jobs:
+ - template: templates/coverage.yml
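A minimal Python sketch of how templates/matrix.yml (further below) expands a stage's testFormat and targets into the test string handed to the shippable.sh entry point; the values are taken from the Units devel stage above and the printed output is illustrative only:

# Sketch: mirrors the Azure Pipelines format() expressions with str.format().
name_format = 'Python {0}'
test_format = 'devel/units/{0}/1'
targets = ['2.6', '2.7', '3.5', '3.6', '3.7', '3.8', '3.9']

for target in targets:
    display_name = name_format.format(target)  # job display name, e.g. "Python 3.8"
    test = test_format.format(target)          # handed to tests/utils/shippable/shippable.sh
    print(display_name, '->', test)            # e.g. Python 3.8 -> devel/units/3.8/1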
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 00000000..f3113dd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Aggregate code coverage results for later processing.
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+ # Only analyze coverage if the installed version of ansible-test supports it.
+ # Doing so allows this script to work unmodified for multiple Ansible versions.
+ ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
new file mode 100755
index 00000000..506ade64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Combine coverage data from multiple jobs, keeping only the data from the most recent attempt of each job.
+Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
+The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
+Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
+It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shutil
+import sys
+
+
+def main():
+ """Main program entry point."""
+ source_directory = sys.argv[1]
+
+ if '/ansible_collections/' in os.getcwd():
+ output_path = "tests/output"
+ else:
+ output_path = "test/results"
+
+ destination_directory = os.path.join(output_path, 'coverage')
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ jobs = {}
+ count = 0
+
+ for name in os.listdir(source_directory):
+ match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
+ label = match.group('label')
+ attempt = int(match.group('attempt'))
+ jobs[label] = max(attempt, jobs.get(label, 0))
+
+ for label, attempt in jobs.items():
+ name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
+ source = os.path.join(source_directory, name)
+ source_files = os.listdir(source)
+
+ for source_file in source_files:
+ source_path = os.path.join(source, source_file)
+ destination_path = os.path.join(destination_directory, source_file + '.' + label)
+ print('"%s" -> "%s"' % (source_path, destination_path))
+ shutil.copyfile(source_path, destination_path)
+ count += 1
+
+ print('Coverage file count: %d' % count)
+ print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
+ print('##vso[task.setVariable variable=outputPath]%s' % output_path)
+
+
+if __name__ == '__main__':
+ main()
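A minimal sketch of the attempt-selection step above, using hypothetical artifact names; only the highest job attempt per label is kept and copied:

import re

# Hypothetical artifact names following "Coverage $(System.JobAttempt) {label}".
names = ['Coverage 1 Units devel Python 3.8', 'Coverage 2 Units devel Python 3.8']

jobs = {}
for name in names:
    match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
    label, attempt = match.group('label'), int(match.group('attempt'))
    jobs[label] = max(attempt, jobs.get(label, 0))

print(jobs)  # {'Units devel Python 3.8': 2} -- only attempt 2 is copied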
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh
new file mode 100755
index 00000000..f3f1d1ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Check the test results and set variables for use in later steps.
+
+set -o pipefail -eu
+
+if [[ "$PWD" =~ /ansible_collections/ ]]; then
+ output_path="tests/output"
+else
+ output_path="test/results"
+fi
+
+echo "##vso[task.setVariable variable=outputPath]${output_path}"
+
+if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveTestResults]true"
+fi
+
+if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveBotResults]true"
+fi
+
+if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveCoverageData]true"
+fi
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.sh b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.sh
new file mode 100755
index 00000000..7aeabda0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Upload code coverage reports to codecov.io.
+# Multiple coverage files from multiple languages are accepted and aggregated after upload.
+# Python coverage, as well as PowerShell and Python stubs, can all be uploaded.
+
+set -o pipefail -eu
+
+output_path="$1"
+
+curl --silent --show-error https://codecov.io/bash > codecov.sh
+
+for file in "${output_path}"/reports/coverage*.xml; do
+ name="${file}"
+ name="${name##*/}" # remove path
+ name="${name##coverage=}" # remove 'coverage=' prefix if present
+ name="${name%.xml}" # remove '.xml' suffix
+
+ bash codecov.sh \
+ -f "${file}" \
+ -n "${name}" \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+done
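A Python sketch of the report-name derivation above, applied to a hypothetical report path; the directory part, an optional 'coverage=' prefix, and the '.xml' suffix are stripped before upload:

# Same transformation as the bash parameter expansions above.
path = 'tests/output/reports/coverage=units.xml'   # hypothetical report file
name = path.rsplit('/', 1)[-1]                     # remove path -> coverage=units.xml
if name.startswith('coverage='):                   # remove 'coverage=' prefix if present
    name = name[len('coverage='):]
if name.endswith('.xml'):                          # remove '.xml' suffix
    name = name[:-len('.xml')]
print(name)  # units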
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
new file mode 100755
index 00000000..1bd91bdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
+
+set -o pipefail -eu
+
+PATH="${PWD}/bin:${PATH}"
+
+if ! ansible-test --help >/dev/null 2>&1; then
+ # Install the devel version of ansible-test for generating code coverage reports.
+ # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
+    # Since a version of ansible-test is required that can work with the output of multiple older releases, the devel version is used.
+ pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+fi
+
+ansible-test coverage xml --stub --venv --venv-system-site-packages --color -v
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh
new file mode 100755
index 00000000..a947fdf0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Configure the test environment and run the tests.
+
+set -o pipefail -eu
+
+entry_point="$1"
+test="$2"
+read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
+
+export COMMIT_MESSAGE
+export COMPLETE
+export COVERAGE
+export IS_PULL_REQUEST
+
+if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
+ IS_PULL_REQUEST=true
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
+else
+ IS_PULL_REQUEST=
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
+fi
+
+COMPLETE=
+COVERAGE=
+
+if [ "${BUILD_REASON}" = "Schedule" ]; then
+ COMPLETE=yes
+
+ if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
+ COVERAGE=yes
+ fi
+fi
+
+"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
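A sketch of the COMPLETE/COVERAGE decision above, using hypothetical build variables; coverage is only enabled for scheduled builds whose source branch appears in the space-separated coverageBranches value:

# Hypothetical Azure Pipelines build variables.
build_reason = 'Schedule'
build_source_branch_name = 'main'
coverage_branches = 'main'.split()  # "$3", i.e. $(coverageBranches), space separated

complete = build_reason == 'Schedule'
coverage = complete and build_source_branch_name in coverage_branches
print(complete, coverage)  # True True -> COMPLETE=yes and COVERAGE=yes are exported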
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py
new file mode 100755
index 00000000..5e8eb8d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def main():
+ """Main program entry point."""
+ start = time.time()
+
+ sys.stdin.reconfigure(errors='surrogateescape')
+ sys.stdout.reconfigure(errors='surrogateescape')
+
+ for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
new file mode 100644
index 00000000..1864e444
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
@@ -0,0 +1,39 @@
+# This template adds a job for processing code coverage data.
+# It will upload results to Azure Pipelines and codecov.io.
+# Use it from a job stage that completes after all other jobs have completed.
+# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
+
+jobs:
+ - job: Coverage
+ displayName: Code Coverage
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - task: DownloadPipelineArtifact@2
+ displayName: Download Coverage Data
+ inputs:
+ path: coverage/
+ patterns: "Coverage */*=coverage.combined"
+ - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
+ displayName: Combine Coverage Data
+ - bash: .azure-pipelines/scripts/report-coverage.sh
+ displayName: Generate Coverage Report
+ condition: gt(variables.coverageFileCount, 0)
+ - task: PublishCodeCoverageResults@1
+ inputs:
+ codeCoverageTool: Cobertura
+ # Azure Pipelines only accepts a single coverage data file.
+ # That means only Python or PowerShell coverage can be uploaded, but not both.
+ # Set the "pipelinesCoverage" variable to determine which type is uploaded.
+ # Use "coverage" for Python and "coverage-powershell" for PowerShell.
+ summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
+ displayName: Publish to Azure Pipelines
+ condition: gt(variables.coverageFileCount, 0)
+ - bash: .azure-pipelines/scripts/publish-codecov.sh "$(outputPath)"
+ displayName: Publish to codecov.io
+ condition: gt(variables.coverageFileCount, 0)
+ continueOnError: true
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml
new file mode 100644
index 00000000..4e9555dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml
@@ -0,0 +1,55 @@
+# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
+# If this matrix template does not provide the required functionality, consider using the test template directly instead.
+
+parameters:
+ # A required list of dictionaries, one per test target.
+ # Each item in the list must contain a "test" or "name" key.
+ # Both may be provided. If one is omitted, the other will be used.
+ - name: targets
+ type: object
+
+ # An optional list of values which will be used to multiply the targets list into a matrix.
+ # Values can be strings or numbers.
+ - name: groups
+ type: object
+ default: []
+
+ # An optional format string used to generate the job name.
+ # - {0} is the name of an item in the targets list.
+ - name: nameFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to generate the test name.
+ # - {0} is the name of an item in the targets list.
+ - name: testFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to add the group to the job name.
+ # {0} is the formatted name of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: nameGroupFormat
+ type: string
+ default: "{0} - {{1}}"
+
+ # An optional format string used to add the group to the test name.
+ # {0} is the formatted test of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: testGroupFormat
+ type: string
+ default: "{0}/{{1}}"
+
+jobs:
+ - template: test.yml
+ parameters:
+ jobs:
+ - ${{ if eq(length(parameters.groups), 0) }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
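The doubled braces in the group format strings survive the first format() call and are only consumed by the second; a Python sketch of that two-stage expansion, using the default testGroupFormat above and values from the Docker devel stage:

test_format = 'devel/linux/{0}'   # testFormat passed in by the Docker devel stage
test_group_format = '{0}/{{1}}'   # default testGroupFormat above

intermediate = test_group_format.format(test_format)  # 'devel/linux/{0}/{1}'
test = intermediate.format('centos7', 3)              # 'devel/linux/centos7/3'
print(test)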
diff --git a/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/test.yml b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/test.yml
new file mode 100644
index 00000000..5250ed80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.azure-pipelines/templates/test.yml
@@ -0,0 +1,45 @@
+# This template uses the provided list of jobs to create one or more test jobs.
+# It can be used directly if needed, or through the matrix template.
+
+parameters:
+ # A required list of dictionaries, one per test job.
+  # Each item in the list must contain "test" and "name" keys.
+ - name: jobs
+ type: object
+
+jobs:
+ - ${{ each job in parameters.jobs }}:
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
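A short sketch of the nested replace() expressions above, which turn a test string into a valid job identifier:

# '/', '.' and '-' in the test string become '_' in the job id.
test = 'devel/units/3.8/1'
job_id = 'test_' + test.replace('/', '_').replace('.', '_').replace('-', '_')
print(job_id)  # test_devel_units_3_8_1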
diff --git a/collections-debian-merged/ansible_collections/community/general/.github/BOTMETA.yml b/collections-debian-merged/ansible_collections/community/general/.github/BOTMETA.yml
new file mode 100644
index 00000000..23575fd2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.github/BOTMETA.yml
@@ -0,0 +1,1126 @@
+automerge: true
+files:
+ changelogs/fragments/:
+ support: community
+ $actions:
+ labels: action
+ $actions/aireos.py:
+ labels: aireos cisco networking
+ $actions/ironware.py:
+ maintainers: paulquack
+ labels: ironware networking
+ $actions/shutdown.py:
+ maintainers: nitzmahone samdoran aminvakil
+ $becomes/:
+ labels: become
+ $callbacks/:
+ labels: callbacks
+ $callbacks/say.py:
+ notify: chris-short
+ maintainers: $team_macos
+ labels: macos say
+ keywords: brew cask darwin homebrew macosx macports osx
+ $callbacks/stderr.py:
+ maintainers: ysn2233
+ labels: stderr
+ $callbacks/sumologic.py:
+ maintainers: ryancurrah
+ labels: sumologic
+ $callbacks/syslog_json.py:
+ maintainers: imjoseangel
+ $callbacks/unixy.py:
+ maintainers: akatch
+ labels: unixy
+ $connections/docker.py:
+ maintainers: $team_docker
+ labels: cloud docker
+ ignore: cove
+ supershipit: felixfontein
+ $connections/:
+ labels: connections
+ $connections/kubectl.py:
+ maintainers: chouseknecht fabianvf flaper87 maxamillion
+ labels: k8s kubectl
+ $connections/lxd.py:
+ maintainers: mattclay
+ labels: lxd
+ $connections/oc.py:
+ maintainers: chouseknecht fabianvf flaper87 maxamillion
+ labels: oc
+ $connections/saltstack.py:
+ labels: saltstack
+ $doc_fragments/:
+ labels: docs_fragments
+ $doc_fragments/docker.py:
+ maintainers: $team_docker
+ labels: cloud docker
+ ignore: cove
+ supershipit: felixfontein
+ $doc_fragments/gcp.py:
+ maintainers: $team_google
+ labels: gcp
+ supershipit: erjohnso rambleraptor
+ $doc_fragments/hetzner.py:
+ labels: hetzner
+ $doc_fragments/hpe3par.py:
+ maintainers: farhan7500 gautamphegde
+ labels: hpe3par
+ $doc_fragments/hwc.py:
+ maintainers: $team_huawei
+ labels: hwc
+ $doc_fragments/nomad.py:
+ maintainers: chris93111
+ $doc_fragments/postgres.py:
+ maintainers: $team_postgresql
+ labels: postgres postgresql
+ keywords: database postgres postgresql
+ $doc_fragments/xenserver.py:
+ maintainers: bvitnik
+ labels: xenserver
+ $filters/dict_kv.py:
+ maintainers: giner
+ $filters/time.py:
+ maintainers: resmo
+ $filters/jc.py:
+ maintainers: kellyjonbrazil
+ $httpapis/:
+ maintainers: $team_networking
+ labels: networking
+ $httpapis/ftd.py:
+ maintainers: $team_networking annikulin
+ labels: cisco ftd networking
+ keywords: firepower ftd
+ $inventories/:
+ labels: inventories
+ $inventories/docker_machine.py:
+ maintainers: $team_docker
+ labels: cloud docker
+ ignore: cove
+ supershipit: felixfontein
+ $inventories/docker_swarm.py:
+ maintainers: $team_docker morph027
+ labels: cloud docker docker_swarm
+ ignore: cove
+ supershipit: felixfontein
+ $inventories/linode.py:
+ maintainers: $team_linode
+ labels: cloud linode
+ keywords: linode dynamic inventory script
+ $inventories/scaleway.py:
+ maintainers: $team_scaleway
+ labels: cloud scaleway
+ $lookups/:
+ labels: lookups
+ $lookups/onepass:
+ maintainers: samdoran
+ labels: onepassword
+ $lookups/conjur_variable.py:
+ notify: cyberark-bizdev
+ maintainers: $team_cyberark_conjur
+ labels: conjur_variable
+ $lookups/cyberarkpassword.py:
+ notify: cyberark-bizdev
+ labels: cyberarkpassword
+ $lookups/dig.py:
+ maintainers: jpmens
+ labels: dig
+ $lookups/tss.py:
+ maintainers: amigus
+ $lookups/dsv.py:
+ maintainers: amigus
+ $lookups/hashi_vault.py:
+ labels: hashi_vault
+ maintainers: briantist
+ $lookups/manifold.py:
+ maintainers: galanoff
+ labels: manifold
+ $lookups/nios:
+ maintainers: $team_networking sganesh-infoblox
+ labels: infoblox networking
+ $module_utils/:
+ labels: module_utils
+ $module_utils/docker/:
+ maintainers: $team_docker
+ labels: cloud
+ ignore: cove
+ supershipit: felixfontein
+ $module_utils/gitlab.py:
+ notify: jlozadad
+ maintainers: $team_gitlab
+ labels: gitlab
+ keywords: gitlab source_control
+ $module_utils/hwc_utils.py:
+ maintainers: $team_huawei
+ labels: huawei hwc_utils networking
+ keywords: cloud huawei hwc
+ $module_utils/identity/keycloak/keycloak.py:
+ maintainers: $team_keycloak
+ $module_utils/ipa.py:
+ maintainers: $team_ipa
+ labels: ipa
+ $module_utils/kubevirt.py:
+ maintainers: $team_kubevirt
+ labels: cloud kubevirt
+ keywords: kubevirt
+ $module_utils/manageiq.py:
+ maintainers: $team_manageiq
+ labels: manageiq
+ $module_utils/memset.py:
+ maintainers: glitchcrab
+ labels: cloud memset
+ $module_utils/module_helper.py:
+ maintainers: russoz
+ labels: module_helper
+ $module_utils/net_tools/nios/api.py:
+ maintainers: $team_networking sganesh-infoblox
+ labels: infoblox networking
+ $module_utils/oracle/oci_utils.py:
+ maintainers: $team_oracle
+ labels: cloud
+ $module_utils/postgres.py:
+ maintainers: $team_postgresql
+ labels: postgres postgresql
+ keywords: database postgres postgresql
+ $module_utils/pure.py:
+ maintainers: $team_purestorage
+ labels: pure pure_storage
+ $module_utils/redfish_utils.py:
+ maintainers: $team_redfish
+ labels: redfish_utils
+ $module_utils/remote_management/dellemc/: rajeevarakkal
+ $module_utils/remote_management/lxca/common.py: navalkp prabhosa
+ $module_utils/scaleway.py:
+ maintainers: $team_scaleway
+ labels: cloud scaleway
+ $module_utils/storage/hpe3par/hpe3par.py: farhan7500 gautamphegde
+ $module_utils/utm_utils.py:
+ maintainers: $team_e_spirit
+ labels: utm_utils
+ $module_utils/xenserver.py:
+ maintainers: bvitnik
+ labels: xenserver
+ $modules/cloud/alicloud/:
+ maintainers: xiaozhu36
+ $modules/cloud/atomic/atomic_container.py:
+ maintainers: giuseppe krsacme
+ $modules/cloud/atomic/:
+ maintainers: krsacme
+ $modules/cloud/centurylink/:
+ maintainers: clc-runner
+ $modules/cloud/dimensiondata/dimensiondata_network.py:
+ maintainers: aimonb tintoy
+ labels: dimensiondata_network
+ $modules/cloud/dimensiondata/dimensiondata_vlan.py:
+ maintainers: tintoy
+ $modules/cloud/docker/:
+ maintainers: $team_docker
+ ignore: cove
+ supershipit: felixfontein
+ $modules/cloud/docker/docker_compose.py:
+ maintainers: sluther
+ labels: docker_compose
+ $modules/cloud/docker/docker_config.py:
+ maintainers: ushuz
+ $modules/cloud/docker/docker_container.py:
+ maintainers: dusdanig softzilla zfil
+ ignore: ThomasSteinbach cove joshuaconner
+ $modules/cloud/docker/docker_image.py:
+ maintainers: softzilla ssbarnea
+ $modules/cloud/docker/docker_login.py:
+ maintainers: olsaki
+ $modules/cloud/docker/docker_network.py:
+ maintainers: keitwb
+ labels: docker_network
+ $modules/cloud/docker/docker_stack_task_info.py:
+ maintainers: imjoseangel
+ $modules/cloud/docker/docker_swarm_service.py:
+ maintainers: hannseman
+ labels: docker_swarm_service
+ $modules/cloud/docker/docker_swarm_service_info.py:
+ maintainers: hannseman
+ $modules/cloud/docker/docker_volume.py:
+ maintainers: agronholm
+ $modules/cloud/google/:
+ maintainers: $team_google
+ ignore: supertom
+ supershipit: $team_google
+ $modules/cloud/heroku/heroku_collaborator.py:
+ maintainers: marns93
+ $modules/cloud/huawei/:
+ maintainers: $team_huawei huaweicloud
+ keywords: cloud huawei hwc
+ $modules/cloud/kubevirt/:
+ maintainers: $team_kubevirt kubevirt
+ keywords: kubevirt
+ $modules/cloud/linode/:
+ maintainers: $team_linode
+ $modules/cloud/linode/linode.py:
+ maintainers: zbal
+ $modules/cloud/lxc/lxc_container.py:
+ maintainers: cloudnull
+ $modules/cloud/lxd/:
+ ignore: hnakamur
+ $modules/cloud/memset/:
+ maintainers: glitchcrab
+ $modules/cloud/misc/cloud_init_data_facts.py:
+ maintainers: resmo
+ $modules/cloud/misc/helm.py:
+ maintainers: flaper87
+ $modules/cloud/misc/proxmox.py:
+ maintainers: $team_virt UnderGreen
+ labels: proxmox virt
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ $modules/cloud/misc/proxmox_kvm.py:
+ maintainers: $team_virt helldorado
+ labels: proxmox_kvm virt
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ $modules/cloud/misc/proxmox_template.py:
+ maintainers: $team_virt UnderGreen
+ labels: proxmox_template virt
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ $modules/cloud/misc/rhevm.py:
+ maintainers: $team_virt TimothyVandenbrande
+ labels: rhevm virt
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ $modules/cloud/misc/:
+ ignore: ryansb
+ $modules/cloud/misc/terraform.py:
+ maintainers: m-yosefpor
+ $modules/cloud/misc/xenserver_facts.py:
+ maintainers: caphrim007 cheese
+ labels: xenserver_facts
+ ignore: andyhky
+ $modules/cloud/oneandone/:
+ maintainers: aajdinov edevenport
+ $modules/cloud/online/:
+ maintainers: sieben
+ $modules/cloud/opennebula/:
+ maintainers: $team_opennebula
+ $modules/cloud/opennebula/one_host.py:
+ maintainers: rvalle
+ $modules/cloud/oracle/oci_vcn.py:
+ maintainers: $team_oracle rohitChaware
+ $modules/cloud/ovh/:
+ maintainers: pascalheraud
+ $modules/cloud/ovh/ovh_monthly_billing.py:
+ maintainers: fraff
+ $modules/cloud/packet/packet_device.py:
+ maintainers: baldwinSPC t0mk teebes
+ $modules/cloud/packet/:
+ maintainers: nurfet-becirevic t0mk
+ $modules/cloud/packet/packet_sshkey.py:
+ maintainers: t0mk
+ $modules/cloud/profitbricks/:
+ maintainers: baldwinSPC
+ $modules/cloud/pubnub/pubnub_blocks.py:
+ maintainers: parfeon pubnub
+ $modules/cloud/rackspace/rax.py:
+ maintainers: omgjlk sivel
+ $modules/cloud/rackspace/:
+ ignore: ryansb sivel
+ $modules/cloud/rackspace/rax_clb.py:
+ maintainers: claco
+ $modules/cloud/rackspace/rax_clb_nodes.py:
+ maintainers: neuroid
+ $modules/cloud/rackspace/rax_clb_ssl.py:
+ maintainers: smashwilson
+ $modules/cloud/rackspace/rax_identity.py:
+ maintainers: claco
+ $modules/cloud/rackspace/rax_network.py:
+ maintainers: claco omgjlk
+ $modules/cloud/rackspace/rax_queue.py:
+ maintainers: claco
+ $modules/cloud/scaleway/:
+ maintainers: $team_scaleway
+ $modules/cloud/scaleway/scaleway_database_backup.py:
+ maintainers: guillaume_ro_fr
+ $modules/cloud/scaleway/scaleway_image_info.py:
+ maintainers: Spredzy
+ $modules/cloud/scaleway/scaleway_ip_info.py:
+ maintainers: Spredzy
+ $modules/cloud/scaleway/scaleway_organization_info.py:
+ maintainers: sieben
+ $modules/cloud/scaleway/scaleway_security_group.py:
+ maintainers: DenBeke
+ $modules/cloud/scaleway/scaleway_security_group_info.py:
+ maintainers: sieben
+ $modules/cloud/scaleway/scaleway_security_group_rule.py:
+ maintainers: DenBeke
+ $modules/cloud/scaleway/scaleway_volume.py:
+ labels: scaleway_volume
+ ignore: hekonsek
+ $modules/cloud/scaleway/scaleway_volume_info.py:
+ maintainers: Spredzy
+ $modules/cloud/smartos/:
+ maintainers: $team_solaris
+ labels: solaris
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ $modules/cloud/smartos/nictagadm.py:
+ maintainers: SmithX10
+ $modules/cloud/softlayer/sl_vm.py:
+ maintainers: mcltn
+ $modules/cloud/spotinst/spotinst_aws_elastigroup.py:
+ maintainers: talzur
+ $modules/cloud/univention/:
+ maintainers: keachi
+ $modules/cloud/webfaction/:
+ maintainers: quentinsf
+ $modules/cloud/xenserver/:
+ maintainers: bvitnik
+ $modules/clustering/consul/:
+ maintainers: $team_consul
+ $modules/clustering/etcd3.py:
+ maintainers: evrardjp vfauth
+ $modules/clustering/nomad/:
+ maintainers: chris93111
+ $modules/clustering/pacemaker_cluster.py:
+ maintainers: matbu
+ $modules/clustering/znode.py:
+ maintainers: treyperry
+ $modules/database/aerospike/aerospike_migrations.py:
+ maintainers: Alb0t
+ $modules/database/influxdb/:
+ maintainers: kamsz
+ $modules/database/influxdb/influxdb_query.py:
+ maintainers: resmo
+ $modules/database/influxdb/influxdb_user.py:
+ maintainers: zhhuta
+ $modules/database/influxdb/influxdb_write.py:
+ maintainers: resmo
+ $modules/database/misc/elasticsearch_plugin.py:
+ maintainers: ThePixelDeveloper samdoran
+ $modules/database/misc/kibana_plugin.py:
+ maintainers: barryib
+ $modules/database/misc/odbc.py:
+ maintainers: john-westcott-iv
+ $modules/database/misc/redis.py:
+ maintainers: slok
+ $modules/database/misc/riak.py:
+ maintainers: drewkerrigan jsmartin
+ $modules/database/mssql/mssql_db.py:
+ maintainers: vedit Jmainguy kenichi-ogawa-1988
+ labels: mssql_db
+ $modules/database/postgresql/:
+ keywords: database postgres postgresql
+ labels: postgres postgresql
+ maintainers: $team_postgresql
+ $modules/database/postgresql/postgresql_ext.py:
+ maintainers: dschep strk
+ $modules/database/postgresql/postgresql_lang.py:
+ maintainers: jensdepuydt
+ $modules/database/postgresql/postgresql_privs.py:
+ maintainers: b6d
+ $modules/database/postgresql/postgresql_query.py:
+ maintainers: archf wrouesnel
+ $modules/database/postgresql/postgresql_tablespace.py:
+ maintainers: antoinell
+ $modules/database/vertica/:
+ maintainers: dareko
+ $modules/files/archive.py:
+ maintainers: bendoh
+ $modules/files/ini_file.py:
+ maintainers: jpmens noseka1
+ $modules/files/iso_extract.py:
+ maintainers: dagwieers jhoekx ribbons
+ $modules/files/read_csv.py:
+ maintainers: dagwieers
+ $modules/files/xattr.py:
+ maintainers: bcoca
+ labels: xattr
+ $modules/files/xml.py:
+ maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
+ labels: m:xml xml
+ ignore: magnus919
+ $modules/identity/onepassword_facts.py:
+ maintainers: Rylon
+ $modules/identity/ipa/:
+ maintainers: $team_ipa
+ $modules/identity/ipa/ipa_service.py:
+ maintainers: cprh
+ $modules/identity/ipa/ipa_vault.py:
+ maintainers: jparrill
+ $modules/identity/keycloak/:
+ maintainers: $team_keycloak
+ $modules/identity/keycloak/keycloak_group.py:
+ maintainers: adamgoossens
+ $modules/identity/onepassword_info.py:
+ maintainers: Rylon
+ $modules/identity/opendj/opendj_backendprop.py:
+ maintainers: dj-wasabi
+ $modules/monitoring/airbrake_deployment.py:
+ labels: airbrake_deployment
+ ignore: bpennypacker
+ $modules/monitoring/bigpanda.py:
+ maintainers: hkariti
+ $modules/monitoring/circonus_annotation.py:
+ maintainers: NickatEpic
+ $modules/monitoring/datadog/datadog_event.py:
+ maintainers: n0ts
+ labels: datadog_event
+ ignore: arturaz
+ $modules/monitoring/datadog/datadog_monitor.py:
+ maintainers: skornehl
+ $modules/monitoring/honeybadger_deployment.py:
+ maintainers: stympy
+ $modules/monitoring/icinga2_feature.py:
+ maintainers: nerzhul
+ $modules/monitoring/icinga2_host.py:
+ maintainers: t794104
+ $modules/monitoring/librato_annotation.py:
+ maintainers: Sedward
+ $modules/monitoring/logentries.py:
+ labels: logentries
+ ignore: ivanvanderbyl
+ $modules/monitoring/logstash_plugin.py:
+ maintainers: nerzhul
+ $modules/monitoring/monit.py:
+ maintainers: dstoflet brian-brazil snopoke
+ labels: monit
+ $modules/monitoring/nagios.py:
+ maintainers: tbielawa tgoetheyn
+ $modules/monitoring/newrelic_deployment.py:
+ maintainers: mcodd
+ $modules/monitoring/pagerduty.py:
+ maintainers: suprememoocow thaumos
+ labels: pagerduty
+ ignore: bpennypacker
+ $modules/monitoring/pagerduty_alert.py:
+ maintainers: ApsOps
+ $modules/monitoring/pagerduty_change.py:
+ maintainers: adamvaughan
+ $modules/monitoring/pagerduty_user.py:
+ maintainers: zanssa
+ $modules/monitoring/pingdom.py:
+ maintainers: thaumos
+ $modules/monitoring/rollbar_deployment.py:
+ maintainers: kavu
+ $modules/monitoring/sensu/sensu_check.py:
+ maintainers: andsens
+ $modules/monitoring/sensu/:
+ maintainers: dmsimard
+ $modules/monitoring/sensu/sensu_silence.py:
+ maintainers: smbambling
+ $modules/monitoring/sensu/sensu_subscription.py:
+ maintainers: andsens
+ $modules/monitoring/spectrum_device.py:
+ maintainers: orgito
+ $modules/monitoring/stackdriver.py:
+ maintainers: bwhaley
+ $modules/monitoring/statusio_maintenance.py:
+ maintainers: bhcopeland
+ $modules/monitoring/uptimerobot.py:
+ maintainers: nate-kingsley
+ $modules/net_tools/cloudflare_dns.py:
+ maintainers: mgruener
+ labels: cloudflare_dns
+ $modules/net_tools/dnsimple.py:
+ maintainers: drcapulet
+ $modules/net_tools/dnsmadeeasy.py:
+ maintainers: briceburg
+ $modules/net_tools/haproxy.py:
+ maintainers: ravibhure
+ $modules/net_tools/hetzner_failover_ip.py:
+ maintainers: felixfontein
+ $modules/net_tools/hetzner_failover_ip_info.py:
+ maintainers: felixfontein
+ $modules/net_tools/hetzner_firewall.py:
+ maintainers: felixfontein
+ $modules/net_tools/hetzner_firewall_info.py:
+ maintainers: felixfontein
+ $modules/net_tools/:
+ maintainers: nerzhul
+ $modules/net_tools/infinity/infinity.py:
+ maintainers: MeganLiu
+ $modules/net_tools/ip_netns.py:
+ maintainers: bregman-arie
+ $modules/net_tools/ipify_facts.py:
+ maintainers: resmo
+ $modules/net_tools/ipinfoio_facts.py:
+ maintainers: akostyuk
+ $modules/net_tools/ipwcli_dns.py:
+ maintainers: cwollinger
+ $modules/net_tools/ldap/ldap_attr.py:
+ maintainers: jtyr
+ $modules/net_tools/ldap/ldap_attrs.py:
+ maintainers: drybjed jtyr noles
+ $modules/net_tools/ldap/ldap_entry.py:
+ maintainers: jtyr
+ $modules/net_tools/ldap/ldap_passwd.py:
+ maintainers: KellerFuchs jtyr
+ $modules/net_tools/ldap/ldap_search.py:
+ maintainers: eryx12o45 jtyr
+ $modules/net_tools/lldp.py:
+ labels: lldp
+ ignore: andyhky
+ $modules/net_tools/netcup_dns.py:
+ maintainers: nbuchwitz
+ $modules/net_tools/omapi_host.py:
+ maintainers: amasolov
+ $modules/net_tools/nios/:
+ maintainers: $team_networking
+ labels: infoblox networking
+ $modules/net_tools/nios/nios_fixed_address.py:
+ maintainers: sjaiswal
+ $modules/net_tools/nios/nios_nsgroup.py:
+ maintainers: ebirn sjaiswal
+ $modules/net_tools/nios/nios_ptr_record.py:
+ maintainers: clementtrebuchet
+ $modules/net_tools/nios/nios_srv_record.py:
+ maintainers: brampling
+ $modules/net_tools/nios/nios_txt_record.py:
+ maintainers: coreywan
+ $modules/net_tools/nmcli.py:
+ maintainers: alcamie101
+ $modules/net_tools/snmp_facts.py:
+ maintainers: ogenstad ujwalkomarla
+ $modules/notification/osx_say.py:
+ maintainers: ansible mpdehaan
+ labels: _osx_say
+ deprecated: true
+ $modules/notification/bearychat.py:
+ maintainers: tonyseek
+ $modules/notification/campfire.py:
+ maintainers: fabulops
+ $modules/notification/catapult.py:
+ maintainers: Jmainguy
+ $modules/notification/cisco_spark.py:
+ maintainers: drew-russell
+ $modules/notification/flowdock.py:
+ maintainers: mcodd
+ $modules/notification/grove.py:
+ maintainers: zimbatm
+ $modules/notification/hipchat.py:
+ maintainers: pb8226 shirou
+ $modules/notification/irc.py:
+ maintainers: jpmens sivel
+ $modules/notification/jabber.py:
+ maintainers: bcoca
+ $modules/notification/logentries_msg.py:
+ maintainers: jcftang
+ $modules/notification/mail.py:
+ maintainers: dagwieers
+ $modules/notification/matrix.py:
+ maintainers: jcgruenhage
+ $modules/notification/mattermost.py:
+ maintainers: bjolivot
+ $modules/notification/mqtt.py:
+ maintainers: jpmens
+ $modules/notification/nexmo.py:
+ maintainers: sivel
+ $modules/notification/office_365_connector_card.py:
+ maintainers: marc-sensenich
+ $modules/notification/pushbullet.py:
+ maintainers: willybarro
+ $modules/notification/pushover.py:
+ maintainers: weaselkeeper
+ $modules/notification/rocketchat.py:
+ maintainers: Deepakkothandan
+ labels: rocketchat
+ ignore: ramondelafuente
+ $modules/notification/say.py:
+ maintainers: ansible mpdehaan
+ $modules/notification/sendgrid.py:
+ maintainers: makaimc
+ $modules/notification/slack.py:
+ maintainers: ramondelafuente
+ $modules/notification/syslogger.py:
+ maintainers: garbled1
+ $modules/notification/telegram.py:
+ maintainers: tyouxa
+ $modules/notification/twilio.py:
+ maintainers: makaimc
+ $modules/notification/typetalk.py:
+ maintainers: tksmd
+ $modules/packaging/language/bower.py:
+ maintainers: mwarkentin
+ $modules/packaging/language/bundler.py:
+ maintainers: thoiberg
+ $modules/packaging/language/composer.py:
+ maintainers: dmtrs resmo
+ $modules/packaging/language/cpanm.py:
+ maintainers: fcuny
+ $modules/packaging/language/easy_install.py:
+ maintainers: mattupstate
+ $modules/packaging/language/gem.py:
+ maintainers: ansible johanwiren
+ labels: gem
+ $modules/packaging/language/maven_artifact.py:
+ maintainers: tumbl3w33d turb
+ labels: maven_artifact
+ ignore: chrisisbeef
+ $modules/packaging/language/npm.py:
+ maintainers: shane-walker xcambar
+ labels: npm
+ ignore: chrishoffman
+ $modules/packaging/language/pear.py:
+ labels: pear
+ ignore: jle64
+ $modules/packaging/language/pip_package_info.py:
+ maintainers: bcoca matburt maxamillion
+ $modules/packaging/language/yarn.py:
+ maintainers: chrishoffman verkaufer
+ $modules/packaging/os/apk.py:
+ maintainers: tdtrask
+ labels: apk
+ ignore: kbrebanov
+ $modules/packaging/os/apt_rpm.py:
+ maintainers: evgkrsk
+ $modules/packaging/os/flatpak.py:
+ maintainers: $team_flatpak
+ $modules/packaging/os/flatpak_remote.py:
+ maintainers: $team_flatpak
+ $modules/packaging/os/pkg5:
+ maintainers: $team_solaris mavit
+ labels: pkg5 solaris
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ $modules/packaging/os/homebrew.py:
+ notify: chris-short
+ maintainers: $team_macos andrew-d
+ labels: homebrew macos
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ $modules/packaging/os/homebrew_cask.py:
+ notify: chris-short
+ maintainers: $team_macos enriclluelles
+ labels: homebrew_ macos
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ $modules/packaging/os/homebrew_tap.py:
+ notify: chris-short
+ maintainers: $team_macos
+ labels: homebrew_ macos
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ $modules/packaging/os/installp.py:
+ maintainers: $team_aix kairoaraujo
+ labels: aix installp
+ keywords: aix efix lpar wpar
+ $modules/packaging/os/layman.py:
+ maintainers: jirutka
+ $modules/packaging/os/macports.py:
+ notify: chris-short
+ maintainers: $team_macos jcftang
+ labels: macos macports
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ $modules/packaging/os/mas.py:
+ maintainers: lukasbestle mheap
+ $modules/packaging/os/openbsd_pkg.py:
+ maintainers: $team_bsd eest
+ labels: bsd openbsd_pkg
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ $modules/packaging/os/opkg.py:
+ maintainers: skinp
+ $modules/packaging/os/pacman.py:
+ maintainers: elasticdog indrajitr tchernomax
+ labels: pacman
+ ignore: elasticdog
+ $modules/packaging/os/pkgin.py:
+ maintainers: $team_solaris L2G jasperla szinck martinm82
+ labels: pkgin solaris
+ $modules/packaging/os/pkgng.py:
+ maintainers: $team_bsd bleader
+ labels: bsd pkgng
+ ignore: bleader
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ $modules/packaging/os/pkgutil.py:
+ maintainers: $team_solaris dermute
+ labels: pkgutil solaris
+ $modules/packaging/os/portage.py:
+ maintainers: Tatsh wltjr
+ labels: portage
+ ignore: sayap
+ $modules/packaging/os/portinstall.py:
+ maintainers: $team_bsd berenddeboer
+ labels: bsd portinstall
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ $modules/packaging/os/pulp_repo.py:
+ maintainers: sysadmind
+ $modules/packaging/os/redhat_subscription.py:
+ maintainers: barnabycourt alikins kahowell
+ labels: redhat_subscription
+ $modules/packaging/os/rhn_channel.py:
+ maintainers: vincentvdk alikins $team_rhn
+ labels: rhn_channel
+ $modules/packaging/os/rhn_register.py:
+ maintainers: jlaska $team_rhn
+ labels: rhn_register
+ $modules/packaging/os/rhsm_release.py:
+ maintainers: seandst
+ $modules/packaging/os/rhsm_repository.py:
+ maintainers: giovannisciortino
+ $modules/packaging/os/slackpkg.py:
+ maintainers: KimNorgaard
+ $modules/packaging/os/snap.py:
+ maintainers: angristan vcarceler
+ labels: snap
+ $modules/packaging/os/sorcery.py:
+ maintainers: vaygr
+ $modules/packaging/os/svr4pkg.py:
+ maintainers: $team_solaris brontitall
+ labels: solaris svr4pkg
+ $modules/packaging/os/swdepot.py:
+ maintainers: $team_hpux melodous
+ labels: hpux swdepot
+ keywords: hp-ux
+ $modules/packaging/os/swupd.py:
+ maintainers: hnanni albertomurillo
+ labels: swupd
+ $modules/packaging/os/urpmi.py:
+ maintainers: pmakowski
+ $modules/packaging/os/xbps.py:
+ maintainers: dinoocch the-maldridge
+ $modules/packaging/os/zypper.py:
+ maintainers: $team_suse
+ labels: zypper
+ ignore: dirtyharrycallahan robinro
+ $modules/packaging/os/zypper_repository.py:
+ maintainers: matze
+ $modules/remote_management/cobbler/:
+ maintainers: dagwieers
+ $modules/remote_management/dellemc/:
+ maintainers: rajeevarakkal
+ $modules/remote_management/dellemc/idrac_server_config_profile.py:
+ maintainers: jagadeeshnv
+ $modules/remote_management/dellemc/ome_device_info.py:
+ maintainers: Sajna-Shetty
+ $modules/remote_management/foreman/:
+ maintainers: ehelms ares ekohl xprazak2
+ $modules/remote_management/hpilo/:
+ maintainers: haad
+ ignore: dagwieers
+ $modules/remote_management/imc/imc_rest.py:
+ maintainers: dagwieers
+ labels: cisco
+ $modules/remote_management/ipmi/:
+ maintainers: bgaifullin cloudnull
+ $modules/remote_management/lxca/:
+ maintainers: navalkp prabhosa
+ $modules/remote_management/manageiq/:
+ labels: manageiq
+ maintainers: $team_manageiq
+ $modules/remote_management/manageiq/manageiq_group.py:
+ maintainers: evertmulder
+ $modules/remote_management/manageiq/manageiq_tenant.py:
+ maintainers: evertmulder
+ $modules/remote_management/oneview/oneview_datacenter_facts.py:
+ maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
+ $modules/remote_management/oneview/:
+ maintainers: adriane-cardozo fgbulsoni tmiotto
+ $modules/remote_management/oneview/oneview_datacenter_info.py:
+ maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
+ $modules/remote_management/oneview/oneview_fc_network.py:
+ maintainers: fgbulsoni
+ $modules/remote_management/oneview/oneview_fcoe_network.py:
+ maintainers: fgbulsoni
+ $modules/remote_management/redfish/:
+ maintainers: $team_redfish billdodd
+ ignore: jose-delarosa
+ $modules/remote_management/stacki/stacki_host.py:
+ maintainers: bsanders bbyhuy
+ labels: stacki_host
+ $modules/remote_management/wakeonlan.py:
+ maintainers: dagwieers
+ $modules/source_control/bitbucket/:
+ maintainers: catcombo
+ $modules/source_control/bzr.py:
+ maintainers: andreparames
+ $modules/source_control/git_config.py:
+ maintainers: djmattyg007 mgedmin
+ $modules/source_control/github/github_hooks.py:
+ maintainers: pcgentry
+ $modules/source_control/github/github_deploy_key.py:
+ maintainers: bincyber
+ $modules/source_control/github/github_issue.py:
+ maintainers: Akasurde
+ $modules/source_control/github/github_key.py:
+ maintainers: erydo
+ labels: github_key
+ ignore: erydo
+ $modules/source_control/github/github_release.py:
+ maintainers: adrianmoisey
+ $modules/source_control/github/:
+ maintainers: stpierre
+ $modules/source_control/gitlab/:
+ notify: jlozadad
+ maintainers: $team_gitlab
+ keywords: gitlab source_control
+ $modules/source_control/gitlab/gitlab_project_variable.py:
+ maintainers: markuman
+ $modules/source_control/gitlab/gitlab_runner.py:
+ maintainers: SamyCoenen
+ $modules/source_control/hg.py:
+ maintainers: yeukhon
+ $modules/storage/emc/emc_vnx_sg_member.py:
+ maintainers: remixtj
+ $modules/storage/glusterfs/:
+ maintainers: devyanikota
+ $modules/storage/glusterfs/gluster_peer.py:
+ maintainers: sac
+ $modules/storage/glusterfs/gluster_volume.py:
+ maintainers: rosmo
+ $modules/storage/hpe3par/ss_3par_cpg.py:
+ maintainers: farhan7500 gautamphegde
+ $modules/storage/ibm/:
+ maintainers: tzure
+ $modules/storage/infinidat/:
+ maintainers: vmalloc GR360RY
+ $modules/storage/netapp/:
+ maintainers: $team_netapp
+ $modules/storage/purestorage/:
+ maintainers: $team_purestorage
+ labels: pure_storage
+ $modules/storage/vexata/:
+ maintainers: vexata
+ $modules/storage/zfs/:
+ maintainers: $team_solaris
+ labels: solaris
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ $modules/storage/zfs/zfs.py:
+ maintainers: johanwiren
+ $modules/storage/zfs/zfs_delegate_admin.py:
+ maintainers: natefoo
+ $modules/system/python_requirements_facts.py:
+ maintainers: willthames
+ ignore: ryansb
+ $modules/system/aix:
+ maintainers: $team_aix
+ labels: aix
+ keywords: aix efix lpar wpar
+ $modules/system/alternatives.py:
+ maintainers: mulby
+ labels: alternatives
+ ignore: DavidWittman
+ $modules/system/awall.py:
+ maintainers: tdtrask
+ $modules/system/beadm.py:
+ maintainers: $team_solaris
+ labels: beadm solaris
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ $modules/system/capabilities.py:
+ maintainers: natefoo
+ $modules/system/cronvar.py:
+ maintainers: dougluce
+ $modules/system/crypttab.py:
+ maintainers: groks
+ $modules/system/dconf.py:
+ maintainers: azaghal
+ $modules/system/dpkg_divert.py:
+ maintainers: quidame
+ $modules/system/facter.py:
+ maintainers: ansible gamethis
+ labels: facter
+ $modules/system/filesystem.py:
+ maintainers: pilou- abulimov quidame
+ labels: filesystem
+ $modules/system/gconftool2.py:
+ maintainers: Akasurde kevensen
+ labels: gconftool2
+ $modules/system/interfaces_file.py:
+ maintainers: obourdon hryamzik
+ labels: interfaces_file
+ $modules/system/iptables_state.py:
+ maintainers: quidame
+ $modules/system/java_cert.py:
+ maintainers: haad
+ $modules/system/java_keystore.py:
+ maintainers: Mogztter
+ $modules/system/kernel_blacklist.py:
+ maintainers: matze
+ $modules/system/launchd.py:
+ maintainers: martinm82
+ $modules/system/lbu.py:
+ maintainers: kunkku
+ $modules/system/listen_ports_facts.py:
+ maintainers: ndavison
+ $modules/system/locale_gen.py:
+ maintainers: AugustusKling
+ $modules/system/lvg.py:
+ maintainers: abulimov
+ $modules/system/lvol.py:
+ maintainers: abulimov jhoekx
+ $modules/system/make.py:
+ maintainers: LinusU
+ $modules/system/mksysb.py:
+ maintainers: $team_aix
+ labels: aix mksysb
+ $modules/system/modprobe.py:
+ maintainers: jdauphant mattjeffery
+ labels: modprobe
+ ignore: stygstra
+ $modules/system/nosh.py:
+ maintainers: tacatac
+ $modules/system/ohai.py:
+ maintainers: ansible mpdehaan
+ labels: ohai
+ $modules/system/open_iscsi.py:
+ maintainers: srvg
+ $modules/system/openwrt_init.py:
+ maintainers: agaffney
+ $modules/system/osx_defaults.py:
+ notify: chris-short
+ maintainers: $team_macos notok
+ labels: macos osx_defaults
+ keywords: brew cask darwin homebrew macosx macports osx
+ $modules/system/pam_limits.py:
+ maintainers: giovannisciortino
+ labels: pam_limits
+ ignore: usawa
+ $modules/system/pamd.py:
+ maintainers: kevensen
+ $modules/system/parted.py:
+ maintainers: ColOfAbRiX rosowiecki jake2184
+ $modules/system/pids.py:
+ maintainers: saranyasridharan
+ $modules/system/puppet.py:
+ maintainers: nibalizer emonty
+ labels: puppet
+ $modules/system/python_requirements_info.py:
+ maintainers: willthames
+ ignore: ryansb
+ $modules/system/runit.py:
+ maintainers: jsumners
+ $modules/system/sefcontext.py:
+ maintainers: dagwieers
+ $modules/system/selinux_permissive.py:
+ maintainers: mscherer
+ $modules/system/selogin.py:
+ maintainers: bachradsusi dankeder jamescassell
+ $modules/system/seport.py:
+ maintainers: dankeder
+ $modules/system/solaris_zone.py:
+ maintainers: $team_solaris pmarkham
+ labels: solaris
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ $modules/system/svc.py:
+ maintainers: bcoca
+ $modules/system/syspatch.py:
+ maintainers: precurse
+ $modules/system/sysupgrade.py:
+ maintainers: precurse
+ $modules/system/timezone.py:
+ maintainers: indrajitr jasperla tmshn
+ $modules/system/ufw.py:
+ notify: felixfontein
+ maintainers: ahtik ovcharenko pyykkis
+ labels: ufw
+ $modules/system/vdo.py:
+ maintainers: bgurney-rh
+ $modules/system/xfconf.py:
+ maintainers: russoz jbenden
+ labels: xfconf
+ $modules/system/xfs_quota.py:
+ maintainers: bushvin
+ $modules/web_infrastructure/jenkins_job_facts.py:
+ maintainers: stpierre
+ $modules/web_infrastructure/nginx_status_facts.py:
+ maintainers: resmo
+ $modules/web_infrastructure/apache2_mod_proxy.py:
+ maintainers: oboukili
+ $modules/web_infrastructure/apache2_module.py:
+ maintainers: berendt n0trax robinro
+ $modules/web_infrastructure/deploy_helper.py:
+ maintainers: ramondelafuente
+ $modules/web_infrastructure/django_manage.py:
+ maintainers: scottanderson42 russoz tastychutney
+ labels: django_manage
+ $modules/web_infrastructure/ejabberd_user.py:
+ maintainers: privateip
+ $modules/web_infrastructure/gunicorn.py:
+ maintainers: agmezr
+ $modules/web_infrastructure/htpasswd.py:
+ maintainers: ansible
+ labels: htpasswd
+ $modules/web_infrastructure/jboss.py:
+ maintainers: $team_jboss jhoekx
+ labels: jboss
+ $modules/web_infrastructure/jenkins_job.py:
+ maintainers: sermilrod
+ $modules/web_infrastructure/jenkins_job_info.py:
+ maintainers: stpierre
+ $modules/web_infrastructure/jenkins_plugin.py:
+ maintainers: jtyr
+ $modules/web_infrastructure/jenkins_script.py:
+ maintainers: hogarthj
+ $modules/web_infrastructure/jira.py:
+ maintainers: Slezhuk tarka
+ labels: jira
+ $modules/web_infrastructure/nginx_status_info.py:
+ maintainers: resmo
+ $modules/web_infrastructure/:
+ maintainers: nerzhul
+ $modules/web_infrastructure/sophos_utm/:
+ maintainers: $team_e_spirit
+ keywords: sophos utm
+ $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py:
+ maintainers: $team_e_spirit stearz
+ keywords: sophos utm
+ $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
+ maintainers: $team_e_spirit RickS-C137
+ keywords: sophos utm
+ $modules/web_infrastructure/supervisorctl.py:
+ maintainers: inetfuture mattupstate
+ $modules/web_infrastructure/taiga_issue.py:
+ maintainers: lekum
+#########################
+ tests/:
+ labels: tests
+ tests/unit/:
+ labels: unit
+ support: community
+ tests/integration:
+ labels: integration
+ support: community
+ tests/utils/:
+ maintainers: gundalow
+ labels: unit
+macros:
+ actions: plugins/action
+ becomes: plugins/become
+ callbacks: plugins/callback
+ cliconfs: plugins/cliconf
+ connections: plugins/connection
+ doc_fragments: plugins/doc_fragments
+ filters: plugins/filter
+ httpapis: plugins/httpapi
+ inventories: plugins/inventory
+ lookups: plugins/lookup
+ module_utils: plugins/module_utils
+ modules: plugins/modules
+ terminals: plugins/terminal
+ team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
+ team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
+ team_consul: colin-nolan sgargan
+ team_cyberark_conjur: jvanderhoof ryanprior
+ team_docker: DBendit WojciechowskiPiotr akshay196 danihodovic dariko felixfontein jwitko kassiansun tbouvet chouseknecht
+ team_e_spirit: MatrixCrawler getjack
+ team_flatpak: JayKayy oolongbrothers
+ team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman
+ team_google: erjohnso rambleraptor
+ team_hpux: bcoca davx8342
+ team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
+ team_ipa: Akasurde Nosmoht fxfitz
+ team_jboss: Wolfant jairojunior wbrefvem
+ team_keycloak: eikef ndclt
+ team_kubevirt: machacekondra mmazur pkliczewski
+ team_linode: InTheCloudDan decentral1se displague rmcintosh
+ team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
+ team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
+ team_netapp: amit0701 carchi8py hulquest lmprice lonico ndswartz schmots1
+ team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
+ team_opennebula: ilicmilan meerkampdvv rsmontero xorel
+ team_oracle: manojmeda mross22 nalsaber
+ team_postgresql: Andersson007 Dorn- andytom jbscalia kostiantyn-nemchenko matburt nerzhul sebasmannem tcraxs ilicmilan
+ team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
+ team_redfish: billdodd mraineri tomasg2012
+ team_rhn: FlossWare alikins barnabycourt vritant
+ team_scaleway: QuentinBrosse abarbare jerome-quere kindermoumoute remyleone sieben
+ team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
+ team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom
+ team_virt: joshainglis karmab Aversiste
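The ``macros`` block above is what keeps the entries earlier in this file short: path prefixes such as ``$modules`` expand to real plugin directories, and ``$team_*`` aliases expand to the maintainers listed here. For example, the existing ``utm_proxy_auth_profile.py`` entry resolves as follows (illustrative expansion only, not part of the committed file):

    $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py:
      maintainers: $team_e_spirit stearz
    # expands to plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py,
    # maintained by MatrixCrawler, getjack and stearz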
diff --git a/collections-debian-merged/ansible_collections/community/general/.github/patchback.yml b/collections-debian-merged/ansible_collections/community/general/.github/patchback.yml
new file mode 100644
index 00000000..33ad6e84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.github/patchback.yml
@@ -0,0 +1,5 @@
+---
+backport_branch_prefix: patchback/backports/
+backport_label_prefix: backport-
+target_branch_prefix: stable-
+...
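Taken together, these three prefixes describe how backports are wired up: a merged pull request labelled with the ``backport-`` prefix is proposed against a branch with the ``stable-`` prefix, and the bot's working branches live under ``patchback/backports/``. A hypothetical illustration (the exact working-branch suffix is an assumption about the Patchback bot, not part of this file):

    label:        backport-1.3
    target:       stable-1.3
    work branch:  patchback/backports/stable-1.3/<merged-sha>/pr-<number>   # suffix format is an assumption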
diff --git a/collections-debian-merged/ansible_collections/community/general/.github/settings.yml b/collections-debian-merged/ansible_collections/community/general/.github/settings.yml
new file mode 100644
index 00000000..8a5b8d32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.github/settings.yml
@@ -0,0 +1,6 @@
+# DO NOT MODIFY
+
+# Settings: https://probot.github.io/apps/settings/
+# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml
+
+_extends: ".github"
diff --git a/collections-debian-merged/ansible_collections/community/general/.github/workflows/codeql-analysis.yml b/collections-debian-merged/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..81884ac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,49 @@
+name: "Code scanning - action"
+
+on:
+ schedule:
+ - cron: '26 19 * * 1'
+
+jobs:
+ CodeQL-Build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+ with:
+ # We must fetch at least the immediate parents so that if this is
+ # a pull request then we can checkout the head.
+ fetch-depth: 2
+
+ # If this run was triggered by a pull request event, then checkout
+ # the head of the pull request instead of the merge commit.
+ - run: git checkout HEAD^2
+ if: ${{ github.event_name == 'pull_request' }}
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ # Override language selection by uncommenting this and choosing your languages
+ # with:
+ # languages: go, javascript, csharp, python, cpp, java
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
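Because this collection is almost entirely Python, the commented-out language override above could be narrowed explicitly; a minimal sketch (the committed workflow instead relies on CodeQL's automatic language detection):

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: python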
diff --git a/collections-debian-merged/ansible_collections/community/general/.gitignore b/collections-debian-merged/ansible_collections/community/general/.gitignore
new file mode 100644
index 00000000..c6fc14ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/.gitignore
@@ -0,0 +1,387 @@
+
+# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
+# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
+
+### dotenv ###
+.env
+
+### Emacs ###
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+
+# Org-mode
+.org-id-locations
+*_archive
+
+# flymake-mode
+*_flymake.*
+
+# eshell files
+/eshell/history
+/eshell/lastdir
+
+# elpa packages
+/elpa/
+
+# reftex files
+*.rel
+
+# AUCTeX auto folder
+/auto/
+
+# cask packages
+.cask/
+dist/
+
+# Flycheck
+flycheck_*.el
+
+# server auth directory
+/server/
+
+# projectiles files
+.projectile
+
+# directory configuration
+.dir-locals.el
+
+# network security
+/network-security.data
+
+
+### Git ###
+# Created by git for backups. To disable backups in Git:
+# $ git config --global mergetool.keepBackup false
+*.orig
+
+# Created by git when using merge tools for conflicts
+*.BACKUP.*
+*.BASE.*
+*.LOCAL.*
+*.REMOTE.*
+*_BACKUP_*.txt
+*_BASE_*.txt
+*_LOCAL_*.txt
+*_REMOTE_*.txt
+
+#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!#
+
+### Linux ###
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### PyCharm+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### PyCharm+all Patch ###
+# Ignores the whole .idea folder and all .iml files
+# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
+
+.idea/
+
+# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
+
+*.iml
+modules.xml
+.idea/misc.xml
+*.ipr
+
+# Sonarlint plugin
+.idea/sonarlint
+
+### pydev ###
+.pydevproject
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# Mr Developer
+.mr.developer.cfg
+.project
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+### Vim ###
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+### WebStorm ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+
+# Generated files
+
+# Sensitive or high-churn files
+
+# Gradle
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+
+# Mongo Explorer plugin
+
+# File-based project format
+
+# IntelliJ
+
+# mpeltonen/sbt-idea plugin
+
+# JIRA plugin
+
+# Cursive Clojure plugin
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+
+# Editor-based Rest Client
+
+# Android studio 3.1+ serialized cache file
+
+### WebStorm Patch ###
+# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
+
+# *.iml
+# modules.xml
+# .idea/misc.xml
+# *.ipr
+
+# Sonarlint plugin
+.idea/**/sonarlint/
+
+# SonarQube Plugin
+.idea/**/sonarIssues.xml
+
+# Markdown Navigator plugin
+.idea/**/markdown-navigator.xml
+.idea/**/markdown-navigator/
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
diff --git a/collections-debian-merged/ansible_collections/community/general/CHANGELOG.rst b/collections-debian-merged/ansible_collections/community/general/CHANGELOG.rst
new file mode 100644
index 00000000..639cf881
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/CHANGELOG.rst
@@ -0,0 +1,1108 @@
+===============================
+Community General Release Notes
+===============================
+
+.. contents:: Topics
+
+
+v1.3.6
+======
+
+Release Summary
+---------------
+
+Regular bugfix and security bugfix (potential information leaks in multiple modules, CVE-2021-20191) release.
+
+Minor Changes
+-------------
+
+- scaleway modules and inventory plugin - update regions and zones to add the new ones (https://github.com/ansible-collections/community.general/pull/1690).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- utm_proxy_auth_profile - the ``frontend_cookie_secret`` return value now contains a placeholder string instead of the module's ``frontend_cookie_secret`` parameter (https://github.com/ansible-collections/community.general/pull/1736).
+
+Security Fixes
+--------------
+
+- dnsmadeeasy - mark the ``account_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1728).
+- gitlab_runner - mark the ``registration_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- hwc_ecs_instance - mark the ``admin_pass`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- ibm_sa_host - mark the ``iscsi_chap_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- keycloak_* modules - mark the ``auth_client_secret`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- keycloak_client - mark the ``registration_access_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- librato_annotation - mark the ``api_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- logentries_msg - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- module_utils/_netapp, na_ontap_gather_facts - enabled ``no_log`` for the options ``api_key`` and ``secret_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725).
+- module_utils/identity/keycloak, keycloak_client, keycloak_clienttemplate, keycloak_group - enabled ``no_log`` for the option ``auth_client_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725).
+- nios_nsgroup - mark the ``tsig_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- oneandone_firewall_policy, oneandone_load_balancer, oneandone_monitoring_policy, oneandone_private_network, oneandone_public_ip - mark the ``auth_token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- ovirt - mark the ``instance_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- ovirt - mark the ``instance_rootpw`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- pagerduty_alert - mark the ``api_key``, ``service_key`` and ``integration_key`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- pagerduty_change - mark the ``integration_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- pingdom - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- pulp_repo - mark the ``feed_client_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- rax_clb_ssl - mark the ``private_key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- redfish_command - mark the ``update_creds.password`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- rollbar_deployment - mark the ``token`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- spotinst_aws_elastigroup - mark the ``multai_token`` and ``token`` parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- stackdriver - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+- utm_proxy_auth_profile - enabled ``no_log`` for the option ``frontend_cookie_secret`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725).
+- utm_proxy_auth_profile - mark the ``frontend_cookie_secret`` parameter as ``no_log`` to avoid leakage of secrets. This causes the ``utm_proxy_auth_profile`` return value to no longer contain the correct value, but a placeholder (https://github.com/ansible-collections/community.general/pull/1736).
+
+Bugfixes
+--------
+
+- docker connection plugin - fix Docker version parsing, as some docker versions have a leading ``v`` in the output of the command ``docker version --format "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+- filesystem - do not fail when ``resizefs=yes`` and ``fstype=xfs`` if there is nothing to do, even if the filesystem is not mounted. This only covers systems supporting access to unmounted XFS filesystems. Others will still fail (https://github.com/ansible-collections/community.general/issues/1457, https://github.com/ansible-collections/community.general/pull/1478).
+- gitlab_user - make updates to the ``isadmin``, ``password`` and ``confirm`` options of an already existing GitLab user work (https://github.com/ansible-collections/community.general/pull/1724).
+- parted - change the regex that decodes the partition size to better support different formats that parted uses. Change the regex that validates parted's version string (https://github.com/ansible-collections/community.general/pull/1695).
+- redfish_info module, redfish_utils module utils - add ``Name`` and ``Id`` properties to output of Redfish inventory commands (https://github.com/ansible-collections/community.general/issues/1650).
+- sensu-silence module - fix json parsing of sensu API responses on Python 3.5 (https://github.com/ansible-collections/community.general/pull/1703).
+
+v1.3.5
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- dnsmadeeasy - fix HTTP 400 errors when creating a TXT record (https://github.com/ansible-collections/community.general/issues/1237).
+- docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+- docker_image - fix crash on loading images with versions of Docker SDK for Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72, https://github.com/ansible-collections/community.docker/pull/73).
+- homebrew - add default search path for ``brew`` on Apple silicon hardware (https://github.com/ansible-collections/community.general/pull/1679).
+- homebrew_cask - add default search path for ``brew`` on Apple silicon hardware (https://github.com/ansible-collections/community.general/pull/1679).
+- homebrew_tap - add default search path for ``brew`` on Apple silicon hardware (https://github.com/ansible-collections/community.general/pull/1679).
+- lldp - use ``get_bin_path`` to locate the ``lldpctl`` executable (https://github.com/ansible-collections/community.general/pull/1643).
+- onepassword lookup plugin - updated to support password items, which place the password field directly in the payload's ``details`` attribute (https://github.com/ansible-collections/community.general/pull/1610).
+- passwordstore lookup plugin - fix compatibility with gopass when used with ``create=true``. While pass returns 1 on a non-existent password, gopass returns 10, or 11, depending on whether a similar named password was stored. We now just check standard output and that the return code is not zero (https://github.com/ansible-collections/community.general/pull/1589).
+- terraform - improve result code checking when executing terraform commands (https://github.com/ansible-collections/community.general/pull/1632).
+
+v1.3.4
+======
+
+Release Summary
+---------------
+
+Bugfix/security release that addresses CVE-2021-20180.
+
+Security Fixes
+--------------
+
+- bitbucket_pipeline_variable - **CVE-2021-20180** - hide user-sensitive information that is marked as ``secured`` from being logged to the console (https://github.com/ansible-collections/community.general/pull/1635).
+
+Bugfixes
+--------
+
+- npm - handle json decode exception while parsing command line output (https://github.com/ansible-collections/community.general/issues/1614).
+
+v1.3.3
+======
+
+Release Summary
+---------------
+
+Bugfix/security release that addresses CVE-2021-20178.
+
+Major Changes
+-------------
+
+- For community.general 2.0.0, the kubevirt modules will be moved to the `community.kubevirt <https://galaxy.ansible.com/community/kubevirt>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use kubevirt modules from this collection, you will need to adjust your playbooks and roles to use FQCNs starting with ``community.kubevirt.`` instead of ``community.general.``,
+ for example replace ``community.general.kubevirt_vm`` in a task by ``community.kubevirt.kubevirt_vm``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the kubevirt modules, you have to make sure to install the ``community.kubevirt`` collection as well.
+ If you are using FQCNs, for example ``community.general.kubevirt_vm`` instead of ``kubevirt_vm``, it will continue working, but we still recommend adjusting the FQCNs as well.
+
+Security Fixes
+--------------
+
+- snmp_facts - **CVE-2021-20178** - hide user-sensitive information such as ``privkey`` and ``authkey`` from being logged to the console (https://github.com/ansible-collections/community.general/pull/1621).
+
+Bugfixes
+--------
+
+- terraform - fix ``init_reconfigure`` option for proper CLI args (https://github.com/ansible-collections/community.general/pull/1620).
+
+v1.3.2
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Major Changes
+-------------
+
+- For community.general 2.0.0, the Google modules will be moved to the `community.google <https://galaxy.ansible.com/community/google>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use Google modules from this collection, you will need to adjust your playbooks and roles to use FQCNs starting with ``community.google.`` instead of ``community.general.``,
+ for example replace ``community.general.gcpubsub`` in a task by ``community.google.gcpubsub``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the Google modules, you have to make sure to install the ``community.google`` collection as well.
+ If you are using FQCNs, for example ``community.general.gcpubsub`` instead of ``gcpubsub``, it will continue working, but we still recommend adjusting the FQCNs as well.
+- For community.general 2.0.0, the OC connection plugin will be moved to the `community.okd <https://galaxy.ansible.com/community/okd>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use OC connection plugin from this collection, you will need to adjust your playbooks and roles to use FQCNs ``community.okd.oc`` instead of ``community.general.oc``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the OC connection plugin, you have to make sure to install the ``community.okd`` collection as well.
+ If you are using FQCNs, in other words ``community.general.oc`` instead of ``oc``, it will continue working, but we still recommend adjusting this FQCN as well.
+- For community.general 2.0.0, the hashi_vault lookup plugin will be moved to the `community.hashi_vault <https://galaxy.ansible.com/community/hashi_vault>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use hashi_vault lookup plugin from this collection, you will need to adjust your playbooks and roles to use FQCNs ``community.hashi_vault.hashi_vault`` instead of ``community.general.hashi_vault``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the hashi_vault lookup plugin, you have to make sure to install the ``community.hashi_vault`` collection as well.
+ If you are using FQCNs, in other words ``community.general.hashi_vault`` instead of ``hashi_vault``, it will continue working, but we still recommend adjusting this FQCN as well.
+
+Minor Changes
+-------------
+
+- homebrew_cask - Homebrew will be deprecating use of ``brew cask`` commands as of version 2.6.0, see https://brew.sh/2020/12/01/homebrew-2.6.0/. Added logic to stop using ``brew cask`` for brew version >= 2.6.0 (https://github.com/ansible-collections/community.general/pull/1481).
+- jira - added the traceback output to ``fail_json()`` calls deriving from exceptions (https://github.com/ansible-collections/community.general/pull/1536).
+
+Bugfixes
+--------
+
+- docker_image - if ``push=true`` is used with ``repository``, and the image does not need to be tagged, still push. This can happen if ``repository`` and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52, https://github.com/ansible-collections/community.docker/pull/53).
+- docker_image - report error when loading a broken archive that contains no image (https://github.com/ansible-collections/community.docker/issues/46, https://github.com/ansible-collections/community.docker/pull/55).
+- docker_image - report error when the loaded archive does not contain the specified image (https://github.com/ansible-collections/community.docker/issues/41, https://github.com/ansible-collections/community.docker/pull/55).
+- jira - ``fetch`` and ``search`` no longer indicate that something changed (https://github.com/ansible-collections/community.general/pull/1536).
+- jira - ensured parameter ``issue`` is mandatory for operation ``transition`` (https://github.com/ansible-collections/community.general/pull/1536).
+- jira - module no longer incorrectly reports change for information gathering operations (https://github.com/ansible-collections/community.general/pull/1536).
+- jira - replaced custom parameter validation with ``required_if`` (https://github.com/ansible-collections/community.general/pull/1536).
+- launchd - handle deprecated APIs like ``readPlist`` and ``writePlist`` in ``plistlib`` (https://github.com/ansible-collections/community.general/issues/1552).
+- ldap_search - the module no longer incorrectly reports a change (https://github.com/ansible-collections/community.general/issues/1040).
+- make - fixed ``make`` parameter used for check mode when running a non-GNU ``make`` (https://github.com/ansible-collections/community.general/pull/1574).
+- monit - add support for all monit service checks (https://github.com/ansible-collections/community.general/pull/1532).
+- nios_member - fix Python 3 compatibility with nios api ``member_normalize`` function (https://github.com/ansible-collections/community.general/issues/1526).
+- nmcli - remove ``bridge-slave`` from the list of IP-based connections (https://github.com/ansible-collections/community.general/issues/1500).
+- pamd - added logic to retain the comment line (https://github.com/ansible-collections/community.general/issues/1394).
+- passwordstore lookup plugin - always use explicit ``show`` command to retrieve password. This ensures compatibility with ``gopass`` and avoids problems when password names equal ``pass`` commands (https://github.com/ansible-collections/community.general/pull/1493).
+- rhn_channel - Python 2.7.5 fails if the certificate should not be validated. Fixed this by creating the correct ``ssl_context`` (https://github.com/ansible-collections/community.general/pull/470).
+- sendgrid - update documentation and warn user about sendgrid Python library version (https://github.com/ansible-collections/community.general/issues/1553).
+- syslogger - update ``syslog.openlog`` API call for older Python versions, and improve error handling (https://github.com/ansible-collections/community.general/issues/953).
+- yaml callback plugin - do not remove non-ASCII Unicode characters from multiline string output (https://github.com/ansible-collections/community.general/issues/1519).
+
+v1.3.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- bigpanda - removed the dynamic default for ``host`` param (https://github.com/ansible-collections/community.general/pull/1423).
+- bitbucket_pipeline_variable - change pagination logic for pipeline variable get API (https://github.com/ansible-collections/community.general/issues/1425).
+- cobbler inventory script - add Python 3 support (https://github.com/ansible-collections/community.general/issues/638).
+- docker_container - the validation for ``capabilities`` in ``device_requests`` was incorrect (https://github.com/ansible-collections/community.docker/issues/42, https://github.com/ansible-collections/community.docker/pull/43).
+- git_config - now raises an error for non-existent repository paths (https://github.com/ansible-collections/community.general/issues/630).
+- icinga2_host - fix returning error codes (https://github.com/ansible-collections/community.general/pull/335).
+- jira - provide error message raised from exception (https://github.com/ansible-collections/community.general/issues/1504).
+- json_query - handle ``AnsibleUnicode`` and ``AnsibleUnsafeText`` (https://github.com/ansible-collections/community.general/issues/320).
+- keycloak module_utils - provide meaningful error message to user when auth URL does not start with http or https (https://github.com/ansible-collections/community.general/issues/331).
+- ldap_entry - improvements in documentation, simplifications and replaced code with better ``AnsibleModule`` arguments (https://github.com/ansible-collections/community.general/pull/1516).
+- mas - fix ``invalid literal`` when no app can be found (https://github.com/ansible-collections/community.general/pull/1436).
+- nios_host_record - fix to remove ``aliases`` (CNAMES) for configuration comparison (https://github.com/ansible-collections/community.general/issues/1335).
+- osx_defaults - unquote values and unescape double quotes when reading array values (https://github.com/ansible-collections/community.general/pull/358).
+- profitbricks_nic - removed the dynamic default for ``name`` param (https://github.com/ansible-collections/community.general/pull/1423).
+- profitbricks_nic - replaced code with ``required`` and ``required_if`` (https://github.com/ansible-collections/community.general/pull/1423).
+- redfish_info module, redfish_utils module utils - correct ``PartNumber`` property name in Redfish ``GetMemoryInventory`` command (https://github.com/ansible-collections/community.general/issues/1483).
+- saltstack connection plugin - use ``hashutil.base64_decodefile`` to ensure that the file checksum is preserved (https://github.com/ansible-collections/community.general/pull/1472).
+- udm_user - removed the dynamic default for ``userexpiry`` param (https://github.com/ansible-collections/community.general/pull/1423).
+- utm_network_interface_address - changed param type from invalid 'boolean' to valid 'bool' (https://github.com/ansible-collections/community.general/pull/1423).
+- utm_proxy_exception - four parameters had elements types set as 'string' (invalid), changed to 'str' (https://github.com/ansible-collections/community.general/pull/1399).
+- vmadm - simplification of code (https://github.com/ansible-collections/community.general/pull/1415).
+- xfconf - add in missing return values that are specified in the documentation (https://github.com/ansible-collections/community.general/issues/1418).
+
+v1.3.0
+======
+
+Release Summary
+---------------
+
+This is the last minor 1.x.0 release. The next releases from the stable-1 branch will be 1.3.y patch releases.
+
+Major Changes
+-------------
+
+- For community.general 2.0.0, the Hetzner Robot modules will be moved to the `community.hrobot <https://galaxy.ansible.com/community/hrobot>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use Hetzner Robot modules from this collection, you will need to adjust your playbooks and roles to use FQCNs starting with ``community.hrobot.`` instead of ``community.general.hetzner_``,
+ for example replace ``community.general.hetzner_firewall_info`` in a task by ``community.hrobot.firewall_info``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the Hetzner Robot modules, you have to make sure to install the ``community.hrobot`` collection as well.
+ If you are using FQCNs, i.e. ``community.general.hetzner_failover_ip`` instead of ``hetzner_failover_ip``, it will continue working, but we still recommend adjusting the FQCNs as well.
+- For community.general 2.0.0, the ``docker`` modules and plugins will be moved to the `community.docker <https://galaxy.ansible.com/community/docker>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use ``docker`` content from this collection, you will need to adjust your playbooks and roles to use FQCNs starting with ``community.docker.`` instead of ``community.general.``,
+ for example replace ``community.general.docker_container`` in a task by ``community.docker.docker_container``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the ``docker`` content, you have to make sure to install the ``community.docker`` collection as well.
+ If you are using FQCNs, i.e. ``community.general.docker_container`` instead of ``docker_container``, it will continue working, but we still recommend adjusting the FQCNs as well.
+- For community.general 2.0.0, the ``postgresql`` modules and plugins will be moved to the `community.postgresql <https://galaxy.ansible.com/community/postgresql>`_ collection.
+ A redirection will be inserted so that users using ansible-base 2.10 or newer do not have to change anything.
+
+ If you use Ansible 2.9 and explicitly use ``postgresql`` content from this collection, you will need to adjust your playbooks and roles to use FQCNs starting with ``community.postgresql.`` instead of ``community.general.``,
+ for example replace ``community.general.postgresql_info`` in a task by ``community.postgresql.postgresql_info``.
+
+ If you use ansible-base and installed ``community.general`` manually and rely on the ``postgresql`` content, you have to make sure to install the ``community.postgresql`` collection as well.
+ If you are using FQCNs, i.e. ``community.general.postgresql_info`` instead of ``postgresql_info``, it will continue working, but we still recommend adjusting the FQCNs as well.
+- The community.general collection no longer depends on the ansible.posix collection (https://github.com/ansible-collections/community.general/pull/1157).
+
+Minor Changes
+-------------
+
+- Add new filter plugin ``dict_kv`` which returns a single key-value pair from two arguments. Useful for generating complex dictionaries without using loops. For example ``'value' | community.general.dict_kv('key')`` evaluates to ``{'key': 'value'}`` (https://github.com/ansible-collections/community.general/pull/1264); a task-level sketch follows this list.
+- archive - fix parameter types (https://github.com/ansible-collections/community.general/pull/1039).
+- consul - added support for tcp checks (https://github.com/ansible-collections/community.general/issues/1128).
+- datadog - mark ``notification_message`` as ``no_log`` (https://github.com/ansible-collections/community.general/pull/1338).
+- datadog_monitor - add ``include_tags`` option (https://github.com/ansible/ansible/issues/57441).
+- django_manage - renamed parameter ``app_path`` to ``project_path``, adding ``app_path`` and ``chdir`` as aliases (https://github.com/ansible-collections/community.general/issues/1044).
+- docker_container - now supports the ``device_requests`` option, which allows requesting additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748, https://github.com/ansible-collections/community.general/pull/1119).
+- docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+- docker_secret - add a warning when the secret does not have an ``ansible_key`` label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30, https://github.com/ansible-collections/community.docker/pull/31).
+- facter - added option for ``arguments`` (https://github.com/ansible-collections/community.general/pull/768).
+- hashi_vault - support ``VAULT_SKIP_VERIFY`` environment variable for determining whether to verify certificates (in addition to the ``validate_certs=`` flag supported today) (https://github.com/ansible-collections/community.general/pull/1024).
+- hashi_vault lookup plugin - add support for JWT authentication (https://github.com/ansible-collections/community.general/pull/1213).
+- infoblox inventory script - use stderr for reporting errors, and allow use of environment for configuration (https://github.com/ansible-collections/community.general/pull/436).
+- ipa_host - silence warning about non-secret ``random_password`` option not having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339).
+- ipa_user - silence warning about non-secret ``krbpasswordexpiration`` and ``update_password`` options not having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339).
+- linode_v4 - added support for Linode StackScript usage when creating instances (https://github.com/ansible-collections/community.general/issues/723).
+- lvol - fix idempotency issue when using lvol with ``%VG`` or ``%PVS`` size options and VG is fully allocated (https://github.com/ansible-collections/community.general/pull/229).
+- maven_artifact - added ``client_cert`` and ``client_key`` parameters to the maven_artifact module (https://github.com/ansible-collections/community.general/issues/1123).
+- module_helper - added ModuleHelper class and a couple of convenience tools for module developers (https://github.com/ansible-collections/community.general/pull/1322).
+- nmcli - refactor internal methods for simplicity and enhance reuse to support existing and future connection types (https://github.com/ansible-collections/community.general/pull/1113).
+- nmcli - remove Python DBus and GTK Object library dependencies (https://github.com/ansible-collections/community.general/issues/1112).
+- nmcli - the ``dns4``, ``dns4_search``, ``dns6``, and ``dns6_search`` arguments are retained internally as lists (https://github.com/ansible-collections/community.general/pull/1113).
+- odbc - added a parameter ``commit`` which allows users to disable the explicit commit after the execute call (https://github.com/ansible-collections/community.general/pull/1139).
+- openbsd_pkg - added ``snapshot`` option (https://github.com/ansible-collections/community.general/pull/965).
+- pacman - improve group expansion speed: query list of pacman groups once (https://github.com/ansible-collections/community.general/pull/349).
+- parted - add ``resize`` option to resize existing partitions (https://github.com/ansible-collections/community.general/pull/773).
+- passwordstore lookup plugin - added ``umask`` option to set the desired file permissions on creation. This is done via the ``PASSWORD_STORE_UMASK`` environment variable (https://github.com/ansible-collections/community.general/pull/1156).
+- pkgin - add support for installation of full versioned package names (https://github.com/ansible-collections/community.general/pull/1256).
+- pkgng - present the ``ignore_osver`` option to pkg (https://github.com/ansible-collections/community.general/pull/1243).
+- portage - add ``getbinpkgonly`` option, remove unnecessary note on internal portage behaviour (getbinpkg=yes), and remove the undocumented exclusiveness of the pkg options as portage makes no such restriction (https://github.com/ansible-collections/community.general/pull/1169).
+- postgresql_info - add ``in_recovery`` return value to show whether a service is in recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068).
+- postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002).
+- postgresql_query - add ``query_list`` and ``query_all_results`` return values (https://github.com/ansible-collections/community.general/issues/838).
+- proxmox - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850).
+- proxmox - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+- proxmox - extract common code and documentation (https://github.com/ansible-collections/community.general/pull/1331).
+- proxmox inventory plugin - ignore QEMU templates altogether instead of skipping the creation of the host in the inventory (https://github.com/ansible-collections/community.general/pull/1185).
+- proxmox_kvm - add cloud-init support (new options: ``cicustom``, ``cipassword``, ``citype``, ``ciuser``, ``ipconfig``, ``nameservers``, ``searchdomains``, ``sshkeys``) (https://github.com/ansible-collections/community.general/pull/797).
+- proxmox_kvm - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850).
+- proxmox_kvm - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+- proxmox_template - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+- proxmox_template - download proxmox appliance templates (pveam) (https://github.com/ansible-collections/community.general/pull/1046).
+- redis cache plugin - add redis sentinel functionality to cache plugin (https://github.com/ansible-collections/community.general/pull/1055).
+- redis cache plugin - make the redis cache keyset name configurable (https://github.com/ansible-collections/community.general/pull/1036).
+- terraform - add ``init_reconfigure`` option, which controls the ``-reconfigure`` flag (backend reconfiguration) (https://github.com/ansible-collections/community.general/pull/823).
+- xfconf - removed unnecessary second execution of ``xfconf-query`` (https://github.com/ansible-collections/community.general/pull/1305).
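As referenced in the ``dict_kv`` entry above, a minimal task-level sketch of the new filter (the play context is illustrative; the filter invocation mirrors the example in the entry):

    - name: Build a single-pair dictionary with dict_kv
      ansible.builtin.debug:
        msg: "{{ 'value' | community.general.dict_kv('key') }}"   # prints {'key': 'value'}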
+
+Deprecated Features
+-------------------
+
+- django_manage - the parameter ``liveserver`` relates to a no longer maintained third-party module for django. It is now deprecated, and will be removed in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/1154).
+- proxmox - the default of the new ``proxmox_default_behavior`` option will change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0. Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850).
+- proxmox_kvm - the default of the new ``proxmox_default_behavior`` option will change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0. Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850).
+- syspatch - deprecate the redundant ``apply`` argument (https://github.com/ansible-collections/community.general/pull/360).
+
+Bugfixes
+--------
+
+- apache2_module - amend existing module identifier workaround to also apply to updated Shibboleth modules (https://github.com/ansible-collections/community.general/issues/1379).
+- beadm - fixed issue "list object has no attribute split" (https://github.com/ansible-collections/community.general/issues/791).
+- capabilities - fix for a newer version of libcap release (https://github.com/ansible-collections/community.general/pull/1061).
+- composer - fix bug in command idempotence with composer v2 (https://github.com/ansible-collections/community.general/issues/1179).
+- docker_login - fix internal config file storage to handle credentials for more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
+- filesystem - add option ``state`` with default ``present``. When set to ``absent``, filesystem signatures are removed (https://github.com/ansible-collections/community.general/issues/355).
+- flatpak - use of the ``--non-interactive`` argument instead of ``-y`` when possible (https://github.com/ansible-collections/community.general/pull/1246).
+- gcp_storage_files lookup plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297).
+- gitlab_group - added description parameter to ``createGroup()`` call (https://github.com/ansible-collections/community.general/issues/138).
+- gitlab_group_variable - support for GitLab pagination limitation by iterating over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968).
+- gitlab_project_variable - support for GitLab pagination limitation by iterating over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968).
+- hashi_vault - fix approle authentication without ``secret_id`` (https://github.com/ansible-collections/community.general/pull/1138).
+- homebrew - fix package name validation for packages containing a hyphen ``-`` (https://github.com/ansible-collections/community.general/issues/1037).
+- homebrew_cask - fix package name validation for casks containing a hyphen ``-`` (https://github.com/ansible-collections/community.general/issues/1037).
+- influxdb - fix usage of path for older version of python-influxdb (https://github.com/ansible-collections/community.general/issues/997).
+- iptables_state - fix race condition between module and its action plugin (https://github.com/ansible-collections/community.general/issues/1136).
+- linode inventory plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297).
+- lxc_container - fix the type of the ``container_config`` parameter. It is now processed as a list and not a string (https://github.com/ansible-collections/community.general/pull/216).
+- macports - fix failure to install a package whose name is contained within an already installed package's name or variant (https://github.com/ansible-collections/community.general/issues/1307).
+- maven_artifact - handle timestamped snapshot version strings properly (https://github.com/ansible-collections/community.general/issues/709).
+- memcached cache plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297).
+- monit - fix the module's ability to determine the current state of the monitored process (https://github.com/ansible-collections/community.general/pull/1107).
+- nios_fixed_address, nios_host_record, nios_zone - removed redundant parameter aliases causing warning messages to incorrectly appear in task output (https://github.com/ansible-collections/community.general/issues/852).
+- nmcli - cannot modify ``ifname`` after connection creation (https://github.com/ansible-collections/community.general/issues/1089).
+- nmcli - use consistent autoconnect parameters (https://github.com/ansible-collections/community.general/issues/459).
+- omapi_host - fix compatibility with Python 3 (https://github.com/ansible-collections/community.general/issues/787).
+- packet_net.py inventory script - fixed failure w.r.t. operating system retrieval by changing array subscription back to attribute access (https://github.com/ansible-collections/community.general/pull/891).
+- postgresql_ext - fix a crash when available ext versions cannot be compared with the current version (https://github.com/ansible-collections/community.general/issues/1095).
+- postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078).
+- postgresql_pg_hba - fix a crash when a new rule with an 'options' field replaces a rule without or vice versa (https://github.com/ansible-collections/community.general/issues/1108).
+- postgresql_privs - fix module failure when ``type`` is ``group`` and the ``objs`` value contains hyphens (https://github.com/ansible-collections/community.general/issues/1058).
+- proxmox_kvm - fix issue causing linked clones not being created by allowing ``format=unspecified`` (https://github.com/ansible-collections/community.general/issues/1027).
+- proxmox_kvm - ignore unsupported ``pool`` parameter on update (https://github.com/ansible-collections/community.general/pull/1258).
+- redis - fixes parsing of config values which should not be converted to bytes (https://github.com/ansible-collections/community.general/pull/1079).
+- redis cache plugin - make sure that plugin errors out on initialization if the required library is not found, and not on load-time (https://github.com/ansible-collections/community.general/pull/1297).
+- slack - avoid trying to update existing message when sending messages that contain the string "ts" (https://github.com/ansible-collections/community.general/issues/1097).
+- solaris_zone - fixed issue trying to configure zone in Python 3 (https://github.com/ansible-collections/community.general/issues/1081).
+- syspatch - fix bug where not setting ``apply=true`` would result in error (https://github.com/ansible-collections/community.general/pull/360).
+- xfconf - parameter ``value`` no longer required for state ``absent`` (https://github.com/ansible-collections/community.general/issues/1329).
+- xfconf - xfconf no longer passing the command args as a string, but rather as a list (https://github.com/ansible-collections/community.general/issues/1328).
+- zypper - force ``LANG=C`` as zypper looks at XML output in which attributes could otherwise be translated (https://github.com/ansible-collections/community.general/issues/1175).
+
+New Modules
+-----------
+
+Cloud
+~~~~~
+
+misc
+^^^^
+
+- proxmox_domain_info - Retrieve information about one or more Proxmox VE domains
+- proxmox_group_info - Retrieve information about one or more Proxmox VE groups
+- proxmox_user_info - Retrieve information about one or more Proxmox VE users
+
+Clustering
+~~~~~~~~~~
+
+nomad
+^^^^^
+
+- nomad_job - Launch a Nomad Job
+- nomad_job_info - Get Nomad Jobs info
+
+Monitoring
+~~~~~~~~~~
+
+- pagerduty_change - Track a code or infrastructure change as a PagerDuty change event
+- pagerduty_user - Manage a user account on PagerDuty
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+Regular bimonthly minor release.
+
+Minor Changes
+-------------
+
+- hashi_vault - support ``VAULT_NAMESPACE`` environment variable for namespaced lookups against Vault Enterprise (in addition to the ``namespace=`` flag supported today) (https://github.com/ansible-collections/community.general/pull/929).
+- hashi_vault lookup - add ``VAULT_TOKEN_FILE`` as env option to specify ``token_file`` param (https://github.com/ansible-collections/community.general/issues/373).
+- hashi_vault lookup - add ``VAULT_TOKEN_PATH`` as env option to specify ``token_path`` param (https://github.com/ansible-collections/community.general/issues/373).
+- ipa_user - add ``userauthtype`` option (https://github.com/ansible-collections/community.general/pull/951).
+- iptables_state - use FQCN when calling a module from action plugin (https://github.com/ansible-collections/community.general/pull/967).
+- nagios - add the ``acknowledge`` action (https://github.com/ansible-collections/community.general/pull/820).
+- nagios - add the ``host`` and ``all`` values for the ``forced_check`` action (https://github.com/ansible-collections/community.general/pull/998).
+- nagios - add the ``service_check`` action (https://github.com/ansible-collections/community.general/pull/820).
+- nagios - rename the ``service_check`` action to ``forced_check`` since it can now check a particular service, all services of a particular host, or the host itself (https://github.com/ansible-collections/community.general/pull/998).
+- pkgutil - module can now accept a list of packages (https://github.com/ansible-collections/community.general/pull/799).
+- pkgutil - module has a new option, ``force``, equivalent to the ``-f`` option to the `pkgutil <http://pkgutil.net/>`_ command (https://github.com/ansible-collections/community.general/pull/799).
+- pkgutil - module now supports check mode (https://github.com/ansible-collections/community.general/pull/799); these pkgutil changes are combined in the sketch after this list.
+- postgresql_privs - add the ``usage_on_types`` option (https://github.com/ansible-collections/community.general/issues/884).
+- proxmox_kvm - improve code readability (https://github.com/ansible-collections/community.general/pull/934).
+- pushover - add device parameter (https://github.com/ansible-collections/community.general/pull/802).
+- redfish_command - add sub-command for ``EnableContinuousBootOverride`` and ``DisableBootOverride`` to allow setting BootSourceOverrideEnabled Redfish property (https://github.com/ansible-collections/community.general/issues/824).
+- redfish_command - support same reset actions on Managers as on Systems (https://github.com/ansible-collections/community.general/issues/901).
+- slack - add support for updating messages (https://github.com/ansible-collections/community.general/issues/304).
+- xml - fixed issue where ``changed`` was returned when removing a non-existent xpath (https://github.com/ansible-collections/community.general/pull/1007).
+- zypper_repository - proper failure when python-xml is missing (https://github.com/ansible-collections/community.general/pull/939).
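As referenced in the pkgutil entries above, a minimal sketch combining the new list support and the new ``force`` option (the package names are placeholders, not taken from the release notes):

    - name: Install several packages in one task, forcing (re)installation if needed
      community.general.pkgutil:
        name:             # accepts a list as of this release
          - CSWtop        # placeholder package name (assumption)
          - CSWwget       # placeholder package name (assumption)
        state: present
        force: true       # equivalent to pkgutil's -f option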
+
+Bugfixes
+--------
+
+- aerospike_migrations - handle exception when unstable-cluster is returned (https://github.com/ansible-collections/community.general/pull/900).
+- django_manage - fix idempotence for ``createcachetable`` (https://github.com/ansible-collections/community.general/pull/699).
+- docker_container - fix idempotency problem with ``published_ports`` when strict comparison is used and list is empty (https://github.com/ansible-collections/community.general/issues/978).
+- gem - fix get_installed_versions: correctly parse ``default`` version (https://github.com/ansible-collections/community.general/pull/783).
+- hashi_vault - add missing ``mount_point`` parameter for approle auth (https://github.com/ansible-collections/community.general/pull/897).
+- hashi_vault lookup - ``token_path`` in config file overridden by env ``HOME`` (https://github.com/ansible-collections/community.general/issues/373).
+- homebrew_cask - fixed issue where a cask with ``@`` in the name is incorrectly reported as invalid (https://github.com/ansible-collections/community.general/issues/733).
+- interfaces_file - escape regular expression characters in old value (https://github.com/ansible-collections/community.general/issues/777).
+- launchd - fix for user-level services (https://github.com/ansible-collections/community.general/issues/896).
+- nmcli - set ``C`` locale when executing ``nmcli`` (https://github.com/ansible-collections/community.general/issues/989).
+- parted - fix creating partition when label is changed (https://github.com/ansible-collections/community.general/issues/522).
+- pkg5 - now works when Python 3 is used on the target (https://github.com/ansible-collections/community.general/pull/789).
+- postgresql_privs - allow to pass ``PUBLIC`` role written in lowercase letters (https://github.com/ansible-collections/community.general/issues/857).
+- postgresql_privs - fix the module mistaking a procedure for a function (https://github.com/ansible-collections/community.general/issues/994).
+- postgresql_privs - rollback if nothing changed (https://github.com/ansible-collections/community.general/issues/885).
+- postgresql_privs - the module was attempting to revoke grant options even though ``grant_option`` was not specified (https://github.com/ansible-collections/community.general/pull/796).
+- proxmox_kvm - defer error-checking for non-existent VMs in order to fix idempotency of tasks using ``state=absent`` and properly recognize a success (https://github.com/ansible-collections/community.general/pull/811).
+- proxmox_kvm - improve handling of long-running tasks by creating a dedicated function (https://github.com/ansible-collections/community.general/pull/831).
+- slack - fix ``xox[abp]`` token identification to capture everything after ``xox[abp]``, as the token is the only thing that should be in this argument (https://github.com/ansible-collections/community.general/issues/862).
+- terraform - fix incorrectly reporting a status of unchanged when the number of resources added or destroyed is a multiple of 10 (https://github.com/ansible-collections/community.general/issues/561).
+- timezone - support Python 3 on macOS/Darwin (https://github.com/ansible-collections/community.general/pull/945).
+- zfs - fixed ``invalid character '@' in pool name`` error when working with snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- proxmox - Proxmox inventory source
+- stackpath_compute - StackPath Edge Computing inventory source
+
+New Modules
+-----------
+
+Cloud
+~~~~~
+
+scaleway
+^^^^^^^^
+
+- scaleway_database_backup - Scaleway database backups management module
+
+Source Control
+~~~~~~~~~~~~~~
+
+gitlab
+^^^^^^
+
+- gitlab_group_members - Manage group members on GitLab Server
+- gitlab_group_variable - Creates, updates, or deletes GitLab groups variables
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+Release for Ansible 2.10.0.
+
+
+Minor Changes
+-------------
+
+- The collection dependencies were adjusted so that ``community.kubernetes`` and ``google.cloud`` are required to be of version 1.0.0 or newer (https://github.com/ansible-collections/community.general/pull/774).
+- jc - new filter to convert the output of many shell commands and file types to JSON. Uses the jc library at https://github.com/kellyjonbrazil/jc. For example, filtering the STDOUT output of ``uname -a`` via ``{{ result.stdout | community.general.jc('uname') }}``. Requires Python 3.6+ (https://github.com/ansible-collections/community.general/pull/750). See the usage example after this list.
+- xfconf - add support for ``double`` type (https://github.com/ansible-collections/community.general/pull/744).
+
+Bugfixes
+--------
+
+- cobbler inventory plugin - ``name`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
+- dsv lookup - fix incorrect dictionary usage (https://github.com/ansible-collections/community.general/pull/743).
+- inventory plugins - allow FQCN in ``plugin`` option (https://github.com/ansible-collections/community.general/pull/722).
+- ipa_hostgroup - fix an issue with load-balanced ipa and cookie handling with Python 3 (https://github.com/ansible-collections/community.general/issues/737).
+- oc connection plugin - ``transport`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
+- postgresql_set - allow to pass an empty string to the ``value`` parameter (https://github.com/ansible-collections/community.general/issues/775).
+- xfconf - make it work in non-english locales (https://github.com/ansible-collections/community.general/pull/744).
+
+New Modules
+-----------
+
+Cloud
+~~~~~
+
+docker
+^^^^^^
+
+- docker_stack_task_info - Return information of the tasks on a docker stack
+
+System
+~~~~~~
+
+- iptables_state - Save iptables state into a file or restore it from a file
+- shutdown - Shut down a machine
+- sysupgrade - Manage OpenBSD system upgrades
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+This is release 1.0.0 of ``community.general``, released on 2020-07-31.
+
+
+Minor Changes
+-------------
+
+- Add the ``gcpubsub``, ``gcpubsub_info`` and ``gcpubsub_facts`` (to be removed in 3.0.0) modules. These were originally in community.general, but were removed on the assumption that they had been moved to google.cloud. Since this turned out to be incorrect, we re-added them for 1.0.0.
+- Add the deprecated ``gcp_backend_service``, ``gcp_forwarding_rule`` and ``gcp_healthcheck`` modules, which will be removed in 2.0.0. These were originally in community.general, but were removed on the assumption that they had been moved to google.cloud. Since this turned out to be incorrect, we re-added them for 1.0.0.
+- The collection is now actively tested in CI with the latest Ansible 2.9 release.
+- airbrake_deployment - add ``version`` param; clarified docs on ``revision`` param (https://github.com/ansible-collections/community.general/pull/583).
+- apk - added ``no_cache`` option (https://github.com/ansible-collections/community.general/pull/548).
+- firewalld - the module has been moved to the ``ansible.posix`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/623).
+- gitlab_project - add support for merge_method on projects (https://github.com/ansible/ansible/pull/66813).
+- gitlab_runners inventory plugin - permit environment variable input for ``server_url``, ``api_token`` and ``filter`` options (https://github.com/ansible-collections/community.general/pull/611).
+- haproxy - add options to enable/disable health and agent checks. When health and agent checks are enabled for a service, a disabled service will re-enable itself automatically. These options also change the state of the agent checks to match the requested state for the backend (https://github.com/ansible-collections/community.general/issues/684).
+- log_plays callback - use v2 methods (https://github.com/ansible-collections/community.general/pull/442).
+- logstash callback - add ini config (https://github.com/ansible-collections/community.general/pull/610).
+- lxd_container - added support of ``--target`` flag for cluster deployments (https://github.com/ansible-collections/community.general/issues/637).
+- parted - accept negative numbers in ``part_start`` and ``part_end``
+- pkgng - added ``stdout`` and ``stderr`` attributes to the result (https://github.com/ansible-collections/community.general/pull/560).
+- pkgng - added support for upgrading all packages using ``name: *, state: latest``, similar to other package providers (https://github.com/ansible-collections/community.general/pull/569).
+- postgresql_query - add search_path parameter (https://github.com/ansible-collections/community.general/issues/625).
+- rundeck_acl_policy - add check for rundeck_acl_policy name parameter (https://github.com/ansible-collections/community.general/pull/612).
+- slack - add support for sending messages built with block kit (https://github.com/ansible-collections/community.general/issues/380).
+- splunk callback - add an option to allow not to validate certificate from HEC (https://github.com/ansible-collections/community.general/pull/596).
+- xfconf - add arrays support (https://github.com/ansible/ansible/issues/46308).
+- xfconf - add support for ``uint`` type (https://github.com/ansible-collections/community.general/pull/696).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- log_plays callback - add missing information to the logs generated by the callback plugin. This changes the log message format (https://github.com/ansible-collections/community.general/pull/442).
+- pkgng - passing ``name: *`` with ``state: absent`` will no longer remove every installed package from the system. It is now a noop. (https://github.com/ansible-collections/community.general/pull/569).
+- pkgng - passing ``name: *`` with ``state: latest`` or ``state: present`` will no longer install every package from the configured package repositories. Instead, ``name: *, state: latest`` will upgrade all already-installed packages, and ``name: *, state: present`` is a noop. (https://github.com/ansible-collections/community.general/pull/569).
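+
+A minimal sketch of the adjusted ``name: *`` semantics described above (illustrative only):
+
+.. code-block:: yaml
+
+    - name: Upgrade all already-installed packages (new behavior of name=* with state=latest)
+      community.general.pkgng:
+        name: "*"
+        state: latest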
+
+Deprecated Features
+-------------------
+
+- The ldap_attr module has been deprecated and will be removed in a later release; use ldap_attrs instead.
+- xbps - the ``force`` option never had any effect. It is now deprecated, and will be removed in 3.0.0 (https://github.com/ansible-collections/community.general/pull/568).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- conjur_variable lookup - has been moved to the ``cyberark.conjur`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/570).
+- digital_ocean_* - all DigitalOcean modules have been moved to the ``community.digitalocean`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/622).
+- infini_* - all infinidat modules have been moved to the ``infinidat.infinibox`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/607).
+- logicmonitor - the module has been removed in 1.0.0 since it is unmaintained and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539, https://github.com/ansible-collections/community.general/pull/541).
+- logicmonitor_facts - the module has been removed in 1.0.0 since it is unmaintained and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539, https://github.com/ansible-collections/community.general/pull/541).
+- mysql_* - all MySQL modules have been moved to the ``community.mysql`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/633).
+- proxysql_* - all ProxySQL modules have been moved to the ``community.proxysql`` collection. A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/624).
+
+Bugfixes
+--------
+
+- aix_filesystem - fix issues with ismount module_util pathing for Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/567).
+- consul_kv lookup - fix ``ANSIBLE_CONSUL_URL`` environment variable handling (https://github.com/ansible/ansible/issues/51960).
+- consul_kv lookup - fix arguments handling (https://github.com/ansible-collections/community.general/pull/303).
+- digital_ocean_tag_info - fix crash when querying for an individual tag (https://github.com/ansible-collections/community.general/pull/615).
+- doas become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_flags`` and ``become_user`` were not explicitly specified (https://github.com/ansible-collections/community.general/pull/704).
+- docker_compose - add a condition to prevent service startup if parameter ``stopped`` is true. Otherwise, the service would be started on each play and stopped again immediately due to the ``stopped`` parameter, which breaks the idempotency of the module (https://github.com/ansible-collections/community.general/issues/532).
+- docker_compose - disallow usage of the parameters ``stopped`` and ``restarted`` at the same time, as this also breaks the idempotency of the module (https://github.com/ansible-collections/community.general/issues/532).
+- docker_container - use Config MacAddress by default instead of Networks. Networks MacAddress is empty in some cases (https://github.com/ansible/ansible/issues/70206).
+- docker_container - various error fixes in string handling for Python 2 to avoid crashes when non-ASCII characters are used in strings (https://github.com/ansible-collections/community.general/issues/640).
+- docker_swarm - removes ``advertise_addr`` from list of required arguments when ``state`` is ``"join"`` (https://github.com/ansible-collections/community.general/issues/439).
+- dzdo become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_user`` was not explicitly specified (https://github.com/ansible-collections/community.general/pull/708).
+- filesystem - fix ``resizefs`` of XFS filesystems; the filesystem needs to be mounted.
+- jenkins_plugin - replace MD5 checksum verification with SHA1 due to MD5 being disabled on systems with FIPS-only algorithms enabled (https://github.com/ansible/ansible/issues/34304).
+- jira - improve error message handling (https://github.com/ansible-collections/community.general/pull/311).
+- jira - improve error message handling with multiple errors (https://github.com/ansible-collections/community.general/pull/707).
+- kubevirt - Add alias ``interface_name`` for ``network_name`` (https://github.com/ansible/ansible/issues/55641).
+- nmcli - fix idempotency when modifying an existing connection (https://github.com/ansible-collections/community.general/issues/481).
+- osx_defaults - fix handling negative integers (https://github.com/ansible-collections/community.general/issues/134).
+- pacman - treat package names containing .zst as package files during installation (https://www.archlinux.org/news/now-using-zstandard-instead-of-xz-for-package-compression/, https://github.com/ansible-collections/community.general/pull/650).
+- pbrun become plugin - address a bug with the parameters handling that was breaking the plugin in community.general when ``become_user`` was not explicitly specified (https://github.com/ansible-collections/community.general/pull/708).
+- postgresql_privs - fix crash when setting privileges on a schema with a hyphen in the name (https://github.com/ansible-collections/community.general/issues/656).
+- postgresql_set - only display a warning about restarts when restarting is needed (https://github.com/ansible-collections/community.general/pull/651).
+- redfish_info, redfish_config, redfish_command - Fix Redfish response payload decode on Python 3.5 (https://github.com/ansible-collections/community.general/issues/686)
+- selective - mark task failed correctly (https://github.com/ansible/ansible/issues/63767).
+- snmp_facts - skip ``EndOfMibView`` values (https://github.com/ansible/ansible/issues/49044).
+- yarn - fixed an index out of range error when no outdated packages were returned by the yarn executable (see https://github.com/ansible-collections/community.general/pull/474).
+- yarn - fixed a ``too many values to unpack`` error when scoped packages are installed (see https://github.com/ansible-collections/community.general/pull/474).
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- cobbler - Cobbler inventory source
+
+Lookup
+~~~~~~
+
+- dsv - Get secrets from Thycotic DevOps Secrets Vault
+- tss - Get secrets from Thycotic Secret Server
+
+New Modules
+-----------
+
+Cloud
+~~~~~
+
+docker
+^^^^^^
+
+- docker_stack_info - Return information on a docker stack
+
+Database
+~~~~~~~~
+
+misc
+^^^^
+
+- odbc - Execute SQL via ODBC
+
+System
+~~~~~~
+
+- launchd - Manage macOS services
+
+v0.2.0
+======
+
+Release Summary
+---------------
+
+This is the first proper release of the ``community.general`` collection on 2020-06-20.
+The changelog describes all changes made to the modules and plugins included in this
+collection since Ansible 2.9.0.
+
+
+Major Changes
+-------------
+
+- docker_container - the ``network_mode`` option will be set by default to the name of the first network in ``networks`` if at least one network is given and ``networks_cli_compatible`` is ``true`` (will be default from community.general 2.0.0 on). Set to an explicit value to avoid deprecation warnings if you specify networks and set ``networks_cli_compatible`` to ``true``. The current default (not specifying it) is equivalent to the value ``default``.
+- docker_container - the module has a new option, ``container_default_behavior``, whose default value will change from ``compatibility`` to ``no_defaults``. Set to an explicit value to avoid deprecation warnings.
+- gitlab_user - no longer requires ``name``, ``email`` and ``password`` arguments when ``state=absent``.
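+
+A minimal sketch of pinning the docker_container options mentioned above to explicit values, so the upcoming default changes do not trigger deprecation warnings (image and network names are placeholders):
+
+.. code-block:: yaml
+
+    - name: Run a container with explicit defaults pinned
+      community.general.docker_container:
+        name: web
+        image: nginx:alpine                      # placeholder image
+        networks:
+          - name: appnet                         # placeholder network name
+        networks_cli_compatible: true
+        network_mode: appnet                     # explicit value instead of relying on the future default
+        container_default_behavior: no_defaults  # explicit value instead of the changing default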
+
+Minor Changes
+-------------
+
+- A new filter ``to_time_unit`` with specializations ``to_milliseconds``, ``to_seconds``, ``to_minutes``, ``to_hours``, ``to_days``, ``to_weeks``, ``to_months`` and ``to_years`` has been added. For example ``'2d 4h' | community.general.to_hours`` evaluates to 52. See the example at the end of this list.
+- Add a ``make`` option to the make module to allow choosing a specific make executable
+- Add information about changed packages in homebrew returned facts (https://github.com/ansible/ansible/issues/59376).
+- Follow up changes in homebrew_cask (https://github.com/ansible/ansible/issues/34696).
+- Moved OpenStack dynamic inventory script to Openstack Collection.
+- Remove redundant encoding in json.load call in ipa module_utils (https://github.com/ansible/ansible/issues/66592).
+- Updated documentation about netstat command requirement for listen_ports_facts module (https://github.com/ansible/ansible/issues/68077).
+- airbrake_deployment - Allow passing ``project_id`` and ``project_key`` for v4 api deploy compatibility
+- ali_instance - Add params ``unique_suffix``, ``tags``, ``purge_tags``, ``ram_role_name``, ``spot_price_limit``, ``spot_strategy``, ``period_unit``, ``dry_run``, ``include_data_disks``
+- ali_instance and ali_instance_info - the required package footmark needs a version higher than 1.19.0
+- ali_instance_info - Add params ``name_prefix``, ``filters``
+- alicloud modules - Add authentication params to all modules
+- alicloud modules - now only support Python 3.6, and no longer support Python 2.x
+- cisco_spark - the module has been renamed to ``cisco_webex`` (https://github.com/ansible-collections/community.general/pull/457).
+- cloudflare_dns - Report unexpected failure with more detail (https://github.com/ansible-collections/community.general/pull/511).
+- database - add support for unique indexes in postgresql_idx
+- digital_ocean_droplet - add support for new vpc_uuid parameter
+- docker connection plugin - run Powershell modules on Windows containers.
+- docker_container - add ``cpus`` option (https://github.com/ansible/ansible/issues/34320).
+- docker_container - add new ``container_default_behavior`` option (PR https://github.com/ansible/ansible/pull/63419).
+- docker_container - allow to configure timeout when the module waits for a container's removal.
+- docker_container - only passes anonymous volumes to docker daemon as ``Volumes``. This increases compatibility with the ``docker`` CLI program. Note that if you specify ``volumes: strict`` in ``comparisons``, this could cause existing containers created with docker_container from Ansible 2.9 or earlier to restart.
+- docker_container - support for port ranges was adjusted to be more compatible to the ``docker`` command line utility: a one-port container range combined with a multiple-port host range will no longer result in only the first host port be used, but the whole range being passed to Docker so that a free port in that range will be used.
+- docker_container.py - update a container's ``restart_policy`` without restarting the container (https://github.com/ansible/ansible/issues/65993)
+- docker_stack - Added ``stdout``, ``stderr``, and ``rc`` to return values.
+- docker_swarm_service - Added support for ``init`` option.
+- docker_swarm_service - Sort lists when checking for changes.
+- firewalld - new feature, can now set ``target`` for a ``zone`` (https://github.com/ansible-collections/community.general/pull/526).
+- flatpak and flatpak_remote - use ``module.run_command()`` instead of ``subprocess.Popen()``.
+- gitlab_project_variable - implement masked and protected attributes
+- gitlab_project_variable - implemented variable_type attribute.
+- hashi_vault - AWS IAM auth method added. Accepts standard ansible AWS params and only loads AWS libraries when needed.
+- hashi_vault - INI and additional ENV sources made available for some new and old options.
+- hashi_vault - ``secret`` can now be an unnamed argument if it's specified first in the term string (see examples).
+- hashi_vault - ``token`` is now an explicit option (and the default) in the choices for ``auth_method``. This matches previous behavior (``auth_method`` omitted resulted in token auth) but makes the value clearer and allows it to be explicitly specified.
+- hashi_vault - new option ``return_format`` added to control how secrets are returned, including options for multiple secrets and returning raw values with metadata.
+- hashi_vault - previous (undocumented) behavior was to attempt to read token from ``~/.vault-token`` if not specified. This is now controlled through ``token_path`` and ``token_file`` options (defaults will mimic previous behavior).
+- hashi_vault - previously all options had to be supplied via key=value pairs in the term string; now a mix of string and parameters can be specified (see examples).
+- hashi_vault - uses newer authentication calls in the HVAC library and falls back to older ones with deprecation warnings.
+- homebrew - Added environment variable to honor update_homebrew setting (https://github.com/ansible/ansible/issues/56650).
+- homebrew - New option ``upgrade_options`` allows to pass flags to upgrade
+- homebrew - ``install_options`` is now validated to be a list of strings.
+- homebrew_tap - ``name`` is now validated to be a list of strings.
+- idrac_redfish_config - Support for multiple manager attributes configuration
+- java_keystore - add the private_key_passphrase parameter (https://github.com/ansible-collections/community.general/pull/276).
+- jira - added search function with support for Jira JQL (https://github.com/ansible-collections/community.general/pull/22).
+- jira - added update function which can update Jira Selects etc (https://github.com/ansible-collections/community.general/pull/22).
+- lvg - add ``pvresize`` new parameter (https://github.com/ansible/ansible/issues/29139).
+- mysql_db - add ``master_data`` parameter (https://github.com/ansible/ansible/pull/66048).
+- mysql_db - add ``skip_lock_tables`` option (https://github.com/ansible/ansible/pull/66688).
+- mysql_db - add the ``check_implicit_admin`` parameter (https://github.com/ansible/ansible/issues/24418).
+- mysql_db - add the ``config_overrides_defaults`` parameter (https://github.com/ansible/ansible/issues/26919).
+- mysql_db - add the ``dump_extra_args`` parameter (https://github.com/ansible/ansible/pull/67747).
+- mysql_db - add the ``executed_commands`` returned value (https://github.com/ansible/ansible/pull/65498).
+- mysql_db - add the ``force`` parameter (https://github.com/ansible/ansible/pull/65547).
+- mysql_db - add the ``restrict_config_file`` parameter (https://github.com/ansible/ansible/issues/34488).
+- mysql_db - add the ``unsafe_login_password`` parameter (https://github.com/ansible/ansible/issues/63955).
+- mysql_db - add the ``use_shell`` parameter (https://github.com/ansible/ansible/issues/20196).
+- mysql_info - add ``exclude_fields`` parameter (https://github.com/ansible/ansible/issues/63319).
+- mysql_info - add ``global_status`` filter parameter option and return (https://github.com/ansible/ansible/pull/63189).
+- mysql_info - add ``return_empty_dbs`` parameter to list empty databases (https://github.com/ansible/ansible/issues/65727).
+- mysql_replication - add ``channel`` parameter (https://github.com/ansible/ansible/issues/29311).
+- mysql_replication - add ``connection_name`` parameter (https://github.com/ansible/ansible/issues/46243).
+- mysql_replication - add ``fail_on_error`` parameter (https://github.com/ansible/ansible/pull/66252).
+- mysql_replication - add ``master_delay`` parameter (https://github.com/ansible/ansible/issues/51326).
+- mysql_replication - add ``master_use_gtid`` parameter (https://github.com/ansible/ansible/pull/62648).
+- mysql_replication - add ``queries`` return value (https://github.com/ansible/ansible/pull/63036).
+- mysql_replication - add support of ``resetmaster`` choice to ``mode`` parameter (https://github.com/ansible/ansible/issues/42870).
+- mysql_user - ``priv`` parameter can be string or dictionary (https://github.com/ansible/ansible/issues/57533).
+- mysql_user - add ``plugin_auth_string`` parameter (https://github.com/ansible/ansible/pull/44267).
+- mysql_user - add ``plugin_hash_string`` parameter (https://github.com/ansible/ansible/pull/44267).
+- mysql_user - add ``plugin`` parameter (https://github.com/ansible/ansible/pull/44267).
+- mysql_user - add the resource_limits parameter (https://github.com/ansible-collections/community.general/issues/133).
+- mysql_variables - add ``mode`` parameter (https://github.com/ansible/ansible/issues/60119).
+- nagios module - a start parameter has been added, allowing the time a Nagios outage starts to be set. It defaults to the current time if not provided, preserving the previous behavior and ensuring compatibility with existing playbooks.
+- nsupdate - Use provided TSIG key to not only sign update queries but also lookup queries
+- open_iscsi - allow ``portal`` parameter to be a domain name by resolving the portal ip address beforehand (https://github.com/ansible-collections/community.general/pull/461).
+- packet_device - add ``tags`` parameter on device creation (https://github.com/ansible-collections/community.general/pull/418)
+- pacman - Improve package state detection speed: Don't query for full details of a package.
+- parted - add the ``fs_type`` parameter (https://github.com/ansible-collections/community.general/issues/135).
+- pear - added ``prompts`` parameter to allow users to specify expected prompt that could hang Ansible execution (https://github.com/ansible-collections/community.general/pull/530).
+- postgresql_copy - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/313).
+- postgresql_db - add ``dump_extra_args`` parameter (https://github.com/ansible/ansible/pull/66717).
+- postgresql_db - add support for .pgc file format for dump and restores.
+- postgresql_db - add the ``executed_commands`` returned value (https://github.com/ansible/ansible/pull/65542).
+- postgresql_db - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/issues/106).
+- postgresql_ext - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/282).
+- postgresql_ext - refactor to simplify and remove dead code (https://github.com/ansible-collections/community.general/pull/291)
+- postgresql_ext - use query parameters with cursor object (https://github.com/ansible/ansible/pull/64994).
+- postgresql_idx - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/264).
+- postgresql_idx - refactor to simplify code (https://github.com/ansible-collections/community.general/pull/291)
+- postgresql_info - add collecting info about logical replication publications in databases (https://github.com/ansible/ansible/pull/67614).
+- postgresql_info - add collecting info about replication subscriptions (https://github.com/ansible/ansible/pull/67464).
+- postgresql_info - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/308).
+- postgresql_lang - add ``owner`` parameter (https://github.com/ansible/ansible/pull/62999).
+- postgresql_lang - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/272).
+- postgresql_membership - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/158).
+- postgresql_owner - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/198).
+- postgresql_ping - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/312).
+- postgresql_ping - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/312).
+- postgresql_privs - add support for TYPE as object types in postgresql_privs module (https://github.com/ansible/ansible/issues/62432).
+- postgresql_privs - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/177).
+- postgresql_publication - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/279).
+- postgresql_publication - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/279).
+- postgresql_query - add the ``encoding`` parameter (https://github.com/ansible/ansible/issues/65367).
+- postgresql_query - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/294).
+- postgresql_schema - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/259).
+- postgresql_sequence - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/295).
+- postgresql_set - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/302).
+- postgresql_slot - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/298).
+- postgresql_subscription - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/280).
+- postgresql_subscription - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/280).
+- postgresql_table - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/307).
+- postgresql_tablespace - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/240).
+- postgresql_user - add scram-sha-256 support (https://github.com/ansible/ansible/issues/49878).
+- postgresql_user - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/116).
+- postgresql_user - add the comment parameter (https://github.com/ansible/ansible/pull/66711).
+- postgresql_user_obj_stat_info - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/310).
+- postgresql_user_obj_stat_info - refactor to simplify code (https://github.com/ansible-collections/community.general/pull/291)
+- proxmox - add the ``description`` and ``hookscript`` parameter (https://github.com/ansible-collections/community.general/pull/245).
+- redfish_command - Support for virtual media insert and eject commands (https://github.com/ansible-collections/community.general/issues/493)
+- redfish_config - New ``bios_attributes`` option to allow setting multiple BIOS attributes in one command.
+- redfish_config, redfish_command - Add ``resource_id`` option to specify which System, Manager, or Chassis resource to modify.
+- redis - add TLS support to redis cache plugin (https://github.com/ansible-collections/community.general/pull/410).
+- rhn_channel - Added ``validate_certs`` option (https://github.com/ansible/ansible/issues/68374).
+- rundeck modules - added new options ``client_cert``, ``client_key``, ``force``, ``force_basic_auth``, ``http_agent``, ``url_password``, ``url_username``, ``use_proxy``, ``validate_certs`` to allow changing fetch_url parameters.
+- slack - Add support for user/bot/application tokens (using Slack WebAPI)
+- slack - Return ``thread_id`` with thread timestamp when user/bot/application tokens are used
+- syslogger - added new parameter ``ident`` to specify the name of the application sending the message to syslog (https://github.com/ansible-collections/community.general/issues/319).
+- terraform - Adds option ``backend_config_files``. This can accept a list of paths to multiple configuration files (https://github.com/ansible-collections/community.general/pull/394).
+- terraform - Adds option ``variables_files`` for multiple var-files (https://github.com/ansible-collections/community.general/issues/224).
+- ufw - accept ``interface_in`` and ``interface_out`` as parameters.
+- zypper - Added ``allow_vendor_change`` and ``replacefiles`` zypper options (https://github.com/ansible-collections/community.general/issues/381)
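+
+A minimal example of the ``to_time_unit`` filter family referenced earlier in this list (the value is taken from the changelog entry):
+
+.. code-block:: yaml
+
+    - name: Convert a human-readable duration to hours
+      ansible.builtin.debug:
+        msg: "{{ '2d 4h' | community.general.to_hours }}"   # evaluates to 52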
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The environment variable for the auth context for the oc.py connection plugin has been corrected (K8S_CONTEXT). It was using an initial lowercase k by mistake. (https://github.com/ansible-collections/community.general/pull/377).
+- bigpanda - the parameter ``message`` was renamed to ``deployment_message`` since ``message`` is used by Ansible Core engine internally.
+- cisco_spark - the module option ``message`` was renamed to ``msg``, as ``message`` is used internally in Ansible Core engine (https://github.com/ansible/ansible/issues/39295)
+- datadog - the parameter ``message`` was renamed to ``notification_message`` since ``message`` is used by Ansible Core engine internally.
+- docker_container - no longer passes information on non-anonymous volumes or binds as ``Volumes`` to the Docker daemon. This increases compatibility with the ``docker`` CLI program. Note that if you specify ``volumes: strict`` in ``comparisons``, this could cause existing containers created with docker_container from Ansible 2.9 or earlier to restart.
+- docker_container - support for port ranges was adjusted to be more compatible to the ``docker`` command line utility: a one-port container range combined with a multiple-port host range will no longer result in only the first host port be used, but the whole range being passed to Docker so that a free port in that range will be used.
+- hashi_vault lookup - now returns the latest version when using the KV v2 secrets engine. Previously, it returned all versions of the secret which required additional steps to extract and filter the desired version.
+
+Deprecated Features
+-------------------
+
+- airbrake_deployment - Add deprecation notice for ``token`` parameter and v2 api deploys. This feature will be removed in community.general 3.0.0.
+- clc_aa_policy - The ``wait`` option had no effect and will be removed in community.general 3.0.0.
+- clc_aa_policy - the ``wait`` parameter will be removed. It has always been ignored by the module.
+- docker_container - the ``trust_image_content`` option is now deprecated and will be removed in community.general 3.0.0. It has never been used by the module.
+- docker_container - the ``trust_image_content`` option will be removed. It has always been ignored by the module.
+- docker_container - the default of ``container_default_behavior`` will change from ``compatibility`` to ``no_defaults`` in community.general 3.0.0. Set the option to an explicit value to avoid a deprecation warning.
+- docker_container - the default value for ``network_mode`` will change in community.general 3.0.0, provided at least one network is specified and ``networks_cli_compatible`` is ``true``. See porting guide, module documentation or deprecation warning for more details.
+- docker_stack - Return values ``out`` and ``err`` have been deprecated and will be removed in community.general 3.0.0. Use ``stdout`` and ``stderr`` instead.
+- docker_stack - the return values ``err`` and ``out`` have been deprecated. Use ``stdout`` and ``stderr`` from now on instead.
+- helm - the ``helm`` module has been deprecated. A new implementation is available in the community.kubernetes collection.
+- redfish_config - Deprecate ``bios_attribute_name`` and ``bios_attribute_value`` in favor of new ``bios_attributes`` option.
+- redfish_config - the ``bios_attribute_name`` and ``bios_attribute_value`` options will be removed. To maintain the existing behavior use the ``bios_attributes`` option instead.
+- redfish_config and redfish_command - the behavior to select the first System, Manager, or Chassis resource to modify when multiple are present will be removed. Use the new ``resource_id`` option to specify target resource to modify.
+- redfish_config, redfish_command - Behavior to modify the first System, Manager, or Chassis resource when multiple are present is deprecated. Use the new ``resource_id`` option to specify target resource to modify.
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- core - remove support for ``check_invalid_arguments`` in ``UTMModule``.
+- pacman - Removed deprecated ``recurse`` option, use ``extra_args=--recursive`` instead
+
+Security Fixes
+--------------
+
+- **SECURITY** - CVE-2019-14904 - solaris_zone module accepts zone name and performs actions related to that. However, there is no user input validation done while performing actions. A malicious user could provide a crafted zone name which allows executing commands into the server manipulating the module behaviour. Adding user input validation as per Solaris Zone documentation fixes this issue.
+- **security issue** - Ansible: Splunk and Sumologic callback plugins leak sensitive data in logs (CVE-2019-14864)
+- ldap_attr, ldap_entry - The ``params`` option has been removed in Ansible-2.10 as it circumvents Ansible's option handling. Setting ``bind_pw`` with the ``params`` option was disallowed in Ansible-2.7, 2.8, and 2.9 as it was insecure. For information about this policy, see the discussion at: https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html This fixes CVE-2020-1746
+
+Bugfixes
+--------
+
+- Convert MD5SUM to lowercase before comparison in maven_artifact module (https://github.com/ansible-collections/community.general/issues/186).
+- Fix GitLab modules authentication by handling ``python-gitlab`` library version >= 1.13.0 (https://github.com/ansible/ansible/issues/64770)
+- Fix SSL protocol references in the ``mqtt`` module to prevent failures on Python 2.6.
+- Fix the ``xml`` module to use ``list(elem)`` instead of ``elem.getchildren()`` since it is being removed in Python 3.9
+- Fix to return XML as a string even for python3 (https://github.com/ansible/ansible/pull/64032).
+- Fix the URL handling in the lxd_container module so that a URL can be specified for an LXD environment created by snap.
+- Fix the URL handling in the lxd_profile module so that a URL can be specified for an LXD environment created by snap.
+- Redact GitLab project variables which might include sensitive information such as passwords, API keys and other project-related details.
+- Run command in absent state in atomic_image module.
+- While deleting a gitlab user, name, email and password are no longer required in the gitlab_user module (https://github.com/ansible/ansible/issues/61921).
+- airbrake_deployment - Allow deploy notifications for Airbrake compatible v2 api (e.g. Errbit)
+- apt_rpm - fix ``package`` type from ``str`` to ``list`` to fix invoking with list of packages (https://github.com/ansible-collections/community.general/issues/143).
+- archive - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+- become - Fix various plugins that still used play_context to get the become password instead of through the plugin - https://github.com/ansible/ansible/issues/62367
+- cloudflare_dns - fix KeyError 'success' (https://github.com/ansible-collections/community.general/issues/236).
+- cronvar - only run ``get_bin_path()`` once
+- cronvar - use correct binary name (https://github.com/ansible/ansible/issues/63274)
+- cronvar - use get_bin_path utility to locate the default crontab executable instead of the hardcoded /usr/bin/crontab. (https://github.com/ansible/ansible/pull/59765)
+- cyberarkpassword - fix invalid attribute access (https://github.com/ansible/ansible/issues/66268)
+- datadog_monitor - Corrects ``_update_monitor`` to use ``notification_message`` instead of deprecated ``message`` (https://github.com/ansible-collections/community.general/pull/389).
+- datadog_monitor - added missing ``log alert`` type to ``type`` choices (https://github.com/ansible-collections/community.general/issues/251).
+- dense callback - fix plugin access to its configuration variables and remove a warning message (https://github.com/ansible/ansible/issues/64628).
+- digital_ocean_droplet - Fix creation of DigitalOcean droplets using digital_ocean_droplet module (https://github.com/ansible/ansible/pull/61655)
+- docker connection plugin - do not prefix remote path if running on Windows containers.
+- docker_compose - fix issue where docker deprecation warning results in ansible erroneously reporting a failure
+- docker_container - fix idempotency for IP addresses for networks. The old implementation checked the effective IP addresses assigned by the Docker daemon, and not the specified ones. This causes idempotency issues for containers which are not running, since they have no effective IP addresses assigned.
+- docker_container - fix network idempotence comparison error.
+- docker_container - improve error behavior when parsing port ranges fails.
+- docker_container - make sure that when image is missing, check mode indicates a change (image will be pulled).
+- docker_container - passing ``test: [NONE]`` now actually disables the image's healthcheck, as documented.
+- docker_container - wait for removal of container if docker API returns early (https://github.com/ansible/ansible/issues/65811).
+- docker_image - fix validation of build options.
+- docker_image - improve file handling when loading images from disk.
+- docker_image - make sure that deprecated options also emit proper deprecation warnings next to warnings which indicate how to replace them.
+- docker_login - Use ``with`` statement when accessing files, to prevent that invalid JSON output is produced.
+- docker_login - correct broken fix for https://github.com/ansible/ansible/pull/60381 which crashes for Python 3.
+- docker_login - fix error handling when ``username`` or ``password`` is not specified when ``state`` is ``present``.
+- docker_login - make sure that ``~/.docker/config.json`` is created with permissions ``0600``.
+- docker_machine - fallback to ip subcommand output if IPAddress is missing (https://github.com/ansible-collections/community.general/issues/412).
+- docker_network - fix idempotence comparison error.
+- docker_network - fix idempotency for multiple IPAM configs of the same IP version (https://github.com/ansible/ansible/issues/65815).
+- docker_network - validate IPAM config subnet CIDR notation on module setup and not during idempotence checking.
+- docker_node_info - improve error handling when service inspection fails, for example because node name being ambiguous (https://github.com/ansible/ansible/issues/63353, PR https://github.com/ansible/ansible/pull/63418).
+- docker_swarm_service - ``source`` must no longer be specified for ``tmpfs`` mounts.
+- docker_swarm_service - fix task always reporting as changed when using ``healthcheck.start_period``.
+- docker_swarm_service - passing ``test: [NONE]`` now actually disables the image's healthcheck, as documented.
+- firewalld - enable the firewalld module to function offline with firewalld version 0.7.0 and newer (https://github.com/ansible/ansible/issues/63254)
+- flatpak and flatpak_remote - fix command line construction to build commands as lists instead of strings.
+- gcp_storage_file lookup - die gracefully when the ``google.cloud`` collection is not installed, or changed in an incompatible way.
+- github_deploy_key - added support for pagination
+- gitlab_user - Fix adding ssh key to new/changed user and adding group membership for new/changed user
+- hashi_vault - Fix KV v2 lookup to always return latest version
+- hashi_vault - Handle equal sign in key=value (https://github.com/ansible/ansible/issues/55658).
+- hashi_vault - error messages are now user friendly and don't contain the secret name (https://github.com/ansible-collections/community.general/issues/54)
+- hashi_vault - if used via ``with_hashi_vault`` and a list of n secrets to retrieve, only the first one would be retrieved and returned n times.
+- hashi_vault - when a non-token authentication method like ldap or userpass failed, but a valid token was loaded anyway (via env or token file), the token was used to attempt authentication, hiding the failure of the requested auth method.
+- homebrew - fix some functions in the Homebrew module ignoring the ``check_mode`` option (https://github.com/ansible/ansible/pull/65387).
+- influxdb_user - Don't grant admin privilege in check mode
+- ipa modules - fix error when IPA_HOST is empty and fallback on DNS (https://github.com/ansible-collections/community.general/pull/241)
+- java_keystore - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+- jira - printing full error message from jira server (https://github.com/ansible-collections/community.general/pull/22).
+- jira - transition issue not working (https://github.com/ansible-collections/community.general/issues/109).
+- linode inventory plugin - fix parsing of access_token (https://github.com/ansible/ansible/issues/66874)
+- manageiq_provider - fix serialization error when running on python3 environment.
+- maven_artifact - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+- mysql - don't mask ``mysql_connect`` function errors from modules (https://github.com/ansible/ansible/issues/64560).
+- mysql_db - fix ``Broken pipe`` error when ``state`` is ``import`` and the target file is compressed (https://github.com/ansible/ansible/issues/20196).
+- mysql_db - fix bug in the ``db_import`` function introduced by https://github.com/ansible/ansible/pull/56721 (https://github.com/ansible/ansible/issues/65351).
+- mysql_info - add a parameter for ``__collect`` to gather only the requested information (https://github.com/ansible-collections/community.general/pull/136).
+- mysql_replication - allow to pass empty values to parameters (https://github.com/ansible/ansible/issues/23976).
+- mysql_user - Fix idempotence when long grant lists are used (https://github.com/ansible/ansible/issues/68044)
+- mysql_user - Remove false positive ``no_log`` warning for ``update_password`` option
+- mysql_user - add ``INVOKE LAMBDA`` privilege support (https://github.com/ansible-collections/community.general/issues/283).
+- mysql_user - fix ``host_all`` arguments conversion string formatting error (https://github.com/ansible/ansible/issues/29644).
+- mysql_user - fix support privileges with underscore (https://github.com/ansible/ansible/issues/66974).
+- mysql_user - fix the ``No database selected`` error (https://github.com/ansible/ansible/issues/68070).
+- mysql_user - make sure current_pass_hash is a string before using it in comparison (https://github.com/ansible/ansible/issues/60567).
+- mysql_variable - fix the module not supporting variable names containing a dot (https://github.com/ansible/ansible/issues/54239).
+- nmcli - typecast parameters to string as required (https://github.com/ansible/ansible/issues/59095).
+- nsupdate - Do not try fixing non-existing TXT values (https://github.com/ansible/ansible/issues/63364)
+- nsupdate - Fix zone name lookup of internal/private zones (https://github.com/ansible/ansible/issues/62052)
+- one_vm - improve file handling by using a context manager.
+- ovirt - don't ignore ``instance_cpus`` parameter
+- pacman - Fix pacman output parsing on localized environment. (https://github.com/ansible/ansible/issues/65237)
+- pacman - fix module crash with ``IndexError: list index out of range`` (https://github.com/ansible/ansible/issues/63077)
+- pamd - Bugfix for attribute error when removing the first or last line
+- parted - added 'undefined' align option to support parted versions < 2.1 (https://github.com/ansible-collections/community.general/pull/405).
+- parted - consider current partition state even in check mode (https://github.com/ansible-collections/community.general/issues/183).
+- passwordstore lookup - Honor equal sign in userpass
+- pmrun plugin - The success_command string was no longer quoted. This caused unusual use-cases like ``become_flags=su - root -c`` to fail.
+- postgres - use query params with cursor.execute in module_utils.postgres.PgMembership class (https://github.com/ansible/ansible/pull/65164).
+- postgres.py - add a new keyword argument ``query_params`` (https://github.com/ansible/ansible/pull/64661).
+- postgres_user - Remove false positive ``no_log`` warning for ``no_password_changes`` option
+- postgresql_db - Removed exception for 'LibraryError' (https://github.com/ansible/ansible/issues/65223).
+- postgresql_db - allow to pass users names which contain dots (https://github.com/ansible/ansible/issues/63204).
+- postgresql_idx.py - use the ``query_params`` arg of exec_sql function (https://github.com/ansible/ansible/pull/64661).
+- postgresql_lang - use query params with cursor.execute (https://github.com/ansible/ansible/pull/65093).
+- postgresql_membership - make the ``groups`` and ``target_roles`` parameters required (https://github.com/ansible/ansible/pull/67046).
+- postgresql_membership - remove unused import of exec_sql function (https://github.com/ansible-collections/community.general/pull/178).
+- postgresql_owner - use query_params with cursor object (https://github.com/ansible/ansible/pull/65310).
+- postgresql_privs - fix sorting lists with None elements for python3 (https://github.com/ansible/ansible/issues/65761).
+- postgresql_privs - sort results before comparing so that the values are compared and not the result of ``.sort()`` (https://github.com/ansible/ansible/pull/65125)
+- postgresql_privs.py - fix the module incorrectly reporting ``changed`` when using ``type=default_privs`` (https://github.com/ansible/ansible/issues/64371).
+- postgresql_publication - fix typo in module.warn method name (https://github.com/ansible/ansible/issues/64582).
+- postgresql_publication - use query params arg with cursor object (https://github.com/ansible/ansible/issues/65404).
+- postgresql_query - improve file handling by using a context manager.
+- postgresql_query - fix handling of non-ASCII characters in SQL files with Python 3 (https://github.com/ansible/ansible/issues/65367).
+- postgresql_schema - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65679).
+- postgresql_sequence - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65787).
+- postgresql_set - fix converting value to uppercase (https://github.com/ansible/ansible/issues/67377).
+- postgresql_set - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+- postgresql_slot - make the ``name`` parameter required (https://github.com/ansible/ansible/pull/67046).
+- postgresql_slot - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+- postgresql_subscription - fix typo in module.warn method name (https://github.com/ansible/ansible/pull/64583).
+- postgresql_subscription - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+- postgresql_table - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+- postgresql_tablespace - make the ``tablespace`` parameter required (https://github.com/ansible/ansible/pull/67046).
+- postgresql_tablespace - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+- postgresql_user - allow to pass user name which contains dots (https://github.com/ansible/ansible/issues/63204).
+- postgresql_user - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+- proxmox - fix version detection of proxmox 6 and up (Fixes https://github.com/ansible/ansible/issues/59164)
+- proxysql - fixed mysql dictcursor
+- pulp_repo - the ``client_cert`` and ``client_key`` options were used for both requests to the Pulp instance and for the repo to sync with, resulting in errors when they were used. Use the new options ``feed_client_cert`` and ``feed_client_key`` for client certificates that should only be used for repo synchronisation, and not for communication with the Pulp instance. (https://github.com/ansible/ansible/issues/59513)
+- puppet - fix command line construction for check mode and ``manifest:``
+- pure - fix incorrect user_string setting in module_utils file (https://github.com/ansible/ansible/pull/66914)
+- redfish_command - fix EnableAccount if Enabled property is not present in Account resource (https://github.com/ansible/ansible/issues/59822)
+- redfish_command - fix error when deleting a disabled Redfish account (https://github.com/ansible/ansible/issues/64684)
+- redfish_command - fix power ResetType mapping logic (https://github.com/ansible/ansible/issues/59804)
+- redfish_config - fix support for boolean bios attrs (https://github.com/ansible/ansible/pull/68251)
+- redfish_facts - fix KeyError exceptions in GetLogs (https://github.com/ansible/ansible/issues/59797)
+- redhat_subscription - do not set the default quantity to ``1`` when no quantity is provided (https://github.com/ansible/ansible/issues/66478)
+- replace use of deprecated functions from ``ansible.module_utils.basic``.
+- rhsm_repository - reduce execution time when changed is False (https://github.com/ansible-collections/community.general/pull/458).
+- runas - Fix the ``runas`` ``become_pass`` variable fallback from ``ansible_runas_runas`` to ``ansible_runas_pass``
+- scaleway - Fix bug causing KeyError exception on JSON http requests. (https://github.com/ansible-collections/community.general/pull/444)
+- scaleway - use the jsonify unmarshaller only for ``application/json`` requests to avoid breaking the multiline configuration with requests in ``text/plain`` (https://github.com/ansible/ansible/issues/65036)
+- scaleway_compute - fix transition handling that could cause errors when removing a node (https://github.com/ansible-collections/community.general/pull/444).
+- scaleway_compute - in ``check_image_id``, fetch the image directly instead of looping over the first page of image results
+- sesu - make use of the prompt specified in the code
+- slack - Fix ``thread_id`` data type
+- slackpkg - fix matching some special cases in package names (https://github.com/ansible-collections/community.general/pull/505).
+- slackpkg - fix name matching in package installation (https://github.com/ansible-collections/community.general/issues/450).
+- spacewalk inventory - improve file handling by using a context manager.
+- syslog_json callback - fix plugin exception when running (https://github.com/ansible-collections/community.general/issues/407).
+- syslogger callback plugin - remove check mode support since it did nothing anyway
+- terraform - add support for absolute paths in addition to relative paths within ``project_path`` (https://github.com/ansible/ansible/issues/58578)
+- terraform - reset out and err before plan creation (https://github.com/ansible/ansible/issues/64369)
+- terraform module - fixes usage for providers not supporting workspaces
+- yarn - Return correct values when running yarn in check mode (https://github.com/ansible-collections/community.general/pull/153).
+- yarn - handle no version when installing module by name (https://github.com/ansible/ansible/issues/55097)
+- zfs_delegate_admin - add missing choices diff/hold/release to the permissions parameter (https://github.com/ansible-collections/community.general/pull/278)
+
+New Plugins
+-----------
+
+Callback
+~~~~~~~~
+
+- diy - Customize the output
+
+Lookup
+~~~~~~
+
+- etcd3 - Get key values from etcd3 server
+- lmdb_kv - fetch data from LMDB
+
+New Modules
+-----------
+
+Cloud
+~~~~~
+
+huawei
+^^^^^^
+
+- hwc_ecs_instance - Creates a resource of Ecs/Instance in Huawei Cloud
+- hwc_evs_disk - Creates a resource of Evs/Disk in Huawei Cloud
+- hwc_vpc_eip - Creates a resource of Vpc/EIP in Huawei Cloud
+- hwc_vpc_peering_connect - Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+- hwc_vpc_port - Creates a resource of Vpc/Port in Huawei Cloud
+- hwc_vpc_private_ip - Creates a resource of Vpc/PrivateIP in Huawei Cloud
+- hwc_vpc_route - Creates a resource of Vpc/Route in Huawei Cloud
+- hwc_vpc_security_group - Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+- hwc_vpc_security_group_rule - Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+- hwc_vpc_subnet - Creates a resource of Vpc/Subnet in Huawei Cloud
+
+ovh
+^^^
+
+- ovh_monthly_billing - Manage OVH monthly billing
+
+packet
+^^^^^^
+
+- packet_ip_subnet - Assign IP subnet to a bare metal server.
+- packet_project - Create/delete a project in Packet host.
+- packet_volume - Create/delete a volume in Packet host.
+- packet_volume_attachment - Attach/detach a volume to a device in the Packet host.
+
+Database
+~~~~~~~~
+
+misc
+^^^^
+
+- redis_info - Gather information about Redis servers
+
+mysql
+^^^^^
+
+- mysql_query - Run MySQL queries
+
+postgresql
+^^^^^^^^^^
+
+- postgresql_subscription - Add, update, or remove PostgreSQL subscription
+- postgresql_user_obj_stat_info - Gather statistics about PostgreSQL user objects
+
+Files
+~~~~~
+
+- iso_create - Generate ISO file with specified files or folders
+
+Net Tools
+~~~~~~~~~
+
+- hetzner_firewall - Manage Hetzner's dedicated server firewall
+- hetzner_firewall_info - Manage Hetzner's dedicated server firewall
+- ipwcli_dns - Manage DNS Records for Ericsson IPWorks via ipwcli
+
+ldap
+^^^^
+
+- ldap_attrs - Add or remove multiple LDAP attribute values
+- ldap_search - Search for entries in an LDAP server
+
+Packaging
+~~~~~~~~~
+
+os
+^^
+
+- mas - Manage Mac App Store applications with mas-cli
+
+System
+~~~~~~
+
+- dpkg_divert - Override a Debian package's version of a file
+- lbu - Local Backup Utility for Alpine Linux
diff --git a/collections-debian-merged/ansible_collections/community/general/COPYING b/collections-debian-merged/ansible_collections/community/general/COPYING
new file mode 100644
index 00000000..10926e87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/COPYING
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/collections-debian-merged/ansible_collections/community/general/FILES.json b/collections-debian-merged/ansible_collections/community/general/FILES.json
new file mode 100644
index 00000000..6aaf295f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/FILES.json
@@ -0,0 +1,28117 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/gitlab_runners.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6b138214e7ca210dd078238c535893c7fdec06792160bad6f6d3b588858b4c8",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ea953832abf39ae0c3042b9276177a62439ec52829fc532eaa97bdc29fc59de",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/virtualbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0aa2159a3476d71fe0f658ea6ced96687675904f9b3b0871d0b910448b7b4f4f",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/cobbler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b0d63e33aed4f856bea0952d0857937e972b832c843b2e0d7c97b4ef597ca70",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/stackpath_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55c0ccc7b64c91c9de407f0a32c606f5e51cf1e831c548b1218e376cd611a438",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/docker_swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2f99e7f139fd044a8e07377b8087a3ce758f8bf5c9efffd879099014fe54014",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31e8358d5ea492b4147a318e99363b3fcdaf844f8af0df9f5c657c2637855e45",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/docker_machine.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a1b6115ffb36ed489ddcd1589bd088c43378f402bb041090a6a8d2fc0cfd599",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4017772987b6d4abf13f87041993936b780d86ee3400b56c289a53f7c1853756",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/nmap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a73b168307d0e551556cf4022860c9b205cadcf7f1a255f003b4762040e8f2cf",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/kubevirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfa761f7027f71ae6b570855110cb59662b80f2d27a3c6a9544fb374d38e237f",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13f60ad5fa0f9bb07a4a30c34f233260e41f27bccb431b7d9baeec6b7e33bbf5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "087261c16e07423ea89a74233e89b82ab56642ff4abce7ca2af3d2497d79f319",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "971d6e8bafddd6d8200286c6459f2aaa24871cbafcf94443f3b03fb7bda690d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ee1a254c571f79836711b9bc3b7bb23d9432e10d7a9575c70a231c38e46a3f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47313cb3f96c0166a102bf568688f4fad330764f6faaaff6083ae8c396eaf133",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f6b917833d79430e5e6e5569879333d8d6bff53ef76d2973476dfcc4344f88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/solaris_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/osx_defaults.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de3d35c9f9006b236e76c0cb92883305598f3dce2f29b5b840104851ac5dcab3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e6229cee4684cff6c1ac905fb9a0fe91692df8de87e325073eece89992fc5ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/onepassword_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44635d0ae6630b1b265051edb8bde038f149f998f6b20b615b9397141af050fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_cdi_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b13338bf0646f8a4d5ba8c0a2c5285e816f72afa3fef6dc182e96fad67a85f6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_share.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d15779f93dcbf8a888b867dc9208274ea7a6d674d831fe90e9a02d97c9e58eaf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_policies.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a35417d72c064aa7e32028d3ff9b6cbf6476e0cb449b2c0baaa3d962309fe61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_img.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b7e86d857e3c5f6deaa940201f74c0ce54aca56d4ecacd7274162ecfbe43fc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f499285e7d5a63fdbd4562a5a4a3c38b497fda82c326e629f0dfd057c984eb4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/syspatch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5efdeccf15b244b206f77af0fa288675b32a3dd56211d4a9e2a178cb1a28fec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_auth_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5613359e853bdaf3a258d452872f745607acbba01831f36bb6c75599ec4f4ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0d40894a700318deb4ee2867f9af22a724d3f42d4fbf0453f59f6b068b0aebe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_membership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ca88131820b8919066dd0cc1beb52e45a0364af4f45d1688739a0ce5b620ade",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce749a5da1ee92bc2344fcf32193c9fa28323a67978e065b2cf78e1cab80813c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c14aa0a51cfd7bb663dc9f6457fd93e880142632f00b6816bedc89620b74490e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b20afd1a3778dcb1fba734e410a6f03d5d4b617a6d9b949a7d3af71f65155ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sf_check_connections.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39a754f4072b1053ccfd043068360a733d79a13db946383863f9521500289848",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/svr4pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22af57c13a7f7e7b0cdde00e50b5bc04a50886bc5ffbaf43aa103267f90675e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61a7d7192cd37fcb0302f16c35c33505d01aa87e0d861bc9e3fe8dd33bd46ce9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_publicip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9e8f18cea2224e5f6bd8b24e38122d3c72368f57f173231a2f07c8aa9df383d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "275e2260b89d3a3341ca3c4c8460f327186d2f30d65f972774462469815132a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2fcdb1ac780fcdd7fe73e8024fe5324648531671592912a823c7d6d510e9703",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jboss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "482b60ea9d9b4e35b5e33abf89121294b4c015ab21144c582a980d4ae702aae7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_labels.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa39632b3f2777e38cb46539eaf38e7fdfd537f9418eb9b6f78949e42ebb6818",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_template_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "891842609bfe672bb3cbefdc3b09a412d6aa78821845437bd431bda0a3fc91b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gc_storage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad8779d8743fef5767715b490e7d6375d0c9102ed30aa030a17413d70893852",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkg5_publisher.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nomad_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a2da24c881d689b0440f267a64e1181331aec5dcdac65559689a02be399d2f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07687658daf8ad9bb7cdbe8fb98d69dab13f52119ef8524273678b524561bfe5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af21bfda2c83803ac863a868857f30fc34dcf342b4ab163cd5f1c85428cd0261",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935d481c82c7bbb29c246d2b90e6c802bf70f7c219e1af184c042ccdcaf55ec9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2ad85905b2f9caf3e72f2efbace0c4e166452e06cba307815ab501fd47fa5b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_alert_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f056c9b18ce84d1719a0cc6c0047fff4b5bf9088fc009553455b116ddf24661",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d289fd3d800ec3135cb36400f7edfb6741746606c5839ef8acdd80b30d968457",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pubnub_blocks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2284d4f83fb20ff6fa709676901a45b7dc98f8299c3081b501f4bb32a88e0b69",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pushover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_san_manager_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24a321c453738aeb66c5be2c46c2869214658e81ac48dd0f5952ffe9fcda7b26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/smartos_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08e06e04872a2d1f8a82d47edecac93c32ff505440feb75f73903b40d14e83fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_dns_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "724879648016b6bb2b4406601e7b3403ff7e625236278afcc4d4faa46535424e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pingdom.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80603033f0ef0ade5b9ddda04563580985f38713b3c4efc5d67b156b3b669043",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sf_volume_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2217e730d1ee09df5d2acbd94654d7cffaef4a45a5f93b5abd854ffa6d85454",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcp_url_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "852c27c833b798760e09e8a49f6c599ec1397212a1934dbeaac2c4f8a360ebd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_script.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5abc31472144f31496d86c3a032244598b96397b2b939a8a7d047f4fdbd46c95",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_aa_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0d4f31be2c9d9d02a8ca10a1d0424706697fb1b49ad9ffbdb9f3848e260da7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e32b1d1f561baf94378f300d1606bf830ca0f383cb8e24341fe68eaf2c4f8ef8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcp_forwarding_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74aec5915e1a3d9ea3a857af1a92ab4208d374c9654df46bf0da8fe0b1c8f0d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b14cbe85c08172e43254096ee88f1aee85af6636d142ff13dc5a8aa449b9e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_mailbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "892d14fe4016b7b32b0be35d98ee1a7bddc6e295d1892e49d347fb715ec4e502",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_queue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9806604cbd4c1ece98ddbff31c2ba875d30b2d8e1eb698f23be23f258f93ecdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_san_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58359cc106190240da426e0506fa1e32b479225a5671165698d5f766c2f46dff",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipwcli_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae8dd037de462f927965100a87f91c50e6928ea6cd612ac5f3e8de63f821d4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ini_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dacbc9fb37fe2ea97c47896a53cffe5cd57207a6445b07816323ece0ca3c86f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mssql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/stacki_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "933c1fc1d7ed244a7fc48ec213d70ba499796e940d9aa65abded1eb2c41cbd0d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "433f51286db01d386666c5d4b8278b6e43dd7a7ce59ba8fdce3dc9e0c86ffa5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_mx_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0306b116c74a8240153cba553aa1d4d0644bed1b7fc34f63ed906fb9d1b45d23",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pids.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38e7ab5ae14974177fb9a3b90658000151492dd87a5d77bd322f5261700fecdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6ffb5c28d5dfe503e8144100dbaed3a7a09b42092d1dd77c7b32397c2658a47",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_api_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e123a2850ee96876b7828db14540f577df4a83caefecfffa6bbc4708e01194d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66358d51e24653a1580b3eb78cfa4042476a06d642a9b50cca085a5a53eb986b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_blueprint_package.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "106b76b182d10601925101a24eacbc9ece41d5fe5dda77082785645d3fb60324",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cce1d65678626ba7bd2cf12eaeb7b7970c8b25526579d928a99a9df5ab507c68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew_cask.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41e76feaf5fbe525246eeb7b159bd36f7a4bede9bbecb4df424238511f34467d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clienttemplate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74e9aa2f6e94face0e0f5119e6a42520da9b079d94cdb5eed5c8a42df95109aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_lang.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e86a2db785415ea4e830953aa26a47dd3d75b52e161847a191f39bc931d3cab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/honeybadger_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4134b1c7048a20ccb625634b2ba587532a5ed96feaed4d9fb57efb254916cbeb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/git_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0808d6f8718ac41432121b6a35cce4adcb97e8279cdbb38cd5a72a1b7c181945",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cisco_spark.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de0818295d2b8a3826a438a6da40d87cf22d612bb2f5f48805b1d47379e862d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/macports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92aa83b0e76d3ed6bb3267be4078dc55ae81e4869347dbcd3282f0218166be88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/librato_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0eac8dd17dee78a7cd7bb262d3f479858ce5d7db0ff9551522a5827a5336a99",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_nic_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "203ffbd72b0503eb24938705111b23f57d21f50639b825bb8d414a2a27534daa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_network_vpc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_private_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/maven_artifact.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc2d6b5bf24d8b5da231615b356231b400cb983bcc0509ff54848d5240b90b41",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/twilio.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "253715887b093a1818ed5a08945889c394b6a303dde84069807f76a3e3158e01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f52e3eb54993ec137853cb0d7b25abed0f9f9b8f10227a8b0e720cd1a70cf8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bundler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d47853b115d001bc49516bddc5cf547a2d85f7f5359501f03075f1a0bdfee9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fcoe_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f48af12c1a23fcc09493aa9c93deae4487db65d1332d1ec31c755e93c2db5cba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/pingdom.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/pagerduty_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/honeybadger_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4134b1c7048a20ccb625634b2ba587532a5ed96feaed4d9fb57efb254916cbeb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/librato_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0eac8dd17dee78a7cd7bb262d3f479858ce5d7db0ff9551522a5827a5336a99",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/spectrum_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/statusio_maintenance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8178a94a79db920b5af21fba11592ab25da15a96220ce52e86365fe7dee19e63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/icinga2_feature.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu/sensu_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cce1d65678626ba7bd2cf12eaeb7b7970c8b25526579d928a99a9df5ab507c68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu/sensu_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2385374a7e0d12d2aa7f02db22963ae083a09abcac4c4820ad98e6ca7c4a56fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu/sensu_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1901ef19fc3b55c9ca42f7969767a8afea8b3155d36290cba7b5e05e06f15842",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu/sensu_silence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a66559942fcc141b0b21f9f617ef14dde9dfdb22ebe706c0d992a4816fb62e86",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/sensu/sensu_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0771612a4d78971ea1f68f1d997b277e2a58f4e72c5a57e448f392d7434c4ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/circonus_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cffc43f0116fb4a72e73f4e1ba75bd9e5e3353b5cfe9f9fea6d65e0d2108ea43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/uptimerobot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d78b9b569cf508598fd49168c050fbe2164df8328b33792093368803a1212fae",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/logstash_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2eccfdcd2f11fa00d32ae57bfb174adf0801d03c861e8e12d4de9797451d567",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/rollbar_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9485e0f35bb362d919fbdd58cc0e123ef8ed89751aefb801b7046557dc693f03",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/datadog",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/datadog/datadog_monitor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f093f0b36de8fd2553715c66f6eacf27012ae106610f8bf8d919d5fe4ba09ab7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/datadog/datadog_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab82e356848f0da5156c4b5a35f2df7d057f19e321c352fce363fe855d603c48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/pagerduty_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90ccb28e4d767232fb186e458d0dea0512e5c5e46b7adecc8266dafae682c8d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/pagerduty.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67c1f96200b03fdb1c71c248b86d2fa67a0f118e1fd4c6817ff323c381595848",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/pagerduty_change.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/monit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e699bafd6dfc54d60df5107e88405a00b89991c1572aa16e60817aaf0ece5af1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/logentries.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1092b7f7ab43dfab3f4a3cefafacf3ca737de9cad867148176f8fc1770e39cf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/icinga2_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e58673f163b94ba752882ed2bfc5616b53fd81f9a431bd7aae47d94e3a3dced",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/stackdriver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e469374e370c83bbff5d8d2829b24a09488c26c769853f1313ba66bbe7722e48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/nagios.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/bigpanda.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c521a6e0cd8df2272f2314deb990cc3bfeea4761afe8c47570135db0d32f41a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/airbrake_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "674fece1637f868922c689743bc77a90fb2fb024bb7695329f9826675650ba4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monitoring/newrelic_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhn_register.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc3a0b888781830b3d41fd5aa05171dbcca6f3afe30278645c9e27ebe9815185",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/spectrum_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55f3f1172d7539224975575bffdb1d7d10c7999d51b3d21e006d96351374df63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/openwrt_init.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7da2deb1d4c5d11a585533f69c986c2cdb525265e3207a619791b61b7d58b685",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ejabberd_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ee35c9c2becfee224167f8eaa84b3fb3629de1d553ff7a96d22a208e5b06c8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_memstore_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed49160abf2ad2493589d1cb78d836e8f46aea5b6747cdccf7d6064b09a01303",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/statusio_maintenance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8178a94a79db920b5af21fba11592ab25da15a96220ce52e86365fe7dee19e63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a859c3fa87e6e45e38b8f8f7a5126b462bbb25097084dc061e5459e25e4ba4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_ontap_gather_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be1184402d1ba8eb209fb7c376d7f47b42e2d2126d55ba679b52a08559c40088",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/java_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1ace748ca75f0b5ce0cedde55456365b4529a00d3ba63122ab1db5f225aca90",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/python_requirements_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28282007ea020c0028150e66cd59f4fb1606f65b274e4d969d0f5ccb10b03dd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_pg_hba.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d803927f52d842f23e3e0c0e9d8c15a2722d87f4c89e635904ef62f4ce8bcf6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/icinga2_feature.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ali_instance_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aab7d64776371adab53aeaa905e756a524f6858be5490b53754f45405a05e7b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sf_volume_access_group_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0708d647b51db83ccdd6c82746b4a3c107cf7a51b51f0ebaa36620e2768e110e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/maven_artifact.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc2d6b5bf24d8b5da231615b356231b400cb983bcc0509ff54848d5240b90b41",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/bundler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d47853b115d001bc49516bddc5cf547a2d85f7f5359501f03075f1a0bdfee9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/cpanm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf25f0128d85aab7098e8655e20a823f5a9c1ca32247f91c3a0507d27856082a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/pear.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be89456445044d37e1fe93dbe469dcb0ebab59a0a364cabcb7cf38471b7e1d1e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/pip_package_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32403aae617804c7f05250e88c45e39d0eece5e2b1047f56883248b427c958d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/easy_install.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/npm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e1604caf4f1af452f860af230ff8686c544736ca800599510f32113d935f786",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/bower.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/yarn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddf377e61b98568900d850a9663cc7110712986fd652188e7d0ec5f7b6fc4ff2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/gem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b84ef579da05d86c4e189212e879d52632af6a136e9ce2dc38c518f3ac470ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/language/composer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e847e7492cb58364d8571934538d9cc4f3ea8acf746070bd8f1687ca4e83915f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "087261c16e07423ea89a74233e89b82ab56642ff4abce7ca2af3d2497d79f319",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pkgin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0d40894a700318deb4ee2867f9af22a724d3f42d4fbf0453f59f6b068b0aebe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/svr4pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22af57c13a7f7e7b0cdde00e50b5bc04a50886bc5ffbaf43aa103267f90675e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pkg5_publisher.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/homebrew_cask.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41e76feaf5fbe525246eeb7b159bd36f7a4bede9bbecb4df424238511f34467d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/macports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92aa83b0e76d3ed6bb3267be4078dc55ae81e4869347dbcd3282f0218166be88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/rhn_register.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc3a0b888781830b3d41fd5aa05171dbcca6f3afe30278645c9e27ebe9815185",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/portage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be362cd3164874bf23e0de3737ad59fbc10a1b6fd8a9fec64df877ab74146a0a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/flatpak_remote.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b71550bed75bc8fbf5ef4b7e89376f6d60de227d3ec039a07bd5700005fe6611",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/urpmi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f83c58d50bb9135a7be2b08f9b32ccdbbabdfbfd20a29353b66376d185bf64e4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/homebrew_tap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcc3e3988afaabc0d7544f924ba10a806c082772ee54b12e1ffafbed3ded8bd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/portinstall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pulp_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4107b3195e15435a28f2ca8f8f98db50df9678c93f7d4250d44f19767bbad16f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/rhsm_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "373a405741c2484bc48669b3eebc2525810f2069ed142095376b322b450c44fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/apt_rpm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50fd04770646350b221fe9c59ec2bce4c73fe03366113c65b79309911807fef5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/sorcery.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/rhsm_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ffd51768087d3b9df7a4cfaf4ee571cffcb373ff770a1eacadbbab9d537918a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/apt_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pkgutil.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d159d54afae920bbbc3ef2fc50c8f23a7e2f74a4931f6f59ce799d90a367403d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/homebrew.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8b4305f2f13f327b7376803ee139f6f87e6bca875d481fd5d32d7f07d488f33",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/zypper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "060ccb562cc31deee1a3f5c2b83df53e9233ed9c8db9561fa2c09962a7e2efe4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/xbps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99ddbba3a2d7740e4be2d2b436b39f7421e9246378365e5ff4b2a60f80306c93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/rhn_channel.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9b34a7ee26112e9c5e3244d8d3ea4467e998fdca684e6443f1f12045a1267d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/mas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52800c9c7cf8257ed285c79e72e95d78266cb7a6d969ec6875137cc6ef7d73d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/opkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd93bea75b70e04af71f95a8caefb83e8d477ab8c9420dcf052b90b7bbadc932",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/flatpak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12676542d7a15b9ef198d8d95d3112734faf0c2702ac1a32688be16a63d8356b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/apk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/layman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pkg5.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pkgng.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7eb5a1082ac0624c8e380e5264e09966bea8abaa6e635e3326d5e2babc59f6bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/redhat_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6cba86028f7f561bd3da6980f8a77e32ff9c9d1641e14f41e9bfdab201a09b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/slackpkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb680d9a9a0d282f5efc503bdbe9ad32e88bce362177e300db595fce77b8c6e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/openbsd_pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "994c3bec9149dc9a7e1f70a4cc168f38bfeb9e783b09e9fc261a44ea46b1634e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/swupd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d97fd0540d721ac8131049e4868f2dbebb736a6873c9b3d239a4b1b1aaea5f59",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/installp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/swdepot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/zypper_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09c18ea8eb93c7b031d58316301018dafa18791dfd7395930b7ab30985b65d7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packaging/os/pacman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1569e965bab7f6385730b9397685f3d8d26530522d550572298ebc94b35228d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_ping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9f54eb3f4561bce095f1eab82be935becf8b9c3e9a5c20d48f4205721342f51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vmadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c5bc7e1aed47d7fe95a58bb887521981c10784e2b7925f5afef6be222cea0d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxca_cmms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95f67d443777f341f8c1a2f323eb3eb5da7794f7ef3ddc0da6c0c6cd33001bca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b06e66b0bc3d74ca627231df77be690f70b4682fe15a7f776829c115fd652cb9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54f6f7df39ff842cfe8939ccb6fe0644cce9af4fc0c0fe0022b694b6e84d396f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_srv_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fe807e3e4f8cb7131205780359028e416130ff3491b99f9e60330b4b0dd3469",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/make.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ca72ea1d8db919f94d1c81fdec999f2608d97c01776247012da162c7ad253af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xfs_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d2fd9ed1316a4341dcf51caa8af1aed4bd7a097af5f09ffe5a1ea928cb8c2c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ipwcli_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae8dd037de462f927965100a87f91c50e6928ea6cd612ac5f3e8de63f821d4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ipinfoio_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebdaa5a8e84d1762b4f1b1f4c4bd0aa38ed7e05d2a604a8ae568ff7bbcdd3566",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ip_netns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1d373072658e68e3ed09b1e7b24cd0cc4760368d7614f0f98c45986e4533dcd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/hetzner_failover_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7963ebcb4fd25467c0752b2835045f99bad62d15d86445bd0ab6cf715889d300",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/omapi_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bcb31a88709a9303506bfee3359c14ec9e811bedc775c1b80b9163966aa105a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/hetzner_failover_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0138339b0c0b5625ffd68ca0ffef5ec16792f2d4aaf2794afffd6a0ec3ab0ab6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce749a5da1ee92bc2344fcf32193c9fa28323a67978e065b2cf78e1cab80813c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_mx_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0306b116c74a8240153cba553aa1d4d0644bed1b7fc34f63ed906fb9d1b45d23",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_srv_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fe807e3e4f8cb7131205780359028e416130ff3491b99f9e60330b4b0dd3469",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_fixed_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97e17f3436cd23ba54e77c4b9f8640ae574471cdafa3192889d5be1930187690",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf026fe499eb669e54c3599784dde57341d3309ca47c98be56da4ac6dbb90802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_ptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9afcb4e309c338f453184ac931fe06c74f8c358120566173113d0b6b2bbb3510",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_network_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0824697d1582b9f6ce9a8446b0d24c908b8cfd52330c664161f096ed26995067",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa50813ae97d8d5ae7a6d107d566de76f17fd81391756a4b15cb71f6e17bf73a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_a_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9fae0fb773295dcb048ef96a71b7a02bcc3d2e09a7e23d12f44a7aceda106fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_dns_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b284f904d003f2e6bd72e2c2084f2c2a7defee2391537f422ba6ad498c27e2f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_nsgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c40e9e89b4a7d6f30c856c8186f4665da5531b6d3c390182167b97adcb3af76a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_cname_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "681fe0624aafea89a4ab62ce048c7f2f09da21518512084a4dc41355d6e0be1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_naptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22a4fe451e396fdd934b8bfff8ceb132b4871753c6742605d1962969d9062311",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_aaaa_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c4514285788c44ae3961bd44db427e93cacf25e9dc3b47d666b474604ad404f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_txt_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d0b910babc14299d7a7cabd20aaaa42c8e8df021687ffd3b5f5f4e6f2fd069d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nios/nios_host_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c1ae4530cc7acde03f293adcd5e0adc9c37a40ba3856e5acde36fb4635cfd90",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/dnsmadeeasy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/haproxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d8254d30cb970a5a2838dcf0c315cd93a7b4d3b0b815fcf768a3d2ed2ca7158",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nsupdate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cf82ae55ec11e4849b07082756dd2e83775f62f9d4cdbeb43389176693b2b93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/nmcli.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "938ca868fb2391e28fc2ba432e32aff3a13beeab10eb3691e098aaf40e906a48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ipify_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "909e93a70f4b6eb99e22b5915aac96e011a408c5d93d5d517fefff39933c856b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/infinity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/infinity/infinity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccd4296408750bcc261f63fc2fe8fcebb0b2de08058bc08c6f976b34e07f54b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/hetzner_firewall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5596eeb9afa633ba7b4e8d1579fbd9b2c1745ba8f27e160dba62cca511dd313b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/dnsimple.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf69408ce8a10aa4687dc4c75391e797deda461ea35628900f3fa57e70b6f166",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/lldp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2854a7d542dc7c585ff34c4170ce5d733e03b8eb5d1c84d7a01bb493d2609bc6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/cloudflare_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03d9945319c8f9d0eeb4df6463265cf0784bf20a0b9d7c7cacbe688091fa7e4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/hetzner_firewall_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1479a7450e8c613100b9d729db4a2810d34b5a2991dc50d06dc6598fdb35338e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/snmp_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e07fed63168b9686045e42ac17f51e16aa7fe1da9d0fff8fc8ef00693005df8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap/ldap_attrs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a983d1a574c1042b15b606c308ccddb4a890741f79111e5d9ba8f7ddb9715a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap/ldap_entry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e8e1793eb03b8ab6c17dcb66ed6966eb767c01139f95ff9d8436362bdc7ef46",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap/ldap_attr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bf29cd15a9d1b94094e2f631aa3c4cf08acd789e7e879bfdfe28a311c24a271",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap/ldap_search.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3af13a76f94587edb07bdd7a3b3a3d154fe67c8c35da79fa21c851f0a70c2dca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/ldap/ldap_passwd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/net_tools/netcup_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26fcee470cc40f970d929fe16c39d8aa85ca30f226314dabecc0013b33079f3b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_snapshot_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "320fbfe9f1cbb4a18b545a00eb41beb14ecbb67a269274130fcc47715ce6039d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_public_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da33e62643a0422308fd7ae7b5842e6a0144d7bf3ca6cf418670d8f6bcf4f2ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cbs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ab1780cd03fcbdef808dcab7f86059564fcdee3cf029430c37b6c37cc830b36",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/circonus_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cffc43f0116fb4a72e73f4e1ba75bd9e5e3353b5cfe9f9fea6d65e0d2108ea43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipinfoio_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebdaa5a8e84d1762b4f1b1f4c4bd0aa38ed7e05d2a604a8ae568ff7bbcdd3566",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/foreman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52aa6e33bfd01af58f8652e00585bbb5d2ad82581898c199b920d3691dab9c1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mqtt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2298ac90a05ab3825f068e18299f23ee69d61d85daab4d103c6d743ef3c69c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_logical_interconnect_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a15b1f773b587ad59653d66c0073b58d861372f09369ace09cd5a3bcbdbeb77",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dfc84722608e18c24662856af1e529b2cda94e45c8b2daab1f2f018ec794c64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eab6d09afb37f32d0b76dc3fc7db7254d721e0a215430d7c02e56e03b15e1cb4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_prune.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccea35cc5a7652197a0caf4a120a8506ba04643147186d3e14a5327a11ff563d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/grove.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9f7e1e9432b2d7a2132e88e272a0cafc03855f41efeed99808f061b113dd2af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/uptimerobot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d78b9b569cf508598fd49168c050fbe2164df8328b33792093368803a1212fae",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c7c665c51f10cd0193e676a4a7d2aac8247096230ff93dca13596e205359252",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_hbacrule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c7d864fbfe05d49c715b48c89ddb9dee40d5d4a59b4c7ec32fdfa127999a1a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2385374a7e0d12d2aa7f02db22963ae083a09abcac4c4820ad98e6ca7c4a56fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1713ee71635aa4ceb67b44c2d991cb0839a59deb8e9ab3d6567be7f1629f4557",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hponcfg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b0cce24e68dd7b30e7070199de82d95d761d73e11a7ac2b9c4c116cc6da467",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nosh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0814721a9b15d12099c9cb6df8c8e7f2767335b58f06940e49f13e749432c3a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "317f2ab5a5c42a4fc000606a5c4440229b28414a1df11a55bd8532b9441833ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_fixed_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97e17f3436cd23ba54e77c4b9f8640ae574471cdafa3192889d5be1930187690",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_mig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbcf3f39975f688641e88beaaaef23227122b805abc13086a7f73cb42fe1f374",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cd0c5b9d57cc4003f1d44d22fca6e2b1dacbdecd2f45d64281f47661068cd6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_login.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb38d2596d0d75447ff22b2502199e34f92792346d271ebf34b020f4d571f7a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf026fe499eb669e54c3599784dde57341d3309ca47c98be56da4ac6dbb90802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_host_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bbe90f85251be27448c39fd99da2f440b94106fa5ea0d6614a4520c47d86df82",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/serverless.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73486c292e3d3ed88f3ad25a0411bf90c4af7e328a63ba9bb213cf2253c97847",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ip_netns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1d373072658e68e3ed09b1e7b24cd0cc4760368d7614f0f98c45986e4533dcd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_node_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab3f3801352a542f514de39711690e0ec3d5568dffa217e6aeaa660cbde43f85",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f37f70cd4b7a3fc317f59bff06ced5c8f6a7704760e8873cca739c6b26caa00c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_ecs_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_vol_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cdb81a12863af6ca8f279eab2a9fee1927d6b10a74b68f9b889a53bef7d8da1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c3e4035d39ac49d3d57b1587a626f624fc476313c3c9e835b159b8983e9d5be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ohai.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_ethernet_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ddedb56f841c97525670f19ab004f8c3431bfaa4cf32fb15bfd2cf65883b7d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb_ssl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cad5870151dac39a3542fb19e80cda02285daff35312b0b7229d45b877e2da9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_ca_host_key_cert_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93d5ed2b1cbcfeb88b5fc797df0aa799316975139e38cd421de8c0e6e4f7d04b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_organization_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf06a78a4beab79fe102bba67a69ead3fd8c35f74583b305458d050849fa0680",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/runit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdfb09bca1dd52237f5c9f8b99031af05f3b6037e4745f7228b472d257d87ee3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hetzner_failover_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7963ebcb4fd25467c0752b2835045f99bad62d15d86445bd0ab6cf715889d300",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0153da122935d17da8faf41f62fb3bb02d2eaddbd684f20ce06f9912be18a02",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcp_backend_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5dc82a7a4dc3dfabb49cb05b746f095630b498f85c193cc68c79c129bf097ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb2ea1275ffd0ee02ef3fd353351ee2a2d334885a6d882b5e5e9ce4a1c77fbb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/portage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be362cd3164874bf23e0de3737ad59fbc10a1b6fd8a9fec64df877ab74146a0a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_ptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9afcb4e309c338f453184ac931fe06c74f8c358120566173113d0b6b2bbb3510",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bearychat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_user_obj_stat_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84f853a86e90bc120702630fbb114db1bf5b1106eea56f460e0ae3d940f90b87",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "427a060f31d2b087a56495f20fd3fbcc3a86a3ff681b94efcc9e63916e2fef68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_storage_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "538922d17f167b0b833b9b30f39b1bf7463d1e49c212bb9673ed372601c4b7d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9672abac6e6b533e226dfb5fcc467e8d5b20c836362bb3cb932dd3af67bbb64b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_host_ports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "884e31f2c3d1f0c11c2f2da3e0167ccabcef3cdb8d729d225b646857bb6e40cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vmpool_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "486ba90e26303a9cb3a9867916e0c933b1c8cdf7789f8610761637e34641bac0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7859987160b6a81a86a578ef352edb5b5e0c2a191a2c5e475b2ab0083252f47",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_svm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a93e5cd46d88b75de84429ee18d210560c73d84cbdefd7397bc588013194322",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_webhook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebf044078453f980ab8dc804c963ea1d2f31299bc982f29cb4cb18f715b09448",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_dns_reload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82214785d2a227099e7675ef32b1b05574fbb5e0f71c99ff6c3a11492f77a196",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/omapi_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bcb31a88709a9303506bfee3359c14ec9e811bedc775c1b80b9163966aa105a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d277d7ed1c202fae0d32e8a2d180310a9a6f9d553890f02cd74cbd0fbee00fd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fc2235d0e61e5abe6fdde4e429b99cfb220fff89e57a470972db134b084b3d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_hooks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9337e10a63d71a522a4c77492ac02c04060f4687a0b77947e7f9083b4a8c80bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcdns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af529acf229bddafe47a5599b5d7033cde4802f49530e202f692792979a72f4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d05ba86911c4d94c6e7593283cefc7b708c23f65b70c870620466fe8ad90d17b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_cluster_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af450b217197476ea291e04e58d7f9311a8284cee6c5a502cec494d2b2590802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13ca5370ce4741c58f6e5385b1f151151a3d60ea4e00ea456619c5806f366147",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "737dbb3bb745c278f0d947a91e37c8653e2a2ee47860cd643872904d7cde9faf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fcoe_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4ef083cfa921d2fa0eaef8c811ff750b35f989d053047848afd5db17f601494",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_domain_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78a13eaa2eebe07fe35057f1fd77cd41cd6e43d8014051de7003ad909b10c8a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/datadog_monitor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f093f0b36de8fd2553715c66f6eacf27012ae106610f8bf8d919d5fe4ba09ab7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flatpak_remote.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b71550bed75bc8fbf5ef4b7e89376f6d60de227d3ec039a07bd5700005fe6611",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sf_account_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1e81b8749bb53e8d5f5764d914fe6df2b1ae61ab0745cf10ade287a0100796a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bzr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3c76b688efb71cb924cdee392fcad705648bf3c4f477ea389866ed0bb155814",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45abe3659a08da289e4b54a27a6d713719b79ab1616ab3e1e02ba9d29c33d256",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_network_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0824697d1582b9f6ce9a8446b0d24c908b8cfd52330c664161f096ed26995067",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/urpmi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f83c58d50bb9135a7be2b08f9b32ccdbbabdfbfd20a29353b66376d185bf64e4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/znode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef9a9dd29b510c06103ae7a7d4b973b4973b943174e63de7fcb0ea11677ae74c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/onepassword_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44635d0ae6630b1b265051edb8bde038f149f998f6b20b615b9397141af050fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_smn_topic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_sequence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e94091810e122a119bba47af21d1710b3783640953096bb1ebcacdd9b892b3c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/deploy_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a16a4f004e90b5e80202efce594d9a8d2a9c2ce3f15164b7b583718e60d1298c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_ip_failover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c37ecb76d0a389bd30712eab1dcace144991c56b6958ac716efb648d9fa05c4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/open_iscsi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "057c0db7001cad637996e7310baf8b2d84aaa0486ad9c2d483fd54af70f0547b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_host_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bd90aae0d34dc54ce77c9b28109c919c8fb5011fd2c41f5ac4802c23a238c37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_entity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15007bcf1172ebb7615da39fa13c7a7a101a3d2fa457e1987891139e57fef2b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_user_data.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1672964f9768fc7cc9710fbd616c315ae2154c235d06adc77e1c2c504e436515",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipmi_power.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d9d6865d0b44031c5df6d16a4b0284173ddc198eefe36d5543171496181f383",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hetzner_failover_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0138339b0c0b5625ffd68ca0ffef5ec16792f2d4aaf2794afffd6a0ec3ab0ab6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_permission_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d7d837471d8e8fabbb26654e357d913c15d806fc76ddbe786d72a8150f5447e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_scaling_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1cba90c9e01ec9fee21a0340c55f5718593ca5cf52ed56e732d2a5c35a00419",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew_tap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcc3e3988afaabc0d7544f924ba10a806c082772ee54b12e1ffafbed3ded8bd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudocmdgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9733ad227ead11feb5daf9d87f2c6d7053b45836fe8a023ff9fd99f5ac85fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be4279ca31fa13d48340101b9057240d811c6b5c76fcb95b65a73ca8df073b88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_secret.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19a433bff0caacc0021b1030bc097e471d3ea5f5f026440aca9d82880d13d16a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_load_balancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e884417349deef5938a7c60dd8f441df2b49ddf925c54c1a8e5033a177941bb3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lbu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "119f3b2beac4ed2b94e25388381f90df9698e2fbed69c736d46a0cf0d4108466",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cobbler_system.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e365e90abe7a7e8449311fb4954f25801f99f8b5534bc98a604e96ff942e7ad8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cpanm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf25f0128d85aab7098e8655e20a823f5a9c1ca32247f91c3a0507d27856082a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oci_vcn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d97bcee73bffc193943b8b83d90e39e20f1553be151adc54396277ebef42a730",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_compose.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dfc84722608e18c24662856af1e529b2cda94e45c8b2daab1f2f018ec794c64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_user_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dd228c355114f6f2d27ca6dc5fef10da34530e83ba8bb1273a4732a159bbe2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3f938e9553714a1922e45e866d9aae306b332044a32429a3e84952226ec6dbd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b45885eb57a28f6031ac624a7fac75900e1ad124dd2a1d1a20449f21b5891e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4094963afc6527515a731d83c9a0ecb4defae47a68dd97c1789692cde4055eba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/puppet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a02e93cfa49f8b5e359150ffc3410a5c54d15e88b8cf336512ac9aefeb8ae04e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_dnsrecord.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10e47a9bc929ad86a5c9d7b12c28f45e9927c88b9e1734beb34f76fdd2d6e8b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/katello.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc10b9478b7e1f6033aa1b227b47add9496bd69c6a2c90d9c8c3caa67a37b3a2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ae8e0c8f5d875431b24d0c74b1be323bfbdd448a8a6c34f81f9eff17164161f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cd6a104eafd35641ac483666b91da70afe312789008440bbe8c02533f96282a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4abcfcb8897d9f37209dea09634d3580e8fe35e67779c0c8e8b0800b0474b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54215ed17641062926e688cde7a91140f098bda60b0dd4f666273ac172600ce3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8678caf02515a353a0d6b09aca6585dc39144f212112a7ae7f594881255ad7a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3ca0f61401cc71de636b17a7b6c9c34c7428d248a6a46f0a63ba783edb8f1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/portinstall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fc_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "749269f3a1982dc97eb6dbd8bbdb7f4167f09ff353062093d729f33e4b5d33d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36eda73f79393b5b338611a75fa50d47f050c16e5dfdd0e936f31e26d86dcbd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c063f36228849d7b6ae073c4308d30eeba85408e8d460cacbd0b6762f0ca247",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pulp_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4107b3195e15435a28f2ca8f8f98db50df9678c93f7d4250d44f19767bbad16f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_ext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc3d62cf224aa996c7e93c9191df7b9790680cdb50da95d5c4adbd68d2c27982",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pear.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be89456445044d37e1fe93dbe469dcb0ebab59a0a364cabcb7cf38471b7e1d1e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_enclosure_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e0ab72e479649b63f6db15ece9227d8d5ba84eca314d14f3d7d16d62b6beba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cisco_webex.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de0818295d2b8a3826a438a6da40d87cf22d612bb2f5f48805b1d47379e862d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88fdb9610691143b54f42e6a7dbb4dfc6639dfe9cd66572ab5cba1165973cf94",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_server_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40ddc371a6be9cc3053919ef5df7b95148ff8b001f0c662b1161adca4b4541d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dd9b558a7e2278721c37fe84927718a629267d548c52cd3b7a4e4a83be82f00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pip_package_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32403aae617804c7f05250e88c45e39d0eece5e2b1047f56883248b427c958d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhsm_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "373a405741c2484bc48669b3eebc2525810f2069ed142095376b322b450c44fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/capabilities.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_network_set_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d54cb48a8137b8b919daef76d4aa527cd08f3ed9840ae63c5c4f10b9e763725a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_network_set_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d54cb48a8137b8b919daef76d4aa527cd08f3ed9840ae63c5c4f10b9e763725a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_frontend_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35da861f0e8669cafc7a2cbc5fbb876948266466ff59e2e4ffa3445da9003c61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_network_interface_address_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45243bd44a1a48194a4578076f47c64694017b49efa1d72f3d123e278ea60b2a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/beadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27fbb38324a97bad460b4494c458a1bff1544bd2ed58105fdb4bf25a8a994c81",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_net.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46d9ca91c560db742db16c51ab423b58e3bcbaabc85e976febd40c0ead0a722e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fa59d8ef8bd8981dedf9157097024083c55b14d7b601970f8c480eff6538772",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_privs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03c15a94114650cade7f18b3fff9579b4b77ead6a1e1bf822ada7c04e36d0d34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logstash_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2eccfdcd2f11fa00d32ae57bfb174adf0801d03c861e8e12d4de9797451d567",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xattr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ced0d9d8cabfe78d986551633b58c5cef38abbefbee8e1e45132415e652f772",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sendgrid.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13273299a1307a46ff6f53c592da845cc3681fb45dff1313e3c357156207eb94",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cac3edc4616af897cecf1189ea102a206f3548151a1cd9b9df5891db9ad3a373",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hipchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73e76975f6eb47ab20ebcf94b28adf921dab3c07b3ac614416f64db5ca5ccf51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_quota_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81d4d83612a2689461ea4f22448a860bc2fee33f4246991085f119d301fcc318",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/smartos_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08e06e04872a2d1f8a82d47edecac93c32ff505440feb75f73903b40d14e83fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_template_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0cf4ce222107506ebb19566a5841801d68a4fa4929b2f0eeca17a50b35925f8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pamd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a88f5c62b10d08c5e0b9b0ae169bc241e77702822201199437f615f970dd5ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_node.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "592db79efa6827d2093b0fc329dd8e4ced3a4e1a6dde3ce61ddb8055f1a5870d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/seport.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1c1aef4a9689c0ea5840868f4ce8041173e64d0f0c2d28c64ccf05395f7327d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6dec44969b855a86a1d29a2d97ba8c6d4c5594b9b7dbe87aba14309919935e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/nomad",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/nomad/nomad_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a2da24c881d689b0440f267a64e1181331aec5dcdac65559689a02be399d2f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/nomad/nomad_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3d4b02ef5054d79e6c7652b699ea0955103ad60fdd93622fb5857c0a26ff708",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cd0c5b9d57cc4003f1d44d22fca6e2b1dacbdecd2f45d64281f47661068cd6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/znode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef9a9dd29b510c06103ae7a7d4b973b4973b943174e63de7fcb0ea11677ae74c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/consul",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/consul/consul_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fc2235d0e61e5abe6fdde4e429b99cfb220fff89e57a470972db134b084b3d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/consul/consul_acl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "706aa7d7b75536ab2db04783b33ffa9f1cf6ff9af966b7d5bc6e3e47958620e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/consul/consul_session.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af9db76827fa06858887df4329a458a82ac4ebe274a4832fd58b1e33d3032080",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/consul/consul.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cff20750444211a7df214fcc2b01094b3e8fe40fd42de8900b86abe8490b9351",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clustering/pacemaker_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_site.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f028785f128ca613a71320d5e369f16112c914508b3f652fc4bb9a604f0183d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_table.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e41043b34cddfc0cd51fb63e6243cf8e96055134491d0c788de5cd765b0b7878",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0424fd2a5134c2b183451ad35aef9087bcac631356d7c4120122bd73545390a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c455e1326001f0b0f7bc1448cd31a63e972398dde865ff845181a0a793214b5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fb49b0a859526899b70651ec0fba46c2338fc09d4d78284a75ed0722aaa5937",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apt_rpm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50fd04770646350b221fe9c59ec2bce4c73fe03366113c65b79309911807fef5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rollbar_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9485e0f35bb362d919fbdd58cc0e123ef8ed89751aefb801b7046557dc693f03",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcp_healthcheck.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "380affe805390aa77d4b06a511cfcd1b152c58d18af458e79c91ae5d9a9fd4b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_aaa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff94bf268207686bd3f1ebac0822e4853a31ed258a18d9bb68a0472f9736b5a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnsmadeeasy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_scheduling_policy_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24363ffe26e4b28618b481f0e6f71e5b95d32d00825efdfe7bb57b2d164b2c56",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/easy_install.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_storage_vm_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d745c1117246e158537396e73b702909b0e24e04e8d1899344261eb56d6a411",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_aaa_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36fbc7f802d311b4e7990e30c41a431d6a8b3c37af99d733b97326b923cb0726",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_server_config_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c061c989bdff2d792711ea4cbff34c85510feb380960ce785b0c898388bdc02e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "248620df51ec3b7dabdf097acd7ac81e1d92255692d7618c8c15032f7c9cda08",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_datacenter_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f499285e7d5a63fdbd4562a5a4a3c38b497fda82c326e629f0dfd057c984eb4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_tablespace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fadebab334670e02855afc003fa423ca86e7446dbd3c46c4fcee1128c4301e70",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_retention_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24b8394eb8edb553a38af558d690c84e33a31716988c1fff9fe6fa49146b9f32",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4abcfcb8897d9f37209dea09634d3580e8fe35e67779c0c8e8b0800b0474b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/haproxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d8254d30cb970a5a2838dcf0c315cd93a7b4d3b0b815fcf768a3d2ed2ca7158",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "caa8a9c418cb8054f753a1b791a6067d749f11ac7903b525ecf536abdec367f5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcpubsub_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f96948d2f0e8db8bceb1d371ea1e69878c7d74193e7261217e024ce7c78163",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcspanner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06f9ab9f6e2e8a4b567cb169f04dde843820ff3e094fbe033a63f15a5ca7e705",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92e67ca5fe37a627a5307c6f5fdc810bda987e6d13a853cccf157397c62f8c48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_dns_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bca8e8a316465763050b5ce3f4a20f6ca209a53d7ba64ff35d47c6be6228d354",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nsupdate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cf82ae55ec11e4849b07082756dd2e83775f62f9d4cdbeb43389176693b2b93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b0b84242e67208fa846779715da12198b05ae021fcdd5eb2d32a2728a200bf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_domain_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf67f9e4050c8c319f33a89cd6136368144fe14f6b6e18457110737fc2ec7f2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nmcli.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "938ca868fb2391e28fc2ba432e32aff3a13beeab10eb3691e098aaf40e906a48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a5a2e0a4960d97a07448712e3bc4ba5041630a9b2d755fbbeb0eb44e3f789ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_aggregate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0465a8e2c4b7bb55854f697b71801402aa0a8c5e2d224a65e1d50f750e90ba37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/typetalk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/ini_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dacbc9fb37fe2ea97c47896a53cffe5cd57207a6445b07816323ece0ca3c86f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55f3f1172d7539224975575bffdb1d7d10c7999d51b3d21e006d96351374df63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/xattr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ced0d9d8cabfe78d986551633b58c5cef38abbefbee8e1e45132415e652f772",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/read_csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffd4d55798730296a9062069211f169c46fb2744e40483d1afe568ca76030aed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/iso_create.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8910b3cbb68a3e0e90fea4a0e49d6723a2b1538096e35002cd4395c45578433",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/iso_extract.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67d26864b1c5346a7b1d5544c5fda862ebe5f37a550c49ba5c5c8e91de6426d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/files/xml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10469496a593b70462d43833e87cc38c3d35e7b635ee54ed55ce2e074e3bfa43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ff89442e56050bf201b026d7f77c1f14894a255a1cceff1afee447b69ea6538",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "842fd2203fda36b539a4aab69255ac671bb6d48e236cdad9e12caa35ddab871c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b1008e70240c6ab5f87c7087c9e9e5d96867830a34abcbadb76f53c2eb0377",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sorcery.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhsm_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ffd51768087d3b9df7a4cfaf4ee571cffcb373ff770a1eacadbbab9d537918a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/npm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e1604caf4f1af452f860af230ff8686c544736ca800599510f32113d935f786",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_organization_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68d48725406687496589b807b84ba65a74cb078927cf4afb169dfa5c57c332b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_volume_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1664c2a19580b9788e798f899b6622f3ea4d84b04b855825d11a841f45d72bdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/pushover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/cisco_spark.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de0818295d2b8a3826a438a6da40d87cf22d612bb2f5f48805b1d47379e862d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/twilio.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "253715887b093a1818ed5a08945889c394b6a303dde84069807f76a3e3158e01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/mqtt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2298ac90a05ab3825f068e18299f23ee69d61d85daab4d103c6d743ef3c69c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/grove.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9f7e1e9432b2d7a2132e88e272a0cafc03855f41efeed99808f061b113dd2af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/bearychat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/cisco_webex.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de0818295d2b8a3826a438a6da40d87cf22d612bb2f5f48805b1d47379e862d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/sendgrid.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13273299a1307a46ff6f53c592da845cc3681fb45dff1313e3c357156207eb94",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/hipchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73e76975f6eb47ab20ebcf94b28adf921dab3c07b3ac614416f64db5ca5ccf51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/typetalk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/office_365_connector_card.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "285d42195ff549907ee4f131722dd3933046844426287c96edf1843856b68bee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/mail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eefb3422e3591ec80deb9fb40bdee5f13c4df840edba95d52aaa50cd5c9a990",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/campfire.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/mattermost.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f68dcbe92d4d39d3dfec659f3641cd00f7b2fe43aa5213064cb72ab996289a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/rocketchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fea039d3b5222817edd7da7df15d21944120071bed0e5c07eb5d9e30a7ba3b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/nexmo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88784e0460a6290f56d4487bbbbb069df650a4bafa0cb0c8dfc112ecec49d448",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a17b65ef7fdd3962a1ca54cee90f65831f0af635ff623c22cb7b4f51383146a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/jabber.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a37c8a32215ffd295470da8d34b481418850da53808c702097659aa4d7a1657",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/irc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8be4f2ca8a670351ba5814680a8e5c36df87358b1d9b27079e8542f5d3429f14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/catapult.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/logentries_msg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/telegram.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59ae6ea0cf701725676bdcd76cb2914ebd4ae8923011d04a12fab2e200a1a38d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/matrix.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/pushbullet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ac310e625cf356326bc27b5e51757f8c9983fd37ad7cd67488308bfb06c1a3f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/syslogger.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed8dcae984c245a56fd75e637e155bd619c042af064ef1c6ce10fb5d9899e80e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/notification/flowdock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b14cbe85c08172e43254096ee88f1aee85af6636d142ff13dc5a8aa449b9e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4420434f2176d0107d6c1834a209dcc0353f0998765445aa044fc0cb50b0f5fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_webhook_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bb0ba054aaa801b414d2c65260a585136033dd168c7a65b6e98c03a2c6258a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_database_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2de312c6f7dfc36338a821b1e68ad311e3e0af0ad301b1c2d91bade7eaf11435",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud_init_data_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34cba46d4e16da7c4b2b302d9acad3243063aae48fda28192b08d61fbf09ae10",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f331e2807a986c2bece1f368dd9b666ecc2c71ba45f424c33a94e2c4174081de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipify_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "909e93a70f4b6eb99e22b5915aac96e011a408c5d93d5d517fefff39933c856b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6190bd598b5c58d07fce4765745c7f9627c948ffb15de4b733c47f97f22968da",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gluster_heal_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ce49855c74c92514a3374b1d32948bf6a7a3284af3183e6d7a2feecfffa6baf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/java_keystore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1d635fdb6b13211ddcb6bb2156a65f743dc5f2ffc8918ab840451897e5e469b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sl_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87b7b9828fe8d0cca656625ef302ac4967ddf4a8d75b593b2b2b1cddf3811490",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_logical_interconnect_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "803675bc55d5b2d6b4e57c95e142aef188306533ed0c105521372d6ef68aa36f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apt_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7734f38f53b9de2c1485d22eeac3ff54fb7ba32be401292554a8f651a4b37de4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac30d46597ceb9068f1dcb04322888cf2353123863e3ab5b1b8db420064e7415",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9829ca44d6cc50bda1fc4b6e3f9c86afb26918221cffc9312aa5533645dc879",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sysupgrade.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9cb9262a6d0606eb15ef6ba85b2730396ab25e855795a3468ed1d830c13372c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_attrs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a983d1a574c1042b15b606c308ccddb4a890741f79111e5d9ba8f7ddb9715a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgutil.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d159d54afae920bbbc3ef2fc50c8f23a7e2f74a4931f6f59ce799d90a367403d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d93c74814be8a699200c153c34299ccd9a2a2ed29476cd7f0d5858faddb96e0b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_pd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8aa31c000349a148026e8a9148947e437c4ccd940d729c748b12cd15bfb7cc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6540c965a29c8593ff735b5bdd41a7165bb50a55c4ae865afa8d844f6fae1184",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dimensiondata_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60d8c426b57ced42431eddd4dd85809b790ceeea4cbd8b42f22e917b3d2dd17c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hetzner_firewall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5596eeb9afa633ba7b4e8d1579fbd9b2c1745ba8f27e160dba62cca511dd313b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/modprobe.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9b39b6da2e51cf26ee1ce89c040a7331ea906d716a7eb8c7adaf1f620a1ccac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apache2_mod_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c1c446900dbe4919ce14516295a0dd0be4a89d8f691cc3c13a2c6437aeb0ea4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_ip_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1197506810c441975e01145f0d759a0506f810a4395e7dcd59e07fed32cc76b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36142791326d262abd6ec16a3ec2e7327319ee3f53316d1f4b22deb3fe326ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_volume_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da8f6829f02b88544bd9b9b75c39388538b0d99ed9bb9f29e30166d99a4a1289",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d1bb713e49887cb2cc0904f865b58ec3b56bce4bffb28fa412a84604011275",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "493a73a681dd3cd85d8531cc9d30f7f9f06fda7454fdf2a960329d12fcdaa63a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_entry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e8e1793eb03b8ab6c17dcb66ed6966eb767c01139f95ff9d8436362bdc7ef46",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_access_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0807b63afb715251136638d1caab13a639c5be297a5c2b00aa4ca12d069e5e83",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sefcontext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf13a1773810cdf2525129e4e694744fe002500d405c3734a6d5131761d2c416",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vexata_eg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/selogin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7552556e82a332b2f2a936a3576e2d5b54298ece38f29ee6f58e8dc1e20ecde",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e6229cee4684cff6c1ac905fb9a0fe91692df8de87e325073eece89992fc5ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c7c665c51f10cd0193e676a4a7d2aac8247096230ff93dca13596e205359252",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_webhook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebf044078453f980ab8dc804c963ea1d2f31299bc982f29cb4cb18f715b09448",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_hooks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9337e10a63d71a522a4c77492ac02c04060f4687a0b77947e7f9083b4a8c80bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_webhook_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bb0ba054aaa801b414d2c65260a585136033dd168c7a65b6e98c03a2c6258a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "056f88e87bb244ff36893ed776e61372bf240f7e4083b3f4de1470244b8ae51d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/github/github_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3cc3b9f7efb1664f84685467ec47c33fef3e061fff7897a6fc17c4d71d0724a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/git_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0808d6f8718ac41432121b6a35cce4adcb97e8279cdbb38cd5a72a1b7c181945",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bzr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3c76b688efb71cb924cdee392fcad705648bf3c4f477ea389866ed0bb155814",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_group_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ae8e0c8f5d875431b24d0c74b1be323bfbdd448a8a6c34f81f9eff17164161f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8678caf02515a353a0d6b09aca6585dc39144f212112a7ae7f594881255ad7a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_project_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37687d82eaf92fa182e14d8abf5e59672e192d2605aed9c8aa49dd7e857b7480",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_hook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b24c2d206e160d8af9cf9ce2585adb6138f382d4f72d72021dc1bb5d5a8343b8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07caf4eb8319b46282ad20ee8af39c3910d160ef07821e238195e52bab9c4012",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3184b0dfbc363ed8148ad144a03f7833c5e598ee2bc271f9e9bdeec80426f2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19f31736e1a725a9b28d46d182416999cdb4098791268999fbdcaea09dea3f43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "369f1e0ca6bce00b5c3cc149d4bb447462c16d841d6e1513c37e9d1bb5639af0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/gitlab/gitlab_group_members.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2eed5080cbeaf0cac15d133a72d26f61357e4bdc9520ae40433a5e21c388b64a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bitbucket",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bitbucket/bitbucket_access_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0807b63afb715251136638d1caab13a639c5be297a5c2b00aa4ca12d069e5e83",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21f79951c0f3a290a9f1ef0069cc59b66fafb7e34bb8f7e486443ee9e710769d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eeb1882a13453482c1d8cfdfc518032177d79763f2ed46a19c0e946ff60bd161",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea031322d47bc6f7a986f409b4a0c67adf9fc4ce640f72a93d58f46c2293243e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/source_control/hg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ece6717290dd9be089ea86fa99df85a3bb847f0dea5643f1bc9a5f8f9abbf14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37687d82eaf92fa182e14d8abf5e59672e192d2605aed9c8aa49dd7e857b7480",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_owner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc0c1f19b1895e7ad78a8c2674d14ed33b2c94857028d738a2ff1c303f0abd27",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "891f130b172eb492ac6fa9b4de8280e67a4a9bc5804bf01b4b0d3f7824a70c81",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d3379c74898ccd92fec9d216532ff7c3afde1293df08628625677d9fcfee922",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxd_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22fd0b089ce450fe176db7736e9a554bf7a808b98afb48e3d7e2d56f038cc85e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21f79951c0f3a290a9f1ef0069cc59b66fafb7e34bb8f7e486443ee9e710769d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1264cc8faa99333a7c87436ef037c98fd5e22ca78c9afa09e1f28c3ea23e679f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe25f6d9944e7bbeb868e802e9e58b79d5034f31a61bfa0fa09d6770d3b97ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_slot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1ef460e98fa5e96632c474c2beaf8a4d1eaf4eb1b611e8e72c2a6ba5ac7fe3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "409c2ef26022ecb84412a6f0d953ecd1cef2a15d6af47a441c46113db00a7a01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23fb96d4bcb2efe68cccedea6bee84b0d2a9a34caee16a1534d19cf585bbcd9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_modify_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56719888c138331c893ba5f7db1ea77925e45c690815414a0720377ab8c62564",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_volume_attachment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de789f3eedb44c681718f98b663dce8666973096ae22b85307968959ed3457ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/launchd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47a52143170241714d2e16af7b89af93c77f082ae908cb668a2a080584177a8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_tag_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57437e2c20080b48da03668d35e73ca60722871ceb5aa79fa232ad10514223af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack_task_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df48244514e052a112f35621d08a5088efd10fa57be54c1239990f4e01bd59c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_logical_interconnect_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a15b1f773b587ad59653d66c0073b58d861372f09369ace09cd5a3bcbdbeb77",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_frontend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ace6280cf2036043d151b379a7d2fc205ffbe2fd6b57196392e15f7c9bcecd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4420434f2176d0107d6c1834a209dcc0353f0998765445aa044fc0cb50b0f5fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kibana_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e987038dbc44067b09c1b6bc6dd0341932d557efdf0277cf1101c9050237a49b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_attr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bf29cd15a9d1b94094e2f631aa3c4cf08acd789e7e879bfdfe28a311c24a271",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_tenant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e730ef524ff320f622501f0cb1720b6d7782699ffd2da7e090398b22e89e9177",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be4279ca31fa13d48340101b9057240d811c6b5c76fcb95b65a73ca8df073b88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d071992194a4ca1d09c8398a828f15c74c8e3427fff1cc5e707969ef84542fc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4aea99c2b2246e29be12bd23ba4ff4bcbdf398b50d21d89d832636caa8828c2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44624e36916a1c113d3dd8be54abd59e89b57225ec2936f0304f2d7ea9e181f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_ethernet_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9c0119c3db7c79c3ce42f7d1cd13632b1ee07c695858a1ea132db50d662200b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e448e0988bcda4d3dd574dcb54d0dbad71003517e6c11a865db152e0a99bc165",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipmi_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38c0881a00dfa8f2b7bb61c4dfe6467b2bbc9c477126815c011f62aa5479ac85",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jira.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91ca58f5aeba7b6b9d47ec65321d0f31c63ef94e324f13e818fbefa5f3fa62a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90ccb28e4d767232fb186e458d0dea0512e5c5e46b7adecc8266dafae682c8d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/atomic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/atomic/atomic_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c3e4035d39ac49d3d57b1587a626f624fc476313c3c9e835b159b8983e9d5be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/atomic/atomic_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23fb96d4bcb2efe68cccedea6bee84b0d2a9a34caee16a1534d19cf585bbcd9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/atomic/atomic_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0296f77506329fad1072ed256cd510e42d9f926cf7949fa59725450a921646e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/linode",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/linode/linode_v4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d5a720554d901a9a90c270642db728780e7f4f4b4db49d20434c30afc090585",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/linode/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38d4f000ce9d0f6154beab8530df9351aea44371f8b3b112b271ecb5a17de305",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2ad85905b2f9caf3e72f2efbace0c4e166452e06cba307815ab501fd47fa5b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b06e66b0bc3d74ca627231df77be690f70b4682fe15a7f776829c115fd652cb9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dfc84722608e18c24662856af1e529b2cda94e45c8b2daab1f2f018ec794c64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_prune.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccea35cc5a7652197a0caf4a120a8506ba04643147186d3e14a5327a11ff563d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_login.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb38d2596d0d75447ff22b2502199e34f92792346d271ebf34b020f4d571f7a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_host_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bbe90f85251be27448c39fd99da2f440b94106fa5ea0d6614a4520c47d86df82",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_node_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab3f3801352a542f514de39711690e0ec3d5568dffa217e6aeaa660cbde43f85",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_secret.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19a433bff0caacc0021b1030bc097e471d3ea5f5f026440aca9d82880d13d16a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_compose.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dfc84722608e18c24662856af1e529b2cda94e45c8b2daab1f2f018ec794c64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cd6a104eafd35641ac483666b91da70afe312789008440bbe8c02533f96282a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3ca0f61401cc71de636b17a7b6c9c34c7428d248a6a46f0a63ba783edb8f1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_node.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "592db79efa6827d2093b0fc329dd8e4ced3a4e1a6dde3ce61ddb8055f1a5870d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_container_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "842fd2203fda36b539a4aab69255ac671bb6d48e236cdad9e12caa35ddab871c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4420434f2176d0107d6c1834a209dcc0353f0998765445aa044fc0cb50b0f5fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6540c965a29c8593ff735b5bdd41a7165bb50a55c4ae865afa8d844f6fae1184",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60d8c426b57ced42431eddd4dd85809b790ceeea4cbd8b42f22e917b3d2dd17c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "493a73a681dd3cd85d8531cc9d30f7f9f06fda7454fdf2a960329d12fcdaa63a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_stack_task_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df48244514e052a112f35621d08a5088efd10fa57be54c1239990f4e01bd59c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4420434f2176d0107d6c1834a209dcc0353f0998765445aa044fc0cb50b0f5fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_swarm_service_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63593d58b2580ebebd56db76fc4b50742fa214a5e6abb137fc52bbbd68e9ef1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_stack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4351b64553ba68d4902d550d7aa937bb9a29ef996b6921b4619c48982d12cd74",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_swarm_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89bac0594f296a507fd7d72b74fbec894bf1d6ec1b47c088fb74f788c97db3c0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_stack_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca995fd8a1998f2b1c1eef160764e26c126e4aaf6a6c6ca0c3b74c6a60ed1b39",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48aaee08b3cfb7019e43615e0b57de29c38da8dff63e07f23f140b23852bcba6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/docker/docker_swarm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "285f73590d776142b46d9d01d0a98c02e6909307d4dc9bd8a9c61655b03fa25a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/dimensiondata",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/dimensiondata/dimensiondata_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "061255e3a9319f3ce0878243acd4dc5f2433786d75c4e3a89b5b89c5ca4c6c6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/spotinst",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68ada248f0f1a6e8705ce6ebed63301367404afd56ba9bc1d30e2bdf27480883",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e32b1d1f561baf94378f300d1606bf830ca0f383cb8e24341fe68eaf2c4f8ef8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_organization_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf06a78a4beab79fe102bba67a69ead3fd8c35f74583b305458d050849fa0680",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d05ba86911c4d94c6e7593283cefc7b708c23f65b70c870620466fe8ad90d17b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45abe3659a08da289e4b54a27a6d713719b79ab1616ab3e1e02ba9d29c33d256",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_user_data.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1672964f9768fc7cc9710fbd616c315ae2154c235d06adc77e1c2c504e436515",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0424fd2a5134c2b183451ad35aef9087bcac631356d7c4120122bd73545390a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_organization_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68d48725406687496589b807b84ba65a74cb078927cf4afb169dfa5c57c332b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_volume_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1664c2a19580b9788e798f899b6622f3ea4d84b04b855825d11a841f45d72bdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_database_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2de312c6f7dfc36338a821b1e68ad311e3e0af0ad301b1c2d91bade7eaf11435",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe25f6d9944e7bbeb868e802e9e58b79d5034f31a61bfa0fa09d6770d3b97ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4aea99c2b2246e29be12bd23ba4ff4bcbdf398b50d21d89d832636caa8828c2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44624e36916a1c113d3dd8be54abd59e89b57225ec2936f0304f2d7ea9e181f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c659be2d72a8476c6eb4427f1fbe203c197f6a02578cc4c39c63b62304fe5541",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_security_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6758c00c4a19986f93ae6a346e92d6dca090593dc27fdb0d66570598dbc1bbc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c94a4fc9e6f4d357a4815f1eb05fe2b9197bbd7522e81597f3d3e924b507cf64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1799687b1406e1a75131a65874dcf1110a2b862d4a77089300d2be668ea10fd7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_security_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bba12f2322db765e1d9705dd7a59dd29a29fd23395483d2767a7a203b87d27ef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_ip_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e04fa197d8307cd41e842b24ce83d9b16f0cf0cbe6a0b3d0ecb2496c44ad98cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12fdabbf47349f2227930d1ed6e40a4b0e0b5ae34cfcac67848e613ae070aa7b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13c0b55eea4ef4d006d0948e6518637bfa8e67fac3800c0cfceaea71408a8ccd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2318d33621f185a7205039dbb8725863986365b4798a8b16a34b403b8d00fd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a09021f8e05d26e7c8aa0272db39a41e9f2b23febc4e0aa3f3108646192ebd91",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/scaleway/scaleway_snapshot_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c74c1c0d073de17e836e6b69cc0ccd5946396e38f8beb573ba238f8bee3458d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovh",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovh/ovh_ip_failover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c37ecb76d0a389bd30712eab1dcace144991c56b6958ac716efb648d9fa05c4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a64c8b450c8d18837b282c31271d52ce0423d06af9a3d437b9a94fd1eb06a93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovh/ovh_monthly_billing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks/profitbricks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c063f36228849d7b6ae073c4308d30eeba85408e8d460cacbd0b6762f0ca247",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks/profitbricks_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88fdb9610691143b54f42e6a7dbb4dfc6639dfe9cd66572ab5cba1165973cf94",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da8f6829f02b88544bd9b9b75c39388538b0d99ed9bb9f29e30166d99a4a1289",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks/profitbricks_datacenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b74e6022f5f6b589fa2d4e7637de05d46a4d4d724968324e5b2f1dfdabcec7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/profitbricks/profitbricks_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3b0d7b519c1eeaf6a427d615011d5389f33947c19c36ca4639e9818e49187f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61a7d7192cd37fcb0302f16c35c33505d01aa87e0d861bc9e3fe8dd33bd46ce9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6ffb5c28d5dfe503e8144100dbaed3a7a09b42092d1dd77c7b32397c2658a47",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_ip_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1197506810c441975e01145f0d759a0506f810a4395e7dcd59e07fed32cc76b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_volume_attachment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de789f3eedb44c681718f98b663dce8666973096ae22b85307968959ed3457ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "899292a5541dce8e317a5b23f68ceef50c5eacbded0c38e52feedb05b1e8a0be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/packet/packet_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98a88102698ac822b8328b54e618b5b5983ccbc2bbee2164d004f9b9983493de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/pubnub",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/pubnub/pubnub_blocks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2284d4f83fb20ff6fa709676901a45b7dc98f8299c3081b501f4bb32a88e0b69",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/xenserver",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/xenserver/xenserver_guest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9672abac6e6b533e226dfb5fcc467e8d5b20c836362bb3cb932dd3af67bbb64b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/xenserver/xenserver_guest_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3ae0b9a0c9d4c0168dc528194478ea63d31a949a25339e3692f3d44ea09037d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/xenserver/xenserver_guest_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3ae0b9a0c9d4c0168dc528194478ea63d31a949a25339e3692f3d44ea09037d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/alicloud",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/alicloud/ali_instance_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aab7d64776371adab53aeaa905e756a524f6858be5490b53754f45405a05e7b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/alicloud/ali_instance_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aab7d64776371adab53aeaa905e756a524f6858be5490b53754f45405a05e7b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/alicloud/ali_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b20afd1a3778dcb1fba734e410a6f03d5d4b617a6d9b949a7d3af71f65155ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_publicip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9e8f18cea2224e5f6bd8b24e38122d3c72368f57f173231a2f07c8aa9df383d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_alert_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f056c9b18ce84d1719a0cc6c0047fff4b5bf9088fc009553455b116ddf24661",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_aa_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0d4f31be2c9d9d02a8ca10a1d0424706697fb1b49ad9ffbdb9f3848e260da7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_blueprint_package.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "106b76b182d10601925101a24eacbc9ece41d5fe5dda77082785645d3fb60324",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f52e3eb54993ec137853cb0d7b25abed0f9f9b8f10227a8b0e720cd1a70cf8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_server_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40ddc371a6be9cc3053919ef5df7b95148ff8b001f0c662b1161adca4b4541d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_modify_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56719888c138331c893ba5f7db1ea77925e45c690815414a0720377ab8c62564",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_loadbalancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ffa0d1b36f1c7140695a98fbabd365ace9d9187607a76f1d112bb8f9c60de9c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/centurylink/clc_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc5e7a9885f36bc30824cda3cb69dfcea08643453db7c3923c05f9080c431397",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_memstore_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed49160abf2ad2493589d1cb78d836e8f46aea5b6747cdccf7d6064b09a01303",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_zone_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_dns_reload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4abcfcb8897d9f37209dea09634d3580e8fe35e67779c0c8e8b0800b0474b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4abcfcb8897d9f37209dea09634d3580e8fe35e67779c0c8e8b0800b0474b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_memstore_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed49160abf2ad2493589d1cb78d836e8f46aea5b6747cdccf7d6064b09a01303",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_zone_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/memset/memset_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80603033f0ef0ade5b9ddda04563580985f38713b3c4efc5d67b156b3b669043",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54f6f7df39ff842cfe8939ccb6fe0644cce9af4fc0c0fe0022b694b6e84d396f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eab6d09afb37f32d0b76dc3fc7db7254d721e0a215430d7c02e56e03b15e1cb4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/serverless.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73486c292e3d3ed88f3ad25a0411bf90c4af7e328a63ba9bb213cf2253c97847",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6dec44969b855a86a1d29a2d97ba8c6d4c5594b9b7dbe87aba14309919935e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox_domain_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf67f9e4050c8c319f33a89cd6136368144fe14f6b6e18457110737fc2ec7f2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/xenserver_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a5a2e0a4960d97a07448712e3bc4ba5041630a9b2d755fbbeb0eb44e3f789ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/cloud_init_data_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34cba46d4e16da7c4b2b302d9acad3243063aae48fda28192b08d61fbf09ae10",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/helm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0010cffe72ab4efefd2ffcc7cd44b869e15731a391f21685ec215bacb3938880",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/terraform.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdd72eedb1bc2d743517ca39155b2df1c88809879b022818f90f8c26f3132e6a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/rhevm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c014955ca82a03d11c22c0462c8abcb0570c586eb544c8792480f06c69015b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox_kvm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab39298a1dcfb8b2fdbb4e76f0001b79e057c8d87bfe296051a5954d5f64e428",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/misc/proxmox_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "518e6e2c58d5aaeeb230bb70fa1fbd19bad795dd2f3a1d2ae673ccb6d321a177",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_public_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da33e62643a0422308fd7ae7b5842e6a0144d7bf3ca6cf418670d8f6bcf4f2ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_load_balancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e884417349deef5938a7c60dd8f441df2b49ddf925c54c1a8e5033a177941bb3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7734f38f53b9de2c1485d22eeac3ff54fb7ba32be401292554a8f651a4b37de4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9829ca44d6cc50bda1fc4b6e3f9c86afb26918221cffc9312aa5533645dc879",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27c30ef2ec8c85e5f22d81fd30fac4ee05fa6dc040c0f0968150b07071f2312c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oneandone/oneandone_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b0c3447b06744faa8f5128dce313bb527c799a102cd92cd1f57ab6946831381",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/online",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/online/online_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f37f70cd4b7a3fc317f59bff06ced5c8f6a7704760e8873cca739c6b26caa00c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/online/online_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d1bb713e49887cb2cc0904f865b58ec3b56bce4bffb28fa412a84604011275",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/online/online_user_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e76479607bc85426feb151af1be695089ac64791710e8b176d27b5765cff3a34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/online/online_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d317dff5d7c75c2a364f021d1a29dd4ce28d490c27f8cf7ff31bd9831f4463b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos/smartos_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08e06e04872a2d1f8a82d47edecac93c32ff505440feb75f73903b40d14e83fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos/vmadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c5bc7e1aed47d7fe95a58bb887521981c10784e2b7925f5afef6be222cea0d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos/smartos_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08e06e04872a2d1f8a82d47edecac93c32ff505440feb75f73903b40d14e83fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos/nictagadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f2b78ff2a8f1e7a13388caf333057a3dcf7399f6e4da43e2023fc964ae14895",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/smartos/imgadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66358d51e24653a1580b3eb78cfa4042476a06d642a9b50cca085a5a53eb986b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_image_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be4279ca31fa13d48340101b9057240d811c6b5c76fcb95b65a73ca8df073b88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be4279ca31fa13d48340101b9057240d811c6b5c76fcb95b65a73ca8df073b88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3534004d2f54b08f61ae049974b39b208d0549e6b55129537fdd26b836844087",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/opennebula/one_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25578e3c736751703bfd74cb0a1fc0cde13f98904b6ba9653d90175ea5c4bdfb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_img.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b7e86d857e3c5f6deaa940201f74c0ce54aca56d4ecacd7274162ecfbe43fc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2fcdb1ac780fcdd7fe73e8024fe5324648531671592912a823c7d6d510e9703",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_labels.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa39632b3f2777e38cb46539eaf38e7fdfd537f9418eb9b6f78949e42ebb6818",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gc_storage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad8779d8743fef5767715b490e7d6375d0c9102ed30aa030a17413d70893852",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcp_url_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "852c27c833b798760e09e8a49f6c599ec1397212a1934dbeaac2c4f8a360ebd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcp_forwarding_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74aec5915e1a3d9ea3a857af1a92ab4208d374c9654df46bf0da8fe0b1c8f0d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_mig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbcf3f39975f688641e88beaaaef23227122b805abc13086a7f73cb42fe1f374",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcp_backend_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5dc82a7a4dc3dfabb49cb05b746f095630b498f85c193cc68c79c129bf097ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcdns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af529acf229bddafe47a5599b5d7033cde4802f49530e202f692792979a72f4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_net.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46d9ca91c560db742db16c51ab423b58e3bcbaabc85e976febd40c0ead0a722e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcp_healthcheck.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "380affe805390aa77d4b06a511cfcd1b152c58d18af458e79c91ae5d9a9fd4b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcpubsub_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f96948d2f0e8db8bceb1d371ea1e69878c7d74193e7261217e024ce7c78163",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcspanner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06f9ab9f6e2e8a4b567cb169f04dde843820ff3e094fbe033a63f15a5ca7e705",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6190bd598b5c58d07fce4765745c7f9627c948ffb15de4b733c47f97f22968da",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_pd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8aa31c000349a148026e8a9148947e437c4ccd940d729c748b12cd15bfb7cc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "956bf787c777e6c745f0ffc90627f7f23cb80de2013cf6d64be44033b93f96e8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcdns_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ded81a77d08709b6f7737f41438910782d91d66568a7e5cd4f624218bbec5cae",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcp_target_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7acd079f6c9abb770fc6ddb52355685151d4a66955618e196c04ae748ca4f49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57faf235af6f0cc7a98f1f9e2f23c4a0c464ee9c357af47a33ebdf0fea1abba5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f87579b77aae883ef7befe590acfda03e69e3241a7bfcfb45f46c7a193ac384a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcpubsub.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40793023bacab1ebb95a0ce28eb59f907edd5c8f70e25159f09216e18651590e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gcpubsub_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f96948d2f0e8db8bceb1d371ea1e69878c7d74193e7261217e024ce7c78163",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/google/gce_instance_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1a9e2211fb88c9669a277f375b9fb02f56b2ed29e68619ee562a78f03aa86b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/heroku",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/heroku/heroku_collaborator.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "757db77f9068e52d9898843a6399f9c28a36c565b80f9bcaf1d1e33d3359ae15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_template_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "891842609bfe672bb3cbefdc3b09a412d6aa78821845437bd431bda0a3fc91b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_api_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e123a2850ee96876b7828db14540f577df4a83caefecfffa6bbc4708e01194d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_nic_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "203ffbd72b0503eb24938705111b23f57d21f50639b825bb8d414a2a27534daa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "320fbfe9f1cbb4a18b545a00eb41beb14ecbb67a269274130fcc47715ce6039d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "317f2ab5a5c42a4fc000606a5c4440229b28414a1df11a55bd8532b9441833ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "538922d17f167b0b833b9b30f39b1bf7463d1e49c212bb9673ed372601c4b7d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "486ba90e26303a9cb3a9867916e0c933b1c8cdf7789f8610761637e34641bac0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_cluster_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af450b217197476ea291e04e58d7f9311a8284cee6c5a502cec494d2b2590802",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78a13eaa2eebe07fe35057f1fd77cd41cd6e43d8014051de7003ad909b10c8a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_host_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bd90aae0d34dc54ce77c9b28109c919c8fb5011fd2c41f5ac4802c23a238c37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_permission_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d7d837471d8e8fabbb26654e357d913c15d806fc76ddbe786d72a8150f5447e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_quota_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81d4d83612a2689461ea4f22448a860bc2fee33f4246991085f119d301fcc318",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0cf4ce222107506ebb19566a5841801d68a4fa4929b2f0eeca17a50b35925f8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24363ffe26e4b28618b481f0e6f71e5b95d32d00825efdfe7bb57b2d164b2c56",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d745c1117246e158537396e73b702909b0e24e04e8d1899344261eb56d6a411",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_tag_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57437e2c20080b48da03668d35e73ca60722871ceb5aa79fa232ad10514223af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_disk_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "076b78fffb8cfc9b4c43b7ca3f11acf02a28f19277114b74d17e5d789f45fb2b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_event_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7882821a5120a296ecc91703d3d35a2893142853edebec440a59d2dfc0bb5418",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4db04968b3bbca31d6b8efa6420eebff5087a8a5fb39f6a2e51f039a937a3fa8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f8ed744767a73efdab5d8c77fb0bea1bf541c1ee1feece04438e9e6e851fed8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a670f0af4dcb53745843888e051183bbf330a5f6c9a6baf354f52144618beb5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "477822fb30c32c5671393afd8def725dbfaf8ccb29a48ce328d51b65a6b74532",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_user_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f789f067997a89ab7bb9da6b9749c66bc75995ebb6db6676a80245ffa9db542b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/ovirt/ovirt_vm_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82c364ae81e2fcde89cc9ad99c5377e3572438a5297760166cd15d85ba80ad1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/lxd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/lxd/lxd_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22fd0b089ce450fe176db7736e9a554bf7a808b98afb48e3d7e2d56f038cc85e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/lxd/lxd_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28b5943f015d2554eabb0cd6ebe097302534f6701a26ebe422f35dac299fc3f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention/udm_share.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d15779f93dcbf8a888b867dc9208274ea7a6d674d831fe90e9a02d97c9e58eaf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention/udm_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b45885eb57a28f6031ac624a7fac75900e1ad124dd2a1d1a20449f21b5891e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention/udm_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54215ed17641062926e688cde7a91140f098bda60b0dd4f666273ac172600ce3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention/udm_dns_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bca8e8a316465763050b5ce3f4a20f6ca209a53d7ba64ff35d47c6be6228d354",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/univention/udm_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc47deb188af664cdbc51f5e04529a63625d7d37a19d013796dbf170b3b02ff2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction/webfaction_mailbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "892d14fe4016b7b32b0be35d98ee1a7bddc6e295d1892e49d347fb715ec4e502",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction/webfaction_site.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f028785f128ca613a71320d5e369f16112c914508b3f652fc4bb9a604f0183d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction/webfaction_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e448e0988bcda4d3dd574dcb54d0dbad71003517e6c11a865db152e0a99bc165",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction/webfaction_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1beee3c008d00f3da16eca1d13048e1fcf85bc52af34db79d5c0b3ad88c2a9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/webfaction/webfaction_app.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef6a4d8e13b824d3265a82440f90080d64523b7f2308d8f6f026dca3ad77ba3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_cdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f6b917833d79430e5e6e5569879333d8d6bff53ef76d2973476dfcc4344f88",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c14aa0a51cfd7bb663dc9f6457fd93e880142632f00b6816bedc89620b74490e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935d481c82c7bbb29c246d2b90e6c802bf70f7c219e1af184c042ccdcaf55ec9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_queue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9806604cbd4c1ece98ddbff31c2ba875d30b2d8e1eb698f23be23f258f93ecdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_clb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "433f51286db01d386666c5d4b8278b6e43dd7a7ce59ba8fdce3dc9e0c86ffa5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_cbs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ab1780cd03fcbdef808dcab7f86059564fcdee3cf029430c37b6c37cc830b36",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_clb_ssl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cad5870151dac39a3542fb19e80cda02285daff35312b0b7229d45b877e2da9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0153da122935d17da8faf41f62fb3bb02d2eaddbd684f20ce06f9912be18a02",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_cdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82214785d2a227099e7675ef32b1b05574fbb5e0f71c99ff6c3a11492f77a196",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_mon_entity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15007bcf1172ebb7615da39fa13c7a7a101a3d2fa457e1987891139e57fef2b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_scaling_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1cba90c9e01ec9fee21a0340c55f5718593ca5cf52ed56e732d2a5c35a00419",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_clb_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92e67ca5fe37a627a5307c6f5fdc810bda987e6d13a853cccf157397c62f8c48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_mon_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b1008e70240c6ab5f87c7087c9e9e5d96867830a34abcbadb76f53c2eb0377",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_cdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad28baab95f4c8317eeebd142032eddf4406383911e8f54bdd98ff4174b7b55",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_files_objects.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b77cce78ab7fdbe1d293bd1e06d1a9a6c7fb84157666c03371137cd1cf0facbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_mon_notification.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cbb83a8392cd8622ae54daa1c9593bd06194a7540ba8b8397708b3e541ec7c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daae6d1276a28023bffadf92bdcfcf3db876e3c8394c4b1b0c5b3aa2dc42b116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bdb73fe8903a3ff4d31f53c53ca78e7d299f3c547739162165b1734ac3ab7bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_mon_notification_plan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad6996138e1230af9724f6d6d1dbcd33a2ce638a8951850eeb92d1c991f49c89",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28fc4298f463d92901e7ea4c3406b1861e34256b57d08bee7a6b4a18bd90c9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_keypair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53ecdd8b505201cfbf5abae54e12d317cd420d2870a3aec646c8dbe97a6ad43f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_identity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "536fd16cab05113b34f97a61d7aad13c705ffeb718d42f82d9ccd824e03744f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_scaling_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb25fd542911f02a0c9e96cf7449e8ff7bcca68d28dd8838d653ded014dce671",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_mon_alarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46270b62e074173bc26436c78ec0d50f8185f4bbecba313a8ced0a6528bd0bb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_cbs_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27a3ea9cc65be47196c63e14018f3afce79eb64ec93ac36804f1c336ec8f3ec1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/rackspace/rax_meta.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e98d3728e55359cb20bec12e6af0c2cdfcd54f774ab033817082c46851ee70f4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b13338bf0646f8a4d5ba8c0a2c5285e816f72afa3fef6dc182e96fad67a85f6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_preset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff89f3ddaa74dca083d86fb8ad0b097c57ee10fe6bcdda38da0ab8631c6cccf6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_pvc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "424970d2760b201e5f241a4bd5949a90e126ee2d2c33dde031041c33c99d618f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20606ce924bdd51408de9b453377a9e2bc0c7908b6422113b3dad51bb57f49c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97fbe9c19c779733822ab663c19b32b5a6dfc47a0a27672e9a815a68d86dfd76",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/kubevirt/kubevirt_rs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9f0905b8a4a3596fd419a77875fb78ef3d92bbf6d00e4524daac3f899e9c6bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/lxc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/lxc/lxc_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bc7163e21060fe5fbf644c51bd3554c434369f931f1abce7174892f127b1975",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_network_vpc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_private_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_ecs_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_smn_topic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3f938e9553714a1922e45e866d9aae306b332044a32429a3e84952226ec6dbd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_evs_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_route.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_port.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/huawei/hwc_vpc_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/softlayer",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/softlayer/sl_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87b7b9828fe8d0cca656625ef302ac4967ddf4a8d75b593b2b2b1cddf3811490",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oracle",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud/oracle/oci_vcn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d97bcee73bffc193943b8b83d90e39e20f1553be151adc54396277ebef42a730",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fcoe_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f48af12c1a23fcc09493aa9c93deae4487db65d1332d1ec31c755e93c2db5cba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_location_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c06e92c8ec23de739513117e8e6a00cd75f65499b869d35d9a5e2f461560889",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/supervisorctl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64ca792b2b5095fd17bac13f2619e185f93ff957e93db0917c6444fc8e3ff559",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/svc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "255db93828e643c200e6e2b1ee52b97659c8392e14e434a8d77312abd2aae9e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8b4305f2f13f327b7376803ee139f6f87e6bca875d481fd5d32d7f07d488f33",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_preset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff89f3ddaa74dca083d86fb8ad0b097c57ee10fe6bcdda38da0ab8631c6cccf6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dad28baab95f4c8317eeebd142032eddf4406383911e8f54bdd98ff4174b7b55",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c659be2d72a8476c6eb4427f1fbe203c197f6a02578cc4c39c63b62304fe5541",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa50813ae97d8d5ae7a6d107d566de76f17fd81391756a4b15cb71f6e17bf73a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67c1f96200b03fdb1c71c248b86d2fa67a0f118e1fd4c6817ff323c381595848",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fc_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "414d1b5d26a73577248f2ae1980b447ac9c4a35313407b97e11217afcff59463",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/awall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_idx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84c39e892f79be8d2734024f13f2ea1bb343db22f3a31a62f713c13de11840ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_network_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa56814022262159a4fd5e4ccfa90b4e168be9b1212b6fda023456e9fa9405aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_change.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a4bbb19ae2a13eecc9e8e7ea717015a4ad074925ca6dca7cdc529199a7e8a01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/emc_vnx_sg_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb019104000b0e368ae6de81ccf63c9fdee7529f65447ad55176559791bcd397",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/spotinst_aws_elastigroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68ada248f0f1a6e8705ce6ebed63301367404afd56ba9bc1d30e2bdf27480883",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d9fa6bed3211e26342cc8809a68100ddd7fc49e90d88f75f8f2563e947199ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_subca.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "949dd207e959b814cfd35893e50fdbd95b60c843ee4f01f13df47b8906c70cda",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zypper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "060ccb562cc31deee1a3f5c2b83df53e9233ed9c8db9561fa2c09962a7e2efe4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nomad_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3d4b02ef5054d79e6c7652b699ea0955103ad60fdd93622fb5857c0a26ff708",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/office_365_connector_card.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "285d42195ff549907ee4f131722dd3933046844426287c96edf1843856b68bee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudocmd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82463c45e2054c3aeb437bad00b11a932dc7af443df7d6bb285dfe6ce9af9946",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_inittab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bower.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_hook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b24c2d206e160d8af9cf9ce2585adb6138f382d4f72d72021dc1bb5d5a8343b8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_files_objects.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b77cce78ab7fdbe1d293bd1e06d1a9a6c7fb84157666c03371137cd1cf0facbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07caf4eb8319b46282ad20ee8af39c3910d160ef07821e238195e52bab9c4012",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eefb3422e3591ec80deb9fb40bdee5f13c4df840edba95d52aaa50cd5c9a990",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_ip_loadbalancing_backend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a64c8b450c8d18837b282c31271d52ce0423d06af9a3d437b9a94fd1eb06a93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6758c00c4a19986f93ae6a346e92d6dca090593dc27fdb0d66570598dbc1bbc1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b51f93874cdc5573d1cf9c23c50e7000ffe559930f302ee6f900279a4938a79",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jenkins_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47313cb3f96c0166a102bf568688f4fad330764f6faaaff6083ae8c396eaf133",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jenkins_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "275e2260b89d3a3341ca3c4c8460f327186d2f30d65f972774462469815132a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jboss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "482b60ea9d9b4e35b5e33abf89121294b4c015ab21144c582a980d4ae702aae7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jenkins_script.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5abc31472144f31496d86c3a032244598b96397b2b939a8a7d047f4fdbd46c95",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/ejabberd_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ee35c9c2becfee224167f8eaa84b3fb3629de1d553ff7a96d22a208e5b06c8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/deploy_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a16a4f004e90b5e80202efce594d9a8d2a9c2ce3f15164b7b583718e60d1298c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5613359e853bdaf3a258d452872f745607acbba01831f36bb6c75599ec4f4ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "724879648016b6bb2b4406601e7b3403ff7e625236278afcc4d4faa46535424e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93d5ed2b1cbcfeb88b5fc797df0aa799316975139e38cd421de8c0e6e4f7d04b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35da861f0e8669cafc7a2cbc5fbb876948266466ff59e2e4ffa3445da9003c61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45243bd44a1a48194a4578076f47c64694017b49efa1d72f3d123e278ea60b2a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff94bf268207686bd3f1ebac0822e4853a31ed258a18d9bb68a0472f9736b5a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36fbc7f802d311b4e7990e30c41a431d6a8b3c37af99d733b97326b923cb0726",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ace6280cf2036043d151b379a7d2fc205ffbe2fd6b57196392e15f7c9bcecd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c06e92c8ec23de739513117e8e6a00cd75f65499b869d35d9a5e2f461560889",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae4d1041424287379f0b048e6c4f434bed22f58456a12a53e6d37ed7280f0c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f9929fda007f70902f3bfe0c52fc6ea7976e60bd49ea948ceadcc4bd2be3df0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecd64614660129605e7e19c8e8f295d99e71f14f6fda2f7a2951a8005db5b726",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd30a8116a17aabb1ff8a91067a9591fb0e28c9eab85155b02c1d321eadb2aed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/rundeck_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac30d46597ceb9068f1dcb04322888cf2353123863e3ab5b1b8db420064e7415",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/apache2_mod_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c1c446900dbe4919ce14516295a0dd0be4a89d8f691cc3c13a2c6437aeb0ea4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jira.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91ca58f5aeba7b6b9d47ec65321d0f31c63ef94e324f13e818fbefa5f3fa62a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/supervisorctl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64ca792b2b5095fd17bac13f2619e185f93ff957e93db0917c6444fc8e3ff559",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/htpasswd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bb4f630fae6d153b7a6d1769b35b66a1d7d892071a2c8f975d6fffb8b417c18",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/nginx_status_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20bce227cb3f02cad1e366614f32107cf5195e434b115d8109bf70d7572c5674",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/nginx_status_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "037392ad348b519161e88581921a23eb15d6027c518aeb1a61191157cabd6b4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/taiga_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0185ea53026bdf6916194ac40abd8bb79152c75f8173f0fcee1ad1056e8542f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jenkins_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3256c41f3235942c5c3094ae423aedfe206c711477618c99e0a39a34e1f492b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/jenkins_job_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3256c41f3235942c5c3094ae423aedfe206c711477618c99e0a39a34e1f492b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/gunicorn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddfe8f8cb4c8d53f43464b643c17ec78c4c81e537a8bcf8085abd6cb65b37f46",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/rundeck_acl_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f44cf30c1519917723289f1f024c685273a2937e22e4bc82f2514dfca330af2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/django_manage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d44750075e4cb37610f431d3c985e764f932026a7cd82a836f2779535733d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/web_infrastructure/apache2_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef041e2658dbe646fc8b695be228068c935bae46bdc6ec9c52046e31966d6e52",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/crypttab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eebc33f087dadae76166e3160fd1789d37ed496d717aa14f5ecf5bd7414b7e68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_a_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9fae0fb773295dcb048ef96a71b7a02bcc3d2e09a7e23d12f44a7aceda106fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxc_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bc7163e21060fe5fbf644c51bd3554c434369f931f1abce7174892f127b1975",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e699bafd6dfc54d60df5107e88405a00b89991c1572aa16e60817aaf0ece5af1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_search.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3af13a76f94587edb07bdd7a3b3a3d154fe67c8c35da79fa21c851f0a70c2dca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_pvc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "424970d2760b201e5f241a4bd5949a90e126ee2d2c33dde031041c33c99d618f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/alternatives.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ff612e341d4ea344cc52cfbeb736eab44acf72345a683b521fbb9f24a9f04ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_acl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "706aa7d7b75536ab2db04783b33ffa9f1cf6ff9af966b7d5bc6e3e47958620e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudorule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1aa1c4f989af05b400962f52a78bff267aadf6bf38d8343ea46cb0727193427a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/opendj_backendprop.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ca20cce0cc6d8ccabbd18eacf962ea83d4d29ead45cccf37a77acff7b3ecc8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c58a0ec338d88403dd41942b7090dbe3b19b8215e3b22265adb942c2c08600a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_location.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae4d1041424287379f0b048e6c4f434bed22f58456a12a53e6d37ed7280f0c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_user_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e76479607bc85426feb151af1be695089ac64791710e8b176d27b5765cff3a34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "956bf787c777e6c745f0ffc90627f7f23cb80de2013cf6d64be44033b93f96e8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_evs_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f317f48e414e4bb8451790deb8a91d0fa5cfb24a1a3104c24588832f3c4089e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3534004d2f54b08f61ae049974b39b208d0549e6b55129537fdd26b836844087",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f87a51c3102c95d3a5ea9e15d36031915427ea483d9afdec21e74b80151db26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/stacki",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/stacki/stacki_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "933c1fc1d7ed244a7fc48ec213d70ba499796e940d9aa65abded1eb2c41cbd0d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/foreman",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/foreman/foreman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52aa6e33bfd01af58f8652e00585bbb5d2ad82581898c199b920d3691dab9c1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/foreman/katello.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc10b9478b7e1f6033aa1b227b47add9496bd69c6a2c90d9c8c3caa67a37b3a2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/lxca",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/lxca/lxca_cmms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95f67d443777f341f8c1a2f323eb3eb5da7794f7ef3ddc0da6c0c6cd33001bca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/lxca/lxca_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59fb22f54af4343accf332f944e7e70e177c6f3c55bedc3c0f6771f2fe22186e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/cobbler",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/cobbler/cobbler_system.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e365e90abe7a7e8449311fb4954f25801f99f8b5534bc98a604e96ff942e7ad8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/cobbler/cobbler_sync.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7344f393b042ffd6f3c7fba8bba4823da46cb5e7a59a6451835213d1eaf24c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f499285e7d5a63fdbd4562a5a4a3c38b497fda82c326e629f0dfd057c984eb4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_san_manager_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24a321c453738aeb66c5be2c46c2869214658e81ac48dd0f5952ffe9fcda7b26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_san_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58359cc106190240da426e0506fa1e32b479225a5671165698d5f766c2f46dff",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f48af12c1a23fcc09493aa9c93deae4487db65d1332d1ec31c755e93c2db5cba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a15b1f773b587ad59653d66c0073b58d861372f09369ace09cd5a3bcbdbeb77",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ddedb56f841c97525670f19ab004f8c3431bfaa4cf32fb15bfd2cf65883b7d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4ef083cfa921d2fa0eaef8c811ff750b35f989d053047848afd5db17f601494",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fc_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "749269f3a1982dc97eb6dbd8bbdb7f4167f09ff353062093d729f33e4b5d33d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_enclosure_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e0ab72e479649b63f6db15ece9227d8d5ba84eca314d14f3d7d16d62b6beba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_network_set_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d54cb48a8137b8b919daef76d4aa527cd08f3ed9840ae63c5c4f10b9e763725a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_network_set_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d54cb48a8137b8b919daef76d4aa527cd08f3ed9840ae63c5c4f10b9e763725a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_datacenter_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f499285e7d5a63fdbd4562a5a4a3c38b497fda82c326e629f0dfd057c984eb4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "803675bc55d5b2d6b4e57c95e142aef188306533ed0c105521372d6ef68aa36f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a15b1f773b587ad59653d66c0073b58d861372f09369ace09cd5a3bcbdbeb77",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9c0119c3db7c79c3ce42f7d1cd13632b1ee07c695858a1ea132db50d662200b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f48af12c1a23fcc09493aa9c93deae4487db65d1332d1ec31c755e93c2db5cba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fc_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "414d1b5d26a73577248f2ae1980b447ac9c4a35313407b97e11217afcff59463",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_network_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa56814022262159a4fd5e4ccfa90b4e168be9b1212b6fda023456e9fa9405aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_san_manager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24a321c453738aeb66c5be2c46c2869214658e81ac48dd0f5952ffe9fcda7b26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_fc_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "749269f3a1982dc97eb6dbd8bbdb7f4167f09ff353062093d729f33e4b5d33d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_enclosure_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e0ab72e479649b63f6db15ece9227d8d5ba84eca314d14f3d7d16d62b6beba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9c0119c3db7c79c3ce42f7d1cd13632b1ee07c695858a1ea132db50d662200b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/imc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/imc/imc_rest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a61784221b055694a1293c28ae86ebe346b73ab059abe6325f999bca420741fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ee1a254c571f79836711b9bc3b7bb23d9432e10d7a9575c70a231c38e46a3f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_policies.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a35417d72c064aa7e32028d3ff9b6cbf6476e0cb449b2c0baaa3d962309fe61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d289fd3d800ec3135cb36400f7edfb6741746606c5839ef8acdd80b30d968457",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fa59d8ef8bd8981dedf9157097024083c55b14d7b601970f8c480eff6538772",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_tenant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e730ef524ff320f622501f0cb1720b6d7782699ffd2da7e090398b22e89e9177",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4afc5f3ad96baa3f085130a720c0ae99af358ebbec1f81e5d800c5f50337c33a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f8531cfd701f3ded93bd2b7314c108e012bb8c19c63eecff10204857d72e098",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b61ef9ed9ca3928b9d8c1752563a27a6a76e9be9586cf9b7e932174d79002822",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/ipmi",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/ipmi/ipmi_power.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d9d6865d0b44031c5df6d16a4b0284173ddc198eefe36d5543171496181f383",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/ipmi/ipmi_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38c0881a00dfa8f2b7bb61c4dfe6467b2bbc9c477126815c011f62aa5479ac85",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "971d6e8bafddd6d8200286c6459f2aaa24871cbafcf94443f3b03fb7bda690d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/idrac_redfish_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b14cbe85c08172e43254096ee88f1aee85af6636d142ff13dc5a8aa449b9e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b0b84242e67208fa846779715da12198b05ae021fcdd5eb2d32a2728a200bf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/idrac_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b14cbe85c08172e43254096ee88f1aee85af6636d142ff13dc5a8aa449b9e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36142791326d262abd6ec16a3ec2e7327319ee3f53316d1f4b22deb3fe326ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/idrac_redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f317f48e414e4bb8451790deb8a91d0fa5cfb24a1a3104c24588832f3c4089e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/idrac_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4032ceb99ffa5aa3852adbb7e11872ef40f244ca15cd01e6fd8d6af26456ec16",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/redfish/redfish_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36142791326d262abd6ec16a3ec2e7327319ee3f53316d1f4b22deb3fe326ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/hpilo",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/hpilo/hponcfg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b0cce24e68dd7b30e7070199de82d95d761d73e11a7ac2b9c4c116cc6da467",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/hpilo/hpilo_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0749fed7a211ab0079c509eda0f8c0ffb0bfbdda37914895f00e62af515eeb4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/hpilo/hpilo_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0749fed7a211ab0079c509eda0f8c0ffb0bfbdda37914895f00e62af515eeb4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/hpilo/hpilo_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/wakeonlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e87b05353ca75b4fd6125eac92c3c5e2849c50fa85144d6621ca819b4d20f541",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/dellemc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/dellemc/idrac_server_config_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c061c989bdff2d792711ea4cbff34c85510feb380960ce785b0c898388bdc02e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/dellemc/ome_device_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a50007982a42a65031f3ecc9233bf6baede73e439975336c64b19f445770d21",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/remote_management/dellemc/idrac_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be1bab26a93cd13ec72e6a30bd8ae846f10e85316ce2725edc1bf4779a52e132",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/python_requirements_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28282007ea020c0028150e66cd59f4fb1606f65b274e4d969d0f5ccb10b03dd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/campfire.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1901ef19fc3b55c9ca42f7969767a8afea8b3155d36290cba7b5e05e06f15842",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/htpasswd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bb4f630fae6d153b7a6d1769b35b66a1d7d892071a2c8f975d6fffb8b417c18",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xbps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99ddbba3a2d7740e4be2d2b436b39f7421e9246378365e5ff4b2a60f80306c93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mattermost.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f68dcbe92d4d39d3dfec659f3641cd00f7b2fe43aa5213064cb72ab996289a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_san_manager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24a321c453738aeb66c5be2c46c2869214658e81ac48dd0f5952ffe9fcda7b26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ff89442e56050bf201b026d7f77c1f14894a255a1cceff1afee447b69ea6538",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fc_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "749269f3a1982dc97eb6dbd8bbdb7f4167f09ff353062093d729f33e4b5d33d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_dns_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b284f904d003f2e6bd72e2c2084f2c2a7defee2391537f422ba6ad498c27e2f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_notification.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cbb83a8392cd8622ae54daa1c9593bd06194a7540ba8b8397708b3e541ec7c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/linode_v4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d5a720554d901a9a90c270642db728780e7f4f4b4db49d20434c30afc090585",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/helm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0010cffe72ab4efefd2ffcc7cd44b869e15731a391f21685ec215bacb3938880",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1beee3c008d00f3da16eca1d13048e1fcf85bc52af34db79d5c0b3ad88c2a9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_nsgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c40e9e89b4a7d6f30c856c8186f4665da5531b6d3c390182167b97adcb3af76a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logentries.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1092b7f7ab43dfab3f4a3cefafacf3ca737de9cad867148176f8fc1770e39cf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnsimple.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf69408ce8a10aa4687dc4c75391e797deda461ea35628900f3fa57e70b6f166",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daae6d1276a28023bffadf92bdcfcf3db876e3c8394c4b1b0c5b3aa2dc42b116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/heroku_collaborator.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "757db77f9068e52d9898843a6399f9c28a36c565b80f9bcaf1d1e33d3359ae15",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rocketchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fea039d3b5222817edd7da7df15d21944120071bed0e5c07eb5d9e30a7ba3b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38d4f000ce9d0f6154beab8530df9351aea44371f8b3b112b271ecb5a17de305",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/odbc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dad6be3bf21952c701234158cba6fc5c72f9a1a97915648c21360bae8d0d373",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "056f88e87bb244ff36893ed776e61372bf240f7e4083b3f4de1470244b8ae51d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nginx_status_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20bce227cb3f02cad1e366614f32107cf5195e434b115d8109bf70d7572c5674",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3184b0dfbc363ed8148ad144a03f7833c5e598ee2bc271f9e9bdeec80426f2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dpkg_divert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b71119b18d6835e12a9c4a5bebf71480ff2cb5d70827ee3fee60811cf1876e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/icinga2_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e58673f163b94ba752882ed2bfc5616b53fd81f9a431bd7aae47d94e3a3dced",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vexata_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcdns_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ded81a77d08709b6f7737f41438910782d91d66568a7e5cd4f624218bbec5cae",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nginx_status_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "037392ad348b519161e88581921a23eb15d6027c518aeb1a61191157cabd6b4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4032ceb99ffa5aa3852adbb7e11872ef40f244ca15cd01e6fd8d6af26456ec16",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_devices.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ali_instance_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aab7d64776371adab53aeaa905e756a524f6858be5490b53754f45405a05e7b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxca_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59fb22f54af4343accf332f944e7e70e177c6f3c55bedc3c0f6771f2fe22186e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sf_snapshot_schedule_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e251e18be0ee4e25f06ed439fcdde936d45b23ca2fab29f23cd84d1a63b12b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_passwd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_disk_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "076b78fffb8cfc9b4c43b7ca3f11acf02a28f19277114b74d17e5d789f45fb2b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_monitoring_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27c30ef2ec8c85e5f22d81fd30fac4ee05fa6dc040c0f0968150b07071f2312c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_server_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c94a4fc9e6f4d357a4815f1eb05fe2b9197bbd7522e81597f3d3e924b507cf64",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/interfaces_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23f9dc5b7a7be265a5aacc0b79276a26b8a3ad980568790c6b40123c3c35179f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d317dff5d7c75c2a364f021d1a29dd4ce28d490c27f8cf7ff31bd9831f4463b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhn_channel.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9b34a7ee26112e9c5e3244d8d3ea4467e998fdca684e6443f1f12045a1267d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/solaris_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/osx_defaults.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de3d35c9f9006b236e76c0cb92883305598f3dce2f29b5b840104851ac5dcab3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/syspatch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5efdeccf15b244b206f77af0fa288675b32a3dd56211d4a9e2a178cb1a28fec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/aix_lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/pids.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38e7ab5ae14974177fb9a3b90658000151492dd87a5d77bd322f5261700fecdd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/openwrt_init.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7da2deb1d4c5d11a585533f69c986c2cdb525265e3207a619791b61b7d58b685",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/java_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1ace748ca75f0b5ce0cedde55456365b4529a00d3ba63122ab1db5f225aca90",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/python_requirements_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28282007ea020c0028150e66cd59f4fb1606f65b274e4d969d0f5ccb10b03dd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/make.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ca72ea1d8db919f94d1c81fdec999f2608d97c01776247012da162c7ad253af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/xfs_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d2fd9ed1316a4341dcf51caa8af1aed4bd7a097af5f09ffe5a1ea928cb8c2c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/nosh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0814721a9b15d12099c9cb6df8c8e7f2767335b58f06940e49f13e749432c3a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/ohai.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/runit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdfb09bca1dd52237f5c9f8b99031af05f3b6037e4745f7228b472d257d87ee3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/vdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7859987160b6a81a86a578ef352edb5b5e0c2a191a2c5e475b2ab0083252f47",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/open_iscsi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "057c0db7001cad637996e7310baf8b2d84aaa0486ad9c2d483fd54af70f0547b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/lbu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "119f3b2beac4ed2b94e25388381f90df9698e2fbed69c736d46a0cf0d4108466",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/puppet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a02e93cfa49f8b5e359150ffc3410a5c54d15e88b8cf336512ac9aefeb8ae04e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/capabilities.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/beadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27fbb38324a97bad460b4494c458a1bff1544bd2ed58105fdb4bf25a8a994c81",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cac3edc4616af897cecf1189ea102a206f3548151a1cd9b9df5891db9ad3a373",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/pamd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a88f5c62b10d08c5e0b9b0ae169bc241e77702822201199437f615f970dd5ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/seport.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1c1aef4a9689c0ea5840868f4ce8041173e64d0f0c2d28c64ccf05395f7327d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c455e1326001f0b0f7bc1448cd31a63e972398dde865ff845181a0a793214b5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/dconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f331e2807a986c2bece1f368dd9b666ecc2c71ba45f424c33a94e2c4174081de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/java_keystore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1d635fdb6b13211ddcb6bb2156a65f743dc5f2ffc8918ab840451897e5e469b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/sysupgrade.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9cb9262a6d0606eb15ef6ba85b2730396ab25e855795a3468ed1d830c13372c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/modprobe.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9b39b6da2e51cf26ee1ce89c040a7331ea906d716a7eb8c7adaf1f620a1ccac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/sefcontext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf13a1773810cdf2525129e4e694744fe002500d405c3734a6d5131761d2c416",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/selogin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7552556e82a332b2f2a936a3576e2d5b54298ece38f29ee6f58e8dc1e20ecde",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/launchd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47a52143170241714d2e16af7b89af93c77f082ae908cb668a2a080584177a8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d071992194a4ca1d09c8398a828f15c74c8e3427fff1cc5e707969ef84542fc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/svc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "255db93828e643c200e6e2b1ee52b97659c8392e14e434a8d77312abd2aae9e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/awall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/aix_inittab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/crypttab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eebc33f087dadae76166e3160fd1789d37ed496d717aa14f5ecf5bd7414b7e68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/alternatives.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ff612e341d4ea344cc52cfbeb736eab44acf72345a683b521fbb9f24a9f04ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/python_requirements_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28282007ea020c0028150e66cd59f4fb1606f65b274e4d969d0f5ccb10b03dd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/dpkg_divert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b71119b18d6835e12a9c4a5bebf71480ff2cb5d70827ee3fee60811cf1876e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/aix_devices.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/interfaces_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23f9dc5b7a7be265a5aacc0b79276a26b8a3ad980568790c6b40123c3c35179f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/aix_lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abab1ac15009cb6f96adf2e00880ced55ed7c5031d8b463701eb6b32f8a5cad8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/mksysb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ae524421715a31466ef9264dbd315a8b32e369895f752ed9599f52e58753d08",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/cronvar.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0655e7cac5e70399638eaad23ab55777b2dca48eca863298d10559fdb42354c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/listen_ports_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea9ba05e7d4cfb9d5aad2cf00715fcb09f4b938022c495e90ab11a84afa7a6c1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/parted.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4159ef7b591a049c71c985fbb07494d2f9498306170a14ae44c1f7ca2faeef5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "571c013812d3006f81442f6cbeeda1abf4671fa454d2ff7dbebba3f19ab25e0a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/ufw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51214246572dc927019d538efe246635ca152c8b70ebd8a27b2a3dceda61caf1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/pam_limits.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f61807a62847b6fd53a25fa4258f69ae83cc81692d106dd207f164a4727ba40",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/aix_filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cf91a8458aebb01076013fe6762f2be88e40592ff9b9e5f4b015bb9f649ccc9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/timezone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3047af3d01f8613a832a22a2083f2aafe04179a4a5bd66c00b47fdd57cc7edc5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/locale_gen.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "003752a5c62a5bc050f7822b28f3b0c310956ea5bb486f3eefa9219c237b84b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bbb3797d60891d95e1d0ffadf4401d872e261b8e95aa695691fabb348f39158",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/selinux_permissive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f44c6496a7a79ee04c273b58d2d371ee4476acdd7f861363dd577159dcc11884",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/gconftool2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dac6741e378a99d8f88e45920e2a4841ec35a69db328e7a0a4bcc8ee05a1cc8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/facter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/system/kernel_blacklist.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "726fd4c9672e6ae317efa7183e72927262ddd90c59021afa4b80d1f3d179e113",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nexmo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88784e0460a6290f56d4487bbbbb069df650a4bafa0cb0c8dfc112ecec49d448",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abab1ac15009cb6f96adf2e00880ced55ed7c5031d8b463701eb6b32f8a5cad8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19f31736e1a725a9b28d46d182416999cdb4098791268999fbdcaea09dea3f43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1799687b1406e1a75131a65874dcf1110a2b862d4a77089300d2be668ea10fd7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52800c9c7cf8257ed285c79e72e95d78266cb7a6d969ec6875137cc6ef7d73d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_event_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7882821a5120a296ecc91703d3d35a2893142853edebec440a59d2dfc0bb5418",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/yarn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddf377e61b98568900d850a9663cc7110712986fd652188e7d0ec5f7b6fc4ff2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_publication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eaf5c0389ee5aaeac225084b085fcdf0fcb47b8af2e7fec70b1bf5891d44b579",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bdb73fe8903a3ff4d31f53c53ca78e7d299f3c547739162165b1734ac3ab7bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mksysb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ae524421715a31466ef9264dbd315a8b32e369895f752ed9599f52e58753d08",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/read_csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffd4d55798730296a9062069211f169c46fb2744e40483d1afe568ca76030aed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gluster_peer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e37cc212b1484d76d0d084277fa5b017f0bd55489794198977b032e561cd294f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefb_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f52d4a666f4d9460c127dd8de138a5e21ad02b3f9def05828a0c512cadcb4b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "899292a5541dce8e317a5b23f68ceef50c5eacbded0c38e52feedb05b1e8a0be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/opkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd93bea75b70e04af71f95a8caefb83e8d477ab8c9420dcf052b90b7bbadc932",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a17b65ef7fdd3962a1ca54cee90f65831f0af635ff623c22cb7b4f51383146a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lldp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2854a7d542dc7c585ff34c4170ce5d733e03b8eb5d1c84d7a01bb493d2609bc6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_silence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a66559942fcc141b0b21f9f617ef14dde9dfdb22ebe706c0d992a4816fb62e86",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20606ce924bdd51408de9b453377a9e2bc0c7908b6422113b3dad51bb57f49c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flatpak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12676542d7a15b9ef198d8d95d3112734faf0c2702ac1a32688be16a63d8356b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jabber.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a37c8a32215ffd295470da8d34b481418850da53808c702097659aa4d7a1657",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3ae0b9a0c9d4c0168dc528194478ea63d31a949a25339e3692f3d44ea09037d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_notification_plan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad6996138e1230af9724f6d6d1dbcd33a2ce638a8951850eeb92d1c991f49c89",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cronvar.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0655e7cac5e70399638eaad23ab55777b2dca48eca863298d10559fdb42354c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_service_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63593d58b2580ebebd56db76fc4b50742fa214a5e6abb137fc52bbbd68e9ef1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28fc4298f463d92901e7ea4c3406b1861e34256b57d08bee7a6b4a18bd90c9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ss_3par_cpg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0652f7a77a9d16a55fcb35817372a60e52caec04fd43eb202ad747ea8fca2702",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4351b64553ba68d4902d550d7aa937bb9a29ef996b6921b4619c48982d12cd74",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/listen_ports_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea9ba05e7d4cfb9d5aad2cf00715fcb09f4b938022c495e90ab11a84afa7a6c1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_monthly_billing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/parted.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4159ef7b591a049c71c985fbb07494d2f9498306170a14ae44c1f7ca2faeef5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/irc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8be4f2ca8a670351ba5814680a8e5c36df87358b1d9b27079e8542f5d3429f14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_qtree.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f12a1000d096c737241f37725a11239c83c98e4e4a1ac4a0ec064c0284a9556c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98a88102698ac822b8328b54e618b5b5983ccbc2bbee2164d004f9b9983493de",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89bac0594f296a507fd7d72b74fbec894bf1d6ec1b47c088fb74f788c97db3c0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/catapult.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_peering_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_keypair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53ecdd8b505201cfbf5abae54e12d317cd420d2870a3aec646c8dbe97a6ad43f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_session.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af9db76827fa06858887df4329a458a82ac4ebe274a4832fd58b1e33d3032080",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logentries_msg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "571c013812d3006f81442f6cbeeda1abf4671fa454d2ff7dbebba3f19ab25e0a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_configuration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "621c0e3bb8c297fa57892f737bfbd384a642b8a9bc38b038052b88af08a2257a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_device_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a50007982a42a65031f3ecc9233bf6baede73e439975336c64b19f445770d21",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36142791326d262abd6ec16a3ec2e7327319ee3f53316d1f4b22deb3fe326ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_identity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "536fd16cab05113b34f97a61d7aad13c705ffeb718d42f82d9ccd824e03744f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_scaling_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb25fd542911f02a0c9e96cf7449e8ff7bcca68d28dd8838d653ded014dce671",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/telegram.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59ae6ea0cf701725676bdcd76cb2914ebd4ae8923011d04a12fab2e200a1a38d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/layman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcp_target_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7acd079f6c9abb770fc6ddb52355685151d4a66955618e196c04ae748ca4f49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/stackdriver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e469374e370c83bbff5d8d2829b24a09488c26c769853f1313ba66bbe7722e48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ufw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51214246572dc927019d538efe246635ca152c8b70ebd8a27b2a3dceda61caf1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cff20750444211a7df214fcc2b01094b3e8fe40fd42de8900b86abe8490b9351",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_lun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26568e77cd90b4073a04b76c3bb7438f957def416e787e9d739a5b300e26371",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/taiga_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0185ea53026bdf6916194ac40abd8bb79152c75f8173f0fcee1ad1056e8542f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/terraform.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdd72eedb1bc2d743517ca39155b2df1c88809879b022818f90f8c26f3132e6a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4afc5f3ad96baa3f085130a720c0ae99af358ebbec1f81e5d800c5f50337c33a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_cname_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "681fe0624aafea89a4ab62ce048c7f2f09da21518512084a4dc41355d6e0be1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/matrix.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_vault.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2acb37ab3e88ccbbb469ae3c20e5f259070fb865d908aeccda7b15c9afca2d22",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hpilo_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0749fed7a211ab0079c509eda0f8c0ffb0bfbdda37914895f00e62af515eeb4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/wakeonlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e87b05353ca75b4fd6125eac92c3c5e2849c50fa85144d6621ca819b4d20f541",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_route.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3256c41f3235942c5c3094ae423aedfe206c711477618c99e0a39a34e1f492b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloudflare_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03d9945319c8f9d0eeb4df6463265cf0784bf20a0b9d7c7cacbe688091fa7e4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_datacenter_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4db04968b3bbca31d6b8efa6420eebff5087a8a5fb39f6a2e51f039a937a3fa8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pam_limits.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f61807a62847b6fd53a25fa4258f69ae83cc81692d106dd207f164a4727ba40",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0771612a4d78971ea1f68f1d997b277e2a58f4e72c5a57e448f392d7434c4ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hetzner_firewall_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1479a7450e8c613100b9d729db4a2810d34b5a2991dc50d06dc6598fdb35338e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_dnszone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12ce71e05f5cb9521c2a738b0509d34ac2396d004a76064530ce228edd3b127e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bba12f2322db765e1d9705dd7a59dd29a29fd23395483d2767a7a203b87d27ef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hpilo_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0749fed7a211ab0079c509eda0f8c0ffb0bfbdda37914895f00e62af515eeb4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_app.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef6a4d8e13b824d3265a82440f90080d64523b7f2308d8f6f026dca3ad77ba3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_loadbalancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ffa0d1b36f1c7140695a98fbabd365ace9d9187607a76f1d112bb8f9c60de9c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b0c3447b06744faa8f5128dce313bb527c799a102cd92cd1f57ab6946831381",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_datacenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93b74e6022f5f6b589fa2d4e7637de05d46a4d4d724968324e5b2f1dfdabcec7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iso_create.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8910b3cbb68a3e0e90fea4a0e49d6723a2b1538096e35002cd4395c45578433",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f8ed744767a73efdab5d8c77fb0bea1bf541c1ee1feece04438e9e6e851fed8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pacemaker_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_port.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/purefa_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12def5dc51e0a3346e747840f437913ade68b31b539833ec56174fece8d24c0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkg5.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57faf235af6f0cc7a98f1f9e2f23c4a0c464ee9c357af47a33ebdf0fea1abba5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_known_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eeb1882a13453482c1d8cfdfc518032177d79763f2ed46a19c0e946ff60bd161",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_vol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53c4644d14e1c5c64b83fb621a2fb61cad88601b50ac1d326724f80b5ef5ea79",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_ca_host_key_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f9929fda007f70902f3bfe0c52fc6ea7976e60bd49ea948ceadcc4bd2be3df0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snmp_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e07fed63168b9686045e42ac17f51e16aa7fe1da9d0fff8fc8ef00693005df8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_affinity_label_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a670f0af4dcb53745843888e051183bbf330a5f6c9a6baf354f52144618beb5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_ip_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e04fa197d8307cd41e842b24ce83d9b16f0cf0cbe6a0b3d0ecb2496c44ad98cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b84ef579da05d86c4e189212e879d52632af6a136e9ce2dc38c518f3ac470ff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhevm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c014955ca82a03d11c22c0462c8abcb0570c586eb544c8792480f06c69015b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs_delegate_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1c642fc5b0f8dd42ad1aaeb638d2103d795338fce5c81e9a301080e331a6b2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_network_interface_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecd64614660129605e7e19c8e8f295d99e71f14f6fda2f7a2951a8005db5b726",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_external_provider_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "477822fb30c32c5671393afd8def725dbfaf8ccb29a48ce328d51b65a6b74532",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc47deb188af664cdbc51f5e04529a63625d7d37a19d013796dbf170b3b02ff2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25578e3c736751703bfd74cb0a1fc0cde13f98904b6ba9653d90175ea5c4bdfb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_job_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3256c41f3235942c5c3094ae423aedfe206c711477618c99e0a39a34e1f492b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nagios.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgng.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7eb5a1082ac0624c8e380e5264e09966bea8abaa6e635e3326d5e2babc59f6bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iso_extract.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67d26864b1c5346a7b1d5544c5fda862ebe5f37a550c49ba5c5c8e91de6426d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_alarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46270b62e074173bc26436c78ec0d50f8185f4bbecba313a8ced0a6528bd0bb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gunicorn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddfe8f8cb4c8d53f43464b643c17ec78c4c81e537a8bcf8085abd6cb65b37f46",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f87579b77aae883ef7befe590acfda03e69e3241a7bfcfb45f46c7a193ac384a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97fbe9c19c779733822ab663c19b32b5a6dfc47a0a27672e9a815a68d86dfd76",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12fdabbf47349f2227930d1ed6e40a4b0e0b5ae34cfcac67848e613ae070aa7b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pushbullet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ac310e625cf356326bc27b5e51757f8c9983fd37ad7cd67488308bfb06c1a3f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcpubsub.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40793023bacab1ebb95a0ce28eb59f907edd5c8f70e25159f09216e18651590e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_memstore_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed49160abf2ad2493589d1cb78d836e8f46aea5b6747cdccf7d6064b09a01303",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nictagadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f2b78ff2a8f1e7a13388caf333057a3dcf7399f6e4da43e2023fc964ae14895",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/onepassword_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44635d0ae6630b1b265051edb8bde038f149f998f6b20b615b9397141af050fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/onepassword_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44635d0ae6630b1b265051edb8bde038f149f998f6b20b615b9397141af050fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a859c3fa87e6e45e38b8f8f7a5126b462bbb25097084dc061e5459e25e4ba4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_hbacrule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c7d864fbfe05d49c715b48c89ddb9dee40d5d4a59b4c7ec32fdfa127999a1a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13ca5370ce4741c58f6e5385b1f151151a3d60ea4e00ea456619c5806f366147",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_sudocmdgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9733ad227ead11feb5daf9d87f2c6d7053b45836fe8a023ff9fd99f5ac85fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_dnsrecord.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10e47a9bc929ad86a5c9d7b12c28f45e9927c88b9e1734beb34f76fdd2d6e8b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fb49b0a859526899b70651ec0fba46c2338fc09d4d78284a75ed0722aaa5937",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a4bbb19ae2a13eecc9e8e7ea717015a4ad074925ca6dca7cdc529199a7e8a01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_subca.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "949dd207e959b814cfd35893e50fdbd95b60c843ee4f01f13df47b8906c70cda",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_sudocmd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82463c45e2054c3aeb437bad00b11a932dc7af443df7d6bb285dfe6ce9af9946",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_sudorule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1aa1c4f989af05b400962f52a78bff267aadf6bf38d8343ea46cb0727193427a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_vault.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2acb37ab3e88ccbbb469ae3c20e5f259070fb865d908aeccda7b15c9afca2d22",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_dnszone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12ce71e05f5cb9521c2a738b0509d34ac2396d004a76064530ce228edd3b127e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e087399755f652f5fa9565a6e56779976add5297552044d90f474bc66451855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60acb296365ef8af986dceb820eeb8e4d46eb34edb3fd66ce18c8f7c6bfcc694",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/ipa/ipa_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1af43a1330fa03019c0521c9f92fcfd421b1eba50b17da0369a51f81924957ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/keycloak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/keycloak/keycloak_clienttemplate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74e9aa2f6e94face0e0f5119e6a42520da9b079d94cdb5eed5c8a42df95109aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/keycloak/keycloak_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1264cc8faa99333a7c87436ef037c98fd5e22ca78c9afa09e1f28c3ea23e679f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/keycloak/keycloak_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f87a51c3102c95d3a5ea9e15d36031915427ea483d9afdec21e74b80151db26",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/opendj",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/identity/opendj/opendj_backendprop.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ca20cce0cc6d8ccabbd18eacf962ea83d4d29ead45cccf37a77acff7b3ecc8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cbs_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27a3ea9cc65be47196c63e14018f3afce79eb64ec93ac36804f1c336ec8f3ec1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e087399755f652f5fa9565a6e56779976add5297552044d90f474bc66451855",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cf91a8458aebb01076013fe6762f2be88e40592ff9b9e5f4b015bb9f649ccc9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/elasticsearch_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73dd74a1570ee9843452c10e9dcdd6cbea85d3562219a2363a9e7fd216681073",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kubevirt_rs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9f0905b8a4a3596fd419a77875fb78ef3d92bbf6d00e4524daac3f899e9c6bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hpilo_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_write.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d343f5c03c4350fa8e5426a9cbb9fd4fe3b745702af4868b80e7d0192b14b713",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3cc3b9f7efb1664f84685467ec47c33fef3e061fff7897a6fc17c4d71d0724a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bigpanda.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c521a6e0cd8df2272f2314deb990cc3bfeea4761afe8c47570135db0d32f41a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redhat_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6cba86028f7f561bd3da6980f8a77e32ff9c9d1641e14f41e9bfdab201a09b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cobbler_sync.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7344f393b042ffd6f3c7fba8bba4823da46cb5e7a59a6451835213d1eaf24c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_naptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22a4fe451e396fdd934b8bfff8ceb132b4871753c6742605d1962969d9062311",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_membership.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ca88131820b8919066dd0cc1beb52e45a0364af4f45d1688739a0ce5b620ade",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07687658daf8ad9bb7cdbe8fb98d69dab13f52119ef8524273678b524561bfe5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_lang.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e86a2db785415ea4e830953aa26a47dd3d75b52e161847a191f39bc931d3cab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_pg_hba.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d803927f52d842f23e3e0c0e9d8c15a2722d87f4c89e635904ef62f4ce8bcf6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_ping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9f54eb3f4561bce095f1eab82be935becf8b9c3e9a5c20d48f4205721342f51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb2ea1275ffd0ee02ef3fd353351ee2a2d334885a6d882b5e5e9ce4a1c77fbb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84f853a86e90bc120702630fbb114db1bf5b1106eea56f460e0ae3d940f90b87",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_sequence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e94091810e122a119bba47af21d1710b3783640953096bb1ebcacdd9b892b3c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4094963afc6527515a731d83c9a0ecb4defae47a68dd97c1789692cde4055eba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36eda73f79393b5b338611a75fa50d47f050c16e5dfdd0e936f31e26d86dcbd0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_ext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc3d62cf224aa996c7e93c9191df7b9790680cdb50da95d5c4adbd68d2c27982",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_privs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03c15a94114650cade7f18b3fff9579b4b77ead6a1e1bf822ada7c04e36d0d34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_table.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e41043b34cddfc0cd51fb63e6243cf8e96055134491d0c788de5cd765b0b7878",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_tablespace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fadebab334670e02855afc003fa423ca86e7446dbd3c46c4fcee1128c4301e70",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "caa8a9c418cb8054f753a1b791a6067d749f11ac7903b525ecf536abdec367f5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_owner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc0c1f19b1895e7ad78a8c2674d14ed33b2c94857028d738a2ff1c303f0abd27",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "891f130b172eb492ac6fa9b4de8280e67a4a9bc5804bf01b4b0d3f7824a70c81",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d3379c74898ccd92fec9d216532ff7c3afde1293df08628625677d9fcfee922",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_slot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1ef460e98fa5e96632c474c2beaf8a4d1eaf4eb1b611e8e72c2a6ba5ac7fe3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_idx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84c39e892f79be8d2734024f13f2ea1bb343db22f3a31a62f713c13de11840ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_publication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eaf5c0389ee5aaeac225084b085fcdf0fcb47b8af2e7fec70b1bf5891d44b579",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/postgresql/postgresql_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3259c441d192f2de4ebd88d6cc4605f2a5764c6cf0e93302ece156d32187cca9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/aerospike",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/aerospike/aerospike_migrations.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09176a3f99ba973603a0ae2b87989b4560bf59fb6d675ef2e8c8faa7603c5cbc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/redis_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "427a060f31d2b087a56495f20fd3fbcc3a86a3ff681b94efcc9e63916e2fef68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/kibana_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e987038dbc44067b09c1b6bc6dd0341932d557efdf0277cf1101c9050237a49b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d9fa6bed3211e26342cc8809a68100ddd7fc49e90d88f75f8f2563e947199ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/odbc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dad6be3bf21952c701234158cba6fc5c72f9a1a97915648c21360bae8d0d373",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/elasticsearch_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73dd74a1570ee9843452c10e9dcdd6cbea85d3562219a2363a9e7fd216681073",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/misc/riak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73fef66458904c05986af615660cf92378b1913a60f8da05456513d2f69cde9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/mssql",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/mssql/mssql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1713ee71635aa4ceb67b44c2d991cb0839a59deb8e9ab3d6567be7f1629f4557",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d277d7ed1c202fae0d32e8a2d180310a9a6f9d553890f02cd74cbd0fbee00fd1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "248620df51ec3b7dabdf097acd7ac81e1d92255692d7618c8c15032f7c9cda08",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ff89442e56050bf201b026d7f77c1f14894a255a1cceff1afee447b69ea6538",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ff89442e56050bf201b026d7f77c1f14894a255a1cceff1afee447b69ea6538",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/vertica/vertica_configuration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "621c0e3bb8c297fa57892f737bfbd384a642b8a9bc38b038052b88af08a2257a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb/influxdb_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af21bfda2c83803ac863a868857f30fc34dcf342b4ab163cd5f1c85428cd0261",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb/influxdb_retention_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24b8394eb8edb553a38af558d690c84e33a31716988c1fff9fe6fa49146b9f32",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb/influxdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d93c74814be8a699200c153c34299ccd9a2a2ed29476cd7f0d5858faddb96e0b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb/influxdb_write.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d343f5c03c4350fa8e5426a9cbb9fd4fe3b745702af4868b80e7d0192b14b713",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/database/influxdb/influxdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39ff341dd31dc65c0254dabce04eedbb4286d34e1d690d5bd5df1cfe62c20d84",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/slackpkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb680d9a9a0d282f5efc503bdbe9ad32e88bce362177e300db595fce77b8c6e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ece6717290dd9be089ea86fa99df85a3bb847f0dea5643f1bc9a5f8f9abbf14",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca995fd8a1998f2b1c1eef160764e26c126e4aaf6a6c6ca0c3b74c6a60ed1b39",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_acl_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f44cf30c1519917723289f1f024c685273a2937e22e4bc82f2514dfca330af2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_meta.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e98d3728e55359cb20bec12e6af0c2cdfcd54f774ab033817082c46851ee70f4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_exception.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd30a8116a17aabb1ff8a91067a9591fb0e28c9eab85155b02c1d321eadb2aed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/airbrake_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "674fece1637f868922c689743bc77a90fb2fb024bb7695329f9826675650ba4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_enclosure_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e0ab72e479649b63f6db15ece9227d8d5ba84eca314d14f3d7d16d62b6beba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_kvm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab39298a1dcfb8b2fdbb4e76f0001b79e057c8d87bfe296051a5954d5f64e428",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13c0b55eea4ef4d006d0948e6518637bfa8e67fac3800c0cfceaea71408a8ccd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aerospike_migrations.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09176a3f99ba973603a0ae2b87989b4560bf59fb6d675ef2e8c8faa7603c5cbc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/syslogger.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed8dcae984c245a56fd75e637e155bd619c042af064ef1c6ce10fb5d9899e80e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/imgadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2318d33621f185a7205039dbb8725863986365b4798a8b16a34b403b8d00fd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3b0d7b519c1eeaf6a427d615011d5389f33947c19c36ca4639e9818e49187f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60acb296365ef8af986dceb820eeb8e4d46eb34edb3fd66ce18c8f7c6bfcc694",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_key_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea031322d47bc6f7a986f409b4a0c67adf9fc4ce640f72a93d58f46c2293243e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/postgresql_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3259c441d192f2de4ebd88d6cc4605f2a5764c6cf0e93302ece156d32187cca9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48aaee08b3cfb7019e43615e0b57de29c38da8dff63e07f23f140b23852bcba6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/timezone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3047af3d01f8613a832a22a2083f2aafe04179a4a5bd66c00b47fdd57cc7edc5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gcpubsub_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f96948d2f0e8db8bceb1d371ea1e69878c7d74193e7261217e024ce7c78163",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/locale_gen.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "003752a5c62a5bc050f7822b28f3b0c310956ea5bb486f3eefa9219c237b84b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1af43a1330fa03019c0521c9f92fcfd421b1eba50b17da0369a51f81924957ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eeddd1dad28c5e43bd65ac419433c24fec1a34e4752672083223008b245ab42",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/django_manage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d44750075e4cb37610f431d3c985e764f932026a7cd82a836f2779535733d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/hpe3par",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/hpe3par/ss_3par_cpg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0652f7a77a9d16a55fcb35817372a60e52caec04fd43eb202ad747ea8fca2702",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_vol_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cdb81a12863af6ca8f279eab2a9fee1927d6b10a74b68f9b889a53bef7d8da1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_host_ports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "884e31f2c3d1f0c11c2f2da3e0167ccabcef3cdb8d729d225b646857bb6e40cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "737dbb3bb745c278f0d947a91e37c8653e2a2ee47860cd643872904d7cde9faf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c58a0ec338d88403dd41942b7090dbe3b19b8215e3b22265adb942c2c08600a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_vol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53c4644d14e1c5c64b83fb621a2fb61cad88601b50ac1d326724f80b5ef5ea79",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/ibm/ibm_sa_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9af1232d0f5a93f57d262e5d289b755dccffdee71b2bdaeb1a0e707273e4bbbc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/sf_check_connections.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39a754f4072b1053ccfd043068360a733d79a13db946383863f9521500289848",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/sf_volume_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2217e730d1ee09df5d2acbd94654d7cffaef4a45a5f93b5abd854ffa6d85454",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_ontap_gather_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be1184402d1ba8eb209fb7c376d7f47b42e2d2126d55ba679b52a08559c40088",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/sf_volume_access_group_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0708d647b51db83ccdd6c82746b4a3c107cf7a51b51f0ebaa36620e2768e110e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_svm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a93e5cd46d88b75de84429ee18d210560c73d84cbdefd7397bc588013194322",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/sf_account_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1e81b8749bb53e8d5f5764d914fe6df2b1ae61ab0745cf10ade287a0100796a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_user_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dd228c355114f6f2d27ca6dc5fef10da34530e83ba8bb1273a4732a159bbe2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dd9b558a7e2278721c37fe84927718a629267d548c52cd3b7a4e4a83be82f00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_aggregate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0465a8e2c4b7bb55854f697b71801402aa0a8c5e2d224a65e1d50f750e90ba37",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e251e18be0ee4e25f06ed439fcdde936d45b23ca2fab29f23cd84d1a63b12b1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_qtree.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f12a1000d096c737241f37725a11239c83c98e4e4a1ac4a0ec064c0284a9556c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_lun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26568e77cd90b4073a04b76c3bb7438f957def416e787e9d739a5b300e26371",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eeddd1dad28c5e43bd65ac419433c24fec1a34e4752672083223008b245ab42",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/netapp/na_cdot_license.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0dc6042646c18265fb63b7921490f337242bd328bf290f963b7dcffa8648956",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/zfs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/zfs/zfs_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "409c2ef26022ecb84412a6f0d953ecd1cef2a15d6af47a441c46113db00a7a01",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/zfs/zfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b51f93874cdc5573d1cf9c23c50e7000ffe559930f302ee6f900279a4938a79",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/zfs/zfs_delegate_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1c642fc5b0f8dd42ad1aaeb638d2103d795338fce5c81e9a301080e331a6b2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/zfs/zpool_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8116fe416d2f691532b5b1fd06013f66bda5bce968e6a03e79dec552894107b1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/glusterfs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/glusterfs/gluster_heal_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ce49855c74c92514a3374b1d32948bf6a7a3284af3183e6d7a2feecfffa6baf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/glusterfs/gluster_peer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e37cc212b1484d76d0d084277fa5b017f0bd55489794198977b032e561cd294f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/glusterfs/gluster_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "466a554fe94173f476ec1b3d31adc4ca3d35568bc9afce030fc47abe5036f724",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/vexata",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/vexata/vexata_eg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/vexata/vexata_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/purestorage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/purestorage/purefb_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f52d4a666f4d9460c127dd8de138a5e21ad02b3f9def05828a0c512cadcb4b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/purestorage/purefa_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12def5dc51e0a3346e747840f437913ade68b31b539833ec56174fece8d24c0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/emc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/storage/emc/emc_vnx_sg_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb019104000b0e368ae6de81ccf63c9fdee7529f65447ad55176559791bcd397",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "369f1e0ca6bce00b5c3cc149d4bb447462c16d841d6e1513c37e9d1bb5639af0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bbb3797d60891d95e1d0ffadf4401d872e261b8e95aa695691fabb348f39158",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_snapshot_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a09021f8e05d26e7c8aa0272db39a41e9f2b23febc4e0aa3f3108646192ebd91",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/openbsd_pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "994c3bec9149dc9a7e1f70a4cc168f38bfeb9e783b09e9fc261a44ea46b1634e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3ae0b9a0c9d4c0168dc528194478ea63d31a949a25339e3692f3d44ea09037d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gluster_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "466a554fe94173f476ec1b3d31adc4ca3d35568bc9afce030fc47abe5036f724",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_firmware.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be1bab26a93cd13ec72e6a30bd8ae846f10e85316ce2725edc1bf4779a52e132",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/datadog_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab82e356848f0da5156c4b5a35f2df7d057f19e321c352fce363fe855d603c48",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/selinux_permissive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f44c6496a7a79ee04c273b58d2d371ee4476acdd7f861363dd577159dcc11884",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zpool_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8116fe416d2f691532b5b1fd06013f66bda5bce968e6a03e79dec552894107b1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10469496a593b70462d43833e87cc38c3d35e7b635ee54ed55ce2e074e3bfa43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_user_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f789f067997a89ab7bb9da6b9749c66bc75995ebb6db6676a80245ffa9db542b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_snapshot_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c74c1c0d073de17e836e6b69cc0ccd5946396e38f8beb573ba238f8bee3458d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovirt_vm_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82c364ae81e2fcde89cc9ad99c5377e3572438a5297760166cd15d85ba80ad1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest_powerstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_aaaa_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c4514285788c44ae3961bd44db427e93cacf25e9dc3b47d666b474604ad404f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/na_cdot_license.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0dc6042646c18265fb63b7921490f337242bd328bf290f963b7dcffa8648956",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_txt_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d0b910babc14299d7a7cabd20aaaa42c8e8df021687ffd3b5f5f4e6f2fd069d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "285f73590d776142b46d9d01d0a98c02e6909307d4dc9bd8a9c61655b03fa25a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apache2_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef041e2658dbe646fc8b695be228068c935bae46bdc6ec9c52046e31966d6e52",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc5e7a9885f36bc30824cda3cb69dfcea08643453db7c3923c05f9080c431397",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_ethernet_network_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9c0119c3db7c79c3ce42f7d1cd13632b1ee07c695858a1ea132db50d662200b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gconftool2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dac6741e378a99d8f88e45920e2a4841ec35a69db328e7a0a4bcc8ee05a1cc8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/composer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e847e7492cb58364d8571934538d9cc4f3ea8acf746070bd8f1687ca4e83915f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gce_instance_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1a9e2211fb88c9669a277f375b9fb02f56b2ed29e68619ee562a78f03aa86b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f8531cfd701f3ded93bd2b7314c108e012bb8c19c63eecff10204857d72e098",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dimensiondata_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "061255e3a9319f3ce0878243acd4dc5f2433786d75c4e3a89b5b89c5ca4c6c6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "518e6e2c58d5aaeeb230bb70fa1fbd19bad795dd2f3a1d2ae673ccb6d321a177",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/facter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/swupd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d97fd0540d721ac8131049e4868f2dbebb736a6873c9b3d239a4b1b1aaea5f59",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netcup_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26fcee470cc40f970d929fe16c39d8aa85ca30f226314dabecc0013b33079f3b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ali_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0296f77506329fad1072ed256cd510e42d9f926cf7949fa59725450a921646e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infinity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccd4296408750bcc261f63fc2fe8fcebb0b2de08058bc08c6f976b34e07f54b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/installp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_alert_profiles.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b61ef9ed9ca3928b9d8c1752563a27a6a76e9be9586cf9b7e932174d79002822",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flowdock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/swdepot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/riak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73fef66458904c05986af615660cf92378b1913a60f8da05456513d2f69cde9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kernel_blacklist.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "726fd4c9672e6ae317efa7183e72927262ddd90c59021afa4b80d1f3d179e113",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nios_host_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c1ae4530cc7acde03f293adcd5e0adc9c37a40ba3856e5acde36fb4635cfd90",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39ff341dd31dc65c0254dabce04eedbb4286d34e1d690d5bd5df1cfe62c20d84",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9af1232d0f5a93f57d262e5d289b755dccffdee71b2bdaeb1a0e707273e4bbbc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/newrelic_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zypper_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09c18ea8eb93c7b031d58316301018dafa18791dfd7395930b7ab30985b65d7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxd_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28b5943f015d2554eabb0cd6ebe097302534f6701a26ebe422f35dac299fc3f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group_members.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2eed5080cbeaf0cac15d133a72d26f61357e4bdc9520ae40433a5e21c388b64a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pacman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1569e965bab7f6385730b9397685f3d8d26530522d550572298ebc94b35228d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/imc_rest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a61784221b055694a1293c28ae86ebe346b73ab059abe6325f999bca420741fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/auth_basic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34740866c7bdbcaed75b7d4414e978ed24a2e05424e4a2af4e179fded67ab950",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ddc57c545438fa417cd600b412758153396222b802ec28968f57b11f6031cb8",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "514c9e0f4e6ce63ccae77b7614ba811c64e123ee5f96f6fb9b858a9ebb1e9aa9",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/hpe3par.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38808678393ccf34e3fe396aed42061975e4479f3da38c22bdbb879bd076acf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/vexata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "937c7b62e1a1e4137b2492f60835d444a174ea012c4179545bfadc5ee5958fab",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ibm_storage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa82aa6805f786bbffc1c2d0740fa230852373ce44a38a7af28e7f880f998e61",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0265296f4283e736039d08c91ba3ad32c224a75564b04761fafaf098826c80a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/_netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42ce3596f792de5bd66be736a6ad6e8aca43012c924750dd7dc5f9bd3bc88687",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_creatable_resource.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c54d0755efa18775c82f91a8bee83fc9ea0590e61ac9d307f9294a2b6f1d0033",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/dimensiondata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a34334fca7e874a3c5381729e38804f893a49869e664b5098d2340074526b15d",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/_gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31581014875f54e52e1ece32efe3b581b175c7bec2d939571d892daf2302219d",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/openswitch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "803130c2ab3075b5553a20bb57cc40db502f37699f9e0e90d23539a1d04f45f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_display_name_option.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03a418f8b6ef1fed29775c6eb01b1a9b494a52db0ab04317fb6ebb31d22b30df",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a5c7876a07be836e94418efea8e5a4324b5a38530807742faa95bf75db32814",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/keycloak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9162f09215d206536abbfce170558aeb2032f6423ace3a7f79ef4ee393a6fca2",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5829a5a9e73df091b04cc3e24c8ac3b467721b0c70a84bed979339fe850e5a53",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/utm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c595fd6936490911e09c041bf49e93933278c65a838896b95075432a3a7e6acc",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/kubevirt_common_options.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1749e0b3569bb892344c26713436f1e1b563ebe8cc75accf5ac1389cd1312255",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a339231bc230f03ac91d219bb80555fb40bd38a870a78255c057ba63a2b5d4b9",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/emc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8e4738088f16fdf47cec032f3046c5dce8a520a06d4e3c47e4f8d7bbd978599",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/influxdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb088065bb558d8bd0fd38facbd3e569a75cf2350ff54bddee7ec87d25f3391a",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/xenserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84cffcdae284ac8290d7f2865f536fc13bd35b4cd31d4a5eaeef89493f53b64d",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/nios.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f88cf2dd799455b82c2b35f5de5a0e97a52f98cc547b4570603bf1a3fa25c51",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a23dcec58b0d6fabcde691111466b6ffc6db5d8034de41f4da96eeeef3789a35",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/nomad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bfef29c982c903cd3263b446dcd28eed54bb5f5834a3a848425f478634fa9d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/rackspace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22456616c25433541723ad90c6fb91b09fa52a2c1bf925c93cb5cb7dcd73f2cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ovirt_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9665d6370ebdb5191364392abe86dce39fe0cac9f2d4a0f777d75c27a3fb6dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/lxca_common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f3c7661305f8b89b2e4a611bfbdf08a9ca2585d90fe0b156747eb45d1d6a09c",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_wait_options.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05f4343d4f4bb36e24a6d4d523f91b0a5d36f76ae5ce55fef4ee60c2277337d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/hwc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0fc1bc0ce910a970408e610ecabcf509d21b249715c027c5d46698c4ba03249",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c09c661f3141cee65af3798e6d8e99097cc80a61acd36ad52e41a7b12ba6b5f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6110c963acc2f8dc21d382f283dab643a25cd45328e24fb7031ba11ebe621bb8",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_name_option.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aab816432cd6dcc8cee755601c139c72adc192912eaeee84a407be9e5045a21c",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/hetzner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "085e239bf157426ecf05ca4ee77e69aa9bb8fdb62bcb7c504a27c99b32850931",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/kubevirt_vm_options.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87dc87652c472ea0555f6efdf23f4de828032e389521b6ff1274f626e738f97c",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/manageiq.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d04400f964b445c2ba413c5028e62f5e4411c7daac5ee7c520c9da8a4adc1fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ipa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f952ad0bc01198e8db6f21143273782cab239d5e6acc035fd4606e0aabbfed2",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45b9ab99f962f8c87c100b113c98ccac2789142220e55851c1239bab43c657cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/dimensiondata_wait.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "854970d12c42e53dd93c01cc6c00f4561c29503a0cb50f8ba7d74d85abb67047",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/alicloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "678f5b64368f51cc5ff9bbbac69c2b722cba9408176523d0a874eeec0d2d8c46",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oneview.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ca78c4d3946eaa5eb245478f246cde685094bc3e5a9b97f3a50dbab2a009197",
+ "format": 1
+ },
+ {
+ "name": "plugins/become",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/become/sesu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b0e21da262768d68cab851bc178b09d5168e826a54c8149071e9eb9172292d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pfexec.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e298f43a5574f185643ef9034848685a14bdfe1231ef13de328bc838648d64a8",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pbrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d2a42a0cffaefa4ada845514153982407d615a0aa851d3269f9d52944d14af7",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pmrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8909f66e222483bc3a1dda7bf2f2222eb853fb0aa6710a56fdb47a5351bdd4e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/machinectl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1fe12574a6027a46aab87dda32f05d0b1e15696dd58d1f590d667ee157e62d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/ksu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bca2098900fa3edb95cd2364994a532cce0ac2d3c3d5f6d8a6d2b625b99dace",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/doas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0d32f2aee428bcef6b510593c7a3c5c1febe1223159050219043b824d88864b",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/dzdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c35b5afb2e0f0ff28ed45ea349ad1ebe6f092bd1117539aa4801cd5e1ed0c70c",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/yaml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2d790cab0ea8df294a78af669a0d12113587504e7b549184e400195f0e5dbaa",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18c400bc2ef2b39c15ba6ed5659c6d38b1461d7ce82fdbd6498b9b201ec829e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/nrdp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "766963330aaacfebec99cbc73b5951a889bd24a803c4068f029a25648d893329",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/syslog_json.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37c0e70f633b8fb2965b71109ec033ded47cce1681c0543ffcd1ac1e706c74dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/counter_enabled.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fac7d6ddec71edeff86d142af6d1bbf79734837003d8fcfdad76999e4697f83",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/context_demo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c920a194281b10669fdf4c3f5a4ed8d83b217f15e101c66a0f965a120c20b502",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/log_plays.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32f03d705ace52f5dbe644096f677b4d6b3c34715337cb123e1df3271cfb09a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/actionable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1540e49bdeef186803a6afcc73257741a4a7bc0b3770e46b510ef6f962d45d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/unixy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bc6fc1da6f51b2496c41793790d10a9d7598c566ff7be13764ae706a8486a80",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/hipchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ef7cd98acc9c8235b3c48b3f4cd81956c9113e53ff1fa8435bfadffb4ff8e7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/sumologic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "494bbde59aba8904117f36b48e96180241a120d41fa677d3778d273b41291bd8",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/osx_say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18c400bc2ef2b39c15ba6ed5659c6d38b1461d7ce82fdbd6498b9b201ec829e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/full_skip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1ae6128613a28659fabe1617e1689164db2edaa693132e340369f89f3d666c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logstash.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c45743a365e245e0bf5b50a07f36b7ea9067a34942daa8058243746643e1f798",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/mail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dda969f9eb14262d877b7fd921f3cbd0eb740588a6adf2bfc5e4e4bd528ac301",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logentries.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65a099dcbd530671508a0528dd2791f2257e69a4add768f8f907cd24f09f82ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/dense.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c59189215b1cd65b48d254a4ddd6b8925ff1be6455d31469f9c31cb914f0825",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/selective.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9234845791d5ebc31a4da52f114776c28ff8248c8f4aca9f75d6215b01e52e8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff4f68ade7a5d346d5ceb4aa2fb94490920ef41fac25eaf3a8851b6e4590e026",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/jabber.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0b0fdedf5ad84e97da6dd9fbff4de5d246d5aeb6c1daca18351de5a7f45a140",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logdna.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8303d86a08fd0ef2795a6edf5b6c780b59dc235ec587c1c80e4a640c0e750749",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/stderr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a68f56392de0f899bb792d22c66aae77678277aeb253aac4d1d18e8fdc930ecf",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/diy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "110e356b1e23cd9b92a4d02e933440f1c645bd1c65da46cbe4e0459ceba147fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/null.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f538796e2364839027bcef0a6a5bd7c59a305aa9bbe3efb1a30591e871e8d763",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/splunk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "134a3a2d0b6b97798c82b01a3c8000e9e154ab32328d1de4b1b32433ab256ecb",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/cgroup_memory_recap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0b450214ad2fda5f4872d9723e3219e0c63e5758a2ed8e47a16aa968f7e2d8a",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/dict_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "930e7a1b8af4c32e71ed5b4ada3f0b83f66a77fb3118afc4afaaa57939d663ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/random_mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8ed28be0c33311e1315158f0ab895adbe6b286a0b09d3fe06a14d422356dce1",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/time.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80a25fc2ba18f1ce7a68017a1f5af5435f40eee159c07be188f1fc51b3818d73",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/json_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab0a35597a1cae7e15b7a1f81cd900ea73cc55893fa54dd4048ce9b3a768ffbb",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/jc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ddde83f098776a996981497b5ddcba5b0e2886a868355f447500a88e88fb9f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/flattened.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5eb223c1fbe196c48e8f58b1244b7100ea286883bded28d812ac9d32f53225f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/lastpass.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1acdfd0fe4320a4422027eb990ebc6e00468b394700c616245bd305e5e9af2d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/shelvefile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64dc5a860a6816c78022ad7ad3197c2769284ea8d08a12f7d75584058c3426db",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/nios_next_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c5f42e86b23f011cf96e071f422a822fbb849524cd3bbe420f50a22bd661bab",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8eeff62934588ad3d887f0a7a450451e338a3841d27715730fee7bc502d5148d",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dnstxt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18140900ee13af0877b85769c14ea80a63615a4099491964f779361c97b1ed2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/hiera.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a912c2132fdc1402acbb3c925cffa49044988a4f026cbd322e7061a12f05e320",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/manifold.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2abadbf23083895c905aac5ac33d2d20a7ffc4b21926ceaca39e4e44a609786d",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/consul_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3872fcc50bd5e6d866df43811a71e55ab1bfc3a1ae7a16680290930192f465f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/hashi_vault.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "882896d4407f483ea7c8ff89c6ce98f3f7b626a36b7481170ec5da412f1393ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/passwordstore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "671ea2c73de3c951dc4fe67b16e405960b49cf87a93f26825dcbd3ac68c564ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/etcd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7933fa4afc907e572083917e105c2cae8701270722db72bf11a253f42f1742d4",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f4403f653bdd68445224b505f5c314e22d0b255ef021c91b128bb69fb727685",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0772e6ebf606d885974d563c158a6ac881c9c9036da1137a55b368e3b3f81ae5",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/cartesian.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e6ed51e5b6f2676743a079fb0563805f94991bc09d2f74e202259a60a654446",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/lmdb_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e74896ad5b41ac9fa4fd26dca83cc99513353e95670edfdf496bc57efa65eacd",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/nios.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c930592e27055b1b8a766ca76881c1eec98fc29a5cabcf801a3c26bdd751c870",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/nios_next_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b57f0c3250655acdaf8eec368e8b138d2d3e260c69e13bda81cddd54588ac625",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/cyberarkpassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fa6cf908fe0f852d7d3e7330c51d458f85ca5694c4d7de904e7a640b62c0188",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/chef_databag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "035220bf45a50d2d9e33452eaf3a18b9c66c19af2dc4ef910c2cfb9123f71bd7",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dsv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c109e030eb8f5902027262110154c5b1abb809a3fffd253b06634a562c9c3ff6",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/filetree.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b64f2a8f365c5c61e6096315b608f532395d8e7c5a40a43161da8928fd63ad6",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/gcp_storage_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a54a85e66afa8374664ec5d099b6df7cc9fe26a696fa0163556c721d7578c1e3",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/onepassword_raw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbb5fcdfed4508d8d20a4073a2a944426bfd2866a22acf990cd169d96dbf4a25",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/keyring.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5048ebc453ed22db1d46aafb1bf9742fe3b811c55091654a0bb00f98985cb4d4",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/credstash.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0bf5a7c65b108f0caa50c8123072d84f6fef32b02f68100985a00a103a39192",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/tss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "541345e02444d1534e32f09c595b87f54d827f280ef3497fa540e38910be6eee",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ecf1b8247ae05c72cb690e92caf843e38c021e72db83b0cd45e67ef564ba477",
+ "format": 1
+ },
+ {
+ "name": "plugins/action",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd53c90ff3ff4cf48b5f628398a5a9bf145eaf28e7287000a471dd74d97bd9c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/action/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/action/system",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action/system/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd53c90ff3ff4cf48b5f628398a5a9bf145eaf28e7287000a471dd74d97bd9c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/action/system/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed1cc4118c8ecc4e14c74dc5f35f3d614520c8cfaefcbac3975bd84f5efb7e9",
+ "format": 1
+ },
+ {
+ "name": "plugins/action/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed1cc4118c8ecc4e14c74dc5f35f3d614520c8cfaefcbac3975bd84f5efb7e9",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/lxc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e095935cab06dbb6b113cf6c53204a175bada66b695a234fc322e4713efbe232",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/saltstack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68ff0eee1138f2351e7d921b3fc689eca28024bead37d30034333ebf4828736f",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c82bdf44193454b0c696674084914dba018f5002f8652f05e7356395c5b62c4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19375dfba688a2392ea2ee95c8c0637b5838154d5c03aa4e7cffeb29f9ec6c92",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/chroot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15dea26651845fc29b1cdd5c15a1b5333163feb4c19791468f66f778fa877aaa",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30bda1a62c03e5d817110783df96b5678fdf4356f704115102afa114e6bbfffe",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/qubes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fea6ec111907638039610553d944dd87b0382cac25f3f71f9c300320dd4678a",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/funcd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2d2bb0ce11a1ae7e6b4da6fe5549c02e71de81dcc4357ba15011c493c93d457",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/jail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d61f90cfa90de861a8b913681c0b7c8c26121846789faf8dfad6207841e874f2",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/oc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5ce6b54e47ac11777d7d91c3dc347c0169d281a9ae848018661030947633999",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/iocage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7150fcf3a52fc65be4667dd977191a696978825644a187a1f7393fb18234f68",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gitlab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e3d3e100fb44b8b8c5fd7a4bd024cfa3969f700ca1d6623e0f688f45dcbd0ff",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3363f7e041291e29f7668a437f0f9904a1988d175f656cef1677ce6ffb0268a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb88ac484b87321ba3be96af81a5fbc3baca63241dca319563efce9b0fca7bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ibm_sa_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8facb77b6a9a99ffaa9039a1bef0d23c855be8a50b2ab41de3e3af7ae7e0c1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/compat/ipaddress.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72daf10e2e5a0294c6953baa8f2439079038b8446e47ff8653c76b0e5bf4008f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/docker/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22ba07297f58085762001625b0c02cf71e7126aded8fac8cfb3696d64e99dce3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/docker/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/docker/swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9c5040f892ca818d0e27a850bc6ab5296643c6d6a88d4be44f08d7da4a330b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/vexata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6f24c192e7e60e9f4edf70833405974c448addec668571258b07b4ea4641919",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gcdns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b39b36b0e6df57a37bf2d548386da0097111c2614f7827d1b444314b1244bd81",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/nios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/nios/api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a712831d63d23d2176cfcf70f00ba6136320140ba23b6d43fd58a604040593af",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/nios/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3af9085139519202decfaf7e3c945a399f98ec3961221ba7de4ca6ebf2934bb9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_netapp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d86c54e431565e1f91299bac6171bced5bb978d3649854c2b921aa2cb0cd075",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/dimensiondata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa6a1917eb2c3cf04159130fac79f317e84711d539a7f2ab9eeeccc7df7de2ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/univention_umc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae321229be00d3c7673ee2107a90ace90f27daacba771546d31cca584eb29c88",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/utm_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "051a5e7b4bf760a1940f6da9777129871457bf8a5bda90594a43dd7f8c4598d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dda0d121e1db9a0afddfc0018b9eefe57f47c914f1da5fdebb31a429cb1c9a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65e3101d323d453f7292912da477050d0fb90f7b0e4fd155cad0698f1c1c3815",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/redhat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "701bbb9eeab6f9c5e9d4d7ce1be0637802c5b9589b161b40f4d75f9a1d5c8421",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/hwc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd4c98bc5e0198900a44468599275ece404d342869a3f09a58d2a76e1535b463",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gcp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a85bdf5808a57d1860fd128e8bf93be1ec8009022d0a0e8fdc1af3732c2e8a9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/alicloud_ecs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef32532f9e2819cd48b879cdfb5ad0e6a089ae5c6c5cc43b449a6ee808269e5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/cloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "598542817c93e1256b47bc93b076a2b7c006a92b13bfde435a1c6bc4dd630cd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15cbe6e25c4a34d8c3370d4a4badebc19f98b2ee865a2cd64bc5ce6a055ea6a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/source_control",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/source_control/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/source_control/bitbucket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "786064f19fd25e3e8249131ca957d893874b5e4f8878659e251303ee4e4713ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_mount.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f02da5fac9f38b5cea1a33aaf423f1a2141fda46857ab9af148bac09716cbc9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/redfish_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb685c18e200e9c333a1bf4d2592ee628c4704c2271cc2649444a35f01054da7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/influxdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "730a5771b2e29fe76b3e719b458f7d4208ba0748923b57063df488b07d5085a8",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/xenserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feed8dab4dbce145bcc864f9987849f5049e5f89a13b20d79130538a4d517369",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b50befbe59f8cddb16b3347cfeb320ab64f6a01e117e40daab1b413ac27efda",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/lxca",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/lxca/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "833421c898caddc0c3d1763e861df688a7c73175a0ae9e0572a22c41d50562ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/lxca/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/dellemc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/dellemc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/dellemc/ome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44416af0fd9a7d23a24b27ee6bb72c4e35aceeb26de5d1e1be76b295e2c79b6d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/dellemc/dellemc_idrac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6f88f72222a785328fa99137fa68f70475b63ace0505979e6d6f03be11acb8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a81299f7dbb63fb94b47f5da59afebb018dfc6526188270ae7fc768bb74f3b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10b8317fac5e3efbce87ebd5c69355225c3224acdcbaa9a41bc595ceb715555b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oneandone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a92b49b23a789b1f64e6eb09870881cf6812bc3a2f30912cd4ecee4898816fcf",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd979175ff5aa4921ed97400c3531f44b323484de149cc3c21fb88d832735885",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdc019246f2d0c2370a8713d704468d1493f03285ef95420c6504835f9e79b8a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/kubevirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "311f86db2bcfcc5f027abbb8860c683f633fda8100a6b6e01e0a5ce4ed1cde40",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/memset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ea565844217de915c225f4c92f30b68af24733a243f801697d8fc199a50468a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/hetzner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4cf120cad1d8e55bc013534e14b1e0c89c9832c49162f03c85cb28ea02855fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak/keycloak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ee998b51a4be367477add9813611a0ef6d87d1ba2004dc9334b6898d3609632",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc03619992801129d5aacabd6caca116a11077a3270d1a8934213a0f870725af",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/manageiq.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96c1aca8ed36d72ba66440466c30f7049fc8ff27fd1acd0d6b1e8c9b62f54611",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ipa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efd1310df585b6ef4cce321d63c0f5684fc043df5243a4c718cc1e4e95ebabc6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf286e7fb228799c46b66d7e34bacb613838f65d8faa42efd2a1f29ee6759c1b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/hpe3par",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/hpe3par/hpe3par.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2172a77a22367e580358dd9e966263e1e95d133d13b6a4846188471b584de8b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/hpe3par/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/emc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/emc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/emc/emc_vnx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0ae403f5a97534ec792410562013dc7e9b6a37a28893f2eb0c3319dc320e463",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "452dcde138054534f7a6c1dfa3af3416daabe3526219e372d60677038d1f48ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/pure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60df81c9c2b060371eec008cff84670b87c339a4d414f3ae55f55ace604f5d76",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/known_hosts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd113ba5afc8b16426afe37f9aeb91b16a68653df88dfac73190ff3d41f5a02f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oracle",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oracle/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oracle/oci_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f681309988ffe8c2a867819b5f9d78a619c4bd85fcf7561b169b2e3b3fef5d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oneview.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a543633d14cee7ed63ff5451902e962042f209e1bcb10d36fcd01068fde88b8a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/heroku.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45a2c89f450df4f3fddc2e5e387f56400f3c4441ff331ac194be585425c1e1ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/yaml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11d61c69555dbc4f903d1dca154c5e5d39c547ce8582ed727493405717cdb668",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/memcached.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "622d88d5f714df919403d522d0a46da3b3e0cf11b2bad06c609e3c1ec3ed3688",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/pickle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac20e459efadf4b63879eb44a71cd90befe4b69be6d9e2f6e1b0b0bc30f4cb8d",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31972f09ea044816f28225f07ab44615404575f6d3c70c42d72bba6b52a674d8",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/settings.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0381b42e525395bcf6c9e21de33e23ca8cace574b8ef85902a36ce606d3a991",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/codeql-analysis.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b03191ab8e81273883b6d5eb8ac4ff0a216cd2e3a11f46c9c15553ff9f0c5fcd",
+ "format": 1
+ },
+ {
+ "name": ".github/patchback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f28653c2f8d2965a55f76092049c4205a9c7f828e4edbd1cd089f7dd2685f93a",
+ "format": 1
+ },
+ {
+ "name": ".github/BOTMETA.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49f70be1c1c76111c45ef9a5f554597fd10270731d676347220f4a3d318deeb9",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94b0909a7851d2f7f631ff976c9f4812abc02f7d4618c4d2be28512719fa28f0",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daf1930264760d47b54588f05c6339fd69ca2d239c77c44bc4cee3c4e9f76447",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/matrix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/azure-pipelines.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "679004a4b2b5ce16af5ac79c455d12f89c9f6664737d9a373a965fd2eb1c0267",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/combine-coverage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/aggregate-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "820353ffde6fd3ad655118772547549d84ccf0a7ba951e8fb1325f912ef640a0",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/report-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6a373322759ccc2736fb25d25d8c402dfe16b5d9a57cfccb1ca8cb136e09663",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/publish-codecov.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2662ee5039b851fd66c069ff6704024951ced29fa04bf1e2df5b75f18fc2a32b",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/time-command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/run-tests.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/process-results.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d41b19c941b484e7ab6ad2c76f22793ec98abdc6c445efe5cfb40c7f56ac3a7c",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/builtins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_stackpath_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51a662061f0170be9996a35396c2f19b681e8dcb7ec68caf1b64ff22c937dfe4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_cobbler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "530f1e9c7d7e8afbdd59dab3a8db6c136363d0a7efcfe25c6242bf784d64a9fc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7fc02f6f8411681ca5816d8a7dfcf23fe59a9fd3e3409a8d4d00feba79b2ef3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e34b12b0edb942791c3373655c1782a3f90274e42cc71f9a5f5c96436e73ae5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_change.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1f5661bedd93f7a7119c04fe3f15629fe77d21b27b3cb2c3f8af72fa2eb2f28",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_monit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd48e66df8dc07a4c254550a308de27368c60b952e94701a747b62a85afd4b42",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e86552e30a2074ae8275eb78b2e19376442b9b5c3d59e99b2b91119678875013",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_pagerduty.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51231e2644a421337619f184be06832ccffb8fdd9bc898989a18951df9a515c8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_icinga2_feature.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "784eaf2c4bb9687f5d501e243a92c970e6332682c10e223c0199f501d19ae05a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/monitoring/test_circonus_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "637e2eb5351344cce77e7c3bc20051a5b368aa350b7ffa1b043378c7e29e4359",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adc9da4db54ecc1861794699afedae3ba6811be01ec547c2543e9b6a0901c02a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/language",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/language/test_gem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0484019fba974202d150ffafd84fe4ef416641854debf6505581c6eade9b7079",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/language/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/language/test_npm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f21ce864258016f5925f6db1b351f7099fb9bf62acc7a6ed992dd24fad35ee1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/language/test_maven_artifact.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "179825e10a242f386aba87c4c8e1f5740653cd8962b07e37dbd588af37121a13",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_pkgin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8ee6102605e34a4de8445118f42e2caccbc164da2672b27ccffe00c72a3f8b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7548ab19616febd1347d9324d0d67723e6025617a012b25a485a7eed5c9fcfc3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1600a5ab12178d94d5c44ffb6d4369747571bddb62def87c68ce66ada0413fb3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_macports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddd9585216de66bc18b9731523ae67ca2ba352070e58b7515b5b11b80422e2cb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_rhsm_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e78ab99f6bc469e3a195447ca18ec2467dc06a1b49d8a96e3f74b3d2f94f3d42",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_apk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad806b3c277e03d16fc7a2506353c46b8574e3cb82504362c6028c33fddc7de5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_homebrew.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a9a041047a61d0801c467295ebb1e238c530bc7555a1d70f4d59efc306f2549",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_rhn_register.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ea198f1e3cd0c8becafb759222d8fe0f69f1d1a0bfb185e1e3bfaad4e2d0a41",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "504f9b1ae9baf976f44e4ef62ed7c616129d090b3170571279e1bd8d43409be9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/os/test_rhn_channel.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3ee90c66745027bbaba686da547bf7d1645f975be32918dfb5e3b7afe69cd71",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/packaging/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19e52c9a778522eb87980ff239af1da3a826f519dc03fd2470b08714d6473c55",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/test_nmcli.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d7fb73fcf5bbebad5ecb406dc164fe11bdac33385203777a9f0b30f969fb079",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/test_hetzner_firewall_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d85180be303aa3b7015e8840c268098e965e0821b9132943a896881c0f6ae5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6205c6ef010fad5ff3bade0e463932c9c3b11e62ec5865782dc9b47972b5dcaa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27f573b0a72cf97b0b9b3b75ab5fe6817e3395eca61b59280302e48774b9c13b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aaff81901bbba1a4c403d7c5312da3880b8762da2a3a20f915f02d66fe595d61",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9478c172786b6bef1b4b6917823db95c1675b37e94d488aa8a8af1173e3ce08e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e30515adb439779212bf4a06cb3b04be56ba25d6ce5e84514c6896c101835a49",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "332cd4034c01f5376b8875cdb380bbb40ebfa19201e4442bfe5210c93c3e7958",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "638ce7e75bffa0143cd0ca48a15304b18ba5d9dc1752361503be8c1dd8f09909",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_fixed_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27c4ab2a7cbbccc4959e0212e3c83fa399db4272652e520c36d4d28ca2a0afdc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78f79ebc4db0c61c94939f75ca20e9204d4f631333c2fd58fa7af133cf180164",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe6fd9b0717501cfb8fb7f8605da3c70c19a2b691fd8e7cac43b15ff62080468",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "429392a8b619df9c76bd78db93221fd96abfe778a4bf824a49e47f050a0e7d27",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_ptr_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c6f9448fdc83286cd4af52fe62531eb1448316aa93d33bca74517843ddfb795",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcbddc83577d026bae1c6fb4bebf7fca9a582f6a3caeac535cf150eb3c6962b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28070bfb080ed63aba72d091ebbe6489118f4bbf4a1674c6d63e6996d9203b06",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad933ae099eb89708952cea7f2ebbbb9e800ebc8b80018c5f75522bc77917219",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/test_nios_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41f9a804b8c130d22f255624132d999a328940aac6b79d50b8ddb91b427c4d7a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/nios/fixtures/nios_result.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/test_hetzner_firewall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c160d2c05cf2c6ec42d6a2f9523dd0bc5b3bf9cb7470a143397b52e8333d827",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f7dcd6b7053b7652997ac5ff3e43720744129a6c8098d2c6769d429900c50d3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/notification",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/notification/test_slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea7a7424df543b70c068f8efb29a909a3935d8f7dced00bcec1e969c5fb334c0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/notification/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/notification/test_campfire.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "818eda9c60aa41fce3ed8a7a88df3de57caa7c0c3267670295bd44ee96a0f8be",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/messaging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/messaging/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/gitlab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2543a4394cc2ff45fce0fee0b534e528c76ca529f1285476111bcdcd24ec285f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f8f3d4bb62026915dc53d1a9f1a93a699613a18059f9170e70178edb4c0812d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcdee81ec103c432b3dc691ce5354603fcc443952fb111cd984aa112a8895408",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc0baa87b9afc40ea18d261d34d3f1ff349983a013b82fed6b89f3965b58c9c3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf413b6fa97e89a2c1ca14ca90f40b44873a9c8505476e4cb5688743d2fdd9bf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "830f487429ae71c3e72cd586cfc39fa150e46471b70f2c3ae83456b3094c708f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cedaa71a4c61865f876c348ccd3f51b4d80962a50f11fba0a90cd98d8da4690b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b62d396f6748eb77a869ac7f25fd79e9a8cb9ba6de40e1721fd54986d6bb121",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c32cd1463e12b5d2ad9c1358b011afd09cef2842a7508f0424d159cdd5e8826f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff7cd4cebed67c8762f6825f07dc6978621343894bcd9c77e2e786eacbfdedc7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "704bc5270830ab101c2025b76961c4a5d21233f6f1532cbe92c6f784d3297c6e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/linode",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/linode/test_linode_v4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c956d3eae1a2f657f87788a5f26383da2e1e6e83aece7d114e782848663aea1f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/linode/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/linode/test_linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c1b4d4e134d45209b594a04eda78afc8c7abcfd0a25b61a4c34137db0db6adf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/linode/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cb56f4daa028f7157bd3dd75b46162c565beb5120862c1585f890d51223209a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker/test_docker_swarm_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2349c4a540e85f9a5fb1b691d60b5fd7b338cc759dc6a53a73639395534a523f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker/test_docker_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97032c4e8da8d234f8b18f0292da7756c92ef36ea9eea2be15b7c7e105cd2d7b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker/test_docker_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96d9dc5d140f6fa55298ece2c86c0778f0ac8f44caa7faa1f5ce2626c3c5c9dc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/docker/test_docker_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c12f70f3aacde9b342b575593a6ec966e9213efa88e130fabc27eea9de35b4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "958eafb650d09fa883cc9b3d9cf14d493723d91dcc1da9b9ee57c6dc70bdd57d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2599c3f551c4736f1e09629c7e2fcf44b6c8564022bb3dee6a8df2b728ba29f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b6ed5382c32dcf082bb72b27c567214b0da7943fd224f54a674d71142f7b26c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/xenserver/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cc2b211f0a74a9ec3994c584d26b423d3b9cc6671eeed0db7d8821865479d58",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/misc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/misc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/misc/test_terraform.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e60bfd9c9c35e83426606b13a75e8f6d2adcf1245ed1feae7c3030810061bac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/google",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/google/test_gce_tag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4046a175ab7b0f0f0c012c96a7cb3db5a06d770ebd3b215ab5b24ae529b24149",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/google/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/google/test_gcp_url_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8230cc5ea7af4af5190e9c8934034bfc1f879bc0d43ddb40e924f7547dac402",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "102679d85fc53c201765cd7d85cd6f529f0f306816cc1307d30bd35b4b48f1d2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/kubevirt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/kubevirt/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/kubevirt/kubevirt_fixtures.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7d1d9396b74b6b5458b6e2b307dd6f790b8333596bbc7fc905ccc7fe5b72c12",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "878e0f4a23c54527a1d27f6cc1a3d36a5abcc50705f5c99505fa1dcca4d0e14a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_rs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eabfda6a1c437957eb1c72628ea6463140c150099030124bc135c63452ae0525",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/web_infrastructure",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/web_infrastructure/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd9ebad3842220ea8251164d45b9cb1d85197ef69cd1e76f80621bf396960d8f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "765af25165a9aa2da46a1f0ecfccd01199d6a0f4d8bde8150e1f201908256074",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/lxca",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dc89465a3f3f8a766411db1bddd2a0b7ff0d2b39bcf392b3c6a6d9707665e2f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3d733b956abd26280a2322a9f11c04f52069df59c8c9bfe34af52325af97141",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/lxca/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cf8e3857c596a63e055bcafed62b35d05f7f5a2f3a472839493dc3b9dae0222",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aba6e5371afc2bf09637d070139007bcbd45a9db409c6540e31e19ca21cd608d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26ce96e907d7a8e55f353489a7e466f64f4ec8e24c81e69018187b5f4af3ae18",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9c21da4069dd0e9e9d22d07d3969c8b6e0fa638520d990e9c5e88859f626003",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73f3689fa412d3c907898205cfc3073360490b6d7722676100bcd25bf235afda",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "844029b96cc4dbb5449d89eb357f73e16c0233e557820635293dcb35e1884e63",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02ae4523f1519d32f33f51f51420f9a48f75f32b55dbc6ee9ec3ead164e35ab5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3182e532dc20243b2bcee01228fd4430de5bf11971afe051650375ace450b46",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19f461d1441aeef057bd0b2fa9a5c0ca98cc973414accf93cf583bef0f7726a7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53334346c654b1a2b755bb908aaad43a7691d44b537b550f0ca8f794281ee4b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d98e413d2eb2b62cd4954127914838408793b8182dcf2916dfbe1c79efbffea",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dd17dfd91d095476e740d32e543dcd51ed436d187fcb4e32e0c3411e4217fff",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6d11c5cacef736152e2289c2621c0ae630f2bcd2de6791c67a432335c77e96",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daab0258103b8bbc5fe2a0499857127177ded33f1bb8cd423795950537693138",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9df5d55d4865eec15d7799e2f889a1d539954f248564ca80aa3d38efb7fece3c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6409f79838322fbc92cc554b45398a6d5ee0b6d447ac3107d82806695e11aeb1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e599e39df535672407b42860879a28ae6b83fa32cc4e927bff82ed17ce394ac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/dellemc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/dellemc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7574eeed5a77cb02bf685a8f12deaa1366ccf95a092372af5bd437e9f8b60d66",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8babbec585519aa08c47dd3f05983b62be0137a567c94a3092e05445cb506534",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_java_keystore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ac0c1bb31e751d5613f0cdf871639014d32b99a6e167fc430022ec3f6f9a940",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_sysupgrade.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d64272139629294cb611a70cc5bc8ab03efeef5a8c20f306f0bf109ce5f57d32",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e157d2e02ed63a98e2e1d0d3bef78293274b3e409ad749a682542351f12cfde",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7df04865747cdaf41c044674909f7f9d789de4c721aab7638549d28106f4eb7e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dccfc3f80598ea3f2f32f6661b3b5fa6997e6d6a0e9e2f3cc4648505ec7f52",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70b95830220d518dae6662f2e1ca836dd9c8adc1823351048cc53db8c865c33a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08747ecd380971329c1bfe12df432f00c64dbbcf346f4c14ec799dfba42b2b1f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "806e3459fe842c37406690b7ea1c112832ac485e8e10876495c671241ae7ab29",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9db4000a5df22bf6923e3c3fae4171698ec097639c4e94297297af729fc0dbe7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ac948a5ada90b50ea34d1e31ed4657f220a7153ee2908b880f3dbcf4b1b417a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07945be2848b249d636ea429313c539ea4c9f921780e1d912b6472561821143c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fda07e9a6f93949f6f53ba8b71054114024b9d1d612c4455b1ca5effe630e5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eaf8c9708b20dab8fc90b8b2b5716167e2bc92c1c1b0638ca82e11323f78199",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6653eeb79ba4c679c224c9c43b4a0bde5075c9795cd5f446d95560c883df1c67",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "529e2e7b36f6ec834edb09878ead526156aa9d5349a5cedc1194796d30c7b7e4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acfcc70084877cfb0c89871e02d24ec9711b22085f5f5fbe4ca8a69cf0336dcf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d65afd09be4ed2e70dadbbcc3691e8170b1e819256795dfcffb128a41a880d3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb32a11d2175d165ac30d4d96265aa7890de42aad1e4c03fe862db31a9b609f6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93e09e5b99049be103115e7ede6022cfd51cff8543cfc4f2552f5315e9e7ea75",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "134f64892e64e650bffb29c928ec0ab72e397c122f178417e99cb56fab5c3b2b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53abda66ee9a035f805bb82fc7cfae6e0b17f42663158bd6c7af5fa2b90aea88",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5bcf21eb70f131e027c0a1236d2264b0db9de60c2d8ac9df860b83839e7a757",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac877a74278c9ed870b0358447d9c05e8dc910d4b3594bf04c63699d16d8f688",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_ufw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f43aae9d75f92711ca390e43607d275e436d1dd9aea74969e2b588027dd3d52",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_pamd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "118b8de9dac0fc7e7dfd47149182618827e2c2d528ddb133f802f4494ab0e3f6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_parted.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9867bddb7cc565543bdfc4424b582ae22257f23d7f4b912b19c6eac25c2aa59",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/system/test_solaris_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97894d2e554664a5b1ae9654cf55276e8d8ea302c12b1a52537c1315906f604f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/postgresql",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/postgresql/test_postgresql_ext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d110b1028bab70ee4557dd556f408d4a376943d2826d952226a5dfd4f8aaf1e1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/misc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/misc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/misc/test_redis_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aeac4234cfbb8535ebce9141aef81fa21dfc731b5ee925776b8cae12cceac44f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/database/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/storage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/storage/hpe3par",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "457fe72bb1be07d527f12757607fb8baa504fe99fedd3c5f193f2a961745a67d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/storage/hpe3par/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/storage/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_pfexec.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e7d4ccdbece51e1c3426e58225426cb3bfd5c6f475243f9dc9554a4a39f2509",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_dzdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f26500ca45cdedc6a217cdd18e0a1fdfeff72415c006cf78f0f4c25476b98ff7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_pbrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f581f310504561f10a22a512343d2ae213e0d73eed950bd79fe35916f56589e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_doas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bdc1de37449ed84ce41b44565a575e8ee619a055ced31cf62d2c55a44b64f99",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_ksu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7b1b2f3a6e77846a3adab6f323ce7cbcdb0ce65fbc2d4bc8ae66f10e8a8a488",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6da8cd096bd56d3f206b879eaaa93fd1732df17ba15d10a524549df46185dafc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "917507083eead1d34596d6b31a4a3600a780f477bc8856ef326c4b18a1dd2053",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_manifold.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5d2b651cf2fc37f4f8ff632aea0237ac7fb35ac5f03a643d7cffc5f1ed0db2c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_dsv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c25e7fc101df073b45b7eb7b048d7bcd8b3f477e6021c27dbe974945fd8299dd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d79104d0074682c5308648f66eabf50569ddc34f32ab916a02a4425077043083",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97052edbff97b2b30d11ab8b147fe733bb382a02db51b4604bf0407a01fe2ef2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_tss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7890d74ef60f4ed12af2a816be56a77e75748633aa8676acd69f181e85d54c21",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_lastpass.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b61e83a357f3c9bd28795c8f16238a55f5cd9031783f06f00785d50e02dec8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection/test_docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4bd348130d4d91aa03e9c04bcb2a9ca8a6c520d2abac9132e3eb0dfd51e53f8e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection/test_lxc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe90b5d5eb17eab987cd0022995eb85b6c8f0e90d20aee7b8fc0d6945041ab00",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_utm_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac7a58ed47d4ba383a871571bfbbb8e447e42019d555d3c5ccb5f31b79510a33",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_module_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0cd2e928b9cd456165a1216dd1e30473c0c8fed87a6b353b66c094b52fc8c8a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_hetzner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0614c9032028b40d44fe982dedd5719e953c24245203d5cd80cd2696381c576e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/docker/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/docker/test_common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "571c59b822ea75afbb45626866e32dea24a6ae1933ea70213a4004f6543f78ec",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_known_hosts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "856a89896c248a26157903f168873c8b575ac208c15d4b7071cbcae711ec51c9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bff0807c10f8d51746ee02e3e25654d07004f4b35cad01baacd6b24f3d342bb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/postgresql",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/postgresql/test_postgres.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "383621ada765d3896d5a9382f989853e96f89ca6bcff75c023922eda9396a1ad",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/postgresql/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_kubevirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0e1e74ec53669ed8de4ae7f1e377b757f4603869283dacd42a571628c78d01d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/nios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/nios/test_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28409fb260d93ee1341c446b8a00746ba2dc7b86cd86b22b71e7fe54bf4204a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/nios/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34b0b4122166239da7c963580b38ee3beb3657815825b57c8c37349fafb55cb9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51388a0d4511aa3ab1ddf74b2ec0c603ed46e88741d90b8c202725d7c303b89d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_misc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e7c63d3bbf78b71353572f6ee50a49f633a371b9506523cb5e9541df82837c9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5539b93046d0b10ed2823aa1d89efcc6969c154d00b4a92443157f6e4ed75313",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5beba741c380832f920ce31d44c0061e44cd9301469262e080d83336713ac65c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_xapi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c32fee1f92b6049387e5af9a600b80e302cf08e3831a866af986d70a44700237",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19e5f84c0f0d0f1316516689819332972c3f21b6175f6e9748a5319b68e5a2ab",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c7d8c81a73c9ab1c69a8e610c788a41f2817a56678475a1e36267cf8037b5a6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dcd632f7e357295691f1dd4f1c5ef041bc76b28b3362ab91aa1a8b2be8bca08",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdc32c1f62b6cd60be98a4b5531ab3231d79055650df266371420afb052c1f93",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92c62d837dcef25a817ac3a9903d6a430b0deb44848d29ab5ac5bb6eafcda526",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b994736a7e80d02c759c7b19977101c0c04ebc1c8460258f5b96f595b9daf037",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "728bad77904f8e3d2539810fd0dfcec6bb24621c78406daf4434dd611042da5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a8de78de7ba45268294a48c99a82a957ecb3da299ac9036264308392b14106b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61a935bdae44191686b63826996abbf2431834febaa54e4a9e523aec016cdd61",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58e21893fa65459c9869fdbcc9c79299cc01183e3a10cf575cd75a62ff366e58",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc/test_hwc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fb975491eb25d8f8e74373095a0cd87e12e1a7d6acd4282b1aa1101142f2b87",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc/test_dict_comparison.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c844c6b0656694a4828278a84f6ebe4d6850f022076d86aaf3b68c2fac685311",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud/test_backoff.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c43c5418bed4f056752610c28cdc7b9ff535a1c29238996444a21fc6b47502c5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/remote_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/remote_management/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/remote_management/dellemc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "623112c1774477c6f55c4125671e79ba2ba9ab82852f354b77cfb1e056ad4292",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcf067c65ac448adaee89a093902592e7b79a3ae95b5cf47098cd729f7912727",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea96bd96234663eca8d1ac37cd467f9f030e97acab14425572ce96a1d82d644b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e722f74b02b3af62498536794cf19d8ecc9dcafa0fa06eb750a32f1fff7a7cc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/gcp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/gcp/test_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "723ef60a3e1b80bdad3141b4edc51d69273930fcc219b5dba6398bdccd784030",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/gcp/test_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a050c1ae8d1af35f5c1250f59a25fefa8930ceacb87e06e5fd069ad043d95f0a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/gcp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache/test_memcached.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b48bd2b0a8fda4c81a721efdcd1997f107d103297e2f1068f391ceeb067c4ada",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache/test_redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3b5c11136dbf19135b0fbef03b178853dd74f2531b5ea7279e28822755541a1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17e2718f502a0d23c006d1facd236945dc6de44015a10089cfd4faeaea2aad18",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/vault_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c5bb024ac2f936a8755eca00fb6e43f2a72dac131fe07f1ed0249dc29504ee0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/loader.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e088bfefdc95e3fdb8df90c5ee5c2f676dcb2a0217a39a010b153f0b6fa478af",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/path.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f048a12629a6297a676ea56529ecf766cff30bcaa873c6659ac5b7f6e29472b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/procenv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f9ddb6e4030020111771943324610e0312f61c875a609ffc225f5dcebb97cab",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/yaml_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd95a4807e52e9123a8d40132a5f52b75cbc1496e1a32b104b2655bf631cfee4",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcb19e4dacc01262ca5b70ba20413e21bc09ebadedba26f494e90653e89f7d4d",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1468e7b22ba353d18fcf2f5b18607873f792de629f887798f081eb6e2cd54fc",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1399621cec2950daa5cd389c8c478c4e4a08bb61bb04d31b7257c2b2d8ee99",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcb19e4dacc01262ca5b70ba20413e21bc09ebadedba26f494e90653e89f7d4d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdcd7485be383a77858e967fd1d53038375736553dd7b8d5579c7d6e49d24c3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2327ed2b66086f12d6644563781754dd9d8131ad9beef32f315e2f9861504deb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42fb21e5a17c47ffdc62fa1ef9ae61f066ae3010150ad08f9ed877e440872167",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/Suse-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc96f60f9770d3926de79613a52d26a89400a88bbb2680746b6b8546b20d23c9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/tasks/nios_ptr_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d963d7efe7113cc92ab45fc5bca5faafd1aa713346558b43004aedd684725019",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5024096a60693d9c501401b37993b78a74d794aa6a995177c81598a754f3add5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_ptr_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc48f489deef2cff35e3b1fb618c6350ef36bf4b8f9848ef27787ff2992d7b9d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-attribute.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4729b53544d44cd247ccbfd4ac49084f78ee131f0655a1fbc619435260d28fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-xmlstring.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "549383c66266b2d68f2d7628218d51840b1b60b541573342829d746fbad12fad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-element-implicitly.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb038fce25f676d3bcc65e14e2bffce9fd95e4b4cc5e2246c8b1b082fea4de78",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e725056571e6fa990faba7db1ccebf5a59339e8955918d313d7b403a067b6d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cb1a66a018483c6e2322289b4163c27218c855d9ef0b21e4bbbabf30e26d24b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-attribute-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6007c1f7b9aa5870b75f907b6a9eccfaf03ecbef4c5a2db44e5546b605af424",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-element.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4565eb61ac841665c8415c3124a463ff293654f13cdc955dd640f38c04542239",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "781635e92c2a19896c245321ceb73a5e9af8ba83af57df5672a14abbe70b6dae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c279acbc5f3930a7d1f415ac8a991312423bcd30c4d05ae77ab36e8029ca44e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-children-elements-xml.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39eb7ffa27d7344fd082558437a6280cb442d30718d1a897348906a140f45206",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1772744c96a7e6358c1d429a9eb041cbddb593f73f0af7666481d09b66e4d1cc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-insertafter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce723879925e16828f1bef9deb4e91f3954b88aa1abd9628b1f1a15334536948",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "044a4820d4e8503a6529266fe7b56d0f72f07bcaf3e8450a159940bc205ea3c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e24adeba1e63027aa1048be7448fa154c207991a885ce4b10329daad7f19f62f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f28e5364fca3e9ccd551f07dc7cd0b3cf352e456ab0823651fc6280ac0c9344",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "965761539455f8a58c2f003dcdf644ec90b8856475ed77ac48d601881f98b14e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86c137512feed20685301431ba09167f25ab6af37abe7dd211566bd7397bc266",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "228f29657ae521f4635d23758c67f219966b966ab08fd528756c299e8e63aee0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-element-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf10b314376606565ca6fbc2f24b22687b612f3b387a75201e850c3616751159",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements-level.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c558e28321494ce16d3244551d070886c0971fd8d3da04b45b12b23ad882cc0f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35ecb6ffc66a6ff44e68b98dce9042f53d897355f76124b6f0037d108183ec1f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc65f04f8c88dba73623d55bdc80c12bc3bda1e555464eedadb3db99d0192a7f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-pretty-print-only.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b76ef80fa21975d332799f74443a72ead11f7eefa7629a11acb6085afc6a6c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-count-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30fb55ac6cef1a5ffc09a9925fd7c9dc178094ae174dd9cce199c0fe0ebf54cb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab0fde4f8d8f8ffe8d60d19109e3afe58d6344871b6a4921f6b9eaa5ded9f906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdf0e7935fb042cf28a51eec0ceb9b3f0f5eea1036ace37318ffade22c12ec8a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "565f31a4b21becda45fed23f4912a5417e178d223846d0840aa9e619cc879fa0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8988cab1a96aa47f1dff7f06119afcd44d0ce3d898abdbc1572e73ec8ca301db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value-empty.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b45bf3b0948127449113c91a9f890cba215cd4d2c92af4668ef963df5daf6770",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-pretty-print.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae2efb8440053d74212fb1068c6bc2170fd148fe0655961a73147df4d2d0c00e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-count.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "926f16373f57f274c8f848f6d0f42ceb76763fb09a20f5445ee8c47f203e4083",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-get-element-content.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "703774aac6461616fb0b1b3f196cdbd6d654f9e0c966c4e62a1953471b00075c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce772f224f15a05cbfed6555991e0eee15c009998d4ac776f0a2f00a0718f423",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5da7db351704774c660255bf4d0105f7a4507e1461c9027796566365ade6ae4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd528082c7a46fc92d68ed6994676de668c3ab6bf901ce0583a8442d5d11699b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d4f9cf197487ac19fd9798e97385147c97e372531f2af1013d9702359b554f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e403493c2b6b13ad2dadab66f4dd63afea50b1738c06e21fe674a634002a755",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7b7a197cbc38ac9e8101d06ee27a7cc14ab64feafe74f3b7a1faf65ef267208",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-element-implicitly.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f51c7ddee9d1cd6e1bd7ab58dfca1bd58d56f1a27bd3bdecc49428a6a58778a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e70378485034552978d9bd0b525729717e75c33c73e947f4c02779f6ca8a9fa0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertbefore.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54632a063875c7558eddb674a785edd2ae3d6360d0988912ae3ee3f50c6f0082",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-element.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2571bd28e49e82ce1b494fd2162f89bb82947615a9e775a1f7399df435f3e19",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e50992baa59f7a611e4ef08211dce8847618ecbd0b786fc01a17b41330405200",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-level.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91ad02bae7452891174b2178d2f6556e0dfc07a5e8f491d98f0e11efece9b1ca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d479f1699c9dfed26820f218e0642689c9a7b75f9df8a49d22158ec117f0a43",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d68db62d1a5fbad0082338ef7f7743ff4c60e1ba452d507496a0df95980060b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae94b872105fd18f6cee5e0cf4b9b0527757a6da94e52638f2e492dea88f2034",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-element.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecc4e61ff85be0098aada5efc9e3c0d8424c98baff4f8901d991ae08c08416f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77a5e85cecebfe2a8fc432d8bbae1aee6b313956f3f2c12383310ad151b6fcb6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "600316e3ef64bba85725621be258991fad9477b00b25422461aa59e703363291",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff7167bef3d711a8dec1572ed1128746ea63cc69ba51257bf59b56f00113846b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-empty.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b492c8b81778ca3793f788cdc189c07f9170e4b75e63b49f09768611b2953868",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertafter.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f12f8469913b495e3138ad3207c4a228eb99c584b016021afff0ebd565033e36",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adc4d6df33b14a6b2dcbf9e2df9ee41c12f31f2690f7b198b7ee810ec29329c1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "824b2a646c1c901e70bccfb7e1ee63721c9e8cee7994133dd166178d53e67065",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-from-groupvars.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "463714365dadbb9920967fa83c913702ffa1e5488e86624456970b4ab8928b9f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print-only.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36e6ffd3c5397494980ebfe9771ba624b7d920e3ce3d7bb843f364675fbcddb3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a06edcd61c987b3076806449dc923591867f6a38b059ee1e9582a165f7f6fec8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-attribute.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e38225db2b6f8a1c485a328ad08a8b0c24ca3b017dfa68366cd609991b9104f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aec28ed22238a57708d1c7e4b7416e3fd64b91d7ea679df3d511d0ff4d78d794",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18833a8f2893edcb2ae267ed7f0580d06475e7da070c4eecabe16435fd98b0e8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c25c127ada5a292507a55c36919bc801edac4bcd6f85807727e1cd76e1e5cb4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-element-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5e5f1d03f86bec1b16af94fea829c2303a1fe38050cd7453de87290e7b2d0dd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dfa0429b7bec0c9e705c2b649cd7a1c5a3a9301984b6b61ebb93a932acb0b60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b88157804ecb91179f87676a83ba7980af70efe935b17d39c18d05c298f57cf5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c66414fe6d4b64014dbf96c994c07cd97b38e03e6f476cc0d9f0ef27ddc96df2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63dbf18a99b1f1eb359e912bea594f9d2450438068213158c145d3c815c9f0dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "092f114b6153fc381d5d630030693b486302846126e1816a85c29126d5f30711",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/run.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e9aa1aea361cf01e7a82febb1619d20f1e3c407e7c47f8665f4871afebd0324",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e465cc2f351b4760084491e0e809989bbd30abd6fdaf0a2db7ecb3c08d7aa54",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "289c6ae0bcdf0fcb51f032f32d7fdc531235de45135653eabd0c42f5f504b417",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d6545c9d579e8dba7ed7f0752d1a320f877000129a7e4a1c392c3cf5ff594a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6513e6ddce12ed2e95878f953386ea898ad88bec8085158c8a422d78a03a4a5c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "840bf39cd3675cc46dd72ede6b17ecb9383f97e085b0c194dc33b841213aa886",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "584d2eae00a6663043e7836bb071f11109eba5a06a83f58de8d8b50c501da163",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60214d35e8f2e5c325430cd80c635e9d690698824f6f957644bc8e237e1f9638",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00cfbed7e5d4044baaba437bc0d2ba800f28300042c01c53047d8a4894078eef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "040ec78afb260ec34ebf5cb4b62cd2203fe9e579f33029d450a92c2f004d3545",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6109411f740064ab34f8658eedb93865183bdc5b561ac184e3d6ea7c8af4ab6c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d95163444521b730b60aa1b1113c51a350afb9ba594cc6a9e319a8d298606f2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69406a13751afa1a236adfa8a4f6d029018ee42986ab25676885bb54a0d5adc5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c598bdac114d3caf6d677960202338b5d28fd7b4597d346d72fc543c8e125fbc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62bf42b117b74dcc39491f0501575a090971dd200281a3d7b1323601f8429365",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16082bc73fe6e626012830dcf7ba97d4a959ce48c7a24150ed7f8d573750e140",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ext/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0825fa293c525220ac6e3f52f1fbd7464a1ea91281eda9fb3dc3d614ba77f815",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5159fabd2f8230f1abdd58aa0d0e7f2db4d2bd3cf66e47617751ea85bd2009e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c80257da29e1d730786a3377fd78936022c5e532bca726dc8c4110c0eb935428",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/library/current_container_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55d65193a4257af586fd5a7a26d6066b7714046a473362c2ffb6aa66e97a1c40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b108947baad9d785edec9c70ad077c5c2be85d231fd2a85f87f891d620ddbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c53aba35a990530c5881348a390cfc1710d556884de88b5e9af55c60be39bc2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68d72b7da7948423718b7f99ffd1fe8929bbe00de93feaa5b1a11b73cf0a42ca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9ac4a47ac2e0480efa80a0503f1de8bfd8d9e451245eafc234418d9c65a5bc1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b65172fbb3eb0e0ad28c2f4a48dc2e3a9809c15f190088ed6949b3d83411e633",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e41822094a550a20c4e37b58d7716e2119a788c6a5005c727d3f2cf4d22d9ed8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ebde042b81fed14a9bf1a133f42e3ba5167e0d7adb521300bac2dcd21bfb7a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e00b460f6ad8bd7ddc6ff35a7c4037c79748a7e5dbbe4dcbb23fceb11245d0e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d4dc3f696eb79c17feeec8852558122871ad9032d35cc2aaa23ec96695657d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54995627d290883d96453180079b17e71a5edbc58854042d358a503591603830",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a53c17beba7d742f4a9ba1631ac4ca1c3c19583e708630054895f6f4f4a1d86",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Ubuntu-14.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd2efe82406f44a033bec20a84a7e551add444ad880ea69980b90ceddc67bb3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c0c4fe950c7c1a4bccd60f47ad339c0083f5d0a5c0d87fa6325d0fe93a132a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc6f67b78242bc995e9c4442a84d78086e24d13a4c5f2084f16e520dc52fb71b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a41f0df0b9e053484f77627a2b303b5697df12a0d434c1af7e687d1905d64cc6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc8d80b5bb41ca0ddc3aad2597424cc11505d8f761c1a4c1da0ef12155c84f31",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/files/test.iso",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c5a1719603516790e3a007e17f28e28fca7eb5ec8d6205692e906d4239fe068",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97c9fb677d6c99d16680155a609e67dbc730a5edb034205acf3f6d601408b356",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "788cb943246fe446a5b54f03e69b97815d6756bdb4fdde1edb41790dc4119e06",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32eb401c33a73c16c143d35ce041f2706f5ce740ffad964e3f3cba7b9fb977b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/7zip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "975b4509c955dc0f879211315f91331e092fa2a713dcfdcf2256454d1dfbbaac",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff424098ce66d25b00d403da41318c82f8aa5078bc52d4cdf0d9507c654ec141",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6af019b386e91ae5e5b11ae3e5ffbbcecc2ddb79271c4573a9a0d010bd28801c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b294d90c2772adc4f7a238290888129b12021d8b6d23b97589f81e72befac2a1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26272cf027dc30fcd95df70e0b3aa3656228b0a3285e48aae289bb649dc4dc23",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff54674472190ab490cccbe6e71e3226bc5640b41776bc8453592e587da5cd13",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/files/obtainpid.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae4a9adefeed72dabcfc60eef1cfae673b7f0af5a25a4f0df685e431175e0b24",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b40d710487531145ca5848868a5985ec79d5015c266603c733143d1209dabbf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/tasks/nios_zone_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22e69c13af0623593d880c24015f6b8990c8d3d8f03bc887d10dca8972b76d0c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45952de5d7315102143da8ac0862d42822614c6633602832a1536a9e9d7fbe84",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_zone/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb79c3d80cffc120afea8cc796848a327b790dbe8b6702bd51c1e2a5d82d906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00b89accfb1e13ae32acd82783723e64793a7d4870461b042fecdf80d9dfab0c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates/input",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b5f9a20d102714792f1cc5d2eb6b87ae0379b2ce632d3ea1bd983b13a2d819f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3735d01e792324eb746207de73f90bd1bd83ee3aeda65e6d733d270f86c5ffc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6a7ca46ab6550ef486d9cf5d5c0ae30a864195b65c94709a43348217bd829a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/package.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4bdb24595075fcad9c9f2340e03bbcb9906cc12ef1429178df485b48623ac4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "218b7eb5fb882bf688595e00cbf1067e840558f47c423a124aee94abb0bd9af2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29b468e6a619569af108d581b1fc0099511cea7bfeacd2e771817abfcc17db83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f341735e70e95d1e7b23ea160010b148bef2e38f2e58daf9fbf1131318763fc6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6459cefa9faaad2e3f255e58ed076406e1e8e11a3dec495943e5fba52ee8b737",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2660c638db75d33c27be35b4d6186af2061df629d01d9d047e7a7020448a44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "709ca262a5f562c0315a8c0b2b76967096df58030ec9359d41976b315a98b14a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7177bfd97afb15fbaebbf2cafcb0db3e2b4728c2dbd0c7bf77eda8404c3464a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37a9c91c779fc3095ab2e480b350e360bb26988d8e0bd3b8f546ce8b538b0f8e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e539831c03045b807895fbf0f8da5f13e1d3a6a9aed78d1f3946a0fdf3678359",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bfdff73d0b467666b98caf7ca46c8ae4d1f4d5a21c08d83484718ff3a768a9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/templates/package.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "664c8935b09b77dee0b4068cd201aa26923567264026632473acaec6f90fc4b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks/run.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd237ed30ea0c995824cfde4889d692dc5522fbb5fdbf2877fcbcbf2aa085dec",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0defd354f78ffb87ec9cfc028722a766ae925a8aa9a968dc77fd46bba9e9092",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d6545c9d579e8dba7ed7f0752d1a320f877000129a7e4a1c392c3cf5ff594a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e99bd95e5ccbbc3157a4036b5a91bd46bb22457eca85b713b441cb0d4f0b9e5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3bcbd121e3abea9835e7f51e81b2da5a98e346a3d80e1850726ea23910201b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks/test_host_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69d43f186ad9eb8eebd52cd95f28fa4f660822fb68338c6c6a5637bee4570890",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0a23f6387c3d8a125b243420f2030eaba632664c005a3999eb8fb029e39cb8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2660c638db75d33c27be35b4d6186af2061df629d01d9d047e7a7020448a44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "977d4f865cde7124f68c3c97f31776d15d86897a843c7fe9e815134e2852babb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "061558b412eb0734d680cc4498c45dacb89222065c292631fe4f9351d9f74eca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61aeb2ebcd4439d4c8f2f150ffabf461df17e0da5760297e07790b59f6911506",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ae81d2773a21629eda0387590cfc79f41514fe513f8f16c80019a66f72da26c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41224534c3fee82c517472b1ff3af82528eb5f67122e2a4de23a1e2a1ec2d73a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99e8d0c257408d7118d8eec341741a8363480ce4ec9552343a9b7d67a8d0564a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_db/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11d903913550d93aaffcda9458d70349ce6e703cf071e922c8124dc24b7f9cdb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "818c3c4aa67f6b54b1258d8594b60453f3383570316896f970fae6f5aee19222",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64a63db5e3fcdd9491ae50fb45f1e01dbcbf5e8f5d52a89df9ff132226234f63",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1e1f3fe109406b19b4c82eaec06c7fdeabc3e3e627eff6961859dd8d6f28366",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6cc6d53c9dad7749fa6cbac4a8201d0d26355ad194e184c5148a22d048d2e0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76cdd5b6e08bc25eeb204177a2d98bf0918696f8a78c580d923c1cbbe74b5467",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2061a2bff39efefe2e0761bd95b22bc84e389e2cb6b99b06f14ac4d6ef25af9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e19ffa2de17cc76b3329a4998bac08edbea141cafc780cf034c974f539e54bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb854c1b495c50987895fd8a267dffe50c4521cf73d6018df77d511c915930a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a448d01a35a1c112f7d370e8a29da2e5d960c967c9c80f2322450041aca81da7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80908756e59e11150798e8c3f44acc549b246190079b95c5bb55e467cb3a1fc1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdca29ef497b21e9bfbb51f911df8c1cc13a3010965f5349d4cc358b1688aff1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost/tmp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18dc59bac231e749997e1a5243db69081f63b757fd43c37de017e20d58d010d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "057b584d376d5f131aaac750213894882168a3efa6a94afca9156e6fadbc66fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b981db212298d31645895377cac39ff68ed0d739270f19443250438ca66c47a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "768ebe421dd904b7142848f4bd6c9defa722632d9d4fdddb4a489c8ed755b825",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5feee27e45ba9e467becb074e8789133b40238558c0d0cd4dcc85f50e96017ba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69321ab157a3edaa675ba5be65214e59d93f82f7db0cf5027c44e157576b7130",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53a7b04dc173262bbc190da863e254a13484bbd9d8874e320bed48ee26c891d9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b4c1cfacea788e6b9ce76d8b4d1d3c0bacef30ba57e5af64624c72163c12509",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a70a2f2e5beaa52cefcc93461ca067ef2b665b859a468cbe74d27464253bc6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4fedb50a382f0153f37bb273808a4df282bffd4b35410dac27a4b1a6c961624",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b3e39d80a8a59947b5fba38e8db942a1933ffefcef368cd13a5594fc2f65668",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8abd7fe65aa2234a28d02124d7b660f17796e64fe4e6833b2031487c9e46a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e6986d91d5fb93bae8843f7df3d4b39db79181fa17c0e38118e8052835dc2c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a04df3a152ce62fb42770e18089f0ed3802d618ae9cad8b8b57274e8875b4cd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_replication/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3285130635bbc661c3a3e195cce90ca914de220a86e94b6b423dde6a9ae909f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates/launchd.test.service.plist.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b2aa7254c3c493666745f80868f5eed3ea63089ded01c1d45a97a84338c8585",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab2ed9efd43886160eb67c8a7d31b949c203616466c2a6762604ff2074292406",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/files/ansible_test_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d10b0fcb1c6ec6b0e5250821e1ee5a2782e34cad5327b37660eb457400b455bb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df6d13c8c4fa9d93a8d958e577c8e92b7789ea31806835c15866047bddf1f82b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "785e14037edd887fc59a7cb2bf4d384dc6b67d54eb2a3683a133056b6efa3bd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d982202029c12820d4d29bd3d35d5568f7370d0d8fe292e81e2779da393c8af9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_runatload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8299cde6cf41dfe095828bf838e72b4f96743f5fd6d4a06690f99b350f29fc61",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_unload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98aff4473c41233d041f404c30be7db975f6b552acb22a9f2cfdee5c139f199e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_reload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b20f13d5ee7a2aa15015a8155b3529d0b1a2cebb1d49dd5d7465bb37874c4188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_start_stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "523ce654be36680e551275021267b27a14956266b06feaad9755f2b0609971c9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_restart.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c1d201bbda5261054ea2665e5d2e6f31a61c07529a839a14958ad71d58a59d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_unknown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd41fbcd89bce7da78fc8f6a1852e489a529c82531d06b6fefcc62ec58b55db7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13c2574fda72b4a025e49f7eb748cd9242765bd62b9572d35da396a74705e05e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab43083ade2589524118bf2f9f31935cb691a2a73ddc7e5b781f344822f0555d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75ad1ca2f920de9b6df6fecc06134026eb2f9e59cd559c3cdb214e88c93f03d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7568f2a55857df4d6b83e7be5dd251131e4e092f6be4e74e479a85280ff9a1ff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6199adb74eafdedff83e41c4377b4c778d1c10773461f479c3b63eb2de90014e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1aaff5c06ef04fcbcd51df947fd85c94dede66e35d188166a03678720ba6bc56",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/files/wildfly.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8c07bbd656b9d303974d536af56b75593c9b831d17ca17ba7af2c14502b7be2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e26de7ad0b193acfcc863b4342855ec844466c84d864c21da7aa05c0d00cfd7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfb6a9b45a8f36d652d845b282219a344b7a53c7474b27533e7231a1c736dca7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "847bb6c4dae501f75ec017de8302d70c08bf23548a82058650b1fbd1180cd218",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10a80fb6bf47c167c0d3546ec8b477a32c8d9a92767d62d3c1d0a77132838e42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/files/job.hcl",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "37e149184dfb3c56a86af62362de71f887b6e3d75b9bb8ffab07bbc4dd8aa2af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44a6ed5db359693347aadb92263165415a4313f604a0c9559e0124c030cbbf4d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks/nomad_job.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3c067f9a1599dcc8ae567507fd85091739acd42c409712cc7ff80261f8778a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "485aac70051232a57159d0d734a3ef95088ce20dd3911b7f6f604eb9ea56357c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11923455942cc5d6bf1e89cfec52d38ce22ed832752a317d9906562d6986b98b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "952039d60f73e9747ef9c7f9e54a2c920d800643948df3a468b32ee7955000e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5199983dc3924b34cadcc6d04ea5197f9185191c3c4948330008b742cb59e20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "008395b49feeb25967b9261639f4bac0ce08f7e766019bd16bb5566c2f2035f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ed4cc92cbba18869ddfc35744e79d86b6004770bb870db8622c3ae516cf8fd0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7875cb86e0e75d552d4840ecc13a97c949eb6daaea319fd2f1301aa88701f593",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23e223be5470925ec403c01861adf6a9ce98b6627243d6ad2f58385fb4d694f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6275964c2a579ce7b20bedb38d4699b4c8fe6f353a64a0bacd57377ef3992fff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e2cbb083babb3647ae0eb48848b4f2b27e69f56930046dd5f15ce4c7c99cac0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98a9538a4d2fc1f89399150252e84060730fb20d3e34d5eca3cc91b8fe4165d3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff1e241dd69d370b0e2e74c5d693e13952b88368ff780fee159ada4a4adda27d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b29795a2716d68ba7f457b53a7b7ded35c109dd6e2cc6205a8f0114108eaeb71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95beee985a7146cbdc344e4756e60d1616ea28a341cdd4afe308f044ca832855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6cc39e44674cdd97f45755136cc976294e1df35be4128970367159ceaaee3c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3688529100e39015e96632ce94deca3f35fa32c6dc96e4cecee76484b0e7ea2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/00-basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cfe87661c422f8bec04b806d6e906353dc259d55e6a2b6a18c278a0d3e0d90d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f5f17a3117bb19cf75aa1969b637271124d44e50a776c737e5b718fea131738",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/01-tables.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cedfd25751938db5690276e1832ab4f0f4eb88d92a7454fb9e0d9d2dda11b3d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "027f3ce0e20c1a03d73f749116a209d6246359a999bb2b2b285f744b41fd6ea2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "292b61a61eb9f906fe8341153f7608522fb698fb0509ecd5b3671e3d53de5789",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks/run_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efae7ddb7f13fdd93ed0502750a0efce2e042137676e429f22097d8ffbe6aeb4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "595c566e59d35e9f088f6df364a86f831b79150e7d7dff03d947a0fc61d1e773",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3065c9d434c22b1009b808c53c5221d9e2f8c201f58d4a71fff2db06cf72a27",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c410d3272c7c8f07c20809aba1af5eacad70c842e5d15c825ca385ac455fd3a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5419b276f7c47acaf995267ce3acbb56a0922a4f715db0df27cf0e535cd0220d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b8d53742b5e50fc42d5ae1b7de9d5090168ac4ecc5c6ce06b142a3cc6938eb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_slot/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/EtcHostsDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e75d2a307018b4c24faadb616f679f2873551369ded52ec838b12e43c8c0d0c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/StagedDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f85dfa61ffd1cfd05bd5af9edf55326812150527156e1025a3edb5c72a0636f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/Dockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71bbe9d5a3d7096ac98dace9e355b7d30ed00939355fbc8febad75cce9c1c802",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/MyDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec99d784de7d6f04f25b7a5585a15df27bd7af4127cce9efdd629c0980b1dffe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e26e03bbf9fa23fc684d7ae5e9ee90a0fd081c2cadf5ed666c039c4dfe25e90b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/docker_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a78280245492cfaf6b556f0e299cdb853680528421a3fde3f9b55813956d1f2d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "759c49445307e4668596af68c23738a1d7899ab8e2356d7df80b8ed858272525",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/old-options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d64f288dcbeb5695c7d271f1e6f41eb9389964c858c429845ee8d89f4fb42788",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21eb803420b89ead65f5cfbcf71b72d8265e2baeabc4843bc745ebfd904613ce",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ff27f27589585c9348b29d8e6d10f54cccdee1a30c26f8bc793ed7f525f493b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "effcd9450b3f039a7c09bf7f5cdf6a296456d9b0b649ed5e4bb3eab045da0081",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e85b416aef41bf246e36c7aeb4f107e9a9d3157a1e90e50bf65a5922f1a7e737",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/bar.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08bd2d247cc7aa38b8c4b7fd20ee7edad0b593c3debce92f595c9d016da40bae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/empty.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/foo.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6a5ff9795209b3d64cb5c04d574515413f9fec7abde49d66b44de90d1e0db14",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "692baabab175d2278fa6de48453e9ed816524a85caccc1f98d4726e9d9ae6453",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7177bfd97afb15fbaebbf2cafcb0db3e2b4728c2dbd0c7bf77eda8404c3464a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae4314e8c2218e062afb7b60986203d626ddc8a1320a44de81b7fc25976c2405",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44f89257fbaf385b7b113c10a6e47387221ff1a6a851bcf322dfeb55563a3be6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec1a8d284cdd3ebfbde0cfecc54f6852263cd47f652c6b1a7bfc1d874fdb6c18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf1e66b4ef6c0a2a7d97b312278fad5d954953dbb878a4574b79706fee898aa1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "138eb286938566790257801685298cd92ffe8bd57c6f2600b053b2306d5abec2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd1f9b47cdd018934487d38089de553fb3b43ee71400906964004afd774aae2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "472e36bde52e14d2f401739cdbcaf09c945bafaec718ff016890616b6bc3c118",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4c3198701c97bd1ce2daad5592e08b15ad9f0324f9c856ce16433c73b0e0406",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_table/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02f69582e53e312f054435993ccff3d2a33346c179d77fc23f317d9e6eedebb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6cc75a2bed695f94ea97cab46303c46ad84110d1aedb02fa2a4ecd63bef44b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/playbooks/install_dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e523f976347a9395968278a67bbdb051747355f9825cdb86fe1690e33cee6f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4702a4c867700b03a7b8a47e40a5b0913393946f2f515ba8bd987cc5ecd47566",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "101354fff64fd7eb51fd5c62e064e950e2d61ffdccb042f026c677d5dc677edc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token_invalid.jwt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9292b2ff2f4a057d4c12de49e429ee861bb181c1ceae52726b1125acbae5c22e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_private.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9adb929b5968b9305a591628a4ed2cb98d2aa4209f2da2d3a44a6fb46a17ba1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token.jwt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "560cb7531d1b1033910d828804ef4d7adbd4cef834fcc3d42f79cfef1a5e030b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_public.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21d20458029194ee3697de1327a50741ca22801800d48b65d30cfab876b4ef0a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29414866822f202840f136bb87a3768237fcdcd395359ec241c565086cc0b14f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50094b6e259a76aa3861bc03d275a7944027a770d68565c70acc4b6cdb26cad9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c7bd568c67554fe1efd2ad7ef2606c35d2cf8abd967d4f4c5d66b0ab4028539",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3646da7aec3b971010ae8cf451cdda058097a5868f1f69b5cd7cec3174695bf2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2c8f48ae8fd9eff14dee42b8fed4a4aa5353c50c588e0793d12411cbb3ea1dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "924c454d1cf29eafb50c5c4003bac421d4fa0274ac75850f42d86042942fb65d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6453429add9ae5d5deab70f1f4ec152b00b15bcb85a9f17d7f6fe93b8d108a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4bf53a7077709b80c3d93f1b387595f596cbb2511f5340dc384a21808ea22b12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b888cda70cad6b9915692f0deb8237fc51a162a7db6428cadf24b95ac215d87",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d17da24ab6ffba0b47f51e42c6fba8df9befd8c3c2bf1645b82e5e866e7608cf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28e88d16538fca25e082edcec14046a426c0c27a48969e3fdb5b10fc5d5f65e4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_hashi_vault/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74699a83bed3394f06a3c012f3530d43443a81aa300cce8a8bd5bc4e3bb17e90",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa736d1ebed4a320d02af6397a292c44f243acb4738dba4798ff8ea8920b4de7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_indempotency.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acda7d6e98f33b902a2f78d1389d57a6b268b0f1dd04b68e615603c358a6bab6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_pvresize.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10f5e2906836542bd73b121fcf6d93b69395e142f103c1abf754af5d5fcac44f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73c27a48b9413acda1ebeb7276920d77ed66258741e3f7c8b95424dda21bb5c7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_grow_reduce.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "564cf97c44900d4e3c5a3148a900b4577927179cdf8732d6958cea9862c88ac6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9cd523d7c8218d7b4f296ec486d2d933d4c3e9051d4b25e6f1251bf70462e71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d74111e18f3d38965d19bda2998ddc9bfe25a34dbf5c26c2700c70c0f645453",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0636fb6ff7220ecedf7d3b481e221726663d418caf6fe7e8f3f6b1acd30ce42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fda0abb2d8bda4e8ca6ea30d7994b22a90871f7f7a3aeb7fbbc86c1d622fff5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883dc1cced1a773013adcf3ff4fe0b0c07fcaf4bef1a8f34f23abb295abd037a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ac4a5a417babdce80e26d521e03d1dca75be5f8423e7a69478ca74e966796cf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/create_device.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccb7dfa2131a3668d80c8153ef8821f509c93ccfb29ccf2e90b83e45cb2747b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "118e41a3a1b059cf38442277aaed2df60df40ef85b552ec56b2552acbd115dbf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/create_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00201c187819fe86536b88db4e5c4bbe6b5754763ef3ca84ff705916756c3e73",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/remove_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3931b6904e08dc9e152cf5a123efbd3b7f28df47a49c88ec67fd2e04ec022449",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55a70f7ff94020af40ac26fb36a89e5669f6ceb19447e7017b271c85c0e7e25f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89574b4b847b9f3856aa58220ab1df26bf11517abe25b683576fd1308102b3ac",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efead6e1ff36b22afa53723f82f74b980f6ec2fcb899cb29be017b9caaaaaf79",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6920cd18ed5a801036ffb67965b492631a80e36d9b800a3bc3ebe8712880c55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a19bdcc9c978078e4987d56ab9de1d5f35af282821131f03c452b7eca155dd27",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b625673c8e7b2a5dced6117b01aec6f25452246117746bd23450dcf389a61883",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b34f7e77b6117dd66a49957c17656b22e5dfa444d8f93af2b4e1d7f1450a5a3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9be9947125bd8c0636c94eb61e6108fb454372dc2babaed5e2b3a5bac32299a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks/test_node_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c154d0c71f371be9e83cdae504c3584a1508ff642000e9e99b5e46dcab4680d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "005e4872497ab2f91e82e7e1cf8da7b80027aa2d791528486ee7c932cc939176",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95beee985a7146cbdc344e4756e60d1616ea28a341cdd4afe308f044ca832855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/files/mosquitto.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b90092ad37894754edbb4c4ab997e12f96f8a93b26ee58dd547cda7e1ae04a8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6384c76b03ae445b891167e13407ca4915637387a6a9bc6c23bd1d3d92baffae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "538431cedb49bda0ca4126c7091f3a46cf282e50094e14ebcde08e17aa55236a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "018110439f4fa79f060ac894fe54567fd9c3eb410aedbdf0b4aaeee1ad5fd705",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6ef035de264dc97206b79d51da4d5f4406196e024f46b0ee91cca44274882e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2355f9d04e7057f8e9ae5ec535d2da6d58c4ac171d276a0c74fa9fb4b581a43f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e19ffa2de17cc76b3329a4998bac08edbea141cafc780cf034c974f539e54bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e631aa1aca12eb260bbb34bd7f771b07a965d504095be4569a148f400ec2328",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78af5771139e816025b05aa39eab7ca6db29d39336497ba8e2f5f1a262285635",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "158ad00ea1445a97e1c5b6b0e12365134587a090b899a5b01bd76beec2dd8e22",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dc95ef850dbe4e8cdbdcf90c74b390fcf1ad12cf3b15de1b037c5a06194fb28",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a4c50dabbd36c9c08ae74da3171fa2953eb30fa8b4ed2eb6ee31eddaf6938ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad703f7bee42e32b4bfdc9f79d91bb3e604d12319eed16bbe8c575c691c46290",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/ping_log.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc5ca975d2e3a0c4289c613b8a1187f1bac1274cf1a96b13d0bf47bf2fb7443b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc9f592affb4dffd770f50930869e51394e7907cf5352f17a48491153cedbbf0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "536a08c5b55f0ea5d5b58de3d90806e1341df3c8c1c568dc2494be42afb1e73f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "437b346bf6c3dd5bc800cb5df5e71f2d9d03f332ed62c2de0e4a9e2dc9099496",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d128604afccb61be3dd2a49ccb142879c91ab4786e985688cf23aca7c019b16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1189f32404a39dde724ede58ccdbb9a826ba20ef480b404d809a47a2cf1aa856",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa80492e4c8d23f92614ee89113bd2cf99fd9b2c37d2a7e46cd83d2c0d367e0a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c10c06105ed9bc9bd11bb2dbab4d5bf6b6033255eb8d3f6c5c8b87288afdbd95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffdfec2d73131e68feafef09bb0f74ce03b2dd27508f5abf86c9c961e8e72f6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afb1ef54e7d65746fdae2ef0c0fa31b3a08b7b53250b735c59cb0907d82e6b49",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6bdfb6b06c04764726c7a1ee3c97ac38476d2fe0d21de694a7f43d92ac48c20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da5a970704edf3a78de91acb2549a101cfadda8793c90c65723938c18036e3cb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e71d17cbb99fbec0b80c86ec63447f1ffafe17baf914a44ab751b673145626e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01c1f7f0d634cabfe23bdbaac0559d84c25265bafeebf3ffba6175a6e90f837a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_schema/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db462dadd7106222bf4e2f1beab06e522172bd5ba2502cb702aaead6a06ac716",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa1a76125fc665b4623a4d8720fb03e512cf38d631a2d8593641410b8cf22626",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07e21b4fadf2da7cd49f0116e21341154d4ce15404a5defdf083b921516ee48e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ded4b509bca10974610b60fac18c860e1e3e4509ee28179dbd027a21fc995c43",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/inventory_diff.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7613eb330c526559fadb558f94d3dd89f5acb2d77caef47801ded1ba060a457",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/test.out",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46bfa629dd84a66c7944bc00062d023c5130a385c9f0ec7e1670d13cdf2b1185",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/constraints.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8b88db827901e5ef567c9fc5cc4e86bfd9a7b2a0aff3f2f63d95dd4f84392b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_kubevirt/server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "845ab062dc6a14bf125ab101b911f30523d988dd9d955278eb709574ddae101c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/tasks/nios_aaaa_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b7293e35f42a9951679d1eb30c0a73c5b20e8563373e81e77ef4ef149145abe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c57ee4d83fcddab0332bd9d3bf0a0b1ff0f2b3d6cee2e54ede1a117fbe843522",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_aaaa_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aba3847ffd31a14704ebeccf574c11695676707f74bbab7e2aaa44698bd6483",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce9f220650b042c8eb5b61a02904715882335a702d41b7f5c99d1e907c8daff3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6f0b020f9c3d4b1b4e1a14919644cc6c469401d2e7fe4ff57c75dfc3e366131",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ade8d31897eb82d321318493957dffb3422b03c3ef58e953bd8ae877ccce3e23",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2aa69e858ce545ae65624bea04459e7be706c4c3f1014e0a5408501b064663fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad35cc47d7d11c0e80fd0ba8a6a59faa5c3f2ebd04e6b5d232dad5cb737d5770",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a2f86a545e40ccca398a0ec5c0f743d344ad204c5e1c3c9262a46461087a007",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34f6549acdccf52e5bf878ead65e606a174a6d06fc971e2e3d2273abf16f7068",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c3e159cbba9dc6d749a2b27f310a79e73dd79f8be491192c16864e709c12814",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72d91645ecdb49c881426a17d8a1af014c3987b35125c0f83ec648b5e8d665df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f61d2216516f7aa1d8d1777150bd952c854372966c6aff0e2ce6c113851dff7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e56a2b9e428cdc2082427ee41bf27cbf04d172e83243a4c0041df90208f8ef2d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2992535f795ebcc2df502069bc78fb5289b8541aca8b60fd7577787a9e2e317d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "067bb6fe42fea36b0e067b4ed9a0b4e318c9c7549ed3fdb918eecccb9107e494",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d92ee906346f3f3a6b6d9fda64bffbc67594aace3add091cac73f681b0fff16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c35017ffb24489c0276a884a9be89af7fcfc74fc49e70637a2f5c09bed7d7e46",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48529064bf775d466124dfda375c72d62f2ab3f10e1710934ec9ba1f8dbb9c22",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c7e54c6e2d23aeb8db7f17517bfa8553c6e3a1f9038acc62aaa004ffb34d756",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/docker-machine",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e73a5b2c73dd975127cdb8df5182de3cdc648e1b87494ec81e5318e311d77dd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8344cf9147fbd40897b430d17348e04c6882aac64bc4f6888a2a575dab103a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2441ac1753320d2cd3bea299c160540e6ae31739ed235923ca478284d1fcfe09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcb3221a68dc87c7eae0c7ea50c0c0e81380932bf2e21a3dfdee1acc2266c3f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce3b1e94df8b1a60cf5999e57e0816a0d63819250054cd48d7fd02e39a597be2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/tasks/nios_cname_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e5e3f2f0b5ed874e855b9122802b8706faa49150d1d8d4fe2e9d46a535f754e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_cname_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks/test_docker_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fc563ad7c63d710b97668eea1c7ba38fc4fcfafc97b2a1c9509f5380bd5da98",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed708322aa6495a9f796c6e69b147861a18d54b85393b33715d24b5820bdb434",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "befd51c5287c06339fc87cf5014776e0296f0df90df2525948382f170896f4b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa78306d233b0651c0e88c40ebc4974b38f6ff3aec34f344413a4db4ee3e785b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b3ddab041bd891f70f2d3d8c83c7480aab5fc5654165f2e3cb4a96bb2e1a5e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5d4da7d75a6e80bc78241b575d83e252dcbe32f9fd3770e05d808d896dd6f31",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8f4c57a73a1bb071fa3c931f0cddbb6b8dd7ce48e4e1afcfbab2a6d1e915482",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "663e2ade4e3c5f2b5e2c75da96b8f596196a21fe8bb4ce5fcecb63e2f664d0b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30586aed9fc7d67db41330928b859804e787699cd953223ef14c245b170d1a07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_publication/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16082bc73fe6e626012830dcf7ba97d4a959ce48c7a24150ed7f8d573750e140",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1374dbaac0fbbb8c550643883dc61e1346375901fe96cf98b1366d2301261384",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0e1a940abc28440c4dc0e0cbb7669f6c645a6d12d2fcabfb4f407f205403e61",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec1a8d284cdd3ebfbde0cfecc54f6852263cd47f652c6b1a7bfc1d874fdb6c18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f02dc98bf912c05a4b6dabcb531b0b5ad98590db1d400548ec68e17f9215edbc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/tasks/nios_network_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e8b1f0c838de0cea2151ce497b6d631b0aefda15731e2eb6bc0daefdab3774f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/tasks/nios_dns_view_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35a60985b368995a2347da83a8cbb30217cf2d5a3b71204e40f626ef13d228bb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76afc0c86fcfb0ea0c537df67faf583d067ef4225281229dc96786852bc2c4b3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_dns_view/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb79c3d80cffc120afea8cc796848a327b790dbe8b6702bd51c1e2a5d82d906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/tasks/nios_host_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c6efd391e887105cc040b3dfc42ed5e8ce4ce3bafaba8ccd2377292a27de8c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "140d550992e4726a0053e8e2022ddccbaf6400fd561c5c1092dcc9d6cd1d8c15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_host_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb79c3d80cffc120afea8cc796848a327b790dbe8b6702bd51c1e2a5d82d906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "927b06d04cfbea2a01ee32ae8f9edb6427c80804d80df083bf0993bfd334425f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/tasks/nios_srv_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "983b84f88a05dc722a0ef22e4a2deea656cf9425f86a40d10abe1c3d91cfe51d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_srv_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e68e68eef9de19ce29e372127ec2ff42dfddee8af5934e1c6785b5896d540681",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "527b5485ba31c7fde17710d0c5af5f7c37276b2add42c99ea3e1881e4efb8890",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2af2ea50cc1f08cf1944416d4b1ba2640a4674b18696d467b0d78ba777601d9e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "122d3dc3384d2031f9179746389f7641fd35f9bdb31a062613670f8586f2a5bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/inventory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74bdaf35b547d38d9a2d81fb57baf2ff9fe88525b0de1cac491ce9fadcdec6c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ef93f5ad23bdb9f34c55ebc0d1930f4b79e4717c714848076433bcc0abaa96",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77e77b12424c855ac82a4d2febdf814580eb55f3d2818c22948a425fb60e1f80",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/tasks/nios_naptr_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "943fccfaaab7641fff4e8d2f809830fd3d266504e5a093ba19423fb79c3ac363",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_naptr_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a3ac16c80747874defcb3b8bcd0cf796fb847b440ee5ae240d7b5b71cf32c0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "173148e596fb6f57b0b03279f0523b75b1ca7d079c9705c022100afd5fd58d75",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7eafd4ab2ee5d1910dbfe747b828c4d41ef8f79b8da2bc7b48523d702541004d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9a64a6a28102190f47e29d434092e1ec9c1f0b35b7c5426a893375853b8fc55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf8da428786df9dd0bc404fdf0bd4e098a32337b1891894a24979cc78e0c5051",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb229ee42f239ca1a4b9392cf0559cfab6191b9ba80f2df080bb849f693c3d83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e8c4d4bf2055db6c7160889d246b648084fa9990fe4b6ff4197e30ebfc62b42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b937926bdd526ef463973b166cde34c884fa779bd898b6f63e901ed7c0f62d5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/test_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f340fcdcb004de2205af050b6077e89bb71f1ce08b972c4deafa7e45bc3809b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "991cb87e99f0e7f52f7416a83c33bb6eea974c5d884023b0b57c4f99bc70a37b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2a322dfe5df22a6407fcf5b91572ec5b232d97d8b4ba1a6af45bc4fd7baaa60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4f569247571c83f6d6d0520e7efd71fa8fb7e6b1f66ab3560a67d87b2a84768",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee06f343a7d6949952623866c8845381ed7fb0231f32e94aa8e780c244e38d8a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c6d87783ce94ef757b0c55298b080c8f50f00fe51344baa8d122bbcdbbe5cd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7c72c69d6ba57e1e7732c59811a990859d06aa17f8d58ede5760c79e825a25b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "852b469909b319e64fc4f186cbfa812a7f72d389edf3a2b1deaa45ec1b626fe8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f6109e8ddbb7c2c85cf0d51119d787bafc9d9695dd74bc96d3385fb0a1454d5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c028125bc1be296044e51b7bdcb04231cb566386c2f935871f0e40b4947eafcc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f1569368ad85f40153d66eb99b3490afbd88ebfabcfa36059682f81e05186b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks/test_stack.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c37ed33f4a92c8e525b83df0f889ea522208bed08f3196a7dfeb3d017b56737",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a50be5d8af58b5765d4bcb61a594c13237d2b250d8d96f23b11358e23a17a48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be09f7acd3c03ff517ade5601bc6a55aafe3c0d8e60fc37133f1e24d69935f8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5310371ca64df3006a6a5d2193d495509cdf281282c8c17d238185659b5f2f5e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_idx/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee890763d871e913cc6880f76bc950cefe3970916dbf472de7fc0b6e72d1e44e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/tests/docker_login.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81cedf41ba32f65a6297cdabe78fc67e3cf5152fa0ad3dcad6e2d603db5d05f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f2564eeb7cbbf2be8c7aa444c2852098ee2c07d0d710a85c54c5cdfb819bd37",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "effcd9450b3f039a7c09bf7f5cdf6a296456d9b0b649ed5e4bb3eab045da0081",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5f9cb832759f98caccd011c766cfcedb8761ff41ad75e7289abeab263271a59",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b40474475308fc306d7b4a97ce1ce0d1a0f83632cddd67fe005e535fab8f9fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2660c638db75d33c27be35b4d6186af2061df629d01d9d047e7a7020448a44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acf40ef4897d85734cdfec310d31a05515b0552f9884e6afcdddfa3c25b57b11",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5467bf8784847f9ae6e9da39e4935a32012900c7423e84e43560e588911c2e9c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "635d2837e0eb48e3954bf577b19ba39badcb0e87e54f6a83641acfc4b5fb47db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/templates/monitrc.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9de9c9c884d2080f8ec61a09fee4e1b493cd6f76f669bc866daaa1637c3b16c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/files/httpd_echo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c7a5c16f2ae6c611edd538f3d0549f89db33015ee5e7cb9193b60d0b39540c7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_reload_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce236af6a0d981a66b001afbfcd7d45d7544a4397739ed45b256e9c5fc94da81",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b71412a837cef2ebcc536d77efe1d5146ef33526791fd54c2868eb017549b541",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_errors.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fbd04ef7bf73505329dd0340d28330b1dd8f80564e649a3d217844359f9d3c4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/check_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e08af6eb7cf52766f08ed7e4cc9ac32c9974eae8e8c47203e9fbf89337826377",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf46c8a28d3089d91b851add3f68540830af7281cd848b64fb1e21677cdcb8b3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77856e3d9b710a347000a4df4d7fae6622f56bbcfe71a200b114f643bd2bf594",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f50efd00db8c2d6177833ea2ff8b473fc4656571203581300af933f6f30f9635",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/defaults.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd17ec0f116ab194e2dbd313fca5de30362d09be9b48ed4d87bdaa7df5ed4f7a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/CentOS-6.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0116067b4db69e9e5ff4178810fb268021a7d89787f3fe692863d78a4977362c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c965891bd86e77ca17e9eb381554259b5be20540a005a4024bd405a7b398ec1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e8fbc4c57e31732b07eecb5c841956fc63abb50a723f77779e510b9f118e8bb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea523c40ea286f1458a7f4135dcc548e50ef105b03aae76c11696661742ec2a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a68f6f094d91bccee8fbf7e4fe4bf3537d3d142a242548f83e563c91279a7606",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ca76bc55b0ed8540c4f3d578884ef636c7b66c1a1cc089e9be629216e45df66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d1442f15c3fbac6334d28201f847f35fb5a41683c822d75442ffbc914db2ea5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90c100919f8366f95c238f75bc3efa5cf2866905662b8500d159f8a70f000b25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a16cb164d32705033b9e7a7c4e9b8050de79c561deddbcc8603e8d0d59cb563e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8000b3819e84f8f1af575e137e4f478bc16cef5b0b11867f4d348840ea34bff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1d8fd0e6645d939cf0fc5a67738039e036f06c540efeb8a18bf9fed779ddb40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e7839720cd873fbfbe855a61c55f4d69bf5154c420a5a776daccba0db0326e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be49da51a69d0f8af9ad8bfd120189b95aa9feb2ea00be9e2f6e06af3a5c754b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a64e1cd8f108335b01429db1e69cea7470b3399c08f186092377e89c88c55ac2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks/ssl.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e33b09073258235aaf62001cdbe34257c2a25412856d1fb2dbefac18fa4c17f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3aed68dc0d315a161453b95ef5d5fc2e386fe3569386bc1620128bd59e955afb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd40f6ab072c5b9d5c83d83595fc6a6489dfc8ddeb4c470b01d8b6b3d539b361",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cec5e777df87e1ef7dfac426286cc5a26d3ed9bc8d7e4e3a8c307f6d670b5edd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83efd43f61060d2f160e5a2fa8fcd185029672112068fc103f0e35ab384bb8b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c03e2e8a0629d06cb1bd91a6e0dc3b72a3079d5c045d07f09684d63e2f1759e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d202e54ca2923b961c8d293fcb733c3a7acfa5aceeb0a524f230314e760c62b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8b67d7d2dc8c0caa1b89de5338dfabcc75e6480ecc6cd92bc26da43affd9568",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40a6304f5e4cf6e2baaa10718ae657c1ca67bb1cf127bd971b2a438d6c64f215",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44b26cda219358db0bdac8d4df06f14be1405c0ec75c9cd066b79a84fd97990e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64f6e20d24c3def6bae446791f056131c8d272d6dda5f354ae63bfdc415e0819",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dff2ca237b634d4731dad7c812af330acd802a9aafa126c1ce623d80a2330b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "510078001c894619b1437c45e647391781d9fb1a17bcff5cb26d7939a4970a16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92b075e3829177e0a0163e38e7a65f108084d520ac1d4f55031c4b574654a7af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d202e54ca2923b961c8d293fcb733c3a7acfa5aceeb0a524f230314e760c62b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be5506a46fa7e9d06827fb16fcbcb51093d07c4e50e476b928bd285e4d3d6d60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "641d72a4f0cd5bb640de8ed043befc0cadcf9c70cc399f0a1485483e32c35fe7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0349988df512a65575f9594c62d6d8a0aa0cea38ef60f75236e6c9c1bb075d58",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb0b5904368bb749f72ab0dccb6452fd5b92903763c9c7b02c654ea2a254d9d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dab77163c3e164596e15c1eb1658f88196dcc1e92f38f067c652d3c27756025",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/files/gitconfig",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d709a94ee4ce2ab24b5344060dd6553680071da85b7f216511712d365086c68f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d202854e0c11a186e10df5e9d10fd9c4f2b6496b99a96c54bbac94a6e5a99e1f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup_no_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbc1ff31c8d1443946d3b18fd84afed7e2b83010fdec57a8a7ba932ef36abc9f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19413ce550186861d20f0ac1ffd3b060b447f928a7f07d993abde073258d6b57",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/get_set_state_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d760b494536ba9e2bb8f21f7f698dd0d756b07dee3ec76cc0139ce128acc2d7e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cb5f56b5a75b797a3dcc2278ce7ba1ad0f915aa131aaadb29ab299cf59db5cd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2973059dd8bcf43e93a5cc0d05c4549af580b25e040d30c29c88084bddc6af8d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/get_set_no_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39b3c1d932cef11b9ac976df58221d18809a2e444d8f93f7384370b1a14a0a12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7e7c915aa9826053a80aff5d8057ec95c653e83075b7b7bb073977bb5b6a9f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39fa257453645c9d0cc63980b6cedbfc35be7891551ae74a35d4eac6c6cea593",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffdad24be10feb580b2e508fcabd264f5a0ceb36edab75195345c26dea6c554e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_no_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f31666cf978aaf14e44ae515ca60ec039b911b8e144ed317135f94d4e7ab55d5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cff479d1f3b0f53410f2fc7771227864a3427d0b3b7c7cbbc49b5e1f4d19a74c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fa6677756ae0c25fc29eb8ee5b7e8cd9cb31b303b6ce8bb053a96f57c1bd874",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files/env-file-1",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50a89c391ed8404416ef6ced670c291e4b9e6ec5f509fbf57dc2b0385912b166",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files/env-file-2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cfed9ac20bcf4e6325702546862e8347616d8da6bf8714a6facacc4fa4463df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4955832d7961978a22679dfc1b4faa7bae3488a5d71c7400b851dd8a44c6865",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fce93e68feb31ff83b2fc4a91c021483aff5b258f267f8201fd2076c9d5879ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e16dcd0316cd14ffdf7410610d9e02cac67e939d690d5257be2a6eb205bbbc8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8ce252c415c957a8148ca9b7aee0d230cdcc4711d976b7e420ad69a99e735f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "401715167deadc80859c34549304fee97eb0a4f25b39fb392e44761184ae06eb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf90f8164f952b47302a1adae64b7134aa66f552203fcb3950e905f8386765a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27f74fd187ebef29cae2792ae2dae38e8aba279a5326b9b9e28b0824e634e74f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9855a7275c126dab2485d171224d5a16dfea742f8e12e37f08c1b9f012a394af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "162fbfaba5a2b6449aebed5ffcc80fba203b58455102f6b68d070f44cb540fd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c9bd1f62edb22c34cbb58a3be65d98d29cc1ed600c11152edca572adc6ecaa6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a48b46aeb658e6e534306b97e0e846f8322092aa9bf510a854b3707d99e633a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df3739c7854328f987aef413ae59a0c10724aabd3ca60a82fef4a42343dd9d2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3f27ef2b69158748534aca34f61ec7f657fcb093ed571b7c02ce93e1271e9e5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72475e5505afa573ac415658da0d2cfddf06feb8589cd7532b19a31382b2595e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ca78b97bcbec98bca1d7d08a6a68d12cbe1c2671eb52fbcdd25e89d6124ac31",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.crt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a55d9996272afcf7ba2064e9d7ca4c77617a405966cab5157da80566482431d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07ce913cff3a6a186774bb3ac87b9d195f650141abdbc26e7bcb52777392fc9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.key",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0895aca337049f7faddb275b6e8a81ae0e8285fc9388f2d96e9b970a0c31541e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddf7a7f376b4067e47ad5b20fe4845d9417c7c8e0a5f44937e9eb3545605e915",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efbcee193f6b153a0e813ac8aaf67aaaa607665cbee763d396836c2b9a69471f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/tasks/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db63a9560f008b86cec0aa5174e8b9ae8a61bdc6291f35415c8d9e7d11c602b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ba3b350ef99a406c2000e141c829d66bfe3aa11a572072a00d1f4438886d6d4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/handlers/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f17b28729bd86bb98423f9bc6e16598b48540cb9cf6ed2d69f597b319aa452b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b3a4b611769090095ae6d6640f5a7ab139cbd83c9f1a06cef6668bcaab35d2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks/test_node.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bee30ea26db86db833a3c73c6d7a923ea3b1aa092535484fda3523887780ddc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883cb600f2f7de7fe5afc8aeb8c278eb91585bd3d2313fc9ddf3c1c7b371702e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00109c7a021e0056a9e63ed174e7ebfc79c583ea8dd46a1477778b15d28b7a3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e1aa34b93b773fc3385b96e988016a85c6847f810f73df87c1c0ebebf360fbc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd70b4562a8f5f0b0b7166b6815afa23318c040d3ede1055aafe92b302a09169",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fd614bf34876618e9ca28dc4c49818fdfc0a7982ac0814e28df3741af5930df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6409d500eead57967d4fd868cb5852adc1822fe58bd1ed0f92f2ea764de50c54",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a010da6a9ac6709b5e4fb53ebd960462e0e8afb9d5564dadb4dc013b21b91c3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fbcab4b8d4b681f9278736b73af5b7e26c18d133f4c6df700158b2be244904f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "642a4e91fd5cc162e6ea07ac3cf2f684259086d1873d0f83bf11eaaffb9c400f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea11ca94d27522a91a785ac301a5282914a2cdaf3aa79fc246a60a87f6e43ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60945d49535300be8e42108658dba31fcd5d665fc40d6f186798e7e0682320ae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb2cf6830420cf88eb6457b434834652d2570791ebd708b857afaab67a967759",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cc051a4603ba1cb04efabfc83906641e54a638a824eef696d1bc44e13b9d4d3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a16f6e0af308cfd4dfc3843e498e5f6729990bef5c5ffc0b682e4e017bab314",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/pagination.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46ca5a3f14a6f20849abb0fe518a47b5b6a2a1b188c6bcaabd23f50e3ec4c52e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/security_group.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68f1db98323b94ed628040877036b264158152279fe11a7ef659b6ea237980b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5129fae409fe4b9f3027ad5a696792c2059215cdce03a473beca9ea7638d5891",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/ip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d99eb46240db6fbb210bdf710969729176a4dc0e4185ba51a3a882f054e659",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50868f360a93cdabe4951870e52df684d59d09ea5556b41e4a37e6db2e7be4ce",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d3a9ff0da05429f37941231a7266e8a09cf2c716007611457b9a63e47226ccb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7484dbefebee0ff6f0045d863158fac6e302433247d115f0e8144be531428ff1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35f150a5a1d546167e9bff24e558439396208879876812cd1bc210252a86274c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/initial_config.ldif",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae3cbc203ebfd2d9a3810399be1923b2f8d41162068f92f0cd473033c86bc697",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e893ed1fdc33e53535d7f06c8f5f9748b54d8bfa289a995c28c4dc2006a2f64e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4013a5ad79e7944851ff6a5be0d25dbb2e4354aa6be08e3c435d7707e1d8576c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe25f9a5fb62125b8ffdf2a8c7ba4dc4e684041dc08fbca7672483a52005579a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/script_inventory_foreman",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99773272bf98af75f97683ffbb62826a73e561962dd8dc79cf28e342878791ef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/script_inventory_foreman/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03d29c816aceef5412fa844230c11ff009f291d95031ca3d411ef78b80ef6496",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/script_inventory_foreman/foreman.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d15dd7a421810a78f8aa26084a3a180bb89567c5bc9532e19d383904f97463f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/script_inventory_foreman/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b5f4632427c923a2f5d645972e6cc8a70850d940033cd20db36aa2ea10026fe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "618caf5f19275f07c874dcb54eca422ee6c66259270bdb5d1f652db9706bf85c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59fc7c3959a7cbc9ff73abdf90f8fa853c52ff56602f548faacdda0b5bc3d485",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cdba12ae8999f6f7e44b65aaaa4ffa24221fa957b928eace52f10e37eab1ba6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16b3c31f92970815706d4d206e0a98cce29e28aedfdec17f803fe8a789462a09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e0f1cf44831d325c34b056cfc865589a4666c7560da33f68a8ab8248195b2aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48e555c194784189d6bcb0503660b8acaffa853efa2b75de1dd4add4ec3f8e0b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e19ffa2de17cc76b3329a4998bac08edbea141cafc780cf034c974f539e54bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bb9483bbe3c899533e31d8794251fa0eccf41ced205e3169f44ded2b10c7a00",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a77f5f53953beb0037561ef70130651715fe677f0428a3f56e9facc00dcc5eb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c2f6d84f38c79c034856f6bcfc29aa20675b63d996e6266bfb3f077fbce9d71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_query/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b22fd2cb0cd07292350ec15f9ada3c9932c589631ac3a7bfbef1634da9bc9e7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49c60257b1a79784dfb487895e0c43bbaeacf4fcba60789e97f88d9f1172e0f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "071fa18e8ee40f0e0aadffab2ad453eba19945ab310fe864df2b478e3006ad9d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bab5dae8b59202497a94d3a13d9ed34aa200543b7ea8d8f0cb3a24d16b115fee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/global-state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6adba710aa58f28cd81d6a0e3620c2fc38587ef14b3e26a85f41a7dd2814b20d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b28169e97fa8a69653ad91f5bc21cc746d26c1c310170652b5a94d9161fa6064",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/interface.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bc22ae0cc6f0384c2f29530b9cce48be2fa992ca72367fec2887f367f6899fc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbfd03a4a6a79672ed38e204abed663ea00315e59a26d7d7b5acd166efc16de9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a8fe3f6351eba91318e83afee3686003438a97bf10fa9d77330e99742a5445d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7547d84307d4170efbf1815ffc8bf4badd3e70c285bca90255b2aa80c004758",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files/testpkcs.p12",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "194ae4f77eeaf175ebefa471eced93551d2b9f0a0018e9bfd0a24cb0acc380da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cec67837e8aaccead5245438d7f834aa86fd38a537f6441299f3465d3584ccd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b2fcebcdd104feee2ec9a60683681131d41ed32bfb1dd256ff8414103e3d93",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e0325f6a70e75485b5e262aec115afef16140d32934af5244b00ebd5e29cd8b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "925247336b1a937fc2d08e3c873d4598d214a3796b51da04d3835321bc41ce30",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/zypper_repository.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54b1289611298bf5410f5db3e34a30b0c70bc5e27f2a7c0ecc9833af623a5c31",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f16086d132a8ca45d2007d016195a29dc60873bb65594658aacd6e0847e52faf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac232ee9d60fb8df2b210c1d1fde2586a0a0baaca280432a6f0d25195ec8c226",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebe010ed6441ca00f76d559f7645469f089a40f79cdc3fb0af3511dd0e03222c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "481a48209c3571076ac05acd0b13dab4ae8946184f44da042df8b6d83a453b27",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b510ace150dbcd8e91f61b9db851a1e19c3d40ebf6dbe267ee51e6525f08e0c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c8c7c1f542d472c44c6e7da02d21be33974e90f6dff27f359ce7241b1afb693",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8e636666bf86c5da11a1a862f00b9f523f3ec9d400239b99a47df4474fec963",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "853493acec7a353030441990b6da91b04005237db30a3475a7782e568397ef78",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10e71a368970eba75f8962b673fe5a37a45b0aee5a30d9f7eff640c92e96f589",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2660c638db75d33c27be35b4d6186af2061df629d01d9d047e7a7020448a44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e6779c3721824e477ee2b1075957ea75de4985e790631967c6e404bf963a7c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c81bfc3449bf48a59e3ee5bfb16dce121eee7001770bfe843233684a4a09d82",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8629e3646b608a0d853b3558450205ffc3ad7628e13edee68d4a75395807261f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test_dir",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test_dir/test2.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test1.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6700b74fc2570bedb6f1cd4e6352ed597f9cb49ecd3e159bf7027a84a32acbf5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "352f20fc69b2c6d9a2d29e1899345dab0a465fb4586a3f8c768d8988cf0a36fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7177bfd97afb15fbaebbf2cafcb0db3e2b4728c2dbd0c7bf77eda8404c3464a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4de83e34024a07b9684aab1f4a6217aa205a7737493fafe42f6ff40141eec04",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e73069e56fe78051f6159581d225f036e475d290b8aaa3feba60e9ecbed89f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks/ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a581edcff230b2c1df3d2fdb921ed8c6a511ec27de41be2b02078e092d314688",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcefe5429771d37b3d344153395093b33a0ecd10d8fd9968af8ee0d63684899b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1e65281e1516219227748ed36b18936ba8bef2242b3f66d022c09f4e5de781c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/uquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d796e8378720c441626f4f9dff7e13f5f7d7aa16e3acd73a4618c01abd8e289b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f3028e8eb861d0a8a56a98f1835b4bbe784726482a4dbeaef5a2eeedb28f26f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/pquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9f26b8363b960d579433d1c0163a18188377ac645be131e7aeaefd7ac83d367",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/gquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f78ba14c8dd91da0a4fca770577755f07aeecbad8df8acd5a96b7dda65c05cc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2726eddce66cc8903ec22708217894083028934ccc9874b779234699c822298",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fcbdf40bed8e4180a0f571f5f979872d1aea52c476a80994e7f4e3a488e9225",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9bcaff2d64557d25e56a99ddf28d196287ab4e1319a2d5421b44c20903f83ad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/overlay.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1bc0ba9f389dcd122006eda7892d68a3365bf2d97a6c1d4eebfc7f34c7d934f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/ipam.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27289668a37deeab089d71c61aba86df0580d6c8c87885e1b0645a0ca0db15af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "996f3c0e078165a96d3050aee092ebe6361ffad83537efa1dbd3dd25aceeac79",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/substring.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b829b6330954ea3eac30b554f1a884c2199898d5046b49ddeb54ba00cedc6142",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5c8ba85673aa1da939d015987f61cc4adea700f2c97990fbdfcc54cd464b52a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e85b416aef41bf246e36c7aeb4f107e9a9d3157a1e90e50bf65a5922f1a7e737",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/tasks/nios_mx_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8082362994c85378fd9c5b26cad32f583dc83d84d5f83527fb905c401946e65a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b2cd91527f8155e3bef88d49921aa4d9ddd8886e14c224194fd15c36e4b29ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_mx_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41aee9caccefdd5eec4b8fd1eeaea84532cd4402095e8742d0e866cc8139b5b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/setup_publication.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c1b2d7e52bc4193e32bc18eada60703e9baab6fed72b72d46ec059d671a1471",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c57a82aac7bbdc95103399dd7cdf22c8f4ddd32c6757f31a7bba7bc12ee2dcc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4704f4c6f03a6f6955c330852cd60d4e5c037464531482ee1b4627675f7ad001",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18ab0e6a6ced70d5cfc47e95b4d965681f21e946f0b624a53d591461f6068117",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates/dummy_alternative",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa719c49691aabd3b22160f0b5c64afcb002e65dc718e33989523be08faf2971",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates/dummy_command",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8af22a87ded6536dace0aa9e546372b01191d6ea52e9011cc42503d4f8216e0d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b882d2293d0e0475c3f0a1d940a9d96fea2a5c377e3b8579f634fad461f6909f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/path_is_checked.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "055494791cbce8c13c3229b97afc4d57de0d7abf31cee3684b6cab1f41265699",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/setup_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29a7d23b9a85f8740860adb8c229a2bb6c8c8aee1e5c8d6b79fae2f04bc8b724",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd11b8308e50204f41d8d4378d6ad8d6551319457dea94054e966397364d90e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/remove_links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c76d52c3ceac3df1588b8ad3933e040ac9296bff57bf8ac32ae533eedf36453b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d53e7fb88aca511c4dec5f6993622b07eb75310dd302203bc9d128d2d5eb9a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8822ca17287af378507b3b7697fbda91dc3bece2703dc2451f01885861a18995",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/tests_set_priority.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5865ae276981f661a7e1f5ceededa3ce94a04cfa1eaef584e739fa465791677b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "680dff9a6433cbeb4ff95620592e73d53b323a4205d09e030ba29a479c347587",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/Suse-42.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2b52f4afd41f1c28b2c48fff66b165191525fb9ebaa825b3e104c98457d540a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d955d8a80b9d85aab9779d3598143d9a97f02d3987637307bfa69cdb599f844",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "061cd989ba91f9bcaeb0c970b22d2aa9e2cf13a07d1e03f9074ddbe9a874e0db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a7c56a9216836204c331cf7e01b948c3c727d2b2d6c71f2710a2f24f40f1b48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59d99408e20689c125506dac2a76977aa2f73c8cf9f8fa5df4d218744e0be7b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79c0291994b4bd12e85c97dcf9d3b43a8f7e8d25e27a285ef36b737ea44032a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/templates/supervisord.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97edbc9b202ce1e28ee21ee23ed919f2161ed30fdb1de57c23c2e2e299189ad6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/files/sendProcessStdin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb8184512251663d37e1c79ba14c7d962dbac0cf77a1af593456823c223e293c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0149e985fcc6f1ce01c6924a687cfc156426d3a460fecda1aa3c7480044e078",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7538f196e8c2454ef078c4f9bad1e9edd8d0620f4245cabf9a3abc625ee0b95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test_start.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c3670d28a9df9819c54c4b0224ccb68619bb1f2776f3fa7f7058b09237e6ee3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "151565b1b6437c5330d7b1619da7dd7ed96393e5366d7eed6f6bb023ec0d7b90",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test_stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "faa13f1f8d9159088fe715246176d79c32c1cddf9a0df81a3c0a7478b948a141",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_pip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71b4b479a99894786d7cde1d873795001d32db57f3005644696ea968db5fbd73",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/start_supervisord.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d95a47f330845cfe456b085c58aa8441a9d4f6908820eedfa86bcd6661ef7e01",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Darwin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Linux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7c90b9aaa3f24047e0f90b9041524106f6fa5fb3fbc776bab667d4e3bc094e0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b268603706f3146929caf02b9766fd864cb33d10c1be9d581ef762640ad0dc26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars/defaults.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a0893de74514778586ad0c0240c024b26d16c0c8c4c7eec89fe89d7a475b752",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b73003c59b63fbb82a349cc0693c5a352b9a232ff520bbde67f0a76b947c909",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "629cde9e1e8d5d35b283b677211b5116f691bbe96f3e38a833ad3a52199e9be2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aa01df43b8368ff8ff2f27edfd594993bdcdacf76eb59889807346b97422b0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bf999eaca031d48e1f02c951c913b339c20992f5fed64196b8c95f7f3d8f221",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5aca162654e8110b7883ff6ce7517273d7b49688a05d8331b0d39f5321b4d8a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_membership/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b96d9358354b72ca27b4079b748fc140086aa57b553209aa83887e15ee219420",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/no_pyodbc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67e03bf567e68aff49728425969ce1179bf82dc5e4ee75bccb88bcfb03e9de81",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/negative_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f655299c1f15bd9f4cfb04cce676fd6aa13f0b0052c18a17c4fe0f2de52b18bf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/install_pyodbc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c35a5a838c05ae876ac633ac75a308b3a94f5d34a5ba7e0bed62edbb547e59a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdbb848f49f0ce1fd26c6f1704403e3e9ea525832b5ea6694abfd709da7cb6a2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79ae9977dedb8dfcdffe5c8a71f55413156aa43719f33bd85fcf87c3ff741f5d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d3670fcf44107e0637dd3ba6cacc778244eadadc4cc233aaa6bbd8be133284b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d822d4fc8170c37c3acdbda750b1336d22d858ed5dd19a71ba1cfe787cb9bbe1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c589f75e30b6ace37673c390892a1e36966830b871a0dd8d83acb2263c1cf8b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b677c331f6c1587bbae41812432803c0747e857cc7b4723251c2709cbc896839",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/tasks/nios_txt_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "820067ea1674925f6037a7cd858f6666e28fa666039afbc98084c3f8ad769f00",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_txt_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb79c3d80cffc120afea8cc796848a327b790dbe8b6702bd51c1e2a5d82d906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/templates/duplicate.spec.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5401528520b9f3ee1e8ebba24e66ad649c2e95130f184508d023b82be001c7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/files/empty.spec",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77f59f1c05484da8dd181c8158c7ac48b5540a9a308c5f3872c52960c6317450",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks/zypper.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50d01b0f20eaec7bd7c775cf991a3e08e5d73c6a573312a848f42c951855b93c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "978cdbad8d0ab5434c81bf5ebdaa7b66b1b99388a742abc871dacf11709311c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf5ffdd7f49c858218d24c2b92b4b0cad7c308e2b91c8aa7f9aa26c42728b525",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a37de1a0afb6552501549e1f955115730ca1ed0e60cf18f07b89704b749ae96",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31d586af76d9684e993af8b2df0f14dfab96be4b0b9c293c42644aa348f9b284",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "533f830a4261fdd75c9f43c2956bb80651caa761ed92fb8376f6623303b19025",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2871df47541b86d0adb6a4444eb01df5ab1124c1dae75c3ec7d8d0513ea093ac",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f30e183aa0eb77ea5830ce9cc7166cc93b874663a4247caa67bff8144641490c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ef51b03fc4c317f790f7717b8134a950749cef70f871d5efe3ab2762e0ed15f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ecde298d2c5c606796c2d9d6786ac9dc0c81f99f59a501b45fcd037ea36e13d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7e80348f1c6c2dde46f749d14d6098983cd5586973b52fddb99c4ff0494cc5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8415b9a44e521d868b65251bb6810a29a0d3cd513751441e932fd84bf3b461b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60c4ec43079a8d934d7f8c21cf902cbddf88d6d43432f45acf00a06804209ff5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "725d64739e31b82e7102e348baffdd50693713723c434fffc17c5d4a9d174868",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b75ee9a6bf02c5ff2f9e629a35e588e5297d1bca6463f5fc69a06aa27735d96f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e19f7046dc136bb74a25b4fc5bd45ae7c7dd48be648dd28fd12ffccd54ae1e83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/CentOS-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5b45ee4b79b491c7c057d1c4c940df1ef7fa8e7fa6e1d006cbb1f839eeca40d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_lang/vars/CentOS-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2395b707eb60229acffb97605104820a7f00a23d83d63d90e353929e97fb4e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc23a0de364583fd6a1ada6193ec234132fb5c4c3f8ea32312854dabca928253",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "640ba15edd71248f5eeaf15414f883efd44f409cf1bc717a6994d49955f3b0f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_set/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d32809876f7c309d60ed16725e245db8efa6034c980a101373a13b9beeee13a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f9980eb3289036f29aaa85cdfc51b711ab19903149fb98eeecdbe72ef603a53",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2660c638db75d33c27be35b4d6186af2061df629d01d9d047e7a7020448a44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72441f6b0cc63e71b547b18314bc8b45bb8a792ba466d75c8a4b2c63c7bdecc2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb9e09507ad5d6513450009d5745cf6af17ebae39a58b53bad34607387170752",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba958a260f54fe32b1b068d9ecedbfd384b609faf8af0d281bd980074071b1b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd425d7bbca57edbcc6883e032b1d00ecdc9b2cc1d75b0be4fda3e488fed7053",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d09b3332b47f6b7c88e028e61eb609d9619f2348f72e06822082cd472113df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3395c3c99e19af68880f2e803328611cf85ea5da46e680cadfce588f6c47410",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks/test_secrets.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b56c6357e1011252a6710cd4aceaa217d4fb6caed8209ce332cd9515f086affc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a50be5d8af58b5765d4bcb61a594c13237d2b250d8d96f23b11358e23a17a48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb8afe7f5d60c2d26691849facab97a0cabdf2113dff7963c9dcf8ea399d0354",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9208884beaa8b6ff04285e748e50ef0402f1af9f9875929dd2eacea9c389112e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_copy/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "942f62d745fd5f95f3b5ba8c7b2c7eec4b6bbfbead87f342f75c5bff11680fc3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "995e695b188db7961d7f3de6fe4270fcacedeadbd3665a90361a984d818188a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f287a9a4d91cb6207959a2d7c577633a894efc6c74b65972e2eb0d3012792b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a492d008b23484bbe7c6cf2e4af199e619cee6e3f1682e150593d8dc9b8c8f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "863aab7a2dfdd57225d9380d54e67865361ed0d4acdebfc241857a585a473855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/templates/consul_config.hcl.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b11bdcd7f655df577a39c5668b76f4d9287f71a1ad28edc59ff9a040b7f5c04c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ef1a99341f56c86bfd21ae22326331c386ebb404d3a63405663d085c2a40aa1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks/consul_session.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14570000e1037fd8ecce033eebcd0f26cdf05f5bdda3a22173ceae2bd01ab098",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "485aac70051232a57159d0d734a3ef95088ce20dd3911b7f6f604eb9ea56357c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a40969a414a8a84d59746ad2ec3a1b2b697443e715086c01925cc8a163b7aa1a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a812db5681f6477d26fd7955c0288e14da67cb1f5b151d3658e1b51324b9434d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0764cead829b07be802779072c06ed56a1121222b6eb0329a9845b6611f69729",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f186d67ed2a7b23c533cdae8cea758cb8ffa2dbd5858b64ea4be861d1dfd922a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks/jboss.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8873ae18f2441425cbe73266fcbfe9ff8580a81e5e3c98245f736b92c7e3b79e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "653439bf464124f10cbf0e09beba4e37fdbb99fc3ac82ffbdb48d8c5a6f23874",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eb67a6465d730c165121c76bcb6825d72c1894f6ba3a3c797d6a11b8b4e687c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb7ca5e8206e2e10de97e975dc8f5de3cd43ebe27acb5eea3dba31132db7a10f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "188470559381756e939df9d964d444ff47e7df613c77b6cc2653627bab65df69",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c42d4a477852bb0307062e6aa65d150e9fd5e84c643e74c458b576b2b1b72844",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_tablespace/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "589d9dd6e690018d918be7f002c310d40aa4632f100d32008dbd7db685d47ecb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks/locale_gen.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ee978f272037425567d6e2051c8ec5a0f263d25a81bfb4ced417aeef52a4147",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee234de7e912e0b7b6aa49643af769675600152350ae20862abe8d38d62f5976",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6fe5ba544a3a858f7921ead134971c6b094f597106f6c621ea21ab4608ba5f0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_tests/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_tests/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/ca_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47ddc514d030d2dd28b98eb257b690b8aa94abc7b657b43caf6e32e2e5a6bf9d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/client_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ffc8420355a69fecd60242feb89bfef5517292aa9129ea79e99bb36ffd80dc6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/server_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a93a860161059bf8b6d065d2b01a5218a7beefdb075fa704e0139d4f96bdb61c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/server_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bb0b33983d37d5b6404c0feb969e80d0787331f774d2b8024570133d65851f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/ca_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0157029faae2207eaec99b67360db8ca46fe6964eb98165a0ca4ac56cbed7ebb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/client_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c88ee01e59fe19f497b74f0fb15a6d705bbac6df554d16f2f80fc25d2723bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b7623cd0bfcfa8b836f0eed7e40c6546f781ea549220f409320fd3517590694",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d43893f30ddf98f652ecc25abe31227412572f5823f6b312cc58d097d1c3c6ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f934056b68d386290a41d982dfb5aad12dd510863a4c1f122dbf4a106b4d762",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db0f537faf57947dfabba25e47d51bafde1ee8ef42beddc233c7c413fbff9413",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15ff3cf7a29d0176c9bcdb45a232c93911be60919ff46fbb156fdef642d27a66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe7ab01431b46e78a3f9c798601da7967e73df31f89f61179d54039df3df2152",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/uninstall.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b94a8d10092fc2781587cbd2ae5bffa1909832af5780852b03be4124fd393baf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bcb9f54c987dff4eb2cf6334ab977cc8c6e0d93083ed06d1e9e684375a95ee5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/install.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "084fe24287e02f0091df7cb1e00a4ab15a8ef021fc39e3e52dada5a990815991",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/run-tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e38820ca8dbb7718ae28726d3a8cae512ca2645f9b610cb18909a519e568070",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7177bfd97afb15fbaebbf2cafcb0db3e2b4728c2dbd0c7bf77eda8404c3464a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a0d5b9fbb9f7be73ffd1f589ddf8b702d470b7539576afc31855cad91860a08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "352b9b8af62e3522451c64e8383dade902fc52c1257cfb47ee0c8b15e09426b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "572d2cc3d5bd12cd01a8667cdd7f55a2b67b94c24faf62a14c0324e03c7a29d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nuage_tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nuage_tests/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nuage_tests/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b08c2318ec3bb630e636c73c78b78064327e2a9f37739cce97c77e62068ac30f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba3e1376d39fa4bc8b88e7656ec1e1aa761beb67a1b6226c340190115d7ad704",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks/actualtest.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad5278f90b7ab68b315897b894a9907050db319038f99001e520c3d7da055c8b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4b0f7fb12d1c5a1dd43495ae9bb60911ef077c599d667af2406b4bfe305e4cc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c7e14b41d8d4ada634180cd2ce6e5ac801c719b1742762fa79cc9287215e020",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/tasks/nios_a_record_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce6edea9daaa81eaeabe5c9565437f59744da7eefe0834b66a35fdba9408a3c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8562a0c4855409677a0d075e9d1c4b3a828bdea5c5d03bb83b5d22fea04fcfd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6641fa60f8ba09c7ce762c2ef3514012995ac5302b4864a10e6aa0f1c83d512",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_a_record/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665288590cd4226da42511817f09ccdfc92df36c9621583cd2c82919af295c5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/transport.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64757ff83593f669dfc25bc41c1abb935ecb8587841be41f2dffb01296f76250",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/set.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84cd589d9a596950c2360a26668980de7174d7bcbff08df6039ec310c578f5ef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89b86a747d9095c1bceea2748aece92c504e4409ce53236c9898a64a3774a100",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/udp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e280697b30f3fbcd859f3f561d34cb51cff5acb3eccbfa8ba9b1598a032e860",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/pipe.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b067919a3bee60aea22d1fbca8cfb57b99a8862d272c38f976a903ed8316d9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/tcp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63e32525bd29b6c499bd01a0a3804c267f6c71de066c86a1fe6c796d59ee0c75",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec0bfa34cdff051289a8824b5efaed38d2a3e5e1c3810a2f3f155b6f72f3a551",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b9c4a77bc5ca43db99fdce7e5ae3edf5cb72fdce4f328e11d3cef0399be907f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3581e99cafd5936362840e483d966ac78f69c4969dbd55609d18e114574fd4d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52d9ea758f3abea032d42028fe55c7373ac250cc5285a23e22f49229a14831f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b3669f59f5e07f827479495a55c7cfd401b682e5067aeec46e0b9f380776ed5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/tasks/test_target_role.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b6544ff9e5356dbde038ea100d1d6e25eaa25ae588222dbb8d94e649256b013",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_privs/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68036cf7cd11887e6421c5863abd5ab5b4592fe4d70f5199dd284d82c581c798",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "048d09306680a238ebe8fb6c65cc4d9a8ed198be06e6bd8bb355728316481255",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "837e035480fdac759eb72245a58dbace266ffbf52cc8cb11ff42b40bf2a51122",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76368a9cc15d0ab424b2be527d9cac55302281555825e7721b85df2166027b18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb2b94163ae09c00bfd545b7284abf64e938f7cc2cfd5bcfb38988bb45cdc7db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51490300a5521e905d783638d4bdce403be57c74c8622d5f20d78df5c4e70a07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b639e9d66247b78d1c4e43e987008deb00936eb6367056f31b3ae7547d4e928",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5d88b9057b0472b5c5bd7822f265a30231cf5c24fd2be869fb6ca57e5712542",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00109c7a021e0056a9e63ed174e7ebfc79c583ea8dd46a1477778b15d28b7a3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e72f60345f074a9af0cf68b25eb491a52b6dcc0f5f946565d0ba1f4c51f11421",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aefea58f6ce91e8e5802b4d6d53f09d4b9655789dec22fb394d327913435b108",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d247abc2c40d94b0c015fea95572b171ce2460002962eee95137c7b61f61206",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f237cf45ef191c2b0371c613462a33364609c1588c096665bbe68df78c94c585",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbce11dc51ead2d8cd98e7cd7edda44554a696dc2aa59b16a4f59573c27d2c07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c8b8c4765bbbe7b995178ea46a18ed48bcd0dc635dfea5dab5bf15635439619",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "115c090f1029bac19af8970417d5e50b212a935c55a2d23059175fdb308f3b92",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31a47d8be93cb3ebd67498a9016ced3d259b1d1bffcbfec8ab84bf03c677bcee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_ping/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee4c4c179b3e7fdde23d8cf3a4855188e229f2a4ba2828bb3dd8b0a6a1365aea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test_connection.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3297fe2040e5b0c523fd6f14bc0c56a886980c2a1b241b93bcce847958528861",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f32dbff55de60ace66e2555586b94abd0f74f6bbcc008eb8d1c25dbfcc464a3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2832635b2731a5ae2a7cbbc6c19ef83bed716c6ab04bdfc930b613c61d55f988",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f904e1f682ddb4e1dac223c46baaa1b22f6d0b1801aa6a3ff54d283f7b570dd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/redhat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c19debd2669548a152b9b2da7e584f86bb5d5e51ffe6612adff775056cbc876e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb743826acea65ecb3cec1af4c16692aaaf6a80bc42407f45a4cb219dd3e21b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/freebsd.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dc1f19ad13663f68075ebfc933bdbef4532c527a440b1a82ecad85442dffb05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aef7e744c306a83637e9395ff2f7aa375b2337fb8bf8b7656597f585ea469f11",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a7a029ca849c93b2891e0e35afb1ae117b815ff0c6696e4fa7d239b5a37bd47",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58be4e1108742d837a7e2a1dd6e8e309ccc89c403195a3bc4a15977fde55cc27",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/tasks/nios_network_view_idempotence.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0988d75d8debe3ad04f656d77c22cff648214fff669323be2ae1d1707dfd06f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b2c007ef16bd19c23982f22d955c1cebe881731f8e06615c9a8fb2985604cc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aba930fc2918934f7b9bdff17f9f99ab65167a6e9e41a0c9f45fffb31eaef0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nios_network_view/defaults/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb79c3d80cffc120afea8cc796848a327b790dbe8b6702bd51c1e2a5d82d906",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c3985e1f44c3c44321b6c0a2d578a12d898521f5c81f591b5669b7a022721d3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc4c15de06b2d6a7367156243b5f7a40bea5863eeb098e44cf7ddc3b304e0125",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2d881033346cf5e84f4a84dbdcbdd6539fca4a20663af9aa958f7befe2a07a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/test_password.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c49f0cdaf8a111ae6d575cffaf696a863eb1288bc7df778d48871da06a4bba19",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4342b6e307d91be7a98212fe29adbaa34d6ac12905a1e1f4347eea6c74173d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ab6c81a10d4fad3dc540e804f657478b4f6b8b940549742bf944216ed51bd07",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_user/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b14eed97d15235e2056971743de23709e18c00b91c358fe1e79002c98ece9d60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e2b8a8ad0b65cf738ea7fdcb6b18f528a6bcfffa1981f91838fc284ac4ddb6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0763f1ec93dac58e3dbe26963e7ff9f4df97e5692302d31e19ee7edffb58384e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_sequence/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d437a0ee3b271c8934e68d14684931a56a398d633c6b549e74ee3a9f17f9bc46",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "936557da70edfe4a3a40a2e4785a340f12848f07254b1a971a579ce373386bf3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1d79462f9030176f246960f6ff49b8eab3d4436b45951792f6bd1d8011a34c6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21be85167d69a5ab442921a763f4f353e0d1534a4bd6caf9cbde17e627ff9162",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9368161dcd8aed4e4cf85ea4eaa8569cdbb795884987f2749b7142db857a338a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b044d604e39396b11d7a7eaaae7897689170eb8d648f5d8e8f5c0a4baa1b6348",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/synchronize-buildah/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89d9e542f0ed328791d171a7e39018ecf0a39d2408fa67ffad475fa8ac233b61",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5eb84ac30158da1476995439d5c07afbaa95553857727aaf5d68f734f706607b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d4961f0b8634e5a5e3f194d624d8ff66252900892b32fc46c12db712aa1eb43",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/files/env-file",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50a89c391ed8404416ef6ced670c291e4b9e6ec5f509fbf57dc2b0385912b166",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fb911d95c5f2e8071534edcca10bca148d74fcb05ca0f307ffbd60612d9b5f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d67bb6a11ed135170fd45e3bdeabd04bf7eeb3c1e10f6830b65b82e16ff0820f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/image-ids.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9d5939abbcd79ca3491484b030b0a8f5d744b2f700aa6903c16d443727f7384",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/network.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbe53e0349db894edebdeb1cc364d65d5d2d1fae3b0ac31b7ae6c56d6d1cec15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfda3813acfaa7d6f3f1d6ad69cfd2a0cb4bc8eeb923fc81f813d9b8d32421ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/comparisons.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb4c255a5eee97f4c75c33248a2e66f5d9399b2739ad87ffc39f9a1b7b826de9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/compatibility.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a49c8747c60ea755405456d1e6483402853a9fb34d0d46151d5d951ef8a3fddc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64c1aa12c75e60131911442b431ce4d0c9cc6be02c70fb8523dfa35af5c81e8f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/ports.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8d7d001854b717cd6b257d21099b63697d020e91a7af1ded48ff2968efb94de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d24a02b3c81b7c347a224f7e5ad0e701c25b721710aabc9fa9c66a70787ae369",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/start-stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b5539326cf9097cd6b3e0486cebac31ee1dcf086689c14095201dec9e3c5d16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef2f7bd17ab2ac7019d8677e50835d412400e2f7a47a5dd2efe790be2ede49e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25a59c0620e4a33b27f3804cc0bf6afeee28a1d41b65c60576ff5ca5e8533110",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/filter_plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21353bbfef517ae4f8194ab30a9963d63c7ef328567ac507ee347ae2a6fa4a0a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c4931f013923286bfbebcf960b7e40393eebe4f174bf06dcac98844b38e69f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ed33d526fae3cdff057b6022f77a7c4d32b7498112efaa0cb7641e69bec96e0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files/nginx.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f7eaed3c6f5778062768d3cc33452679378888e5aa5fbad1914a8aeb6c4acbd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files/nginx.htpasswd",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cade308eee36a0197cd4949387d8646aae59fe1b66b34242cafb2bbfdeef6706",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd1c2f77957da0e4a3da3175d30f018fcea93581e72b1d9f1d3e3c54ada63a90",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8f833cbb1ea2e575d7708e5b4535beb8e4a66519e49e475d3c367d730f297d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c033292d39cd16d8eb02e73de0b935cc05a719866fb111268b870a3c05df11f0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eeb58604285e5dc380dff4451d7705f17c8bc7c09958be560d20ac738885e60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2217b169f755cc0a531d1fa8b30aa61aa5b978709ff039f8d4ac404a91cd7e6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97c3c36f2bd05e2ffd694efc2a84dcc02b2f1912bcdd48ead759c1044212091a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe80079c35cf640ed58583d338090127a92e0c920eb2e58c1d692fa6dcf66a77",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c0c4fe950c7c1a4bccd60f47ad339c0083f5d0a5c0d87fa6325d0fe93a132a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nios_tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nios_tests/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/prepare_nios_tests/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "447bebf5eaa66b04d77ddc3640990e7816dbaf207422402037a5869a4694399c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd5c23f758ddbf09bb7959fb3aefcf2fae8bfb1e3fdf3ee21b74a96776fa8608",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_pg_hba/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92cf983b9149e1ac59ed2b39a814b5c1c9204f949884922cbd265ce0eeec4747",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23ae85f258625c2fb17f6655ab0125d532307ffda964e6dc25221c69f391ea6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ae5564005dbcc461b05d506fe1f2d1b8e21208e53892cf9dca96dfccfe5ba4b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdca29ef497b21e9bfbb51f911df8c1cc13a3010965f5349d4cc358b1688aff1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1133fa36703e51b978dee3be6f6e7e291ea0e744208c39ae9088b5ddaac49d6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95b0a7b5f17cd3772fc4b68faeb81e6cb6b459013f4b5c91a2812d06afc31e2f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce76d01b6fead8139dc4676535e795f4010017248275fba1a0ae9a89ecf34e92",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6aecdf9b8af5eb8aec7209c2004b2b5dc0578e0d47a0d14d97ae61bc2034c922",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "397aca2a0b963a65c5d0aa341e543e60be6748882d30ee9d717031ece533def9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b26da73f25849e4c4a7f74060c0348e68227caeb3c29a6d3d70237844e48f998",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a429ebec6b0982a5f5948ab40ddfc3b7043dc2ea4aa34e17c036a07ea68ed452",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47495bac1e1bd14f4e1c832bc527113acc68707a4b13d85c54fa9b584f799898",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "867e4077b2e661f834372ab4dc4cdc63693c448d2aa5971fa762474d5cedcbe1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6857193e44c906dd7cf121c8759cbcde77eba3469dc00b9408cbb91272cb9957",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5926739106235917ed4672c00a9b356ff7ef3016b826d8d0976c65c5b705288b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fb2fe1de7acac9fe00bbe9918b6ec663623abf8938099a8f7b41505d703db55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e917715fd6de57a163d1c2f41bea7b96e23e2ad34496f175fa069f1264988d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks/sefcontext.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b153e2c6e76b42e11ce0dc3efc209a845402c6cf3d1b17fd9934e8a1aa2088c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "467b9bc1e410a98e565e4903966195b8b9a9d8c76e1f88bff6b1724369d244fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d46974178392bb576f893a3fef735d5128b0eb0bd1e1f6d09d99162c29e167e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73ea1b70fd2b5411eca231acea9212ac2e2a0a3eb2ca93618638bd88108bfb4f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba6bc4b7b7f06e33b61092629dbd2f094b2d814d5cb051650b7494031fba6bea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3870dfe0e07b4439e25117ae0b424887347c1cd677f608522d6d7a99384642a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eca673fddaf151a108873b2567fc9e40fb19ec24f6559355a602e983fdcf5495",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b08433d5f3e4e564e74dcf0a3fe4221379ab317705845907d68d5203ab1704c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab541e45bbbeb211496e76434bd09715e9a541449c267f449d4625b044465286",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4704f4c6f03a6f6955c330852cd60d4e5c037464531482ee1b4627675f7ad001",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_subscription/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1c340db087979bebe0424643ee450456e3306e9bbc5e82d42cc34857023e03e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b3c431835de84d4f4f33d43d8e3a9ce9fabe19ff24b0fc617876c11b7e97208",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffe208f20c1cb038da9750e438e2377f03d31145867413d919e6a025c15d270b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a46fea48ad48b4722d9a103098fdb57ba23dab047a74d6db00a70b53141847e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be8c3305d4986b588578932b0b29cf10d6c85909adf2a31afeaa8fa4c9d9d946",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc91f9aaa105d7ebdf8f3ecb4801b8e815c91b8af6dbf673c2a4167399c0228e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6ef28b33adbfc834e79e3d58688d036aaee0ea96c6acdcae1b0aaaecaa6e15",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/postgresql_owner/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c1aa84798ed23a23135fbf53a881b42e2c1edc79f8edc7509829125fefa1a05",
+ "format": 1
+ },
+ {
+ "name": "tests/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/freebsd.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/rhel.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/timing.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3f3cc03a997cdba719b0542fe668fc612451841cbe840ab36865f30aa54a1bd",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/check_matrix.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79d1b0fd384970d8ae90530ea930a16d85061898ffbb1581ecc307d93585e719",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/shippable.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7664df7a138af0bab1c3e577bdecbfb9a5bee185b5972bd51af1d55a37eb53d1",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/aix.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/osx.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/cloud.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd953f7e779b9962e76492c389142e03174e84a8115f53e56628e2af9e66b818",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/sanity.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23edc478fb7f1329a4e89bdac3921c41ba49e5fba0d20dd1a87ff1db08c6a1fb",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/remote.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/macos.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a140a1cea2fbf3b291009694bcfcf0f2877e92ec01c7e929e787f5b4cdd6d92",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/units.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86a21fd597c471a106be13c9dd7265248e5fc7941298b422a9e1d6e93259107c",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/timing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebb7d3553349747ad41d80899ed353e13cf32fcbecbb6566cf36e9d2bc33703e",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/constraints.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50acc7d1f44039c5966268c50195dc9336e47fbd069d1e57a4ecf9914c636fe8",
+ "format": 1
+ },
+ {
+ "name": "tests/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
+ "format": 1
+ },
+ {
+ "name": "tests/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c853a4e57325cfe4dbfc0f12cd4ee893772fd0e39fcced19ece245afb5391cae",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7de9a11129062b770d468e255a7a47489b51740f9ca2f8c2bd55877e0f35699c",
+ "format": 1
+ },
+ {
+ "name": "scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/linode.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4222efb9b5f3be642d94263eb65010f0497b793d012b910e9bf2674a2fa7ccd",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/vagrant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "382abde23b9b3305483f8ca5d2acc42952b9c931dbe7bcc46756dd9cea19ed38",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nsot.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06a77d808dd9bc4875aff82ae88a247cf28ea3d53251d506b6b3c0a56526b1c6",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/abiquo.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca119e39ca2c6dda3b42e088b23cf79647b729f2829d990cca77ac06f7a22114",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18bf62fc25dbaa9c85116f3741e424ad2ba0563e1f67c49078f36f8bbf4c4030",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c85b87f23ffe8924b2e88af2e2a840d4222d4a82f99400abaf0bdbb13b9b0c71",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/docker.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5b3768d87c711a4140ab474d4e6855c421ff17fad6d38e32cf19e79787df489",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/ssh_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82977fb307606d41f7c9dd4ce7c9dc16855f23bbbda36109d22bdf4161c9dbf7",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/fleet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56939bd0e7800840d1d95dff71c8dbccc0256941586710f4bee21c366737dda4",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/cloudforms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "698442b29ff955360e58feafc8e23ae3ac9566f3672307137d1420886ca038fd",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2605e30aa0259a6e872c2744c9f400a71614fbd8cf47bb17e10387288432d2e8",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/apstra_aos.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b5935c3874fb9bb60127db5b5154565dca907575057328584ebd90dacd63089",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/azure_rm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92e7791ebbb48592e8feca8974d99b17df30d9dc818fcf27d71fde2a53438a5c",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/ovirt.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f229394077a7541073a043c976129732f3b6e7428bb3b6cebaec894c2a02557",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/ovirt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da038e1a57c4e7f9ec702c206b46beff3f571a02140353a2c80cb39fd49a8f34",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/foreman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "846dcb93bca6135296c7df0d4062cb89c252192aabe75ce91301e9440556310f",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "019cc163e4f83a0fbf72a1c6824267ad6cef8d05e0682874c8cc521f3fb50a9d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/lxd.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "434e4c638900e9a1114a7fcd1318a657b42e677937fba1ef474c75fd261c7711",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/apstra_aos.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "558978941b2c5cfbc3c926f14407c6356e0a527936395ddaf32ae3b8ce254c21",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/foreman.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b72ea32ce2c91a0146f1ef5cb32f8b42ffd999ac68cf69596dbe4eb869ca8dc",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rudder.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcdbc3aaefddb278dfd2fbef09bd377c953529cc692423263e9599a4f93e084d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nagios_ndo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93312140ada88f1efe60ee95ae95e36c6765d3811d2e746564e2202e30df6f7e",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/infoblox.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc460a539e2332d408938f68ea9932c429069af29db678c9ef5e6dfd4dda5904",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/lxc_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3130f6a3e7deccd4677516af992a04cd7d9af2bf739baf0e2220f3c9485fdac",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b672ea8f8b1c26052474615ab303bebb834c23f1d3c1898c4e3bd9b0dfb78928",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/consul_io.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb73d931d08773b58bbef8b9f86b3ebc632067a1b3fdc6525f9115e5dca7af7b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/openshift.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4edb8f90548946334fef67b9a0cd5836d9bc3062b5363ae4f693a5f331339e1a",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rax.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f65260dd3a511bd496cd6f2865fa543874aebb7f2f2cccb675e247e8eac5c963",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/scaleway.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e186f087e489fca158dbd84778c74ed5486a08bb6c35ad53f815448f6e03d87",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nsot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e54946dbb73cb4a424e863873bacfee957e65d13fcff22aed45516c281854e4",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/ovirt4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e566b98176a7a7721e510beaa61c979db22b63c0ca89530b01b085afbf9c8d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/cobbler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ac13c6cfb9faef67c381a46b2b786dd827c893838c44883285dcf05f2207790",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/jail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99c53d0e47314f099d6f33bd818072e1ee1334ddbfad8491b965d2e247b7edff",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rhv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e566b98176a7a7721e510beaa61c979db22b63c0ca89530b01b085afbf9c8d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/collins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52e980d1b3fa81c3699bc3f8ce1220346443871237e5fbebc9edea13fda13b86",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/gce.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2fd208cca342e3e91d6711bd781bc2658715bf4201fa7d190be8d5ad35dc423",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/mdt_dynamic_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dda8620698ba8aae684c506aa09c802ea7b5a4b32003b781b1bbbdadd4e1192",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/gce.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "925cfa8728fa00c71d851d13203fc6c9bfe87f96c0528029a0e909a602a0004c",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/spacewalk.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c70bf339d00f085034523e00d47d0d30c36ab1f1b434e3e3395779acb44b83f",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/stacki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de6213eaa7fa7293bdddc5496a23df53301538782f950c412a0a40895a134e0e",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/packet_net.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08f8dc09028ba8e3f8ae7a6edb38bcafc163a2bef1bfbb97a0d968f9b5400a97",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/softlayer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "015f4fab69148edbc626374d3d67f5dbdfcaac1ef707757e61fafca541737d5b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rackhd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fba6405414f5d8c5e4ccc5f9c72a911f4921db273fed154205c18965de7d35b0",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nagios_livestatus.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f101aeff068778d8ada3be0ed8b6f9f5da881728afdebb08346df959d24fde19",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/azure_rm.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d1385d111f490ba1f9530b5444452786833c1be2981609e5950c5898de2d018",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1198dd4d5f7b5f4d81cf8b2412b547a8e9017b2425c6e9e50c92f6fd8a831518",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/packet_net.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22891e213687970cd121556cff67b3e7890ce30d9858eba0d6d7684b48f13a2a",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/rudder.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "583183f7f7d51d0fb756f1650c1c689e21ccc81ed38390d85a82e7a0bd5ded5d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/brook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b8080b7793bfc92084bb99e46ca87f3a497f3bce497b35115d09da3d33d8b8",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/brook.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b6cc89b89c016918a880603fec7c7a4ec63f1672cdddd979fbd60448dc7623b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/mdt.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82319574806f676d19e07d4174a52332247eb6a413542346dfe5581339ad0d19",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/landscape.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54c89ccf29b7252253e6741307e19780567a1988c5d2b04b57d0d0590d7cd2ef",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/abiquo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b424f99a1b58e1e463056b9e2ad11cedc8def94a7d15e8d4b67a29c80460da1c",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nagios_ndo.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dba93d3ae81ce415c38dfd5a4b27a4f85a6e91673166cfd9f1b819e3b9695fd0",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/cobbler.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c67351dbac410a433de1c921a3f49c39f529c651ada481e4466b9d0209912bb8",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/cloudforms.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d716784e4f9ab41bfd762126f3840bb8911be994b5b2ab1d0bd50330c9cca25c",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/apache-libcloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbc8aa7daea0cf5c38ee845fc289f5ae825838d155c226fa632f8087d91099d4",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/collins.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81dc06112a66fb48d60beb1fd41a899d1aad3c5417d82fa57a866cebfd28e749",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/consul_io.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb4a6f7362671c4d8ac4c3d1139ad1e265108c8d62fb3b7863e76085c2d71d0d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/infoblox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c18f40e04e62757be581903f3cfd3cea5c98a2b4e9e284f4947ed1ff1769bb6d",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/openvz.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f42369a685559521c1c32d68940b907aa6b0270a9035b19a5aa3d68d507f17b1",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/libcloud.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44daa3ed1ea23f11795c01d26cbee29b2b9a31f9ca80ade1904da211af99c00a",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/spacewalk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a74f017a945213e272d1e2605f36f3b97ac9fa1932c33951f674c1335f389e8f",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/serf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d74e7d96fafa90b91d944a11743468e287f01addf26ced8033123988ae76aa9b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/stacki.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62990f769b191b5f3be68930a6305c1d703f31a4447b97a6e9a36eda50247e4a",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4aac84390542dfe7dafc728d961b6eec8f2651b7f0abad58747ffc4f2c02738b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/nagios_livestatus.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a48028120de79c9d440b523b98296e6d04ad438958d0827a28ba23972494162a",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/freeipa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50cba39cf08c9e25edcdc31e222285f2c8103f05e0fb95923f3f2e4304769b7b",
+ "format": 1
+ },
+ {
+ "name": "scripts/inventory/vbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9dab73474c67511191c36360bdeeb7d2070e9542efb4feac603c7a9dd6b626df",
+ "format": 1
+ },
+ {
+ "name": "scripts/vault",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "scripts/vault/vault-keyring.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec2cbb0d28148e62cf9b5590999e5db46fc6012eb5b32ed0e066961a90befa20",
+ "format": 1
+ },
+ {
+ "name": "scripts/vault/azure_vault.ini",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "411907a903364e868e0f8d381205667e628aa05d1b1b9d2b906e2a9e5e8815dd",
+ "format": 1
+ },
+ {
+ "name": "scripts/vault/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "scripts/vault/azure_vault.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e81139d4d6c6ecdd093bbfda89c9407e74f47c360c1343041774c51644c610a6",
+ "format": 1
+ },
+ {
+ "name": "scripts/vault/vault-keyring-client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c90596190b2e328e4a6d13d0bb99399a7b862ceebf25e3dd46b8314cc29a9e6c",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a31925d687298c2d9568cd8a6083c015024ba3560d3275fd9ef0b1e9e8f6b378",
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b402a1efc75de4554557b846eb4c6d9816fd1d0ef754cce207202519628ba216",
+ "format": 1
+ },
+ {
+ "name": "shippable.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5af9f2331a6593d6e98e20f7a8bc4c5f00c9a9bc188a074a59981203ef9ec5d6",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/MANIFEST.json b/collections-debian-merged/ansible_collections/community/general/MANIFEST.json
new file mode 100644
index 00000000..9c850c32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/MANIFEST.json
@@ -0,0 +1,34 @@
+{
+ "collection_info": {
+ "namespace": "community",
+ "name": "general",
+ "version": "1.3.6",
+ "authors": [
+ "Ansible (https://github.com/ansible)"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "community"
+ ],
+ "description": null,
+ "license": [],
+ "license_file": "COPYING",
+ "dependencies": {
+ "ansible.netcommon": ">=1.0.0",
+ "community.kubernetes": ">=1.0.0",
+ "google.cloud": ">=1.0.0"
+ },
+ "repository": "https://github.com/ansible-collections/community.general",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/community/general/",
+ "homepage": "https://github.com/ansible-collections/community.general",
+ "issues": "https://github.com/ansible-collections/community.general/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cec7b9329b6e72c0bf6e42dd0773263522211ff7a128e6b0e74cee4ef246197",
+ "format": 1
+ },
+ "format": 1
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/README.md b/collections-debian-merged/ansible_collections/community/general/README.md
new file mode 100644
index 00000000..0e4bbfee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/README.md
@@ -0,0 +1,98 @@
+# Community General Collection
+
+[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-1)](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
+
+This repo contains the `community.general` Ansible Collection. The collection includes many modules and plugins supported by the Ansible community that are not part of more specialized community collections.
+
+You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+
+## Tested with Ansible
+
+Tested with the current Ansible 2.9 and 2.10 releases and the current development version of Ansible. Ansible versions before 2.9.10 are not supported.
+
+## External requirements
+
+Some modules and plugins require external libraries. Please check the requirements for each plugin or module you use in the documentation to find out which requirements are needed.
+
+## Included content
+
+Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+
+## Using this collection
+
+Before using the community.general collection, you need to install it with the `ansible-galaxy` CLI:
+
+ ansible-galaxy collection install community.general
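+
+If you need a specific version, for example the one packaged in this tree (1.3.6 according to `MANIFEST.json`), an exact pin can be appended after a colon; a minimal sketch:
+
+ ansible-galaxy collection install community.general:==1.3.6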
+
+You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
+
+```yaml
+collections:
+- name: community.general
+```
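+
+Once installed, content is referenced by its fully qualified collection name (FQCN). A minimal sketch of a playbook task; the module and values are illustrative only, and the `timezone` module has its own requirements on the target host:
+
+```yaml
+- hosts: all
+  become: true
+  tasks:
+    - name: Set the system timezone (illustrative example)
+      community.general.timezone:
+        name: Europe/Berlin
+```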
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
+
+## Contributing to this collection
+
+If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the directories configured in [`COLLECTIONS_PATHS`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
+
+For example, if you are working in the `~/dev` directory:
+
+```
+cd ~/dev
+git clone git@github.com:ansible-collections/community.general.git collections/ansible_collections/community/general
+export COLLECTIONS_PATH=$(pwd)/collections:$COLLECTIONS_PATH
+```
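+
+To check that Ansible picks up the local checkout, you can list the collection from that path; a minimal sketch, assuming ansible-base 2.10 or later (where `ansible-galaxy collection list` is available):
+
+```
+ansible-galaxy collection list community.general
+```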
+
+You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+
+### Running tests
+
+See the [testing collections guide](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
+
+### Communication
+
+We have a dedicated Working Group for Ansible development.
+
+You can find other interested people on the following Freenode IRC channels:
+- `#ansible` - For general use questions and support.
+- `#ansible-devel` - For discussions on developer topics and code related to features or bugs.
+- `#ansible-community` - For discussions on community topics and community meetings.
+
+For more information about communities, meetings and agendas, see the [Community Wiki](https://github.com/ansible/community/wiki/Community).
+
+For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/latest/community/communication.html).
+
+### Publishing a New Version
+
+Basic instructions without release branches:
+
+1. Create `changelogs/fragments/<version>.yml` with a `release_summary:` section (which must be a string, not a list); see the example fragment after this list.
+2. Run `antsibull-changelog release --collection-flatmap yes`.
+3. Make sure `CHANGELOG.rst` and `changelogs/changelog.yaml` are added to git, and the deleted fragments have been removed.
+4. Tag the commit with `<version>`. Push changes and tag to the main repository.
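+
+As an illustration, a hypothetical fragment `changelogs/fragments/1.3.0.yml` (the version is made up) could look like this; note that `release_summary` is a plain string, written here as a YAML block scalar, not a list:
+
+```yaml
+release_summary: |
+  This is a minor release of the ``community.general`` collection.
+  See the changelog entries below for all included changes.
+```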
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.rst).
+
+## Roadmap
+
+See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning and deprecation.
+
+In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
+
+## More information
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+
+## Licensing
+
+GNU General Public License v3.0 or later.
+
+See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) for the full text.
diff --git a/collections-debian-merged/ansible_collections/community/general/changelogs/.gitignore b/collections-debian-merged/ansible_collections/community/general/changelogs/.gitignore
new file mode 100644
index 00000000..6be6b533
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/changelogs/.gitignore
@@ -0,0 +1 @@
+/.plugin-cache.yaml
diff --git a/collections-debian-merged/ansible_collections/community/general/changelogs/changelog.yaml b/collections-debian-merged/ansible_collections/community/general/changelogs/changelog.yaml
new file mode 100644
index 00000000..ff11138f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/changelogs/changelog.yaml
@@ -0,0 +1,1952 @@
+ancestor: null
+releases:
+ 0.2.0:
+ changes:
+ breaking_changes:
+ - The environment variable for the auth context for the oc.py connection plugin
+ has been corrected (K8S_CONTEXT). It was using an initial lowercase k by
+ mistake. (https://github.com/ansible-collections/community.general/pull/377).
+ - bigpanda - the parameter ``message`` was renamed to ``deployment_message``
+ since ``message`` is used by Ansible Core engine internally.
+ - cisco_spark - the module option ``message`` was renamed to ``msg``, as ``message``
+ is used internally in Ansible Core engine (https://github.com/ansible/ansible/issues/39295)
+ - datadog - the parameter ``message`` was renamed to ``notification_message``
+ since ``message`` is used by Ansible Core engine internally.
+ - 'docker_container - no longer passes information on non-anonymous volumes
+ or binds as ``Volumes`` to the Docker daemon. This increases compatibility
+ with the ``docker`` CLI program. Note that if you specify ``volumes: strict``
+ in ``comparisons``, this could cause existing containers created with docker_container
+ from Ansible 2.9 or earlier to restart.'
+ - 'docker_container - support for port ranges was adjusted to be more compatible
+ to the ``docker`` command line utility: a one-port container range combined
+ with a multiple-port host range will no longer result in only the first host
+ port being used, but the whole range being passed to Docker so that a free port
+ in that range will be used.'
+ - hashi_vault lookup - now returns the latest version when using the KV v2 secrets
+ engine. Previously, it returned all versions of the secret which required
+ additional steps to extract and filter the desired version.
+ bugfixes:
+ - Convert MD5SUM to lowercase before comparison in maven_artifact module (https://github.com/ansible-collections/community.general/issues/186).
+ - Fix GitLab modules authentication by handling `python-gitlab` library version
+ >= 1.13.0 (https://github.com/ansible/ansible/issues/64770)
+ - Fix SSL protocol references in the ``mqtt`` module to prevent failures on
+ Python 2.6.
+ - Fix the ``xml`` module to use ``list(elem)`` instead of ``elem.getchildren()``
+ since it is being removed in Python 3.9
+ - Fix to return XML as a string even for python3 (https://github.com/ansible/ansible/pull/64032).
+ - Fixes the URL handling in the lxd_container module so that a URL can be specified
+ in an lxd environment created by snap.
+ - Fixes the URL handling in the lxd_profile module so that a URL can be specified
+ in an lxd environment created by snap.
+ - Redact GitLab Project variables which might include sensitive information
+ such as passwords, API keys and other project-related details.
+ - Run command in absent state in atomic_image module.
+ - When deleting a gitlab user, name, email and password are no longer required
+ in the gitlab_user module (https://github.com/ansible/ansible/issues/61921).
+ - airbrake_deployment - Allow deploy notifications for Airbrake compatible v2
+ api (e.g. Errbit)
+ - apt_rpm - fix ``package`` type from ``str`` to ``list`` to fix invoking with
+ list of packages (https://github.com/ansible-collections/community.general/issues/143).
+ - archive - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+ - become - Fix various plugins that still used play_context to get the become
+ password instead of through the plugin - https://github.com/ansible/ansible/issues/62367
+ - cloudflare_dns - fix KeyError 'success' (https://github.com/ansible-collections/community.general/issues/236).
+ - cronvar - only run ``get_bin_path()`` once
+ - cronvar - use correct binary name (https://github.com/ansible/ansible/issues/63274)
+ - cronvar - use get_bin_path utility to locate the default crontab executable
+ instead of the hardcoded /usr/bin/crontab. (https://github.com/ansible/ansible/pull/59765)
+ - cyberarkpassword - fix invalid attribute access (https://github.com/ansible/ansible/issues/66268)
+ - datadog_monitor - Corrects ``_update_monitor`` to use ``notification_message``
+ instead of the deprecated ``message`` (https://github.com/ansible-collections/community.general/pull/389).
+ - datadog_monitor - added missing ``log alert`` type to ``type`` choices (https://github.com/ansible-collections/community.general/issues/251).
+ - dense callback - fix plugin access to its configuration variables and remove
+ a warning message (https://github.com/ansible/ansible/issues/64628).
+ - digital_ocean_droplet - Fix creation of DigitalOcean droplets using digital_ocean_droplet
+ module (https://github.com/ansible/ansible/pull/61655)
+ - docker connection plugin - do not prefix remote path if running on Windows
+ containers.
+ - docker_compose - fix issue where docker deprecation warning results in ansible
+ erroneously reporting a failure
+ - docker_container - fix idempotency for IP addresses for networks. The old
+ implementation checked the effective IP addresses assigned by the Docker daemon,
+ and not the specified ones. This causes idempotency issues for containers
+ which are not running, since they have no effective IP addresses assigned.
+ - docker_container - fix network idempotence comparison error.
+ - docker_container - improve error behavior when parsing port ranges fails.
+ - docker_container - make sure that when image is missing, check mode indicates
+ a change (image will be pulled).
+ - 'docker_container - passing ``test: [NONE]`` now actually disables the image''s
+ healthcheck, as documented.'
+ - docker_container - wait for removal of container if docker API returns early
+ (https://github.com/ansible/ansible/issues/65811).
+ - docker_image - fix validation of build options.
+ - docker_image - improve file handling when loading images from disk.
+ - docker_image - make sure that deprecated options also emit proper deprecation
+ warnings next to warnings which indicate how to replace them.
+ - docker_login - Use ``with`` statement when accessing files, to prevent that
+ invalid JSON output is produced.
+ - docker_login - correct broken fix for https://github.com/ansible/ansible/pull/60381
+ which crashes for Python 3.
+ - docker_login - fix error handling when ``username`` or ``password`` is not
+ specified when ``state`` is ``present``.
+ - docker_login - make sure that ``~/.docker/config.json`` is created with permissions
+ ``0600``.
+ - docker_machine - fallback to ip subcommand output if IPAddress is missing
+ (https://github.com/ansible-collections/community.general/issues/412).
+ - docker_network - fix idempotence comparison error.
+ - docker_network - fix idempotency for multiple IPAM configs of the same IP
+ version (https://github.com/ansible/ansible/issues/65815).
+ - docker_network - validate IPAM config subnet CIDR notation on module setup
+ and not during idempotence checking.
+ - docker_node_info - improve error handling when service inspection fails, for
+ example because node name being ambiguous (https://github.com/ansible/ansible/issues/63353,
+ PR https://github.com/ansible/ansible/pull/63418).
+ - docker_swarm_service - ``source`` must no longer be specified for ``tmpfs``
+ mounts.
+ - docker_swarm_service - fix task always reporting as changed when using ``healthcheck.start_period``.
+ - 'docker_swarm_service - passing ``test: [NONE]`` now actually disables the
+ image''s healthcheck, as documented.'
+ - firewalld - enable the firewalld module to function offline with firewalld
+ version 0.7.0 and newer (https://github.com/ansible/ansible/issues/63254)
+ - flatpak and flatpak_remote - fix command line construction to build commands
+ as lists instead of strings.
+ - gcp_storage_file lookup - die gracefully when the ``google.cloud`` collection
+ is not installed, or changed in an incompatible way.
+ - github_deploy_key - added support for pagination
+ - gitlab_user - Fix adding ssh key to new/changed user and adding group membership
+ for new/changed user
+ - hashi_vault - Fix KV v2 lookup to always return latest version
+ - hashi_vault - Handle equal sign in key=value (https://github.com/ansible/ansible/issues/55658).
+ - hashi_vault - error messages are now user friendly and don't contain the secret
+ name (https://github.com/ansible-collections/community.general/issues/54).
+ - hashi_vault - if used via ``with_hashi_vault`` and a list of n secrets to
+ retrieve, only the first one would be retrieved and returned n times.
+ - hashi_vault - when a non-token authentication method like ldap or userpass
+ failed, but a valid token was loaded anyway (via env or token file), the token
+ was used to attempt authentication, hiding the failure of the requested auth
+ method.
+ - homebrew - fix some functions of the Homebrew module ignoring the check_mode option
+ (https://github.com/ansible/ansible/pull/65387).
+ - influxdb_user - Don't grant admin privilege in check mode
+ - ipa modules - fix error when IPA_HOST is empty and fallback on DNS (https://github.com/ansible-collections/community.general/pull/241)
+ - java_keystore - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+ - jira - printing full error message from jira server (https://github.com/ansible-collections/community.general/pull/22).
+ - jira - transition issue not working (https://github.com/ansible-collections/community.general/issues/109).
+ - linode inventory plugin - fix parsing of access_token (https://github.com/ansible/ansible/issues/66874)
+ - manageiq_provider - fix serialization error when running in a python3 environment.
+ - maven_artifact - make module compatible with older Ansible versions (https://github.com/ansible-collections/community.general/pull/306).
+ - mysql - don't mask ``mysql_connect`` function errors from modules (https://github.com/ansible/ansible/issues/64560).
+ - mysql_db - fix Broken pipe error appearance when state is import and the target
+ file is compressed (https://github.com/ansible/ansible/issues/20196).
+ - mysql_db - fix bug in the ``db_import`` function introduced by https://github.com/ansible/ansible/pull/56721
+ (https://github.com/ansible/ansible/issues/65351).
+ - mysql_info - add parameter for __collect to get only what is wanted (https://github.com/ansible-collections/community.general/pull/136).
+ - mysql_replication - allow passing empty values to parameters (https://github.com/ansible/ansible/issues/23976).
+ - mysql_user - Fix idempotence when long grant lists are used (https://github.com/ansible/ansible/issues/68044)
+ - mysql_user - Remove false positive ``no_log`` warning for ``update_password``
+ option
+ - mysql_user - add ``INVOKE LAMBDA`` privilege support (https://github.com/ansible-collections/community.general/issues/283).
+ - mysql_user - fix ``host_all`` arguments conversion string formatting error
+ (https://github.com/ansible/ansible/issues/29644).
+ - mysql_user - fix support privileges with underscore (https://github.com/ansible/ansible/issues/66974).
+ - mysql_user - fix the error No database selected (https://github.com/ansible/ansible/issues/68070).
+ - mysql_user - make sure current_pass_hash is a string before using it in comparison
+ (https://github.com/ansible/ansible/issues/60567).
+ - mysql_variable - fix the module not supporting variable names containing a dot (https://github.com/ansible/ansible/issues/54239).
+ - nmcli - typecast parameters to string as required (https://github.com/ansible/ansible/issues/59095).
+ - nsupdate - Do not try fixing non-existing TXT values (https://github.com/ansible/ansible/issues/63364)
+ - nsupdate - Fix zone name lookup of internal/private zones (https://github.com/ansible/ansible/issues/62052)
+ - one_vm - improve file handling by using a context manager.
+ - ovirt - don't ignore ``instance_cpus`` parameter
+ - pacman - Fix pacman output parsing on localized environment. (https://github.com/ansible/ansible/issues/65237)
+ - 'pacman - fix module crash with ``IndexError: list index out of range`` (https://github.com/ansible/ansible/issues/63077)'
+ - pamd - Bugfix for attribute error when removing the first or last line
+ - parted - added 'undefined' align option to support parted versions < 2.1 (https://github.com/ansible-collections/community.general/pull/405).
+ - parted - consider current partition state even in check mode (https://github.com/ansible-collections/community.general/issues/183).
+ - passwordstore lookup - Honor equal sign in userpass
+ - pmrun plugin - The success_command string was no longer quoted. This caused
+ unusual use-cases like ``become_flags=su - root -c`` to fail.
+ - postgres - use query params with cursor.execute in module_utils.postgres.PgMembership
+ class (https://github.com/ansible/ansible/pull/65164).
+ - postgres.py - add a new keyword argument ``query_params`` (https://github.com/ansible/ansible/pull/64661).
+ - postgres_user - Remove false positive ``no_log`` warning for ``no_password_changes``
+ option
+ - postgresql_db - Removed exception for 'LibraryError' (https://github.com/ansible/ansible/issues/65223).
+ - postgresql_db - allow passing user names which contain dots (https://github.com/ansible/ansible/issues/63204).
+ - postgresql_idx.py - use the ``query_params`` arg of exec_sql function (https://github.com/ansible/ansible/pull/64661).
+ - postgresql_lang - use query params with cursor.execute (https://github.com/ansible/ansible/pull/65093).
+ - postgresql_membership - make the ``groups`` and ``target_roles`` parameters
+ required (https://github.com/ansible/ansible/pull/67046).
+ - postgresql_membership - remove unused import of exec_sql function (https://github.com/ansible-collections/community.general/pull/178).
+ - postgresql_owner - use query_params with cursor object (https://github.com/ansible/ansible/pull/65310).
+ - postgresql_privs - fix sorting lists with None elements for python3 (https://github.com/ansible/ansible/issues/65761).
+ - postgresql_privs - sort results before comparing so that the values are compared
+ and not the result of ``.sort()`` (https://github.com/ansible/ansible/pull/65125)
+ - postgresql_privs.py - fix reports as changed behavior of module when using
+ ``type=default_privs`` (https://github.com/ansible/ansible/issues/64371).
+ - postgresql_publication - fix typo in module.warn method name (https://github.com/ansible/ansible/issues/64582).
+ - postgresql_publication - use query params arg with cursor object (https://github.com/ansible/ansible/issues/65404).
+ - postgresql_query - improve file handling by using a context manager.
+ - postgresql_query - fix the module not supporting non-ASCII characters in SQL
+ files with Python3 (https://github.com/ansible/ansible/issues/65367).
+ - postgresql_schema - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65679).
+ - postgresql_sequence - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65787).
+ - postgresql_set - fix converting value to uppercase (https://github.com/ansible/ansible/issues/67377).
+ - postgresql_set - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+ - postgresql_slot - make the ``name`` parameter required (https://github.com/ansible/ansible/pull/67046).
+ - postgresql_slot - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+ - postgresql_subscription - fix typo in module.warn method name (https://github.com/ansible/ansible/pull/64583).
+ - postgresql_subscription - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65791).
+ - postgresql_table - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+ - postgresql_tablespace - make the ``tablespace`` parameter required (https://github.com/ansible/ansible/pull/67046).
+ - postgresql_tablespace - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+ - postgresql_user - allow passing user names which contain dots (https://github.com/ansible/ansible/issues/63204).
+ - postgresql_user - use query parameters with cursor object (https://github.com/ansible/ansible/pull/65862).
+ - proxmox - fix version detection of proxmox 6 and up (Fixes https://github.com/ansible/ansible/issues/59164)
+ - proxysql - fixed mysql dictcursor
+ - pulp_repo - the ``client_cert`` and ``client_key`` options were used for both
+ requests to the Pulp instance and for the repo to sync with, resulting in
+ errors when they were used. Use the new options ``feed_client_cert`` and ``feed_client_key``
+ for client certificates that should only be used for repo synchronisation,
+ and not for communication with the Pulp instance. (https://github.com/ansible/ansible/issues/59513)
+ - puppet - fix command line construction for check mode and ``manifest:``
+ - pure - fix incorrect user_string setting in module_utils file (https://github.com/ansible/ansible/pull/66914)
+ - redfish_command - fix EnableAccount if Enabled property is not present in
+ Account resource (https://github.com/ansible/ansible/issues/59822)
+ - redfish_command - fix error when deleting a disabled Redfish account (https://github.com/ansible/ansible/issues/64684)
+ - redfish_command - fix power ResetType mapping logic (https://github.com/ansible/ansible/issues/59804)
+ - redfish_config - fix support for boolean bios attrs (https://github.com/ansible/ansible/pull/68251)
+ - redfish_facts - fix KeyError exceptions in GetLogs (https://github.com/ansible/ansible/issues/59797)
+ - redhat_subscription - do not set the default quantity to ``1`` when no quantity
+ is provided (https://github.com/ansible/ansible/issues/66478)
+ - replace use of deprecated functions from ``ansible.module_utils.basic``.
+ - rhsm_repository - reduce execution time when changed is False (https://github.com/ansible-collections/community.general/pull/458).
+ - runas - Fix the ``runas`` ``become_pass`` variable fallback from ``ansible_runas_runas``
+ to ``ansible_runas_pass``
+ - scaleway - Fix bug causing KeyError exception on JSON http requests. (https://github.com/ansible-collections/community.general/pull/444)
+ - 'scaleway: use jsonify unmarshaller only for application/json requests to
+ avoid breaking the multiline configuration with requests in text/plain (https://github.com/ansible/ansible/issues/65036)'
+ - scaleway_compute - fix transition handling that could cause errors when removing
+ a node (https://github.com/ansible-collections/community.general/pull/444).
+ - 'scaleway_compute(check_image_id): use get image instead of looping on the first
+ page of image results'
+ - sesu - make use of the prompt specified in the code
+ - slack - Fix ``thread_id`` data type
+ - slackpkg - fix matching some special cases in package names (https://github.com/ansible-collections/community.general/pull/505).
+ - slackpkg - fix name matching in package installation (https://github.com/ansible-collections/community.general/issues/450).
+ - spacewalk inventory - improve file handling by using a context manager.
+ - syslog_json callback - fix plugin exception when running (https://github.com/ansible-collections/community.general/issues/407).
+ - syslogger callback plugin - remove check mode support since it did nothing
+ anyway
+ - terraform - add support for absolute paths in addition to the relative path
+ within project_path (https://github.com/ansible/ansible/issues/58578)
+ - terraform - reset out and err before plan creation (https://github.com/ansible/ansible/issues/64369)
+ - terraform module - fixes usage for providers not supporting workspaces
+ - yarn - Return correct values when running yarn in check mode (https://github.com/ansible-collections/community.general/pull/153).
+ - yarn - handle no version when installing module by name (https://github.com/ansible/ansible/issues/55097)
+ - zfs_delegate_admin - add missing choices diff/hold/release to the permissions
+ parameter (https://github.com/ansible-collections/community.general/pull/278)
+ deprecated_features:
+ - airbrake_deployment - Add deprecation notice for ``token`` parameter and v2
+ api deploys. This feature will be removed in community.general 3.0.0.
+ - clc_aa_policy - The ``wait`` option had no effect and will be removed in community.general
+ 3.0.0.
+ - clc_aa_policy - the ``wait`` parameter will be removed. It has always been
+ ignored by the module.
+ - docker_container - the ``trust_image_content`` option is now deprecated and
+ will be removed in community.general 3.0.0. It has never been used by the
+ module.
+ - docker_container - the ``trust_image_content`` option will be removed. It
+ has always been ignored by the module.
+ - docker_container - the default of ``container_default_behavior`` will change
+ from ``compatibility`` to ``no_defaults`` in community.general 3.0.0. Set
+ the option to an explicit value to avoid a deprecation warning.
+ - docker_container - the default value for ``network_mode`` will change in community.general
+ 3.0.0, provided at least one network is specified and ``networks_cli_compatible``
+ is ``true``. See porting guide, module documentation or deprecation warning
+ for more details.
+ - docker_stack - Return values ``out`` and ``err`` have been deprecated and
+ will be removed in community.general 3.0.0. Use ``stdout`` and ``stderr``
+ instead.
+ - docker_stack - the return values ``err`` and ``out`` have been deprecated.
+ Use ``stdout`` and ``stderr`` from now on instead.
+ - helm - The ``helm`` module has been deprecated. A new implementation is available
+ in the community.kubernetes collection.
+ - redfish_config - Deprecate ``bios_attribute_name`` and ``bios_attribute_value``
+ in favor of the new ``bios_attributes`` option.
+ - redfish_config - the ``bios_attribute_name`` and ``bios_attribute_value``
+ options will be removed. To maintain the existing behavior use the ``bios_attributes``
+ option instead.
+ - redfish_config and redfish_command - the behavior to select the first System,
+ Manager, or Chassis resource to modify when multiple are present will be removed.
+ Use the new ``resource_id`` option to specify target resource to modify.
+ - redfish_config, redfish_command - Behavior to modify the first System, Manager,
+ or Chassis resource when multiple are present is deprecated. Use the new ``resource_id``
+ option to specify target resource to modify.
+ major_changes:
+ - docker_container - the ``network_mode`` option will be set by default to the
+ name of the first network in ``networks`` if at least one network is given
+ and ``networks_cli_compatible`` is ``true`` (will be default from community.general
+ 2.0.0 on). Set to an explicit value to avoid deprecation warnings if you specify
+ networks and set ``networks_cli_compatible`` to ``true``. The current default
+ (not specifying it) is equivalent to the value ``default``.
+ - docker_container - the module has a new option, ``container_default_behavior``,
+ whose default value will change from ``compatibility`` to ``no_defaults``.
+ Set to an explicit value to avoid deprecation warnings.
+ - gitlab_user - no longer requires ``name``, ``email`` and ``password`` arguments
+ when ``state=absent``.
+ minor_changes:
+ - A new filter ``to_time_unit`` with specializations ``to_milliseconds``, ``to_seconds``,
+ ``to_minutes``, ``to_hours``, ``to_days``, ``to_weeks``, ``to_months`` and
+ ``to_years`` has been added. For example ``'2d 4h' | community.general.to_hours``
+ evaluates to 52.
+ - Add a ``make`` option to the make module to allow choosing a specific make
+ executable
+ - Add information about changed packages in homebrew returned facts (https://github.com/ansible/ansible/issues/59376).
+ - Follow up changes in homebrew_cask (https://github.com/ansible/ansible/issues/34696).
+ - Moved OpenStack dynamic inventory script to the OpenStack Collection.
+ - Remove redundant encoding in json.load call in ipa module_utils (https://github.com/ansible/ansible/issues/66592).
+ - Updated documentation about netstat command requirement for listen_ports_facts
+ module (https://github.com/ansible/ansible/issues/68077).
+ - airbrake_deployment - Allow passing ``project_id`` and ``project_key`` for
+ v4 api deploy compatibility
+ - ali_instance - Add params ``unique_suffix``, ``tags``, ``purge_tags``, ``ram_role_name``,
+ ``spot_price_limit``, ``spot_strategy``, ``period_unit``, ``dry_run``, ``include_data_disks``
+ - ali_instance and ali_instance_info - the required package footmark needs a
+ version higher than 1.19.0
+ - ali_instance_info - Add params ``name_prefix``, ``filters``
+ - alicloud modules - Add authentication params to all modules
+ - alicloud modules - now only support Python 3.6 and no longer support Python 2.x
+ - cisco_spark - the module has been renamed to ``cisco_webex`` (https://github.com/ansible-collections/community.general/pull/457).
+ - cloudflare_dns - Report unexpected failure with more detail (https://github.com/ansible-collections/community.general/pull/511).
+ - database - add support to unique indexes in postgresql_idx
+ - digital_ocean_droplet - add support for new vpc_uuid parameter
+ - docker connection plugin - run PowerShell modules on Windows containers.
+ - docker_container - add ``cpus`` option (https://github.com/ansible/ansible/issues/34320).
+ - docker_container - add new ``container_default_behavior`` option (PR https://github.com/ansible/ansible/pull/63419).
+ - docker_container - allow to configure timeout when the module waits for a
+ container's removal.
+ - 'docker_container - only passes anonymous volumes to docker daemon as ``Volumes``.
+ This increases compatibility with the ``docker`` CLI program. Note that if
+ you specify ``volumes: strict`` in ``comparisons``, this could cause existing
+ containers created with docker_container from Ansible 2.9 or earlier to restart.'
+ - 'docker_container - support for port ranges was adjusted to be more compatible
+ to the ``docker`` command line utility: a one-port container range combined
+ with a multiple-port host range will no longer result in only the first host
+ port being used, but the whole range being passed to Docker so that a free port
+ in that range will be used.'
+ - docker_container.py - update a container's restart_policy without restarting
+ the container (https://github.com/ansible/ansible/issues/65993)
+ - docker_stack - Added ``stdout``, ``stderr``, and ``rc`` to return values.
+ - docker_swarm_service - Added support for ``init`` option.
+ - docker_swarm_service - Sort lists when checking for changes.
+ - firewalld - new feature, can now set ``target`` for a ``zone`` (https://github.com/ansible-collections/community.general/pull/526).
+ - flatpak and flatpak_remote - use ``module.run_command()`` instead of ``subprocess.Popen()``.
+ - gitlab_project_variable - implement masked and protected attributes
+ - gitlab_project_variable - implemented variable_type attribute.
+ - hashi_vault - AWS IAM auth method added. Accepts standard ansible AWS params
+ and only loads AWS libraries when needed.
+ - hashi_vault - INI and additional ENV sources made available for some new and
+ old options.
+ - hashi_vault - ``secret`` can now be an unnamed argument if it's specified
+ first in the term string (see examples).
+ - hashi_vault - ``token`` is now an explicit option (and the default) in the
+ choices for ``auth_method``. This matches previous behavior (``auth_method``
+ omitted resulted in token auth) but makes the value clearer and allows it
+ to be explicitly specified.
+ - hashi_vault - new option ``return_format`` added to control how secrets are
+ returned, including options for multiple secrets and returning raw values
+ with metadata.
+ - hashi_vault - previous (undocumented) behavior was to attempt to read token
+ from ``~/.vault-token`` if not specified. This is now controlled through ``token_path``
+ and ``token_file`` options (defaults will mimic previous behavior).
+ - hashi_vault - previously all options had to be supplied via key=value pairs
+ in the term string; now a mix of string and parameters can be specified (see
+ examples).
+ - hashi_vault - uses newer authentication calls in the HVAC library and falls
+ back to older ones with deprecation warnings.
+ - homebrew - Added environment variable to honor update_homebrew setting (https://github.com/ansible/ansible/issues/56650).
+ - homebrew - New option ``upgrade_options`` allows passing flags to upgrade
+ - homebrew - ``install_options`` is now validated to be a list of strings.
+ - homebrew_tap - ``name`` is now validated to be a list of strings.
+ - idrac_redfish_config - Support for multiple manager attributes configuration
+ - java_keystore - add the private_key_passphrase parameter (https://github.com/ansible-collections/community.general/pull/276).
+ - jira - added search function with support for Jira JQL (https://github.com/ansible-collections/community.general/pull/22).
+ - jira - added update function which can update Jira Selects etc (https://github.com/ansible-collections/community.general/pull/22).
+ - lvg - add ``pvresize`` new parameter (https://github.com/ansible/ansible/issues/29139).
+ - mysql_db - add ``master_data`` parameter (https://github.com/ansible/ansible/pull/66048).
+ - mysql_db - add ``skip_lock_tables`` option (https://github.com/ansible/ansible/pull/66688).
+ - mysql_db - add the ``check_implicit_admin`` parameter (https://github.com/ansible/ansible/issues/24418).
+ - mysql_db - add the ``config_overrides_defaults`` parameter (https://github.com/ansible/ansible/issues/26919).
+ - mysql_db - add the ``dump_extra_args`` parameter (https://github.com/ansible/ansible/pull/67747).
+ - mysql_db - add the ``executed_commands`` returned value (https://github.com/ansible/ansible/pull/65498).
+ - mysql_db - add the ``force`` parameter (https://github.com/ansible/ansible/pull/65547).
+ - mysql_db - add the ``restrict_config_file`` parameter (https://github.com/ansible/ansible/issues/34488).
+ - mysql_db - add the ``unsafe_login_password`` parameter (https://github.com/ansible/ansible/issues/63955).
+ - mysql_db - add the ``use_shell`` parameter (https://github.com/ansible/ansible/issues/20196).
+ - mysql_info - add ``exclude_fields`` parameter (https://github.com/ansible/ansible/issues/63319).
+ - mysql_info - add ``global_status`` filter parameter option and return (https://github.com/ansible/ansible/pull/63189).
+ - mysql_info - add ``return_empty_dbs`` parameter to list empty databases (https://github.com/ansible/ansible/issues/65727).
+ - mysql_replication - add ``channel`` parameter (https://github.com/ansible/ansible/issues/29311).
+ - mysql_replication - add ``connection_name`` parameter (https://github.com/ansible/ansible/issues/46243).
+ - mysql_replication - add ``fail_on_error`` parameter (https://github.com/ansible/ansible/pull/66252).
+ - mysql_replication - add ``master_delay`` parameter (https://github.com/ansible/ansible/issues/51326).
+ - mysql_replication - add ``master_use_gtid`` parameter (https://github.com/ansible/ansible/pull/62648).
+ - mysql_replication - add ``queries`` return value (https://github.com/ansible/ansible/pull/63036).
+ - mysql_replication - add support of ``resetmaster`` choice to ``mode`` parameter
+ (https://github.com/ansible/ansible/issues/42870).
+ - mysql_user - ``priv`` parameter can be string or dictionary (https://github.com/ansible/ansible/issues/57533).
+ - mysql_user - add ``plugin_auth_string`` parameter (https://github.com/ansible/ansible/pull/44267).
+ - mysql_user - add ``plugin_hash_string`` parameter (https://github.com/ansible/ansible/pull/44267).
+ - mysql_user - add ``plugin`` parameter (https://github.com/ansible/ansible/pull/44267).
+ - mysql_user - add the resource_limits parameter (https://github.com/ansible-collections/community.general/issues/133).
+ - mysql_variables - add ``mode`` parameter (https://github.com/ansible/ansible/issues/60119).
+ - nagios module - a start parameter has been added, allowing the time a Nagios
+ outage starts to be set. It defaults to the current time if not provided,
+ preserving the previous behavior and ensuring compatibility with existing
+ playbooks.
+ - nsupdate - Use provided TSIG key to not only sign update queries but also
+ lookup queries
+ - open_iscsi - allow ``portal`` parameter to be a domain name by resolving the
+ portal ip address beforehand (https://github.com/ansible-collections/community.general/pull/461).
+ - packet_device - add ``tags`` parameter on device creation (https://github.com/ansible-collections/community.general/pull/418)
+ - 'pacman - Improve package state detection speed: Don''t query for full details
+ of a package.'
+ - parted - add the ``fs_type`` parameter (https://github.com/ansible-collections/community.general/issues/135).
+ - pear - added ``prompts`` parameter to allow users to specify expected prompts
+ that could otherwise hang Ansible execution (https://github.com/ansible-collections/community.general/pull/530).
+ - postgresql_copy - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/313).
+ - postgresql_db - add ``dump_extra_args`` parameter (https://github.com/ansible/ansible/pull/66717).
+ - postgresql_db - add support for the .pgc file format for dumps and restores.
+ - postgresql_db - add the ``executed_commands`` returned value (https://github.com/ansible/ansible/pull/65542).
+ - postgresql_db - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/issues/106).
+ - postgresql_ext - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/282).
+ - postgresql_ext - refactor to simplify and remove dead code (https://github.com/ansible-collections/community.general/pull/291)
+ - postgresql_ext - use query parameters with cursor object (https://github.com/ansible/ansible/pull/64994).
+ - postgresql_idx - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/264).
+ - postgresql_idx - refactor to simplify code (https://github.com/ansible-collections/community.general/pull/291)
+ - postgresql_info - add collecting info about logical replication publications
+ in databases (https://github.com/ansible/ansible/pull/67614).
+ - postgresql_info - add collection info about replication subscriptions (https://github.com/ansible/ansible/pull/67464).
+ - postgresql_info - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/308).
+ - postgresql_lang - add ``owner`` parameter (https://github.com/ansible/ansible/pull/62999).
+ - postgresql_lang - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/272).
+ - postgresql_membership - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/158).
+ - postgresql_owner - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/198).
+ - postgresql_ping - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/312).
+ - postgresql_ping - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/312).
+ - postgresql_privs - add support for TYPE as object types in postgresql_privs
+ module (https://github.com/ansible/ansible/issues/62432).
+ - postgresql_privs - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/177).
+ - postgresql_publication - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/279).
+ - postgresql_publication - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/279).
+ - postgresql_query - add the ``encoding`` parameter (https://github.com/ansible/ansible/issues/65367).
+ - postgresql_query - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/294).
+ - postgresql_schema - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/259).
+ - postgresql_sequence - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/295).
+ - postgresql_set - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/302).
+ - postgresql_slot - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/298).
+ - postgresql_subscription - add the ``session_role`` parameter (https://github.com/ansible-collections/community.general/pull/280).
+ - postgresql_subscription - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/280).
+ - postgresql_table - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/307).
+ - postgresql_tablespace - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/240).
+ - postgresql_user - add scram-sha-256 support (https://github.com/ansible/ansible/issues/49878).
+ - postgresql_user - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/116).
+ - postgresql_user - add the comment parameter (https://github.com/ansible/ansible/pull/66711).
+ - postgresql_user_obj_stat_info - add the ``trust_input`` parameter (https://github.com/ansible-collections/community.general/pull/310).
+ - postgresql_user_obj_stat_info - refactor to simplify code (https://github.com/ansible-collections/community.general/pull/291)
+ - proxmox - add the ``description`` and ``hookscript`` parameter (https://github.com/ansible-collections/community.general/pull/245).
+ - redfish_command - Support for virtual media insert and eject commands (https://github.com/ansible-collections/community.general/issues/493)
+ - redfish_config - New ``bios_attributes`` option to allow setting multiple
+ BIOS attributes in one command.
+ - redfish_config, redfish_command - Add ``resource_id`` option to specify which
+ System, Manager, or Chassis resource to modify.
+ - redis - add TLS support to redis cache plugin (https://github.com/ansible-collections/community.general/pull/410).
+ - rhn_channel - Added ``validate_certs`` option (https://github.com/ansible/ansible/issues/68374).
+ - rundeck modules - added new options ``client_cert``, ``client_key``, ``force``,
+ ``force_basic_auth``, ``http_agent``, ``url_password``, ``url_username``,
+ ``use_proxy``, ``validate_certs`` to allow changing fetch_url parameters.
+ - slack - Add support for user/bot/application tokens (using Slack WebAPI)
+ - slack - Return ``thread_id`` with thread timestamp when user/bot/application
+ tokens are used
+ - syslogger - added new parameter ident to specify the name of the application
+ that is sending the message to syslog (https://github.com/ansible-collections/community.general/issues/319).
+ - terraform - Adds option ``backend_config_files``. This can accept a list of
+ paths to multiple configuration files (https://github.com/ansible-collections/community.general/pull/394).
+ - terraform - Adds option ``variables_files`` for multiple var-files (https://github.com/ansible-collections/community.general/issues/224).
+ - ufw - accept ``interface_in`` and ``interface_out`` as parameters.
+ - zypper - Added ``allow_vendor_change`` and ``replacefiles`` zypper options
+ (https://github.com/ansible-collections/community.general/issues/381)
+ release_summary: 'This is the first proper release of the ``community.general``
+ collection on 2020-06-20.
+
+ The changelog describes all changes made to the modules and plugins included
+ in this
+
+ collection since Ansible 2.9.0.
+
+ '
+ removed_features:
+ - core - remove support for ``check_invalid_arguments`` in ``UTMModule``.
+ - pacman - Removed deprecated ``recurse`` option, use ``extra_args=--recursive``
+ instead
+ security_fixes:
+ - '**SECURITY** - CVE-2019-14904 - solaris_zone module accepts zone name and
+ performs actions related to that. However, there is no user input validation
+ done while performing actions. A malicious user could provide a crafted zone
+ name which allows executing commands into the server manipulating the module
+ behaviour. Adding user input validation as per Solaris Zone documentation
+ fixes this issue.'
+ - '**security issue** - Ansible: Splunk and Sumologic callback plugins leak
+ sensitive data in logs (CVE-2019-14864)'
+ - 'ldap_attr, ldap_entry - The ``params`` option has been removed in Ansible-2.10
+ as it circumvents Ansible''s option handling. Setting ``bind_pw`` with the
+ ``params`` option was disallowed in Ansible-2.7, 2.8, and 2.9 as it was insecure. For
+ information about this policy, see the discussion at: https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
+ This fixes CVE-2020-1746'
+ fragments:
+ - 0.2.0.yml
+ - 100-postgresql_user_scram_sha_256_support.yml
+ - 114-puppet-commandline-construction.yml
+ - 115-deprecated-helm-module.yaml
+ - 116-postgresql_user_add_trust_input_parameter.yml
+ - 123-slack-add_bot_token_support_thread_id.yml
+ - 124-airbrake_deployments-api_v4_for_deploy_notices.yml
+ - 142-mysql_user_add_resource_limit_parameter.yml
+ - 151-mysql_db_add_use_shell_parameter.yml
+ - 153-yarn_fix_checkmode-ec61975fc65df7f0.yaml
+ - 158-postgresql_membership_add_trust_input_parameter.yml
+ - 17-postgres_user-no_password_changes-no_log.yml
+ - 177-postgresql_privs_add_trust_input_parameter.yml
+ - 178-postgresql_membership_remove_unused_import.yml
+ - 18-mysql_user-update_password-no_log.yml
+ - 183-parted_check_mode.yml
+ - 184-postgresql_db_add_trust_input_parameter.yml
+ - 186-maven_artifact.yml
+ - 19-passwordstore-equal-sign.yml
+ - 198-postgresql_owner_add_trust_input_parameter.yml
+ - 212-make-path-option.yml
+ - 22-jira.yaml
+ - 221-parted-fs_type-parameter.yml
+ - 223-manageiq_provider-fix-serialization.yml
+ - 225-mysql_user_fix_no_database_selected.yml
+ - 227-sesu-use-defined-prompt.yaml
+ - 23-hashi-vault-lookup-refresh.yaml
+ - 24-homebrew-upgrade_options.yml
+ - 240-postgresql_tablespace_add_trust_input_parameter.yml
+ - 241-fix-ipa-modules-when-ipa_host-empty.yml
+ - 243-cloudflare_dns_fix_keyerror.yml
+ - 245-proxmox.yml
+ - 259-postgresql_schema_add_trust_input_parameter.yml
+ - 26-influxdb_user-admin-check-mode.yml
+ - 264-postgresql_idx_add_trust_input_parameter.yml
+ - 269-flatpak-command-list.yaml
+ - 272-postgresql_lang_add_trust_input_parameter.yml
+ - 274-flatpak-run-command.yaml
+ - 276-java_keystore-private_key_passphrase.yaml
+ - 277-datadog_monitor-adds-missing-log-alert-type.yml
+ - 278-zfs_delegate_admin_add_diff_hold_release.yml
+ - 279-postgresql_publication_add_trust_input_session_role.yml
+ - 280-postgresql_subscription_add_trust_input_session_role.yml
+ - 282-postgresql_ext_add_trust_input.yml
+ - 285-mysql_user_invoke_lambda_support.yml
+ - 291-postgresql_refactor_modules.yml
+ - 29253-pear_add_prompts_parameter.yml
+ - 294-postgresql_query_add_trust_input_parameter.yml
+ - 295-postgresql_sequence_add_trust_input.yml
+ - 298-postgresql_slot_add_trust_input.yml
+ - 302-postgresql_set_add_trust_input_parameter.yml
+ - 306-ansible-2.9-compatibility.yml
+ - 307-postgresql_table_add_trust_input_parameter.yml
+ - 308-postgresql_info_add_trust_input_parameter.yml
+ - 310-postgresql_user_obj_stat_info_add_trust_input.yml
+ - 312-postgresql_ping_add_trust_input_session_role.yml
+ - 313-postgresql_copy_add_trust_input_session_role.yml
+ - 318-linode-inventory-access_token-fix.yaml
+ - 319-syslogger.yml
+ - 326-pacman_improve_package_state_detection_speed.yml
+ - 34696-homebrew_cask.yml
+ - 36-homebrew-elements.yml
+ - 36876-github-deploy-key-fix-pagination.yaml
+ - 37-homebrew_tap-elements.yml
+ - 372-gcp_storage_file-gracefully.yml
+ - 382-install_upgrade_specific_args.yaml
+ - 389-datadog_monitor-corrects-deprecated-message-param.yml
+ - 394-terraform-add-config_file.yml
+ - 405-parted_align_undefined.yml
+ - 407-syslogjson-callback-exception.yml
+ - 410-redis_cache-add_tls_support.yaml
+ - 412-docker-machine-add-ip-fallback.yaml
+ - 418-add-tags-parameter-to-packet-device.yaml
+ - 428-mysql_db_add_unsafe_login_password_param.yml
+ - 442-add-new-parameter-pvresize.yaml
+ - 444-scaleway-improve_removal_handling.yml
+ - 444-scaleway_fix_http_header_casing.yml
+ - 450-slackpkg-package-matching.yml
+ - 457-cisco_webex_spark-rename.yml
+ - 458-rshm_repository-reduce_execution_time_when_changed_is_false.yml
+ - 461-resolve-domain-for-iscsi-portal.yml
+ - 468-mysql_db_add_restrict_config_file_param.yml
+ - 475-digital_ocean_droplet-add-vpcuuid.yaml
+ - 476-docker_swarm_service_add_init_option.yml
+ - 486-mysql_db_add_check_implicit_admin_parameter.yml
+ - 490-mysql_user_fix_cursor_errors.yml
+ - 494-add-redfish-virtual-media-commands.yml
+ - 505-slackpkg_fix_matching_some_special_cases_in_package_names.yml
+ - 511-cloudflare_dns-verbose-failure.yml
+ - 513-mysql_db_config_overrides_defaults.yml
+ - 55658_hashi_vault.yml
+ - 56650-homebrew-update_brew.yml
+ - 58115_nmcli.yml
+ - 58812-support_absolute_paths_additionally.yml
+ - 59376-homebrew_fix.yml
+ - 59522-renamed-module-tls-client-auth-params-to-avoid-overlaping-with-fetch_url.yaml
+ - 59765-cron-cronvar-use-get-bin-path.yaml
+ - 59877-fix-keyerror-in-redfish-getlogs.yaml
+ - 59927-fix-redfish-power-reset-type-mapping.yaml
+ - 60201-idrac-redfish-config-attributes-support.yml
+ - 60961-docker_compose-fix-deprecation-warning.yml
+ - 61562-nagios-start.yaml
+ - 61655-fix-digital-ocean-droplet-create.yaml
+ - 61740-docker_container-port-range-parsing.yml
+ - 61921-gitlab_user.yml
+ - 61961-pacman_remove_recurse_option.yaml
+ - 62329-nsupdate-lookup-internal-zones.yaml
+ - 62348-yarn-no_version_install_fix.yml
+ - 62617-fix-redfish-enable-account-if-enabled-prop-missing.yaml
+ - 62621-docker_login-fix-60381.yaml
+ - 62648-mysql_replication_add_master_use_gtid_param.yml
+ - 62928-docker_container-ip-address-idempotency.yml
+ - 62971-docker_container-image-finding.yml
+ - 62999-postgresql_lang_add_owner_parameter.yml
+ - 63036-mysql_replication_add_return_value.yml
+ - 63130-mysql_replication_add_master_delay_parameter.yml
+ - 63174-nsupdate-tsig-all-the-queries.yaml
+ - 63189-mysql_info-global-status.yml
+ - 63229-mysql_replication_add_connection_name_parameter.yml
+ - 63271-mysql_replication_add_channel_parameter.yml
+ - 63321-mysql_replication_add_resetmaster_to_mode.yml
+ - 63345-docker_image-deprecation-warnings.yml
+ - 63371-mysql_info_add_exclude_fields_parameter.yml
+ - 63408-nsupdate-dont-fix-none-txt-value.yaml
+ - 63418-docker_node_info-errors.yml
+ - 63419-docker_container-defaults.yml
+ - 63420-docker_container-trust_image_content.yml
+ - 63467-docker-stack-return-fix.yml
+ - 63522-remove-args-from-sumologic-and-splunk-callbacks.yml
+ - 63546-mysql_replication_allow_to_pass_empty_values.yml
+ - 63547-mysql_variables_add_mode_param.yml
+ - 63555-postgresql_privs_typy_obj_types.yaml
+ - 63565-postgresql_user_allow_user_name_with_dots.yml
+ - 63621-gitlab_user-fix-sshkey-and-user.yml
+ - 63629-postgresql_db_pgc_support.yaml
+ - 63887-docker_swarm_service-sort-lists-when-checking-changes.yml
+ - 63903-ufw.yaml
+ - 63990-replace-deprecated-basic-functions.yml
+ - 64007-postgresql_db_allow_user_name_with_dots.yml
+ - 64059-mysql_user_fix_password_comparison.yaml
+ - 64288-fix-hashi-vault-kv-v2.yaml
+ - 64371-postgresql_privs-always-reports-as-changed-when-using-default_privs.yml
+ - 64382-docker_login-fix-invalid-json.yml
+ - 64582-postgresql_publication_fix_typo_in_module_warn.yml
+ - 64583-postgresql_subscription_fix_typo_in_module_warn.yml
+ - 64585-mysql_dont_mask_mysql_connect_errors_from_modules.yml
+ - 64635-docker_container-network_mode.yml
+ - 64637-docker_swarm_service-tmpfs-source.yml
+ - 64661-postgres_py_add_query_params_arg.yml
+ - 64683-docker_container-cpus.yml
+ - 64797-fix-error-deleting-redfish-acct.yaml
+ - 64989-gitlab-handle-lib-new-version.yml
+ - 64994-postgresql_ext_use_query_params.yml
+ - 65018-docker-none-errors.yml
+ - 65044-fix-terraform-no-workspace.yaml
+ - 65093-postgresql_lang_use_query_params_with_cursor.yml
+ - 65164-postgres_use_query_params_with_cursor.yml
+ - 65223-postgresql_db-exception-added.yml
+ - 65238-fix_pacman_stdout_parsing.yml
+ - 65310-postgresql_owner_use_query_params.yml
+ - 65372-misc-context-manager.yml
+ - 65387-homebrew_check_mode_option.yml
+ - 65404-postgresql_publication_user_query_params_with_cursor.yml
+ - 65498-mysql_db_add_executed_commands_return_val.yml
+ - 65542-postgresql_db_add_executed_commands_return_val.yml
+ - 65547-mysql_db_add_force_param.yml
+ - 65609-docker-context-manager.yml
+ - 65632-docker-argspec-fixup.yml
+ - 65679-postgresql_schema_use_query_params_with_cursor.yml
+ - 65750-pacman.yml
+ - 65755-mysql_info_doesnt_list_empty_dbs.yml
+ - 65787-postgresql_sequence_use_query_params_with_cursor.yml
+ - 65789-mysql_user_add_plugin_authentication_parameters.yml
+ - 65791-postgresql_modules_use_query_params_with_cursor.yml
+ - 65839-docker_network-idempotence.yml
+ - 65854-docker_container-wait-for-removal.yml
+ - 65862-postgresql_modules_use_query_params_with_cursor.yml
+ - 65894-redfish-bios-attributes.yaml
+ - 65903-postgresql_privs_sort_lists_with_none_elements.yml
+ - 65993-restart-docker_container-on-restart-policy-updates.yaml
+ - 66048-mysql_add_master_data_parameter.yml
+ - 66060-redfish-new-resource-id-option.yaml
+ - 66144-docker_container-removal-timeout.yml
+ - 66151-docker_swarm_service-healthcheck-start-period.yml
+ - 66157-postgresql-create-unique-indexes.yml
+ - 66252-mysql_replication_fail_on_error.yml
+ - 66268-cyberarkpassword-fix-invalid-attr.yaml
+ - 66322-moved_line_causing_terraform_output_suppression.yml
+ - 66331-postgresql_query_fix_unable_to_handle_non_ascii_chars_when_python3.yml
+ - 66357-support-changing-fetch_url-settings-for-rundeck-modules.yaml
+ - 66382-docker_container-port-range.yml
+ - 66398-pamd_fix-attributeerror-when-removing-first-line.yml
+ - 66592_ipa_encoding_fix.yml
+ - 66599-docker-healthcheck.yml
+ - 66600-docker_container-volumes.yml
+ - 66688-mysql_db_add_skip_lock_tables_option.yml
+ - 66711-postgresql_user_add_comment_parameter.yml
+ - 66717-postgresql_db_add_dump_extra_args_param.yml
+ - 66801-mysql_user_priv_can_be_dict.yml
+ - 66806-mysql_variables_not_support_variables_with_dot.yml
+ - 66807-redhat_subscription-no-default-quantity.yaml
+ - 66914-purefa_user_string.yaml
+ - 66929-pmrun-quote-entire-success-command-string.yml
+ - 66957-scaleway-jsonify-only-for-json-requests.yml
+ - 66974-mysql_user_doesnt_support_privs_with_underscore.yml
+ - 67046-postgresql_modules_make_params_required.yml
+ - 67337-fix-proxysql-mysql-cursor.yaml
+ - 67353-docker_login-permissions.yml
+ - 67418-postgresql_set_converts_value_to_uppercase.yml
+ - 67461-gitlab-project-variable-masked-protected.yml
+ - 67464-postgresql_info_add_collecting_subscription_info.yml
+ - 67614-postgresql_info_add_collecting_publication_info.yml
+ - 67655-scaleway_compute-get-image-instead-loop-on-list.yml
+ - 67747-mysql_db_add_dump_extra_args_param.yml
+ - 67767-mysql_db_fix_bug_introduced_by_56721.yml
+ - 67832-run_powershell_modules_on_windows_containers.yml
+ - 68251-redfish_config-fix-boolean-bios-attr-support.yaml
+ - 68374_rhn_channel.yml
+ - 80-update_docker_connection_plugin.yml
+ - 83-dense-callback-warning.yml
+ - alicloud_params_add.yml
+ - apt_rpm_typefix.yml
+ - atomic_image_absent.yml
+ - become-pass-precedence.yaml
+ - clc_aa_policy-remove-unused-wait-parameter.yaml
+ - cron-only-get-bin-path-once.yaml
+ - cronvar-correct-binary-name.yaml
+ - filter-time.yml
+ - firewalld-version-0_7_0.yml
+ - firewalld_zone_target.yml
+ - fix-oc-conn-plugin-envvar.yml
+ - gitlab-project-variable-variable-type.yml
+ - gitlab_project_variable.yml
+ - ldap-params-removal.yml
+ - listen_ports_facts_doc.yml
+ - lxd_container_url.yaml
+ - lxd_profile_url.yaml
+ - mqtt-ssl-protocols.yml
+ - mysql_info_add_parameter.yml
+ - mysql_user_idempotency.yml
+ - openstack_inventory_move.yml
+ - ovirt-dont-ignore-instance_cpus-parameter.yaml
+ - porting-guide.yml
+ - postgresol_privs-fix-status-sorting.yaml
+ - proxmox-6-version-detection.yaml
+ - remove-2.9-deprecations.yml
+ - solaris_zone_name_fix.yml
+ - syslogger-disable-check-mode.yaml
+ - xml-deprecated-functions.yml
+ modules:
+ - description: Override a debian package's version of a file
+ name: dpkg_divert
+ namespace: system
+ - description: Manage Hetzner's dedicated server firewall
+ name: hetzner_firewall
+ namespace: net_tools
+ - description: Manage Hetzner's dedicated server firewall
+ name: hetzner_firewall_info
+ namespace: net_tools
+ - description: Creates a resource of Ecs/Instance in Huawei Cloud
+ name: hwc_ecs_instance
+ namespace: cloud.huawei
+ - description: Creates a resource of Evs/Disk in Huawei Cloud
+ name: hwc_evs_disk
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/EIP in Huawei Cloud
+ name: hwc_vpc_eip
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+ name: hwc_vpc_peering_connect
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/Port in Huawei Cloud
+ name: hwc_vpc_port
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+ name: hwc_vpc_private_ip
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/Route in Huawei Cloud
+ name: hwc_vpc_route
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+ name: hwc_vpc_security_group
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+ name: hwc_vpc_security_group_rule
+ namespace: cloud.huawei
+ - description: Creates a resource of Vpc/Subnet in Huawei Cloud
+ name: hwc_vpc_subnet
+ namespace: cloud.huawei
+ - description: Manage DNS Records for Ericsson IPWorks via ipwcli
+ name: ipwcli_dns
+ namespace: net_tools
+ - description: Generate ISO file with specified files or folders
+ name: iso_create
+ namespace: files
+ - description: Local Backup Utility for Alpine Linux
+ name: lbu
+ namespace: system
+ - description: Add or remove multiple LDAP attribute values
+ name: ldap_attrs
+ namespace: net_tools.ldap
+ - description: Search for entries in a LDAP server
+ name: ldap_search
+ namespace: net_tools.ldap
+ - description: Manage Mac App Store applications with mas-cli
+ name: mas
+ namespace: packaging.os
+ - description: Run MySQL queries
+ name: mysql_query
+ namespace: database.mysql
+ - description: Manage OVH monthly billing
+ name: ovh_monthly_billing
+ namespace: cloud.ovh
+ - description: Assign IP subnet to a bare metal server.
+ name: packet_ip_subnet
+ namespace: cloud.packet
+ - description: Create/delete a project in Packet host.
+ name: packet_project
+ namespace: cloud.packet
+ - description: Create/delete a volume in Packet host.
+ name: packet_volume
+ namespace: cloud.packet
+ - description: Attach/detach a volume to a device in the Packet host.
+ name: packet_volume_attachment
+ namespace: cloud.packet
+ - description: Add, update, or remove PostgreSQL subscription
+ name: postgresql_subscription
+ namespace: database.postgresql
+ - description: Gather statistics about PostgreSQL user objects
+ name: postgresql_user_obj_stat_info
+ namespace: database.postgresql
+ - description: Gather information about Redis servers
+ name: redis_info
+ namespace: database.misc
+ plugins:
+ callback:
+ - description: Customize the output
+ name: diy
+ namespace: null
+ lookup:
+ - description: Get key values from etcd3 server
+ name: etcd3
+ namespace: null
+ - description: fetch data from LMDB
+ name: lmdb_kv
+ namespace: null
+ release_date: '2020-06-20'
+ 1.0.0:
+ changes:
+ breaking_changes:
+ - log_plays callback - add missing information to the logs generated by the
+ callback plugin. This changes the log message format (https://github.com/ansible-collections/community.general/pull/442).
+ - 'pkgng - passing ``name: *`` with ``state: absent`` will no longer remove
+ every installed package from the system. It is now a noop. (https://github.com/ansible-collections/community.general/pull/569).'
+ - 'pkgng - passing ``name: *`` with ``state: latest`` or ``state: present``
+ will no longer install every package from the configured package repositories.
+ Instead, ``name: *, state: latest`` will upgrade all already-installed packages,
+ and ``name: *, state: present`` is a noop. (https://github.com/ansible-collections/community.general/pull/569).'
+ bugfixes:
+ - aix_filesystem - fix issues with ismount module_util pathing for Ansible 2.9
+ (https://github.com/ansible-collections/community.general/pull/567).
+ - consul_kv lookup - fix ``ANSIBLE_CONSUL_URL`` environment variable handling
+ (https://github.com/ansible/ansible/issues/51960).
+ - consul_kv lookup - fix arguments handling (https://github.com/ansible-collections/community.general/pull/303).
+ - digital_ocean_tag_info - fix crash when querying for an individual tag (https://github.com/ansible-collections/community.general/pull/615).
+ - doas become plugin - address a bug in the parameter handling that was breaking
+ the plugin in community.general when ``become_flags`` and ``become_user``
+ were not explicitly specified (https://github.com/ansible-collections/community.general/pull/704).
+ - docker_compose - add a condition to prevent service startup if parameter ``stopped``
+ is true. Otherwise, the service will be started on each play and stopped again
+ immediately due to the ``stopped`` parameter, which breaks the idempotency of
+ the module (https://github.com/ansible-collections/community.general/issues/532).
+ - docker_compose - disallow usage of the parameters ``stopped`` and ``restarted``
+ at the same time. This also breaks the idempotency (https://github.com/ansible-collections/community.general/issues/532).
+ - docker_container - use Config MacAddress by default instead of Networks. Networks
+ MacAddress is empty in some cases (https://github.com/ansible/ansible/issues/70206).
+ - docker_container - various error fixes in string handling for Python 2 to
+ avoid crashes when non-ASCII characters are used in strings (https://github.com/ansible-collections/community.general/issues/640).
+ - docker_swarm - removes ``advertise_addr`` from list of required arguments
+ when ``state`` is ``"join"`` (https://github.com/ansible-collections/community.general/issues/439).
+ - dzdo become plugin - address a bug in the parameter handling that was breaking
+ the plugin in community.general when ``become_user`` was not explicitly specified
+ (https://github.com/ansible-collections/community.general/pull/708).
+ - filesystem - resizefs of xfs filesystems is fixed. Filesystem needs to be
+ mounted.
+ - jenkins_plugin - replace MD5 checksum verification with SHA1 due to MD5 being
+ disabled on systems with FIPS-only algorithms enabled (https://github.com/ansible/ansible/issues/34304).
+ - jira - improve error message handling (https://github.com/ansible-collections/community.general/pull/311).
+ - jira - improve error message handling with multiple errors (https://github.com/ansible-collections/community.general/pull/707).
+ - kubevirt - add alias 'interface_name' for network_name (https://github.com/ansible/ansible/issues/55641).
+ - nmcli - fix idempotency when modifying an existing connection (https://github.com/ansible-collections/community.general/issues/481).
+ - osx_defaults - fix handling negative integers (https://github.com/ansible-collections/community.general/issues/134).
+ - pacman - treat package names containing .zst as package files during installation
+ (https://www.archlinux.org/news/now-using-zstandard-instead-of-xz-for-package-compression/,
+ https://github.com/ansible-collections/community.general/pull/650).
+ - pbrun become plugin - address a bug in the parameter handling that was
+ breaking the plugin in community.general when ``become_user`` was not explicitly
+ specified (https://github.com/ansible-collections/community.general/pull/708).
+ - postgresql_privs - fix crash when setting privileges on a schema with a hyphen in
+ the name (https://github.com/ansible-collections/community.general/issues/656).
+ - postgresql_set - only display a warning about restarts when restarting is
+ needed (https://github.com/ansible-collections/community.general/pull/651).
+ - redfish_info, redfish_config, redfish_command - Fix Redfish response payload
+ decode on Python 3.5 (https://github.com/ansible-collections/community.general/issues/686)
+ - selective - mark task failed correctly (https://github.com/ansible/ansible/issues/63767).
+ - snmp_facts - skip ``EndOfMibView`` values (https://github.com/ansible/ansible/issues/49044).
+ - yarn - fixed an index out of range error when no outdated packages were returned
+ by yarn executable (see https://github.com/ansible-collections/community.general/pull/474).
+ - yarn - fixed a 'too many values to unpack' error when scoped packages are installed
+ (see https://github.com/ansible-collections/community.general/pull/474).
+ deprecated_features:
+ - The ldap_attr module has been deprecated and will be removed in a later release;
+ use ldap_attrs instead.
+ - xbps - the ``force`` option never had any effect. It is now deprecated, and
+ will be removed in 3.0.0 (https://github.com/ansible-collections/community.general/pull/568).
+ minor_changes:
+ - Add the ``gcpubsub``, ``gcpubsub_info`` and ``gcpubsub_facts`` (to be removed
+ in 3.0.0) modules. These were originally in community.general, but removed
+ on the assumption that they have been moved to google.cloud. Since this turned
+ out to be incorrect, we re-added them for 1.0.0.
+ - Add the deprecated ``gcp_backend_service``, ``gcp_forwarding_rule`` and ``gcp_healthcheck``
+ modules, which will be removed in 2.0.0. These were originally in community.general,
+ but removed on the assumption that they have been moved to google.cloud. Since
+ this turned out to be incorrect, we re-added them for 1.0.0.
+ - The collection is now actively tested in CI with the latest Ansible 2.9 release.
+ - airbrake_deployment - add ``version`` param; clarified docs on ``revision``
+ param (https://github.com/ansible-collections/community.general/pull/583).
+ - apk - added ``no_cache`` option (https://github.com/ansible-collections/community.general/pull/548).
+ - firewalld - the module has been moved to the ``ansible.posix`` collection.
+ A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/623).
+ - gitlab_project - add support for merge_method on projects (https://github.com/ansible/ansible/pull/66813).
+ - gitlab_runners inventory plugin - permit environment variable input for ``server_url``,
+ ``api_token`` and ``filter`` options (https://github.com/ansible-collections/community.general/pull/611).
+ - haproxy - add options to enable/disable health and agent checks. When health
+ and agent checks are enabled for a service, a disabled service will re-enable
+ itself automatically. These options also change the state of the agent checks
+ to match the requested state for the backend (https://github.com/ansible-collections/community.general/issues/684).
+ - log_plays callback - use v2 methods (https://github.com/ansible-collections/community.general/pull/442).
+ - logstash callback - add ini config (https://github.com/ansible-collections/community.general/pull/610).
+ - lxd_container - added support for the ``--target`` flag for cluster deployments
+ (https://github.com/ansible-collections/community.general/issues/637).
+ - parted - accept negative numbers in ``part_start`` and ``part_end``
+ - pkgng - added ``stdout`` and ``stderr`` attributes to the result (https://github.com/ansible-collections/community.general/pull/560).
+ - 'pkgng - added support for upgrading all packages using ``name: *, state:
+ latest``, similar to other package providers (https://github.com/ansible-collections/community.general/pull/569).'
+ - postgresql_query - add search_path parameter (https://github.com/ansible-collections/community.general/issues/625).
+ - rundeck_acl_policy - add check for rundeck_acl_policy name parameter (https://github.com/ansible-collections/community.general/pull/612).
+ - slack - add support for sending messages built with block kit (https://github.com/ansible-collections/community.general/issues/380).
+ - splunk callback - add an option to allow skipping certificate validation for
+ HEC (https://github.com/ansible-collections/community.general/pull/596).
+ - xfconf - add arrays support (https://github.com/ansible/ansible/issues/46308).
+ - xfconf - add support for ``uint`` type (https://github.com/ansible-collections/community.general/pull/696).
+ release_summary: 'This is release 1.0.0 of ``community.general``, released on
+ 2020-07-31.
+
+ '
+ removed_features:
+ - conjur_variable lookup - has been moved to the ``cyberark.conjur`` collection.
+ A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/570).
+ - digital_ocean_* - all DigitalOcean modules have been moved to the ``community.digitalocean``
+ collection. A redirection is active, which will be removed in version 2.0.0
+ (https://github.com/ansible-collections/community.general/pull/622).
+ - infini_* - all infinidat modules have been moved to the ``infinidat.infinibox``
+ collection. A redirection is active, which will be removed in version 2.0.0
+ (https://github.com/ansible-collections/community.general/pull/607).
+ - logicmonitor - the module has been removed in 1.0.0 since it is unmaintained
+ and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539,
+ https://github.com/ansible-collections/community.general/pull/541).
+ - logicmonitor_facts - the module has been removed in 1.0.0 since it is unmaintained
+ and the API used by the module has been turned off in 2017 (https://github.com/ansible-collections/community.general/issues/539,
+ https://github.com/ansible-collections/community.general/pull/541).
+ - mysql_* - all MySQL modules have been moved to the ``community.mysql`` collection.
+ A redirection is active, which will be removed in version 2.0.0 (https://github.com/ansible-collections/community.general/pull/633).
+ - proxysql_* - all ProxySQL modules have been moved to the ``community.proxysql``
+ collection. A redirection is active, which will be removed in version 2.0.0
+ (https://github.com/ansible-collections/community.general/pull/624).
+ fragments:
+ - 1.0.0.yml
+ - 296-ansible-2.9.yml
+ - 303-consul_kv-fix-env-variables-handling.yaml
+ - 311-jira-error-handling.yaml
+ - 33979-xfs_growfs.yml
+ - 442-log_plays-add_playbook_task_name_and_action.yml
+ - 474-yarn_fix-outdated-fix-list.yml
+ - 547-start-service-condition.yaml
+ - 548_apk.yml
+ - 55903_kubevirt.yml
+ - 560-pkgng-add-stdout-and-stderr.yaml
+ - 562-nmcli-fix-idempotency.yaml
+ - 564-docker_container_use_config_macaddress_by_default.yaml
+ - 568_packaging.yml
+ - 569-pkgng-add-upgrade-action.yaml
+ - 596-splunk-add-option-to-not-validate-cert.yaml
+ - 610_logstash_callback_add_ini_config.yml
+ - 611-gitlab-runners-env-vars-intput-and-default-item-limit.yaml
+ - 613-snmp_facts-EndOfMibView.yml
+ - 615-digital-ocean-tag-info-bugfix.yml
+ - 63767_selective.yml
+ - 642-docker_container-python-2.yml
+ - 646-docker_swarm-remove-advertise_addr-from-join-requirement.yaml
+ - 650_pacman_support_zst_package_files.yaml
+ - 651-fix-postgresql_set-warning.yaml
+ - 653-postgresql_query_add_search_path_param.yml
+ - 656-name-with-hyphen.yml
+ - 66813_gitlab_project.yml
+ - 676-osx_defaults_fix_handling_negative_ints.yml
+ - 677-jenkins_plugins_sha1.yaml
+ - 687-fix-redfish-payload-decode-python35.yml
+ - 689-haproxy_agent_and_health.yml
+ - 693-big-revamp-on-xfconf-adding-array-values.yml
+ - 702-slack-support-for-blocks.yaml
+ - 704-doas-set-correct-default-values.yml
+ - 707-jira-error-handling.yaml
+ - 708-set-correct-default-values.yml
+ - 711-lxd-target.yml
+ - add_argument_check_for_rundeck.yaml
+ - airbrake_deployment_add_version.yml
+ - aix_filesystem-module_util-routing-issue.yml
+ - cyberarkconjur-removal.yml
+ - digital-ocean.yml
+ - firewalld_migration.yml
+ - google-modules.yml
+ - infinidat-removal.yml
+ - logicmonitor-removal.yml
+ - mysql.yml
+ - parted_negative_numbers.yml
+ - porting-guide-2.yml
+ - proxysql.yml
+ - xfconf_add_uint_type.yml
+ modules:
+ - description: Return information on a docker stack
+ name: docker_stack_info
+ namespace: cloud.docker
+ - description: Manage macOS services
+ name: launchd
+ namespace: system
+ - description: Execute SQL via ODBC
+ name: odbc
+ namespace: database.misc
+ plugins:
+ inventory:
+ - description: Cobbler inventory source
+ name: cobbler
+ namespace: null
+ lookup:
+ - description: Get secrets from Thycotic DevOps Secrets Vault
+ name: dsv
+ namespace: null
+ - description: Get secrets from Thycotic Secret Server
+ name: tss
+ namespace: null
+ release_date: '2020-07-31'
+ 1.1.0:
+ changes:
+ bugfixes:
+ - cobbler inventory plugin - ``name`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
+ - dsv lookup - use correct dict usage (https://github.com/ansible-collections/community.general/pull/743).
+ - inventory plugins - allow FQCN in ``plugin`` option (https://github.com/ansible-collections/community.general/pull/722).
+ - ipa_hostgroup - fix an issue with load-balanced ipa and cookie handling with
+ Python 3 (https://github.com/ansible-collections/community.general/issues/737).
+ - oc connection plugin - ``transport`` needed FQCN (https://github.com/ansible-collections/community.general/pull/722).
+ - postgresql_set - allow passing an empty string to the ``value`` parameter
+ (https://github.com/ansible-collections/community.general/issues/775).
+ - xfconf - make it work in non-english locales (https://github.com/ansible-collections/community.general/pull/744).
+ minor_changes:
+ - The collection dependencies were adjusted so that ``community.kubernetes``
+ and ``google.cloud`` are required to be of version 1.0.0 or newer (https://github.com/ansible-collections/community.general/pull/774).
+ - jc - new filter to convert the output of many shell commands and file-types
+ to JSON. Uses the jc library at https://github.com/kellyjonbrazil/jc. For
+ example, filtering the STDOUT output of ``uname -a`` via ``{{ result.stdout
+ | community.general.jc('uname') }}``. Requires Python 3.6+ (https://github.com/ansible-collections/community.general/pull/750).
+ - xfconf - add support for ``double`` type (https://github.com/ansible-collections/community.general/pull/744).
+ release_summary: 'Release for Ansible 2.10.0.
+
+ '
+ fragments:
+ - 1.1.0.yml
+ - 722-plugins.yml
+ - 738-ipa-python3.yml
+ - 744-xfconf_make_locale-independent.yml
+ - 750-jc-new-filter.yaml
+ - 776-postgresql_set_allow_empty_string.yaml
+ - dsv_fix.yml
+ - galaxy-yml.yml
+ modules:
+ - description: Return information of the tasks on a docker stack
+ name: docker_stack_task_info
+ namespace: cloud.docker
+ - description: Save iptables state into a file or restore it from a file
+ name: iptables_state
+ namespace: system
+ - description: Shut down a machine
+ name: shutdown
+ namespace: system
+ - description: Manage OpenBSD system upgrades
+ name: sysupgrade
+ namespace: system
+ release_date: '2020-08-18'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - aerospike_migrations - handle exception when unstable-cluster is returned
+ (https://github.com/ansible-collections/community.general/pull/900).
+ - django_manage - fix idempotence for ``createcachetable`` (https://github.com/ansible-collections/community.general/pull/699).
+ - docker_container - fix idempotency problem with ``published_ports`` when strict
+ comparison is used and list is empty (https://github.com/ansible-collections/community.general/issues/978).
+ - 'gem - fix get_installed_versions: correctly parse ``default`` version (https://github.com/ansible-collections/community.general/pull/783).'
+ - hashi_vault - add missing ``mount_point`` parameter for approle auth (https://github.com/ansible-collections/community.general/pull/897).
+ - hashi_vault lookup - ``token_path`` in config file overridden by env ``HOME``
+ (https://github.com/ansible-collections/community.general/issues/373).
+ - homebrew_cask - fixed issue where a cask with ``@`` in the name is incorrectly
+ reported as invalid (https://github.com/ansible-collections/community.general/issues/733).
+ - interfaces_file - escape regular expression characters in old value (https://github.com/ansible-collections/community.general/issues/777).
+ - launchd - fix for user-level services (https://github.com/ansible-collections/community.general/issues/896).
+ - nmcli - set ``C`` locale when executing ``nmcli`` (https://github.com/ansible-collections/community.general/issues/989).
+ - parted - fix creating partition when label is changed (https://github.com/ansible-collections/community.general/issues/522).
+ - pkg5 - now works when Python 3 is used on the target (https://github.com/ansible-collections/community.general/pull/789).
+ - postgresql_privs - allow passing the ``PUBLIC`` role written in lowercase letters
+ (https://github.com/ansible-collections/community.general/issues/857).
+ - postgresql_privs - fix the module mistaking a procedure for a function (https://github.com/ansible-collections/community.general/issues/994).
+ - postgresql_privs - rollback if nothing changed (https://github.com/ansible-collections/community.general/issues/885).
+ - postgresql_privs - the module was attempting to revoke grant options even
+ though ``grant_option`` was not specified (https://github.com/ansible-collections/community.general/pull/796).
+ - proxmox_kvm - defer error-checking for non-existent VMs in order to fix idempotency
+ of tasks using ``state=absent`` and properly recognize a success (https://github.com/ansible-collections/community.general/pull/811).
+ - proxmox_kvm - improve handling of long-running tasks by creating a dedicated
+ function (https://github.com/ansible-collections/community.general/pull/831).
+ - slack - fix ``xox[abp]`` token identification to capture everything after
+ ``xox[abp]``, as the token is the only thing that should be in this argument
+ (https://github.com/ansible-collections/community.general/issues/862).
+ - terraform - fix incorrectly reporting a status of unchanged when the number of
+ resources added or destroyed is a multiple of 10 (https://github.com/ansible-collections/community.general/issues/561).
+ - timezone - support Python3 on macos/darwin (https://github.com/ansible-collections/community.general/pull/945).
+ - zfs - fixed ``invalid character '@' in pool name`` error when working with
+ snapshots on a root zvol (https://github.com/ansible-collections/community.general/issues/932).
+ minor_changes:
+ - hashi_vault - support ``VAULT_NAMESPACE`` environment variable for namespaced
+ lookups against Vault Enterprise (in addition to the ``namespace=`` flag supported
+ today) (https://github.com/ansible-collections/community.general/pull/929).
+ - hashi_vault lookup - add ``VAULT_TOKEN_FILE`` as env option to specify ``token_file``
+ param (https://github.com/ansible-collections/community.general/issues/373).
+ - hashi_vault lookup - add ``VAULT_TOKEN_PATH`` as env option to specify ``token_path``
+ param (https://github.com/ansible-collections/community.general/issues/373).
+ - ipa_user - add ``userauthtype`` option (https://github.com/ansible-collections/community.general/pull/951).
+ - iptables_state - use FQCN when calling a module from action plugin (https://github.com/ansible-collections/community.general/pull/967).
+ - nagios - add the ``acknowledge`` action (https://github.com/ansible-collections/community.general/pull/820).
+ - nagios - add the ``host`` and ``all`` values for the ``forced_check`` action
+ (https://github.com/ansible-collections/community.general/pull/998).
+ - nagios - add the ``service_check`` action (https://github.com/ansible-collections/community.general/pull/820).
+ - nagios - rename the ``service_check`` action to ``forced_check`` since we
+ are now able to check a particular service, all services of a particular
+ host, or the host itself (https://github.com/ansible-collections/community.general/pull/998).
+ - pkgutil - module can now accept a list of packages (https://github.com/ansible-collections/community.general/pull/799).
+ - pkgutil - module has a new option, ``force``, equivalent to the ``-f`` option
+ to the `pkgutil <http://pkgutil.net/>`_ command (https://github.com/ansible-collections/community.general/pull/799).
+ - pkgutil - module now supports check mode (https://github.com/ansible-collections/community.general/pull/799).
+ - postgresql_privs - add the ``usage_on_types`` option (https://github.com/ansible-collections/community.general/issues/884).
+ - proxmox_kvm - improve code readability (https://github.com/ansible-collections/community.general/pull/934).
+ - pushover - add device parameter (https://github.com/ansible-collections/community.general/pull/802).
+ - redfish_command - add sub-command for ``EnableContinuousBootOverride`` and
+ ``DisableBootOverride`` to allow setting BootSourceOverrideEnabled Redfish
+ property (https://github.com/ansible-collections/community.general/issues/824).
+ - redfish_command - support same reset actions on Managers as on Systems (https://github.com/ansible-collections/community.general/issues/901).
+ - slack - add support for updating messages (https://github.com/ansible-collections/community.general/issues/304).
+ - xml - fixed issue where ``changed`` was returned when removing non-existent xpath
+ (https://github.com/ansible-collections/community.general/pull/1007).
+ - zypper_repository - proper failure when python-xml is missing (https://github.com/ansible-collections/community.general/pull/939).
+ release_summary: Regular bimonthly minor release.
+ fragments:
+ - 1.2.0.yml
+ - 522-parted_change_label.yml
+ - 563-update-terraform-status-test.yaml
+ - 699-django_manage-createcachetable-fix-idempotence.yml
+ - 777-interfaces_file-re-escape.yml
+ - 783-fix-gem-installed-versions.yaml
+ - 789-pkg5-wrap-to-modify-package-list.yaml
+ - 796-postgresql_privs-grant-option-bug.yaml
+ - 802-pushover-device-parameter.yml
+ - 811-proxmox-kvm-state-absent.yml
+ - 820_nagios_added_acknowledge_and_servicecheck.yml
+ - 825-bootsource-override-option.yaml
+ - 831-proxmox-kvm-wait.yml
+ - 843-update-slack-messages.yml
+ - 858-postgresql_privs_should_allow_public_role_lowercased.yml
+ - 887-rollback-if-nothing-changed.yml
+ - 892-slack-token-validation.yml
+ - 897-lookup-plugin-hashivault-add-approle-mount-point.yaml
+ - 899_launchd_user_service.yml
+ - 900-aerospike-migration-handle-unstable-cluster.yaml
+ - 902-hashi_vault-token-path.yml
+ - 903-enhance-redfish-manager-reset-actions.yml
+ - 929-vault-namespace-support.yml
+ - 939-zypper_repository_proper_failure_on_missing_python-xml.yml
+ - 941-postgresql_privs_usage_on_types_option.yml
+ - 943-proxmox-kvm-code-cleanup.yml
+ - 945-darwin-timezone-py3.yaml
+ - 951-ipa_user-add-userauthtype-param.yaml
+ - 967-use-fqcn-when-calling-a-module-from-action-plugin.yml
+ - 979-docker_container-published_ports-empty-idempotency.yml
+ - 992-nmcli-locale.yml
+ - 996-postgresql_privs_fix_function_handling.yml
+ - 998-nagios-added_forced_check_for_all_services_or_host.yml
+ - homebrew-cask-at-symbol-fix.yaml
+ - pkgutil-check-mode-etc.yaml
+ - xml-remove-changed.yml
+ - zfs-root-snapshot.yml
+ modules:
+ - description: Manage group members on GitLab Server
+ name: gitlab_group_members
+ namespace: source_control.gitlab
+ - description: Creates, updates, or deletes GitLab groups variables
+ name: gitlab_group_variable
+ namespace: source_control.gitlab
+ - description: Scaleway database backups management module
+ name: scaleway_database_backup
+ namespace: cloud.scaleway
+ plugins:
+ inventory:
+ - description: Proxmox inventory source
+ name: proxmox
+ namespace: null
+ - description: StackPath Edge Computing inventory source
+ name: stackpath_compute
+ namespace: null
+ release_date: '2020-09-30'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - apache2_module - amend existing module identifier workaround to also apply
+ to updated Shibboleth modules (https://github.com/ansible-collections/community.general/issues/1379).
+ - beadm - fixed issue "list object has no attribute split" (https://github.com/ansible-collections/community.general/issues/791).
+ - capabilities - fix for a newer version of libcap release (https://github.com/ansible-collections/community.general/pull/1061).
+ - composer - fix bug in command idempotence with composer v2 (https://github.com/ansible-collections/community.general/issues/1179).
+ - docker_login - fix internal config file storage to handle credentials for
+ more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
+ - filesystem - add option ``state`` with default ``present``. When set to ``absent``,
+ filesystem signatures are removed (https://github.com/ansible-collections/community.general/issues/355).
+ - flatpak - use the ``--non-interactive`` argument instead of ``-y`` when
+ possible (https://github.com/ansible-collections/community.general/pull/1246).
+ - gcp_storage_files lookup plugin - make sure that plugin errors out on initialization
+ if the required library is not found, and not at load time (https://github.com/ansible-collections/community.general/pull/1297).
+ - gitlab_group - added description parameter to ``createGroup()`` call (https://github.com/ansible-collections/community.general/issues/138).
+ - gitlab_group_variable - support for GitLab pagination limitation by iterating
+ over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968).
+ - gitlab_project_variable - support for GitLab pagination limitation by iterating
+ over GitLab variable pages (https://github.com/ansible-collections/community.general/pull/968).
+ - hashi_vault - fix approle authentication without ``secret_id`` (https://github.com/ansible-collections/community.general/pull/1138).
+ - homebrew - fix package name validation for packages containing hyphen ``-``
+ (https://github.com/ansible-collections/community.general/issues/1037).
+ - homebrew_cask - fix package name validation for casks containing hyphen ``-``
+ (https://github.com/ansible-collections/community.general/issues/1037).
+ - influxdb - fix usage of path for older version of python-influxdb (https://github.com/ansible-collections/community.general/issues/997).
+ - iptables_state - fix race condition between module and its action plugin (https://github.com/ansible-collections/community.general/issues/1136).
+ - linode inventory plugin - make sure that plugin errors out on initialization
+ if the required library is not found, and not at load time (https://github.com/ansible-collections/community.general/pull/1297).
+ - lxc_container - fix the type of the ``container_config`` parameter. It is
+ now processed as a list and not a string (https://github.com/ansible-collections/community.general/pull/216).
+ - macports - fix failure to install a package whose name is contained within
+ an already installed package's name or variant (https://github.com/ansible-collections/community.general/issues/1307).
+ - maven_artifact - handle timestamped snapshot version strings properly (https://github.com/ansible-collections/community.general/issues/709).
+ - memcached cache plugin - make sure that plugin errors out on initialization
+ if the required library is not found, and not at load time (https://github.com/ansible-collections/community.general/pull/1297).
+ - monit - fix the module's ability to determine the current state of the monitored
+ process (https://github.com/ansible-collections/community.general/pull/1107).
+ - nios_fixed_address, nios_host_record, nios_zone - removed redundant parameter
+ aliases causing warning messages to incorrectly appear in task output (https://github.com/ansible-collections/community.general/issues/852).
+ - nmcli - cannot modify ``ifname`` after connection creation (https://github.com/ansible-collections/community.general/issues/1089).
+ - nmcli - use consistent autoconnect parameters (https://github.com/ansible-collections/community.general/issues/459).
+ - omapi_host - fix compatibility with Python 3 (https://github.com/ansible-collections/community.general/issues/787).
+ - packet_net.py inventory script - fixed failure w.r.t. operating system retrieval
+ by changing array subscription back to attribute access (https://github.com/ansible-collections/community.general/pull/891).
+ - postgresql_ext - fix the module crashing when available ext versions cannot
+ be compared with the current version (https://github.com/ansible-collections/community.general/issues/1095).
+ - postgresql_ext - fix version selection when ``version=latest`` (https://github.com/ansible-collections/community.general/pull/1078).
+ - postgresql_pg_hba - fix a crash when a new rule with an 'options' field replaces
+ a rule without one, or vice versa (https://github.com/ansible-collections/community.general/issues/1108).
+ - postgresql_privs - fix module failure when ``type`` is ``group`` and the passed ``objs``
+ value contains hyphens (https://github.com/ansible-collections/community.general/issues/1058).
+ - proxmox_kvm - fix issue causing linked clones not being created by allowing
+ ``format=unspecified`` (https://github.com/ansible-collections/community.general/issues/1027).
+ - proxmox_kvm - ignore unsupported ``pool`` parameter on update (https://github.com/ansible-collections/community.general/pull/1258).
+ - redis - fixes parsing of config values which should not be converted to bytes
+ (https://github.com/ansible-collections/community.general/pull/1079).
+ - redis cache plugin - make sure that plugin errors out on initialization if
+ the required library is not found, and not at load time (https://github.com/ansible-collections/community.general/pull/1297).
+ - slack - avoid trying to update existing message when sending messages that
+ contain the string "ts" (https://github.com/ansible-collections/community.general/issues/1097).
+ - solaris_zone - fixed issue trying to configure zone in Python 3 (https://github.com/ansible-collections/community.general/issues/1081).
+ - syspatch - fix bug where not setting ``apply=true`` would result in error
+ (https://github.com/ansible-collections/community.general/pull/360).
+ - xfconf - parameter ``value`` no longer required for state ``absent`` (https://github.com/ansible-collections/community.general/issues/1329).
+ - xfconf - no longer passes the command args as a string, but rather
+ as a list (https://github.com/ansible-collections/community.general/issues/1328).
+ - zypper - force ``LANG=C`` as zypper looks at XML output where attributes
+ could be translated (https://github.com/ansible-collections/community.general/issues/1175).
+ deprecated_features:
+ - django_manage - the parameter ``liveserver`` relates to a no longer maintained
+ third-party module for Django. It is now deprecated, and will be removed in
+ community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/1154).
+ - proxmox - the default of the new ``proxmox_default_behavior`` option will
+ change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0.
+ Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850).
+ - proxmox_kvm - the default of the new ``proxmox_default_behavior`` option will
+ change from ``compatibility`` to ``no_defaults`` in community.general 4.0.0.
+ Set the option to an explicit value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/850).
+ - syspatch - deprecate the redundant ``apply`` argument (https://github.com/ansible-collections/community.general/pull/360).
+ major_changes:
+ - 'For community.general 2.0.0, the Hetzner Robot modules will be moved to the
+ `community.hrobot <https://galaxy.ansible.com/community/hrobot>`_ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use Hetzner Robot modules from this
+ collection, you will need to adjust your playbooks and roles to use FQCNs
+ starting with ``community.hrobot.`` instead of ``community.general.hetzner_``,
+
+ for example replace ``community.general.hetzner_firewall_info`` in a task
+ by ``community.hrobot.firewall_info``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the Hetzner Robot modules, you have to make sure to install the ``community.hrobot``
+ collection as well.
+
+ If you are using FQCNs, i.e. ``community.general.hetzner_failover_ip`` instead
+ of ``hetzner_failover_ip``, it will continue working, but we still recommend
+ to adjust the FQCNs as well.
+
+ '
+ - 'For community.general 2.0.0, the ``docker`` modules and plugins will be moved
+ to the `community.docker <https://galaxy.ansible.com/community/docker>`_ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use ``docker`` content from this collection,
+ you will need to adjust your playbooks and roles to use FQCNs starting with
+ ``community.docker.`` instead of ``community.general.``,
+
+ for example replace ``community.general.docker_container`` in a task by ``community.docker.docker_container``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the ``docker`` content, you have to make sure to install the ``community.docker``
+ collection as well.
+
+ If you are using FQCNs, i.e. ``community.general.docker_container`` instead
+ of ``docker_container``, it will continue working, but we still recommend
+ to adjust the FQCNs as well.
+
+ '
+ - 'For community.general 2.0.0, the ``postgresql`` modules and plugins will
+ be moved to the `community.postgresql <https://galaxy.ansible.com/community/postgresql>`_
+ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use ``postgresql`` content from this
+ collection, you will need to adjust your playbooks and roles to use FQCNs
+ starting with ``community.postgresql.`` instead of ``community.general.``,
+
+ for example replace ``community.general.postgresql_info`` in a task by ``community.postgresql.postgresql_info``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the ``postgresql`` content, you have to make sure to install the ``community.postgresql``
+ collection as well.
+
+ If you are using FQCNs, i.e. ``community.general.postgresql_info`` instead
+ of ``postgresql_info``, it will continue working, but we still recommend to
+ adjust the FQCNs as well.
+
+ '
+ - The community.general collection no longer depends on the ansible.posix collection
+ (https://github.com/ansible-collections/community.general/pull/1157).
+ minor_changes:
+ - 'Add new filter plugin ``dict_kv`` which returns a single key-value pair from
+ two arguments. Useful for generating complex dictionaries without using loops.
+ For example ``''value'' | community.general.dict_kv(''key'')`` evaluates
+ to ``{''key'': ''value''}`` (https://github.com/ansible-collections/community.general/pull/1264).'
+ - archive - fix parameter types (https://github.com/ansible-collections/community.general/pull/1039).
+ - consul - added support for tcp checks (https://github.com/ansible-collections/community.general/issues/1128).
+ - datadog - mark ``notification_message`` as ``no_log`` (https://github.com/ansible-collections/community.general/pull/1338).
+ - datadog_monitor - add ``include_tags`` option (https://github.com/ansible/ansible/issues/57441).
+ - django_manage - renamed parameter ``app_path`` to ``project_path``, adding
+ ``app_path`` and ``chdir`` as aliases (https://github.com/ansible-collections/community.general/issues/1044).
+ - docker_container - now supports the ``device_requests`` option, which allows
+ requesting additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748,
+ https://github.com/ansible-collections/community.general/pull/1119).
+ - docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+ - docker_secret - add a warning when the secret does not have an ``ansible_key``
+ label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30,
+ https://github.com/ansible-collections/community.docker/pull/31).
+ - facter - added option for ``arguments`` (https://github.com/ansible-collections/community.general/pull/768).
+ - hashi_vault - support ``VAULT_SKIP_VERIFY`` environment variable for determining
+ whether to verify certificates (in addition to the ``validate_certs=`` flag supported
+ today) (https://github.com/ansible-collections/community.general/pull/1024).
+ - hashi_vault lookup plugin - add support for JWT authentication (https://github.com/ansible-collections/community.general/pull/1213).
+ - infoblox inventory script - use stderr for reporting errors, and allow use
+ of environment for configuration (https://github.com/ansible-collections/community.general/pull/436).
+ - ipa_host - silence warning about non-secret ``random_password`` option not
+ having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339).
+ - ipa_user - silence warning about non-secret ``krbpasswordexpiration`` and
+ ``update_password`` options not having ``no_log`` set (https://github.com/ansible-collections/community.general/pull/1339).
+ - linode_v4 - added support for Linode StackScript usage when creating instances
+ (https://github.com/ansible-collections/community.general/issues/723).
+ - lvol - fix idempotency issue when using lvol with ``%VG`` or ``%PVS`` size
+ options and VG is fully allocated (https://github.com/ansible-collections/community.general/pull/229).
+ - maven_artifact - added ``client_cert`` and ``client_key`` parameters to the
+ maven_artifact module (https://github.com/ansible-collections/community.general/issues/1123).
+ - module_helper - added ModuleHelper class and a couple of convenience tools
+ for module developers (https://github.com/ansible-collections/community.general/pull/1322).
+ - nmcli - refactor internal methods for simplicity and enhance reuse to support
+ existing and future connection types (https://github.com/ansible-collections/community.general/pull/1113).
+ - nmcli - remove Python DBus and GTK Object library dependencies (https://github.com/ansible-collections/community.general/issues/1112).
+ - nmcli - the ``dns4``, ``dns4_search``, ``dns6``, and ``dns6_search`` arguments
+ are retained internally as lists (https://github.com/ansible-collections/community.general/pull/1113).
+ - odbc - added a parameter ``commit`` which allows users to disable the explicit
+ commit after the execute call (https://github.com/ansible-collections/community.general/pull/1139).
+ - openbsd_pkg - added ``snapshot`` option (https://github.com/ansible-collections/community.general/pull/965).
+ - 'pacman - improve group expansion speed: query list of pacman groups once
+ (https://github.com/ansible-collections/community.general/pull/349).'
+ - parted - add ``resize`` option to resize existing partitions (https://github.com/ansible-collections/community.general/pull/773).
+ - passwordstore lookup plugin - added ``umask`` option to set the desired file
+ permissions on creation. This is done via the ``PASSWORD_STORE_UMASK`` environment
+ variable (https://github.com/ansible-collections/community.general/pull/1156).
+ - pkgin - add support for installation of full versioned package names (https://github.com/ansible-collections/community.general/pull/1256).
+ - pkgng - present the ``ignore_osver`` option to pkg (https://github.com/ansible-collections/community.general/pull/1243).
+ - portage - add ``getbinpkgonly`` option, remove unnecessary note on internal
+ portage behaviour (getbinpkg=yes), and remove the undocumented exclusiveness
+ of the pkg options as portage makes no such restriction (https://github.com/ansible-collections/community.general/pull/1169).
+ - postgresql_info - add ``in_recovery`` return value to show whether a service is in
+ recovery mode or not (https://github.com/ansible-collections/community.general/issues/1068).
+ - postgresql_privs - add ``procedure`` type support (https://github.com/ansible-collections/community.general/issues/1002).
+ - postgresql_query - add ``query_list`` and ``query_all_results`` return values
+ (https://github.com/ansible-collections/community.general/issues/838).
+ - proxmox - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850).
+ - proxmox - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+ - proxmox - extract common code and documentation (https://github.com/ansible-collections/community.general/pull/1331).
+ - proxmox inventory plugin - ignore QEMU templates altogether instead of skipping
+ the creation of the host in the inventory (https://github.com/ansible-collections/community.general/pull/1185).
+ - 'proxmox_kvm - add cloud-init support (new options: ``cicustom``, ``cipassword``,
+ ``citype``, ``ciuser``, ``ipconfig``, ``nameservers``, ``searchdomains``,
+ ``sshkeys``) (https://github.com/ansible-collections/community.general/pull/797).'
+ - proxmox_kvm - add new ``proxmox_default_behavior`` option (https://github.com/ansible-collections/community.general/pull/850).
+ - proxmox_kvm - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+ - proxmox_template - add support for API tokens (https://github.com/ansible-collections/community.general/pull/1206).
+ - proxmox_template - download proxmox appliance templates (pveam) (https://github.com/ansible-collections/community.general/pull/1046).
+ - redis cache plugin - add redis sentinel functionality to cache plugin (https://github.com/ansible-collections/community.general/pull/1055).
+ - redis cache plugin - make the redis cache keyset name configurable (https://github.com/ansible-collections/community.general/pull/1036).
+ - terraform - add ``init_reconfigure`` option, which controls the ``-reconfigure``
+ flag (backend reconfiguration) (https://github.com/ansible-collections/community.general/pull/823).
+ - xfconf - removed unnecessary second execution of ``xfconf-query`` (https://github.com/ansible-collections/community.general/pull/1305).
+ release_summary: This is the last minor 1.x.0 release. The next releases from
+ the stable-1 branch will be 1.3.y patch releases.
+ fragments:
+ - 1.3.0.yml
+ - 1024-vault-skip-verify-support.yml
+ - 1028-proxmox-kvm-linked-clone.yml
+ - 1036-redis-cache-keyset-name.yaml
+ - 1038-fix-homebrew-and-homebrew-cask-package-validation.yaml
+ - 1039-archive-fix-paramater-types.yaml
+ - 1048-postgresql_privs_add_procedure_type.yml
+ - 1055-redis-cache-sentinel.yaml
+ - 1059-postgresql_privs_fix_failings_when_using_roles_with_hyphen.yml
+ - 1078-postgresql_ext_fix_version_selection_when_version_is_latest.yml
+ - 1079-redis-use-regexp-to-check-if-the-value-matches-expected-form.yaml
+ - 1081-solaris_zone-python3.yml
+ - 1091-postgresql_info_add_in_recovery_ret_val.yml
+ - 1099-postgresql_ext_fix_failing_when_version_cannot_be_compared.yml
+ - 1101-slack-ts-fix.yaml
+ - 1105-beadm_bugfix.yaml
+ - 1107-monit-fix-status-check.yml
+ - 1118-docker_login-config-store.yml
+ - 1119-docker_container-device-reqests.yml
+ - 1124-pg_hba-dictkey_bugfix.yaml
+ - 1126-influxdb-conditional-path-argument.yml
+ - 1127-maven_artifact_client_cert.yml
+ - 1138-hashi_vault_fix_approle_authentication_without_secret_id.yml
+ - 1140-iptables_state-fix-race-condition.yml
+ - 1144-consul-add-tcp-check-support.yml
+ - 1149-filesystem-fix-355-state-absent.yml
+ - 1154-django_manage-docs.yml
+ - 1169-getbinpkgonly.yaml
+ - 1175-zypper-absent-lang.yml
+ - 1179-composer_require_v2_idempotence_fix.yml
+ - 1185-proxmox-ignore-qemu-templates.yml
+ - 1196-use_description-in-gitlab-group-creation.yml
+ - 1206-proxmox-api-token.yml
+ - 1213-hashi_vault-jwt-auth-support.yaml
+ - 1223-nios-remove-redundant-aliases.yml
+ - 1243-pkgng-present-ignoreosver.yaml
+ - 1244-renamed-parameter.yaml
+ - 1246-flatpak-use-non-interactive-argument.yaml
+ - 1256-feat-pkgin-add-full-version-package-name.yml
+ - 1258-proxmox_kvm-ignore-pool-on-update.yaml
+ - 1264-dict_kv-new-filter.yaml
+ - 1270-linode-v4-stackscript-support.yaml
+ - 1305-added-xfconf-tests.yaml
+ - 1307-macports-fix-status-check.yml
+ - 1322-module_helper_and_xfconf.yaml
+ - 1331-proxmox-info-modules.yml
+ - 1338-datadog-mark-notification_message-no_log.yml
+ - 1339-ip-no_log-nonsecret.yml
+ - 1383-apache2-module-amend-shib-workaround.yaml
+ - 216-fix-lxc-container-container_config-parameter.yaml
+ - 229_lvol_percentage_fix.yml
+ - 349-pacman_improve_group_expansion_speed.yml
+ - 360_syspatch_apply_patches_by_default.yml
+ - 409-datadog-monitor-include-tags.yaml
+ - 436-infoblox-use-stderr-and-environment-for-config.yaml
+ - 713-maven-timestamp-snapshot.yml
+ - 768-facter.yml
+ - 773-resize-partition.yml
+ - 788-fix_omapi_host_on_python3.yaml
+ - 797-proxmox-kvm-cloud-init.yaml
+ - 805-docker_image-build-output.yml
+ - 823-terraform_init_reconfigure.yaml
+ - 850-proxmox_kvm-remove_hard_coded_defaults.yml
+ - 886-postgresql_query_add_ret_vals.yml
+ - 891-packet_net-fix-not-subscriptable.yaml
+ - 968-gitlab_variables-pagination.yml
+ - 993-file-capabilities.yml
+ - community.docker-31-docker-secret.yml
+ - docker-migration.yml
+ - fix-plugin-imports.yml
+ - hetzner-migration.yml
+ - lookup-passwordstore-umask.yml
+ - nmcli-refactor.yml
+ - odbc.yml
+ - openbsd_pkg.yml
+ - postgresql-migration.yml
+ - proxmox_template-appliance-download.yml
+ - remove-ansible.posix-dependency.yml
+ modules:
+ - description: Launch a Nomad Job
+ name: nomad_job
+ namespace: clustering.nomad
+ - description: Get Nomad Jobs info
+ name: nomad_job_info
+ namespace: clustering.nomad
+ - description: Track a code or infrastructure change as a PagerDuty change event
+ name: pagerduty_change
+ namespace: monitoring
+ - description: Manage a user account on PagerDuty
+ name: pagerduty_user
+ namespace: monitoring
+ - description: Retrieve information about one or more Proxmox VE domains
+ name: proxmox_domain_info
+ namespace: cloud.misc
+ - description: Retrieve information about one or more Proxmox VE groups
+ name: proxmox_group_info
+ namespace: cloud.misc
+ - description: Retrieve information about one or more Proxmox VE users
+ name: proxmox_user_info
+ namespace: cloud.misc
+ release_date: '2020-11-26'
+ 1.3.1:
+ changes:
+ bugfixes:
+ - bigpanda - removed the dynamic default for ``host`` param (https://github.com/ansible-collections/community.general/pull/1423).
+ - bitbucket_pipeline_variable - change pagination logic for pipeline variable
+ get API (https://github.com/ansible-collections/community.general/issues/1425).
+ - cobbler inventory script - add Python 3 support (https://github.com/ansible-collections/community.general/issues/638).
+ - docker_container - the validation for ``capabilities`` in ``device_requests``
+ was incorrect (https://github.com/ansible-collections/community.docker/issues/42,
+ https://github.com/ansible-collections/community.docker/pull/43).
+ - git_config - now raises an error for non-existent repository paths (https://github.com/ansible-collections/community.general/issues/630).
+ - icinga2_host - fix returning error codes (https://github.com/ansible-collections/community.general/pull/335).
+ - jira - provide error message raised from exception (https://github.com/ansible-collections/community.general/issues/1504).
+ - json_query - handle ``AnsibleUnicode`` and ``AnsibleUnsafeText`` (https://github.com/ansible-collections/community.general/issues/320).
+ - keycloak module_utils - provide meaningful error message to user when auth
+ URL does not start with http or https (https://github.com/ansible-collections/community.general/issues/331).
+ - ldap_entry - improvements in documentation, simplifications, and replacement of code
+ with better ``AnsibleModule`` arguments (https://github.com/ansible-collections/community.general/pull/1516).
+ - mas - fix ``invalid literal`` when no app can be found (https://github.com/ansible-collections/community.general/pull/1436).
+ - nios_host_record - fix to remove ``aliases`` (CNAMES) for configuration comparison
+ (https://github.com/ansible-collections/community.general/issues/1335).
+ - osx_defaults - unquote values and unescape double quotes when reading array
+ values (https://github.com/ansible-collections/community.general/pull/358).
+ - profitbricks_nic - removed the dynamic default for ``name`` param (https://github.com/ansible-collections/community.general/pull/1423).
+ - profitbricks_nic - replaced code with ``required`` and ``required_if`` (https://github.com/ansible-collections/community.general/pull/1423).
+ - redfish_info module, redfish_utils module utils - correct ``PartNumber`` property
+ name in Redfish ``GetMemoryInventory`` command (https://github.com/ansible-collections/community.general/issues/1483).
+ - saltstack connection plugin - use ``hashutil.base64_decodefile`` to ensure
+ that the file checksum is preserved (https://github.com/ansible-collections/community.general/pull/1472).
+ - udm_user - removed the dynamic default for ``userexpiry`` param (https://github.com/ansible-collections/community.general/pull/1423).
+ - utm_network_interface_address - changed param type from invalid 'boolean'
+ to valid 'bool' (https://github.com/ansible-collections/community.general/pull/1423).
+ - utm_proxy_exception - four parameters had their elements type set to 'string' (invalid);
+ changed to 'str' (https://github.com/ansible-collections/community.general/pull/1399).
+ - vmadm - simplification of code (https://github.com/ansible-collections/community.general/pull/1415).
+ - xfconf - add in missing return values that are specified in the documentation
+ (https://github.com/ansible-collections/community.general/issues/1418).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 1.3.1.yml
+ - 1399-fixed-wrong-elements-type.yaml
+ - 1415-valmod_req_mismatch.yml
+ - 1419-xfconf-return-values.yaml
+ - 1423-valmod_multiple_cases.yml
+ - 1425_bitbucket_pipeline_variable.yml
+ - 1436-mas-fix-no-app-installed.yml
+ - 1472-saltstack-fix-put_file-to-preserve-checksum.yml
+ - 1484-fix-property-name-in-redfish-memory-inventory.yml
+ - 1504_jira.yml
+ - 1516-ldap_entry-improvements.yaml
+ - 320_unsafe_text.yml
+ - 331_keycloak.yml
+ - 335-icinga2_host-return-error-code.yaml
+ - 630-git_config-handling-invalid-dir.yaml
+ - 638_cobbler_py3.yml
+ - community.docker-43-docker_container-device_requests.yml
+ - fix_parsing_array_values_in_osx_defaults.yml
+ - nios_host_record-fix-aliases-removal.yml
+ release_date: '2020-12-21'
+ 1.3.2:
+ changes:
+ bugfixes:
+ - docker_image - if ``push=true`` is used with ``repository``, and the image
+ does not need to be tagged, still push. This can happen if ``repository``
+ and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52,
+ https://github.com/ansible-collections/community.docker/pull/53).
+ - docker_image - report error when loading a broken archive that contains no
+ image (https://github.com/ansible-collections/community.docker/issues/46,
+ https://github.com/ansible-collections/community.docker/pull/55).
+ - docker_image - report error when the loaded archive does not contain the specified
+ image (https://github.com/ansible-collections/community.docker/issues/41,
+ https://github.com/ansible-collections/community.docker/pull/55).
+ - jira - ``fetch`` and ``search`` no longer indicate that something changed
+ (https://github.com/ansible-collections/community.general/pull/1536).
+ - jira - ensured parameter ``issue`` is mandatory for operation ``transition``
+ (https://github.com/ansible-collections/community.general/pull/1536).
+ - jira - module no longer incorrectly reports change for information gathering
+ operations (https://github.com/ansible-collections/community.general/pull/1536).
+ - jira - replaced custom parameter validation with ``required_if`` (https://github.com/ansible-collections/community.general/pull/1536).
+ - launchd - handle deprecated APIs like ``readPlist`` and ``writePlist`` in
+ ``plistlib`` (https://github.com/ansible-collections/community.general/issues/1552).
+ - ldap_search - the module no longer incorrectly reports a change (https://github.com/ansible-collections/community.general/issues/1040).
+ - make - fixed ``make`` parameter used for check mode when running a non-GNU
+ ``make`` (https://github.com/ansible-collections/community.general/pull/1574).
+ - monit - add support for all monit service checks (https://github.com/ansible-collections/community.general/pull/1532).
+ - nios_member - fix Python 3 compatibility with nios api ``member_normalize``
+ function (https://github.com/ansible-collections/community.general/issues/1526).
+  - nmcli - remove ``bridge-slave`` from the list of IP-based connections (https://github.com/ansible-collections/community.general/issues/1500).
+ - pamd - added logic to retain the comment line (https://github.com/ansible-collections/community.general/issues/1394).
+ - passwordstore lookup plugin - always use explicit ``show`` command to retrieve
+ password. This ensures compatibility with ``gopass`` and avoids problems when
+ password names equal ``pass`` commands (https://github.com/ansible-collections/community.general/pull/1493).
+  - rhn_channel - fixed a failure on Python 2.7.5 when certificate validation is
+    disabled, by creating the correct ``ssl_context`` (https://github.com/ansible-collections/community.general/pull/470).
+ - sendgrid - update documentation and warn user about sendgrid Python library
+ version (https://github.com/ansible-collections/community.general/issues/1553).
+ - syslogger - update ``syslog.openlog`` API call for older Python versions,
+ and improve error handling (https://github.com/ansible-collections/community.general/issues/953).
+ - yaml callback plugin - do not remove non-ASCII Unicode characters from multiline
+ string output (https://github.com/ansible-collections/community.general/issues/1519).
+ major_changes:
+ - 'For community.general 2.0.0, the Google modules will be moved to the `community.google
+ <https://galaxy.ansible.com/community/google>`_ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use Google modules from this collection,
+ you will need to adjust your playbooks and roles to use FQCNs starting with
+    ``community.google.`` instead of ``community.general.``, for example replace
+    ``community.general.gcpubsub`` in a task by ``community.google.gcpubsub``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the Google modules, you have to make sure to install the ``community.google``
+ collection as well.
+
+ If you are using FQCNs, for example ``community.general.gcpubsub`` instead
+ of ``gcpubsub``, it will continue working, but we still recommend to adjust
+ the FQCNs as well.
+
+ '
+ - 'For community.general 2.0.0, the OC connection plugin will be moved to the
+ `community.okd <https://galaxy.ansible.com/community/okd>`_ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use OC connection plugin from this collection,
+ you will need to adjust your playbooks and roles to use FQCNs ``community.okd.oc``
+ instead of ``community.general.oc``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the OC connection plugin, you have to make sure to install the ``community.okd``
+ collection as well.
+
+ If you are using FQCNs, in other words ``community.general.oc`` instead of
+ ``oc``, it will continue working, but we still recommend to adjust this FQCN
+ as well.
+
+ '
+ - 'For community.general 2.0.0, the hashi_vault lookup plugin will be moved
+ to the `community.hashi_vault <https://galaxy.ansible.com/community/hashi_vault>`_
+ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use hashi_vault lookup plugin from this
+ collection, you will need to adjust your playbooks and roles to use FQCNs
+ ``community.hashi_vault.hashi_vault`` instead of ``community.general.hashi_vault``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the hashi_vault lookup plugin, you have to make sure to install the ``community.hashi_vault``
+ collection as well.
+
+ If you are using FQCNs, in other words ``community.general.hashi_vault`` instead
+ of ``hashi_vault``, it will continue working, but we still recommend to adjust
+ this FQCN as well.
+
+ '
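
A minimal sketch of the FQCN adjustment described in the migration notes above, for Ansible 2.9 users; module options, secret paths, and values are hypothetical, only the collection prefix changes:

# before: community.general.gcpubsub
- name: Publish to a Pub/Sub topic
  community.google.gcpubsub:
    topic: my-topic
    state: present

# before: lookup('community.general.hashi_vault', ...)
- name: Read a secret from Vault
  ansible.builtin.debug:
    msg: "{{ lookup('community.hashi_vault.hashi_vault', 'secret=secret/data/app:password') }}"
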
+ minor_changes:
+ - homebrew_cask - Homebrew will be deprecating use of ``brew cask`` commands
+ as of version 2.6.0, see https://brew.sh/2020/12/01/homebrew-2.6.0/. Added
+ logic to stop using ``brew cask`` for brew version >= 2.6.0 (https://github.com/ansible-collections/community.general/pull/1481).
+ - jira - added the traceback output to ``fail_json()`` calls deriving from exceptions
+ (https://github.com/ansible-collections/community.general/pull/1536).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 1.3.2.yml
+ - 1040-ldap_search-changed-must-be-false.yaml
+ - 1394-pamd-removing-comments.yaml
+ - 1481-deprecated-brew-cask-command.yaml
+ - 1493-fix_passwordstore.py_to_be_compatible_with_gopass_versions.yml
+ - 1517-bridge-slave-from-list-of-ip-based-connections.yml
+ - 1522-yaml-callback-unicode.yml
+ - 1527-fix-nios-api-member-normalize.yaml
+ - 1532-monit-support-all-services.yaml
+ - 1552_launchd.yml
+ - 1553_sendgrid.yml
+ - 1574-make-question.yaml
+ - 470-spacewalk-legacy-python-certificate-validation.yaml
+ - 953_syslogger.yml
+ - community.docker-53-docker_image-tag-push.yml
+ - community.docker-55-docker_image-loading.yml
+ - google-migration.yml
+ - hashi_vault-migration.yml
+ - jira_improvements.yaml
+ - oc-migration.yml
+ release_date: '2021-01-04'
+ 1.3.3:
+ changes:
+ bugfixes:
+ - terraform - fix ``init_reconfigure`` option for proper CLI args (https://github.com/ansible-collections/community.general/pull/1620).
+ major_changes:
+ - 'For community.general 2.0.0, the kubevirt modules will be moved to the `community.kubevirt
+ <https://galaxy.ansible.com/community/kubevirt>`_ collection.
+
+ A redirection will be inserted so that users using ansible-base 2.10 or newer
+ do not have to change anything.
+
+
+ If you use Ansible 2.9 and explicitly use kubevirt modules from this collection,
+ you will need to adjust your playbooks and roles to use FQCNs starting with
+    ``community.kubevirt.`` instead of ``community.general.``, for example replace
+    ``community.general.kubevirt_vm`` in a task by ``community.kubevirt.kubevirt_vm``.
+
+
+ If you use ansible-base and installed ``community.general`` manually and rely
+ on the kubevirt modules, you have to make sure to install the ``community.kubevirt``
+ collection as well.
+
+ If you are using FQCNs, for example ``community.general.kubevirt_vm`` instead
+ of ``kubevirt_vm``, it will continue working, but we still recommend to adjust
+ the FQCNs as well.
+
+ '
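
A minimal sketch of the corresponding playbook change; the VM name and namespace are hypothetical, only the collection prefix changes:

# before: community.general.kubevirt_vm
- name: Ensure a virtual machine exists
  community.kubevirt.kubevirt_vm:
    name: testvm
    namespace: default
    state: present
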
+ release_summary: Bugfix/security release that addresses CVE-2021-20178.
+ security_fixes:
+  - snmp_facts - **CVE-2021-20178** - hide sensitive user information such as
+    ``privkey`` and ``authkey`` from being logged to the console (https://github.com/ansible-collections/community.general/pull/1621).
+ fragments:
+ - 1.3.3.yml
+ - 1620-terraform_init_reconfigure_fix.yml
+ - kubevirt-migration.yml
+ - snmp_facts.yml
+ release_date: '2021-01-13'
+ 1.3.4:
+ changes:
+ bugfixes:
+ - npm - handle json decode exception while parsing command line output (https://github.com/ansible-collections/community.general/issues/1614).
+ release_summary: Bugfix/security release that addresses CVE-2021-20180.
+ security_fixes:
+  - bitbucket_pipeline_variable - **CVE-2021-20180** - hide sensitive user information
+    marked as ``secured`` from being logged to the console (https://github.com/ansible-collections/community.general/pull/1635).
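
To illustrate the CVE-2021-20180 fix above, a hypothetical task; authentication and repository options are omitted for brevity, and since this release the secured value is masked in module output and logs:

- name: Store a secured pipeline variable
  community.general.bitbucket_pipeline_variable:
    name: DEPLOY_TOKEN
    value: "{{ deploy_token }}"
    secured: true
    state: present
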
+ fragments:
+ - 1.3.4.yml
+ - 1614_npm.yml
+ - cve_bitbucket_pipeline_variable.yml
+ release_date: '2021-01-14'
+ 1.3.5:
+ changes:
+ bugfixes:
+ - dnsmadeeasy - fix HTTP 400 errors when creating a TXT record (https://github.com/ansible-collections/community.general/issues/1237).
+ - docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+ - docker_image - fix crash on loading images with versions of Docker SDK for
+ Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72,
+ https://github.com/ansible-collections/community.docker/pull/73).
+ - homebrew - add default search path for ``brew`` on Apple silicon hardware
+ (https://github.com/ansible-collections/community.general/pull/1679).
+ - homebrew_cask - add default search path for ``brew`` on Apple silicon hardware
+ (https://github.com/ansible-collections/community.general/pull/1679).
+ - homebrew_tap - add default search path for ``brew`` on Apple silicon hardware
+ (https://github.com/ansible-collections/community.general/pull/1679).
+ - lldp - use ``get_bin_path`` to locate the ``lldpctl`` executable (https://github.com/ansible-collections/community.general/pull/1643).
+ - onepassword lookup plugin - updated to support password items, which place
+ the password field directly in the payload's ``details`` attribute (https://github.com/ansible-collections/community.general/pull/1610).
+  - passwordstore lookup plugin - fix compatibility with gopass when used with
+    ``create=true``. While pass returns 1 on a non-existent password, gopass returns
+    10 or 11, depending on whether a similarly named password was stored. We now
+    just check standard output and that the return code is not zero (see the sketch
+    after this list; https://github.com/ansible-collections/community.general/pull/1589).
+ - terraform - improve result code checking when executing terraform commands
+ (https://github.com/ansible-collections/community.general/pull/1632).
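
The sketch referenced by the passwordstore entry above; with the return-code handling fixed, ``create=true`` behaves the same whether the backend is pass or gopass. The path and length are illustrative:

- name: Fetch (or create on first use) an application password
  ansible.builtin.set_fact:
    app_db_password: "{{ lookup('community.general.passwordstore', 'apps/db/password create=true length=24') }}"
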
+ release_summary: Regular bugfix release.
+ fragments:
+ - 1.3.5.yml
+ - 1589-passwordstore-fix-passwordstore.py-to-be-compatible-with-gopass.yaml
+ - 1610-bugfix-onepassword-lookup-plugin.yaml
+ - 1632-using_check_rc_in_terraform.yml
+ - 1654-dnsmadeeasy-http-400-fixes.yaml
+ - 1679-homebrew_search_path.yml
+ - community.docker-66-ipv6-zones.yml
+ - community.docker-73-docker_image-fix-old-docker-py-version.yml
+ - lldp-use-get_bin_path-to-locate-the-lldpctl-executable.yaml
+ release_date: '2021-01-26'
+ 1.3.6:
+ changes:
+ breaking_changes:
+ - utm_proxy_auth_profile - the ``frontend_cookie_secret`` return value now contains
+ a placeholder string instead of the module's ``frontend_cookie_secret`` parameter
+ (https://github.com/ansible-collections/community.general/pull/1736).
+ bugfixes:
+ - docker connection plugin - fix Docker version parsing, as some docker versions
+ have a leading ``v`` in the output of the command ``docker version --format
+ "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+ - filesystem - do not fail when ``resizefs=yes`` and ``fstype=xfs`` if there
+ is nothing to do, even if the filesystem is not mounted. This only covers
+ systems supporting access to unmounted XFS filesystems. Others will still
+ fail (https://github.com/ansible-collections/community.general/issues/1457,
+ https://github.com/ansible-collections/community.general/pull/1478).
+  - gitlab_user - make updates to the ``isadmin``, ``password`` and ``confirm``
+    options work for an already existing GitLab user (https://github.com/ansible-collections/community.general/pull/1724).
+ - parted - change the regex that decodes the partition size to better support
+ different formats that parted uses. Change the regex that validates parted's
+ version string (https://github.com/ansible-collections/community.general/pull/1695).
+ - redfish_info module, redfish_utils module utils - add ``Name`` and ``Id``
+ properties to output of Redfish inventory commands (https://github.com/ansible-collections/community.general/issues/1650).
+ - sensu-silence module - fix json parsing of sensu API responses on Python 3.5
+ (https://github.com/ansible-collections/community.general/pull/1703).
+ minor_changes:
+ - scaleway modules and inventory plugin - update regions and zones to add the
+ new ones (https://github.com/ansible-collections/community.general/pull/1690).
+ release_summary: Regular bugfix and security bugfix (potential information leaks
+ in multiple modules, CVE-2021-20191) release.
+ security_fixes:
+ - dnsmadeeasy - mark the ``account_key`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent
+ accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1728).
+ - gitlab_runner - mark the ``registration_token`` parameter as ``no_log`` to
+ avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - hwc_ecs_instance - mark the ``admin_pass`` parameter as ``no_log`` to avoid
+ leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - ibm_sa_host - mark the ``iscsi_chap_secret`` parameter as ``no_log`` to avoid
+ leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - keycloak_* modules - mark the ``auth_client_secret`` parameter as ``no_log``
+ to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - keycloak_client - mark the ``registration_access_token`` parameter as ``no_log``
+ to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - librato_annotation - mark the ``api_key`` parameter as ``no_log`` to avoid
+ leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - logentries_msg - mark the ``token`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - module_utils/_netapp, na_ontap_gather_facts - enabled ``no_log`` for the options
+ ``api_key`` and ``secret_key`` to prevent accidental disclosure (CVE-2021-20191,
+ https://github.com/ansible-collections/community.general/pull/1725).
+ - module_utils/identity/keycloak, keycloak_client, keycloak_clienttemplate,
+ keycloak_group - enabled ``no_log`` for the option ``auth_client_secret``
+ to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725).
+ - nios_nsgroup - mark the ``tsig_key`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - oneandone_firewall_policy, oneandone_load_balancer, oneandone_monitoring_policy,
+ oneandone_private_network, oneandone_public_ip - mark the ``auth_token`` parameter
+ as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - ovirt - mark the ``instance_key`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - ovirt - mark the ``instance_rootpw`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - pagerduty_alert - mark the ``api_key``, ``service_key`` and ``integration_key``
+ parameters as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - pagerduty_change - mark the ``integration_key`` parameter as ``no_log`` to
+ avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - pingdom - mark the ``key`` parameter as ``no_log`` to avoid leakage of secrets
+ (https://github.com/ansible-collections/community.general/pull/1736).
+ - pulp_repo - mark the ``feed_client_key`` parameter as ``no_log`` to avoid
+ leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - rax_clb_ssl - mark the ``private_key`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - redfish_command - mark the ``update_creds.password`` parameter as ``no_log``
+ to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - rollbar_deployment - mark the ``token`` parameter as ``no_log`` to avoid leakage
+ of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - spotinst_aws_elastigroup - mark the ``multai_token`` and ``token`` parameters
+ as ``no_log`` to avoid leakage of secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - stackdriver - mark the ``key`` parameter as ``no_log`` to avoid leakage of
+ secrets (https://github.com/ansible-collections/community.general/pull/1736).
+ - utm_proxy_auth_profile - enabled ``no_log`` for the option ``frontend_cookie_secret``
+ to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.general/pull/1725).
+ - utm_proxy_auth_profile - mark the ``frontend_cookie_secret`` parameter as
+ ``no_log`` to avoid leakage of secrets. This causes the ``utm_proxy_auth_profile``
+    return value to no longer contain the correct value, but a placeholder
+ (https://github.com/ansible-collections/community.general/pull/1736).
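
To illustrate the ``no_log`` hardening listed above, a hypothetical task; once a parameter such as ``account_key`` is marked ``no_log``, Ansible masks its value in logged module arguments and results. All option values below are illustrative:

- name: Create a TXT record; API credentials are no longer written to logs
  community.general.dnsmadeeasy:
    account_key: "{{ dme_api_key }}"
    account_secret: "{{ dme_api_secret }}"
    domain: example.com
    record_type: TXT
    record_name: _acme-challenge
    record_value: some-token
    state: present
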
+ fragments:
+ - 1.3.6.yml
+ - 1478-filesystem-fix-1457-resizefs-idempotency.yml
+ - 1690-scaleway-regions.yaml
+ - 1691-add-name-and-id-props-to-redfish-inventory-output.yml
+ - 1695-parted-updatedregex.yaml
+ - 1703-sensu_silence-fix_json_parsing.yml
+ - 1724-various-fixes-for-updating-existing-gitlab-user.yml
+ - CVE-2021-20191_no_log.yml
+ - CVE-2021-20191_no_log_docker.yml
+ - community.docker-76-leading-v-support-in-docker-version.yml
+ - no_log-fixes.yml
+ release_date: '2021-02-09'
diff --git a/collections-debian-merged/ansible_collections/community/general/changelogs/config.yaml b/collections-debian-merged/ansible_collections/community/general/changelogs/config.yaml
new file mode 100644
index 00000000..fd0b422a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/changelogs/config.yaml
@@ -0,0 +1,29 @@
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+flatmap: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Community General
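
This configuration reads release notes from small YAML files under ``changelogs/fragments`` (``notesdir``) and groups them into the sections listed above. A hypothetical fragment, matching the entry format seen in changelog.yaml:

# changelogs/fragments/1234-some_module-fix-crash.yml (hypothetical filename and PR number)
bugfixes:
  - some_module - fix a crash when the option is unset (https://github.com/ansible-collections/community.general/pull/1234).
minor_changes:
  - some_module - add a new ``timeout`` option (https://github.com/ansible-collections/community.general/pull/1234).
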
diff --git a/collections-debian-merged/ansible_collections/community/general/changelogs/fragments/.keep b/collections-debian-merged/ansible_collections/community/general/changelogs/fragments/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/changelogs/fragments/.keep
diff --git a/collections-debian-merged/ansible_collections/community/general/meta/runtime.yml b/collections-debian-merged/ansible_collections/community/general/meta/runtime.yml
new file mode 100644
index 00000000..a7f8af61
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/meta/runtime.yml
@@ -0,0 +1,755 @@
+requires_ansible: '>=2.9.10'
+action_groups:
+ docker:
+ - docker_swarm
+ - docker_image_facts
+ - docker_service
+ - docker_compose
+ - docker_config
+ - docker_container
+ - docker_container_info
+ - docker_host_info
+ - docker_image
+ - docker_image_info
+ - docker_login
+ - docker_network
+ - docker_network_info
+ - docker_node
+ - docker_node_info
+ - docker_prune
+ - docker_secret
+ - docker_swarm
+ - docker_swarm_info
+ - docker_swarm_service
+ - docker_swarm_service_info
+ - docker_volume
+ - docker_volume_info
+ k8s:
+ - kubevirt_cdi_upload
+ - kubevirt_preset
+ - kubevirt_pvc
+ - kubevirt_rs
+ - kubevirt_template
+ - kubevirt_vm
+ ovirt:
+ - ovirt_affinity_label_facts
+ - ovirt_api_facts
+ - ovirt_cluster_facts
+ - ovirt_datacenter_facts
+ - ovirt_disk_facts
+ - ovirt_event_facts
+ - ovirt_external_provider_facts
+ - ovirt_group_facts
+ - ovirt_host_facts
+ - ovirt_host_storage_facts
+ - ovirt_network_facts
+ - ovirt_nic_facts
+ - ovirt_permission_facts
+ - ovirt_quota_facts
+ - ovirt_scheduling_policy_facts
+ - ovirt_snapshot_facts
+ - ovirt_storage_domain_facts
+ - ovirt_storage_template_facts
+ - ovirt_storage_vm_facts
+ - ovirt_tag_facts
+ - ovirt_template_facts
+ - ovirt_user_facts
+ - ovirt_vm_facts
+ - ovirt_vmpool_facts
+plugin_routing:
+ lookup:
+ conjur_variable:
+ redirect: cyberark.conjur.conjur_variable
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The conjur_variable lookup has been moved to the cyberark.conjur collection.
+ modules:
+ ali_instance_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ digital_ocean:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean
+ digital_ocean_account_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_account_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_account_facts
+ digital_ocean_account_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_account_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_account_info
+ digital_ocean_block_storage:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_block_storage module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_block_storage
+ digital_ocean_certificate:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_certificate module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_certificate
+ digital_ocean_certificate_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_certificate_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_certificate_facts
+ digital_ocean_certificate_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_certificate_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_certificate_info
+ digital_ocean_domain:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_domain module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_domain
+ digital_ocean_domain_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_domain_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_domain_facts
+ digital_ocean_domain_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_domain_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_domain_info
+ digital_ocean_droplet:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_droplet module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_droplet
+ digital_ocean_firewall_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_firewall_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_firewall_facts
+ digital_ocean_firewall_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_firewall_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_firewall_info
+ digital_ocean_floating_ip:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_floating_ip module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_floating_ip
+ digital_ocean_floating_ip_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_floating_ip_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_floating_ip_facts
+ digital_ocean_floating_ip_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_floating_ip_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_floating_ip_info
+ digital_ocean_image_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_image_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_image_facts
+ digital_ocean_image_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_image_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_image_info
+ digital_ocean_load_balancer_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_load_balancer_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_load_balancer_facts
+ digital_ocean_load_balancer_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_load_balancer_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_load_balancer_info
+ digital_ocean_region_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_region_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_region_facts
+ digital_ocean_region_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_region_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_region_info
+ digital_ocean_size_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_size_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_size_facts
+ digital_ocean_size_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_size_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_size_info
+ digital_ocean_snapshot_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_snapshot_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_snapshot_facts
+ digital_ocean_snapshot_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_snapshot_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_snapshot_info
+ digital_ocean_sshkey:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_sshkey module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_sshkey
+ digital_ocean_sshkey_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_sshkey_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_sshkey_facts
+ digital_ocean_sshkey_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_sshkey_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_sshkey_info
+ digital_ocean_tag:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_tag module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_tag
+ digital_ocean_tag_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_tag_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_tag_facts
+ digital_ocean_tag_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_tag_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_tag_info
+ digital_ocean_volume_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_volume_facts module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_volume_facts
+ digital_ocean_volume_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean_volume_info module has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean_volume_info
+ docker_image_facts:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ docker_service:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ firewalld:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The firewalld module has been moved to the ansible.posix collection.
+ redirect: ansible.posix.firewalld
+ foreman:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcdns_record:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcdns_zone:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gce:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcp_backend_service:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcp_forwarding_rule:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcp_healthcheck:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcp_target_proxy:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcp_url_map:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ gcpubsub_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ gcspanner:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ github_hooks:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ helm:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: The helm module in community.general has been deprecated. Use community.kubernetes.helm instead.
+ hpilo_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ idrac_redfish_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ infini_export:
+ redirect: infinidat.infinibox.infini_export
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_export module has been moved to the infinidat collection.
+ infini_export_client:
+ redirect: infinidat.infinibox.infini_export_client
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_export_client module has been moved to the infinidat collection.
+ infini_fs:
+ redirect: infinidat.infinibox.infini_fs
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_fs module has been moved to the infinidat collection.
+ infini_host:
+ redirect: infinidat.infinibox.infini_host
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_host module has been moved to the infinidat collection.
+ infini_pool:
+ redirect: infinidat.infinibox.infini_pool
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_pool module has been moved to the infinidat collection.
+ infini_vol:
+ redirect: infinidat.infinibox.infini_vol
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infini_vol module has been moved to the infinidat collection.
+ jenkins_job_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ katello:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ ldap_attr:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ logicmonitor:
+ tombstone:
+ removal_version: 1.0.0
+      warning_text: The logicmonitor module is no longer maintained and the API it used was disabled in 2017.
+ logicmonitor_facts:
+ tombstone:
+ removal_version: 1.0.0
+ warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017.
+ memset_memstore_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ memset_server_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ mysql_db:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_db module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_db
+ mysql_info:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_info module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_info
+ mysql_query:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_query module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_query
+ mysql_replication:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_replication module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_replication
+ mysql_user:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_user module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_user
+ mysql_variables:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql_variables module has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql_variables
+ na_cdot_aggregate:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_license:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_lun:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_qtree:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_svm:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_user:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_user_role:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_cdot_volume:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ na_ontap_gather_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ nginx_status_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ one_image_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ onepassword_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_datacenter_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_enclosure_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_ethernet_network_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_fc_network_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_fcoe_network_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_logical_interconnect_group_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_network_set_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ oneview_san_manager_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ online_server_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ online_user_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_affinity_label_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_api_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_cluster_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_datacenter_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_disk_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_event_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_external_provider_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_group_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_host_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_host_storage_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_network_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_nic_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_permission_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_quota_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_scheduling_policy_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_snapshot_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_storage_domain_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_storage_template_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_storage_vm_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_tag_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_template_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_user_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_vm_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ ovirt_vmpool_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ proxysql_backend_servers:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_backend_servers module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_backend_servers
+ proxysql_global_variables:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_global_variables module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_global_variables
+ proxysql_manage_config:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_manage_config module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_manage_config
+ proxysql_mysql_users:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_mysql_users module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_mysql_users
+ proxysql_query_rules:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_query_rules module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_query_rules
+ proxysql_replication_hostgroups:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_replication_hostgroups module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_replication_hostgroups
+ proxysql_scheduler:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql_scheduler module has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql_scheduler
+ purefa_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ purefb_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ python_requirements_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ redfish_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_image_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_ip_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_organization_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_security_group_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_server_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_snapshot_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ scaleway_volume_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ sf_account_manager:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ sf_check_connections:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ sf_snapshot_schedule_manager:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ sf_volume_access_group_manager:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ sf_volume_manager:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ smartos_image_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ vertica_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ xenserver_guest_facts:
+ deprecation:
+ removal_version: 3.0.0
+ warning_text: see plugin documentation for details
+ doc_fragments:
+ digital_ocean:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean docs_fragment has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infinibox doc_fragments plugin has been moved to the infinidat.infinibox collection.
+ mysql:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql docs_fragment has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql
+ proxysql:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The proxysql docs_fragment has been moved to the community.proxysql collection.
+ redirect: community.proxysql.proxysql
+ module_utils:
+ digital_ocean:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The digital_ocean module_utils has been moved to the community.digitalocean collection.
+ redirect: community.digitalocean.digital_ocean
+ firewalld:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The firewalld module_utils has been moved to the ansible.posix collection.
+ redirect: ansible.posix.firewalld
+ infinibox:
+ redirect: infinidat.infinibox.infinibox
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The infinibox module_utils plugin has been moved to the infinidat.infinibox collection.
+ mysql:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: The mysql module_utils has been moved to the community.mysql collection.
+ redirect: community.mysql.mysql
+ callback:
+ actionable:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ full_skip:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
+ stderr:
+ deprecation:
+ removal_version: 2.0.0
+ warning_text: see plugin documentation for details
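
The routing table above keeps existing playbooks working on ansible-base 2.10 and newer: a ``redirect`` transparently resolves the old name to the new collection, while ``deprecation`` only emits a warning until the stated removal version. A hypothetical task; option values are illustrative:

- name: Still resolves through the mysql_db redirect above, with a deprecation warning
  community.general.mysql_db:   # resolved to community.mysql.mysql_db
    name: appdb
    state: present
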
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py
new file mode 100644
index 00000000..92fb079a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/iptables_state.py
@@ -0,0 +1,198 @@
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.action import ActionBase
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
+from ansible.utils.vars import merge_hash
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ # Keep internal params away from user interactions
+ _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+ DEFAULT_SUDOABLE = True
+
+ MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+ MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
+ "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
+ "(recommended).")
+ MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+
+ def _async_result(self, module_args, task_vars, timeout):
+ '''
+        Retrieve results of the asynchronous task, and display them in place of
+ the async wrapper results (those with the ansible_job_id key).
+ '''
+ # At least one iteration is required, even if timeout is 0.
+ for i in range(max(1, timeout)):
+ async_result = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=module_args,
+ task_vars=task_vars,
+ wrap_async=False)
+ if async_result['finished'] == 1:
+ break
+ time.sleep(min(1, timeout))
+
+ return async_result
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # Set short names for values we'll have to compare or reuse
+ task_poll = self._task.poll
+ task_async = self._task.async_val
+ check_mode = self._play_context.check_mode
+ max_timeout = self._connection._play_context.timeout
+ module_name = self._task.action
+ module_args = self._task.args
+
+ if module_args.get('state', None) == 'restored':
+ if not wrap_async:
+ if not check_mode:
+ display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ task_poll,
+ task_async,
+ max_timeout))
+ elif task_poll:
+ raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ task_poll,
+ task_async,
+ max_timeout))
+ else:
+ if task_async > max_timeout and not check_mode:
+ display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ task_poll,
+ task_async,
+ max_timeout))
+
+ # BEGIN snippet from async_status action plugin
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ # for backwards compatibility we need to get the dir from
+ # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+ # deprecated and will be removed in favour of shell options
+ async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ display.deprecated(msg, version='2.0.0',
+ collection_name='community.general') # was Ansible 2.12
+ else:
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ # END snippet from async_status action plugin
+
+ # Bind the loop max duration to consistent values on both
+ # remote and local sides (if not the same, make the loop
+ # longer on the controller); and set a backup file path.
+ module_args['_timeout'] = task_async
+ module_args['_back'] = '%s/iptables.state' % async_dir
+ async_status_args = dict(_async_dir=async_dir)
+ confirm_cmd = 'rm -f %s' % module_args['_back']
+ starter_cmd = 'touch %s.starter' % module_args['_back']
+ remaining_time = max(task_async, max_timeout)
+
+ # do work!
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+ # Then the 3-steps "go ahead or rollback":
+ # 1. Catch early errors of the module (in asynchronous task) if any.
+ # Touch a file on the target to signal the module to process now.
+ # 2. Reset connection to ensure a persistent one will not be reused.
+ # 3. Confirm the restored state by removing the backup on the remote.
+ # Retrieve the results of the asynchronous task to return them.
+ if '_back' in module_args:
+ async_status_args['jid'] = result.get('ansible_job_id', None)
+ if async_status_args['jid'] is None:
+ raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
+
+ # Catch early errors due to missing mandatory option, bad
+ # option type/value, missing required system command, etc.
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
+
+                # The module knows not to process the main iptables-restore
+                # command before finding (and deleting) the 'starter' cookie on
+                # the host, so the previous query will not hit the SSH timeout.
+ garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
+
+ # As the main command is not yet executed on the target, here
+                # 'finished' means 'failed before the main command was executed'.
+ if not result['finished']:
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ for x in range(max_timeout):
+ time.sleep(1)
+ remaining_time -= 1
+ # - AnsibleConnectionFailure covers rejected requests (i.e.
+ # by rules with '--jump REJECT')
+ # - ansible_timeout is able to cover dropped requests (due
+ # to a rule or policy DROP) if not lower than async_val.
+ try:
+ garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+ break
+ except AnsibleConnectionFailure:
+ continue
+
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
+
+ # Cleanup async related stuff and internal params
+ for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+ if result.get(key):
+ del result[key]
+
+ if result.get('invocation', {}).get('module_args'):
+ if '_timeout' in result['invocation']['module_args']:
+ del result['invocation']['module_args']['_back']
+ del result['invocation']['module_args']['_timeout']
+
+ async_status_args['mode'] = 'cleanup'
+ garbage = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=async_status_args,
+ task_vars=task_vars,
+ wrap_async=False)
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
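
A minimal usage sketch for the rollback flow implemented above, following the plugin's own guidance for ``state=restored`` (``poll: 0`` and ``async`` greater than 2 but not above the connection timeout); the rules path and async value are illustrative:

- name: Restore saved rules, rolling back automatically if connectivity is lost
  community.general.iptables_state:
    state: restored
    path: /etc/iptables/rules.v4
  async: 20    # > 2 and not greater than ansible_timeout
  poll: 0
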
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py
new file mode 100644
index 00000000..e36397ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/shutdown.py
@@ -0,0 +1,211 @@
+# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'msg',
+ 'delay',
+ 'search_paths'
+ ))
+
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_SHUTDOWN_DELAY = 0
+ DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'poweroff',
+ 'vmkernel': 'halt',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-h +{delay_min} "{message}"',
+ 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-h +{delay_min} "{message}"',
+ 'openbsd': '-h +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fh',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def delay(self):
+ return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, default))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If the delay is less than 60 seconds, it becomes 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def perform_shutdown(self, task_vars, distribution):
+ result = {}
+ shutdown_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ self.cleanup(force=True)
+ try:
+ display.vvv("{action}: shutting down server...".format(action=self._task.action))
+ display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ if self._play_context.check_mode:
+ shutdown_result['rc'] = 0
+ else:
+ shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+ # If the connection is closed too quickly because the system is shutting down, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ shutdown_result['rc'] = 0
+
+ if shutdown_result['rc'] != 0:
+ result['failed'] = True
+ result['shutdown'] = False
+ result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(shutdown_result['stdout'].strip()),
+ stderr=to_native(shutdown_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ result['shutdown_command'] = shutdown_command_exec
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ # If running with a local connection, fail so we don't shut down the control node itself
+ if self._connection.transport == 'local' and (not self._play_context.check_mode):
+ msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
+
+ if task_vars is None:
+ task_vars = {}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Initiate shutdown
+ shutdown_result = self.perform_shutdown(task_vars, distribution)
+
+ if shutdown_result['failed']:
+ result = shutdown_result
+ return result
+
+ result['shutdown'] = True
+ result['changed'] = True
+ result['shutdown_command'] = shutdown_result['shutdown_command']
+
+ return result
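
The lookup in _get_value_from_facts() above resolves the most specific match first: distribution name plus major version, then distribution name, then OS family, and finally the class default. A small self-contained sketch of that fallback order follows; the sample distribution dicts are illustrative, not real setup-module output.

# Fallback lookup: name+version, then name, then family, then default.
SHUTDOWN_COMMAND_ARGS = {
    'alpine': '',
    'freebsd': '-h +{delay_sec}s "{message}"',
    'linux': '-h {delay_min} "{message}"',
}
DEFAULT_ARGS = '-h {delay_min} "{message}"'

def pick(table, distribution, default):
    return table.get(
        distribution['name'] + distribution['version'],
        table.get(distribution['name'],
                  table.get(distribution['family'], default)))

debian = {'name': 'debian', 'version': '10', 'family': 'linux'}
freebsd = {'name': 'freebsd', 'version': '12', 'family': 'freebsd'}

print(pick(SHUTDOWN_COMMAND_ARGS, debian, DEFAULT_ARGS))   # family match for Linux distros
print(pick(SHUTDOWN_COMMAND_ARGS, freebsd, DEFAULT_ARGS).format(
    delay_sec=120, delay_min=2, message='maintenance'))    # -> -h +120s "maintenance"
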
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py
new file mode 100644
index 00000000..92fb079a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/iptables_state.py
@@ -0,0 +1,198 @@
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.action import ActionBase
+from ansible.errors import AnsibleError, AnsibleActionFail, AnsibleConnectionFailure
+from ansible.utils.vars import merge_hash
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ # Keep internal params away from user interactions
+ _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+ DEFAULT_SUDOABLE = True
+
+ MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+ MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
+ "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
+ "(recommended).")
+ MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+
+ def _async_result(self, module_args, task_vars, timeout):
+ '''
+ Retrieve results of the asynchronous task, and display them in place of
+ the async wrapper results (those with the ansible_job_id key).
+ '''
+ # At least one iteration is required, even if timeout is 0.
+ for i in range(max(1, timeout)):
+ async_result = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=module_args,
+ task_vars=task_vars,
+ wrap_async=False)
+ if async_result['finished'] == 1:
+ break
+ time.sleep(min(1, timeout))
+
+ return async_result
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # Set short names for values we'll have to compare or reuse
+ task_poll = self._task.poll
+ task_async = self._task.async_val
+ check_mode = self._play_context.check_mode
+ max_timeout = self._connection._play_context.timeout
+ module_name = self._task.action
+ module_args = self._task.args
+
+ if module_args.get('state', None) == 'restored':
+ if not wrap_async:
+ if not check_mode:
+ display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ task_poll,
+ task_async,
+ max_timeout))
+ elif task_poll:
+ raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ task_poll,
+ task_async,
+ max_timeout))
+ else:
+ if task_async > max_timeout and not check_mode:
+ display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ task_poll,
+ task_async,
+ max_timeout))
+
+ # BEGIN snippet from async_status action plugin
+ env_async_dir = [e for e in self._task.environment if
+ "ANSIBLE_ASYNC_DIR" in e]
+ if len(env_async_dir) > 0:
+ # for backwards compatibility we need to get the dir from
+ # ANSIBLE_ASYNC_DIR that is defined in the environment. This is
+ # deprecated and will be removed in favour of shell options
+ async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']
+
+ msg = "Setting the async dir from the environment keyword " \
+ "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir " \
+ "shell option instead"
+ display.deprecated(msg, version='2.0.0',
+ collection_name='community.general') # was Ansible 2.12
+ else:
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+ # END snippet from async_status action plugin
+
+ # Bind the loop max duration to consistent values on both
+ # remote and local sides (if not the same, make the loop
+ # longer on the controller); and set a backup file path.
+ module_args['_timeout'] = task_async
+ module_args['_back'] = '%s/iptables.state' % async_dir
+ async_status_args = dict(_async_dir=async_dir)
+ confirm_cmd = 'rm -f %s' % module_args['_back']
+ starter_cmd = 'touch %s.starter' % module_args['_back']
+ remaining_time = max(task_async, max_timeout)
+
+ # do work!
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+ # Then the three-step "go ahead or rollback" sequence:
+ # 1. Catch early errors of the module (in asynchronous task) if any.
+ # Touch a file on the target to signal the module to process now.
+ # 2. Reset connection to ensure a persistent one will not be reused.
+ # 3. Confirm the restored state by removing the backup on the remote.
+ # Retrieve the results of the asynchronous task to return them.
+ if '_back' in module_args:
+ async_status_args['jid'] = result.get('ansible_job_id', None)
+ if async_status_args['jid'] is None:
+ raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
+
+ # Catch early errors due to missing mandatory option, bad
+ # option type/value, missing required system command, etc.
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
+
+ # The module will not run the main iptables-restore command until
+ # it finds (and deletes) the 'starter' cookie on the host, so the
+ # previous query does not hit the SSH timeout.
+ garbage = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
+
+ # As the main command has not yet been executed on the target,
+ # 'finished' here means 'failed before the main command was run'.
+ if not result['finished']:
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ for x in range(max_timeout):
+ time.sleep(1)
+ remaining_time -= 1
+ # - AnsibleConnectionFailure covers rejected requests (i.e.
+ # by rules with '--jump REJECT')
+ # - ansible_timeout is able to cover dropped requests (due
+ # to a rule or policy DROP) if not lower than async_val.
+ try:
+ garbage = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+ break
+ except AnsibleConnectionFailure:
+ continue
+
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
+
+ # Cleanup async related stuff and internal params
+ for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+ if result.get(key):
+ del result[key]
+
+ if result.get('invocation', {}).get('module_args'):
+ if '_timeout' in result['invocation']['module_args']:
+ del result['invocation']['module_args']['_back']
+ del result['invocation']['module_args']['_timeout']
+
+ async_status_args['mode'] = 'cleanup'
+ garbage = self._execute_module(
+ module_name='ansible.builtin.async_status',
+ module_args=async_status_args,
+ task_vars=task_vars,
+ wrap_async=False)
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py
new file mode 100644
index 00000000..e36397ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/action/system/shutdown.py
@@ -0,0 +1,211 @@
+# Copyright: (c) 2020, Amin Vakil <info@aminvakil.com>
+# Copyright: (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright: (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'msg',
+ 'delay',
+ 'search_paths'
+ ))
+
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_SHUTDOWN_DELAY = 0
+ DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'poweroff',
+ 'vmkernel': 'halt',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-h +{delay_min} "{message}"',
+ 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-h +{delay_min} "{message}"',
+ 'openbsd': '-h +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fh',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def delay(self):
+ return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, default))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If the delay is less than 60 seconds, it becomes 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def perform_shutdown(self, task_vars, distribution):
+ result = {}
+ shutdown_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ self.cleanup(force=True)
+ try:
+ display.vvv("{action}: shutting down server...".format(action=self._task.action))
+ display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ if self._play_context.check_mode:
+ shutdown_result['rc'] = 0
+ else:
+ shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+ # If the connection is closed too quickly because the system is shutting down, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ shutdown_result['rc'] = 0
+
+ if shutdown_result['rc'] != 0:
+ result['failed'] = True
+ result['shutdown'] = False
+ result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(shutdown_result['stdout'].strip()),
+ stderr=to_native(shutdown_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ result['shutdown_command'] = shutdown_command_exec
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ # If running with a local connection, fail so we don't shut down the control node itself
+ if self._connection.transport == 'local' and (not self._play_context.check_mode):
+ msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
+
+ if task_vars is None:
+ task_vars = {}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Initiate shutdown
+ shutdown_result = self.perform_shutdown(task_vars, distribution)
+
+ if shutdown_result['failed']:
+ result = shutdown_result
+ return result
+
+ result['shutdown'] = True
+ result['changed'] = True
+ result['shutdown_command'] = shutdown_result['shutdown_command']
+
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py
new file mode 100644
index 00000000..d7f4ad81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/doas.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: doas
+ short_description: Do As user
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the doas utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: doas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_doas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DOAS_USER
+ become_exe:
+ description: Doas executable
+ default: doas
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: doas_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_doas_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DOAS_EXE
+ become_flags:
+ description: Options to pass to doas
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: doas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_doas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DOAS_FLAGS
+ become_pass:
+ description: password for doas prompt
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_doas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DOAS_PASS
+ ini:
+ - section: doas_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built-in one
+ default: []
+ ini:
+ - section: doas_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_doas_prompt_l10n
+ env:
+ - name: ANSIBLE_DOAS_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.doas'
+
+ # messages for detecting prompted password issues
+ fail = ('Permission denied',)
+ missing = ('Authorization required',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ # FIXME: more accurate would be: 'doas (%s@' % remote_user
+ # however become plugins don't have that information currently
+ b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:']
+ b_prompt = b"|".join(b_prompts)
+
+ return bool(re.match(b_prompt, b_output))
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ self.prompt = True
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if not self.get_option('become_pass') and '-n' not in flags:
+ flags += ' -n'
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ success_cmd = self._build_success_command(cmd, shell, noexe=True)
+ executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
+
+ return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
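
check_password_prompt() above relies on re.match(), so the default patterns only need to match the beginning of the output chunk; a leading banner would defeat them. A short sketch of that matching behaviour, with made-up output samples:

import re

b_prompt = b"|".join([br'doas \(', br'Password:'])

for b_output in (b'doas (alice@web1) password: ', b'Password: ', b'Last login: Mon ...'):
    print(b_output, bool(re.match(b_prompt, b_output)))
# -> True, True, False
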
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py
new file mode 100644
index 00000000..a0ff4c05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/dzdo.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: dzdo
+ short_description: Centrify's Direct Authorize
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: dzdo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_dzdo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DZDO_USER
+ become_exe:
+ description: Dzdo executable
+ default: dzdo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: dzdo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_dzdo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DZDO_EXE
+ become_flags:
+ description: Options to pass to dzdo
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: dzdo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_dzdo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DZDO_FLAGS
+ become_pass:
+ description: Password to pass to dzdo
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_dzdo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DZDO_PASS
+ ini:
+ - section: dzdo_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.dzdo'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if self.get_option('become_pass'):
+ self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
+ flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
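
When a become password is supplied, build_become_command() above strips the non-interactive '-n' flag and injects a recognisable prompt with '-p'. A tiny sketch of that flag rewriting; the key value stands in for the real self._id:

flags = '-H -S -n'
prompt = '[dzdo via ansible, key=abc123] password:'
flags = '%s -p "%s"' % (flags.replace('-n', ''), prompt)
print(flags)   # -> -H -S  -p "[dzdo via ansible, key=abc123] password:"
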
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py
new file mode 100644
index 00000000..d81b7a11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/ksu.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: ksu
+ short_description: Kerberos substitute user
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the ksu utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: ksu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_ksu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_KSU_USER
+ required: True
+ become_exe:
+ description: Ksu executable
+ default: ksu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: ksu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_ksu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_KSU_EXE
+ become_flags:
+ description: Options to pass to ksu
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: ksu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_ksu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_KSU_FLAGS
+ become_pass:
+ description: ksu password
+ required: False
+ vars:
+ - name: ansible_ksu_pass
+ - name: ansible_become_pass
+ - name: ansible_become_password
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_KSU_PASS
+ ini:
+ - section: ksu_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built-in one
+ default: []
+ ini:
+ - section: ksu_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_ksu_prompt_l10n
+ env:
+ - name: ANSIBLE_KSU_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.ksu'
+
+ # messages for detecting prompted password issues
+ fail = ('Password incorrect',)
+ missing = ('No password given',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ prompts = self.get_option('prompt_l10n') or ["Kerberos password for .*@.*:"]
+ b_prompt = b"|".join(to_bytes(p) for p in prompts)
+
+ return bool(re.match(b_prompt, b_output))
+
+ def build_become_command(self, cmd, shell):
+
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ # Prompt handling for ``ksu`` is more complicated, this
+ # is used to satisfy the connection plugin
+ self.prompt = True
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py
new file mode 100644
index 00000000..6751f9b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/machinectl.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: machinectl
+ short_description: Systemd's machinectl privilege escalation
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: machinectl_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_machinectl_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_MACHINECTL_USER
+ become_exe:
+ description: Machinectl executable
+ default: machinectl
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: machinectl_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_machinectl_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_MACHINECTL_EXE
+ become_flags:
+ description: Options to pass to machinectl
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: machinectl_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_machinectl_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_MACHINECTL_FLAGS
+ become_pass:
+ description: Password for machinectl
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_machinectl_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_MACHINECTL_PASS
+ ini:
+ - section: machinectl_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.machinectl'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py
new file mode 100644
index 00000000..9d64ff6a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pbrun.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pbrun
+ short_description: PowerBroker run
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pbrun utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pbrun_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pbrun_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PBRUN_USER
+ become_exe:
+ description: Pbrun executable
+ default: pbrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pbrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pbrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PBRUN_EXE
+ become_flags:
+ description: Options to pass to pbrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pbrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pbrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PBRUN_FLAGS
+ become_pass:
+ description: Password for pbrun
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pbrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PBRUN_PASS
+ ini:
+ - section: pbrun_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command pbrun calls in 'shell -c' or not
+ default: False
+ type: bool
+ ini:
+ - section: pbrun_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pbrun_wrap_execution
+ env:
+ - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pbrun'
+
+ prompt = 'Password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+ noexe = not self.get_option('wrap_exe')
+
+ return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py
new file mode 100644
index 00000000..d86af6e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pfexec.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pfexec
+ short_description: profile based execution
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
+ author: ansible (@core)
+ options:
+ become_user:
+ description:
+ - User you 'become' to execute the task
+ - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out,
+ but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pfexec_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pfexec_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PFEXEC_USER
+ become_exe:
+ description: Pfexec executable
+ default: pfexec
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pfexec_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pfexec_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PFEXEC_EXE
+ become_flags:
+ description: Options to pass to pfexec
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pfexec_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pfexec_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PFEXEC_FLAGS
+ become_pass:
+ description: pfexec password
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pfexec_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PFEXEC_PASS
+ ini:
+ - section: pfexec_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command pfexec calls in 'shell -c' or not
+ default: False
+ type: bool
+ ini:
+ - section: pfexec_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pfexec_wrap_execution
+ env:
+ - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
+ notes:
+ - This plugin ignores I(become_user) as pfexec uses its own C(exec_attr) to figure this out.
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pfexec'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ noexe = not self.get_option('wrap_exe')
+ return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py
new file mode 100644
index 00000000..52fc3360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/pmrun.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: pmrun
+ short_description: Privilege Manager run
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pmrun utility.
+ author: ansible (@core)
+ options:
+ become_exe:
+ description: Pmrun executable
+ default: pmrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pmrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pmrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PMRUN_EXE
+ become_flags:
+ description: Options to pass to pmrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pmrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pmrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PMRUN_FLAGS
+ become_pass:
+ description: pmrun password
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pmrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PMRUN_PASS
+ ini:
+ - section: pmrun_become_plugin
+ key: password
+ notes:
+ - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user.
+'''
+
+from ansible.plugins.become import BecomeBase
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pmrun'
+ prompt = 'Enter UPM user password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py b/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py
new file mode 100644
index 00000000..b56e6ee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/become/sesu.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ become: sesu
+ short_description: CA Privileged Access Manager
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the sesu utility.
+ author: ansible (@nekonyuu)
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sesu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sesu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SESU_USER
+ become_exe:
+ description: sesu executable
+ default: sesu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sesu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sesu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SESU_EXE
+ become_flags:
+ description: Options to pass to sesu
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sesu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sesu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SESU_FLAGS
+ become_pass:
+ description: Password to pass to sesu
+ required: False
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sesu_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SESU_PASS
+ ini:
+ - section: sesu_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.sesu'
+
+ prompt = 'Please enter your password:'
+ fail = missing = ('Sorry, try again with sesu.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py
new file mode 100644
index 00000000..3cf670d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/memcached.py
@@ -0,0 +1,248 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ cache: memcached
+ short_description: Use memcached DB for cache
+ description:
+ - This cache uses JSON formatted, per host records saved in memcached.
+ requirements:
+ - memcache (python lib)
+ options:
+ _uri:
+ description:
+ - List of connection information for the memcached DBs
+ default: ['127.0.0.1:11211']
+ type: list
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import collections
+import os
+import time
+from multiprocessing import Lock
+from itertools import chain
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import MutableSet
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ import memcache
+ HAS_MEMCACHE = True
+except ImportError:
+ HAS_MEMCACHE = False
+
+display = Display()
+
+
+class ProxyClientPool(object):
+ """
+ Memcached connection pooling for thread/fork safety. Inspired by py-redis
+ connection pool.
+
+ Available connections are maintained in a deque and released in a FIFO manner.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.max_connections = kwargs.pop('max_connections', 1024)
+ self.connection_args = args
+ self.connection_kwargs = kwargs
+ self.reset()
+
+ def reset(self):
+ self.pid = os.getpid()
+ self._num_connections = 0
+ self._available_connections = collections.deque(maxlen=self.max_connections)
+ self._locked_connections = set()
+ self._lock = Lock()
+
+ def _check_safe(self):
+ if self.pid != os.getpid():
+ with self._lock:
+ if self.pid == os.getpid():
+ # bail out - another thread already acquired the lock
+ return
+ self.disconnect_all()
+ self.reset()
+
+ def get_connection(self):
+ self._check_safe()
+ try:
+ connection = self._available_connections.popleft()
+ except IndexError:
+ connection = self.create_connection()
+ self._locked_connections.add(connection)
+ return connection
+
+ def create_connection(self):
+ if self._num_connections >= self.max_connections:
+ raise RuntimeError("Too many memcached connections")
+ self._num_connections += 1
+ return memcache.Client(*self.connection_args, **self.connection_kwargs)
+
+ def release_connection(self, connection):
+ self._check_safe()
+ self._locked_connections.remove(connection)
+ self._available_connections.append(connection)
+
+ def disconnect_all(self):
+ for conn in chain(self._available_connections, self._locked_connections):
+ conn.disconnect_all()
+
+ def __getattr__(self, name):
+ def wrapped(*args, **kwargs):
+ return self._proxy_client(name, *args, **kwargs)
+ return wrapped
+
+ def _proxy_client(self, name, *args, **kwargs):
+ conn = self.get_connection()
+
+ try:
+ return getattr(conn, name)(*args, **kwargs)
+ finally:
+ self.release_connection(conn)
+
+
+class CacheModuleKeys(MutableSet):
+ """
+ A set subclass that keeps track of insertion time and persists
+ the set in memcached.
+ """
+ PREFIX = 'ansible_cache_keys'
+
+ def __init__(self, cache, *args, **kwargs):
+ self._cache = cache
+ self._keyset = dict(*args, **kwargs)
+
+ def __contains__(self, key):
+ return key in self._keyset
+
+ def __iter__(self):
+ return iter(self._keyset)
+
+ def __len__(self):
+ return len(self._keyset)
+
+ def add(self, key):
+ self._keyset[key] = time.time()
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def discard(self, key):
+ del self._keyset[key]
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def remove_by_timerange(self, s_min, s_max):
+ for k in list(self._keyset):  # iterate over a copy, since entries may be deleted below
+ t = self._keyset[k]
+ if s_min < t < s_max:
+ del self._keyset[k]
+ self._cache.set(self.PREFIX, self._keyset)
+
+
+class CacheModule(BaseCacheModule):
+
+ def __init__(self, *args, **kwargs):
+ connection = ['127.0.0.1:11211']
+
+ try:
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ connection = self.get_option('_uri')
+ self._timeout = self.get_option('_timeout')
+ self._prefix = self.get_option('_prefix')
+ except KeyError:
+ display.deprecated('Rather than importing CacheModules directly, '
+ 'use ansible.plugins.loader.cache_loader',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if C.CACHE_PLUGIN_CONNECTION:
+ connection = C.CACHE_PLUGIN_CONNECTION.split(',')
+ self._timeout = C.CACHE_PLUGIN_TIMEOUT
+ self._prefix = C.CACHE_PLUGIN_PREFIX
+
+ if not HAS_MEMCACHE:
+ raise AnsibleError("python-memcached is required for the memcached fact cache")
+
+ self._cache = {}
+ self._db = ProxyClientPool(connection, debug=0)
+ self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
+
+ def _make_key(self, key):
+ return "{0}{1}".format(self._prefix, key)
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._keys.remove_by_timerange(0, expiry_age)
+
+ def get(self, key):
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the keyset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = value
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+ self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
+ self._cache[key] = value
+ self._keys.add(key)
+
+ def keys(self):
+ self._expire_keys()
+ return list(iter(self._keys))
+
+ def contains(self, key):
+ self._expire_keys()
+ return key in self._keys
+
+ def delete(self, key):
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._keys.discard(key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ return self._keys.copy()
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py
new file mode 100644
index 00000000..80b00b4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/pickle.py
@@ -0,0 +1,67 @@
+# (c) 2017, Brian Coca
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: pickle
+ short_description: Pickle formatted files.
+ description:
+ - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+'''
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from ansible.module_utils.six import PY3
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by pickle files.
+ """
+
+ def _load(self, filepath):
+ # Pickle is a binary format
+ with open(filepath, 'rb') as f:
+ if PY3:
+ return pickle.load(f, encoding='bytes')
+ else:
+ return pickle.load(f)
+
+ def _dump(self, value, filepath):
+ with open(filepath, 'wb') as f:
+ # Use pickle protocol 2 which is compatible with Python 2.3+.
+ pickle.dump(value, f, protocol=2)
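The _dump/_load pair above is a plain protocol-2 pickle round trip. The following stand-alone sketch shows the same calls outside the plugin; the file path is invented for illustration, while the real plugin derives its paths from _uri and _prefix.

    import pickle

    facts = {'ansible_facts': {'os_family': 'Debian'}}

    with open('/tmp/example_fact_cache.pickle', 'wb') as f:
        # protocol 2 keeps the file readable by both Python 2.3+ and Python 3
        pickle.dump(facts, f, protocol=2)

    with open('/tmp/example_fact_cache.pickle', 'rb') as f:
        restored = pickle.load(f)  # the plugin additionally passes encoding='bytes' on Python 3

    assert restored == facts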
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py
new file mode 100644
index 00000000..fe41c4c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/redis.py
@@ -0,0 +1,233 @@
+# (c) 2014, Brian Coca, Josh Drake, et al
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ cache: redis
+ short_description: Use Redis DB for cache
+ description:
+ - This cache uses JSON formatted, per host records saved in Redis.
+ requirements:
+ - redis>=2.4.5 (python lib)
+ options:
+ _uri:
+ description:
+ - A colon separated string of connection information for Redis.
+ - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
+ - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
+ - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
+ required: True
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _keyset_name:
+ description: User defined name for the cache keyset.
+ default: ansible_cache_keys
+ env:
+ - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
+ ini:
+ - key: fact_caching_redis_keyset_name
+ section: defaults
+ version_added: 1.3.0
+ _sentinel_service_name:
+ description: The redis sentinel service name (also referred to as the cluster name).
+ env:
+ - name: ANSIBLE_CACHE_REDIS_SENTINEL
+ ini:
+ - key: fact_caching_redis_sentinel
+ section: defaults
+ version_added: 1.3.0
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+import time
+import json
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ from redis import StrictRedis, VERSION
+ HAS_REDIS = True
+except ImportError:
+ HAS_REDIS = False
+
+display = Display()
+
+
+class CacheModule(BaseCacheModule):
+ """
+ A caching module backed by redis.
+
+ Keys are maintained in a zset with their score being the timestamp
+ when they are inserted. This allows for the usage of 'zremrangebyscore'
+ to expire keys. This mechanism is used instead of a pattern-matched 'scan' for
+ performance.
+ """
+ _sentinel_service_name = None
+
+ def __init__(self, *args, **kwargs):
+ uri = ''
+
+ try:
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ uri = self.get_option('_uri')
+ self._timeout = float(self.get_option('_timeout'))
+ self._prefix = self.get_option('_prefix')
+ self._keys_set = self.get_option('_keyset_name')
+ self._sentinel_service_name = self.get_option('_sentinel_service_name')
+ except KeyError:
+ display.deprecated('Rather than importing CacheModules directly, '
+ 'use ansible.plugins.loader.cache_loader',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if C.CACHE_PLUGIN_CONNECTION:
+ uri = C.CACHE_PLUGIN_CONNECTION
+ self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
+ self._prefix = C.CACHE_PLUGIN_PREFIX
+ self._keys_set = 'ansible_cache_keys'
+
+ if not HAS_REDIS:
+ raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+ self._cache = {}
+ kw = {}
+
+ # tls connection
+ tlsprefix = 'tls://'
+ if uri.startswith(tlsprefix):
+ kw['ssl'] = True
+ uri = uri[len(tlsprefix):]
+
+ # redis sentinel connection
+ if self._sentinel_service_name:
+ self._db = self._get_sentinel_connection(uri, kw)
+ # normal connection
+ else:
+ connection = uri.split(':')
+ self._db = StrictRedis(*connection, **kw)
+
+ display.vv('Redis connection: %s' % self._db)
+
+ def _get_sentinel_connection(self, uri, kw):
+ """
+ get sentinel connection details from _uri
+ """
+ try:
+ from redis.sentinel import Sentinel
+ except ImportError:
+ raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
+
+ if ';' not in uri:
+ raise AnsibleError('_uri does not have sentinel syntax.')
+
+ # format: "localhost:26379;localhost2:26379;0:changeme"
+ connections = uri.split(';')
+ connection_args = connections.pop(-1)
+ if len(connection_args) > 0: # handle the case where no db number is given
+ connection_args = connection_args.split(':')
+ kw['db'] = connection_args.pop(0)
+ try:
+ kw['password'] = connection_args.pop(0)
+ except IndexError:
+ pass # password is optional
+
+ sentinels = [tuple(shost.split(':')) for shost in connections]
+ display.vv('\nUsing redis sentinels: %s' % sentinels)
+ scon = Sentinel(sentinels, **kw)
+ try:
+ return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
+ except Exception as exc:
+ raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+
+ def _make_key(self, key):
+ return self._prefix + key
+
+ def get(self, key):
+
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the zset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder)
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
+ if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
+ self._db.setex(self._make_key(key), int(self._timeout), value2)
+ else:
+ self._db.set(self._make_key(key), value2)
+
+ if VERSION[0] == 2:
+ self._db.zadd(self._keys_set, time.time(), key)
+ else:
+ self._db.zadd(self._keys_set, {key: time.time()})
+ self._cache[key] = value
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
+
+ def keys(self):
+ self._expire_keys()
+ return self._db.zrange(self._keys_set, 0, -1)
+
+ def contains(self, key):
+ self._expire_keys()
+ return (self._db.zrank(self._keys_set, key) is not None)
+
+ def delete(self, key):
+ if key in self._cache:
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._db.zrem(self._keys_set, key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ # TODO: there is probably a better way to do this in redis
+ ret = dict()
+ for key in self.keys():
+ ret[key] = self.get(key)
+ return ret
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
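The _uri handling above splits the documented host:port:db:password string and passes the pieces positionally to StrictRedis, with an optional tls:// prefix becoming ssl=True. A small sketch of that parsing with example values (not a complete connection helper):

    uri = 'tls://localhost:6379:0:changeme'  # example value only
    kw = {}

    tlsprefix = 'tls://'
    if uri.startswith(tlsprefix):
        kw['ssl'] = True
        uri = uri[len(tlsprefix):]

    connection = uri.split(':')  # ['localhost', '6379', '0', 'changeme']
    # the plugin then calls StrictRedis(*connection, **kw), i.e. host, port, db, password
    print(connection, kw)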
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py b/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py
new file mode 100644
index 00000000..e4c495be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/cache/yaml.py
@@ -0,0 +1,64 @@
+# (c) 2017, Brian Coca
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ cache: yaml
+ short_description: YAML formatted files.
+ description:
+ - This cache uses YAML formatted, per host, files saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: True
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
+
+
+import codecs
+
+import yaml
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by yaml files.
+ """
+
+ def _load(self, filepath):
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return AnsibleLoader(f).get_single_data()
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
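As with the pickle plugin, _load and _dump above are a plain round trip, here through YAML. A stand-alone approximation using yaml.safe_load/yaml.dump follows; the plugin itself uses AnsibleLoader and AnsibleDumper so that Ansible's own data types survive the trip, and the file path below is invented for illustration.

    import codecs
    import yaml

    facts = {'ansible_facts': {'os_family': 'Debian'}}

    with codecs.open('/tmp/example_fact_cache.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(facts, f, default_flow_style=False)

    with codecs.open('/tmp/example_fact_cache.yaml', 'r', encoding='utf-8') as f:
        restored = yaml.safe_load(f)

    assert restored == facts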
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py
new file mode 100644
index 00000000..8309a846
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/actionable.py
@@ -0,0 +1,61 @@
+# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: actionable
+ type: stdout
+ short_description: shows only items that need attention
+ description:
+ - Use this callback when you do not care about OK or Skipped results.
+ - This callback suppresses any status that is not Failed or Changed.
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout callback in configuration
+ # Override defaults from 'default' callback plugin
+ options:
+ display_skipped_hosts:
+ name: Show skipped hosts
+ description: "Toggle to control displaying skipped task/host results in a task"
+ type: bool
+ default: no
+ env:
+ - name: DISPLAY_SKIPPED_HOSTS
+ deprecated:
+ why: environment variables without "ANSIBLE_" prefix are deprecated
+ version: "2.0.0" # was Ansible 2.12
+ alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
+ - name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
+ ini:
+ - key: display_skipped_hosts
+ section: defaults
+ display_ok_hosts:
+ name: Show 'ok' hosts
+ description: "Toggle to control displaying 'ok' task/host results in a task"
+ type: bool
+ default: no
+ env:
+ - name: ANSIBLE_DISPLAY_OK_HOSTS
+ ini:
+ - key: display_ok_hosts
+ section: defaults
+'''
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.actionable'
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
new file mode 100644
index 00000000..a6dace8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: cgroup_memory_recap
+ type: aggregate
+ requirements:
+ - whitelist in configuration
+ - cgroups
+ short_description: Profiles maximum memory usage of tasks and full execution using cgroups
+ description:
+ - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups
+ notes:
+ - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...)
+ - This cgroup should only be used by ansible to get accurate results
+ - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile)
+ options:
+ max_mem_file:
+ required: True
+ description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes)
+ env:
+ - name: CGROUP_MAX_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: max_mem_file
+ cur_mem_file:
+ required: True
+ description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes)
+ env:
+ - name: CGROUP_CUR_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: cur_mem_file
+'''
+
+import time
+import threading
+
+from ansible.plugins.callback import CallbackBase
+
+
+class MemProf(threading.Thread):
+ """Python thread for recording memory usage"""
+ def __init__(self, path, obj=None):
+ threading.Thread.__init__(self)
+ self.obj = obj
+ self.path = path
+ self.results = []
+ self.running = True
+
+ def run(self):
+ while self.running:
+ with open(self.path) as f:
+ val = f.read()
+ self.results.append(int(val.strip()) / 1024 / 1024)
+ time.sleep(0.001)
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.cgroup_memory_recap'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display)
+
+ self._task_memprof = None
+
+ self.task_results = []
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.cgroup_max_file = self.get_option('max_mem_file')
+ self.cgroup_current_file = self.get_option('cur_mem_file')
+
+ with open(self.cgroup_max_file, 'w+') as f:
+ f.write('0')
+
+ def _profile_memory(self, obj=None):
+ prev_task = None
+ results = None
+ try:
+ self._task_memprof.running = False
+ results = self._task_memprof.results
+ prev_task = self._task_memprof.obj
+ except AttributeError:
+ pass
+
+ if obj is not None:
+ self._task_memprof = MemProf(self.cgroup_current_file, obj=obj)
+ self._task_memprof.start()
+
+ if results is not None:
+ self.task_results.append((prev_task, max(results)))
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._profile_memory(task)
+
+ def v2_playbook_on_stats(self, stats):
+ self._profile_memory()
+
+ with open(self.cgroup_max_file) as f:
+ max_results = int(f.read().strip()) / 1024 / 1024
+
+ self._display.banner('CGROUP MEMORY RECAP')
+ self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results)
+
+ for task, memory in self.task_results:
+ self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory))
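The MemProf thread above does nothing more than poll the cgroup's memory.usage_in_bytes file and convert the value from bytes to MiB; MemProf.run() repeats this roughly every millisecond and the callback keeps the per-task maximum. A stand-alone sketch of a single sample, using the example path from the option documentation:

    def read_usage_mib(path='/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes'):
        # one sample of current cgroup memory usage, converted from bytes to MiB
        with open(path) as f:
            return int(f.read().strip()) / 1024 / 1024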
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py
new file mode 100644
index 00000000..d134f616
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/context_demo.py
@@ -0,0 +1,53 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: context_demo
+ type: aggregate
+ short_description: demo callback that adds play/task context
+ description:
+ - Displays some play and task context along with normal output
+ - This is mostly for demo purposes
+ requirements:
+ - whitelist in configuration
+'''
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This is a very trivial example of how any callback function can get at play and task objects.
+ play will be 'None' for runner invocations, and task will be None for 'setup' invocations.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.context_demo'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, *args, **kwargs):
+ super(CallbackModule, self).__init__(*args, **kwargs)
+ self.task = None
+ self.play = None
+
+ def v2_on_any(self, *args, **kwargs):
+ self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+
+ self._display.display(" --- ARGS ")
+ for i, a in enumerate(args):
+ self._display.display(' %s: %s' % (i, a))
+
+ self._display.display(" --- KWARGS ")
+ for k in kwargs:
+ self._display.display(' %s: %s' % (k, kwargs[k]))
+
+ def v2_playbook_on_play_start(self, play):
+ self.play = play
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py
new file mode 100644
index 00000000..89c8b0f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/counter_enabled.py
@@ -0,0 +1,248 @@
+# (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+'''
+ Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: counter_enabled
+ type: stdout
+ short_description: adds counters to the output items (tasks and hosts/task)
+ description:
+ - Use this callback when you need a kind of progress bar in large environments.
+ - You will know how many tasks the playbook has to run, and which one is currently running.
+ - You will know how many hosts may run a task, and which of them is currently running.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
+'''
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.template import Templar
+from ansible.playbook.task_include import TaskInclude
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.counter_enabled'
+
+ _task_counter = 1
+ _task_total = 0
+ _host_counter = 1
+ _host_total = 0
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self._playbook = ""
+ self._play = ""
+
+ def _all_vars(self, host=None, task=None):
+ # host and task need to be specified in case 'magic variables' (host vars, group vars, etc)
+ # need to be loaded as well
+ return self._play.get_variable_manager().get_vars(
+ play=self._play,
+ host=host,
+ task=task
+ )
+
+ def v2_playbook_on_start(self, playbook):
+ self._playbook = playbook
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = u"play"
+ else:
+ msg = u"PLAY [%s]" % name
+
+ self._play = play
+
+ self._display.banner(msg)
+ self._play = play
+
+ self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
+ self._task_total = len(self._play.get_tasks()[0])
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ stat = stats.summarize(host)
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat),
+ colorize(u'ok', stat['ok'], C.COLOR_OK),
+ colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', stat['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', stat['rescued'], C.COLOR_OK),
+ colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat, False),
+ colorize(u'ok', stat['ok'], None),
+ colorize(u'changed', stat['changed'], None),
+ colorize(u'unreachable', stat['unreachable'], None),
+ colorize(u'failed', stat['failures'], None),
+ colorize(u'rescued', stat['rescued'], None),
+ colorize(u'ignored', stat['ignored'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+
+ # print custom stats
+ if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom:
+ # fallback on constants for inherited plugins missing docs
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ args = ''
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we have not run it there yet.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ' %s' % args
+ self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+ self._host_counter = 0
+ self._task_counter += 1
+
+ def v2_runner_on_ok(self, result):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if delegated_vars:
+ msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_CHANGED
+ else:
+ if delegated_vars:
+ msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_OK
+
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._clean_results(result._result, result._task.action)
+
+ if self._run_is_verbose(result):
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+ else:
+ self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_on_skipped(self, result):
+ self._host_counter += 1
+
+ if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
+
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ self._host_counter += 1
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
+ else:
+ self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
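The counters in this plugin come from two places: the task total is taken from play.get_tasks()[0] and the host total from the ansible_play_hosts_all variable; each banner and per-host line then interpolates counter/total into the message. A small sketch of the resulting output format, with invented values:

    task_counter, task_total = 3, 12
    host_counter, host_total = 2, 5

    print("TASK %d/%d [Install packages]" % (task_counter, task_total))
    print("ok: %d/%d [web01.example.com]" % (host_counter, host_total))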
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py
new file mode 100644
index 00000000..bec62279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/dense.py
@@ -0,0 +1,499 @@
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: dense
+type: stdout
+short_description: minimal stdout output
+extends_documentation_fragment:
+- default_callback
+description:
+- When in verbose mode it will act the same as the default callback
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- set as stdout in configuration
+'''
+
+HAS_OD = False
+try:
+ from collections import OrderedDict
+ HAS_OD = True
+except ImportError:
+ pass
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+from ansible.utils.color import colorize, hostcolor
+from ansible.utils.display import Display
+
+import sys
+
+display = Display()
+
+
+# Design goals:
+#
+# + On screen there should only be relevant stuff
+# - How far are we? (during run, last line)
+# - What issues occurred
+# - What changes occurred
+# - Diff output (in diff-mode)
+#
+# + If verbosity increases, act as default output
+# So that users can easily switch to default for troubleshooting
+#
+# + Rewrite the output during processing
+# - We use the cursor to indicate where in the task we are.
+# Output after the prompt is the output of the previous task.
+# - If we would clear the line at the start of a task, there would often
+# be no information at all, so we leave it until it gets updated
+#
+# + Use the same color-conventions of Ansible
+#
+# + Ensure the verbose output (-v) is also dense.
+# Remove information that is not essential (e.g. timestamps, status)
+
+
+# TODO:
+#
+# + Properly test for terminal capabilities, and fall back to default
+# + Modify Ansible mechanism so we don't need to use sys.stdout directly
+# + Find an elegant solution for progress bar line wrapping
+
+
+# FIXME: Importing constants as C simply does not work, beats me :-/
+# from ansible import constants as C
+class C:
+ COLOR_HIGHLIGHT = 'white'
+ COLOR_VERBOSE = 'blue'
+ COLOR_WARN = 'bright purple'
+ COLOR_ERROR = 'red'
+ COLOR_DEBUG = 'dark gray'
+ COLOR_DEPRECATE = 'purple'
+ COLOR_SKIP = 'cyan'
+ COLOR_UNREACHABLE = 'bright red'
+ COLOR_OK = 'green'
+ COLOR_CHANGED = 'yellow'
+
+
+# Taken from Dstat
+class vt100:
+ black = '\033[0;30m'
+ darkred = '\033[0;31m'
+ darkgreen = '\033[0;32m'
+ darkyellow = '\033[0;33m'
+ darkblue = '\033[0;34m'
+ darkmagenta = '\033[0;35m'
+ darkcyan = '\033[0;36m'
+ gray = '\033[0;37m'
+
+ darkgray = '\033[1;30m'
+ red = '\033[1;31m'
+ green = '\033[1;32m'
+ yellow = '\033[1;33m'
+ blue = '\033[1;34m'
+ magenta = '\033[1;35m'
+ cyan = '\033[1;36m'
+ white = '\033[1;37m'
+
+ blackbg = '\033[40m'
+ redbg = '\033[41m'
+ greenbg = '\033[42m'
+ yellowbg = '\033[43m'
+ bluebg = '\033[44m'
+ magentabg = '\033[45m'
+ cyanbg = '\033[46m'
+ whitebg = '\033[47m'
+
+ reset = '\033[0;0m'
+ bold = '\033[1m'
+ reverse = '\033[2m'
+ underline = '\033[4m'
+
+ clear = '\033[2J'
+# clearline = '\033[K'
+ clearline = '\033[2K'
+ save = '\033[s'
+ restore = '\033[u'
+ save_all = '\0337'
+ restore_all = '\0338'
+ linewrap = '\033[7h'
+ nolinewrap = '\033[7l'
+
+ up = '\033[1A'
+ down = '\033[1B'
+ right = '\033[1C'
+ left = '\033[1D'
+
+
+colors = dict(
+ ok=vt100.darkgreen,
+ changed=vt100.darkyellow,
+ skipped=vt100.darkcyan,
+ ignored=vt100.cyanbg + vt100.red,
+ failed=vt100.darkred,
+ unreachable=vt100.red,
+)
+
+states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the dense callback interface, where screen real estate is still valued.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'dense'
+
+ def __init__(self):
+
+ # From CallbackModule
+ self._display = display
+
+ if HAS_OD:
+
+ self.disabled = False
+ self.super_ref = super(CallbackModule, self)
+ self.super_ref.__init__()
+
+ # Attributes to remove from results for more density
+ self.removed_attributes = (
+ # 'changed',
+ 'delta',
+ # 'diff',
+ 'end',
+ 'failed',
+ 'failed_when_result',
+ 'invocation',
+ 'start',
+ 'stdout_lines',
+ )
+
+ # Initiate data structures
+ self.hosts = OrderedDict()
+ self.keep = False
+ self.shown_title = False
+ self.count = dict(play=0, handler=0, task=0)
+ self.type = 'foo'
+
+ # Start immediately on the first line
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
+ self.disabled = True
+
+ def __del__(self):
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+
+ def _add_host(self, result, status):
+ name = result._host.get_name()
+
+ # Add a new status in case a failed task is ignored
+ if status == 'failed' and result._task.ignore_errors:
+ status = 'ignored'
+
+ # Check if we have to update an existing state (when looping over items)
+ if name not in self.hosts:
+ self.hosts[name] = dict(state=status)
+ elif states.index(self.hosts[name]['state']) < states.index(status):
+ self.hosts[name]['state'] = status
+
+ # Store delegated hostname, if needed
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self.hosts[name]['delegate'] = delegated_vars['ansible_host']
+
+ # Print progress bar
+ self._display_progress(result)
+
+# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
+# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._resultget('diff', False)):
+ # Ensure that tasks with changes/failures stay on-screen
+ if status in ['changed', 'failed', 'unreachable']:
+ self.keep = True
+
+ if self._display.verbosity == 1:
+ # Print task title, if needed
+ self._display_task_banner()
+ self._display_results(result, status)
+
+ def _clean_results(self, result):
+ # Remove non-essential attributes
+ for attr in self.removed_attributes:
+ if attr in result:
+ del(result[attr])
+
+ # Remove empty attributes (list, dict, str)
+ for attr in result.copy():
+ if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
+ if not result[attr]:
+ del(result[attr])
+
+ def _handle_exceptions(self, result):
+ if 'exception' in result:
+ # Remove the exception from the result so it's not shown every time
+ del result['exception']
+
+ if self._display.verbosity == 1:
+ return "An exception occurred during task execution. To see the full traceback, use -vvv."
+
+ def _display_progress(self, result=None):
+ # Always rewrite the complete line
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
+ sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ # Print out each host in its own status-color
+ for name in self.hosts:
+ sys.stdout.write(' ')
+ if self.hosts[name].get('delegate', None):
+ sys.stdout.write(self.hosts[name]['delegate'] + '>')
+ sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
+ sys.stdout.flush()
+
+# if result._result.get('diff', False):
+# sys.stdout.write('\n' + vt100.linewrap)
+ sys.stdout.write(vt100.linewrap)
+
+# self.keep = True
+
+ def _display_task_banner(self):
+ if not self.shown_title:
+ self.shown_title = True
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+ sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ def _display_results(self, result, status):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ self._clean_results(result._result)
+
+ dump = ''
+ if result._task.action == 'include':
+ return
+ elif status == 'ok':
+ return
+ elif status == 'ignored':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'failed':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'unreachable':
+ dump = result._result['msg']
+
+ if not dump:
+ dump = self._dump_results(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ sys.stdout.write(colors[status] + status + ': ')
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
+ else:
+ sys.stdout.write(result._host.get_name())
+
+ sys.stdout.write(': ' + dump + '\n')
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ if status == 'changed':
+ self._handle_warnings(result._result)
+
+ def v2_playbook_on_play_start(self, play):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
+
+ # Reset at the start of each play
+ self.keep = False
+ self.count.update(dict(handler=0, task=0))
+ self.count['play'] += 1
+ self.play = play
+
+ # Write the next play on screen IN UPPERCASE, and make it permanent
+ name = play.get_name().strip()
+ if not name:
+ name = 'unnamed'
+ sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ # Do not clear line, since we want to retain the previous output
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
+
+ # Reset at the start of each task
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'task'
+
+ # Enumerate task if not setup (task names are too long for dense output)
+ if task.get_name() != 'setup':
+ self.count['task'] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+
+ # Reset at the start of each handler
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'handler'
+
+ # Enumerate handler if not setup (handler names may be too long for dense output)
+ if task.get_name() != 'setup':
+ self.count[self.type] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ # TBD
+ sys.stdout.write('cleanup.')
+ sys.stdout.flush()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._add_host(result, 'failed')
+
+ def v2_runner_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ def v2_runner_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_runner_on_unreachable(self, result):
+ self._add_host(result, 'unreachable')
+
+ def v2_runner_on_include(self, included_file):
+ pass
+
+ def v2_runner_on_file_diff(self, result, diff):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_runner_on_file_diff(result, diff)
+ sys.stdout.write(vt100.reset)
+
+ def v2_on_file_diff(self, result):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_on_file_diff(result)
+ sys.stdout.write(vt100.reset)
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_ok(self, result):
+ self.v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_failed(self, result):
+ self.v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._add_host(result, 'failed')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_skipped(self, result):
+ self.v2_runner_item_on_skipped(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_include(self, included_file):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+
+ # In normal mode screen output should be sufficient, summary is redundant
+ if self._display.verbosity == 0:
+ return
+
+ sys.stdout.write(vt100.bold + vt100.underline)
+ sys.stdout.write('SUMMARY')
+
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+ self._display.display(
+ u"%s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN),
+ ),
+ screen_only=True
+ )
+
+
+# When using -vv or higher, simply do the default action
+if display.verbosity >= 2 or not HAS_OD:
+ CallbackModule = CallbackModule_default
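The rewrite-in-place behaviour described in the design goals is built entirely on the vt100 save/restore/clearline sequences defined above: the cursor position is saved once, and every update restores it and clears the line before writing. A minimal stand-alone demonstration of that technique, not taken from the plugin:

    import sys
    import time

    SAVE, RESTORE, CLEARLINE, RESET = '\033[s', '\033[u', '\033[2K', '\033[0;0m'

    sys.stdout.write(SAVE)
    for step in range(1, 4):
        # each iteration overwrites the previous status line instead of scrolling
        sys.stdout.write(RESTORE + CLEARLINE + 'task %d: running' % step + RESET)
        sys.stdout.flush()
        time.sleep(0.2)
    sys.stdout.write('\n')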
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py
new file mode 100644
index 00000000..d24c9145
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/diy.py
@@ -0,0 +1,1420 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Trevor Highfill <trevor.highfill@outlook.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ callback: diy
+ type: stdout
+ short_description: Customize the output
+ version_added: 0.2.0
+ description:
+ - Callback plugin that allows you to supply your own custom callback templates to be output.
+ author: Trevor Highfill (@theque5t)
+ extends_documentation_fragment:
+ - default_callback
+ notes:
+ - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided.
+ - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options.
+ The dictionary is only available in the templating context for the options. It is not a variable that is available via the
+ various other execution contexts, such as playbook, play, or task.
+ - Options that are set via their variable input can only be set that way if the variable was defined in a context that is available to the
+ respective callback.
+ Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output
+ the top level variable names available to the callback.
+ - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example,
+ C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}")
+ - "**Condition** for all C(msg) options:
+ if value C(is None or omit),
+ then the option is not being used.
+ **Effect**: use of the C(default) callback plugin for output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is not greater than 0),
+ then the option is being used without output.
+ **Effect**: suppress output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is greater than 0),
+ then the option is being used with output.
+ **Effect**: render value as template and output"
+ - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan),
+ C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)"
+ seealso:
+ - name: default – default Ansible screen output
+ description: The official documentation on the B(default) callback plugin.
+ link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html
+ requirements:
+ - set as stdout_callback in configuration
+ options:
+ on_any_msg:
+ description: Output to be used for callback on_any.
+ ini:
+ - section: callback_diy
+ key: on_any_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG
+ vars:
+ - name: ansible_callback_diy_on_any_msg
+ type: str
+
+ on_any_msg_color:
+ description:
+ - Output color to be used for I(on_any_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_any_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_any_msg_color
+ type: str
+
+ runner_on_failed_msg:
+ description: Output to be used for callback runner_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg
+ type: str
+
+ runner_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg_color
+ type: str
+
+ runner_on_ok_msg:
+ description: Output to be used for callback runner_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg
+ type: str
+
+ runner_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg_color
+ type: str
+
+ runner_on_skipped_msg:
+ description: Output to be used for callback runner_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg
+ type: str
+
+ runner_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg_color
+ type: str
+
+ runner_on_unreachable_msg:
+ description: Output to be used for callback runner_on_unreachable.
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg
+ type: str
+
+ runner_on_unreachable_msg_color:
+ description:
+ - Output color to be used for I(runner_on_unreachable_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg_color
+ type: str
+
+ playbook_on_start_msg:
+ description: Output to be used for callback playbook_on_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg
+ type: str
+
+ playbook_on_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg_color
+ type: str
+
+ playbook_on_notify_msg:
+ description: Output to be used for callback playbook_on_notify.
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg
+ type: str
+
+ playbook_on_notify_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_notify_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg_color
+ type: str
+
+ playbook_on_no_hosts_matched_msg:
+ description: Output to be used for callback playbook_on_no_hosts_matched.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg
+ type: str
+
+ playbook_on_no_hosts_matched_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_matched_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color
+ type: str
+
+ playbook_on_no_hosts_remaining_msg:
+ description: Output to be used for callback playbook_on_no_hosts_remaining.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg
+ type: str
+
+ playbook_on_no_hosts_remaining_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_remaining_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color
+ type: str
+
+ playbook_on_task_start_msg:
+ description: Output to be used for callback playbook_on_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg
+ type: str
+
+ playbook_on_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg_color
+ type: str
+
+ playbook_on_handler_task_start_msg:
+ description: Output to be used for callback playbook_on_handler_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg
+ type: str
+
+ playbook_on_handler_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_handler_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color
+ type: str
+
+ playbook_on_vars_prompt_msg:
+ description: Output to be used for callback playbook_on_vars_prompt.
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg
+ type: str
+
+ playbook_on_vars_prompt_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_vars_prompt_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color
+ type: str
+
+ playbook_on_play_start_msg:
+ description: Output to be used for callback playbook_on_play_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg
+ type: str
+
+ playbook_on_play_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_play_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg_color
+ type: str
+
+ playbook_on_stats_msg:
+ description: Output to be used for callback playbook_on_stats.
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg
+ type: str
+
+ playbook_on_stats_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_stats_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg_color
+ type: str
+
+ on_file_diff_msg:
+ description: Output to be used for callback on_file_diff.
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg
+ type: str
+
+ on_file_diff_msg_color:
+ description:
+ - Output color to be used for I(on_file_diff_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg_color
+ type: str
+
+ playbook_on_include_msg:
+ description: Output to be used for callback playbook_on_include.
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg
+ type: str
+
+ playbook_on_include_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_include_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg_color
+ type: str
+
+ runner_item_on_ok_msg:
+ description: Output to be used for callback runner_item_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg
+ type: str
+
+ runner_item_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg_color
+ type: str
+
+ runner_item_on_failed_msg:
+ description: Output to be used for callback runner_item_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg
+ type: str
+
+ runner_item_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg_color
+ type: str
+
+ runner_item_on_skipped_msg:
+ description: Output to be used for callback runner_item_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg
+ type: str
+
+ runner_item_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg_color
+ type: str
+
+ runner_retry_msg:
+ description: Output to be used for callback runner_retry.
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg
+ type: str
+
+ runner_retry_msg_color:
+ description:
+ - Output color to be used for I(runner_retry_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg_color
+ type: str
+
+ runner_on_start_msg:
+ description: Output to be used for callback runner_on_start.
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg
+ type: str
+
+ runner_on_start_msg_color:
+ description:
+ - Output color to be used for I(runner_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg_color
+ type: str
+
+ runner_on_no_hosts_msg:
+ description: Output to be used for callback runner_on_no_hosts.
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg
+ type: str
+
+ runner_on_no_hosts_msg_color:
+ description:
+ - Output color to be used for I(runner_on_no_hosts_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg_color
+ type: str
+
+ playbook_on_setup_msg:
+ description: Output to be used for callback playbook_on_setup.
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg
+ type: str
+
+ playbook_on_setup_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_setup_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg_color
+ type: str
+'''
+
+EXAMPLES = r'''
+ansible.cfg: >
+ # Enable plugin
+ [defaults]
+ stdout_callback=community.general.diy
+
+ [callback_diy]
+ # Output when playbook starts
+ playbook_on_start_msg="DIY output(via ansible.cfg): playbook example: {{ ansible_callback_diy.playbook.file_name }}"
+ playbook_on_start_msg_color=yellow
+
+ # Comment out to allow default plugin output
+ # playbook_on_play_start_msg="PLAY: starting play {{ ansible_callback_diy.play.name }}"
+
+ # Accept on_skipped_msg or ansible_callback_diy_runner_on_skipped_msg as input vars
+  # If neither is supplied, omit the option
+ runner_on_skipped_msg="{{ on_skipped_msg | default(ansible_callback_diy_runner_on_skipped_msg) | default(omit) }}"
+
+ # Newline after every callback
+ # on_any_msg='{{ " " | join("\n") }}'
+
+playbook.yml: >
+ ---
+ - name: "Default plugin output: play example"
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Default plugin output
+ ansible.builtin.debug:
+ msg: default plugin output
+
+ - name: Override from play vars
+ hosts: localhost
+ gather_facts: no
+ vars:
+ ansible_connection: local
+ green: "\e[0m\e[38;5;82m"
+ yellow: "\e[0m\e[38;5;11m"
+ bright_purple: "\e[0m\e[38;5;105m"
+ cyan: "\e[0m\e[38;5;51m"
+ green_bg_black_fg: "\e[0m\e[48;5;40m\e[38;5;232m"
+ yellow_bg_black_fg: "\e[0m\e[48;5;226m\e[38;5;232m"
+ purple_bg_white_fg: "\e[0m\e[48;5;57m\e[38;5;255m"
+ cyan_bg_black_fg: "\e[0m\e[48;5;87m\e[38;5;232m"
+ magenta: "\e[38;5;198m"
+ white: "\e[0m\e[38;5;255m"
+ ansible_callback_diy_playbook_on_play_start_msg: "\n{{green}}DIY output(via play vars): play example: {{magenta}}{{ansible_callback_diy.play.name}}\n\n"
+ ansible_callback_diy_playbook_on_task_start_msg: "DIY output(via play vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: cyan
+ ansible_callback_diy_playbook_on_stats_msg: |+2
+ CUSTOM STATS
+ ==============================
+ {% for key in ansible_callback_diy.stats | sort %}
+ {% if ansible_callback_diy.stats[key] %}
+ {% if key == 'ok' %}
+ {% set color_one = lookup('vars','green_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','green') %}
+ {% elif key == 'changed' %}
+ {% set color_one = lookup('vars','yellow_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','yellow') %}
+ {% elif key == 'processed' %}
+ {% set color_one = lookup('vars','purple_bg_white_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','bright_purple') %}
+ {% elif key == 'skipped' %}
+ {% set color_one = lookup('vars','cyan_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','cyan') %}
+ {% else %}
+ {% set color_one = "" %}
+ {% set prefix = "" %}
+ {% set suffix = "" %}
+ {% set color_two = "" %}
+ {% endif %}
+ {{ color_one }}{{ "%s%s%s" | format(prefix,key,suffix) }}{{ color_two }}: {{ ansible_callback_diy.stats[key] | to_nice_yaml }}
+ {% endif %}
+ {% endfor %}
+
+ tasks:
+ - name: Custom banner with default plugin result output
+ ansible.builtin.debug:
+ msg: "default plugin output: result example"
+
+ - name: Override from task vars
+ ansible.builtin.debug:
+ msg: "example {{ two }}"
+ changed_when: true
+ vars:
+ white_fg_red_bg: "\e[0m\e[48;5;1m"
+ two: "{{ white_fg_red_bg }} 2 "
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright magenta
+ ansible_callback_diy_runner_on_ok_msg: "DIY output(via task vars): result example: \n{{ ansible_callback_diy.result.output.msg }}\n"
+ ansible_callback_diy_runner_on_ok_msg_color: "{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"
+
+ - name: Suppress output
+ ansible.builtin.debug:
+ msg: i should not be displayed
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ ansible_callback_diy_runner_on_ok_msg: ""
+
+ - name: Using alias vars (see ansible.cfg)
+ ansible.builtin.debug:
+ msg:
+ when: False
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n"
+ on_skipped_msg_color: white
+
+ - name: Just stdout
+ ansible.builtin.command: echo some stdout
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\n"
+ ansible_callback_diy_runner_on_ok_msg: "{{ ansible_callback_diy.result.output.stdout }}\n"
+
+ - name: Multiline output
+ ansible.builtin.debug:
+ msg: "{{ multiline }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ multiline: "line\nline\nline"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ some
+ {{ ansible_callback_diy.result.output.msg }}
+ output
+
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright blue
+
+ - name: Indentation
+ ansible.builtin.debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - { indent: 1, msg: one., color: red }
+ - { indent: 2, msg: two.., color: yellow }
+ - { indent: 3, msg: three..., color: bright yellow }
+ vars:
+ ansible_callback_diy_runner_item_on_ok_msg: "{{ ansible_callback_diy.result.output.msg | indent(item.indent, True) }}"
+ ansible_callback_diy_runner_item_on_ok_msg_color: "{{ item.color }}"
+ ansible_callback_diy_runner_on_ok_msg: "GO!!!"
+ ansible_callback_diy_runner_on_ok_msg_color: bright green
+
+ - name: Using lookup and template as file
+ ansible.builtin.shell: "echo {% raw %}'output from {{ file_name }}'{% endraw %} > {{ file_name }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ file_name: diy_file_template_example
+ ansible_callback_diy_runner_on_ok_msg: "{{ lookup('template', file_name) }}"
+
+ - name: 'Look at top level vars available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for var in (ansible_callback_diy.top_level_var_names|reject('match','vars|ansible_callback_diy.*')) | sort %}
+ {{ green }}{{ var }}:
+ {{ white }}{{ lookup('vars', var) }}
+
+ {% endfor %}
+ ansible_callback_diy_runner_on_ok_msg_color: white
+
+ - name: 'Look at event data available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for key in ansible_callback_diy | sort %}
+ {{ green }}{{ key }}:
+ {{ white }}{{ ansible_callback_diy[key] }}
+
+ {% endfor %}
+'''
+
+import sys
+from contextlib import contextmanager
+from ansible import constants as C
+from ansible.playbook.task_include import TaskInclude
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.template import Templar
+from ansible.vars.manager import VariableManager
+from ansible.plugins.callback.default import CallbackModule as Default
+from ansible.module_utils._text import to_text
+
+
+class DummyStdout(object):
+ def flush(self):
+ pass
+
+ def write(self, b):
+ pass
+
+ def writelines(self, l):
+ pass
+
+
+class CallbackModule(Default):
+ """
+ Callback plugin that allows you to supply your own custom callback templates to be output.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.diy'
+
+ DIY_NS = 'ansible_callback_diy'
+
+    @contextmanager
+    def _suppress_stdout(self, enabled):
+        saved_stdout = sys.stdout
+        if enabled:
+            sys.stdout = DummyStdout()
+        try:
+            yield
+        finally:
+            # always restore stdout, even if the wrapped callback raises
+            sys.stdout = saved_stdout
+
+ def _get_output_specification(self, loader, variables):
+ _ret = {}
+ _calling_method = sys._getframe(1).f_code.co_name
+ _callback_type = (_calling_method[3:] if _calling_method[:3] == "v2_" else _calling_method)
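+        # e.g. a call from v2_runner_on_ok() yields the callback type 'runner_on_ok', which selects
+        # the 'runner_on_ok_msg' and 'runner_on_ok_msg_color' options documented above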
+ _callback_options = ['msg', 'msg_color']
+
+ for option in _callback_options:
+ _option_name = '%s_%s' % (_callback_type, option)
+ _option_template = variables.get(
+ self.DIY_NS + "_" + _option_name,
+ self.get_option(_option_name)
+ )
+ _ret.update({option: self._template(
+ loader=loader,
+ template=_option_template,
+ variables=variables
+ )})
+
+ _ret.update({'vars': variables})
+
+ return _ret
+
+ def _using_diy(self, spec):
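+        # DIY output is in effect when a message template was supplied and it did not
+        # render to the special 'omit' placeholder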
+ return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit'])
+
+ def _parent_has_callback(self):
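+        # same frame trick as above: check whether the default stdout callback (the parent
+        # class) also implements the event that called us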
+ return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name)
+
+ def _template(self, loader, template, variables):
+ _templar = Templar(loader=loader, variables=variables)
+ return _templar.template(
+ template,
+ preserve_trailing_newlines=True,
+ convert_data=False,
+ escape_backslashes=True
+ )
+
+ def _output(self, spec, stderr=False):
+ _msg = to_text(spec['msg'])
+ if len(_msg) > 0:
+ self._display.display(msg=_msg, color=spec['msg_color'], stderr=stderr)
+
+ def _get_vars(self, playbook, play=None, host=None, task=None, included_file=None,
+ handler=None, result=None, stats=None, remove_attr_ref_loop=True):
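+        # assemble the template variables, including the 'ansible_callback_diy' namespace
+        # (playbook/play/host/task/result/stats) that the message templates can reference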
+ def _get_value(obj, attr=None, method=None):
+ if attr:
+ return getattr(obj, attr, getattr(obj, "_" + attr, None))
+
+ if method:
+ _method = getattr(obj, method)
+ return _method()
+
+ def _remove_attr_ref_loop(obj, attributes):
+ _loop_var = getattr(obj, 'loop_control', None)
+ _loop_var = (_loop_var or 'item')
+
+ for attr in attributes:
+ if str(_loop_var) in str(_get_value(obj=obj, attr=attr)):
+ attributes.remove(attr)
+
+ return attributes
+
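+        # returning self from __deepcopy__ presumably keeps Ansible's variable copying from
+        # duplicating the large, self-referencing DIY namespace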
+ class CallbackDIYDict(dict):
+ def __deepcopy__(self, memo):
+ return self
+
+ _ret = {}
+
+ _variable_manager = VariableManager(loader=playbook.get_loader())
+
+ _all = _variable_manager.get_vars()
+ if play:
+ _all = play.get_variable_manager().get_vars(
+ play=play,
+ host=(host if host else getattr(result, '_host', None)),
+ task=(handler if handler else task)
+ )
+ _ret.update(_all)
+
+ _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()}))
+
+ _ret[self.DIY_NS].update({'playbook': {}})
+ _playbook_attributes = ['entries', 'file_name', 'basedir']
+
+ for attr in _playbook_attributes:
+ _ret[self.DIY_NS]['playbook'].update({attr: _get_value(obj=playbook, attr=attr)})
+
+ if play:
+ _ret[self.DIY_NS].update({'play': {}})
+ _play_attributes = ['any_errors_fatal', 'become', 'become_flags', 'become_method',
+ 'become_user', 'check_mode', 'collections', 'connection',
+ 'debugger', 'diff', 'environment', 'fact_path', 'finalized',
+ 'force_handlers', 'gather_facts', 'gather_subset',
+ 'gather_timeout', 'handlers', 'hosts', 'ignore_errors',
+ 'ignore_unreachable', 'included_conditional', 'included_path',
+ 'max_fail_percentage', 'module_defaults', 'name', 'no_log',
+ 'only_tags', 'order', 'port', 'post_tasks', 'pre_tasks',
+ 'remote_user', 'removed_hosts', 'roles', 'run_once', 'serial',
+ 'skip_tags', 'squashed', 'strategy', 'tags', 'tasks', 'uuid',
+ 'validated', 'vars_files', 'vars_prompt']
+
+ for attr in _play_attributes:
+ _ret[self.DIY_NS]['play'].update({attr: _get_value(obj=play, attr=attr)})
+
+ if host:
+ _ret[self.DIY_NS].update({'host': {}})
+ _host_attributes = ['name', 'uuid', 'address', 'implicit']
+
+ for attr in _host_attributes:
+ _ret[self.DIY_NS]['host'].update({attr: _get_value(obj=host, attr=attr)})
+
+ if task:
+ _ret[self.DIY_NS].update({'task': {}})
+ _task_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'loop', 'loop_control', 'loop_with',
+ 'module_defaults', 'name', 'no_log', 'notify', 'parent', 'poll',
+ 'port', 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ # remove arguments that reference a loop var because they cause templating issues in
+ # callbacks that do not have the loop context(e.g. playbook_on_task_start)
+ if task.loop and remove_attr_ref_loop:
+ _task_attributes = _remove_attr_ref_loop(obj=task, attributes=_task_attributes)
+
+ for attr in _task_attributes:
+ _ret[self.DIY_NS]['task'].update({attr: _get_value(obj=task, attr=attr)})
+
+ if included_file:
+ _ret[self.DIY_NS].update({'included_file': {}})
+ _included_file_attributes = ['args', 'filename', 'hosts', 'is_role', 'task']
+
+ for attr in _included_file_attributes:
+ _ret[self.DIY_NS]['included_file'].update({attr: _get_value(
+ obj=included_file,
+ attr=attr
+ )})
+
+ if handler:
+ _ret[self.DIY_NS].update({'handler': {}})
+ _handler_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'listen', 'loop', 'loop_control',
+ 'loop_with', 'module_defaults', 'name', 'no_log',
+ 'notified_hosts', 'notify', 'parent', 'poll', 'port',
+ 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ if handler.loop and remove_attr_ref_loop:
+ _handler_attributes = _remove_attr_ref_loop(obj=handler,
+ attributes=_handler_attributes)
+
+ for attr in _handler_attributes:
+ _ret[self.DIY_NS]['handler'].update({attr: _get_value(obj=handler, attr=attr)})
+
+ _ret[self.DIY_NS]['handler'].update({'is_host_notified': handler.is_host_notified(host)})
+
+ if result:
+ _ret[self.DIY_NS].update({'result': {}})
+ _result_attributes = ['host', 'task', 'task_name']
+
+ for attr in _result_attributes:
+ _ret[self.DIY_NS]['result'].update({attr: _get_value(obj=result, attr=attr)})
+
+ _result_methods = ['is_changed', 'is_failed', 'is_skipped', 'is_unreachable']
+
+ for method in _result_methods:
+ _ret[self.DIY_NS]['result'].update({method: _get_value(obj=result, method=method)})
+
+ _ret[self.DIY_NS]['result'].update({'output': getattr(result, '_result', None)})
+
+ _ret.update(result._result)
+
+ if stats:
+ _ret[self.DIY_NS].update({'stats': {}})
+ _stats_attributes = ['changed', 'custom', 'dark', 'failures', 'ignored',
+ 'ok', 'processed', 'rescued', 'skipped']
+
+ for attr in _stats_attributes:
+ _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
+
+ _ret[self.DIY_NS].update({'top_level_var_names': _ret.keys()})
+
+ return _ret
+
+ def v2_on_any(self, *args, **kwargs):
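+        # v2_on_any fires for every event but has no playbook/play/task context of its own,
+        # so reuse the variables captured by the previous callback's spec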
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_any(*args, **kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec, stderr=(not ignore_errors))
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_ok(result)
+
+ def v2_runner_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_skipped(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_unreachable(result)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_poll(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_ok(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_failed(self, result):
+ pass
+
+ def v2_runner_item_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_skipped(result)
+
+ def v2_runner_retry(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_retry(result)
+
+ def v2_runner_on_start(self, host, task):
+ self._diy_host = host
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_start(host, task)
+
+ def v2_playbook_on_start(self, playbook):
+ self._diy_playbook = playbook
+ self._diy_loader = self._diy_playbook.get_loader()
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+
+ def v2_playbook_on_notify(self, handler, host):
+ self._diy_handler = handler
+ self._diy_host = host
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ handler=self._diy_handler
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_notify(handler, host)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_handler_task_start(task)
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None,
+ confirm=False, salt_size=None, salt=None, default=None,
+ unsafe=None):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_vars_prompt(
+ varname, private, prompt, encrypt,
+ confirm, salt_size, salt, default,
+ unsafe
+ )
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ pass
+
+ def v2_playbook_on_play_start(self, play):
+ self._diy_play = play
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_play_start(play)
+
+ def v2_playbook_on_stats(self, stats):
+ self._diy_stats = stats
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ stats=self._diy_stats
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_stats(stats)
+
+ def v2_playbook_on_include(self, included_file):
+ self._diy_included_file = included_file
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_included_file._task,
+ included_file=self._diy_included_file
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_include(included_file)
+
+ def v2_on_file_diff(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_file_diff(result)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py
new file mode 100644
index 00000000..9fce6970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/full_skip.py
@@ -0,0 +1,76 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: full_skip
+ type: stdout
+ short_description: suppresses tasks if all hosts skipped
+ description:
+ - Use this plugin when you do not care about any output for tasks that were completely skipped
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
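+
+# As the 'deprecated' block above notes, roughly the same effect is available from the
+# 'default' callback plugin (a sketch, assuming ansible.cfg):
+#
+#   [defaults]
+#   display_skipped_hosts = no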
+
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.full_skip'
+
+ def v2_runner_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_playbook_item_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_runner_item_on_skipped(self, result):
+ self.outlines = []
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.display()
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.outlines = []
+ self.outlines.append("TASK [%s]" % task.get_name().strip())
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self.outlines.append("task path: %s" % path)
+
+ def v2_playbook_item_on_ok(self, result):
+ self.display()
+ super(CallbackModule, self).v2_playbook_item_on_ok(result)
+
+ def v2_runner_on_ok(self, result):
+ self.display()
+ super(CallbackModule, self).v2_runner_on_ok(result)
+
+ def display(self):
+ if len(self.outlines) == 0:
+ return
+ (first, rest) = self.outlines[0], self.outlines[1:]
+ self._display.banner(first)
+ for line in rest:
+ self._display.display(line)
+ self.outlines = []
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py
new file mode 100644
index 00000000..efe4e94e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/hipchat.py
@@ -0,0 +1,228 @@
+# (C) 2014, Matt Martz <matt@sivel.net>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: hipchat
+ type: notification
+ requirements:
+ - whitelist in configuration.
+ - prettytable (python lib)
+ short_description: post task events to hipchat
+ description:
+ - This callback plugin sends status updates to a HipChat channel during playbook execution.
+ - Before 2.4 only environment variables were available for configuring this plugin.
+ options:
+ token:
+ description: HipChat API token for v1 or v2 API.
+ required: True
+ env:
+ - name: HIPCHAT_TOKEN
+ ini:
+ - section: callback_hipchat
+ key: token
+ api_version:
+ description: HipChat API version, v1 or v2.
+ required: False
+ default: v1
+ env:
+ - name: HIPCHAT_API_VERSION
+ ini:
+ - section: callback_hipchat
+ key: api_version
+ room:
+ description: HipChat room to post in.
+ default: ansible
+ env:
+ - name: HIPCHAT_ROOM
+ ini:
+ - section: callback_hipchat
+ key: room
+ from:
+ description: Name to post as
+ default: ansible
+ env:
+ - name: HIPCHAT_FROM
+ ini:
+ - section: callback_hipchat
+ key: from
+ notify:
+ description: Add notify flag to important messages
+ type: bool
+ default: True
+ env:
+ - name: HIPCHAT_NOTIFY
+ ini:
+ - section: callback_hipchat
+ key: notify
+
+'''
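+
+# A minimal enablement sketch (assuming ansible.cfg; section and key names mirror the
+# DOCUMENTATION block above, and the token value is a placeholder):
+#
+#   [defaults]
+#   callback_whitelist = community.general.hipchat
+#
+#   [callback_hipchat]
+#   token = <hipchat api token>
+#   api_version = v2
+#   room = ansible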
+
+import os
+import json
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+
+class CallbackModule(CallbackBase):
+ """This is an example ansible callback plugin that sends status
+ updates to a HipChat channel during playbook execution.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.hipchat'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
+ API_V2_URL = 'https://api.hipchat.com/v2/'
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not installed. '
+ 'Disabling the HipChat callback plugin.')
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.token = self.get_option('token')
+ self.api_version = self.get_option('api_version')
+ self.from_name = self.get_option('from')
+ self.allow_notify = self.get_option('notify')
+ self.room = self.get_option('room')
+
+ if self.token is None:
+ self.disabled = True
+ self._display.warning('HipChat token could not be loaded. The HipChat '
+ 'token can be provided using the `HIPCHAT_TOKEN` '
+ 'environment variable.')
+
+ # Pick the request handler.
+ if self.api_version == 'v2':
+ self.send_msg = self.send_msg_v2
+ else:
+ self.send_msg = self.send_msg_v1
+
+ def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
+
+ body = {}
+ body['room_id'] = self.room
+ body['from'] = self.from_name[:15] # max length is 15
+ body['message'] = msg
+ body['message_format'] = msg_format
+ body['color'] = color
+ body['notify'] = self.allow_notify and notify
+
+ data = json.dumps(body)
+ url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
+ try:
+ response = open_url(url, data=data, headers=headers, method='POST')
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ params = {}
+ params['room_id'] = self.room
+ params['from'] = self.from_name[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['notify'] = int(self.allow_notify and notify)
+
+ url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
+ try:
+ response = open_url(url, data=urlencode(params))
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+
+ self.play = play
+ name = play.name
+ # This block sends information about a playbook when it starts
+ # The playbook object is not immediately available at
+ # playbook_on_start so we grab it via the play
+ #
+ # Displays info about playbook being started by a person on an
+ # inventory, as well as Tags, Skip Tags and Limits
+ if not self.printed_playbook:
+ self.playbook_name, _ = os.path.splitext(
+ os.path.basename(self.play.playbook.filename))
+ host_list = self.play.playbook.inventory.host_list
+ inventory = os.path.basename(os.path.realpath(host_list))
+ self.send_msg("%s: Playbook initiated by %s against %s" %
+ (self.playbook_name,
+ self.play.playbook.remote_user,
+ inventory), notify=True)
+ self.printed_playbook = True
+ subset = self.play.playbook.inventory._subset
+ skip_tags = self.play.playbook.skip_tags
+ self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
+ (self.playbook_name,
+ ', '.join(self.play.playbook.only_tags),
+ ', '.join(skip_tags) if skip_tags else None,
+ ', '.join(subset) if subset else subset))
+
+ # This is where we actually say we are starting a play
+ self.send_msg("%s: Starting play: %s" %
+ (self.playbook_name, name))
+
+ def playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures']])
+
+ self.send_msg("%s: Playbook complete" % self.playbook_name,
+ notify=True)
+
+ if failures or unreachable:
+ color = 'red'
+ self.send_msg("%s: Failures detected" % self.playbook_name,
+ color=color, notify=True)
+ else:
+ color = 'green'
+
+ self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py
new file mode 100644
index 00000000..01abde17
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/jabber.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: jabber
+ type: notification
+ short_description: post task events to a jabber server
+ description:
+    - The chatty part of ChatOps, with a Jabber (XMPP) server as the target
+    - This callback plugin sends status updates to a Jabber recipient during playbook execution.
+ requirements:
+ - xmpp (python lib https://github.com/ArchipelProject/xmpppy)
+ options:
+ server:
+ description: connection info to jabber server
+ required: True
+ env:
+ - name: JABBER_SERV
+ user:
+ description: Jabber user to authenticate as
+ required: True
+ env:
+ - name: JABBER_USER
+ password:
+      description: Password for the user on the Jabber server
+ required: True
+ env:
+ - name: JABBER_PASS
+ to:
+ description: chat identifier that will receive the message
+ required: True
+ env:
+ - name: JABBER_TO
+'''
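+
+# A minimal enablement sketch (assuming ansible.cfg plus environment variables; the
+# variable names come from the DOCUMENTATION block above, the values are placeholders):
+#
+#   [defaults]
+#   callback_whitelist = community.general.jabber
+#
+#   export JABBER_SERV=jabber.example.com
+#   export JABBER_USER=ansible@jabber.example.com
+#   export JABBER_PASS=secret
+#   export JABBER_TO=ops@jabber.example.com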
+
+import os
+
+HAS_XMPP = True
+try:
+ import xmpp
+except ImportError:
+ HAS_XMPP = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.jabber'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_XMPP:
+ self._display.warning("The required python xmpp library (xmpppy) is not installed. "
+ "pip install git+https://github.com/ArchipelProject/xmpppy")
+ self.disabled = True
+
+ self.serv = os.getenv('JABBER_SERV')
+ self.j_user = os.getenv('JABBER_USER')
+ self.j_pass = os.getenv('JABBER_PASS')
+ self.j_to = os.getenv('JABBER_TO')
+
+        if not all((self.serv, self.j_user, self.j_pass, self.j_to)):
+ self.disabled = True
+ self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables')
+
+ def send_msg(self, msg):
+ """Send message"""
+ jid = xmpp.JID(self.j_user)
+ client = xmpp.Client(self.serv, debug=[])
+ client.connect(server=(self.serv, 5222))
+ client.auth(jid.getNode(), self.j_pass, resource=jid.getResource())
+ message = xmpp.Message(self.j_to, msg)
+ message.setAttr('type', 'chat')
+ client.send(message)
+ client.disconnect()
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+ self.debug = self._dump_results(result._result)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+ self.play = play
+ name = play.name
+ self.send_msg("Ansible starting play: %s" % (name))
+
+ def playbook_on_stats(self, stats):
+ name = self.play
+ hosts = sorted(stats.processed.keys())
+ failures = False
+ unreachable = False
+ for h in hosts:
+ s = stats.summarize(h)
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ out = self.debug
+ self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
+ else:
+ out = self.debug
+ self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py
new file mode 100644
index 00000000..d184b9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/log_plays.py
@@ -0,0 +1,123 @@
+# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: log_plays
+ type: notification
+ short_description: write playbook output to log file
+ description:
+ - This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory
+ requirements:
+ - Whitelist in configuration
+ - A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller
+ options:
+ log_folder:
+ default: /var/log/ansible/hosts
+ description: The folder where log files will be created.
+ env:
+ - name: ANSIBLE_LOG_FOLDER
+ ini:
+ - section: callback_log_plays
+ key: log_folder
+'''
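+
+# A minimal enablement sketch (assuming ansible.cfg; ANSIBLE_LOG_FOLDER comes from the
+# DOCUMENTATION block above and overrides the /var/log/ansible/hosts default):
+#
+#   [defaults]
+#   callback_whitelist = community.general.log_plays
+#
+#   export ANSIBLE_LOG_FOLDER=/tmp/ansible-host-logs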
+
+import os
+import time
+import json
+
+from ansible.utils.path import makedirs_safe
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+# NOTE: in Ansible 1.2 or later general logging is available without
+# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
+# or log_path in the DEFAULTS section of your ansible configuration
+# file. This callback is an example of per hosts logging for those
+# that want it.
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.log_plays'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ TIME_FORMAT = "%b %d %Y %H:%M:%S"
+ MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n"
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.log_folder = self.get_option("log_folder")
+
+ if not os.path.exists(self.log_folder):
+ makedirs_safe(self.log_folder)
+
+ def log(self, result, category):
+ data = result._result
+ if isinstance(data, MutableMapping):
+ if '_ansible_verbose_override' in data:
+ # avoid logging extraneous data
+ data = 'omitted'
+ else:
+ data = data.copy()
+ invocation = data.pop('invocation', None)
+ data = json.dumps(data, cls=AnsibleJSONEncoder)
+ if invocation is not None:
+ data = json.dumps(invocation) + " => %s " % data
+
+ path = os.path.join(self.log_folder, result._host.get_name())
+ now = time.strftime(self.TIME_FORMAT, time.localtime())
+
+ msg = to_bytes(
+ self.MSG_FORMAT
+ % dict(
+ now=now,
+ playbook=self.playbook,
+ task_name=result._task.name,
+ task_action=result._task.action,
+ category=category,
+ data=data,
+ )
+ )
+ with open(path, "ab") as fd:
+ fd.write(msg)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.log(result, 'FAILED')
+
+ def v2_runner_on_ok(self, result):
+ self.log(result, 'OK')
+
+ def v2_runner_on_skipped(self, result):
+ self.log(result, 'SKIPPED')
+
+ def v2_runner_on_unreachable(self, result):
+ self.log(result, 'UNREACHABLE')
+
+ def v2_runner_on_async_failed(self, result):
+ self.log(result, 'ASYNC_FAILED')
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook._file_name
+
+    def v2_playbook_on_import_for_host(self, result, imported_file):
+        # log() only accepts (result, category), so fold the file name into the category label
+        self.log(result, 'IMPORTED (%s)' % imported_file)
+
+    def v2_playbook_on_not_import_for_host(self, result, missing_file):
+        self.log(result, 'NOTIMPORTED (%s)' % missing_file)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py
new file mode 100644
index 00000000..53bc7114
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logdna.py
@@ -0,0 +1,208 @@
+# (c) 2018, Samir Musali <samir.musali@logdna.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logdna
+ type: aggregate
+ short_description: Sends playbook logs to LogDNA
+ description:
+ - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
+ requirements:
+ - LogDNA Python Library (https://github.com/logdna/python)
+ - whitelisting in configuration
+ options:
+ conf_key:
+ required: True
+ description: LogDNA Ingestion Key
+ type: string
+ env:
+ - name: LOGDNA_INGESTION_KEY
+ ini:
+ - section: callback_logdna
+ key: conf_key
+ plugin_ignore_errors:
+ required: False
+      description: Whether or not to ignore errors on failing tasks
+ type: boolean
+ env:
+ - name: ANSIBLE_IGNORE_ERRORS
+ ini:
+ - section: callback_logdna
+ key: plugin_ignore_errors
+ default: False
+ conf_hostname:
+ required: False
+ description: Alternative Host Name; the current host name by default
+ type: string
+ env:
+ - name: LOGDNA_HOSTNAME
+ ini:
+ - section: callback_logdna
+ key: conf_hostname
+ conf_tags:
+ required: False
+ description: Tags
+ type: string
+ env:
+ - name: LOGDNA_TAGS
+ ini:
+ - section: callback_logdna
+ key: conf_tags
+ default: ansible
+'''
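+
+# A minimal enablement sketch (assuming ansible.cfg; section and key names mirror the
+# DOCUMENTATION block above, and the ingestion key is a placeholder):
+#
+#   [defaults]
+#   callback_whitelist = community.general.logdna
+#
+#   [callback_logdna]
+#   conf_key = <logdna ingestion key>
+#   conf_tags = ansible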
+
+import logging
+import json
+import socket
+from uuid import getnode
+from ansible.plugins.callback import CallbackBase
+from ansible.parsing.ajson import AnsibleJSONEncoder
+
+try:
+ from logdna import LogDNAHandler
+ HAS_LOGDNA = True
+except ImportError:
+ HAS_LOGDNA = False
+
+
+# Getting MAC Address of system:
+def get_mac():
+ mac = "%012x" % getnode()
+ return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2))))
+
+
+# Getting hostname of system:
+def get_hostname():
+ return str(socket.gethostname()).split('.local')[0]
+
+
+# Getting IP of system:
+def get_ip():
+ try:
+ return socket.gethostbyname(get_hostname())
+ except Exception:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
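+        # fall back to "connecting" a UDP socket (no packet is actually sent) and reading
+        # back the local address the OS would route through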
+ try:
+ s.connect(('10.255.255.255', 1))
+ IP = s.getsockname()[0]
+ except Exception:
+ IP = '127.0.0.1'
+ finally:
+ s.close()
+ return IP
+
+
+# Is it JSON?
+def isJSONable(obj):
+ try:
+ json.dumps(obj, sort_keys=True, cls=AnsibleJSONEncoder)
+ return True
+ except Exception:
+ return False
+
+
+# LogDNA Callback Module:
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 0.1
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.logdna'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+
+ self.disabled = True
+ self.playbook_name = None
+ self.playbook = None
+ self.conf_key = None
+ self.plugin_ignore_errors = None
+ self.conf_hostname = None
+ self.conf_tags = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.conf_key = self.get_option('conf_key')
+ self.plugin_ignore_errors = self.get_option('plugin_ignore_errors')
+ self.conf_hostname = self.get_option('conf_hostname')
+ self.conf_tags = self.get_option('conf_tags')
+ self.mac = get_mac()
+ self.ip = get_ip()
+
+ if self.conf_hostname is None:
+ self.conf_hostname = get_hostname()
+
+ self.conf_tags = self.conf_tags.split(',')
+
+ if HAS_LOGDNA:
+ self.log = logging.getLogger('logdna')
+ self.log.setLevel(logging.INFO)
+ self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True}
+ self.log.addHandler(LogDNAHandler(self.conf_key, self.options))
+ self.disabled = False
+ else:
+ self.disabled = True
+ self._display.warning('WARNING:\nPlease, install LogDNA Python Package: `pip install logdna`')
+
+ def metaIndexing(self, meta):
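+        # drop any meta values that cannot be JSON-encoded and record the removed keys
+        # under '__errors'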
+ invalidKeys = []
+ ninvalidKeys = 0
+ for key, value in meta.items():
+ if not isJSONable(value):
+ invalidKeys.append(key)
+ ninvalidKeys += 1
+ if ninvalidKeys > 0:
+ for key in invalidKeys:
+ del meta[key]
+ meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys)
+ return meta
+
+ def sanitizeJSON(self, data):
+ try:
+ return json.loads(json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder))
+ except Exception:
+ return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]}
+
+ def flush(self, log, options):
+ if HAS_LOGDNA:
+ self.log.info(json.dumps(log), options)
+
+ def sendLog(self, host, category, logdata):
+ options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}}
+ logdata['info'].pop('invocation', None)
+ warnings = logdata['info'].pop('warnings', None)
+ if warnings is not None:
+ self.flush({'warn': warnings}, options)
+ self.flush(logdata, options)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.playbook_name = playbook._file_name
+
+ def v2_playbook_on_stats(self, stats):
+ result = dict()
+ for host in stats.processed.keys():
+ result[host] = stats.summarize(host)
+ self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)})
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ if self.plugin_ignore_errors:
+ ignore_errors = self.plugin_ignore_errors
+ self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors})
+
+ def runner_on_ok(self, host, res):
+ self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_unreachable(self, host, res):
+ self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid})
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid})
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py
new file mode 100644
index 00000000..c6bc9935
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logentries.py
@@ -0,0 +1,330 @@
+# (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logentries
+ type: notification
+ short_description: Sends events to Logentries
+ description:
+      - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
+      - Before 2.4, if you wanted to use an ini configuration, the file had to be placed in the same directory as this plugin and named logentries.ini.
+      - In 2.4 and above you can just put it in the main Ansible configuration file.
+ requirements:
+ - whitelisting in configuration
+ - certifi (python library)
+ - flatdict (python library), if you want to use the 'flatten' option
+ options:
+ api:
+ description: URI to the Logentries API
+ env:
+ - name: LOGENTRIES_API
+ default: data.logentries.com
+ ini:
+ - section: callback_logentries
+ key: api
+ port:
+ description: HTTP port to use when connecting to the API
+ env:
+ - name: LOGENTRIES_PORT
+ default: 80
+ ini:
+ - section: callback_logentries
+ key: port
+ tls_port:
+ description: Port to use when connecting to the API when TLS is enabled
+ env:
+ - name: LOGENTRIES_TLS_PORT
+ default: 443
+ ini:
+ - section: callback_logentries
+ key: tls_port
+ token:
+ description: The logentries "TCP token"
+ env:
+ - name: LOGENTRIES_ANSIBLE_TOKEN
+ required: True
+ ini:
+ - section: callback_logentries
+ key: token
+ use_tls:
+ description:
+ - Toggle to decide whether to use TLS to encrypt the communications with the API server
+ env:
+ - name: LOGENTRIES_USE_TLS
+ default: False
+ type: boolean
+ ini:
+ - section: callback_logentries
+ key: use_tls
+ flatten:
+ description: flatten complex data structures into a single dictionary with complex keys
+ type: boolean
+ default: False
+ env:
+ - name: LOGENTRIES_FLATTEN
+ ini:
+ - section: callback_logentries
+ key: flatten
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+
+ [defaults]
+ callback_whitelist = community.general.logentries
+
+ Either set the environment variables
+ export LOGENTRIES_API=data.logentries.com
+ export LOGENTRIES_PORT=10000
+ export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
+
+ Or in the main Ansible config file
+ [callback_logentries]
+ api = data.logentries.com
+ port = 10000
+ tls_port = 20000
+ use_tls = no
+ token = dd21fc88-f00a-43ff-b977-e3a4233c53af
+ flatten = False
+'''
+
+import os
+import socket
+import random
+import time
+import uuid
+
+try:
+ import certifi
+ HAS_CERTIFI = True
+except ImportError:
+ HAS_CERTIFI = False
+
+try:
+ import flatdict
+ HAS_FLATDICT = True
+except ImportError:
+ HAS_FLATDICT = False
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+
+# Todo:
+# * Better formatting of output before sending out to logentries data/api nodes.
+
+
+class PlainTextSocketAppender(object):
+ def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443):
+
+ self.LE_API = LE_API
+ self.LE_PORT = LE_PORT
+ self.LE_TLS_PORT = LE_TLS_PORT
+ self.MIN_DELAY = 0.1
+ self.MAX_DELAY = 10
+ # Error message displayed when an incorrect Token has been detected
+ self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n"
+ # Unicode Line separator character \u2028
+ self.LINE_SEP = u'\u2028'
+
+ self._display = display
+ self._conn = None
+
+ def open_connection(self):
+ self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._conn.connect((self.LE_API, self.LE_PORT))
+
+ def reopen_connection(self):
+ self.close_connection()
+
+ root_delay = self.MIN_DELAY
+ while True:
+ try:
+ self.open_connection()
+ return
+ except Exception as e:
+ self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
+
+ root_delay *= 2
+ if root_delay > self.MAX_DELAY:
+ root_delay = self.MAX_DELAY
+
+ wait_for = root_delay + random.uniform(0, root_delay)
+
+ try:
+ self._display.vvvv("sleeping %s before retry" % wait_for)
+ time.sleep(wait_for)
+ except KeyboardInterrupt:
+ raise
+
+ def close_connection(self):
+ if self._conn is not None:
+ self._conn.close()
+
+ def put(self, data):
+ # Replace newlines with Unicode line separator
+ # for multi-line events
+ data = to_text(data, errors='surrogate_or_strict')
+ multiline = data.replace(u'\n', self.LINE_SEP)
+ multiline += u"\n"
+ # Send data, reconnect if needed
+ while True:
+ try:
+ self._conn.send(to_bytes(multiline, errors='surrogate_or_strict'))
+ except socket.error:
+ self.reopen_connection()
+ continue
+ break
+
+ self.close_connection()
+
+
+try:
+ import ssl
+ HAS_SSL = True
+except ImportError: # for systems without TLS support.
+ SocketAppender = PlainTextSocketAppender
+ HAS_SSL = False
+else:
+
+ class TLSSocketAppender(PlainTextSocketAppender):
+ def open_connection(self):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock = ssl.wrap_socket(
+ sock=sock,
+ keyfile=None,
+ certfile=None,
+ server_side=False,
+ cert_reqs=ssl.CERT_REQUIRED,
+ ssl_version=getattr(
+ ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
+ ca_certs=certifi.where(),
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, )
+ sock.connect((self.LE_API, self.LE_TLS_PORT))
+ self._conn = sock
+
+ SocketAppender = TLSSocketAppender
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.logentries'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ # TODO: allow for alternate posting methods (REST/UDP/agent/etc)
+ super(CallbackModule, self).__init__()
+
+ # verify dependencies
+ if not HAS_SSL:
+ self._display.warning("Unable to import ssl module. Will send over port 80.")
+
+ if not HAS_CERTIFI:
+ self.disabled = True
+ self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self.le_jobid = str(uuid.uuid4())
+
+ # FIXME: make configurable, move to options
+ self.timeout = 10
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # get options
+ try:
+ self.api_url = self.get_option('api')
+ self.api_port = self.get_option('port')
+ self.api_tls_port = self.get_option('tls_port')
+ self.use_tls = self.get_option('use_tls')
+ self.flatten = self.get_option('flatten')
+ except KeyError as e:
+ self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
+ self.disabled = True
+
+ try:
+ self.token = self.get_option('token')
+ except KeyError as e:
+ self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling')
+ self.disabled = True
+
+ if self.flatten and not HAS_FLATDICT:
+ self.disabled = True
+ self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self._initialize_connections()
+
+ def _initialize_connections(self):
+
+ if not self.disabled:
+ if self.use_tls:
+ self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
+ self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
+ else:
+ self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
+ self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
+ self._appender.reopen_connection()
+
+ def emit_formatted(self, record):
+ if self.flatten:
+ results = flatdict.FlatDict(record)
+ self.emit(self._dump_results(results))
+ else:
+ self.emit(self._dump_results(record))
+
+ def emit(self, record):
+ msg = record.rstrip('\n')
+ msg = "{0} {1}".format(self.token, msg)
+ self._appender.put(msg)
+ self._display.vvvv("Sent event to logentries")
+
+ def _set_info(self, host, res):
+ return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res}
+
+ def runner_on_ok(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'OK'
+ self.emit_formatted(results)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ results = self._set_info(host, res)
+ results['status'] = 'FAILED'
+ self.emit_formatted(results)
+
+ def runner_on_skipped(self, host, item=None):
+ results = self._set_info(host, item)
+ del results['results']
+ results['status'] = 'SKIPPED'
+ self.emit_formatted(results)
+
+ def runner_on_unreachable(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'UNREACHABLE'
+ self.emit_formatted(results)
+
+ def runner_on_async_failed(self, host, res, jid):
+ results = self._set_info(host, res)
+ results['jid'] = jid
+ results['status'] = 'ASYNC_FAILED'
+ self.emit_formatted(results)
+
+ def v2_playbook_on_play_start(self, play):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['started_by'] = os.getlogin()
+ if play.name:
+ results['play'] = play.name
+ results['hosts'] = play.hosts
+ self.emit_formatted(results)
+
+ def playbook_on_stats(self, stats):
+ """ close connection """
+ self._appender.close_connection()
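The reconnect loop in PlainTextSocketAppender.reopen_connection() above implements capped exponential backoff with jitter. A small standalone sketch of the delay schedule it produces (the number of retries shown is an assumption, purely for illustration):

import random

MIN_DELAY = 0.1   # same constants as the appender above
MAX_DELAY = 10

def backoff_delays():
    # the delay doubles after every failed attempt, is capped at MAX_DELAY,
    # and a random jitter of up to the same amount is added on top
    delay = MIN_DELAY
    while True:
        delay = min(delay * 2, MAX_DELAY)
        yield delay + random.uniform(0, delay)

schedule = backoff_delays()
print([round(next(schedule), 2) for _ in range(6)])  # e.g. [0.31, 0.64, 1.2, 2.5, 4.8, 9.1]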
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py
new file mode 100644
index 00000000..ad1b2b0f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/logstash.py
@@ -0,0 +1,248 @@
+# (C) 2016, Ievgen Khmelenko <ujenmr@gmail.com>
+# (C) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: logstash
+ type: notification
+ short_description: Sends events to Logstash
+ description:
+ - This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash
+ requirements:
+ - whitelisting in configuration
+ - logstash (python library)
+ options:
+ server:
+ description: Address of the Logstash server
+ env:
+ - name: LOGSTASH_SERVER
+ ini:
+ - section: callback_logstash
+ key: server
+ version_added: 1.0.0
+ default: localhost
+ port:
+ description: Port on which logstash is listening
+ env:
+ - name: LOGSTASH_PORT
+ ini:
+ - section: callback_logstash
+ key: port
+ version_added: 1.0.0
+ default: 5000
+ type:
+ description: Message type
+ env:
+ - name: LOGSTASH_TYPE
+ ini:
+ - section: callback_logstash
+ key: type
+ version_added: 1.0.0
+ default: ansible
+'''
+
+import os
+import json
+import socket
+import uuid
+from datetime import datetime
+
+import logging
+
+try:
+ import logstash
+ HAS_LOGSTASH = True
+except ImportError:
+ HAS_LOGSTASH = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ ansible logstash callback plugin
+ ansible.cfg:
+ callback_plugins = <path_to_callback_plugins_folder>
+ callback_whitelist = logstash
+ and put the plugin in <path_to_callback_plugins_folder>
+
+ logstash config:
+ input {
+ tcp {
+ port => 5000
+ codec => json
+ }
+ }
+
+ Requires:
+ python-logstash
+
+ This plugin makes use of the following environment variables or ini config:
+ LOGSTASH_SERVER (optional): defaults to localhost
+ LOGSTASH_PORT (optional): defaults to 5000
+ LOGSTASH_TYPE (optional): defaults to ansible
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.logstash'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_LOGSTASH:
+ self.disabled = True
+ self._display.warning("The required python-logstash is not installed. "
+ "pip install python-logstash")
+
+ self.start_time = datetime.utcnow()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.logger = logging.getLogger('python-logstash-logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.logstash_server = self.get_option('server')
+ self.logstash_port = self.get_option('port')
+ self.logstash_type = self.get_option('type')
+ self.handler = logstash.TCPLogstashHandler(
+ self.logstash_server,
+ int(self.logstash_port),
+ version=1,
+ message_type=self.logstash_type
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+ self.session = str(uuid.uuid1())
+ self.errors = 0
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook._file_name
+ data = {
+ 'status': "OK",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "start",
+ 'ansible_playbook': self.playbook,
+ }
+ self.logger.info("ansible start", extra=data)
+
+ def v2_playbook_on_stats(self, stats):
+ end_time = datetime.utcnow()
+ runtime = end_time - self.start_time
+ summarize_stat = {}
+ for host in stats.processed.keys():
+ summarize_stat[host] = stats.summarize(host)
+
+ if self.errors == 0:
+ status = "OK"
+ else:
+ status = "FAILED"
+
+ data = {
+ 'status': status,
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "finish",
+ 'ansible_playbook': self.playbook,
+ 'ansible_playbook_duration': runtime.total_seconds(),
+ 'ansible_result': json.dumps(summarize_stat),
+ }
+ self.logger.info("ansible stats", extra=data)
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ data = {
+ 'status': "OK",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.logger.info("ansible ok", extra=data)
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ data = {
+ 'status': "SKIPPED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_task': result._task,
+ 'ansible_host': result._host.name
+ }
+ self.logger.info("ansible skipped", extra=data)
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ data = {
+ 'status': "IMPORTED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "import",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'imported_file': imported_file
+ }
+ self.logger.info("ansible import", extra=data)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ data = {
+ 'status': "NOT IMPORTED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "import",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'missing_file': missing_file
+ }
+ self.logger.info("ansible import", extra=data)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ data = {
+ 'status': "FAILED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.errors += 1
+ self.logger.error("ansible failed", extra=data)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ data = {
+ 'status': "UNREACHABLE",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.logger.error("ansible unreachable", extra=data)
+
+ def v2_runner_on_async_failed(self, result, **kwargs):
+ data = {
+ 'status': "FAILED",
+ 'host': self.hostname,
+ 'session': self.session,
+ 'ansible_type': "task",
+ 'ansible_playbook': self.playbook,
+ 'ansible_host': result._host.name,
+ 'ansible_task': result._task,
+ 'ansible_result': self._dump_results(result._result)
+ }
+ self.errors += 1
+ self.logger.error("ansible async", extra=data)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py
new file mode 100644
index 00000000..2172f45c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/mail.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: mail
+type: notification
+short_description: Sends failure events via email
+description:
+- This callback will report failures via email
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- whitelisting in configuration
+options:
+ mta:
+ description: Mail Transfer Agent, server that accepts SMTP
+ env:
+ - name: SMTPHOST
+ ini:
+ - section: callback_mail
+ key: smtphost
+ default: localhost
+ mtaport:
+    description: Mail Transfer Agent port, the port on which the SMTP server listens
+ ini:
+ - section: callback_mail
+ key: smtpport
+ default: 25
+ to:
+ description: Mail recipient
+ ini:
+ - section: callback_mail
+ key: to
+ default: root
+ sender:
+ description: Mail sender
+ ini:
+ - section: callback_mail
+ key: sender
+ cc:
+ description: CC'd recipient
+ ini:
+ - section: callback_mail
+ key: cc
+ bcc:
+ description: BCC'd recipient
+ ini:
+ - section: callback_mail
+ key: bcc
+notes:
+- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
+'''
+
+import json
+import os
+import re
+import smtplib
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ ''' This Ansible callback plugin mails errors to interested parties. '''
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.mail'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.sender = None
+ self.to = 'root'
+ self.smtphost = os.getenv('SMTPHOST', 'localhost')
+ self.smtpport = 25
+ self.cc = None
+ self.bcc = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.sender = self.get_option('sender')
+ self.to = self.get_option('to')
+ self.smtphost = self.get_option('mta')
+ self.smtpport = int(self.get_option('mtaport'))
+ self.cc = self.get_option('cc')
+ self.bcc = self.get_option('bcc')
+
+ def mail(self, subject='Ansible error mail', body=None):
+ if body is None:
+ body = subject
+
+ smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
+
+ b_sender = to_bytes(self.sender)
+ b_to = to_bytes(self.to)
+ b_cc = to_bytes(self.cc)
+ b_bcc = to_bytes(self.bcc)
+ b_subject = to_bytes(subject)
+ b_body = to_bytes(body)
+
+ b_content = b'From: %s\n' % b_sender
+ b_content += b'To: %s\n' % b_to
+ if self.cc:
+ b_content += b'Cc: %s\n' % b_cc
+ b_content += b'Subject: %s\n\n' % b_subject
+ b_content += b_body
+
+ b_addresses = b_to.split(b',')
+ if self.cc:
+ b_addresses += b_cc.split(b',')
+ if self.bcc:
+ b_addresses += b_bcc.split(b',')
+
+ for b_address in b_addresses:
+ smtp.sendmail(b_sender, b_address, b_content)
+
+ smtp.quit()
+
+ def subject_msg(self, multiline, failtype, linenr):
+ return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+
+ def indent(self, multiline, indent=8):
+ return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
+
+ def body_blob(self, multiline, texttype):
+        ''' Turn some text output into a well-indented block for sending in a mail body '''
+ intro = 'with the following %s:\n\n' % texttype
+ blob = ''
+ for line in multiline.strip('\r\n').splitlines():
+ blob += '%s\n' % line
+ return intro + self.indent(blob) + '\n'
+
+ def mail_result(self, result, failtype):
+ host = result._host.get_name()
+ if not self.sender:
+ self.sender = '"Ansible: %s" <root>' % host
+
+ # Add subject
+ if self.itembody:
+ subject = self.itemsubject
+ elif result._result.get('failed_when_result') is True:
+ subject = "Failed due to 'failed_when' condition"
+ elif result._result.get('msg'):
+ subject = self.subject_msg(result._result['msg'], failtype, 0)
+ elif result._result.get('stderr'):
+ subject = self.subject_msg(result._result['stderr'], failtype, -1)
+ elif result._result.get('stdout'):
+ subject = self.subject_msg(result._result['stdout'], failtype, -1)
+ elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ subject = self.subject_msg(result._result['exception'], failtype, -1)
+ else:
+ subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+
+ # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
+ body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+ if result._task.name:
+ body += 'Task: %s\n' % result._task.name
+ body += 'Module: %s\n' % result._task.action
+ body += 'Host: %s\n' % host
+ body += '\n'
+
+ # Add task information (as much as possible)
+ body += 'The following task failed:\n\n'
+ if 'invocation' in result._result:
+ body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+ elif result._task.name:
+ body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+ else:
+ body += self.indent('%s\n' % result._task.action)
+ body += '\n'
+
+ # Add item / message
+ if self.itembody:
+ body += self.itembody
+ elif result._result.get('failed_when_result') is True:
+ body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+ elif result._result.get('msg'):
+ body += self.body_blob(result._result['msg'], 'message')
+
+ # Add stdout / stderr / exception / warnings / deprecations
+ if result._result.get('stdout'):
+ body += self.body_blob(result._result['stdout'], 'standard output')
+ if result._result.get('stderr'):
+ body += self.body_blob(result._result['stderr'], 'error output')
+ if result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ body += self.body_blob(result._result['exception'], 'exception')
+ if result._result.get('warnings'):
+ for i in range(len(result._result.get('warnings'))):
+                body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
+ if result._result.get('deprecations'):
+ for i in range(len(result._result.get('deprecations'))):
+                body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
+
+ body += 'and a complete dump of the error:\n\n'
+ body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+
+ self.mail(subject=subject, body=body)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.itembody = ''
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors:
+ return
+
+ self.mail_result(result, 'Failed')
+
+ def v2_runner_on_unreachable(self, result):
+ self.mail_result(result, 'Unreachable')
+
+ def v2_runner_on_async_failed(self, result):
+ self.mail_result(result, 'Async failure')
+
+ def v2_runner_item_on_failed(self, result):
+ # Pass item information to task failure
+ self.itemsubject = result._result['msg']
+ self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
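The mail() method above builds the message by hand rather than with email.mime: the headers are concatenated into one blob and the blob is delivered once per recipient. A minimal sketch of that flow using only the standard library (the MTA host and the addresses are assumptions):

import smtplib

sender = '"Ansible: web01" <root>'
to = 'ops@example.com,oncall@example.com'
subject = 'Ansible error mail'
body = 'The following task failed: ...'

# headers and body are concatenated into one raw message, as in mail() above
content = 'From: %s\nTo: %s\nSubject: %s\n\n%s' % (sender, to, subject, body)

smtp = smtplib.SMTP('localhost', port=25)
for address in to.split(','):      # one delivery per recipient, as in the plugin
    smtp.sendmail(sender, address, content)
smtp.quit()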
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py
new file mode 100644
index 00000000..a814a41c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/nrdp.py
@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+# (c) 2018 Remi Verchere <remi@verchere.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: nrdp
+ type: notification
+ author: "Remi VERCHERE (@rverchere)"
+    short_description: Post task results to a Nagios server through NRDP
+    description:
+      - This callback sends playbook results to Nagios.
+      - Nagios must use NRDP to receive passive events.
+      - The passive check is sent to a dedicated host/service for Ansible.
+ options:
+ url:
+ description: url of the nrdp server
+ required: True
+ env:
+ - name : NRDP_URL
+ ini:
+ - section: callback_nrdp
+ key: url
+ validate_certs:
+ description: (bool) validate the SSL certificate of the nrdp server. (For HTTPS url)
+ env:
+ - name: NRDP_VALIDATE_CERTS
+ ini:
+ - section: callback_nrdp
+ key: validate_nrdp_certs
+ - section: callback_nrdp
+ key: validate_certs
+ default: False
+ aliases: [ validate_nrdp_certs ]
+ token:
+ description: token to be allowed to push nrdp events
+ required: True
+ env:
+ - name: NRDP_TOKEN
+ ini:
+ - section: callback_nrdp
+ key: token
+ hostname:
+ description: hostname where the passive check is linked to
+ required: True
+ env:
+ - name : NRDP_HOSTNAME
+ ini:
+ - section: callback_nrdp
+ key: hostname
+ servicename:
+ description: service where the passive check is linked to
+ required: True
+ env:
+ - name : NRDP_SERVICENAME
+ ini:
+ - section: callback_nrdp
+ key: servicename
+'''
+
+import os
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ '''
+    Send ansible-playbook results to a Nagios server using the NRDP protocol
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.nrdp'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ # Nagios states
+ OK = 0
+ WARNING = 1
+ CRITICAL = 2
+ UNKNOWN = 3
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+ if not self.url.endswith('/'):
+ self.url += '/'
+ self.token = self.get_option('token')
+ self.hostname = self.get_option('hostname')
+ self.servicename = self.get_option('servicename')
+ self.validate_nrdp_certs = self.get_option('validate_certs')
+
+ if (self.url or self.token or self.hostname or
+ self.servicename) is None:
+ self._display.warning("NRDP callback wants the NRDP_URL,"
+ " NRDP_TOKEN, NRDP_HOSTNAME,"
+ " NRDP_SERVICENAME"
+                                  " environment variables."
+ " The NRDP callback plugin is disabled.")
+ self.disabled = True
+
+ def _send_nrdp(self, state, msg):
+ '''
+        NRDP service check sends XMLDATA like this:
+ <?xml version='1.0'?>
+ <checkresults>
+ <checkresult type='service'>
+ <hostname>somehost</hostname>
+ <servicename>someservice</servicename>
+ <state>1</state>
+ <output>WARNING: Danger Will Robinson!|perfdata</output>
+ </checkresult>
+ </checkresults>
+ '''
+ xmldata = "<?xml version='1.0'?>\n"
+ xmldata += "<checkresults>\n"
+ xmldata += "<checkresult type='service'>\n"
+ xmldata += "<hostname>%s</hostname>\n" % self.hostname
+ xmldata += "<servicename>%s</servicename>\n" % self.servicename
+ xmldata += "<state>%d</state>\n" % state
+ xmldata += "<output>%s</output>\n" % msg
+ xmldata += "</checkresult>\n"
+ xmldata += "</checkresults>\n"
+
+ body = {
+ 'cmd': 'submitcheck',
+ 'token': self.token,
+ 'XMLDATA': bytes(xmldata)
+ }
+
+ try:
+ response = open_url(self.url,
+ data=urlencode(body),
+ method='POST',
+ validate_certs=self.validate_nrdp_certs)
+ return response.read()
+ except Exception as ex:
+ self._display.warning("NRDP callback cannot send result {0}".format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ '''
+ Display Playbook and play start messages
+ '''
+ self.play = play
+
+ def v2_playbook_on_stats(self, stats):
+ '''
+ Display info about playbook statistics
+ '''
+ name = self.play
+ gstats = ""
+ hosts = sorted(stats.processed.keys())
+ critical = warning = 0
+ for host in hosts:
+ stat = stats.summarize(host)
+ gstats += "'%s_ok'=%d '%s_changed'=%d \
+ '%s_unreachable'=%d '%s_failed'=%d " % \
+ (host, stat['ok'], host, stat['changed'],
+ host, stat['unreachable'], host, stat['failures'])
+ # Critical when failed tasks or unreachable host
+ critical += stat['failures']
+ critical += stat['unreachable']
+ # Warning when changed tasks
+ warning += stat['changed']
+
+ msg = "%s | %s" % (name, gstats)
+ if critical:
+ # Send Critical
+ self._send_nrdp(self.CRITICAL, msg)
+ elif warning:
+ # Send Warning
+ self._send_nrdp(self.WARNING, msg)
+ else:
+ # Send OK
+ self._send_nrdp(self.OK, msg)
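A standalone sketch of the submission performed by _send_nrdp(): the check result is wrapped in the XML shown in the docstring above and POSTed as a form-encoded 'submitcheck' command (the URL, token and perfdata string are assumptions; the plugin itself uses Ansible's open_url):

from urllib.parse import urlencode
from urllib.request import urlopen

xmldata = (
    "<?xml version='1.0'?>\n"
    "<checkresults>\n"
    "<checkresult type='service'>\n"
    "<hostname>web01</hostname>\n"
    "<servicename>ansible</servicename>\n"
    "<state>0</state>\n"   # 0=OK, 1=WARNING, 2=CRITICAL, as in the class constants above
    "<output>site.yml | 'web01_ok'=12 'web01_changed'=3 'web01_unreachable'=0 'web01_failed'=0</output>\n"
    "</checkresult>\n"
    "</checkresults>\n"
)
body = urlencode({'cmd': 'submitcheck', 'token': 'NRDP-TOKEN', 'XMLDATA': xmldata}).encode()
print(urlopen('https://nagios.example.com/nrdp/', data=body).read())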
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py
new file mode 100644
index 00000000..e4ef684b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/null.py
@@ -0,0 +1,30 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: 'null'
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: Don't display stuff to screen
+ description:
+      - This callback prevents outputting events to the screen.
+'''
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+    This callback won't print messages to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.null'
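Because this is a stdout callback, it is selected as the main display callback rather than whitelisted; a minimal ansible.cfg sketch, following the same convention as the other plugins' examples:

[defaults]
stdout_callback = community.general.null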
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py
new file mode 100644
index 00000000..fe1a917e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/osx_say.py
@@ -0,0 +1,114 @@
+# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: say
+ type: notification
+ requirements:
+ - whitelisting in configuration
+ - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
+ short_description: notify using software speech synthesizer
+ description:
+ - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
+ notes:
+ - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
+'''
+
+import distutils.spawn
+import platform
+import subprocess
+import os
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ makes Ansible much more exciting.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.say'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ self.FAILED_VOICE = None
+ self.REGULAR_VOICE = None
+ self.HAPPY_VOICE = None
+ self.LASER_VOICE = None
+
+ self.synthesizer = distutils.spawn.find_executable('say')
+ if not self.synthesizer:
+ self.synthesizer = distutils.spawn.find_executable('espeak')
+ if self.synthesizer:
+ self.FAILED_VOICE = 'klatt'
+ self.HAPPY_VOICE = 'f5'
+ self.LASER_VOICE = 'whisper'
+ elif platform.system() != 'Darwin':
+            # 'say' executable found but not on macOS; it may be the GNUstep tool, which doesn't support the 'voice' parameter
+ self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ else:
+ self.FAILED_VOICE = 'Zarvox'
+ self.REGULAR_VOICE = 'Trinoids'
+ self.HAPPY_VOICE = 'Cellos'
+ self.LASER_VOICE = 'Princess'
+
+        # the plugin disables itself if neither 'say' nor 'espeak' is present
+ # ansible will not call any callback if disabled is set to True
+ if not self.synthesizer:
+ self.disabled = True
+ self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+
+ def say(self, msg, voice):
+ cmd = [self.synthesizer, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ subprocess.call(cmd)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_ok(self, host, res):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_skipped(self, host, item=None):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_unreachable(self, host, res):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def playbook_on_start(self):
+ self.say("Running Playbook", self.REGULAR_VOICE)
+
+ def playbook_on_notify(self, host, handler):
+ self.say("pew", self.LASER_VOICE)
+
+ def playbook_on_task_start(self, name, is_conditional):
+ if not is_conditional:
+ self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ else:
+ self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+
+ def playbook_on_setup(self):
+ self.say("Gathering facts", self.REGULAR_VOICE)
+
+ def playbook_on_play_start(self, name):
+ self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+
+ def playbook_on_stats(self, stats):
+ self.say("Play complete", self.HAPPY_VOICE)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py
new file mode 100644
index 00000000..fe1a917e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/say.py
@@ -0,0 +1,114 @@
+# (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: say
+ type: notification
+ requirements:
+ - whitelisting in configuration
+ - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
+ short_description: notify using software speech synthesizer
+ description:
+ - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
+ notes:
+ - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
+'''
+
+import distutils.spawn
+import platform
+import subprocess
+import os
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ makes Ansible much more exciting.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.say'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ self.FAILED_VOICE = None
+ self.REGULAR_VOICE = None
+ self.HAPPY_VOICE = None
+ self.LASER_VOICE = None
+
+ self.synthesizer = distutils.spawn.find_executable('say')
+ if not self.synthesizer:
+ self.synthesizer = distutils.spawn.find_executable('espeak')
+ if self.synthesizer:
+ self.FAILED_VOICE = 'klatt'
+ self.HAPPY_VOICE = 'f5'
+ self.LASER_VOICE = 'whisper'
+ elif platform.system() != 'Darwin':
+            # 'say' executable found but not on macOS; it may be the GNUstep tool, which doesn't support the 'voice' parameter
+ self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ else:
+ self.FAILED_VOICE = 'Zarvox'
+ self.REGULAR_VOICE = 'Trinoids'
+ self.HAPPY_VOICE = 'Cellos'
+ self.LASER_VOICE = 'Princess'
+
+        # the plugin disables itself if neither 'say' nor 'espeak' is present
+ # ansible will not call any callback if disabled is set to True
+ if not self.synthesizer:
+ self.disabled = True
+ self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+
+ def say(self, msg, voice):
+ cmd = [self.synthesizer, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ subprocess.call(cmd)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_ok(self, host, res):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_skipped(self, host, item=None):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_unreachable(self, host, res):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def playbook_on_start(self):
+ self.say("Running Playbook", self.REGULAR_VOICE)
+
+ def playbook_on_notify(self, host, handler):
+ self.say("pew", self.LASER_VOICE)
+
+ def playbook_on_task_start(self, name, is_conditional):
+ if not is_conditional:
+ self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ else:
+ self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+
+ def playbook_on_setup(self):
+ self.say("Gathering facts", self.REGULAR_VOICE)
+
+ def playbook_on_play_start(self, name):
+ self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+
+ def playbook_on_stats(self, stats):
+ self.say("Play complete", self.HAPPY_VOICE)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py
new file mode 100644
index 00000000..9521081e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/selective.py
@@ -0,0 +1,276 @@
+# (c) Fastly, inc 2016
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: selective
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: only print certain tasks
+ description:
+    - This callback only prints tasks that have been tagged with `print_action` or that have failed.
+      This allows operators to focus on the tasks that provide value.
+    - Tasks that are not printed are replaced with a '.'.
+ - If you increase verbosity all tasks are printed.
+ options:
+ nocolor:
+ default: False
+ description: This setting allows suppressing colorizing output
+ env:
+ - name: ANSIBLE_NOCOLOR
+ - name: ANSIBLE_SELECTIVE_DONT_COLORIZE
+ ini:
+ - section: defaults
+ key: nocolor
+ type: boolean
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug: msg="This will not be printed"
+ - ansible.builtin.debug: msg="But this will"
+ tags: [print_action]
+"""
+
+import difflib
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils._text import to_text
+from ansible.utils.color import codeCodes
+
+DONT_COLORIZE = False
+COLORS = {
+ 'normal': '\033[0m',
+ 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
+ 'bold': '\033[1m',
+ 'not_so_bold': '\033[1m\033[34m',
+ 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
+ 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
+ 'endc': '\033[0m',
+ 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
+}
+
+
+def dict_diff(prv, nxt):
+ """Return a dict of keys that differ with another config object."""
+ keys = set(prv.keys() + nxt.keys())
+ result = {}
+ for k in keys:
+ if prv.get(k) != nxt.get(k):
+ result[k] = (prv.get(k), nxt.get(k))
+ return result
+
+
+def colorize(msg, color):
+ """Given a string add necessary codes to format the string."""
+ if DONT_COLORIZE:
+ return msg
+ else:
+ return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
+
+
+class CallbackModule(CallbackBase):
+ """selective.py callback plugin."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.selective'
+
+ def __init__(self, display=None):
+ """selective.py callback plugin."""
+ super(CallbackModule, self).__init__(display)
+ self.last_skipped = False
+ self.last_task_name = None
+ self.printed_last_task = False
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ global DONT_COLORIZE
+ DONT_COLORIZE = self.get_option('nocolor')
+
+ def _print_task(self, task_name=None):
+ if task_name is None:
+ task_name = self.last_task_name
+
+ if not self.printed_last_task:
+ self.printed_last_task = True
+ line_length = 120
+ if self.last_skipped:
+ print()
+ msg = colorize("# {0} {1}".format(task_name,
+ '*' * (line_length - len(task_name))), 'bold')
+ print(msg)
+
+ def _indent_text(self, text, indent_level):
+ lines = text.splitlines()
+ result_lines = []
+ for l in lines:
+ result_lines.append("{0}{1}".format(' ' * indent_level, l))
+ return '\n'.join(result_lines)
+
+ def _print_diff(self, diff, indent_level):
+ if isinstance(diff, dict):
+ try:
+ diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(),
+ diff['after'].splitlines(),
+ fromfile=diff.get('before_header',
+ 'new_file'),
+ tofile=diff['after_header']))
+ except AttributeError:
+ diff = dict_diff(diff['before'], diff['after'])
+ if diff:
+ diff = colorize(str(diff), 'changed')
+ print(self._indent_text(diff, indent_level + 4))
+
+ def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr):
+ if is_host:
+ indent_level = 0
+ name = colorize(host_or_item.name, 'not_so_bold')
+ else:
+ indent_level = 4
+ if isinstance(host_or_item, dict):
+ if 'key' in host_or_item.keys():
+ host_or_item = host_or_item['key']
+ name = colorize(to_text(host_or_item), 'bold')
+
+ if error:
+ color = 'failed'
+ change_string = colorize('FAILED!!!', color)
+ else:
+ color = 'changed' if changed else 'ok'
+ change_string = colorize("changed={0}".format(changed), color)
+
+ msg = colorize(msg, color)
+
+ line_length = 120
+ spaces = ' ' * (40 - len(name) - indent_level)
+ line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
+
+ if len(msg) < 50:
+ line += ' -- {0}'.format(msg)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(self._indent_text(msg, indent_level + 4))
+
+ if diff:
+ self._print_diff(diff, indent_level)
+ if stdout:
+ stdout = colorize(stdout, 'failed')
+ print(self._indent_text(stdout, indent_level + 4))
+ if stderr:
+ stderr = colorize(stderr, 'failed')
+ print(self._indent_text(stderr, indent_level + 4))
+
+ def v2_playbook_on_play_start(self, play):
+ """Run on start of the play."""
+ pass
+
+ def v2_playbook_on_task_start(self, task, **kwargs):
+ """Run when a task starts."""
+ self.last_task_name = task.get_name()
+ self.printed_last_task = False
+
+ def _print_task_result(self, result, error=False, **kwargs):
+ """Run when a task finishes correctly."""
+
+ if 'print_action' in result._task.tags or error or self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+ msg = to_text(result._result.get('msg', '')) or\
+ to_text(result._result.get('reason', ''))
+
+ stderr = [result._result.get('exception', None),
+ result._result.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(result._host,
+ result._result.get('changed', False),
+ msg,
+ result._result.get('diff', None),
+ is_host=True,
+ error=error,
+ stdout=result._result.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ if 'results' in result._result:
+ for r in result._result['results']:
+ failed = 'failed' in r and r['failed']
+
+ stderr = [r.get('exception', None), r.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(r['item'],
+ r.get('changed', False),
+ to_text(r.get('msg', '')),
+ r.get('diff', None),
+ is_host=False,
+ error=failed,
+ stdout=r.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ else:
+ self.last_skipped = True
+ print('.', end="")
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics."""
+ print()
+ self.printed_last_task = False
+ self._print_task('STATS')
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ s = stats.summarize(host)
+
+ if s['failures'] or s['unreachable']:
+ color = 'failed'
+ elif s['changed']:
+ color = 'changed'
+ else:
+ color = 'ok'
+
+ msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
+ host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
+ print(colorize(msg, color))
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ """Run when a task is skipped."""
+ if self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+
+ line_length = 120
+ spaces = ' ' * (31 - len(result._host.name) - 4)
+
+ line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
+ spaces,
+ colorize("skipped", 'skipped'),)
+
+ reason = result._result.get('skipped_reason', '') or \
+ result._result.get('skip_reason', '')
+ if len(reason) < 50:
+ line += ' -- {0}'.format(reason)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(self._indent_text(reason, 8))
+ print(reason)
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self._print_task_result(result, error=False, **kwargs)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ v2_playbook_on_handler_task_start = v2_playbook_on_task_start
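One portability note on dict_diff() above: 'prv.keys() + nxt.keys()' works on Python 2, but on Python 3 dict views do not support '+', so the call raises TypeError. A sketch of an equivalent that behaves the same on both interpreters, using a set union instead of concatenation:

def dict_diff(prv, nxt):
    """Return a dict of keys that differ with another config object."""
    keys = set(prv) | set(nxt)   # union of keys works on Python 2 and 3
    result = {}
    for k in keys:
        if prv.get(k) != nxt.get(k):
            result[k] = (prv.get(k), nxt.get(k))
    return result

print(dict_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4}))
# -> {'b': (2, 3), 'c': (None, 4)}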
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py
new file mode 100644
index 00000000..33cee0ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/slack.py
@@ -0,0 +1,251 @@
+# (C) 2014-2015, Matt Martz <matt@sivel.net>
+# (C) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: slack
+ type: notification
+ requirements:
+ - whitelist in configuration
+ - prettytable (python library)
+ short_description: Sends play events to a Slack channel
+ description:
+ - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
+ - Before 2.4 only environment variables were available for configuring this plugin
+ options:
+ webhook_url:
+ required: True
+ description: Slack Webhook URL
+ env:
+ - name: SLACK_WEBHOOK_URL
+ ini:
+ - section: callback_slack
+ key: webhook_url
+ channel:
+ default: "#ansible"
+ description: Slack room to post in.
+ env:
+ - name: SLACK_CHANNEL
+ ini:
+ - section: callback_slack
+ key: channel
+ username:
+ description: Username to post as.
+ env:
+ - name: SLACK_USERNAME
+ default: ansible
+ ini:
+ - section: callback_slack
+ key: username
+ validate_certs:
+ description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
+ env:
+ - name: SLACK_VALIDATE_CERTS
+ ini:
+ - section: callback_slack
+ key: validate_certs
+ default: True
+ type: bool
+'''
+
+import json
+import os
+import uuid
+
+from ansible import context
+from ansible.module_utils._text import to_text
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+
+class CallbackModule(CallbackBase):
+ """This is an ansible callback plugin that sends status
+ updates to a Slack channel during playbook execution.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.slack'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not '
+ 'installed. Disabling the Slack callback '
+ 'plugin.')
+
+ self.playbook_name = None
+
+ # This is a 6 character identifier provided with each message
+ # This makes it easier to correlate messages when there are more
+ # than 1 simultaneous playbooks running
+ self.guid = uuid.uuid4().hex[:6]
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.webhook_url = self.get_option('webhook_url')
+ self.channel = self.get_option('channel')
+ self.username = self.get_option('username')
+ self.show_invocation = (self._display.verbosity > 1)
+ self.validate_certs = self.get_option('validate_certs')
+
+ if self.webhook_url is None:
+ self.disabled = True
+ self._display.warning('Slack Webhook URL was not provided. The '
+ 'Slack Webhook URL can be provided using '
+ 'the `SLACK_WEBHOOK_URL` environment '
+ 'variable.')
+
+ def send_msg(self, attachments):
+ headers = {
+ 'Content-type': 'application/json',
+ }
+
+ payload = {
+ 'channel': self.channel,
+ 'username': self.username,
+ 'attachments': attachments,
+ 'parse': 'none',
+ 'icon_url': ('https://cdn2.hubspot.net/hub/330046/'
+ 'file-449187601-png/ansible_badge.png'),
+ }
+
+ data = json.dumps(payload)
+ self._display.debug(data)
+ self._display.debug(self.webhook_url)
+ try:
+ response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
+ headers=headers)
+ return response.read()
+ except Exception as e:
+ self._display.warning(u'Could not submit message to Slack: %s' %
+ to_text(e))
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook_name = os.path.basename(playbook._file_name)
+
+ title = [
+ '*Playbook initiated* (_%s_)' % self.guid
+ ]
+
+ invocation_items = []
+ if context.CLIARGS and self.show_invocation:
+ tags = context.CLIARGS['tags']
+ skip_tags = context.CLIARGS['skip_tags']
+ extra_vars = context.CLIARGS['extra_vars']
+ subset = context.CLIARGS['subset']
+ inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
+
+ invocation_items.append('Inventory: %s' % ', '.join(inventory))
+ if tags and tags != ['all']:
+ invocation_items.append('Tags: %s' % ', '.join(tags))
+ if skip_tags:
+ invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
+ if subset:
+ invocation_items.append('Limit: %s' % subset)
+ if extra_vars:
+ invocation_items.append('Extra Vars: %s' %
+ ' '.join(extra_vars))
+
+ title.append('by *%s*' % context.CLIARGS['remote_user'])
+
+ title.append('\n\n*%s*' % self.playbook_name)
+ msg_items = [' '.join(title)]
+ if invocation_items:
+ msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
+
+ msg = '\n'.join(msg_items)
+
+ attachments = [{
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }]
+
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Play start messages"""
+
+ name = play.name or 'Play name not specified (%s)' % play._uuid
+ msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
+ attachments = [
+ {
+ 'fallback': msg,
+ 'text': msg,
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }
+ ]
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures', 'Rescued', 'Ignored'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures', 'rescued', 'ignored']])
+
+ attachments = []
+ msg_items = [
+ '*Playbook Complete* (_%s_)' % self.guid
+ ]
+ if failures or unreachable:
+ color = 'danger'
+ msg_items.append('\n*Failed!*')
+ else:
+ color = 'good'
+ msg_items.append('\n*Success!*')
+
+ msg_items.append('```\n%s\n```' % t)
+
+ msg = '\n'.join(msg_items)
+
+ attachments.append({
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': color,
+ 'mrkdwn_in': ['text', 'fallback', 'fields']
+ })
+
+ self.send_msg(attachments=attachments)
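A standalone sketch of the webhook call that send_msg() above performs: the attachments list is wrapped in a payload with channel and username, JSON-encoded, and POSTed with a JSON content type (the webhook URL and message text are assumptions; the plugin itself uses Ansible's open_url with certificate validation):

import json
from urllib.request import Request, urlopen

msg = '*Playbook initiated* (_ab12cd_)\n\n*site.yml*'
payload = {
    'channel': '#ansible',
    'username': 'ansible',
    'attachments': [{
        'fallback': msg,
        'fields': [{'value': msg}],
        'color': 'warning',
        'mrkdwn_in': ['text', 'fallback', 'fields'],
    }],
    'parse': 'none',
}
req = Request('https://hooks.slack.com/services/T000/B000/XXXXXXXX',
              data=json.dumps(payload).encode(),
              headers={'Content-type': 'application/json'})
print(urlopen(req).read())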
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py
new file mode 100644
index 00000000..68480752
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/splunk.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: splunk
+ type: aggregate
+ short_description: Sends task result events to Splunk HTTP Event Collector
+ author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
+ description:
+ - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
+ - The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
+ - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
+ requirements:
+ - Whitelisting this callback plugin
+      - 'Create an HTTP Event Collector in Splunk'
+ - 'Define the url and token in ansible.cfg'
+ options:
+ url:
+ description: URL to the Splunk HTTP collector source
+ env:
+ - name: SPLUNK_URL
+ ini:
+ - section: callback_splunk
+ key: url
+ authtoken:
+ description: Token to authenticate the connection to the Splunk HTTP collector
+ env:
+ - name: SPLUNK_AUTHTOKEN
+ ini:
+ - section: callback_splunk
+ key: authtoken
+ validate_certs:
+ description: Whether to validate certificates for connections to HEC. It is not recommended to set to
+ C(false) except when you are sure that nobody can intercept the connection
+ between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks!
+ env:
+ - name: SPLUNK_VALIDATE_CERTS
+ ini:
+ - section: callback_splunk
+ key: validate_certs
+ type: bool
+ default: true
+ version_added: '1.0.0'
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.splunk
+ Set the environment variable
+ export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
+ Set the ansible.cfg variable in the callback_splunk block
+ [callback_splunk]
+ url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SplunkHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, authtoken, validate_certs, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
+ data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
+ '+0000')
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+        # Wrap the JSON payload in the outer "event" object that the Splunk HTTP Event Collector expects
+ jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
+ jsondata = '{"event":' + jsondata + "}"
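+        # For illustration, the resulting payload has roughly this shape (the
+        # field values shown here are hypothetical):
+        #   {"event": {"status": "OK", "host": "controller01", "runtime": 0.42, ...}}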
+
+ open_url(
+ url,
+ jsondata,
+ headers={
+ 'Content-type': 'application/json',
+ 'Authorization': 'Splunk ' + authtoken
+ },
+ method='POST',
+ validate_certs=validate_certs
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.splunk'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.authtoken = None
+ self.validate_certs = None
+ self.splunk = SplunkHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys,
+ var_options=var_options,
+ direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Splunk HTTP collector source URL was '
+ 'not provided. The Splunk HTTP collector '
+ 'source URL can be provided using the '
+ '`SPLUNK_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.authtoken = self.get_option('authtoken')
+
+ if self.authtoken is None:
+ self.disabled = True
+            self._display.warning('Splunk HTTP collector requires an authentication '
+ 'token. The Splunk HTTP collector '
+ 'authentication token can be provided using the '
+ '`SPLUNK_AUTHTOKEN` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.validate_certs = self.get_option('validate_certs')
+
+ def v2_playbook_on_start(self, playbook):
+ self.splunk.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py
new file mode 100644
index 00000000..9aa0e3fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/stderr.py
@@ -0,0 +1,71 @@
+# (c) 2017, Frederic Van Espen <github@freh.be>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: stderr
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: Splits output, sending failed tasks to stderr
+ deprecated:
+ why: The 'default' callback plugin now supports this functionality
+ removed_in: '2.0.0' # was Ansible 2.11
+ alternative: "'default' callback plugin with 'display_failed_stderr = yes' option"
+ extends_documentation_fragment:
+ - default_callback
+ description:
+    - This is the stderr callback plugin, which behaves like the default callback plugin but sends error output to stderr.
+    - It also does not output skipped host/task/item status.
+'''
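+
+# For reference, the documented alternative can typically be enabled with a
+# minimal ansible.cfg such as (a sketch, not shipped with this plugin):
+#   [defaults]
+#   stdout_callback = default
+#   display_failed_stderr = yes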
+
+from ansible import constants as C
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the stderr callback plugin, which reuses the default
+ callback plugin but sends error output to stderr.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.stderr'
+
+ def __init__(self):
+
+ self.super_ref = super(CallbackModule, self)
+ self.super_ref.__init__()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result, use_stderr=True)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)), color=C.COLOR_ERROR,
+ stderr=True)
+ else:
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR, stderr=True)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py
new file mode 100644
index 00000000..bfb8d586
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/sumologic.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+callback: sumologic
+type: aggregate
+short_description: Sends task result events to Sumologic
+author: "Ryan Currah (@ryancurrah)"
+description:
+ - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source
+requirements:
+ - Whitelisting this callback plugin
+  - 'Create an HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
+ of C("timestamp": "(.*)")'
+options:
+ url:
+ description: URL to the Sumologic HTTP collector source
+ env:
+ - name: SUMOLOGIC_URL
+ ini:
+ - section: callback_sumologic
+ key: url
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.sumologic
+
+ Set the environment variable
+ export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+
+ Set the ansible.cfg variable in the callback_sumologic block
+ [callback_sumologic]
+ url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SumologicHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
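+        # The timestamp below renders like "2021-01-15 10:23:45 +0000" (a
+        # hypothetical value), matching the custom timestamp format the
+        # collector source is expected to be configured with.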
+ data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
+ '+0000')
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+ open_url(
+ url,
+ data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True),
+ headers={
+ 'Content-type': 'application/json',
+ 'X-Sumo-Host': data['ansible_host']
+ },
+ method='POST'
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.sumologic'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.sumologic = SumologicHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Sumologic HTTP collector source URL was '
+ 'not provided. The Sumologic HTTP collector '
+ 'source URL can be provided using the '
+ '`SUMOLOGIC_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ def v2_playbook_on_start(self, playbook):
+ self.sumologic.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py
new file mode 100644
index 00000000..dad34c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/syslog_json.py
@@ -0,0 +1,107 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: syslog_json
+ type: notification
+ requirements:
+ - whitelist in configuration
+ short_description: sends JSON events to syslog
+ description:
+ - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format
+ - Before 2.9 only environment variables were available for configuration
+ options:
+ server:
+ description: syslog server that will receive the event
+ env:
+ - name: SYSLOG_SERVER
+ default: localhost
+ ini:
+ - section: callback_syslog_json
+ key: syslog_server
+ port:
+ description: port on which the syslog server is listening
+ env:
+ - name: SYSLOG_PORT
+ default: 514
+ ini:
+ - section: callback_syslog_json
+ key: syslog_port
+ facility:
+ description: syslog facility to log as
+ env:
+ - name: SYSLOG_FACILITY
+ default: user
+ ini:
+ - section: callback_syslog_json
+ key: syslog_facility
+'''
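+
+# Example ansible.cfg wiring derived from the options above (the server value
+# is illustrative):
+#   [defaults]
+#   callback_whitelist = community.general.syslog_json
+#   [callback_syslog_json]
+#   syslog_server = logs.example.com
+#   syslog_port = 514
+#   syslog_facility = user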
+
+import os
+import json
+
+import logging
+import logging.handlers
+
+import socket
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs ansible-playbook and ansible runs to a syslog server in json format
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.syslog_json'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ syslog_host = self.get_option("server")
+ syslog_port = int(self.get_option("port"))
+ syslog_facility = self.get_option("facility")
+
+ self.logger = logging.getLogger('ansible logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handler = logging.handlers.SysLogHandler(
+ address=(syslog_host, syslog_port),
+ facility=syslog_facility
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_ok(self, host, res):
+ self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_skipped(self, host, item=None):
+ self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
+
+ def runner_on_unreachable(self, host, res):
+ self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def playbook_on_import_for_host(self, host, imported_file):
+ self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
+
+ def playbook_on_not_import_for_host(self, host, missing_file):
+ self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py
new file mode 100644
index 00000000..fa3e6d25
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/unixy.py
@@ -0,0 +1,246 @@
+# Copyright: (c) 2017, Allyson Bowles <@akatch>
+# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ callback: unixy
+ type: stdout
+ author: Allyson Bowles (@akatch)
+ short_description: condensed Ansible output
+ description:
+ - Consolidated Ansible output in the style of LINUX/UNIX startup logs.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
+
+from os.path import basename
+from ansible import constants as C
+from ansible import context
+from ansible.module_utils._text import to_text
+from ansible.utils.color import colorize, hostcolor
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ Design goals:
+ - Print consolidated output that looks like a *NIX startup log
+ - Defaults should avoid displaying unnecessary information wherever possible
+
+ TODOs:
+ - Only display task names if the task runs on at least one host
+ - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
+ - Consolidate stats display
+ - Display whether run is in --check mode
+ - Don't show play name if no hosts found
+ '''
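+    # Illustrative output shape (task and host names are hypothetical):
+    #   Install packages...
+    #     web01 done
+    #     web02 failed | msg: ...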
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.unixy'
+
+ def _run_is_verbose(self, result):
+ return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result)
+
+ def _get_task_display_name(self, task):
+ self.task_display_name = None
+ display_name = task.get_name().strip().split(" : ")
+
+ task_display_name = display_name[-1]
+ if task_display_name.startswith("include"):
+ return
+ else:
+ self.task_display_name = task_display_name
+
+ def _preprocess_result(self, result):
+ self.delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
+ self._handle_warnings(result._result)
+
+ def _process_result_output(self, result, msg):
+ task_host = result._host.get_name()
+ task_result = "%s %s" % (task_host, msg)
+
+ if self._run_is_verbose(result):
+ task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4))
+ return task_result
+
+ if self.delegated_vars:
+ task_delegate_host = self.delegated_vars['ansible_host']
+ task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg)
+
+ if result._result.get('msg') and result._result.get('msg') != "All items completed":
+ task_result += " | msg: " + to_text(result._result.get('msg'))
+
+ if result._result.get('stdout'):
+ task_result += " | stdout: " + result._result.get('stdout')
+
+ if result._result.get('stderr'):
+ task_result += " | stderr: " + result._result.get('stderr')
+
+ return task_result
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s..." % self.task_display_name)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s (via handler)... " % self.task_display_name)
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if name and play.hosts:
+ msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ else:
+ msg = u"---"
+
+ self._display.display(msg)
+
+ def v2_runner_on_skipped(self, result, ignore_errors=False):
+ if self.display_skipped_hosts:
+ self._preprocess_result(result)
+ display_color = C.COLOR_SKIP
+ msg = "skipped"
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ else:
+ return
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._preprocess_result(result)
+ display_color = C.COLOR_ERROR
+ msg = "failed"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+
+ def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
+ self._preprocess_result(result)
+
+ result_was_changed = ('changed' in result._result and result._result['changed'])
+ if result_was_changed:
+ msg = "done"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+ display_color = C.COLOR_CHANGED
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ elif self.display_ok_hosts:
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+
+ def v2_runner_item_on_skipped(self, result):
+ self.v2_runner_on_skipped(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self.v2_runner_on_failed(result)
+
+ def v2_runner_item_on_ok(self, result):
+ self.v2_runner_on_ok(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._preprocess_result(result)
+
+ msg = "unreachable"
+ display_color = C.COLOR_UNREACHABLE
+ task_result = self._process_result_output(result, msg)
+
+ self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+
+ def v2_on_file_diff(self, result):
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff'] and res.get('changed', False):
+ diff = self._get_diff(res['diff'])
+ if diff:
+ self._display.display(diff)
+ elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
+ diff = self._get_diff(result._result['diff'])
+ if diff:
+ self._display.display(diff)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.display("\n- Play recap -", screen_only=True)
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ # TODO how else can we display these?
+ t = stats.summarize(h)
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None),
+ colorize(u'rescued', t['rescued'], None),
+ colorize(u'ignored', t['ignored'], None)),
+ log_only=True
+ )
+ if stats.custom and self.show_custom_stats:
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display(" No hosts found!", color=C.COLOR_DEBUG)
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
+
+ def v2_playbook_on_start(self, playbook):
+ # TODO display whether this run is happening in check mode
+ self._display.display("Executing playbook %s" % basename(playbook._file_name))
+
+ # show CLI arguments
+ if self._display.verbosity > 3:
+ if context.CLIARGS.get('args'):
+ self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ color=C.COLOR_VERBOSE, screen_only=True)
+
+ for argument in (a for a in context.CLIARGS if a != 'args'):
+ val = context.CLIARGS[argument]
+ if val:
+ self._display.vvvv('%s: %s' % (argument, val))
+
+ def v2_runner_retry(self, result):
+ msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries'])
+ if self._run_is_verbose(result):
+            msg += " Result was: %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_DEBUG)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py b/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py
new file mode 100644
index 00000000..2fbb2f48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/callback/yaml.py
@@ -0,0 +1,130 @@
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ callback: yaml
+ type: stdout
+ short_description: yaml-ized Ansible screen output
+ description:
+ - Ansible output that can be quite a bit easier to read than the
+ default JSON formatting.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
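+
+# Typically enabled as the main display callback via ansible.cfg (a sketch):
+#   [defaults]
+#   stdout_callback = community.general.yaml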
+
+import yaml
+import json
+import re
+import string
+import sys
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+# from http://stackoverflow.com/a/15423007/115478
+def should_use_block(value):
+ """Returns true if string should be in block format"""
+ for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
+ if c in value:
+ return True
+ return False
+
+
+def my_represent_scalar(self, tag, value, style=None):
+ """Uses block style for multi-line strings"""
+ if style is None:
+ if should_use_block(value):
+ style = '|'
+ # we care more about readable than accuracy, so...
+ # ...no trailing space
+ value = value.rstrip()
+ # ...and non-printable characters
+ value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
+ # ...tabs prevent blocks from expanding
+ value = value.expandtabs()
+ # ...and odd bits of whitespace
+ value = re.sub(r'[\x0b\x0c\r]', '', value)
+ # ...as does trailing space
+ value = re.sub(r' +\n', '\n', value)
+ else:
+ style = self.default_style
+ node = yaml.representer.ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+
+class CallbackModule(Default):
+
+ """
+ Variation of the Default output which uses nicely readable YAML instead
+ of JSON for printing results.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.yaml'
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
+
+ def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
+ if result.get('_ansible_no_log', False):
+ return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
+
+        # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ abridged_result = strip_internal_keys(module_response_deepcopy(result))
+
+ # remove invocation unless specifically wanting it
+ if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
+ del abridged_result['invocation']
+
+ # remove diff information from screen output
+ if self._display.verbosity < 3 and 'diff' in result:
+ del abridged_result['diff']
+
+ # remove exception from screen output
+ if 'exception' in abridged_result:
+ del abridged_result['exception']
+
+ dumped = ''
+
+ # put changed and skipped into a header line
+ if 'changed' in abridged_result:
+ dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
+ del abridged_result['changed']
+
+ if 'skipped' in abridged_result:
+ dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
+ del abridged_result['skipped']
+
+ # if we already have stdout, we don't need stdout_lines
+ if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
+ abridged_result['stdout_lines'] = '<omitted>'
+
+ # if we already have stderr, we don't need stderr_lines
+ if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
+ abridged_result['stderr_lines'] = '<omitted>'
+
+ if abridged_result:
+ dumped += '\n'
+ dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
+
+ # indent by a couple of spaces
+ dumped = '\n '.join(dumped.split('\n')).rstrip()
+ return dumped
+
+ def _serialize_diff(self, diff):
+ return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py
new file mode 100644
index 00000000..7c9aed10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/chroot.py
@@ -0,0 +1,206 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
+ connection: chroot
+ short_description: Interact with local chroot
+ description:
+ - Run commands or put/fetch files to an existing chroot on the Ansible controller.
+ options:
+ remote_addr:
+ description:
+ - The path of the chroot you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ executable:
+ description:
+ - User specified executable shell
+ ini:
+ - section: defaults
+ key: executable
+ env:
+ - name: ANSIBLE_EXECUTABLE
+ vars:
+ - name: ansible_executable
+ default: /bin/sh
+ chroot_exe:
+ description:
+ - User specified chroot binary
+ ini:
+ - section: chroot_connection
+ key: exe
+ env:
+ - name: ANSIBLE_CHROOT_EXE
+ vars:
+ - name: ansible_chroot_exe
+ default: chroot
+'''
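+
+# Example inventory entry (a sketch; the chroot path is hypothetical). Note
+# that this plugin refuses to run unless the controller process is root:
+#   [chroots]
+#   /srv/buster-chroot ansible_connection=community.general.chroot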
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local chroot based connections '''
+
+ transport = 'community.general.chroot'
+ has_pipelining = True
+ # su currently has an undiagnosed issue with calculating the file
+ # checksums (so copy, for instance, doesn't work right)
+ # Have to look into that before re-enabling this
+ has_tty = False
+
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.chroot = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("chroot connection requires running as root")
+
+ # we're running as root on the local system so do some
+ # trivial checks for ensuring 'host' is actually a chroot'able dir
+ if not os.path.isdir(self.chroot):
+ raise AnsibleError("%s is not a directory" % self.chroot)
+
+ chrootsh = os.path.join(self.chroot, 'bin/sh')
+ # Want to check for a usable bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks it
+ # gets really complicated really fast. So we punt on finding that
+ # out. As long as it's a symlink we assume that it will work
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
+ raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+
+ def _connect(self):
+ ''' connect to the chroot '''
+ if os.path.isabs(self.get_option('chroot_exe')):
+ self.chroot_cmd = self.get_option('chroot_exe')
+ else:
+ try:
+ self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+        ''' run a command on the chroot. This is only needed for implementing
+        put_file() and fetch_file() so that we don't have to read the whole file
+        into memory.
+
+        Compared to exec_command() it loses some niceties like being able to
+        return the process's exit code immediately.
+ '''
+ executable = self.get_option('executable')
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the chroot '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to chroot '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from chroot to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py
new file mode 100644
index 00000000..732620b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/docker.py
@@ -0,0 +1,366 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+    - Lorin Hochstein (!UNKNOWN)
+ - Leendert Brouwer (!UNKNOWN)
+ connection: docker
+ short_description: Run tasks in docker containers
+ description:
+ - Run commands or put/fetch files to an existing docker container.
+ options:
+ remote_user:
+ description:
+ - The user to execute as inside the container
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ docker_extra_args:
+ description:
+ - Extra arguments to pass to the docker command line
+ default: ''
+ remote_addr:
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_docker_host
+'''
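+
+# Example inventory entry (a sketch; the container name is hypothetical):
+#   web01 ansible_connection=community.general.docker ansible_user=root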
+
+import distutils.spawn
+import fcntl
+import os
+import os.path
+import subprocess
+import re
+
+from distutils.version import LooseVersion
+
+import ansible.constants as C
+from ansible.compat import selectors
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.general.docker'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ # Note: docker supports running as non-root in some configurations.
+ # (For instance, setting the UNIX socket file to be readable and
+ # writable by a specific UNIX group and then putting users into that
+ # group). Therefore we don't check that the user is root when using
+ # this connection. But if the user is getting a permission denied
+ # error it probably means that docker on their system is only
+ # configured to be connected to by root and they are not running as
+ # root.
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ if 'docker_command' in kwargs:
+ self.docker_cmd = kwargs['docker_command']
+ else:
+ self.docker_cmd = distutils.spawn.find_executable('docker')
+ if not self.docker_cmd:
+ raise AnsibleError("docker command not found in PATH")
+
+ docker_version = self._get_docker_version()
+ if docker_version == u'dev':
+ display.warning(u'Docker version number is "dev". Will assume latest version.')
+ if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
+ raise AnsibleError('docker connection type requires docker 1.3 or higher')
+
+ # The remote user we will request from docker (if supported)
+ self.remote_user = None
+ # The actual user which will execute commands in docker (if known)
+ self.actual_user = None
+
+ if self._play_context.remote_user is not None:
+ if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
+ # Support for specifying the exec user was added in docker 1.7
+ self.remote_user = self._play_context.remote_user
+ self.actual_user = self.remote_user
+ else:
+ self.actual_user = self._get_docker_remote_user()
+
+ if self.actual_user != self._play_context.remote_user:
+ display.warning(u'docker {0} does not support remote_user, using container default: {1}'
+ .format(docker_version, self.actual_user or u'?'))
+ elif self._display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ self.actual_user = self._get_docker_remote_user()
+
+ @staticmethod
+ def _sanitize_version(version):
+ version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
+ version = re.sub(u'^v', u'', version)
+ return version
+
+ def _old_docker_version(self):
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ old_version_subcommand = ['version']
+
+ old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
+ p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+
+ return old_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _new_docker_version(self):
+ # no result yet, must be newer Docker version
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+
+ new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
+ p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _get_docker_version(self):
+
+ cmd, cmd_output, err, returncode = self._old_docker_version()
+ if returncode == 0:
+ for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
+ if line.startswith(u'Server version:'): # old docker versions
+ return self._sanitize_version(line.split()[2])
+
+ cmd, cmd_output, err, returncode = self._new_docker_version()
+ if returncode:
+ raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+
+ return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+
+ def _get_docker_remote_user(self):
+ """ Get the default user configured in the docker container """
+ p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ out, err = p.communicate()
+ out = to_text(out, errors='surrogate_or_strict')
+
+ if p.returncode != 0:
+ display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
+ return None
+
+ # The default exec user is root, unless it was changed in the Dockerfile with USER
+ return out.strip() or u'root'
+
+ def _build_exec_cmd(self, cmd):
+ """ Build the local docker exec command to run cmd on remote_host
+
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
+ """
+
+ local_cmd = [self.docker_cmd]
+
+ if self._play_context.docker_extra_args:
+ local_cmd += self._play_context.docker_extra_args.split(' ')
+
+ local_cmd += [b'exec']
+
+ if self.remote_user is not None:
+ local_cmd += [b'-u', self.remote_user]
+
+ # -i is needed to keep stdin open which allows pipelining to work
+ local_cmd += [b'-i', self._play_context.remote_addr] + cmd
+
+ return local_cmd
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self._play_context.remote_addr
+ )
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+ display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ p = subprocess.Popen(
+ local_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
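+        # When privilege escalation expects a prompt, switch the child's stdout
+        # and stderr to non-blocking mode and read until the become plugin
+        # reports success or asks for a password; the password is then written
+        # to the child's stdin before resuming normal processing.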
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+        exist in any given container. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ out_path = shlex_quote(out_path)
+ # Older docker doesn't have native support for copying files into
+ # running containers, so we use docker exec to implement this
+ # Although docker version 1.8 and later provide support, the
+ # owner and group of the files are always set to root
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ try:
+ p = subprocess.Popen(args, stdin=in_file,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ in_path = self._prefix_login_path(in_path)
+ # out_path is the final file path, but docker takes a directory, not a
+ # file path
+ out_dir = os.path.dirname(out_path)
+
+ args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+
+ if p.returncode != 0:
+            # Older docker versions don't support fetching files with the `cp` command
+ # If `cp` fails, try to use `dd` instead
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ try:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=out_file, stderr=subprocess.PIPE)
+ except OSError:
+                    raise AnsibleError("docker connection requires dd command in the container to fetch files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ # Rename if needed
+ if actual_out_path != out_path:
+ os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py
new file mode 100644
index 00000000..83f4a9e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/funcd.py
@@ -0,0 +1,102 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@msherer) <misc@zarb.org>
+ connection: funcd
+ short_description: Use funcd to connect to target
+ description:
+ - This transport permits you to use Ansible over Func.
+      - For people who have already set up func and wish to play with ansible,
+        this permits moving gradually to ansible without having to completely redo the network setup.
+ options:
+ remote_addr:
+ description:
+        - The name of the func minion to connect to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_func_host
+'''
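+
+# Example inventory entry (a sketch; the minion name is hypothetical):
+#   minion01.example.com ansible_connection=community.general.funcd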
+
+HAVE_FUNC = False
+try:
+ import func.overlord.client as fc
+ HAVE_FUNC = True
+except ImportError:
+ pass
+
+import os
+import tempfile
+import shutil
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(object):
+ ''' Func-based connections '''
+
+ has_pipelining = False
+
+ def __init__(self, runner, host, port, *args, **kwargs):
+ self.runner = runner
+ self.host = host
+        # port is unused; func handles the transport itself
+ self.port = port
+
+ def connect(self, port=None):
+ if not HAVE_FUNC:
+ raise AnsibleError("func is not installed")
+
+ self.client = fc.Client(self.host)
+ return self
+
+ def exec_command(self, cmd, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+ ''' run a command on the remote minion '''
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+        # totally ignores privilege escalation
+ display.vvv("EXEC %s" % (cmd), host=self.host)
+ p = self.client.command.run(cmd)[self.host]
+ return (p[0], p[1], p[2])
+
+ def _normalize_path(self, path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ out_path = self._normalize_path(out_path, '/')
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self.client.local.copyfile.send(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ in_path = self._normalize_path(in_path, '/')
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+        # need to use a tmp dir because of the semantic difference between
+        # getfile (which takes a directory as destination) and fetch_file,
+        # which takes a file directly
+ tmpdir = tempfile.mkdtemp(prefix="func_ansible")
+ self.client.local.getfile.get(in_path, tmpdir)
+ shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
+ shutil.rmtree(tmpdir)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ pass
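
For illustration, the _normalize_path() helper above behaves like this small sketch (example paths only):

    import os

    def normalize_path(path, prefix):
        # anchor relative paths at "/" before normalizing, then re-root under prefix
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    print(normalize_path("etc/motd", "/"))          # /etc/motd
    print(normalize_path("/tmp/../etc/motd", "/"))  # /etc/motd
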
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py
new file mode 100644
index 00000000..aafd3a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/iocage.py
@@ -0,0 +1,82 @@
+# Based on jail.py
+# (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2016, Stephan Lohse <dev-github@ploek.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
+ connection: iocage
+ short_description: Run tasks in iocage jails
+ description:
+ - Run commands or put/fetch files to an existing iocage jail
+ options:
+ remote_addr:
+ description:
+ - Path to the jail
+ vars:
+ - name: ansible_host
+ - name: ansible_iocage_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_iocage_user
+'''
+
+import subprocess
+
+from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(Jail):
+ ''' Local iocage based connections '''
+
+ transport = 'community.general.iocage'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ self.ioc_jail = play_context.remote_addr
+
+ self.iocage_cmd = Jail._search_executable('iocage')
+
+ jail_uuid = self.get_jail_uuid()
+
+ kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid)
+
+ display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
+ iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
+ host=kwargs[Jail.modified_jailname_key])
+
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ def get_jail_uuid(self):
+ p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ stdout, stderr = p.communicate()
+
+ if stdout is not None:
+ stdout = to_native(stdout)
+
+ if stderr is not None:
+ stderr = to_native(stderr)
+
+ # otherwise p.returncode would not be set
+ p.wait()
+
+ if p.returncode != 0:
+ raise AnsibleError(u"iocage returned an error: {0}".format(stdout))
+
+ return stdout.strip('\n')
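
Condensed, the jail-name translation done in the constructor and get_jail_uuid() amounts to the following sketch; the iocage command and jail name are assumptions and error handling is omitted.

    import subprocess

    def iocage_raw_jail_name(jail_name, iocage_cmd="iocage"):
        # ask iocage for the jail's host uuid, then build the raw name jls/jexec see
        out = subprocess.check_output([iocage_cmd, "get", "host_hostuuid", jail_name])
        uuid = out.decode().strip("\n")
        return "ioc-{0}".format(uuid)
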
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py
new file mode 100644
index 00000000..7b44b9cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/jail.py
@@ -0,0 +1,201 @@
+# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ connection: jail
+ short_description: Run tasks in jails
+ description:
+ - Run commands or put/fetch files to an existing jail
+ options:
+ remote_addr:
+ description:
+ - Path to the jail
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_jail_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_jail_user
+'''
+
+import distutils.spawn
+import os
+import os.path
+import subprocess
+import traceback
+import ansible.constants as C
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local BSD Jail based connections '''
+
+ modified_jailname_key = 'conn_jail_name'
+
+ transport = 'community.general.jail'
+ # Pipelining may work. Someone needs to test by setting this to True and
+ # having pipelining=True in their ansible.cfg
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.jail = self._play_context.remote_addr
+ if self.modified_jailname_key in kwargs:
+ self.jail = kwargs[self.modified_jailname_key]
+
+ if os.geteuid() != 0:
+ raise AnsibleError("jail connection requires running as root")
+
+ self.jls_cmd = self._search_executable('jls')
+ self.jexec_cmd = self._search_executable('jexec')
+
+ if self.jail not in self.list_jails():
+ raise AnsibleError("incorrect jail name %s" % self.jail)
+
+ @staticmethod
+ def _search_executable(executable):
+ cmd = distutils.spawn.find_executable(executable)
+ if not cmd:
+ raise AnsibleError("%s command not found in PATH" % executable)
+ return cmd
+
+ def list_jails(self):
+ p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout, stderr = p.communicate()
+
+ return to_text(stdout, errors='surrogate_or_strict').split()
+
+ def _connect(self):
+ ''' connect to the jail; nothing to do here '''
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the jail. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+        compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
+
+ local_cmd = [self.jexec_cmd]
+ set_env = ''
+
+ if self._play_context.remote_user is not None:
+ local_cmd += ['-U', self._play_context.remote_user]
+ # update HOME since -U does not update the jail environment
+ set_env = 'HOME=~' + self._play_context.remote_user + ' '
+
+ local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
+
+ display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the jail '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to jail '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from jail to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
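
A minimal Python 3 sketch of the dd-based streaming behind put_file() above, assuming a jexec path and BUFSIZE value for illustration:

    import os
    import subprocess
    from shlex import quote

    BUFSIZE = 65536  # assumed; the plugin uses ansible.plugins.connection.BUFSIZE

    def put_into_jail(jail, in_path, out_path, jexec_cmd="/usr/sbin/jexec"):
        # an empty source file needs "count=0" so dd still creates the destination
        count = " count=0" if os.path.getsize(in_path) == 0 else ""
        dd_cmd = "dd of=%s bs=%s%s" % (quote(out_path), BUFSIZE, count)
        with open(in_path, "rb") as in_file:
            p = subprocess.Popen([jexec_cmd, jail, "/bin/sh", "-c", dd_cmd],
                                 stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise RuntimeError("transfer of %s failed: %s" % (in_path, stderr))
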
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py
new file mode 100644
index 00000000..b3b68a51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxc.py
@@ -0,0 +1,228 @@
+# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
+ connection: lxc
+ short_description: Run tasks in lxc containers via lxc python library
+ description:
+ - Run commands or put/fetch files to an existing lxc container using lxc python library
+ options:
+ remote_addr:
+ description:
+ - Container identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_lxc_host
+ executable:
+ default: /bin/sh
+ description:
+ - Shell executable
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxc_executable
+'''
+
+import os
+import shutil
+import traceback
+import select
+import fcntl
+import errno
+
+HAS_LIBLXC = False
+try:
+ import lxc as _lxc
+ HAS_LIBLXC = True
+except ImportError:
+ pass
+
+from ansible import constants as C
+from ansible import errors
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' Local lxc based connections '''
+
+ transport = 'community.general.lxc'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.container_name = self._play_context.remote_addr
+ self.container = None
+
+ def _connect(self):
+ ''' connect to the lxc; nothing to do here '''
+ super(Connection, self)._connect()
+
+ if not HAS_LIBLXC:
+            msg = "lxc python bindings are not installed"
+ raise errors.AnsibleError(msg)
+
+ if self.container:
+ return
+
+ self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
+ self.container = _lxc.Container(self.container_name)
+ if self.container.state == "STOPPED":
+ raise errors.AnsibleError("%s is not running" % self.container_name)
+
+ def _communicate(self, pid, in_data, stdin, stdout, stderr):
+ buf = {stdout: [], stderr: []}
+ read_fds = [stdout, stderr]
+ if in_data:
+ write_fds = [stdin]
+ else:
+ write_fds = []
+ while len(read_fds) > 0 or len(write_fds) > 0:
+ try:
+ ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
+ except select.error as e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+ for fd in ready_writes:
+ in_data = in_data[os.write(fd, in_data):]
+ if len(in_data) == 0:
+ write_fds.remove(fd)
+ for fd in ready_reads:
+ data = os.read(fd, 32768)
+ if not data:
+ read_fds.remove(fd)
+ buf[fd].append(data)
+
+ (pid, returncode) = os.waitpid(pid, 0)
+
+ return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
+
+ def _set_nonblocking(self, fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+ return fd
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the chroot '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ # python2-lxc needs bytes. python3-lxc needs text.
+ executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
+ local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
+
+ read_stdout, write_stdout = None, None
+ read_stderr, write_stderr = None, None
+ read_stdin, write_stdin = None, None
+
+ try:
+ read_stdout, write_stdout = os.pipe()
+ read_stderr, write_stderr = os.pipe()
+
+ kwargs = {
+ 'stdout': self._set_nonblocking(write_stdout),
+ 'stderr': self._set_nonblocking(write_stderr),
+ 'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
+ }
+
+ if in_data:
+ read_stdin, write_stdin = os.pipe()
+ kwargs['stdin'] = self._set_nonblocking(read_stdin)
+
+ self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
+ pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
+ if pid == -1:
+ msg = "failed to attach to container %s" % self.container_name
+ raise errors.AnsibleError(msg)
+
+ write_stdout = os.close(write_stdout)
+ write_stderr = os.close(write_stderr)
+ if read_stdin:
+ read_stdin = os.close(read_stdin)
+
+ return self._communicate(pid,
+ in_data,
+ write_stdin,
+ read_stdout,
+ read_stderr)
+ finally:
+ fds = [read_stdout,
+ write_stdout,
+ read_stderr,
+ write_stderr,
+ read_stdin,
+ write_stdin]
+ for fd in fds:
+ if fd:
+ os.close(fd)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to lxc '''
+ super(Connection, self).put_file(in_path, out_path)
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(in_path):
+ msg = "file or module does not exist: %s" % in_path
+ raise errors.AnsibleFileNotFound(msg)
+ try:
+ src_file = open(in_path, "rb")
+ except IOError:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to open input file to %s" % in_path)
+ try:
+ def write_file(args):
+ with open(out_path, 'wb+') as dst_file:
+ shutil.copyfileobj(src_file, dst_file)
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file to %s" % out_path
+ raise errors.AnsibleError(msg)
+ finally:
+ src_file.close()
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from lxc to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ try:
+ dst_file = open(out_path, "wb")
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to open output file %s" % out_path
+ raise errors.AnsibleError(msg)
+ try:
+ def write_file(args):
+ try:
+ with open(in_path, 'rb') as src_file:
+ shutil.copyfileobj(src_file, dst_file)
+ finally:
+ # this is needed in the lxc child process
+ # to flush internal python buffers
+ dst_file.close()
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file from %s to %s" % (in_path, out_path)
+ raise errors.AnsibleError(msg)
+ finally:
+ dst_file.close()
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
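
The non-blocking pipe handling in _communicate() boils down to a select loop like this simplified sketch (stdin writing and waitpid are omitted):

    import fcntl
    import os
    import select

    def set_nonblocking(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
        fcntl.fcntl(fd, fcntl.F_SETFL, flags)
        return fd

    def drain(read_fds):
        # read from several pipes until they are all closed
        buf = dict((fd, []) for fd in read_fds)
        pending = list(read_fds)
        while pending:
            ready, dummy, dummy2 = select.select(pending, [], [])
            for fd in ready:
                data = os.read(fd, 32768)
                if not data:
                    pending.remove(fd)
                else:
                    buf[fd].append(data)
        return dict((fd, b"".join(chunks)) for fd, chunks in buf.items())
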
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py
new file mode 100644
index 00000000..754b4f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/lxd.py
@@ -0,0 +1,125 @@
+# (c) 2016 Matt Clay <matt@mystile.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Matt Clay (@mattclay) <matt@mystile.com>
+ connection: lxd
+ short_description: Run tasks in lxc containers via lxc CLI
+ description:
+ - Run commands or put/fetch files to an existing lxc container using lxc CLI
+ options:
+ remote_addr:
+ description:
+ - Container identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_lxd_host
+ executable:
+ description:
+ - shell to use for execution inside container
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxd_executable
+'''
+
+import os
+from distutils.spawn import find_executable
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ lxd based connections """
+
+ transport = 'community.general.lxd'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._host = self._play_context.remote_addr
+ self._lxc_cmd = find_executable("lxc")
+
+ if not self._lxc_cmd:
+ raise AnsibleError("lxc command not found in PATH")
+
+ if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
+ self._display.warning('lxd does not support remote_user, using container default: root')
+
+ def _connect(self):
+ """connect to lxd (nothing to do here) """
+ super(Connection, self)._connect()
+
+ if not self._connected:
+ self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ execute a command on the lxd host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
+
+ local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = process.communicate(in_data)
+
+ stdout = to_text(stdout)
+ stderr = to_text(stderr)
+
+ if stderr == "error: Container is not running.\n":
+ raise AnsibleConnectionFailure("container not running: %s" % self._host)
+
+ if stderr == "error: not found\n":
+ raise AnsibleConnectionFailure("container not found: %s" % self._host)
+
+ return process.returncode, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ put a file from local to lxd """
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
+
+ if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+
+ local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from lxd to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
+
+ local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def close(self):
+ """ close the connection (nothing to do here) """
+ super(Connection, self).close()
+
+ self._connected = False
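
In isolation, the exec path above reduces to a sketch like this (container name and command are examples; the error-string checks and byte conversions are left out):

    from subprocess import PIPE, Popen

    def lxd_exec(container, cmd, lxc_cmd="lxc", executable="/bin/sh", in_data=None):
        # lxc exec <container> -- /bin/sh -c "<cmd>"
        local_cmd = [lxc_cmd, "exec", container, "--", executable, "-c", cmd]
        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate(in_data)
        return process.returncode, stdout, stderr
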
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py
new file mode 100644
index 00000000..4b035397
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/oc.py
@@ -0,0 +1,173 @@
+# Based on the docker connection plugin
+#
+# Connection plugin for configuring kubernetes containers with kubectl
+# (c) 2017, XuXinkun <xuxinkun@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - xuxinkun (!UNKNOWN)
+
+ connection: oc
+
+ short_description: Execute tasks in pods running on OpenShift.
+
+ description:
+ - Use the oc exec command to run tasks in, or put/fetch files to, pods running on the OpenShift
+ container platform.
+
+
+ requirements:
+ - oc (go binary)
+
+ options:
+ oc_pod:
+ description:
+ - Pod name. Required when the host name does not match pod name.
+ default: ''
+ vars:
+ - name: ansible_oc_pod
+ env:
+ - name: K8S_AUTH_POD
+ oc_container:
+ description:
+ - Container name. Required when a pod contains more than one container.
+ default: ''
+ vars:
+ - name: ansible_oc_container
+ env:
+ - name: K8S_AUTH_CONTAINER
+ oc_namespace:
+ description:
+ - The namespace of the pod
+ default: ''
+ vars:
+ - name: ansible_oc_namespace
+ env:
+ - name: K8S_AUTH_NAMESPACE
+ oc_extra_args:
+ description:
+ - Extra arguments to pass to the oc command line.
+ default: ''
+ vars:
+ - name: ansible_oc_extra_args
+ env:
+ - name: K8S_AUTH_EXTRA_ARGS
+ oc_kubeconfig:
+ description:
+        - Path to an oc config file. Defaults to I(~/.kube/config).
+ default: ''
+ vars:
+ - name: ansible_oc_kubeconfig
+ - name: ansible_oc_config
+ env:
+ - name: K8S_AUTH_KUBECONFIG
+ oc_context:
+ description:
+ - The name of a context found in the K8s config file.
+ default: ''
+ vars:
+ - name: ansible_oc_context
+ env:
+ - name: K8S_AUTH_CONTEXT
+ oc_host:
+ description:
+ - URL for accessing the API.
+ default: ''
+ vars:
+ - name: ansible_oc_host
+ - name: ansible_oc_server
+ env:
+ - name: K8S_AUTH_HOST
+ - name: K8S_AUTH_SERVER
+ oc_token:
+ description:
+ - API authentication bearer token.
+ vars:
+ - name: ansible_oc_token
+ - name: ansible_oc_api_key
+ env:
+ - name: K8S_AUTH_TOKEN
+ - name: K8S_AUTH_API_KEY
+ client_cert:
+ description:
+ - Path to a certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_cert_file
+ - name: ansible_oc_client_cert
+ env:
+ - name: K8S_AUTH_CERT_FILE
+ aliases: [ oc_cert_file ]
+ client_key:
+ description:
+ - Path to a key file used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_key_file
+ - name: ansible_oc_client_key
+ env:
+ - name: K8S_AUTH_KEY_FILE
+ aliases: [ oc_key_file ]
+ ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API.
+ default: ''
+ vars:
+ - name: ansible_oc_ssl_ca_cert
+ - name: ansible_oc_ca_cert
+ env:
+ - name: K8S_AUTH_SSL_CA_CERT
+ aliases: [ oc_ssl_ca_cert ]
+ validate_certs:
+ description:
+ - Whether or not to verify the API server's SSL certificate. Defaults to I(true).
+ default: ''
+ vars:
+ - name: ansible_oc_verify_ssl
+ - name: ansible_oc_validate_certs
+ env:
+ - name: K8S_AUTH_VERIFY_SSL
+ aliases: [ oc_verify_ssl ]
+'''
+
+from ansible_collections.community.kubernetes.plugins.connection.kubectl import Connection as KubectlConnection
+
+
+CONNECTION_TRANSPORT = 'community.general.oc'
+
+CONNECTION_OPTIONS = {
+ 'oc_container': '-c',
+ 'oc_namespace': '-n',
+ 'oc_kubeconfig': '--config',
+ 'oc_context': '--context',
+ 'oc_host': '--server',
+ 'client_cert': '--client-certificate',
+ 'client_key': '--client-key',
+ 'ca_cert': '--certificate-authority',
+ 'validate_certs': '--insecure-skip-tls-verify',
+ 'oc_token': '--token'
+}
+
+
+class Connection(KubectlConnection):
+ ''' Local oc based connections '''
+ transport = CONNECTION_TRANSPORT
+ connection_options = CONNECTION_OPTIONS
+ documentation = DOCUMENTATION
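
The real flag handling lives in the inherited community.kubernetes kubectl connection; as a toy sketch, an option map like the one above could be turned into CLI arguments as follows (names and values are examples only):

    OPTION_FLAGS = {
        'oc_namespace': '-n',
        'oc_kubeconfig': '--config',
        'oc_token': '--token',
    }

    def build_cli_args(option_flags, values):
        # append "<flag> <value>" for every option that actually has a value
        args = []
        for name, flag in option_flags.items():
            value = values.get(name)
            if value:
                args.extend([flag, value])
        return args

    print(build_cli_args(OPTION_FLAGS, {'oc_namespace': 'myproject', 'oc_token': 'abc123'}))
    # ['-n', 'myproject', '--token', 'abc123']
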
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py
new file mode 100644
index 00000000..ed03b3d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/qubes.py
@@ -0,0 +1,159 @@
+# Based on the buildah connection plugin
+# Copyright (c) 2017 Ansible Project
+# 2018 Kushal Das
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+#
+# Written by: Kushal Das (https://github.com/kushaldas)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ connection: qubes
+ short_description: Interact with an existing QubesOS AppVM
+
+ description:
+ - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
+
+ author: Kushal Das (@kushaldas)
+
+
+ options:
+ remote_addr:
+ description:
+ - vm name
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ remote_user:
+ description:
+ - The user to execute as inside the vm.
+        default: The I(user) account, which is the default user in Qubes OS.
+ vars:
+ - name: ansible_user
+# keyword:
+# - name: hosts
+'''
+
+import shlex
+import shutil
+
+import os
+import base64
+import subprocess
+
+import ansible.constants as C
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, ensure_connect
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# this _has to be_ named Connection
+class Connection(ConnectionBase):
+    """This is a connection plugin for qubes: it uses the qvm-run binary to interact with the AppVMs."""
+
+ # String used to identify this Connection class from other classes
+ transport = 'community.general.qubes'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._remote_vmname = self._play_context.remote_addr
+ self._connected = False
+ # Default username in Qubes
+ self.user = "user"
+ if self._play_context.remote_user:
+ self.user = self._play_context.remote_user
+
+ def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
+ """run qvm-run executable
+
+ :param cmd: cmd string for remote system
+        :param in_data: data passed to qvm-run's stdin
+ :return: return code, stdout, stderr
+ """
+        display.vvvv("CMD: %s" % cmd)
+ if not cmd.endswith("\n"):
+ cmd = cmd + "\n"
+ local_cmd = []
+
+ # For dom0
+ local_cmd.extend(["qvm-run", "--pass-io", "--service"])
+ if self.user != "user":
+ # Means we have a remote_user value
+ local_cmd.extend(["-u", self.user])
+
+ local_cmd.append(self._remote_vmname)
+
+ local_cmd.append(shell)
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+        display.vvvv("Local cmd: %s" % (local_cmd,))
+
+ display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+        # Write the actual command to the remote shell via the service's stdin
+ p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
+ stdout, stderr = p.communicate(input=in_data)
+ return p.returncode, stdout, stderr
+
+ def _connect(self):
+ """No persistent connection is being maintained."""
+ super(Connection, self)._connect()
+ self._connected = True
+
+ @ensure_connect
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """Run specified command in a running QubesVM """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.vvvv("CMD IS: %s" % cmd)
+
+ rc, stdout, stderr = self._qubes(cmd)
+
+        display.vvvvv("STDOUT %r STDERR %r" % (stdout, stderr))
+ return rc, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ Place a local file located in 'in_path' inside VM at 'out_path' """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ with open(in_path, "rb") as fobj:
+ source_data = fobj.read()
+
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+ # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
+ # hope it will have appropriate permissions
+ if retcode == 127:
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+
+ if retcode != 0:
+ raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+
+ def fetch_file(self, in_path, out_path):
+ """Obtain file specified via 'in_path' from the container and place it at 'out_path' """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ # We are running in dom0
+ cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
+ with open(out_path, "wb") as fobj:
+ p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
+ p.communicate()
+ if p.returncode != 0:
+ raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
+
+ def close(self):
+ """ Closing the connection """
+ super(Connection, self).close()
+ self._connected = False
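
Stripped of the Ansible plumbing, the qvm-run invocation in _qubes() looks roughly like this sketch, which assumes it runs in dom0 with qvm-run available and uses an example VM name:

    import subprocess

    def qubes_run(vmname, cmd, user=None, service="qubes.VMShell", in_data=None):
        local_cmd = ["qvm-run", "--pass-io", "--service"]
        if user:
            local_cmd += ["-u", user]
        local_cmd += [vmname, service]
        p = subprocess.Popen(local_cmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if not cmd.endswith("\n"):
            cmd += "\n"
        # the command itself goes to the service's stdin, followed by any module input
        p.stdin.write(cmd.encode())
        stdout, stderr = p.communicate(input=in_data)
        return p.returncode, stdout, stderr
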
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py
new file mode 100644
index 00000000..ac521e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/saltstack.py
@@ -0,0 +1,106 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Based on func.py
+# (c) 2014, Michael Scherer <misc@zarb.org>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@mscherer) <misc@zarb.org>
+ connection: saltstack
+ short_description: Allow ansible to piggyback on salt minions
+ description:
+ - This allows you to use existing Saltstack infrastructure to connect to targets.
+'''
+
+import re
+import os
+import pty
+import codecs
+import subprocess
+
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.module_utils.six.moves import cPickle
+
+HAVE_SALTSTACK = False
+try:
+ import salt.client as sc
+ HAVE_SALTSTACK = True
+except ImportError:
+ pass
+
+import os
+from ansible import errors
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ ''' Salt-based connections '''
+
+ has_pipelining = False
+    # while the name of the product is salt, naming this module "salt" causes
+    # trouble with module imports
+ transport = 'community.general.saltstack'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+ self.host = self._play_context.remote_addr
+
+ def _connect(self):
+ if not HAVE_SALTSTACK:
+ raise errors.AnsibleError("saltstack is not installed")
+
+ self.client = sc.LocalClient()
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, sudoable=False, in_data=None):
+ ''' run a command on the remote minion '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ if in_data:
+ raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ self._display.vvv("EXEC %s" % (cmd), host=self.host)
+ # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
+ res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
+ if self.host not in res:
+ raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
+
+ p = res[self.host]
+ return (p['retcode'], p['stdout'], p['stderr'])
+
+ def _normalize_path(self, path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ out_path = self._normalize_path(out_path, '/')
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ with open(in_path, 'rb') as in_fh:
+ content = in_fh.read()
+ self.client.cmd(self.host, 'hashutil.base64_decodefile', [codecs.encode(content, 'base64'), out_path])
+
+ # TODO test it
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path = self._normalize_path(in_path, '/')
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
+ open(out_path, 'wb').write(content)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ pass
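
The salt calls the plugin relies on can be exercised standalone as in this sketch, assuming a reachable salt master with an accepted minion named minion1 and example file paths:

    import codecs

    import salt.client as sc

    client = sc.LocalClient()

    # run a command on the minion ('true;' works around saltstack issue 28077)
    res = client.cmd("minion1", "cmd.exec_code_all", ["bash", "true;" + "uname -a"])
    print(res["minion1"]["retcode"], res["minion1"]["stdout"])

    # push a file by handing its base64-encoded content to hashutil.base64_decodefile
    with open("/etc/hostname", "rb") as in_fh:
        content = in_fh.read()
    client.cmd("minion1", "hashutil.base64_decodefile",
               [codecs.encode(content, "base64"), "/tmp/hostname"])
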
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py
new file mode 100644
index 00000000..755081a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/connection/zone.py
@@ -0,0 +1,200 @@
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ connection: zone
+ short_description: Run tasks in a zone instance
+ description:
+ - Run commands or put/fetch files to an existing zone
+ options:
+ remote_addr:
+ description:
+ - Zone identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_zone_host
+'''
+
+import distutils.spawn
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local zone based connections '''
+
+ transport = 'community.general.zone'
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.zone = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("zone connection requires running as root")
+
+ self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
+ self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
+
+ if self.zone not in self.list_zones():
+ raise AnsibleError("incorrect zone name %s" % self.zone)
+
+ @staticmethod
+ def _search_executable(executable):
+ cmd = distutils.spawn.find_executable(executable)
+ if not cmd:
+ raise AnsibleError("%s command not found in PATH" % executable)
+ return cmd
+
+ def list_zones(self):
+ process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ zones = []
+ for l in process.stdout.readlines():
+ # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
+ s = l.split(':')
+ if s[1] != 'global':
+ zones.append(s[1])
+
+ return zones
+
+ def get_zone_path(self):
+ # solaris10vm# zoneadm -z cswbuild list -p
+ # -:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
+ process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # stdout, stderr = p.communicate()
+ path = process.stdout.readlines()[0].split(':')[3]
+ return path + '/root'
+
+ def _connect(self):
+ ''' connect to the zone; nothing to do here '''
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ ''' run a command on the zone. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+        compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ '''
+ # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
+ # this through /bin/sh -c here. Instead it goes through the shell
+ # that zlogin selects.
+ local_cmd = [self.zlogin_cmd, self.zone, cmd]
+ local_cmd = map(to_bytes, local_cmd)
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.zone)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ ''' run a command on the zone '''
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to zone '''
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(in_path, 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+                    raise AnsibleError("zone connection requires dd command in the zone")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from zone to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("zone connection requires dd command in the zone")
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
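
As an illustration of the list_zones() parsing above, using the line format shown in the comment (the sample output is made up):

    def parse_zoneadm_list(output):
        # keep every zone name except the global zone
        zones = []
        for line in output.splitlines():
            fields = line.split(":")
            if len(fields) > 1 and fields[1] != "global":
                zones.append(fields[1])
        return zones

    sample = ("0:global:running:/::solaris:shared\n"
              "1:work:running:/zones/work:3126dc59:native:shared")
    print(parse_zoneadm_list(sample))  # ['work']
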
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py
new file mode 100644
index 00000000..06872543
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_gcp.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # GCP doc fragment.
+ DOCUMENTATION = r'''
+options:
+ project:
+ description:
+ - The Google Cloud Platform project to use.
+ type: str
+ auth_kind:
+ description:
+ - The type of credential used.
+ type: str
+ required: true
+ choices: [ application, machineaccount, serviceaccount ]
+ service_account_contents:
+ description:
+ - The contents of a Service Account JSON file, either in a dictionary or as a JSON string that represents it.
+ type: jsonarg
+ service_account_file:
+ description:
+ - The path of a Service Account JSON file if serviceaccount is selected as type.
+ type: path
+ service_account_email:
+ description:
+ - An optional service account email address if machineaccount is selected
+ and the user does not wish to use the default email.
+ type: str
+ scopes:
+ description:
+ - Array of scopes to be used.
+ type: list
+ elements: str
+ env_type:
+ description:
+ - Specifies which Ansible environment you're running this module within.
+ - This should not be set unless you know what you're doing.
+ - This only alters the User Agent string for any API requests.
+ type: str
+notes:
+  - For authentication, you can set service_account_file using the
+    C(GCP_SERVICE_ACCOUNT_FILE) env variable.
+  - For authentication, you can set service_account_contents using the
+    C(GCP_SERVICE_ACCOUNT_CONTENTS) env variable.
+ - For authentication, you can set service_account_email using the
+ C(GCP_SERVICE_ACCOUNT_EMAIL) env variable.
+ - For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env
+ variable.
+ - For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
+ - Environment variables values will only be used if the playbook values are
+ not set.
+ - The I(service_account_email) and I(service_account_file) options are
+ mutually exclusive.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py
new file mode 100644
index 00000000..c3d0d3ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/_netapp.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for ONTAP (na_cdot)
+ ONTAP = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ username:
+ required: true
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
+ - Ansible 2.2
+ - netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
+
+notes:
+ - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
+
+'''
+
+ # Documentation fragment for SolidFire
+ SOLIDFIRE = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the SolidFire cluster.
+ username:
+ required: true
+ description:
+ - Please ensure that the user has the adequate permissions. For more information, please read the official documentation
+ U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+
+requirements:
+ - The modules were developed with SolidFire 10.1
+ - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
+
+notes:
+ - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
+
+'''
+
+ # Documentation fragment for ONTAP (na_ontap)
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ type: str
+ required: true
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ required: true
+ aliases: [ pass ]
+ https:
+ description:
+ - Enable and disable https
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+    - This should only be set to C(False) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+    - Whether to use the REST API, if it is supported by the target system for all the resources and attributes the module requires. Otherwise revert to ZAPI.
+    - Always -- will always use the REST API
+    - Never -- will always use the ZAPI
+    - Auto -- will try to use the REST API
+ default: Auto
+ choices: ['Never', 'Always', 'Auto']
+ type: str
+
+
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
+ - Ansible 2.6
+ - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
+ - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
+ - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+
+notes:
+ - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
+
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
new file mode 100644
index 00000000..f9c9640b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Alicloud only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ alicloud_access_key:
+ description:
+ - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY),
+ C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ aliases: ['access_key_id', 'access_key']
+ type: str
+ alicloud_secret_key:
+ description:
+ - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY),
+ C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ aliases: ['secret_access_key', 'secret_key']
+ type: str
+ alicloud_region:
+ description:
+ - The Alibaba Cloud region to use. If not specified then the value of environment variable
+ C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+ aliases: ['region', 'region_id']
+ required: true
+ type: str
+ alicloud_security_token:
+ description:
+ - The Alibaba Cloud security token. If not specified then the value of environment variable
+ C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ aliases: ['security_token']
+ type: str
+ alicloud_assume_role:
+ description:
+ - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
+ - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
+ I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
+ type: dict
+ aliases: ['assume_role']
+ alicloud_assume_role_arn:
+ description:
+      - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string,
+        role switching is not performed and Ansible executes with the provided credentials.
+        It supports the environment variable ALICLOUD_ASSUME_ROLE_ARN.
+ aliases: ['assume_role_arn']
+ type: str
+ alicloud_assume_role_session_name:
+ description:
+ - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
+ 'ansible' is passed to the AssumeRole call as session name. It supports environment variable
+ ALICLOUD_ASSUME_ROLE_SESSION_NAME
+ aliases: ['assume_role_session_name']
+ type: str
+ alicloud_assume_role_session_expiration:
+ description:
+      - The Alibaba Cloud session_expiration. The time after which the established session for assuming
+        the role expires. The valid value range is 900-3600 seconds. Defaults to 3600 (in this case Alicloud uses its own
+        default value). It supports the environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION.
+ aliases: ['assume_role_session_expiration']
+ type: int
+ ecs_role_name:
+ description:
+ - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control'
+ section of the Alibaba Cloud console.
+ - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the
+ metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/<ecs_role_name>) to obtain the STS
+ credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding
+ credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage.
+ aliases: ['role_name']
+ type: str
+ profile:
+ description:
+ - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
+ ALICLOUD_PROFILE environment variable.
+ type: str
+ shared_credentials_file:
+ description:
+ - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
+ environment variable.
+ - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
+ type: str
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+notes:
+ - If parameters are not set within the module, the following
+ environment variables can be used in decreasing order of precedence
+ C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
+ C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
+ C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
+ C(ALICLOUD_SECURITY_TOKEN),
+ C(ALICLOUD_ECS_ROLE_NAME),
+ C(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ C(ALICLOUD_PROFILE),
+ C(ALICLOUD_ASSUME_ROLE_ARN),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the
+ Alicloud region, when required, but this can also be configured in the footmark config file.
+'''
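
A minimal usage sketch for the options above, assuming a module such as community.general.ali_instance_info extends this fragment (the module name and all values are placeholders); any parameter left unset falls back to the ALICLOUD_* environment variables listed in the notes:

    # hypothetical task: explicit credentials override the ALICLOUD_* environment variables
    - name: Gather ECS instance information
      community.general.ali_instance_info:
        alicloud_access_key: "{{ my_access_key }}"
        alicloud_secret_key: "{{ my_secret_key }}"
        alicloud_region: cn-beijing
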
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
new file mode 100644
index 00000000..6f590611
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The resolvable endpoint for the API
+ type: str
+ api_username:
+ description:
+ - The username to use for authentication against the API
+ type: str
+ api_password:
+ description:
+ - The password to use for authentication against the API
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to validate SSL certs when supplying an HTTPS endpoint.
+ type: bool
+ default: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
new file mode 100644
index 00000000..02435e25
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ region:
+ description:
+ - The target region.
+ - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
+ - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
+ - Note that the default value "na" stands for "North America".
+ - The module prepends 'dd-' to the region choice.
+ type: str
+ default: na
+ mcp_user:
+ description:
+ - The username used to authenticate to the CloudControl API.
+ - If not specified, will fall back to the C(MCP_USER) environment variable or C(~/.dimensiondata).
+ type: str
+ mcp_password:
+ description:
+ - The password used to authenticate to the CloudControl API.
+ - If not specified, will fall back to the C(MCP_PASSWORD) environment variable or C(~/.dimensiondata).
+ - Required if I(mcp_user) is specified.
+ type: str
+ location:
+ description:
+ - The target datacenter.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on private instances of the CloudControl API that use self-signed certificates.
+ type: bool
+ default: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
new file mode 100644
index 00000000..ac3deab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data ("wait-for-completion" parameters) doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ wait:
+ description:
+ - Whether to wait for the task to complete before moving on to the next.
+ type: bool
+ default: no
+ wait_time:
+ description:
+ - The maximum amount of time (in seconds) to wait for the task to complete.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 600
+ wait_poll_interval:
+ description:
+ - The amount of time (in seconds) to wait between checks for task completion.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 2
+ '''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py
new file mode 100644
index 00000000..ad3efb1f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/docker.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Docker doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: localhost
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. Default value determined by ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: no
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+ - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+ For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+ - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+ In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+ and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+ DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+requirements:
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
+ install the C(docker) Python module. Note that both modules should *not*
+ be installed at the same time. Also note that when both modules are installed
+ and one of them is uninstalled, the other might no longer function and a
+ reinstall of it is required."
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+ # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+ DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+requirements:
+ - "Python >= 2.7"
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ This module does *not* work with docker-py."
+'''
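
As a rough illustration (the module name community.general.docker_host_info and the certificate paths are assumptions, not taken from this fragment), the TLS-related options could be supplied explicitly, or left to the DOCKER_* environment variables described in the notes:

    # hypothetical task showing the shared docker_* connection options
    - name: Query information about the Docker host
      community.general.docker_host_info:
        docker_host: tcp://192.0.2.23:2376
        validate_certs: yes                     # verify the daemon's TLS certificate
        ca_cert: /etc/docker/certs/ca.pem
        client_cert: /etc/docker/certs/cert.pem
        client_key: /etc/docker/certs/key.pem
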
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py
new file mode 100644
index 00000000..cce76823
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/emc.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Luca Lorenzetto (@remix_tj) <lorenzetto.luca@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for EMC VNX.
+'''
+
+ # Documentation fragment for VNX (emc_vnx)
+ EMC_VNX = r'''
+options:
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+ - Password for accessing SP.
+ type: str
+ default: sysadmin
+requirements:
+ - An EMC VNX Storage device.
+ - Ansible 2.7.
+ - storops (0.5.10 or greater). Install using 'pip install storops'.
+notes:
+ - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py
new file mode 100644
index 00000000..32a595f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hetzner.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ hetzner_user:
+ description: The username for the Robot webservice user.
+ type: str
+ required: yes
+ hetzner_password:
+ description: The password for the Robot webservice user.
+ type: str
+ required: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
new file mode 100644
index 00000000..fa51ccdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
@@ -0,0 +1,35 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HPE 3PAR doc fragment
+ DOCUMENTATION = '''
+options:
+ storage_system_ip:
+ description:
+ - The storage system IP address.
+ type: str
+ required: true
+ storage_system_password:
+ description:
+ - The storage system password.
+ type: str
+ required: true
+ storage_system_username:
+ description:
+ - The storage system user name.
+ type: str
+ required: true
+
+requirements:
+ - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
+ - WSAPI service should be enabled on the 3PAR storage array.
+notes:
+ - Check mode is not supported.
+ '''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py
new file mode 100644
index 00000000..80cd0465
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/hwc.py
@@ -0,0 +1,65 @@
+# Copyright: (c) 2018, Huawei Inc.
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HWC doc fragment.
+ DOCUMENTATION = '''
+options:
+ identity_endpoint:
+ description:
+ - The Identity authentication URL.
+ type: str
+ required: true
+ user:
+ description:
+ - The user name to log in with (currently only user names are
+ supported, and not user IDs).
+ type: str
+ required: true
+ password:
+ description:
+ - The password to log in with.
+ type: str
+ required: true
+ domain:
+ description:
+ - The name of the Domain to scope to (Identity v3).
+ (currently only domain names are supported, and not domain IDs).
+ type: str
+ required: true
+ project:
+ description:
+ - The name of the Tenant (Identity v2) or Project (Identity v3).
+ (currently only project names are supported, and not
+ project IDs).
+ type: str
+ required: true
+ region:
+ description:
+ - The region to which the project belongs.
+ type: str
+ id:
+ description:
+ - The id of resource to be managed.
+ type: str
+notes:
+ - For authentication, you can set identity_endpoint using the
+ C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
+ - For authentication, you can set user using the
+ C(ANSIBLE_HWC_USER) env variable.
+ - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
+ variable.
+ - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
+ variable.
+ - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
+ variable.
+ - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
+ - Environment variables values will only be used if the playbook values are
+ not set.
+'''
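
A minimal sketch of how these options might be passed, assuming a module like community.general.hwc_network_vpc extends the fragment (the module name and all values are illustrative); anything omitted can come from the ANSIBLE_HWC_* environment variables mentioned in the notes:

    # hypothetical task; all identity options shown explicitly
    - name: Create a VPC on Huawei Cloud
      community.general.hwc_network_vpc:
        identity_endpoint: https://iam.example.com/v3
        user: demo_user
        password: "{{ hwc_password }}"
        domain: demo_domain
        project: demo_project
        region: ap-southeast-1
        name: vpc-test
        cidr: 192.168.100.0/24
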
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
new file mode 100644
index 00000000..0d8eb5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # ibm_storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ username:
+ description:
+ - Management user on the spectrum accelerate storage system.
+ type: str
+ required: True
+ password:
+ description:
+ - Password for username on the spectrum accelerate storage system.
+ type: str
+ required: True
+ endpoints:
+ description:
+ - The hostname or management IP of Spectrum Accelerate storage system.
+ type: str
+ required: True
+notes:
+ - This module requires pyxcli python library.
+ Use 'pip install pyxcli' in order to get pyxcli.
+requirements:
+ - python >= 2.7
+ - pyxcli
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
new file mode 100644
index 00000000..a31c84cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for influxdb modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address on which InfluxDB server is listening.
+ - Since Ansible 2.5, defaults to localhost.
+ type: str
+ default: localhost
+ username:
+ description:
+ - Username that will be used to authenticate against InfluxDB server.
+ - Alias C(login_username) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_username ]
+ password:
+ description:
+ - Password that will be used to authenticate against InfluxDB server.
+ - Alias C(login_password) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_password ]
+ port:
+ description:
+ - The port on which InfluxDB server is listening
+ type: int
+ default: 8086
+ path:
+ description:
+ - The path on which InfluxDB server is accessible
+ - Only available when using python-influxdb >= 5.1.0
+ type: str
+ version_added: '0.2.0'
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ ssl:
+ description:
+ - Use https instead of http to connect to InfluxDB server.
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Number of seconds Requests will wait for client to establish a connection.
+ type: int
+ retries:
+ description:
+ - Number of retries client will try before aborting.
+ - C(0) indicates try until success.
+ - Only available when using python-influxdb >= 4.1.0
+ type: int
+ default: 3
+ use_udp:
+ description:
+ - Use UDP to connect to InfluxDB server.
+ type: bool
+ default: false
+ udp_port:
+ description:
+ - UDP port to connect to InfluxDB server.
+ type: int
+ default: 4444
+ proxies:
+ description:
+ - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+ type: dict
+'''
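
A short sketch under the assumption that a module such as community.general.influxdb_database consumes this fragment (module name and values are illustrative only):

    # hypothetical task using the shared InfluxDB connection options
    - name: Ensure a database exists
      community.general.influxdb_database:
        hostname: influx.example.com
        port: 8086
        username: admin
        password: "{{ influxdb_password }}"
        ssl: true                 # use https instead of http
        validate_certs: yes
        database_name: metrics
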
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py
new file mode 100644
index 00000000..47bcee60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ipa.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-18, Ansible Project
+# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for FreeIPA/IPA modules
+ DOCUMENTATION = r'''
+options:
+ ipa_port:
+ description:
+ - Port of FreeIPA / IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
+ - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: int
+ default: 443
+ ipa_host:
+ description:
+ - IP or hostname of IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
+ - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
+ - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: ipa.example.com
+ ipa_user:
+ description:
+ - Administrative account used on IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
+ - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: admin
+ ipa_pass:
+ description:
+ - Password of administrative user.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
+ - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
+ - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate.
+ - If GSSAPI is not available, the usage of 'ipa_pass' is required.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ ipa_prot:
+ description:
+ - Protocol used by IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
+ - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ choices: [ http, https ]
+ default: https
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only be set to C(no) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ ipa_timeout:
+ description:
+ - Specifies idle timeout (in seconds) for the connection.
+ - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
+ - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ type: int
+ default: 10
+'''
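
A hedged example of the parameter-versus-environment fallback described above, assuming community.general.ipa_group as the consuming module (all values are placeholders):

    # hypothetical task; unset parameters fall back to the IPA_* environment variables
    - name: Ensure a FreeIPA group exists
      community.general.ipa_group:
        name: developers
        ipa_host: ipa.example.com
        ipa_user: admin
        ipa_pass: "{{ ipa_admin_password }}"
        validate_certs: yes
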
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
new file mode 100644
index 00000000..e664d7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
+
+ auth_client_id:
+ description:
+ - OpenID Connect I(client_id) to authenticate to the API with.
+ type: str
+ default: admin-cli
+
+ auth_realm:
+ description:
+ - Keycloak realm name to authenticate to for API access.
+ type: str
+ required: true
+
+ auth_client_secret:
+ description:
+ - Client Secret to use in conjunction with I(auth_client_id) (if required).
+ type: str
+
+ auth_username:
+ description:
+ - Username to authenticate for API access with.
+ type: str
+ required: true
+ aliases:
+ - username
+
+ auth_password:
+ description:
+ - Password to authenticate for API access with.
+ type: str
+ required: true
+ aliases:
+ - password
+
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: yes
+'''
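
A minimal sketch, assuming a module such as community.general.keycloak_client extends this fragment (module name, realm, and credentials are illustrative):

    # hypothetical task showing the shared auth_* options
    - name: Manage a Keycloak client
      community.general.keycloak_client:
        auth_keycloak_url: https://keycloak.example.com/auth
        auth_realm: master
        auth_username: admin
        auth_password: "{{ keycloak_admin_password }}"
        validate_certs: yes
        client_id: my-application
        state: present
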
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py
new file mode 100644
index 00000000..1d3c98fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_common_options.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the object being created/updated. Here you can define Kubernetes
+ resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ wait:
+ description:
+ - "I(True) if the module should wait for the resource to get into desired state."
+ type: bool
+ default: yes
+ force:
+ description:
+ - If set to C(no), and I(state) is C(present), an existing object will be replaced.
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - The amount of time in seconds the module should wait for the resource to get into desired state.
+ type: int
+ default: 120
+ wait_sleep:
+ description:
+ - Number of seconds to sleep between checks.
+ default: 5
+ version_added: '0.2.0'
+ memory:
+ description:
+ - The amount of memory to be requested by virtual machine.
+ - For example 1024Mi.
+ type: str
+ memory_limit:
+ description:
+ - The maximum memory to be used by virtual machine.
+ - For example 1024Mi.
+ type: str
+ machine_type:
+ description:
+ - QEMU machine type is the actual chipset of the virtual machine.
+ type: str
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - If more than one merge type is given, the merge types will be tried in order.
+ - "Defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources, as
+ Custom Resource Definitions typically aren't updatable by the usual strategic merge."
+ - "See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)"
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ cpu_shares:
+ description:
+ - "Specify CPU shares."
+ type: int
+ cpu_limit:
+ description:
+ - "Is converted to its millicore value and multiplied by 100. The resulting value is the total amount of CPU time that a container can use
+ every 100ms. A virtual machine cannot use more than its share of CPU time during this interval."
+ type: int
+ cpu_cores:
+ description:
+ - "Number of CPU cores."
+ type: int
+ cpu_model:
+ description:
+ - "CPU model."
+ - "You can check list of available models here: U(https://github.com/libvirt/libvirt/blob/master/src/cpu_map/index.xml)."
+ - "I(Note:) User can define default CPU model via as I(default-cpu-model) in I(kubevirt-config) I(ConfigMap), if not set I(host-model) is used."
+ - "I(Note:) Be sure that node CPU model where you run a VM, has the same or higher CPU family."
+ - "I(Note:) If CPU model wasn't defined, the VM will have CPU model closest to one that used on the node where the VM is running."
+ type: str
+ bootloader:
+ description:
+ - "Specify the bootloader of the virtual machine."
+ - "All virtual machines use BIOS by default for booting."
+ type: str
+ smbios_uuid:
+ description:
+ - "In order to provide a consistent view on the virtualized hardware for the guest OS, the SMBIOS UUID can be set."
+ type: str
+ cpu_features:
+ description:
+ - "List of dictionary to fine-tune features provided by the selected CPU model."
+ - "I(Note): Policy attribute can either be omitted or contain one of the following policies: force, require, optional, disable, forbid."
+ - "I(Note): In case a policy is omitted for a feature, it will default to require."
+ - "More information about policies: U(https://libvirt.org/formatdomain.html#elementsCPU)"
+ type: list
+ headless:
+ description:
+ - "Specify if the virtual machine should have attached a minimal Video and Graphics device configuration."
+ - "By default a minimal Video and Graphics device configuration will be applied to the VirtualMachineInstance. The video device is vga
+ compatible and comes with a memory size of 16 MB."
+ hugepage_size:
+ description:
+ - "Specify huge page size."
+ type: str
+ tablets:
+ description:
+ - "Specify tablets to be used as input devices"
+ type: list
+ hostname:
+ description:
+ - "Specifies the hostname of the virtual machine. The hostname will be set either by dhcp, cloud-init if configured or virtual machine
+ name will be used."
+ subdomain:
+ description:
+ - "If specified, the fully qualified virtual machine hostname will be hostname.subdomain.namespace.svc.cluster_domain. If not specified,
+ the virtual machine will not have a domain name at all. The DNS entry will resolve to the virtual machine, no matter if the virtual machine
+ itself can pick up a hostname."
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+notes:
+ - "In order to use this module you have to install Openshift Python SDK.
+ To ensure it's installed with correct version you can create the following task:
+ I(pip: name=openshift>=0.8.2)"
+'''
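
The requirements note above compresses the SDK installation into I(pip: name=openshift>=0.8.2); spelled out as a regular task it would look roughly like this (a sketch using the standard ansible.builtin.pip module):

    # install the OpenShift Python SDK needed by the kubevirt modules
    - name: Install the openshift Python package
      ansible.builtin.pip:
        name: openshift>=0.8.2
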
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py
new file mode 100644
index 00000000..ba5dc332
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/kubevirt_vm_options.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ disks:
+ description:
+ - List of dictionaries which specify disks of the virtual machine.
+ - "A disk can be made accessible via four different types: I(disk), I(lun), I(cdrom), I(floppy)."
+ - "All possible configuration options are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_disk)"
+ - Each disk must specify a I(volume) that declares the volume type of the disk.
+ All possible configuration options of volume are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_volume).
+ type: list
+ labels:
+ description:
+ - Labels are key/value pairs that are attached to virtual machines. Labels are intended to be used to
+ specify identifying attributes of virtual machines that are meaningful and relevant to users, but do not directly
+ imply semantics to the core system. Labels can be used to organize and to select subsets of virtual machines.
+ Labels can be attached to virtual machines at creation time and subsequently added and modified at any time.
+ - More on labels that are used for internal implementation U(https://kubevirt.io/user-guide/#/misc/annotations_and_labels)
+ type: dict
+ interfaces:
+ description:
+ - An interface defines a virtual network interface of a virtual machine (also called a frontend).
+ - All possible configuration options interfaces are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_interface)
+ - Each interface must specify a I(network) that declares which logical or physical device it is connected to (also called a backend).
+ All possible configuration options of network are available in U(https://kubevirt.io/api-reference/master/definitions.html#_v1_network).
+ type: list
+ cloud_init_nocloud:
+ description:
+ - "Represents a cloud-init NoCloud user-data source. The NoCloud data will be added
+ as a disk to the virtual machine. A proper cloud-init installation is required inside the guest.
+ More information U(https://kubevirt.io/api-reference/master/definitions.html#_v1_cloudinitnocloudsource)"
+ type: dict
+ affinity:
+ description:
+ - "Describes node affinity scheduling rules for the vm."
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose a
+ node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for
+ each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute
+ a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches the corresponding
+ C(term); the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+ the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label update), the
+ system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes corresponding to
+ each C(term) are intersected, i.e. all terms must be satisfied."
+ type: dict
+ node_affinity:
+ description:
+ - "Describes vm affinity scheduling rules e.g. co-locate this vm in the same node, zone, etc. as some other vms"
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node matches the corresponding
+ match_expressions; the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node. If
+ the affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to an update), the system
+ may or may not try to eventually evict the vm from its node."
+ type: dict
+ anti_affinity:
+ description:
+ - "Describes vm anti-affinity scheduling rules e.g. avoid putting this vm in the same node, zone, etc. as some other vms."
+ type: dict
+ suboptions:
+ soft:
+ description:
+ - "The scheduler will prefer to schedule vms to nodes that satisfy the anti-affinity expressions specified by this field, but it may
+ choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights,
+ i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions,
+ etc.), compute a sum by iterating through the elements of this field and adding C(weight) to the sum if the node has vms which matches
+ the corresponding C(term); the nodes with the highest sum are the most preferred."
+ type: dict
+ hard:
+ description:
+ - "If the anti-affinity requirements specified by this field are not met at scheduling time, the vm will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met at some point during vm execution (e.g. due to a vm label
+ update), the system may or may not try to eventually evict the vm from its node. When there are multiple elements, the lists of nodes
+ corresponding to each C(term) are intersected, i.e. all terms must be satisfied."
+ type: dict
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py
new file mode 100644
index 00000000..890c22ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ldap.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard LDAP documentation fragment
+ DOCUMENTATION = r'''
+options:
+ bind_dn:
+ description:
+ - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism.
+ - If this is blank, we'll use an anonymous bind.
+ type: str
+ bind_pw:
+ description:
+ - The password to use with I(bind_dn).
+ type: str
+ dn:
+ required: true
+ description:
+ - The DN of the entry to add or remove.
+ type: str
+ server_uri:
+ description:
+ - A URI to the LDAP server.
+ - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
+ type: str
+ default: ldapi:///
+ start_tls:
+ description:
+ - If true, we'll use the START_TLS LDAP extension.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), SSL certificates will not be validated.
+ - This should only be used on sites using self-signed certificates.
+ type: bool
+ default: yes
+'''
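
A small sketch of these connection options in use, assuming community.general.ldap_entry as the consuming module (DN values and the server URI are illustrative):

    # hypothetical task; omitting bind_dn would attempt a SASL EXTERNAL bind instead
    - name: Ensure an organizational unit exists
      community.general.ldap_entry:
        dn: ou=users,dc=example,dc=com
        objectClass: organizationalUnit
        server_uri: ldap://ldap.example.com
        bind_dn: cn=admin,dc=example,dc=com
        bind_pw: "{{ ldap_admin_password }}"
        start_tls: yes
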
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
new file mode 100644
index 00000000..c55eca16
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pylxca documentation fragment
+ DOCUMENTATION = r'''
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+
+options:
+ login_user:
+ description:
+ - The username for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ login_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ auth_url:
+ description:
+ - The full HTTPS web address of the LXCA server.
+ type: str
+ required: true
+
+requirements:
+ - pylxca
+
+notes:
+ - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
+ - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
+ - Check mode is not supported.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
new file mode 100644
index 00000000..b610b512
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard ManageIQ documentation fragment
+ DOCUMENTATION = r'''
+options:
+ manageiq_connection:
+ description:
+ - ManageIQ connection configuration information.
+ required: false
+ type: dict
+ suboptions:
+ url:
+ description:
+ - ManageIQ environment URL. C(MIQ_URL) env var if set; otherwise, it is required to pass it.
+ type: str
+ required: false
+ username:
+ description:
+ - ManageIQ username. C(MIQ_USERNAME) env var if set; otherwise, required if no token is passed in.
+ type: str
+ password:
+ description:
+ - ManageIQ password. C(MIQ_PASSWORD) env var if set; otherwise, required if no token is passed in.
+ type: str
+ token:
+ description:
+ - ManageIQ token. C(MIQ_TOKEN) env var if set; otherwise, required if no username or password is passed in.
+ type: str
+ validate_certs:
+ description:
+ - Whether SSL certificates should be verified for HTTPS requests. Defaults to True.
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+ ca_cert:
+ description:
+ - The path to a CA bundle file or directory with certificates. Defaults to None.
+ type: str
+ aliases: [ ca_bundle_path ]
+
+requirements:
+ - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
+'''
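
Because the connection settings live in the nested I(manageiq_connection) dictionary, a task would nest them as shown in this sketch (community.general.manageiq_user and all values are assumptions):

    # hypothetical task; MIQ_* environment variables are used for anything omitted here
    - name: Ensure a ManageIQ user exists
      community.general.manageiq_user:
        manageiq_connection:
          url: https://manageiq.example.com
          username: admin
          password: "{{ miq_password }}"
          validate_certs: yes
        userid: jdoe
        name: Jane Doe
        state: present
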
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py
new file mode 100644
index 00000000..cf3130bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nios.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ provider:
+ description:
+ - A dict object containing connection details.
+ type: dict
+ suboptions:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ instance of NIOS WAPI over REST
+ - Value can also be specified using C(INFOBLOX_HOST) environment
+ variable.
+ type: str
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote instance of NIOS.
+ - Value can also be specified using C(INFOBLOX_USERNAME) environment
+ variable.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to
+ the remote instance of NIOS.
+ - Value can also be specified using C(INFOBLOX_PASSWORD) environment
+ variable.
+ type: str
+ validate_certs:
+ description:
+ - Boolean value to enable or disable verifying SSL certificates
+ - Value can also be specified using C(INFOBLOX_SSL_VERIFY) environment
+ variable.
+ type: bool
+ default: no
+ aliases: [ ssl_verify ]
+ http_request_timeout:
+ description:
+ - The amount of time to wait before receiving a response.
+ - Value can also be specified using C(INFOBLOX_HTTP_REQUEST_TIMEOUT) environment
+ variable.
+ type: int
+ default: 10
+ max_retries:
+ description:
+ - Configures the number of attempted retries before the connection
+ is declared usable
+ - Value can also be specified using C(INFOBLOX_MAX_RETRIES) environment
+ variable.
+ type: int
+ default: 3
+ wapi_version:
+ description:
+ - Specifies the version of WAPI to use
+ - Value can also be specified using C(INFOBLOX_WAP_VERSION) environment
+ variable.
+ - Until Ansible 2.8 the default WAPI version was 1.4.
+ type: str
+ default: '2.1'
+ max_results:
+ description:
+ - Specifies the maximum number of objects to be returned,
+ if set to a negative number the appliance will return an error when the
+ number of returned objects would exceed the setting.
+ - Value can also be specified using C(INFOBLOX_MAX_RESULTS) environment
+ variable.
+ type: int
+ default: 1000
+notes:
+ - "This module must be run locally, which can be achieved by specifying C(connection: local)."
+ - Please read the :ref:`nios_guide` for more detailed information on how to use Infoblox with Ansible.
+
+'''
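
The notes require running locally, and all connection details go into the I(provider) dictionary; a sketch assuming community.general.nios_a_record as the consuming module (names and addresses are placeholders):

    # hypothetical play; connection: local is required per the notes above
    - hosts: localhost
      connection: local
      tasks:
        - name: Create an A record in Infoblox NIOS
          community.general.nios_a_record:
            name: host.example.com
            ipv4addr: 192.0.2.10
            state: present
            provider:
              host: nios.example.com
              username: admin
              password: "{{ nios_password }}"
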
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py
new file mode 100644
index 00000000..3845c541
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/nomad.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - FQDN of Nomad server.
+ required: true
+ type: str
+ use_ssl:
+ description:
+ - Use TLS/SSL connection.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - Timeout (in seconds) for the request to Nomad.
+ type: int
+ default: 5
+ validate_certs:
+ description:
+ - Enable TLS/SSL certificate validation.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - Path of certificate for TLS/SSL.
+ type: path
+ client_key:
+ description:
+ - Path of certificate's private key for TLS/SSL.
+ type: path
+ namespace:
+ description:
+ - Namespace for Nomad.
+ type: str
+ token:
+ description:
+ - ACL token for authentication.
+ type: str
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py
new file mode 100644
index 00000000..bbbcbeea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oneview.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # OneView doc fragment
+ DOCUMENTATION = r'''
+options:
+ config:
+ description:
+ - Path to a .json configuration file containing the OneView client configuration.
+ The configuration file is optional and, when used, should be present on the host running the Ansible commands.
+ If the file path is not provided, the configuration will be loaded from environment variables.
+ For links to example configuration files and how to use the environment variables, see the notes section.
+ type: path
+
+requirements:
+ - python >= 2.7.9
+
+notes:
+ - "A sample configuration file for the config parameter can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
+ - "Check how to use environment variables for configuration at:
+ U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
+ - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
+ - "The OneView API version used will directly affect returned and expected fields in resources.
+ Information on setting the desired API version can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
+ '''
+
+ VALIDATEETAG = r'''
+options:
+ validate_etag:
+ description:
+ - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
+ for the resource matches the ETag provided in the data.
+ type: bool
+ default: yes
+'''
+
+ FACTSPARAMS = r'''
+options:
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(filter): A general filter/query string to narrow the list of items returned.
+ - C(sort): The sort order of the returned data set."
+ type: dict
+'''
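
For the FACTSPARAMS fragment, the I(params) dictionary maps directly onto the start/count/filter/sort keys listed above; a sketch assuming community.general.oneview_network_set_info (module name and filter are illustrative):

    # hypothetical task; config points at the optional OneView JSON configuration file
    - name: Gather a filtered, sorted subset of network sets
      community.general.oneview_network_set_info:
        config: /etc/oneview/oneview_config.json
        params:
          start: 0
          count: 3
          sort: name:descending
          filter: name='netset001'
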
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py
new file mode 100644
index 00000000..4ad35bab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/online.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Online OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Online API URL
+ type: str
+ default: 'https://api.online.net'
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Online API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ validate_certs:
+ description:
+ - Validate SSL certs of the Online API.
+ type: bool
+ default: yes
+notes:
+ - Also see the API documentation on U(https://console.online.net/en/api/)
+ - If C(api_token) is not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN).
+ - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
+ environment variable.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
new file mode 100644
index 00000000..08b614a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # OpenNebula common documentation
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The ENDPOINT URL of the XMLRPC server.
+ - If not specified then the value of the ONE_URL environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_endpoint
+ api_username:
+ description:
+ - The name of the user for XMLRPC authentication.
+ - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
+ type: str
+ api_password:
+ description:
+ - The password or token for XMLRPC authentication.
+ - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_token
+ validate_certs:
+ description:
+ - Whether to validate the SSL certificates or not.
+ - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
+ type: bool
+ default: yes
+ wait_timeout:
+ description:
+ - Time to wait for the desired state to be reached before timeout, in seconds.
+ type: int
+ default: 300
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
new file mode 100644
index 00000000..7ab7c155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport. Note this argument
+        does not affect the SSH transport.
+ type: str
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(rest). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task. (cli=22, http=80, https=443). Note
+ this argument does not affect the SSH transport.
+ type: int
+ default: 0 (use common port)
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. This value is used to authenticate
+ either the CLI login or the eAPI authentication depending on which
+ transport is used. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(rest) transports. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device
+ for either connecting or sending commands. If the timeout is
+ exceeded before the operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to
+ the remote device. This argument is only used for the I(cli)
+ transports. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ type: path
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over ssh, cli or REST.
+ required: true
+ type: str
+ choices: [ cli, rest, ssh ]
+ default: ssh
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL if set to C(yes) only when the
+ I(transport) argument is configured as rest. If the transport
+ argument is not I(rest), this value is ignored.
+ type: bool
+ default: yes
+ provider:
+ description:
+ - Convenience method that allows all I(openswitch) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ type: dict
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py
new file mode 100644
index 00000000..776c8f52
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ requirements:
+ - "python >= 2.7"
+ - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+ notes:
+ - For OCI python sdk configuration, please refer to
+ U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
+ options:
+ config_file_location:
+ description:
+ - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
+ if any, is used. Otherwise, defaults to ~/.oci/config.
+ type: str
+ config_profile_name:
+ description:
+ - The profile to load from the config file referenced by C(config_file_location). If not set, then the
+ value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
+ "DEFAULT" profile in C(config_file_location).
+ default: "DEFAULT"
+ type: str
+ api_user:
+ description:
+ - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
+ value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
+ is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
+ please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_fingerprint:
+ description:
+ - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
+ environment variable, if any, is used. This option is required if the key fingerprint is not
+ specified through a configuration file (See C(config_file_location)). To get the key pair's
+ fingerprint value please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_key_file:
+ description:
+ - Full path and filename of the private key (in PEM format). If not set, then the value of the
+ OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
+ not specified through a configuration file (See C(config_file_location)). If the key is encrypted
+ with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
+ type: str
+ api_user_key_pass_phrase:
+ description:
+ - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
+ the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
+ key passphrase is not specified through a configuration file (See C(config_file_location)).
+ type: str
+ auth_type:
+ description:
+ - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
+ authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
+ used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE,
+ if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
+ when running ansible playbooks within an OCI compute instance.
+ choices: ['api_key', 'instance_principal']
+ default: 'api_key'
+ type: str
+ tenancy:
+ description:
+ - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
+ used. This option is required if the tenancy OCID is not specified through a configuration file
+ (See C(config_file_location)). To get the tenancy OCID, please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
+ type: str
+ region:
+ description:
+ - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
+ value of the OCI_REGION variable, if any, is used. This option is required if the region is
+ not specified through a configuration file (See C(config_file_location)). Please refer to
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
+ on OCI regions.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
new file mode 100644
index 00000000..d8f22101
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ force_create:
+            description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an
+                         idempotent operation, and doesn't create the resource if it already exists. Setting this option
+                         to true forcefully creates a copy of the resource, even if it already exists. This option is
+                         mutually exclusive with I(key_by).
+ default: False
+ type: bool
+ key_by:
+ description: The list of comma-separated attributes of this resource which should be used to uniquely
+ identify an instance of the resource. By default, all the attributes of a resource except
+ I(freeform_tags) are used to uniquely identify a resource.
+ type: list
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
new file mode 100644
index 00000000..01f92f18
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ display_name:
+ description: Use I(display_name) along with the other options to return only resources that match the given
+ display name exactly.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
new file mode 100644
index 00000000..9a7b0226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ name:
+ description: Use I(name) along with the other options to return only resources that match the given name
+ exactly.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
new file mode 100644
index 00000000..1d9cae0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ defined_tags:
+ description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more
+ information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ freeform_tags:
+ description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
+ type, or namespace. For more information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
new file mode 100644
index 00000000..248319c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ wait:
+ description: Whether to wait for create or delete operation to complete.
+ default: yes
+ type: bool
+ wait_timeout:
+ description: Time, in seconds, to wait when I(wait=yes).
+ default: 1200
+ type: int
+ wait_until:
+ description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default,
+ when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
+ RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/
+ TERMINATED lifecycle state during delete operation.
+ type: str
+ """
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py
new file mode 100644
index 00000000..43b9b37b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/ovirt_facts.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # info standard oVirt documentation fragment
+ DOCUMENTATION = r'''
+options:
+ fetch_nested:
+ description:
+ - If I(yes) the module will fetch additional data from the API.
+      - It will fetch only the IDs of nested entities; it does not fetch multiple levels of nested attributes,
+        only the attributes of the current entity. Users can configure fetching other
+        attributes of the nested entities by specifying C(nested_attributes).
+ type: bool
+ default: false
+ nested_attributes:
+ description:
+ - Specifies list of the attributes which should be fetched from the API.
+      - This parameter applies only when C(fetch_nested) is I(true).
+ type: list
+ auth:
+ description:
+ - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
+ - C(username)[I(required)] - The name of the user, something like I(admin@internal).
+ Default value is set by I(OVIRT_USERNAME) environment variable.
+ - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
+ - "C(url)- A string containing the API URL of the server, usually
+ something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable.
+ Either C(url) or C(hostname) is required."
+ - "C(hostname) - A string containing the hostname of the server, usually
+ something like `I(server.example.com)`. Default value is set by I(OVIRT_HOSTNAME) environment variable.
+ Either C(url) or C(hostname) is required."
+ - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
+ - "C(insecure) - A boolean flag that indicates if the server TLS
+ certificate and host name should be checked."
+ - "C(ca_file) - A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If `C(ca_file)` parameter is not set, system wide
+ CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
+ - "C(kerberos) - A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+ - "C(headers) - Dictionary of HTTP headers to be added to each API call."
+ type: dict
+ required: true
+requirements:
+ - python >= 2.7
+ - ovirt-engine-sdk-python >= 4.3.0
+notes:
+ - "In order to use this module you have to install oVirt Python SDK.
+    To ensure it's installed with the correct version, you can create the following task:
+ ansible.builtin.pip: name=ovirt-engine-sdk-python version=4.3.0"
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py
new file mode 100644
index 00000000..a207bc35
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/postgres.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Postgres documentation fragment
+ DOCUMENTATION = r'''
+options:
+ login_user:
+ description:
+ - The username used to authenticate with.
+ type: str
+ default: postgres
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+ login_host:
+ description:
+ - Host running the database.
+ type: str
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases: [ login_port ]
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+notes:
+- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
+- To avoid "Peer authentication failed for user postgres" error,
+ use postgres user as a I(become_user).
+- This module uses psycopg2, a Python PostgreSQL database adapter. You must
+ ensure that psycopg2 is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case), then
+ PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
+ on the remote host before using this module.
+- The ca_cert parameter requires at least Postgres version 8.4 and I(psycopg2) version 2.4.3.
+requirements: [ psycopg2 ]
+'''
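
For orientation, a minimal sketch of how the options above map onto libpq connection parameters as accepted by psycopg2; the host, credentials and file paths below are placeholders, not defaults from the fragment.

    import psycopg2

    conn = psycopg2.connect(
        user='postgres',                      # login_user
        password='example-password',          # login_password
        host='db.example.com',                # login_host (omit to use a Unix domain socket)
        port=5432,                            # port
        sslmode='prefer',                     # ssl_mode
        sslrootcert='/etc/ssl/certs/ca.pem',  # ca_cert
        dbname='postgres',
    )
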
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
new file mode 100644
index 00000000..1d0490aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for Proxmox VE modules
+ DOCUMENTATION = r'''
+options:
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: no
+requirements: [ "proxmoxer", "requests" ]
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
new file mode 100644
index 00000000..f35f0267
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+'''
+
+ # Documentation fragment for FlashBlade
+ FB = r'''
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.1
+'''
+
+ # Documentation fragment for FlashArray
+ FA = r'''
+options:
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ required: true
+ api_token:
+ description:
+ - FlashArray API token for admin privileged user.
+ type: str
+ required: true
+notes:
+ - This module requires the C(purestorage) Python library
+ - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
+ if I(fa_url) and I(api_token) arguments are not passed to the module directly
+requirements:
+ - python >= 2.7
+ - purestorage
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
new file mode 100644
index 00000000..0f57dd88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Rackspace only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_key:
+ description:
+ - Rackspace API key, overrides I(credentials).
+ type: str
+ aliases: [ password ]
+ credentials:
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ type: path
+ aliases: [ creds_file ]
+ env:
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ type: str
+ region:
+ description:
+ - Region to create an instance in.
+ type: str
+ username:
+ description:
+ - Rackspace username, overrides I(credentials).
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used, C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
+
+ # Documentation fragment including attributes to enable communication
+ # of other OpenStack clouds. Not all rax modules support this.
+ OPENSTACK = r'''
+options:
+ api_key:
+ type: str
+ description:
+ - Rackspace API key, overrides I(credentials).
+ aliases: [ password ]
+ auth_endpoint:
+ type: str
+ description:
+ - The URI of the authentication service.
+ - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
+ credentials:
+ type: path
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ aliases: [ creds_file ]
+ env:
+ type: str
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ identity_type:
+ type: str
+ description:
+ - Authentication mechanism to use, such as rackspace or keystone.
+ default: rackspace
+ region:
+ type: str
+ description:
+ - Region to create an instance in.
+ tenant_id:
+ type: str
+ description:
+ - The tenant ID used for authentication.
+ tenant_name:
+ type: str
+ description:
+ - The tenant name used for authentication.
+ username:
+ type: str
+ description:
+ - Rackspace username, overrides I(credentials).
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used, C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
new file mode 100644
index 00000000..c1e1b13d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Scaleway OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Scaleway API URL.
+ type: str
+ default: https://api.scaleway.com
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Scaleway API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ query_parameters:
+ description:
+ - List of parameters passed to the query string.
+ type: dict
+ default: {}
+ validate_certs:
+ description:
+ - Validate SSL certs of the Scaleway API.
+ type: bool
+ default: yes
+notes:
+  - Also see the API documentation on U(https://developer.scaleway.com/).
+  - If C(api_token) is not set within the module, the following
+    environment variables can be used in decreasing order of precedence:
+ C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
+ - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL)
+ environment variable.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py
new file mode 100644
index 00000000..413fb496
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/utm.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ headers:
+ description:
+ - A dictionary of additional headers to be sent to POST and PUT requests.
+      - This is needed for some modules.
+ type: dict
+ required: false
+ utm_host:
+ description:
+ - The REST Endpoint of the Sophos UTM.
+ type: str
+ required: true
+ utm_port:
+ description:
+ - The port of the REST interface.
+ type: int
+ default: 4444
+ utm_token:
+ description:
+ - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
+ PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
+ type: str
+ required: true
+ utm_protocol:
+ description:
+ - The protocol of the REST Endpoint.
+ choices: [ http, https ]
+ type: str
+ default: https
+ validate_certs:
+ description:
+ - Whether the REST interface's ssl certificate should be verified or not.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The desired state of the object.
+ - C(present) will create or update an object
+ - C(absent) will delete an object if it was present
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py
new file mode 100644
index 00000000..9f756cc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/vexata.py
@@ -0,0 +1,52 @@
+#
+# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for Vexata VX100 arrays.
+'''
+
+ # Documentation fragment for Vexata VX100 series
+ VX100 = r'''
+options:
+ array:
+ description:
+ - Vexata VX100 array hostname or IPv4 Address.
+ required: true
+ type: str
+ user:
+ description:
+ - Vexata API user with administrative privileges.
+ required: false
+ type: str
+ password:
+ description:
+ - Vexata API user password.
+ required: false
+ type: str
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine.
+ required: false
+ type: bool
+ default: 'no'
+
+requirements:
+ - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
+ - vexatapi >= 0.0.1
+ - python >= 2.7
+ - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
+ user and password arguments are not passed to the module directly.
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
new file mode 100644
index 00000000..747bf02f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for XenServer modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the XenServer host or XenServer pool master.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
+ type: str
+ default: localhost
+ aliases: [ host, pool ]
+ username:
+ description:
+ - The username to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
+ type: str
+ default: root
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
+ type: bool
+ default: yes
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py
new file mode 100644
index 00000000..b2124ed7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/dict_kv.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def dict_kv(value, key):
+ '''Return a dictionary with a single key-value pair
+
+ Example:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ myvar: myvalue
+ tasks:
+ - debug:
+ msg: "{{ myvar | dict_kv('thatsmyvar') }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": {
+ "thatsmyvar": "myvalue"
+ }
+ }
+
+ Example 2:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ common_config:
+ type: host
+ database: all
+ myservers:
+ - server1
+ - server2
+ tasks:
+ - debug:
+ msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": [
+ {
+ "database": "all",
+ "server": "server1",
+ "type": "host"
+ },
+ {
+ "database": "all",
+ "server": "server2",
+ "type": "host"
+ }
+ ]
+ }
+ '''
+ return {key: value}
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'dict_kv': dict_kv
+ }
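
Usage sketch: with dict_kv() as defined above in scope, the docstring examples reduce to plain function calls.

    print(dict_kv('myvalue', 'thatsmyvar'))                        # {'thatsmyvar': 'myvalue'}
    print([dict_kv(s, 'server') for s in ['server1', 'server2']])
    # [{'server': 'server1'}, {'server': 'server2'}]
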
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py
new file mode 100644
index 00000000..e854128f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/jc.py
@@ -0,0 +1,94 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# contributed by Kelly Brazil <kellyjonbrazil@gmail.com>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+import importlib
+
+try:
+    import jc  # imported only to detect availability; the name is later shadowed by the jc() filter function below
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def jc(data, parser, quiet=True, raw=False):
+ """Convert returned command output to JSON using the JC library
+
+ Arguments:
+
+ parser required (string) the correct parser for the input data (e.g. 'ifconfig')
+ see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers.
+ quiet optional (bool) True to suppress warning messages (default is True)
+ raw optional (bool) True to return pre-processed JSON (default is False)
+
+ Returns:
+
+ dictionary or list of dictionaries
+
+ Example:
+
+ - name: run date command
+ hosts: ubuntu
+ tasks:
+ - shell: date
+ register: result
+ - set_fact:
+ myvar: "{{ result.stdout | community.general.jc('date') }}"
+ - debug:
+ msg: "{{ myvar }}"
+
+ produces:
+
+ ok: [192.168.1.239] => {
+ "msg": {
+ "day": 9,
+ "hour": 22,
+ "minute": 6,
+ "month": "Aug",
+ "month_num": 8,
+ "second": 22,
+ "timezone": "UTC",
+ "weekday": "Sun",
+ "weekday_num": 1,
+ "year": 2020
+ }
+ }
+ """
+
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jc" prior to running jc filter')
+
+ try:
+ jc_parser = importlib.import_module('jc.parsers.' + parser)
+ return jc_parser.parse(data, quiet=quiet, raw=raw)
+
+ except Exception as e:
+ raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'jc': jc
+ }
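
Usage sketch: outside Ansible the same conversion can be reproduced with the jc library directly (assuming it is installed), which is essentially what the filter does internally.

    import importlib

    date_parser = importlib.import_module('jc.parsers.date')
    print(date_parser.parse('Sun Aug  9 22:06:22 UTC 2020', quiet=True, raw=False))
    # e.g. {'year': 2020, 'month': 'Aug', 'day': 9, ..., 'timezone': 'UTC'}
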
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py
new file mode 100644
index 00000000..972109a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/json_query.py
@@ -0,0 +1,56 @@
+# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+ '''Query data using jmespath query language ( http://jmespath.org ). Example:
+ - ansible.builtin.debug: msg="{{ instance | json_query(tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ # Hack to handle Ansible String Types
+ # See issue: https://github.com/ansible-collections/community.general/issues/320
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
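
Usage sketch: the docstring example can be checked with the jmespath library itself; the sample data below is invented.

    import jmespath

    instance = {'tagged_instances': [
        {'block_device_mapping': {'/dev/sda1': {'volume_id': 'vol-1'}}},
        {'block_device_mapping': {'/dev/sdb1': {'volume_id': 'vol-2'}}},
    ]}
    print(jmespath.search('tagged_instances[*].block_device_mapping.*.volume_id', instance))
    # [['vol-1'], ['vol-2']]
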
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py
new file mode 100644
index 00000000..aa9f59be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/random_mac.py
@@ -0,0 +1,73 @@
+# (c) 2020 Ansible Project
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from random import Random, SystemRandom
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six import string_types
+
+
+def random_mac(value, seed=None):
+ ''' takes string prefix, and return it completed with random bytes
+ to get a complete 6 bytes MAC address '''
+
+ if not isinstance(value, string_types):
+ raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
+ (type(value), value))
+
+ value = value.lower()
+ mac_items = value.split(':')
+
+ if len(mac_items) > 5:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
+ ' items max' % value)
+
+ err = ""
+ for mac in mac_items:
+ if not mac:
+ err += ",empty item"
+ continue
+ if not re.match('[a-f0-9]{2}', mac):
+ err += ",%s not hexa byte" % mac
+ err = err.strip(',')
+
+ if err:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
+
+ if seed is None:
+ r = SystemRandom()
+ else:
+ r = Random(seed)
+    # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF
+ v = r.randint(68719476736, 1099511627775)
+ # Select first n chars to complement input prefix
+ remain = 2 * (6 - len(mac_items))
+ rnd = ('%x' % v)[:remain]
+ return value + re.sub(r'(..)', r':\1', rnd)
+
+
+class FilterModule:
+ ''' Ansible jinja2 filters '''
+ def filters(self):
+ return {
+ 'random_mac': random_mac,
+ }
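
Usage sketch: with random_mac() as defined above in scope, the filter completes a MAC prefix; a seed makes the result reproducible.

    print(random_mac('52:54:00'))           # e.g. '52:54:00:8f:2a:9c', random on each call
    print(random_mac('52:54:00', seed=42))  # same completion on every run for the same seed
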
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py b/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py
new file mode 100644
index 00000000..3b44ad0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/filter/time.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.errors import AnsibleFilterError
+
+
+UNIT_FACTORS = {
+ 'ms': [],
+ 's': [1000],
+ 'm': [1000, 60],
+ 'h': [1000, 60, 60],
+ 'd': [1000, 60, 60, 24],
+ 'w': [1000, 60, 60, 24, 7],
+ 'mo': [1000, 60, 60, 24, 30],
+ 'y': [1000, 60, 60, 24, 365],
+}
+
+
+UNIT_TO_SHORT_FORM = {
+ 'millisecond': 'ms',
+ 'msec': 'ms',
+ 'msecond': 'ms',
+ 'sec': 's',
+ 'second': 's',
+ 'hour': 'h',
+ 'min': 'm',
+ 'minute': 'm',
+ 'day': 'd',
+ 'week': 'w',
+ 'month': 'mo',
+ 'year': 'y',
+}
+
+
+def multiply(factors):
+ result = 1
+ for factor in factors:
+ result = result * factor
+ return result
+
+
+def to_time_unit(human_time, unit='ms', **kwargs):
+ ''' Return a time unit from a human readable string '''
+ unit_to_short_form = UNIT_TO_SHORT_FORM
+ unit_factors = UNIT_FACTORS
+
+ unit = unit_to_short_form.get(unit.rstrip('s'), unit)
+ if unit not in unit_factors:
+ raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. "
+ "Available units (singular or plural): %s. "
+ "Available short units: %s"
+ % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys())))
+
+ if 'year' in kwargs:
+ unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')]
+ if 'month' in kwargs:
+ unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')]
+
+ if kwargs:
+ raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys()))
+
+ result = 0
+ for h_time_string in human_time.split():
+ res = re.match(r'(-?\d+)(\w+)', h_time_string)
+ if not res:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ h_time_int = int(res.group(1))
+ h_time_unit = res.group(2)
+
+ h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit)
+ if h_time_unit not in unit_factors:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit])
+ result += time_in_milliseconds
+ return round(result / multiply(unit_factors[unit]), 12)
+
+
+def to_milliseconds(human_time, **kwargs):
+    ''' Return milliseconds from a human readable string '''
+ return to_time_unit(human_time, 'ms', **kwargs)
+
+
+def to_seconds(human_time, **kwargs):
+ ''' Return seconds from a human readable string '''
+ return to_time_unit(human_time, 's', **kwargs)
+
+
+def to_minutes(human_time, **kwargs):
+ ''' Return minutes from a human readable string '''
+ return to_time_unit(human_time, 'm', **kwargs)
+
+
+def to_hours(human_time, **kwargs):
+ ''' Return hours from a human readable string '''
+ return to_time_unit(human_time, 'h', **kwargs)
+
+
+def to_days(human_time, **kwargs):
+ ''' Return days from a human readable string '''
+ return to_time_unit(human_time, 'd', **kwargs)
+
+
+def to_weeks(human_time, **kwargs):
+ ''' Return weeks from a human readable string '''
+ return to_time_unit(human_time, 'w', **kwargs)
+
+
+def to_months(human_time, **kwargs):
+ ''' Return months from a human readable string '''
+ return to_time_unit(human_time, 'mo', **kwargs)
+
+
+def to_years(human_time, **kwargs):
+ ''' Return years from a human readable string '''
+ return to_time_unit(human_time, 'y', **kwargs)
+
+
+class FilterModule(object):
+ ''' Ansible time jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'to_time_unit': to_time_unit,
+ 'to_milliseconds': to_milliseconds,
+ 'to_seconds': to_seconds,
+ 'to_minutes': to_minutes,
+ 'to_hours': to_hours,
+ 'to_days': to_days,
+ 'to_weeks': to_weeks,
+ 'to_months': to_months,
+ 'to_years': to_years,
+ }
+
+ return filters
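
Usage sketch: with the filter functions above in scope, a few concrete conversions; the month override follows the keyword arguments handled in to_time_unit().

    print(to_time_unit('1h 30m', 's'))         # 5400.0
    print(to_seconds('1d'))                    # 86400.0
    print(to_hours('90m'))                     # 1.5
    print(to_time_unit('1mo', 'd', month=28))  # 28.0
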
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py
new file mode 100644
index 00000000..0178c2ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/cobbler.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2020 Orion Poplawski <orion@nwra.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Orion Poplawski (@opoplawski)
+ name: cobbler
+ plugin_type: inventory
+ short_description: Cobbler inventory source
+ version_added: 1.0.0
+ description:
+ - Get inventory hosts from the cobbler service.
+ - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+      description: The name of this plugin; it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
+ required: yes
+ choices: [ 'cobbler', 'community.general.cobbler' ]
+ url:
+ description: URL to cobbler.
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ required: no
+ env:
+ - name: COBBLER_USER
+ password:
+ description: Cobbler authentication password
+ required: no
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails
+ type: boolean
+ default: no
+ exclude_profiles:
+ description: Profiles to exclude from inventory
+ type: list
+ default: []
+ elements: str
+ group_by:
+ description: Keys to group hosts by
+ type: list
+ default: [ 'mgmt_classes', 'owners', 'status' ]
+ group:
+ description: Group to place all hosts into
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups
+ default: cobbler_
+ want_facts:
+ description: Toggle, if C(true) the plugin will retrieve host facts from the server
+ type: boolean
+ default: yes
+'''
+
+EXAMPLES = '''
+# my.cobbler.yml
+plugin: community.general.cobbler
+url: http://cobbler/cobbler_api
+user: ansible-tester
+password: secure
+'''
+
+from distutils.version import LooseVersion
+import socket
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.module_utils.six import iteritems
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
+
+# xmlrpc
+try:
+ import xmlrpclib as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+except ImportError:
+ try:
+ import xmlrpc.client as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+ except ImportError:
+ HAS_XMLRPC_CLIENT = False
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using cobbler as source. '''
+
+ NAME = 'community.general.cobbler'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.cobbler_url = None
+ self.exclude_profiles = [] # A list of profiles to exclude
+
+ self.connection = None
+ self.token = None
+
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('cobbler.yaml', 'cobbler.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
+ return valid
+
+ def _get_connection(self):
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
+
+ if self.connection is None:
+ self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
+ self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
+ return self.connection
+
+ def _init_cache(self):
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {}
+
+ def _reload_cache(self):
+ if self.get_option('cache_fallback'):
+ self.display.vvv('Cannot connect to server, loading cache\n')
+ self._options['cache_timeout'] = 0
+ self.load_cache_plugin()
+ self._cache.get(self.cache_key, {})
+
+ def _get_profiles(self):
+ if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_profiles(self.token)
+ else:
+ data = c.get_profiles()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['profiles'] = data
+
+ return self._cache[self.cache_key]['profiles']
+
+ def _get_systems(self):
+ if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_systems(self.token)
+ else:
+ data = c.get_systems()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['systems'] = data
+
+ return self._cache[self.cache_key]['systems']
+
+ def _add_safe_group_name(self, group, child=None):
+ group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+ if child is not None:
+ self.inventory.add_child(group_name, child)
+ return group_name
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.cobbler_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ self.exclude_profiles = self.get_option('exclude_profiles')
+ self.group_by = self.get_option('group_by')
+
+ for profile in self._get_profiles():
+ if profile['parent']:
+ self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
+ if profile['parent'] not in self.exclude_profiles:
+ parent_group_name = self._add_safe_group_name(profile['parent'])
+ self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
+ if profile['name'] not in self.exclude_profiles:
+ group_name = self._add_safe_group_name(profile['name'])
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ self.inventory.add_child(parent_group_name, group_name)
+ else:
+ self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
+                # Create a hierarchy of profile names
+ profile_elements = profile['name'].split('-')
+ i = 0
+ while i < len(profile_elements) - 1:
+ profile_group = '-'.join(profile_elements[0:i + 1])
+ profile_group_child = '-'.join(profile_elements[0:i + 2])
+ if profile_group in self.exclude_profiles:
+ self.display.vvvv('Excluding profile %s\n' % profile_group)
+ break
+ group_name = self._add_safe_group_name(profile_group)
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ child_group_name = self._add_safe_group_name(profile_group_child)
+ self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+ self.inventory.add_child(group_name, child_group_name)
+ i = i + 1
+
+ # Add default group for this inventory if specified
+ self.group = to_safe_group_name(self.get_option('group'))
+ if self.group is not None and self.group != '':
+ self.inventory.add_group(self.group)
+ self.display.vvvv('Added site group %s\n' % self.group)
+
+ for host in self._get_systems():
+ # Get the FQDN for the host and add it to the right groups
+ hostname = host['hostname'] # None
+ interfaces = host['interfaces']
+
+ if host['profile'] in self.exclude_profiles:
+ self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
+ continue
+
+ # hostname is often empty for non-static IP hosts
+ if hostname == '':
+ for (iname, ivalue) in iteritems(interfaces):
+ if ivalue['management'] or not ivalue['static']:
+ this_dns_name = ivalue.get('dns_name', None)
+ if this_dns_name is not None and this_dns_name != "":
+ hostname = this_dns_name
+ self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+
+ if hostname == '':
+ self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+ continue
+
+ self.inventory.add_host(hostname)
+ self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+
+ # Add host to profile group
+ group_name = self._add_safe_group_name(host['profile'], child=hostname)
+ self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+
+ # Add host to groups specified by group_by fields
+ for group_by in self.group_by:
+ if host[group_by] == '<<inherit>>':
+ groups = []
+ else:
+ groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
+ for group in groups:
+ group_name = self._add_safe_group_name(group, child=hostname)
+ self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+
+ # Add to group for this inventory
+ if self.group is not None:
+ self.inventory.add_child(self.group, hostname)
+
+ # Add host variables
+ if self.get_option('want_facts'):
+ try:
+ self.inventory.set_variable(hostname, 'cobbler', host)
+ except ValueError as e:
+ self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py
new file mode 100644
index 00000000..7d92184b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_machine.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_machine
+ plugin_type: inventory
+ author: Ximon Eighteen (@ximon18)
+ short_description: Docker Machine inventory source
+ requirements:
+ - L(Docker Machine,https://docs.docker.com/machine/)
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from Docker Machine.
+ - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
+ - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
+ - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: yes
+ choices: ['docker_machine', 'community.general.docker_machine']
+ daemon_env:
+ description:
+ - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+ - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+ A warning will be issued for any skipped host if the choice is C(require).
+ - With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
+ A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+ - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+ - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+ type: str
+ choices:
+ - require
+ - require-silently
+ - optional
+ - optional-silently
+ - skip
+ default: require
+ running_required:
+ description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+ type: bool
+ default: yes
+ verbose_output:
+ description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.general.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), e.g.:
+# $ docker-machine create --driver digitalocean ... mymachine
+# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+# {
+# ...
+# "digitalocean": {
+# "hosts": [
+# "mymachine"
+# ]
+# ...
+# }
+strict: no
+keyed_groups:
+ - separator: ''
+ key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: no
+keyed_groups:
+ - prefix: tag
+ key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+ ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Docker machine as source. '''
+
+ NAME = 'community.general.docker_machine'
+
+ DOCKER_MACHINE_PATH = None
+
+ def _run_command(self, args):
+ if not self.DOCKER_MACHINE_PATH:
+ try:
+ self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ command = [self.DOCKER_MACHINE_PATH]
+ command.extend(args)
+ display.debug('Executing command {0}'.format(command))
+ try:
+ result = subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+ display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+ raise e
+
+ return to_text(result).strip()
+
+ def _get_docker_daemon_variables(self, machine_name):
+ '''
+ Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+ the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+ '''
+ try:
+ env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+ except subprocess.CalledProcessError:
+ # This can happen when the machine is created but provisioning is incomplete
+ return []
+
+ # example output of docker-machine env --shell=sh:
+ # export DOCKER_TLS_VERIFY="1"
+ # export DOCKER_HOST="tcp://134.209.204.160:2376"
+ # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+ # export DOCKER_MACHINE_NAME="routinator"
+ # # Run this command to configure your shell:
+ # # eval $(docker-machine env --shell=bash routinator)
+
+ # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+ # with the same name and value but with a dm_ name prefix.
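+ # e.g. the sample output above yields [('DOCKER_TLS_VERIFY', '1'), ('DOCKER_HOST', 'tcp://134.209.204.160:2376'), ...]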
+ vars = []
+ for line in env_lines:
+ match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+ if match:
+ env_var_name = match.group(1)
+ env_var_value = match.group(2)
+ vars.append((env_var_name, env_var_value))
+
+ return vars
+
+ def _get_machine_names(self):
+ # Filter out machines that are not in the Running state, as we probably can't do anything useful
+ # with them.
+ ls_command = ['ls', '-q']
+ if self.get_option('running_required'):
+ ls_command.extend(['--filter', 'state=Running'])
+
+ try:
+ ls_lines = self._run_command(ls_command)
+ except subprocess.CalledProcessError:
+ return []
+
+ return ls_lines.splitlines()
+
+ def _inspect_docker_machine_host(self, node):
+ try:
+ inspect_lines = self._run_command(['inspect', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return json.loads(inspect_lines)
+
+ def _ip_addr_docker_machine_host(self, node):
+ try:
+ ip_addr = self._run_command(['ip', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return ip_addr
+
+ def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+ if not env_var_tuples:
+ warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+ if daemon_env in ('require', 'require-silently'):
+ if daemon_env == 'require':
+ display.warning('{0}: host will be skipped'.format(warning_prefix))
+ return True
+ else: # 'optional', 'optional-silently'
+ if daemon_env == 'optional':
+ display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+ return False
+
+ def _populate(self):
+ daemon_env = self.get_option('daemon_env')
+ try:
+ for self.node in self._get_machine_names():
+ self.node_attrs = self._inspect_docker_machine_host(self.node)
+ if not self.node_attrs:
+ continue
+
+ machine_name = self.node_attrs['Driver']['MachineName']
+
+ # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+ # that could be used to set environment variables to influence a local Docker client:
+ if daemon_env == 'skip':
+ env_var_tuples = []
+ else:
+ env_var_tuples = self._get_docker_daemon_variables(machine_name)
+ if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+ continue
+
+ # add an entry in the inventory for this host
+ self.inventory.add_host(machine_name)
+
+ # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
+ # this works around an issue seen with Google Cloud Platform where the IP address was not available
+ # via the 'inspect' subcommand but was via the 'ip' subcommand.
+ if self.node_attrs['Driver']['IPAddress']:
+ ip_addr = self.node_attrs['Driver']['IPAddress']
+ else:
+ ip_addr = self._ip_addr_docker_machine_host(self.node)
+
+ # set standard Ansible remote host connection settings to details captured from `docker-machine`
+ # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
+ self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
+ self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
+ self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
+ self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+
+ # set variables based on Docker Machine tags
+ tags = self.node_attrs['Driver'].get('Tags') or ''
+ self.inventory.set_variable(machine_name, 'dm_tags', tags)
+
+ # set variables based on Docker Machine env variables
+ for kv in env_var_tuples:
+ self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
+ to_native(e), orig_exc=e)
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..e730bd0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/docker_swarm.py
@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ plugin_type: inventory
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+ I(managers) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin, it should always be set to C(community.general.docker_swarm)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ docker_swarm, community.general.docker_swarm ]
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata (e.g. C(Platform), C(Architecture), C(OS),
+ C(EngineVersion))
+ type: bool
+ default: yes
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: no
+ validate_certs:
+ description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
+ host server.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by docker-py.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without additional
+ modification as the value of the I(docker_host) option in Docker Swarm modules when connecting via the API.
+ The port always defaults to C(2376).
+ type: bool
+ default: no
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri)
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: community.general.docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: yes
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.general.docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # e.g. a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.docker.common import update_tls_hostname, get_connect_params
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'community.general.docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
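+ # 2376 is the conventional TLS port of the Docker daemon, 2375 the plain TCP port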
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
new file mode 100644
index 00000000..ce487f2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: gitlab_runners
+ plugin_type: inventory
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for GitLab runners.
+ requirements:
+ - python >= 2.7
+ - python-gitlab > 1.8.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+ options:
+ plugin:
+ description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+ description: The URL of the GitLab server, with protocol (i.e. http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ default: https://gitlab.com
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: filter runners from GitLab API
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+# gitlab_runners.yml
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+strict: False
+keyed_groups:
+ # add e.g. amd64 hosts to an arch_amd64 group
+ - prefix: arch
+ key: 'architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'platform'
+ # create a group per runner tag
+ # e.g. a runner tagged w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'tag_list'
+ prefix: tag
+'''
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils._text import to_native
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+try:
+ import gitlab
+ HAS_GITLAB = True
+except ImportError:
+ HAS_GITLAB = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using GitLab API as source. '''
+
+ NAME = 'community.general.gitlab_runners'
+
+ def _populate(self):
+ gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token'))
+ self.inventory.add_group('gitlab_runners')
+ try:
+ if self.get_option('filter'):
+ runners = gl.runners.all(scope=self.get_option('filter'))
+ else:
+ runners = gl.runners.all()
+ for runner in runners:
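+ # each runner is added to the inventory by its numeric ID, with ansible_host set to its reported ip_address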
+ host = str(runner['id'])
+ ip_address = runner['ip_address']
+ host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+ self.inventory.add_host(host, group='gitlab_runners')
+ self.inventory.set_variable(host, 'ansible_host', ip_address)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
+ except Exception as e:
+ raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml")))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_GITLAB:
+ raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py
new file mode 100644
index 00000000..14ba9df2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/kubevirt.py
@@ -0,0 +1,256 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: kubevirt
+ plugin_type: inventory
+ author:
+ - KubeVirt Team (@kubevirt)
+
+ short_description: KubeVirt inventory source
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Fetch running VirtualMachines for one or more namespaces.
+ - Groups by namespace, namespace_vms and labels.
+ - Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'kubevirt' plugin.
+ required: True
+ choices: ['kubevirt', 'community.general.kubevirt']
+ type: str
+ host_format:
+ description:
+ - Specify the format of the host in the inventory group.
+ default: "{namespace}-{name}-{uid}"
+ connections:
+ type: list
+ description:
+ - Optional list of cluster connection settings. If no connections are provided, the default
+ I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
+ the active user is authorized to access.
+ suboptions:
+ name:
+ description:
+ - Optional name to assign to the cluster. If not provided, a name is constructed from the server
+ and port.
+ type: str
+ kubeconfig:
+ description:
+ - Path to an existing Kubernetes config file. If not provided, and no other connection
+ options are provided, the OpenShift client will attempt to load the default
+ configuration file from I(~/.kube/config.json). Can also be specified via K8S_AUTH_KUBECONFIG
+ environment variable.
+ type: str
+ context:
+ description:
+ - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
+ variable.
+ type: str
+ host:
+ description:
+ - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
+ type: str
+ api_key:
+ description:
+ - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
+ variable.
+ type: str
+ username:
+ description:
+ - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
+ environment variable.
+ type: str
+ password:
+ description:
+ - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
+ environment variable.
+ type: str
+ cert_file:
+ description:
+ - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
+ environment variable.
+ type: str
+ key_file:
+ description:
+ - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
+ environment variable.
+ type: str
+ ssl_ca_cert:
+ description:
+ - Path to a CA certificate used to authenticate with the API. Can also be specified via
+ K8S_AUTH_SSL_CA_CERT environment variable.
+ type: str
+ verify_ssl:
+ description:
+ - "Whether or not to verify the API server's SSL certificates. Can also be specified via
+ K8S_AUTH_VERIFY_SSL environment variable."
+ type: bool
+ namespaces:
+ description:
+ - List of namespaces. If not specified, will fetch all virtual machines for all namespaces the user is authorized
+ to access.
+ type: list
+ network_name:
+ description:
+ - In case of multiple networks attached to a virtual machine, define which interface should be returned as the primary IP
+ address.
+ type: str
+ aliases: [ interface_name ]
+ api_version:
+ description:
+ - "Specify the KubeVirt API version."
+ type: str
+ annotation_variable:
+ description:
+ - "Specify the name of the annotation which provides data, which should be used as inventory host variables."
+ - "Note, that the value in ansible annotations should be json."
+ type: str
+ default: 'ansible'
+ requirements:
+ - "openshift >= 0.6"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# File must be named kubevirt.yaml or kubevirt.yml
+
+# Authenticate with token, and return all virtual machines for all namespaces
+plugin: community.general.kubevirt
+connections:
+ - host: https://kubevirt.io
+ token: xxxxxxxxxxxxxxxx
+ ssl_verify: false
+
+# Use default config (~/.kube/config) file and active context, and return vms with interfaces
+# connected to network myovsnetwork and from namespace vms
+plugin: community.general.kubevirt
+connections:
+ - namespaces:
+ - vms
+ network_name: myovsnetwork
+'''
+
+import json
+
+from ansible_collections.community.kubernetes.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
+
+try:
+ from openshift.dynamic.exceptions import DynamicApiError
+except ImportError:
+ pass
+
+
+API_VERSION = 'kubevirt.io/v1alpha3'
+
+
+class InventoryModule(K8sInventoryModule):
+ NAME = 'community.general.kubevirt'
+
+ def setup(self, config_data, cache, cache_key):
+ self.config_data = config_data
+ super(InventoryModule, self).setup(config_data, cache, cache_key)
+
+ def fetch_objects(self, connections):
+ client = self.get_api_client()
+ vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
+
+ if connections:
+ for connection in connections:
+ client = self.get_api_client(**connection)
+ name = connection.get('name', self.get_default_host_name(client.configuration.host))
+ if connection.get('namespaces'):
+ namespaces = connection['namespaces']
+ else:
+ namespaces = self.get_available_namespaces(client)
+ interface_name = connection.get('network_name')
+ api_version = connection.get('api_version', API_VERSION)
+ annotation_variable = connection.get('annotation_variable', 'ansible')
+ for namespace in namespaces:
+ self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
+ else:
+ name = self.get_default_host_name(client.configuration.host)
+ namespaces = self.get_available_namespaces(client)
+ for namespace in namespaces:
+ self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')
+
+ def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
+ v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
+ try:
+ obj = v1_vm.get(namespace=namespace)
+ except DynamicApiError as exc:
+ self.display.debug(exc)
+ raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
+
+ namespace_group = 'namespace_{0}'.format(namespace)
+ namespace_vms_group = '{0}_vms'.format(namespace_group)
+
+ name = self._sanitize_group_name(name)
+ namespace_group = self._sanitize_group_name(namespace_group)
+ namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
+ self.inventory.add_group(name)
+ self.inventory.add_group(namespace_group)
+ self.inventory.add_child(name, namespace_group)
+ self.inventory.add_group(namespace_vms_group)
+ self.inventory.add_child(namespace_group, namespace_vms_group)
+ for vm in obj.items:
+ if not (vm.status and vm.status.interfaces):
+ continue
+
+ # Find interface by its name:
+ if interface_name is None:
+ interface = vm.status.interfaces[0]
+ else:
+ interface = next(
+ (i for i in vm.status.interfaces if i.name == interface_name),
+ None
+ )
+
+ # If interface is not found or IP address is not reported skip this VM:
+ if interface is None or interface.ipAddress is None:
+ continue
+
+ vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
+ vm_ip = interface.ipAddress
+ vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
+
+ self.inventory.add_host(vm_name)
+
+ if vm.metadata.labels:
+ # create a group for each label_value
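+ # e.g. a label env=prod yields the group 'label_env_prod' (subject to group-name sanitization)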
+ for key, value in vm.metadata.labels:
+ group_name = 'label_{0}_{1}'.format(key, value)
+ group_name = self._sanitize_group_name(group_name)
+ self.inventory.add_group(group_name)
+ self.inventory.add_child(group_name, vm_name)
+ vm_labels = dict(vm.metadata.labels)
+ else:
+ vm_labels = {}
+
+ self.inventory.add_child(namespace_vms_group, vm_name)
+
+ # add hostvars
+ self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
+ self.inventory.set_variable(vm_name, 'labels', vm_labels)
+ self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
+ self.inventory.set_variable(vm_name, 'object_type', 'vm')
+ self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
+ self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
+
+ # Add all variables which are listed in 'ansible' annotation:
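+ # e.g. an 'ansible' annotation containing '{"ansible_user": "fedora"}' sets the hostvar ansible_user=fedora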
+ annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
+ for k, v in annotations_data.items():
+ self.inventory.set_variable(vm_name, k, v)
+
+ def verify_file(self, path):
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
+ return True
+ return False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py
new file mode 100644
index 00000000..c308fb82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/linode.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: linode
+ plugin_type: inventory
+ author:
+ - Luke Murphy (@decentral1se)
+ short_description: Ansible dynamic inventory plugin for Linode.
+ requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+ description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+ - The inventory groups are built from groups and not tags.
+ options:
+ plugin:
+ description: marks this as an instance of the 'linode' plugin
+ required: true
+ choices: ['linode', 'community.general.linode']
+ access_token:
+ description: The Linode account personal access token.
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+ description: Populate inventory with instances in this region.
+ default: []
+ type: list
+ required: false
+ types:
+ description: Populate inventory with instances with this type.
+ default: []
+ type: list
+ required: false
+'''
+
+EXAMPLES = r'''
+# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
+plugin: community.general.linode
+
+# Example with regions, types, groups and access token
+plugin: community.general.linode
+access_token: foobar
+regions:
+ - eu-west
+types:
+ - g5-standard-2
+'''
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six import string_types
+from ansible.plugins.inventory import BaseInventoryPlugin
+
+
+try:
+ from linode_api4 import LinodeClient
+ from linode_api4.errors import ApiError as LinodeApiError
+ HAS_LINODE = True
+except ImportError:
+ HAS_LINODE = False
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = 'community.general.linode'
+
+ def _build_client(self):
+ """Build the Linode client."""
+
+ access_token = self.get_option('access_token')
+
+ if access_token is None:
+ try:
+ access_token = os.environ['LINODE_ACCESS_TOKEN']
+ except KeyError:
+ pass
+
+ if access_token is None:
+ raise AnsibleError((
+ 'Could not retrieve Linode access token '
+ 'from plugin configuration or environment'
+ ))
+
+ self.client = LinodeClient(access_token)
+
+ def _get_instances_inventory(self):
+ """Retrieve Linode instance information from cloud inventory."""
+ try:
+ self.instances = self.client.linode.instances()
+ except LinodeApiError as exception:
+ raise AnsibleError('Linode client raised: %s' % exception)
+
+ def _add_groups(self):
+ """Add Linode instance groups to the dynamic inventory."""
+ self.linode_groups = set(
+ filter(None, [
+ instance.group
+ for instance
+ in self.instances
+ ])
+ )
+
+ for linode_group in self.linode_groups:
+ self.inventory.add_group(linode_group)
+
+ def _filter_by_config(self, regions, types):
+ """Filter instances by user specified configuration."""
+ if regions:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.region.id in regions
+ ]
+
+ if types:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.type.id in types
+ ]
+
+ def _add_instances_to_groups(self):
+ """Add instance names to their dynamic inventory groups."""
+ for instance in self.instances:
+ self.inventory.add_host(instance.label, group=instance.group)
+
+ def _add_hostvars_for_instances(self):
+ """Add hostvars for instances in the dynamic inventory."""
+ for instance in self.instances:
+ hostvars = instance._raw_json
+ for hostvar_key in hostvars:
+ self.inventory.set_variable(
+ instance.label,
+ hostvar_key,
+ hostvars[hostvar_key]
+ )
+
+ def _validate_option(self, name, desired_type, option_value):
+ """Validate user specified configuration data against types."""
+ if isinstance(option_value, string_types) and desired_type == list:
+ option_value = [option_value]
+
+ if option_value is None:
+ option_value = desired_type()
+
+ if not isinstance(option_value, desired_type):
+ raise AnsibleParserError(
+ 'The option %s (%s) must be a %s' % (
+ name, option_value, desired_type
+ )
+ )
+
+ return option_value
+
+ def _get_query_options(self, config_data):
+ """Get user specified query options from the configuration."""
+ options = {
+ 'regions': {
+ 'type_to_be': list,
+ 'value': config_data.get('regions', [])
+ },
+ 'types': {
+ 'type_to_be': list,
+ 'value': config_data.get('types', [])
+ },
+ }
+
+ for name in options:
+ options[name]['value'] = self._validate_option(
+ name,
+ options[name]['type_to_be'],
+ options[name]['value']
+ )
+
+ regions = options['regions']['value']
+ types = options['types']['value']
+
+ return regions, types
+
+ def verify_file(self, path):
+ """Verify the Linode configuration file."""
+ if super(InventoryModule, self).verify_file(path):
+ endings = ('linode.yaml', 'linode.yml')
+ if any((path.endswith(ending) for ending in endings)):
+ return True
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+ """Dynamically parse Linode the cloud inventory."""
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ if not HAS_LINODE:
+ raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
+
+ config_data = self._read_config_data(path)
+ self._build_client()
+
+ self._get_instances_inventory()
+
+ regions, types = self._get_query_options(config_data)
+ self._filter_by_config(regions, types)
+
+ self._add_groups()
+ self._add_instances_to_groups()
+ self._add_hostvars_for_instances()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py
new file mode 100644
index 00000000..6e2efae3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/nmap.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: nmap
+ plugin_type: inventory
+ short_description: Uses nmap to find hosts to target
+ description:
+ - Uses a YAML configuration file with a valid YAML extension.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ requirements:
+ - nmap CLI installed
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'nmap' plugin.
+ required: True
+ choices: ['nmap', 'community.general.nmap']
+ address:
+ description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
+ required: True
+ exclude:
+ description: list of addresses to exclude
+ type: list
+ ports:
+ description: Enable/disable scanning for open ports
+ type: boolean
+ default: True
+ ipv4:
+ description: use IPv4 type addresses
+ type: boolean
+ default: True
+ ipv6:
+ description: use IPv6 type addresses
+ type: boolean
+ default: True
+ notes:
+ - At least one of ipv4 or ipv6 is required to be True; both can be True, but they cannot both be False.
+ - 'TODO: add OS fingerprinting'
+'''
+EXAMPLES = '''
+# inventory.config file in YAML format
+plugin: community.general.nmap
+strict: False
+address: 192.168.0.0/24
+'''
+
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.nmap'
+ find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
+ find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
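+ # example nmap output lines these patterns are meant to match:
+ # Nmap scan report for router.local (192.168.0.1)
+ # 22/tcp open ssh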
+
+ def __init__(self):
+ self._nmap = None
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=False):
+
+ try:
+ self._nmap = get_bin_path('nmap')
+ except ValueError as e:
+ raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ self._read_config_data(path)
+
+ # setup command
+ cmd = [self._nmap]
+ if not self._options['ports']:
+ cmd.append('-sP')
+
+ if self._options['ipv4'] and not self._options['ipv6']:
+ cmd.append('-4')
+ elif self._options['ipv6'] and not self._options['ipv4']:
+ cmd.append('-6')
+ elif not self._options['ipv6'] and not self._options['ipv4']:
+ raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
+
+ if self._options['exclude']:
+ cmd.append('--exclude')
+ cmd.append(','.join(self._options['exclude']))
+
+ cmd.append(self._options['address'])
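+ # e.g. with port scanning disabled and two excluded addresses, cmd is now
+ # ['nmap', '-sP', '--exclude', '10.0.0.5,10.0.0.6', '192.168.0.0/24']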
+ try:
+ # execute
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
+
+ # parse results
+ host = None
+ ip = None
+ ports = []
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
+
+ for line in t_stdout.splitlines():
+ hits = self.find_host.match(line)
+ if hits:
+ if host is not None:
+ self.inventory.set_variable(host, 'ports', ports)
+
+ # if dns only shows arpa, just use ip instead as hostname
+ if hits.group(1).endswith('.in-addr.arpa'):
+ host = hits.group(2)
+ else:
+ host = hits.group(1)
+
+ # if no reverse dns exists, just use ip instead as hostname
+ if hits.group(2) is not None:
+ ip = hits.group(2)
+ else:
+ ip = hits.group(1)
+
+ if host is not None:
+ # update inventory
+ self.inventory.add_host(host)
+ self.inventory.set_variable(host, 'ip', ip)
+ ports = []
+ continue
+
+ host_ports = self.find_port.match(line)
+ if host is not None and host_ports:
+ ports.append({'port': host_ports.group(1), 'protocol': host_ports.group(2), 'state': host_ports.group(3), 'service': host_ports.group(4)})
+ continue
+
+ # TODO: parse more data, OS?
+
+ # if any leftovers
+ if host and ports:
+ self.inventory.set_variable(host, 'ports', ports)
+
+ except Exception as e:
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py
new file mode 100644
index 00000000..d976633a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/online.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: online
+ plugin_type: inventory
+ author:
+ - Remy Leone (@sieben)
+ short_description: Scaleway (previously Online SAS or Online.net) inventory source
+ description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'online' plugin.
+ required: True
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: True
+ description: Online OAuth token.
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ choices:
+ - location
+ - offer
+ - rpn
+'''
+
+EXAMPLES = r'''
+# online_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i online_inventory.yml
+
+plugin: community.general.online
+hostnames:
+ - public_ipv4
+groups:
+ - location
+ - offer
+ - rpn
+'''
+
+import json
+from sys import version as python_version
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ NAME = 'community.general.online'
+ API_ENDPOINT = "https://api.online.net"
+
+ def extract_public_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["ip"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
+ return None
+
+ def extract_private_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["private"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
+ return None
+
+ def extract_os_name(self, host_infos):
+ try:
+ return host_infos["os"]["name"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS name. Information skipped.")
+ return None
+
+ def extract_os_version(self, host_infos):
+ try:
+ return host_infos["os"]["version"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS version. Information skipped.")
+ return None
+
+ def extract_hostname(self, host_infos):
+ try:
+ return host_infos["hostname"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting hostname. Information skipped.")
+ return None
+
+ def extract_location(self, host_infos):
+ try:
+ return host_infos["location"]["datacenter"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting datacenter location. Information skipped.")
+ return None
+
+ def extract_offer(self, host_infos):
+ try:
+ return host_infos["offer"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting commercial offer. Information skipped.")
+ return None
+
+ def extract_rpn(self, host_infos):
+ try:
+ return self.rpn_lookup_cache[host_infos["id"]]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting RPN information. Information skipped.")
+ return None
+
+ def _fetch_information(self, url):
+ try:
+ response = open_url(url, headers=self.headers)
+ except Exception as e:
+ self.display.warning("An error happened while fetching: %s" % url)
+ return None
+
+ try:
+ raw_data = to_text(response.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
+
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ @staticmethod
+ def extract_rpn_lookup_cache(rpn_list):
+ lookup = {}
+ for rpn in rpn_list:
+ for member in rpn["members"]:
+ lookup[member["id"]] = rpn["name"]
+ return lookup
+
+ def _fill_host_variables(self, hostname, host_infos):
+ targeted_attributes = (
+ "offer",
+ "id",
+ "hostname",
+ "location",
+ "boot_mode",
+ "power",
+ "last_reboot",
+ "anti_ddos",
+ "hardware_watch",
+ "support"
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+
+ if self.extract_public_ipv4(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+
+ if self.extract_private_ipv4(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+
+ if self.extract_os_name(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+
+ if self.extract_os_version(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if self.extractors[pref](host_infos):
+ return self.extractors[pref](host_infos)
+
+ return None
+
+ def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+ # No suitable hostname was found in the attributes, so the host won't be in the inventory
+ if not hostname:
+ return
+
+ self.inventory.add_host(host=hostname)
+ self._fill_host_variables(hostname=hostname, host_infos=host_infos)
+
+ for g in group_preferences:
+ group = self.group_extractors[g](host_infos)
+
+ if not group:
+ return
+
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ token = self.get_option("oauth_token")
+ hostname_preferences = self.get_option("hostnames")
+
+ group_preferences = self.get_option("groups")
+ if group_preferences is None:
+ group_preferences = []
+
+ self.extractors = {
+ "public_ipv4": self.extract_public_ipv4,
+ "private_ipv4": self.extract_private_ipv4,
+ "hostname": self.extract_hostname,
+ }
+
+ self.group_extractors = {
+ "location": self.extract_location,
+ "offer": self.extract_offer,
+ "rpn": self.extract_rpn
+ }
+
+ self.headers = {
+ 'Authorization': "Bearer %s" % token,
+ 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ')[0]),
+ 'Content-type': 'application/json'
+ }
+
+ servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
+ servers_api_path = self._fetch_information(url=servers_url)
+
+ if "rpn" in group_preferences:
+ rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
+ rpn_list = self._fetch_information(url=rpn_groups_url)
+ self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
+
+ for server_api_path in servers_api_path:
+
+ server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
+ raw_server_info = self._fetch_information(url=server_url)
+
+ if raw_server_info is None:
+ continue
+
+ self.do_server_inventory(host_infos=raw_server_info,
+ hostname_preferences=hostname_preferences,
+ group_preferences=group_preferences)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py
new file mode 100644
index 00000000..aa9a757a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/proxmox.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: proxmox
+ plugin_type: inventory
+ short_description: Proxmox inventory source
+ version_added: "1.2.0"
+ author:
+ - Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from a Proxmox PVE cluster.
+ - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
+ - Will retrieve the first network interface with an IP for Proxmox nodes.
+ - Can retrieve LXC/QEMU configuration as facts.
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to C(community.general.proxmox) for this plugin to recognize it as its own.
+ required: yes
+ choices: ['community.general.proxmox']
+ type: str
+ url:
+ description: URL to Proxmox cluster.
+ default: 'http://localhost:8006'
+ type: str
+ user:
+ description: Proxmox authentication user.
+ required: yes
+ type: str
+ password:
+ description: Proxmox authentication password.
+ required: yes
+ type: str
+ validate_certs:
+ description: Verify SSL certificate if using HTTPS.
+ type: boolean
+ default: yes
+ group_prefix:
+ description: Prefix to apply to Proxmox groups.
+ default: proxmox_
+ type: str
+ facts_prefix:
+ description: Prefix to apply to LXC/QEMU config facts.
+ default: proxmox_
+ type: str
+ want_facts:
+ description: Gather LXC/QEMU configuration facts.
+ default: no
+ type: bool
+'''
+
+EXAMPLES = '''
+# my.proxmox.yml
+plugin: community.general.proxmox
+url: http://localhost:8006
+user: ansible@pve
+password: secure
+validate_certs: no
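+
+# A hedged, illustrative variant (not part of the original plugin docs): it only
+# combines options documented above (want_facts, group_prefix, facts_prefix) to
+# sketch how configuration facts could be gathered under a custom prefix; the URL
+# and credentials below are placeholders.
+plugin: community.general.proxmox
+url: https://pve.example.invalid:8006
+user: ansible@pve
+password: secure
+validate_certs: yes
+want_facts: yes
+group_prefix: pve_
+facts_prefix: pve_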
+'''
+
+import re
+
+from ansible.module_utils.common._collections_compat import MutableMapping
+from distutils.version import LooseVersion
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using Proxmox as source. '''
+
+ NAME = 'community.general.proxmox'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.proxmox_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('proxmox.yaml', 'proxmox.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" or "proxmox.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_auth(self):
+ credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
+
+ a = self._get_session()
+ ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
+
+ json = ret.json()
+
+ self.credentials = {
+ 'ticket': json['data']['ticket'],
+ 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
+ }
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {'url': ''}
+
+ data = []
+ s = self._get_session()
+ while True:
+ headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
+ ret = s.get(url, headers=headers)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes 'return type' matches a specific query,
+ # it will break if we expand the queries and they dont have different types
+ if 'data' not in json:
+ # /hosts/:id does not have a 'data' key
+ data = json
+ break
+ elif isinstance(json['data'], MutableMapping):
+ # /facts are returned as dict in 'data'
+ data = json['data']
+ break
+ else:
+ # list endpoints return all items under 'data'; the response may be paginated
+ data = data + json['data']
+ break
+
+ self._cache[self.cache_key][url] = data
+
+ return self._cache[self.cache_key][url]
+
+ def _get_nodes(self):
+ return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
+
+ def _get_pools(self):
+ return self._get_json("%s/api2/json/pools" % self.proxmox_url)
+
+ def _get_lxc_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
+
+ def _get_qemu_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
+
+ def _get_members_per_pool(self, pool):
+ ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
+ return ret['members']
+
+ def _get_node_ip(self, node):
+ ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
+
+ for iface in ret:
+ try:
+ return iface['address']
+ except Exception:
+ return None
+
+ def _get_vm_config(self, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
+
+ vmid_key = 'vmid'
+ vmid_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmid_key.lower()))
+ self.inventory.set_variable(name, vmid_key, vmid)
+
+ vmtype_key = 'vmtype'
+ vmtype_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), vmtype_key.lower()))
+ self.inventory.set_variable(name, vmtype_key, vmtype)
+
+ for config in ret:
+ key = config
+ key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), key.lower()))
+ value = ret[config]
+ try:
+ # fixup disk images as they have no key
+ if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
+ value = ('disk_image=' + value)
+
+ if isinstance(value, int) or ',' not in value:
+ value = value
+ # split off strings with commas to a dict
+ else:
+ # skip over any keys that cannot be processed
+ try:
+ value = dict(key.split("=") for key in value.split(","))
+ except Exception:
+ continue
+
+ self.inventory.set_variable(name, key, value)
+ except NameError:
+ return None
+
+ def _get_vm_status(self, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
+
+ status = ret['status']
+ status_key = 'status'
+ status_key = self.to_safe('%s%s' % (self.get_option('facts_prefix'), status_key.lower()))
+ self.inventory.set_variable(name, status_key, status)
+
+ def to_safe(self, word):
+ '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
+ #> ProxmoxInventory.to_safe("foo-bar baz")
+ 'foo_barbaz'
+ '''
+ regex = r"[^A-Za-z0-9\_]"
+ return re.sub(regex, "_", word.replace(" ", ""))
+
+ def _populate(self):
+
+ self._get_auth()
+
+ # gather vm's on nodes
+ for node in self._get_nodes():
+ # FIXME: this can probably be cleaner
+ # create groups
+ lxc_group = 'all_lxc'
+ lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), lxc_group.lower()))
+ self.inventory.add_group(lxc_group)
+ qemu_group = 'all_qemu'
+ qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), qemu_group.lower()))
+ self.inventory.add_group(qemu_group)
+ nodes_group = 'nodes'
+ nodes_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), nodes_group.lower()))
+ self.inventory.add_group(nodes_group)
+ running_group = 'all_running'
+ running_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), running_group.lower()))
+ self.inventory.add_group(running_group)
+ stopped_group = 'all_stopped'
+ stopped_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), stopped_group.lower()))
+ self.inventory.add_group(stopped_group)
+
+ if node.get('node'):
+ self.inventory.add_host(node['node'])
+
+ if node['type'] == 'node':
+ self.inventory.add_child(nodes_group, node['node'])
+
+ # get node IP address
+ ip = self._get_node_ip(node['node'])
+ self.inventory.set_variable(node['node'], 'ansible_host', ip)
+
+ # get LXC containers for this node
+ node_lxc_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_lxc' % node['node']).lower()))
+ self.inventory.add_group(node_lxc_group)
+ for lxc in self._get_lxc_per_node(node['node']):
+ self.inventory.add_host(lxc['name'])
+ self.inventory.add_child(lxc_group, lxc['name'])
+ self.inventory.add_child(node_lxc_group, lxc['name'])
+
+ # get LXC status when want_facts == True
+ if self.get_option('want_facts'):
+ self._get_vm_status(node['node'], lxc['vmid'], 'lxc', lxc['name'])
+ if lxc['status'] == 'stopped':
+ self.inventory.add_child(stopped_group, lxc['name'])
+ elif lxc['status'] == 'running':
+ self.inventory.add_child(running_group, lxc['name'])
+
+ # get LXC config for facts
+ if self.get_option('want_facts'):
+ self._get_vm_config(node['node'], lxc['vmid'], 'lxc', lxc['name'])
+
+ # get QEMU vm's for this node
+ node_qemu_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), ('%s_qemu' % node['node']).lower()))
+ self.inventory.add_group(node_qemu_group)
+ for qemu in self._get_qemu_per_node(node['node']):
+ if qemu['template']:
+ continue
+
+ self.inventory.add_host(qemu['name'])
+ self.inventory.add_child(qemu_group, qemu['name'])
+ self.inventory.add_child(node_qemu_group, qemu['name'])
+
+ # get QEMU status
+ self._get_vm_status(node['node'], qemu['vmid'], 'qemu', qemu['name'])
+ if qemu['status'] == 'stopped':
+ self.inventory.add_child(stopped_group, qemu['name'])
+ elif qemu['status'] == 'running':
+ self.inventory.add_child(running_group, qemu['name'])
+
+ # get QEMU config for facts
+ if self.get_option('want_facts'):
+ self._get_vm_config(node['node'], qemu['vmid'], 'qemu', qemu['name'])
+
+ # gather vm's in pools
+ for pool in self._get_pools():
+ if pool.get('poolid'):
+ pool_group = 'pool_' + pool['poolid']
+ pool_group = self.to_safe('%s%s' % (self.get_option('group_prefix'), pool_group.lower()))
+ self.inventory.add_group(pool_group)
+
+ for member in self._get_members_per_pool(pool['poolid']):
+ if member.get('name'):
+ self.inventory.add_child(pool_group, member['name'])
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_REQUESTS:
+ raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
+ 'https://github.com/psf/requests.')
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.proxmox_url = self.get_option('url')
+ self.proxmox_user = self.get_option('user')
+ self.proxmox_password = self.get_option('password')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ # actually populate inventory
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py
new file mode 100644
index 00000000..4cc16956
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/scaleway.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: scaleway
+ plugin_type: inventory
+ author:
+ - Remy Leone (@sieben)
+ short_description: Scaleway inventory source
+ description:
+ - Get inventory hosts from Scaleway
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'scaleway' plugin.
+ required: True
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region
+ type: list
+ default:
+ - ams1
+ - par1
+ - par2
+ - waw1
+ tags:
+ description: Filter results on a specific tag
+ type: list
+ oauth_token:
+ required: True
+ description: Scaleway OAuth token.
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'set individual variables: keys are variable names and
+ values are templates. Any value returned by the
+ L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
+ can be used.'
+ type: dict
+'''
+
+EXAMPLES = '''
+# scaleway_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i scaleway_inventory.yml
+
+# use hostname as inventory_hostname
+# use the private IP address to connect to the host
+plugin: community.general.scaleway
+regions:
+ - ams1
+ - par1
+tags:
+ - foobar
+hostnames:
+ - hostname
+variables:
+ ansible_host: private_ip
+ state: state
+
+# use hostname as inventory_hostname and public IP address to connect to the host
+plugin: community.general.scaleway
+hostnames:
+ - hostname
+regions:
+ - par1
+variables:
+ ansible_host: public_ip.address
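+
+# Illustrative sketch (not from the original plugin docs): oauth_token is documented
+# above and is normally supplied through SCW_TOKEN/SCW_API_KEY/SCW_OAUTH_TOKEN;
+# setting it inline is shown here only as a sketch, and the token is a placeholder.
+plugin: community.general.scaleway
+oauth_token: 00000000-0000-0000-0000-000000000000
+regions:
+ - waw1
+hostnames:
+ - public_ipv4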
+'''
+
+import json
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
+from ansible.module_utils.urls import open_url
+from ansible.module_utils._text import to_native
+
+import ansible.module_utils.six.moves.urllib.parse as urllib_parse
+
+
+def _fetch_information(token, url):
+ results = []
+ paginated_url = url
+ while True:
+ try:
+ response = open_url(paginated_url,
+ headers={'X-Auth-Token': token,
+ 'Content-type': 'application/json'})
+ except Exception as e:
+ raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
+ try:
+ raw_json = json.loads(response.read())
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ try:
+ results.extend(raw_json["servers"])
+ except KeyError:
+ raise AnsibleError("Incorrect format from the Scaleway API response")
+
+ link = response.headers['Link']
+ if not link:
+ return results
+ relations = parse_pagination_link(link)
+ if 'next' not in relations:
+ return results
+ paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
+
+
+def _build_server_url(api_endpoint):
+ return "/".join([api_endpoint, "servers"])
+
+
+def extract_public_ipv4(server_info):
+ try:
+ return server_info["public_ip"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_private_ipv4(server_info):
+ try:
+ return server_info["private_ip"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_hostname(server_info):
+ try:
+ return server_info["hostname"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_server_id(server_info):
+ try:
+ return server_info["id"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_public_ipv6(server_info):
+ try:
+ return server_info["ipv6"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_tags(server_info):
+ try:
+ return server_info["tags"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_zone(server_info):
+ try:
+ return server_info["location"]["zone_id"]
+ except (KeyError, TypeError):
+ return None
+
+
+extractors = {
+ "public_ipv4": extract_public_ipv4,
+ "private_ipv4": extract_private_ipv4,
+ "public_ipv6": extract_public_ipv6,
+ "hostname": extract_hostname,
+ "id": extract_server_id
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ NAME = 'community.general.scaleway'
+
+ def _fill_host_variables(self, host, server_info):
+ targeted_attributes = (
+ "arch",
+ "commercial_type",
+ "id",
+ "organization",
+ "state",
+ "hostname",
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(host, attribute, server_info[attribute])
+
+ self.inventory.set_variable(host, "tags", server_info["tags"])
+
+ if extract_public_ipv6(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
+
+ if extract_public_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
+
+ if extract_private_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
+
+ def _get_zones(self, config_zones):
+ return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
+
+ def match_groups(self, server_info, tags):
+ server_zone = extract_zone(server_info=server_info)
+ server_tags = extract_tags(server_info=server_info)
+
+ # If a server does not have a zone, it means it is archived
+ if server_zone is None:
+ return set()
+
+ # If no filtering is defined, all tags are valid groups
+ if tags is None:
+ return set(server_tags).union((server_zone,))
+
+ matching_tags = set(server_tags).intersection(tags)
+
+ if not matching_tags:
+ return set()
+ else:
+ return matching_tags.union((server_zone,))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if extractors[pref](host_infos):
+ return extractors[pref](host_infos)
+
+ return None
+
+ def do_zone_inventory(self, zone, token, tags, hostname_preferences):
+ self.inventory.add_group(zone)
+ zone_info = SCALEWAY_LOCATION[zone]
+
+ url = _build_server_url(zone_info["api_endpoint"])
+ raw_zone_hosts_infos = _fetch_information(url=url, token=token)
+
+ for host_infos in raw_zone_hosts_infos:
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+ # No suitable hostname was found in the attributes, so the host won't be in the inventory
+ if not hostname:
+ continue
+
+ groups = self.match_groups(host_infos, tags)
+
+ for group in groups:
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+ self._fill_host_variables(host=hostname, server_info=host_infos)
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ config_zones = self.get_option("regions")
+ tags = self.get_option("tags")
+ token = self.get_option("oauth_token")
+ hostname_preference = self.get_option("hostnames")
+
+ for zone in self._get_zones(config_zones):
+ self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
new file mode 100644
index 00000000..21e1b085
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: stackpath_compute
+ plugin_type: inventory
+ short_description: StackPath Edge Computing inventory source
+ version_added: 1.2.0
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from StackPath Edge Computing.
+ - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
+ options:
+ plugin:
+ description:
+ - A token that ensures this is a source file for the plugin.
+ required: true
+ choices: ['community.general.stackpath_compute']
+ client_id:
+ description:
+ - An OAuth client ID generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ client_secret:
+ description:
+ - An OAuth client secret generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ stack_slugs:
+ description:
+ - A list of stack slugs to query instances in. If not provided, instances in all stacks on the account are returned.
+ type: list
+ elements: str
+ use_internal_ip:
+ description:
+ - Whether or not to use internal IP addresses. If false, external IP addresses are used; otherwise internal ones.
+ - If an instance doesn't have an external IP it will not be returned when this option is set to false.
+ type: bool
+'''
+
+EXAMPLES = '''
+# Example using credentials to fetch all workload instances in a stack.
+---
+plugin: community.general.stackpath_compute
+client_id: my_client_id
+client_secret: my_client_secret
+stack_slugs:
+- my_first_stack_slug
+- my_other_stack_slug
+use_internal_ip: false
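+
+# Illustrative sketch (not from the original plugin docs): omitting stack_slugs is
+# documented above to query all stacks on the account, and use_internal_ip: true
+# selects internal addresses; the credentials are placeholders.
+plugin: community.general.stackpath_compute
+client_id: my_client_id
+client_secret: my_client_secret
+use_internal_ip: true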
+'''
+
+import traceback
+import json
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import (
+ BaseInventoryPlugin,
+ Constructable,
+ Cacheable
+)
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.stackpath_compute'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ # credentials
+ self.client_id = None
+ self.client_secret = None
+ self.stack_slug = None
+ self.api_host = "https://gateway.stackpath.com"
+ self.group_keys = [
+ "stackSlug",
+ "workloadId",
+ "cityCode",
+ "countryCode",
+ "continent",
+ "target",
+ "name",
+ "workloadSlug"
+ ]
+
+ def _validate_config(self, config):
+ if config['plugin'] != 'community.general.stackpath_compute':
+ raise AnsibleError("plugin doesn't match this plugin")
+ try:
+ client_id = config['client_id']
+ if len(client_id) != 32:
+ raise AnsibleError("client_id must be 32 characters long")
+ except KeyError:
+ raise AnsibleError("config missing client_id, a required option")
+ try:
+ client_secret = config['client_secret']
+ if len(client_secret) != 64:
+ raise AnsibleError("client_secret must be 64 characters long")
+ except KeyError:
+ raise AnsibleError("config missing client_id, a required option")
+ return True
+
+ def _set_credentials(self):
+ '''
+ :param config_data: contents of the inventory config file
+ '''
+ self.client_id = self.get_option('client_id')
+ self.client_secret = self.get_option('client_secret')
+
+ def _authenticate(self):
+ payload = json.dumps(
+ {
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "grant_type": "client_credentials",
+ }
+ )
+ headers = {
+ "Content-Type": "application/json",
+ }
+ resp = open_url(
+ self.api_host + '/identity/v1/oauth2/token',
+ headers=headers,
+ data=payload,
+ method="POST"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ self.auth_token = json.loads(body)["access_token"]
+
+ def _query(self):
+ results = []
+ workloads = []
+ self._authenticate()
+ for stack_slug in self.stack_slugs:
+ try:
+ workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
+ except Exception:
+ raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
+ for workload in workloads:
+ try:
+ workload_instances = self._stackpath_query_get_list(
+ self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
+ )
+ except Exception:
+ raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
+ for instance in workload_instances:
+ if instance["phase"] == "RUNNING":
+ instance["stackSlug"] = stack_slug
+ instance["workloadId"] = workload["id"]
+ instance["workloadSlug"] = workload["slug"]
+ instance["cityCode"] = instance["location"]["cityCode"]
+ instance["countryCode"] = instance["location"]["countryCode"]
+ instance["continent"] = instance["location"]["continent"]
+ instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
+ try:
+ if instance[self.hostname_key]:
+ results.append(instance)
+ except KeyError:
+ pass
+ return results
+
+ def _populate(self, instances):
+ for instance in instances:
+ for group_key in self.group_keys:
+ group = group_key + "_" + instance[group_key]
+ group = group.lower().replace(" ", "_").replace("-", "_")
+ self.inventory.add_group(group)
+ self.inventory.add_host(instance[self.hostname_key],
+ group=group)
+
+ def _stackpath_query_get_list(self, url):
+ self._authenticate()
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + self.auth_token,
+ }
+ next_page = True
+ result = []
+ cursor = '-1'
+ while next_page:
+ resp = open_url(
+ url + '?page_request.first=10&page_request.after=%s' % cursor,
+ headers=headers,
+ method="GET"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ body_json = json.loads(body)
+ result.extend(body_json["results"])
+ next_page = body_json["pageInfo"]["hasNextPage"]
+ if next_page:
+ cursor = body_json["pageInfo"]["endCursor"]
+ return result
+
+ def _get_stack_slugs(self, stacks):
+ self.stack_slugs = [stack["slug"] for stack in stacks]
+
+ def verify_file(self, path):
+ '''
+ :param loader: an ansible.parsing.dataloader.DataLoader object
+ :param path: the path to the inventory config file
+ :return the contents of the config file
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
+ return True
+ display.debug(
+ "stackpath_compute inventory filename must end with \
+ 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
+ )
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ config = self._read_config_data(path)
+ self._validate_config(config)
+ self._set_credentials()
+
+ # get user specifications
+ self.use_internal_ip = self.get_option('use_internal_ip')
+ if self.use_internal_ip:
+ self.hostname_key = "ipAddress"
+ else:
+ self.hostname_key = "externalIpAddress"
+
+ self.stack_slugs = self.get_option('stack_slugs')
+ if not self.stack_slugs:
+ try:
+ stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
+ self._get_stack_slugs(stacks)
+ except Exception:
+ raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query()
+
+ self._populate(results)
+
+ # If the cache has expired/doesn't exist or
+ # if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ try:
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+ except Exception:
+ raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py
new file mode 100644
index 00000000..391a83c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/inventory/virtualbox.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: virtualbox
+ plugin_type: inventory
+ short_description: virtualbox inventory source
+ description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'virtualbox' plugin
+ required: True
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: toggles showing all vms vs only those currently running
+ type: boolean
+ default: False
+ settings_password_file:
+ description: provide a file containing the settings password (equivalent to --settingspwfile)
+ network_info_path:
+ description: property path to query for network information (ansible_host)
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: create vars from virtualbox properties
+ type: dictionary
+ default: {}
+'''
+
+EXAMPLES = '''
+# file must be named vbox.yaml or vbox.yml
+simple_config_file:
+ plugin: community.general.virtualbox
+ settings_password_file: /etc/virtualbox/secrets
+ query:
+ logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
+ compose:
+ ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+
+# add hosts (all matching the minishift vm) to the group container if any of the vms are in ansible_inventory
+plugin: community.general.virtualbox
+groups:
+ container: "'minis' in (inventory_hostname)"
+'''
+
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using local virtualbox. '''
+
+ NAME = 'community.general.virtualbox'
+ VBOX = "VBoxManage"
+
+ def __init__(self):
+ self._vbox_path = None
+ super(InventoryModule, self).__init__()
+
+ def _query_vbox_data(self, host, property_path):
+ ret = None
+ try:
+ cmd = [self._vbox_path, b'guestproperty', b'get',
+ to_bytes(host, errors='surrogate_or_strict'),
+ to_bytes(property_path, errors='surrogate_or_strict')]
+ x = Popen(cmd, stdout=PIPE)
+ ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
+ if 'Value' in ipinfo:
+ a, ip = ipinfo.split(':', 1)
+ ret = ip.strip()
+ except Exception:
+ pass
+ return ret
+
+ def _set_variables(self, hostvars):
+
+ # set vars in inventory from hostvars
+ for host in hostvars:
+
+ query = self.get_option('query')
+ # create vars from vbox properties
+ if query and isinstance(query, MutableMapping):
+ for varname in query:
+ hostvars[host][varname] = self._query_vbox_data(host, query[varname])
+
+ strict = self.get_option('strict')
+
+ # create composite vars
+ self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict)
+
+ # actually update inventory
+ for key in hostvars[host]:
+ self.inventory.set_variable(host, key, hostvars[host][key])
+
+ # constructed groups based on conditionals
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict)
+
+ # constructed keyed_groups
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
+
+ def _populate_from_cache(self, source_data):
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ group = self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+ if not source_data:
+ for host in hostvars:
+ self.inventory.add_host(host)
+ self._populate_host_vars([host], hostvars.get(host, {}))
+
+ def _populate_from_source(self, source_data, using_current_cache=False):
+ if using_current_cache:
+ self._populate_from_cache(source_data)
+ return source_data
+
+ cacheable_results = {'_meta': {'hostvars': {}}}
+
+ hostvars = {}
+ prevkey = pref_k = ''
+ current_host = None
+
+ # needed to possibly set ansible_host
+ netinfo = self.get_option('network_info_path')
+
+ for line in source_data:
+ line = to_text(line)
+ if ':' not in line:
+ continue
+ try:
+ k, v = line.split(':', 1)
+ except Exception:
+ # skip lines that cannot be split
+ continue
+
+ if k.strip() == '':
+ # skip empty
+ continue
+
+ v = v.strip()
+ # found host
+ if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
+ current_host = v
+ if current_host not in hostvars:
+ hostvars[current_host] = {}
+ self.inventory.add_host(current_host)
+
+ # try to get network info
+ netdata = self._query_vbox_data(current_host, netinfo)
+ if netdata:
+ self.inventory.set_variable(current_host, 'ansible_host', netdata)
+
+ # found groups
+ elif k == 'Groups':
+ for group in v.split('/'):
+ if group:
+ group = self.inventory.add_group(group)
+ self.inventory.add_child(group, current_host)
+ if group not in cacheable_results:
+ cacheable_results[group] = {'hosts': []}
+ cacheable_results[group]['hosts'].append(current_host)
+ continue
+
+ else:
+ # found vars, accumulate in hostvars for clean inventory set
+ pref_k = 'vbox_' + k.strip().replace(' ', '_')
+ if k.startswith(' '):
+ if prevkey not in hostvars[current_host]:
+ hostvars[current_host][prevkey] = {}
+ hostvars[current_host][prevkey][pref_k] = v
+ else:
+ if v != '':
+ hostvars[current_host][pref_k] = v
+ if self._ungrouped_host(current_host, cacheable_results):
+ if 'ungrouped' not in cacheable_results:
+ cacheable_results['ungrouped'] = {'hosts': []}
+ cacheable_results['ungrouped']['hosts'].append(current_host)
+
+ prevkey = pref_k
+
+ self._set_variables(hostvars)
+ for host in hostvars:
+ h = self.inventory.get_host(host)
+ cacheable_results['_meta']['hostvars'][h.name] = h.vars
+
+ return cacheable_results
+
+ def _ungrouped_host(self, host, inventory):
+ def find_host(host, inventory):
+ for k, v in inventory.items():
+ if k == '_meta':
+ continue
+ if isinstance(v, dict):
+ yield self._ungrouped_host(host, v)
+ elif isinstance(v, list):
+ yield host not in v
+ yield True
+
+ return all([found_host for found_host in find_host(host, inventory)])
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ try:
+ self._vbox_path = get_bin_path(self.VBOX)
+ except ValueError as e:
+ raise AnsibleParserError(e)
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ cache_key = self.get_cache_key(path)
+
+ config_data = self._read_config_data(path)
+
+ # set _options from config data
+ self._consume_options(config_data)
+
+ source_data = None
+ if cache:
+ cache = self.get_option('cache')
+
+ update_cache = False
+ if cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ update_cache = True
+
+ if not source_data:
+ b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru')
+ running = self.get_option('running_only')
+
+ # start getting data
+ cmd = [self._vbox_path, b'list', b'-l']
+ if running:
+ cmd.append(b'runningvms')
+ else:
+ cmd.append(b'vms')
+
+ if b_pwfile and os.path.exists(b_pwfile):
+ cmd.append(b'--settingspwfile')
+ cmd.append(b_pwfile)
+
+ try:
+ p = Popen(cmd, stdout=PIPE)
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ source_data = p.stdout.read().splitlines()
+
+ using_current_cache = cache and not update_cache
+ cacheable_results = self._populate_from_source(source_data, using_current_cache)
+
+ if update_cache:
+ self._cache[cache_key] = cacheable_results
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py
new file mode 100644
index 00000000..46f192b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cartesian.py
@@ -0,0 +1,77 @@
+# (c) 2013, Bradley Young <young.bradley@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: cartesian
+ short_description: returns the cartesian product of lists
+ description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+ - It is clearer with an example: it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ You can see the exact syntax in the examples section.
+ options:
+ _raw:
+ description:
+ - a set of lists
+ required: True
+'''
+
+EXAMPLES = """
+- name: Example of the change in the description
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
+
+- name: loops over the cartesian product of the supplied lists
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cartesian:
+ - "{{list1}}"
+ - "{{list2}}"
+ - [1,2,3,4,5,6]
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of lists composed of elements of the input lists
+ type: list
+ elements: list
+"""
+
+from itertools import product
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+ """
+ Create the cartesian product of lists
+ """
+
+ def _lookup_variables(self, terms):
+ """
+ Turn this:
+ terms == ["1,2,3", "a,b"]
+ into this:
+ terms == [[1,2,3], [a, b]]
+ """
+ results = []
+ for x in terms:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self._lookup_variables(terms)
+
+ my_list = terms[:]
+ if len(my_list) == 0:
+ raise AnsibleError("with_cartesian requires at least one element in each list")
+
+ return [self._flatten(x) for x in product(*my_list)]
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py
new file mode 100644
index 00000000..c3263e88
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/chef_databag.py
@@ -0,0 +1,104 @@
+# (c) 2016, Josh Bradley <jbradley(at)digitalocean.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: chef_databag
+ short_description: fetches data from a Chef Databag
+ description:
+ - "This is a lookup plugin to provide access to chef data bags using the pychef package.
+ It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from,
+ starting from either the given base path or the current working directory.
+ The lookup order mirrors the one from Chef; all folders in the base path are walked back looking for the following configuration
+ files in order: .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
+ requirements:
+ - "pychef (python library https://pychef.readthedocs.io `pip install pychef`)"
+ options:
+ name:
+ description:
+ - Name of the databag
+ required: True
+ item:
+ description:
+ - Item to fetch
+ required: True
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
+
+try:
+ import chef
+ HAS_CHEF = True
+except ImportError as missing_module:
+ HAS_CHEF = False
+
+
+class LookupModule(LookupBase):
+ """
+ Chef data bag lookup module
+ """
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupModule, self).__init__(loader, templar, **kwargs)
+
+ # setup vars for data bag name and data bag item
+ self.name = None
+ self.item = None
+
+ def parse_kv_args(self, args):
+ """
+ parse key-value style arguments
+ """
+
+ for arg in ["name", "item"]:
+ try:
+ arg_raw = args.pop(arg, None)
+ if arg_raw is None:
+ continue
+ parsed = str(arg_raw)
+ setattr(self, arg, parsed)
+ except ValueError:
+ raise AnsibleError(
+ "can't parse arg {0}={1} as string".format(arg, arg_raw)
+ )
+ if args:
+ raise AnsibleError(
+ "unrecognized arguments to with_sequence: %r" % args.keys()
+ )
+
+ def run(self, terms, variables=None, **kwargs):
+ # Ensure pychef has been loaded
+ if not HAS_CHEF:
+ raise AnsibleError('PyChef needed for lookup plugin, try `pip install pychef`')
+
+ for term in terms:
+ self.parse_kv_args(parse_kv(term))
+
+ api_object = chef.autoconfigure()
+
+ if not isinstance(api_object, chef.api.ChefAPI):
+ raise AnsibleError('Unable to connect to Chef Server API.')
+
+ data_bag_object = chef.DataBag(self.name)
+
+ data_bag_item = data_bag_object[self.item]
+
+ return [dict(data_bag_item)]
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py
new file mode 100644
index 00000000..91c50595
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/consul_kv.py
@@ -0,0 +1,191 @@
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: consul_kv
+ short_description: Fetch metadata from a Consul key value store.
+ description:
+ - Lookup metadata for a playbook from the key value store in a Consul cluster.
+ Values can be easily set in the kv store with simple rest commands
+ - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
+ requirements:
+ - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+ options:
+ _raw:
+ description: List of key(s) to retrieve.
+ type: list
+ recurse:
+ type: boolean
+ description: If true, will retrieve all the values that have the given key as prefix.
+ default: False
+ index:
+ description:
+ - If the key has a value with the specified index then this is returned allowing access to historical values.
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ token:
+ description: The acl token to allow access to restricted values.
+ host:
+ default: localhost
+ description:
+ - The target to connect to, must be a resolvable address.
+ Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
+ - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ default: 8500
+ scheme:
+ default: http
+ description:
+ - Whether to use http or https.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ validate_certs:
+ default: True
+ description: Whether to verify the ssl connection or not.
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the ssl connection.
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+ - name: Parameters can be provided after the key to be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+ - name: retrieving a KV from a remote cluster on non default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
+"""
+
+import os
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+
+ HAS_CONSUL = True
+except ImportError as e:
+ HAS_CONSUL = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not HAS_CONSUL:
+ raise AnsibleError(
+ 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ scheme = self.get_option('scheme')
+ host = self.get_option('host')
+ port = self.get_option('port')
+ url = self.get_option('url')
+ if url is not None:
+ u = urlparse(url)
+ if u.scheme:
+ scheme = u.scheme
+ host = u.hostname
+ if u.port is not None:
+ port = u.port
+
+ validate_certs = self.get_option('validate_certs')
+ client_cert = self.get_option('client_cert')
+
+ values = []
+ try:
+ for term in terms:
+ params = self.parse_params(term)
+ consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert)
+
+ results = consul_api.kv.get(params['key'],
+ token=params['token'],
+ index=params['index'],
+ recurse=params['recurse'],
+ dc=params['datacenter'])
+ if results[1]:
+ # responds with a single or list of result maps
+ if isinstance(results[1], list):
+ for r in results[1]:
+ values.append(to_text(r['Value']))
+ else:
+ values.append(to_text(results[1]['Value']))
+ except Exception as e:
+ raise AnsibleError(
+ "Error locating '%s' in kv store. Error was %s" % (term, e))
+
+ return values
+
+ def parse_params(self, term):
+ params = term.split(' ')
+
+ paramvals = {
+ 'key': params[0],
+ 'token': None,
+ 'recurse': False,
+ 'index': None,
+ 'datacenter': None
+ }
+
+ # parameters specified?
+ try:
+ for param in params[1:]:
+ if param and len(param) > 0:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+ paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ return paramvals
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py
new file mode 100644
index 00000000..6ab4d3bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/credstash.py
@@ -0,0 +1,125 @@
+# (c) 2015, Ensighten <infra@ensighten.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: credstash
+ short_description: retrieve secrets from Credstash on AWS
+ requirements:
+ - credstash (python library)
+ description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
+ options:
+ _terms:
+ description: term or list of terms to look up in the credential store
+ type: list
+ required: True
+ table:
+ description: name of the credstash table to query
+ default: 'credential-store'
+ version:
+ description: Credstash version
+ region:
+ description: AWS region
+ profile_name:
+ description: AWS profile to use for authentication
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+ description: AWS access key
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token
+ env:
+ - name: AWS_SESSION_TOKEN
+'''
+
+EXAMPLES = """
+- name: first use credstash to store your secrets
+ ansible.builtin.shell: credstash put my-github-password secure123
+
+- name: "Test credstash lookup plugin -- get my github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}"
+
+- name: "Test credstash lookup plugin -- get my other password from us-west-1"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}"
+
+- name: "Test credstash lookup plugin -- get the company's github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}"
+
+- name: Example play using the 'context' feature
+ hosts: localhost
+ vars:
+ context:
+ app: my_app
+ environment: production
+ tasks:
+
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
+"""
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+CREDSTASH_INSTALLED = False
+
+try:
+ import credstash
+ CREDSTASH_INSTALLED = True
+except ImportError:
+ CREDSTASH_INSTALLED = False
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+
+ if not CREDSTASH_INSTALLED:
+ raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
+
+ ret = []
+ for term in terms:
+ try:
+ version = kwargs.pop('version', '')
+ region = kwargs.pop('region', None)
+ table = kwargs.pop('table', 'credential-store')
+ profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
+ aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
+ aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
+ aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
+ kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
+ val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
+ except credstash.ItemNotFound:
+ raise AnsibleError('Key {0} not found'.format(term))
+ except Exception as e:
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ ret.append(val)
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
new file mode 100644
index 00000000..449cb916
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
@@ -0,0 +1,182 @@
+# (c) 2017, Edward Nunez <edward.nunez@cyberark.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ lookup: cyberarkpassword
+ short_description: get secrets from CyberArk AIM
+ requirements:
+ - CyberArk AIM tool installed
+ description:
+ - Get secrets from CyberArk AIM.
+ options:
+ _command:
+ description: Cyberark CLI utility.
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ required: True
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ required: True
+ output:
+ description:
+ - Specifies the desired output fields separated by commas.
+ - "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
+ default: 'password'
+ _extra:
+ description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
+ notes:
+ - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
+'''
+
+EXAMPLES = """
+ - name: passing options to the lookup
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
+ vars:
+ cyquery:
+ appid: "app_ansible"
+ query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
+ output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
+
+
+ - name: used in a loop
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cyberarkpassword:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
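+
+ # Illustrative sketch (not from the original plugin docs): any extra key in the
+ # query dict is forwarded by this plugin as a generic -p parameter (for example
+ # Reason, as mentioned in the code comments); the value below is a placeholder.
+ - name: passing an extra generic parameter
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery_with_reason) }}'
+ vars:
+ cyquery_with_reason:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ Reason: 'ansible_lookup'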
+"""
+
+RETURN = """
+ password:
+ description:
+ - The actual value stored
+ passprops:
+ description: properties assigned to the entry
+ type: dictionary
+ passwordchangeinprocess:
+ description: did the password change?
+"""
+
+import os
+import subprocess
+from subprocess import PIPE
+from subprocess import Popen
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
+from ansible.module_utils._text import to_bytes, to_text, to_native
+from ansible.utils.display import Display
+
+display = Display()
+
+CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk')
+
+
+class CyberarkPassword:
+
+ def __init__(self, appid=None, query=None, output=None, **kwargs):
+
+ self.appid = appid
+ self.query = query
+ self.output = output
+
+ # Support for Generic parameters to be able to specify
+ # FailRequestOnPasswordChange, Queryformat, Reason, etc.
+ self.extra_parms = []
+ for key, value in kwargs.items():
+ self.extra_parms.append('-p')
+ self.extra_parms.append("%s=%s" % (key, value))
+
+ if self.appid is None:
+ raise AnsibleError("CyberArk Error: No Application ID specified")
+ if self.query is None:
+ raise AnsibleError("CyberArk Error: No Vault query specified")
+
+ if self.output is None:
+ # If no output is specified, return at least the password
+ self.output = "password"
+ else:
+ # To avoid reference issues/confusion to values, all
+ # output 'keys' will be in lowercase.
+ self.output = self.output.lower()
+
+ self.b_delimiter = b"@#@" # Known delimiter to split output results
+
+ def get(self):
+
+ result_dict = {}
+
+ try:
+ all_parms = [
+ CLIPASSWORDSDK_CMD,
+ 'GetPassword',
+ '-p', 'AppDescs.AppID=%s' % self.appid,
+ '-p', 'Query=%s' % self.query,
+ '-o', self.output,
+ '-d', self.b_delimiter]
+ all_parms.extend(self.extra_parms)
+
+ b_credential = b""
+ b_all_params = [to_bytes(v) for v in all_parms]
+ tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate()
+
+ if tmp_output:
+ b_credential = to_bytes(tmp_output)
+
+ if tmp_error:
+ raise AnsibleError("ERROR => %s " % (tmp_error))
+
+ if b_credential and b_credential.endswith(b'\n'):
+ b_credential = b_credential[:-1]
+
+ output_names = self.output.split(",")
+ output_values = b_credential.split(self.b_delimiter)
+
+ for i in range(len(output_names)):
+ if output_names[i].startswith("passprops."):
+ if "passprops" not in result_dict:
+ result_dict["passprops"] = {}
+ output_prop_name = output_names[i][10:]
+ result_dict["passprops"][output_prop_name] = to_native(output_values[i])
+ else:
+ result_dict[output_names[i]] = to_native(output_values[i])
+
+ except subprocess.CalledProcessError as e:
+ raise AnsibleError(e.output)
+ except OSError as e:
+ raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))
+
+ return [result_dict]
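+
+    # Illustration (hypothetical values) of what get() builds and parses, assuming
+    # appid="app_ansible", query="safe=CyberArk_Passwords;folder=root;object=AdminPass"
+    # and output="password,passprops.username". The arguments passed to Popen are
+    # roughly equivalent to:
+    #
+    #   /opt/CARKaim/sdk/clipasswordsdk GetPassword \
+    #       -p AppDescs.AppID=app_ansible \
+    #       -p "Query=safe=CyberArk_Passwords;folder=root;object=AdminPass" \
+    #       -o password,passprops.username -d @#@
+    #
+    # (no shell is involved; each element of all_parms is passed as one argument).
+    # A reply such as b"s3cr3t@#@admin\n" has its trailing newline stripped, is
+    # split on the "@#@" delimiter, and becomes
+    #   [{'password': 's3cr3t', 'passprops': {'username': 'admin'}}]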
+
+
+class LookupModule(LookupBase):
+
+ """
+ USAGE:
+
+ """
+
+ def run(self, terms, variables=None, **kwargs):
+
+ display.vvvv("%s" % terms)
+ if isinstance(terms, list):
+ return_values = []
+ for term in terms:
+ display.vvvv("Term: %s" % term)
+ cyberark_conn = CyberarkPassword(**term)
+ return_values.append(cyberark_conn.get())
+ return return_values
+ else:
+ cyberark_conn = CyberarkPassword(**terms)
+ result = cyberark_conn.get()
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py
new file mode 100644
index 00000000..6dc8fc6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dig.py
@@ -0,0 +1,356 @@
+# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: dig
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ short_description: query DNS using the dnspython library
+ requirements:
+ - dnspython (python library, http://www.dnspython.org/)
+ description:
+      - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
+        It is possible to look up any DNS record in this manner.
+      - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+        It is also possible to explicitly specify the DNS server(s) to use for lookups.
+      - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN.
+      - In addition to the (default) A record, it is also possible to specify a different record type that should be queried.
+        This can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
+      - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
+        In such cases you may want to pass the option wantlist=True to the plugin, which will result in the record values being returned as a list
+        over which you can iterate later on.
+      - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
+        It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
+        This needs to be passed in as an additional parameter to the lookup.
+ options:
+ _terms:
+ description: domain(s) to query
+ qtype:
+ description: record type to query
+ default: 'A'
+ choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
+ flat:
+        description: If 0, each record is returned as a dictionary; otherwise, each record is returned as a string.
+ default: 1
+ notes:
+      - ALL is not a record type per se; rather, the listed fields are available for any record results you retrieve in the form of a dictionary.
+ - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
+ - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
+ Syntax for specifying the record type is shown in the examples below.
+ - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
+'''
+
+EXAMPLES = """
+- name: Simple A record (IPV4 address) lookup for example.com
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.com.')}}"
+
+- name: "The TXT record for example.org."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}"
+
+- name: "The TXT record for example.org, alternative syntax."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}"
+
+- name: use in a loop
+ ansible.builtin.debug:
+ msg: "MX record for gmail.com {{ item }}"
+ with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}"
+
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
+- ansible.builtin.debug:
+ msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
+
+- ansible.builtin.debug:
+ msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
+ with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+      - List of composed strings or dictionaries with key and value.
+        If a dictionary, the fields below show the keys returned, depending on the query type.
+ type: list
+ elements: raw
+ contains:
+ ALL:
+ description:
+ - owner, ttl, type
+ A:
+ description:
+ - address
+ AAAA:
+ description:
+ - address
+ CNAME:
+ description:
+ - target
+ DNAME:
+ description:
+ - target
+ DLV:
+ description:
+ - algorithm, digest_type, key_tag, digest
+ DNSKEY:
+ description:
+ - flags, algorithm, protocol, key
+ DS:
+ description:
+ - algorithm, digest_type, key_tag, digest
+ HINFO:
+ description:
+ - cpu, os
+ LOC:
+ description:
+ - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
+ MX:
+ description:
+ - preference, exchange
+ NAPTR:
+ description:
+ - order, preference, flags, service, regexp, replacement
+ NS:
+ description:
+ - target
+ NSEC3PARAM:
+ description:
+ - algorithm, flags, iterations, salt
+ PTR:
+ description:
+ - target
+ RP:
+ description:
+ - mbox, txt
+ SOA:
+ description:
+ - mname, rname, serial, refresh, retry, expire, minimum
+ SPF:
+ description:
+ - strings
+ SRV:
+ description:
+ - priority, weight, port, target
+ SSHFP:
+ description:
+ - algorithm, fp_type, fingerprint
+ TLSA:
+ description:
+ - usage, selector, mtype, cert
+ TXT:
+ description:
+ - strings
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native
+import socket
+
+try:
+ import dns.exception
+ import dns.name
+ import dns.resolver
+ import dns.reversename
+ import dns.rdataclass
+ from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
+ MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
+ HAVE_DNS = True
+except ImportError:
+ HAVE_DNS = False
+
+
+def make_rdata_dict(rdata):
+ ''' While the 'dig' lookup plugin supports anything which dnspython supports
+ out of the box, the following supported_types list describes which
+ DNS query types we can convert to a dict.
+
+ Note: adding support for RRSIG is hard work. :)
+ '''
+ supported_types = {
+ A: ['address'],
+ AAAA: ['address'],
+ CNAME: ['target'],
+ DNAME: ['target'],
+ DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
+ DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
+ DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
+ HINFO: ['cpu', 'os'],
+ LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
+ MX: ['preference', 'exchange'],
+ NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
+ NS: ['target'],
+ NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
+ PTR: ['target'],
+ RP: ['mbox', 'txt'],
+ # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
+ SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
+ SPF: ['strings'],
+ SRV: ['priority', 'weight', 'port', 'target'],
+ SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
+ TLSA: ['usage', 'selector', 'mtype', 'cert'],
+ TXT: ['strings'],
+ }
+
+ rd = {}
+
+ if rdata.rdtype in supported_types:
+ fields = supported_types[rdata.rdtype]
+ for f in fields:
+ val = rdata.__getattribute__(f)
+
+ if isinstance(val, dns.name.Name):
+ val = dns.name.Name.to_text(val)
+
+ if rdata.rdtype == DLV and f == 'digest':
+ val = dns.rdata._hexify(rdata.digest).replace(' ', '')
+ if rdata.rdtype == DS and f == 'digest':
+ val = dns.rdata._hexify(rdata.digest).replace(' ', '')
+ if rdata.rdtype == DNSKEY and f == 'key':
+ val = dns.rdata._base64ify(rdata.key).replace(' ', '')
+ if rdata.rdtype == NSEC3PARAM and f == 'salt':
+ val = dns.rdata._hexify(rdata.salt).replace(' ', '')
+ if rdata.rdtype == SSHFP and f == 'fingerprint':
+ val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
+ if rdata.rdtype == TLSA and f == 'cert':
+ val = dns.rdata._hexify(rdata.cert).replace(' ', '')
+
+ rd[f] = val
+
+ return rd
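+
+# Illustration (not part of the original logic): for an MX rdata object,
+# make_rdata_dict() returns something like
+#   {'preference': 10, 'exchange': 'mail.example.com.'}
+# and the caller in LookupModule.run() adds the 'owner', 'type', 'ttl' and
+# 'class' keys on top of that when flat=0 is requested.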
+
+
+# ==============================================================
+# dig: Lookup DNS records
+#
+# --------------------------------------------------------------
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ '''
+ terms contains a string with things to `dig' for. We support the
+ following formats:
+ example.com # A record
+ example.com qtype=A # same
+ example.com/TXT # specific qtype
+ example.com qtype=txt # same
+ 192.0.2.23/PTR # reverse PTR
+ ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
+ example.net/AAAA @nameserver # query specified server
+ ^^^ can be comma-sep list of names/addresses
+
+ ... flat=0 # returns a dict; default is 1 == string
+ '''
+
+ if HAVE_DNS is False:
+ raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
+
+ # Create Resolver object so that we can set NS if necessary
+ myres = dns.resolver.Resolver(configure=True)
+ edns_size = 4096
+ myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
+
+ domain = None
+ qtype = 'A'
+ flat = True
+ rdclass = dns.rdataclass.from_text('IN')
+
+ for t in terms:
+ if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok.
+ nsset = t[1:].split(',')
+ for ns in nsset:
+ nameservers = []
+ # Check if we have a valid IP address. If so, use that, otherwise
+ # try to resolve name to address using system's resolver. If that
+ # fails we bail out.
+ try:
+ socket.inet_aton(ns)
+ nameservers.append(ns)
+ except Exception:
+ try:
+ nsaddr = dns.resolver.query(ns)[0].address
+ nameservers.append(nsaddr)
+ except Exception as e:
+ raise AnsibleError("dns lookup NS: %s" % to_native(e))
+ myres.nameservers = nameservers
+ continue
+ if '=' in t:
+ try:
+ opt, arg = t.split('=')
+ except Exception:
+ pass
+
+ if opt == 'qtype':
+ qtype = arg.upper()
+ elif opt == 'flat':
+ flat = int(arg)
+ elif opt == 'class':
+ try:
+ rdclass = dns.rdataclass.from_text(arg)
+ except Exception as e:
+ raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+
+ continue
+
+ if '/' in t:
+ try:
+ domain, qtype = t.split('/')
+ except Exception:
+ domain = t
+ else:
+ domain = t
+
+ # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
+
+ ret = []
+
+ if qtype.upper() == 'PTR':
+ try:
+ n = dns.reversename.from_address(domain)
+ domain = n.to_text()
+ except dns.exception.SyntaxError:
+ pass
+ except Exception as e:
+ raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
+
+ try:
+ answers = myres.query(domain, qtype, rdclass=rdclass)
+ for rdata in answers:
+ s = rdata.to_text()
+ if qtype.upper() == 'TXT':
+ s = s[1:-1] # Strip outside quotes on TXT rdata
+
+ if flat:
+ ret.append(s)
+ else:
+ try:
+ rd = make_rdata_dict(rdata)
+ rd['owner'] = answers.canonical_name.to_text()
+ rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
+ rd['ttl'] = answers.rrset.ttl
+ rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
+
+ ret.append(rd)
+ except Exception as e:
+ ret.append(str(e))
+
+ except dns.resolver.NXDOMAIN:
+ ret.append('NXDOMAIN')
+ except dns.resolver.NoAnswer:
+ ret.append("")
+ except dns.resolver.Timeout:
+ ret.append('')
+ except dns.exception.DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+ return ret
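+
+# Sketch of the two shapes run() can return (illustrative values only):
+#   flat=1 (default): ['192.0.2.10']
+#   flat=0:           [{'owner': 'example.com.', 'type': 'A', 'ttl': 300,
+#                       'class': 'IN', 'address': '192.0.2.10'}]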
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py
new file mode 100644
index 00000000..19e28e1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dnstxt.py
@@ -0,0 +1,96 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: dnstxt
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+    short_description: query one or more domains' DNS TXT fields
+ requirements:
+ - dns/dns.resolver (python library)
+ description:
+ - Uses a python library to return the DNS TXT record for a domain.
+ options:
+ _terms:
+ description: domain or list of domains to query TXT records from
+ required: True
+ type: list
+'''
+
+EXAMPLES = """
+- name: show txt entry
+ ansible.builtin.debug:
+ msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
+
+- name: iterate over txt entries
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt:
+ - 'test.example.com'
+ - 'other.example.com'
+ - 'last.example.com'
+
+- name: iterate over a comma-delimited DNS TXT entry
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - values returned by the DNS TXT record.
+ type: list
+"""
+
+HAVE_DNS = False
+try:
+ import dns.resolver
+ from dns.exception import DNSException
+ HAVE_DNS = True
+except ImportError:
+ pass
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.plugins.lookup import LookupBase
+
+# ==============================================================
+# DNSTXT: DNS TXT records
+#
+# key=domainname
+# TODO: configurable resolver IPs
+# --------------------------------------------------------------
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if HAVE_DNS is False:
+ raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
+
+ ret = []
+ for term in terms:
+ domain = term.split()[0]
+ string = []
+ try:
+ answers = dns.resolver.query(domain, 'TXT')
+ for rdata in answers:
+ s = rdata.to_text()
+ string.append(s[1:-1]) # Strip outside quotes on TXT rdata
+
+ except dns.resolver.NXDOMAIN:
+ string = 'NXDOMAIN'
+ except dns.resolver.Timeout:
+ string = ''
+ except dns.resolver.NoAnswer:
+ string = ''
+ except DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+ ret.append(''.join(string))
+
+ return ret
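+
+# Note (illustrative): multiple TXT strings within a single record are joined
+# without a separator, e.g. a record published as "v=spf1 " "-all" is returned
+# as the single string 'v=spf1 -all'; a missing domain yields the literal
+# string 'NXDOMAIN', and a timeout or empty answer yields an empty string.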
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py
new file mode 100644
index 00000000..18165f9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/dsv.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+lookup: dsv
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic DevOps Secrets Vault
+version_added: 1.0.0
+description:
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
+ DSV I(tenant) using a I(client_id) and I(client_secret).
+requirements:
+ - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
+options:
+ _terms:
+ description: The path to the secret, e.g. C(/staging/servers/web1).
+ required: true
+ tenant:
+ description: The first format parameter in the default I(url_template).
+ env:
+ - name: DSV_TENANT
+ ini:
+ - section: dsv_lookup
+ key: tenant
+ required: true
+ tld:
+ default: com
+ description: The top-level domain of the tenant; the second format
+ parameter in the default I(url_template).
+ env:
+ - name: DSV_TLD
+ ini:
+ - section: dsv_lookup
+ key: tld
+ required: false
+ client_id:
+ description: The client_id with which to request the Access Grant.
+ env:
+ - name: DSV_CLIENT_ID
+ ini:
+ - section: dsv_lookup
+ key: client_id
+ required: true
+ client_secret:
+ description: The client secret associated with the specific I(client_id).
+ env:
+ - name: DSV_CLIENT_SECRET
+ ini:
+ - section: dsv_lookup
+ key: client_secret
+ required: true
+ url_template:
+ default: https://{}.secretsvaultcloud.{}/v1
+ description: The path to prepend to the base URL to form a valid REST
+ API request.
+ env:
+ - name: DSV_URL_TEMPLATE
+ ini:
+ - section: dsv_lookup
+ key: url_template
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - One or more JSON responses to C(GET /secrets/{path}).
+ - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
+ tasks:
+ - ansible.builtin.debug:
+ msg: 'the password is {{ secret["data"]["password"] }}'
+"""
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+
+sdk_is_missing = False
+
+try:
+ from thycotic.secrets.vault import (
+ SecretsVault,
+ SecretsVaultError,
+ )
+except ImportError:
+ sdk_is_missing = True
+
+from ansible.utils.display import Display
+from ansible.plugins.lookup import LookupBase
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def Client(vault_parameters):
+ return SecretsVault(**vault_parameters)
+
+ def run(self, terms, variables, **kwargs):
+ if sdk_is_missing:
+ raise AnsibleError("python-dsv-sdk must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ vault = LookupModule.Client(
+ {
+ "tenant": self.get_option("tenant"),
+ "client_id": self.get_option("client_id"),
+ "client_secret": self.get_option("client_secret"),
+ "url_template": self.get_option("url_template"),
+ }
+ )
+ result = []
+
+ for term in terms:
+ display.debug("dsv_lookup term: %s" % term)
+ try:
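+                # lstrip("[/:]") strips any leading '[', '/' or ':' characters
+                # (a character set, not a literal prefix), so for example
+                # '/test/secret' becomes 'test/secret'.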
+ path = term.lstrip("[/:]")
+
+ if path == "":
+ raise AnsibleOptionsError("Invalid secret path: %s" % term)
+
+ display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path)
+ result.append(vault.get_secret_json(path))
+ except SecretsVaultError as error:
+ raise AnsibleError(
+ "DevOps Secrets Vault lookup failure: %s" % error.message
+ )
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py
new file mode 100644
index 00000000..91724df7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd.py
@@ -0,0 +1,180 @@
+# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
+# (m) 2016, Mihai Moldovanu <mihaim@tfm.ro>
+# (m) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Jan-Piet Mens (@jpmens)
+ lookup: etcd
+ short_description: get info from an etcd server
+ description:
+ - Retrieves data from an etcd server
+ options:
+ _terms:
+ description:
+ - the list of keys to lookup on the etcd server
+ type: list
+ elements: string
+ required: True
+ url:
+ description:
+ - Environment variable with the url for the etcd server
+ default: 'http://127.0.0.1:4001'
+ env:
+ - name: ANSIBLE_ETCD_URL
+ version:
+ description:
+ - Environment variable with the etcd protocol version
+ default: 'v1'
+ env:
+ - name: ANSIBLE_ETCD_VERSION
+ validate_certs:
+ description:
+ - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
+ default: True
+ type: boolean
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}"
+
+- name: "since Ansible 2.5 you can set server options inline"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - list of values associated with input keys
+ type: list
+ elements: string
+'''
+
+import json
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url
+
+# this can be made configurable, but should not use ansible.cfg
+#
+# Made module configurable from playbooks:
+# If etcd v2 running on host 192.168.1.21 on port 2379
+# we can use the following in a playbook to retrieve /tfm/network/config key
+#
+# - ansible.builtin.debug: msg={{lookup('etcd','/tfm/network/config', url='http://192.168.1.21:2379' , version='v2')}}
+#
+# Example Output:
+#
+# TASK [debug] *******************************************************************
+# ok: [localhost] => {
+# "msg": {
+# "Backend": {
+# "Type": "vxlan"
+# },
+# "Network": "172.30.0.0/16",
+# "SubnetLen": 24
+# }
+# }
+#
+#
+#
+#
+
+
+class Etcd:
+ def __init__(self, url, version, validate_certs):
+ self.url = url
+ self.version = version
+ self.baseurl = '%s/%s/keys' % (self.url, self.version)
+ self.validate_certs = validate_certs
+
+ def _parse_node(self, node):
+        # This function receives a node of the etcd tree.
+        # If the requested level is a directory, recursion starts:
+        # each child node is passed back into this function and its result
+        # is stored under the child's key in the 'path' dict.
+        # When a plain value is reached, that value is returned directly
+        # and the recursion unwinds.
+ path = {}
+ if node.get('dir', False):
+ for n in node.get('nodes', []):
+ path[n['key'].split('/')[-1]] = self._parse_node(n)
+
+ else:
+ path = node['value']
+
+ return path
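+
+    # Illustration (hypothetical v2 response): a node such as
+    #   {'key': '/tfm', 'dir': True, 'nodes': [
+    #       {'key': '/tfm/network', 'dir': True, 'nodes': [
+    #           {'key': '/tfm/network/config', 'value': '{"Network": "172.30.0.0/16"}'}]}]}
+    # is parsed into {'network': {'config': '{"Network": "172.30.0.0/16"}'}}.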
+
+ def get(self, key):
+ url = "%s/%s?recursive=true" % (self.baseurl, key)
+ data = None
+ value = {}
+ try:
+ r = open_url(url, validate_certs=self.validate_certs)
+ data = r.read()
+ except Exception:
+ return None
+
+ try:
+            # Folder parsing is not supported for etcd v1
+ item = json.loads(data)
+ if self.version == 'v1':
+                # When etcd only supports v1, take the plain 'value' field
+ if 'value' in item:
+ value = item['value']
+ else:
+ if 'node' in item:
+                    # The usual case: etcd returned a 'node' entry
+ value = self._parse_node(item['node'])
+
+ if 'errorCode' in item:
+                    # etcd responded with an errorCode (unknown entry); return an error marker
+ value = "ENOENT"
+ except Exception:
+ raise
+
+ return value
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ validate_certs = self.get_option('validate_certs')
+ url = self.get_option('url')
+ version = self.get_option('version')
+
+ etcd = Etcd(url=url, version=version, validate_certs=validate_certs)
+
+ ret = []
+ for term in terms:
+ key = term.split()[0]
+ value = etcd.get(key)
+ ret.append(value)
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py
new file mode 100644
index 00000000..55bbed2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/etcd3.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
+ version_added: '0.2.0'
+ lookup: etcd3
+ short_description: Get key values from etcd3 server
+ description:
+ - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
+ - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables.
+ - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+
+ options:
+ _terms:
+ description:
+ - The list of keys (or key prefixes) to look up on the etcd3 server.
+ type: list
+ elements: str
+ required: True
+ prefix:
+ description:
+ - Look for key or prefix key.
+ type: bool
+ default: False
+ endpoints:
+ description:
+ - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
+          Specify the etcd3 connection in URL form, for example C(https://hostname:2379), or in C(<host>:<port>) form.
+ - The C(host) part is overwritten by I(host) option, if defined.
+ - The C(port) part is overwritten by I(port) option, if defined.
+ env:
+ - name: ETCDCTL_ENDPOINTS
+ default: '127.0.0.1:2379'
+ type: str
+ host:
+ description:
+ - etcd3 listening client host.
+ - Takes precedence over I(endpoints).
+ type: str
+ port:
+ description:
+ - etcd3 listening client port.
+ - Takes precedence over I(endpoints).
+ type: int
+ ca_cert:
+ description:
+ - etcd3 CA authority.
+ env:
+ - name: ETCDCTL_CACERT
+ type: str
+ cert_cert:
+ description:
+ - etcd3 client certificate.
+ env:
+ - name: ETCDCTL_CERT
+ type: str
+ cert_key:
+ description:
+ - etcd3 client private key.
+ env:
+ - name: ETCDCTL_KEY
+ type: str
+ timeout:
+ description:
+ - Client timeout.
+ default: 60
+ env:
+ - name: ETCDCTL_DIAL_TIMEOUT
+ type: int
+ user:
+ description:
+ - Authenticated user name.
+ env:
+ - name: ETCDCTL_USER
+ type: str
+ password:
+ description:
+ - Authenticated user password.
+ env:
+ - name: ETCDCTL_PASSWORD
+ type: str
+
+ notes:
+    - I(host) and I(port) options take precedence over the I(endpoints) option.
+    - The recommended way to connect to the etcd3 server is to use the C(ETCDCTL_ENDPOINTS)
+      environment variable and keep I(endpoints), I(host), and I(port) unused.
+ seealso:
+ - module: community.general.etcd3
+ - ref: etcd_lookup
+ description: The etcd v2 lookup.
+
+ requirements:
+ - "etcd3 >= 0.10"
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
+
+- name: "look for a key prefix"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
+
+- name: "connect to etcd3 with a client certificate"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - List of keys and associated values.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The element's key.
+ type: str
+ value:
+ description: The element's value.
+ type: str
+'''
+
+import re
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.errors import AnsibleError, AnsibleLookupError
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ HAS_ETCD = False
+
+display = Display()
+
+etcd3_cnx_opts = (
+ 'host',
+ 'port',
+ 'ca_cert',
+ 'cert_key',
+ 'cert_cert',
+ 'timeout',
+ 'user',
+ 'password',
+ # 'grpc_options' Etcd3Client() option currently not supported by lookup module (maybe in future ?)
+)
+
+
+def etcd3_client(client_params):
+ try:
+ etcd = etcd3.client(**client_params)
+ etcd.status()
+ except Exception as exp:
+ raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
+ return etcd
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ if not HAS_ETCD:
+ display.error(missing_required_lib('etcd3'))
+ return None
+
+ # create the etcd3 connection parameters dict to pass to etcd3 class
+ client_params = {}
+
+ # etcd3 class expects host and port as connection parameters, so endpoints
+ # must be mangled a bit to fit in this scheme.
+ # so here we use a regex to extract server and port
+ match = re.compile(
+ r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
+ ).match(self.get_option('endpoints'))
+ if match:
+ if match.group('host'):
+ client_params['host'] = match.group('host')
+ if match.group('port'):
+ client_params['port'] = match.group('port')
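+        # For example (hypothetical), endpoints='https://etcd.example.com:2379'
+        # yields host='etcd.example.com' and port='2379'; explicit host/port
+        # options set below then take precedence over these values.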
+
+ for opt in etcd3_cnx_opts:
+ if self.get_option(opt):
+ client_params[opt] = self.get_option(opt)
+
+ cnx_log = dict(client_params)
+ if 'password' in cnx_log:
+ cnx_log['password'] = '<redacted>'
+ display.verbose("etcd3 connection parameters: %s" % cnx_log)
+
+ # connect to etcd3 server
+ etcd = etcd3_client(client_params)
+
+ ret = []
+ # we can pass many keys to lookup
+ for term in terms:
+ if self.get_option('prefix'):
+ try:
+ for val, meta in etcd.get_prefix(term):
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get_prefix: %s' % (to_native(exp)))
+ else:
+ try:
+ val, meta = etcd.get(term)
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get: %s' % (to_native(exp)))
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py
new file mode 100644
index 00000000..4b050968
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/filetree.py
@@ -0,0 +1,218 @@
+# (c) 2016 Dag Wieers <dag@wieers.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+lookup: filetree
+author: Dag Wieers (@dagwieers) <dag@wieers.com>
+short_description: recursively match all files in a directory tree
+description:
+- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
+- Supports directories, files and symlinks, including SELinux and other file properties.
+- If you provide more than one path, it will implement first_found logic, and will not process entries it already processed in previous paths.
+  This enables merging different trees in order of importance, or adding role_vars to specific paths to influence different instances of the same role.
+options:
+ _terms:
+ description: path(s) of files to read
+ required: True
+'''
+
+EXAMPLES = r"""
+- name: Create directories
+ ansible.builtin.file:
+ path: /web/{{ item.path }}
+ state: directory
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'directory'
+
+- name: Template files (explicitly skip directories in order to use the 'src' attribute)
+ ansible.builtin.template:
+ src: '{{ item.src }}'
+ dest: /web/{{ item.path }}
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'file'
+
+- name: Recreate symlinks
+ ansible.builtin.file:
+ src: '{{ item.src }}'
+ dest: /web/{{ item.path }}
+ state: link
+ force: yes
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'link'
+
+- name: list all files under web/
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.filetree', 'web/') }}"
+"""
+
+RETURN = r"""
+ _raw:
+ description: List of dictionaries with file information.
+ type: list
+ elements: dict
+ contains:
+ src:
+ description:
+ - Full path to file.
+ - Not returned when I(item.state) is set to C(directory).
+ type: path
+ root:
+ description: Allows filtering by original location.
+ type: path
+ path:
+ description: Contains the relative path to root.
+ type: path
+ mode:
+        description: The permissions of the resulting file or directory.
+ type: str
+ state:
+ description: TODO
+ type: str
+ owner:
+ description: Name of the user that owns the file/directory.
+ type: raw
+ group:
+ description: Name of the group that owns the file/directory.
+ type: raw
+ seuser:
+ description: The user part of the SELinux file context.
+ type: raw
+ serole:
+ description: The role part of the SELinux file context.
+ type: raw
+ setype:
+ description: The type part of the SELinux file context.
+ type: raw
+ selevel:
+ description: The level part of the SELinux file context.
+ type: raw
+ uid:
+ description: Owner ID of the file/directory.
+ type: int
+ gid:
+ description: Group ID of the file/directory.
+ type: int
+ size:
+ description: Size of the target.
+ type: int
+ mtime:
+ description: Time of last modification.
+ type: float
+ ctime:
+ description: Time of last metadata update or creation (depends on OS).
+ type: float
+"""
+import os
+import pwd
+import grp
+import stat
+
+HAVE_SELINUX = False
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ pass
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# If selinux fails to find a default, return an array of None
+def selinux_context(path):
+ context = [None, None, None, None]
+ if HAVE_SELINUX and selinux.is_selinux_enabled():
+ try:
+ # note: the selinux module uses byte strings on python2 and text
+ # strings on python3
+ ret = selinux.lgetfilecon_raw(to_native(path))
+ except OSError:
+ return context
+ if ret[0] != -1:
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+
+def file_props(root, path):
+ ''' Returns dictionary with file properties, or return None on failure '''
+ abspath = os.path.join(root, path)
+
+ try:
+ st = os.lstat(abspath)
+ except OSError as e:
+ display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e))
+ return None
+
+ ret = dict(root=root, path=path)
+
+ if stat.S_ISLNK(st.st_mode):
+ ret['state'] = 'link'
+ ret['src'] = os.readlink(abspath)
+ elif stat.S_ISDIR(st.st_mode):
+ ret['state'] = 'directory'
+ elif stat.S_ISREG(st.st_mode):
+ ret['state'] = 'file'
+ ret['src'] = abspath
+ else:
+ display.warning('filetree: Error file type of %s is not supported' % abspath)
+ return None
+
+ ret['uid'] = st.st_uid
+ ret['gid'] = st.st_gid
+ try:
+ ret['owner'] = pwd.getpwuid(st.st_uid).pw_name
+ except KeyError:
+ ret['owner'] = st.st_uid
+ try:
+ ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name)
+ except KeyError:
+ ret['group'] = st.st_gid
+ ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode))
+ ret['size'] = st.st_size
+ ret['mtime'] = st.st_mtime
+ ret['ctime'] = st.st_ctime
+
+ if HAVE_SELINUX and selinux.is_selinux_enabled() == 1:
+ context = selinux_context(abspath)
+ ret['seuser'] = context[0]
+ ret['serole'] = context[1]
+ ret['setype'] = context[2]
+ ret['selevel'] = context[3]
+
+ return ret
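+
+# For illustration (hypothetical values), a regular file typically yields:
+#   {'root': '/play/files/web', 'path': 'index.html', 'state': 'file',
+#    'src': '/play/files/web/index.html', 'uid': 1000, 'gid': 1000,
+#    'owner': 'webadmin', 'group': 'webadmin', 'mode': '0644', 'size': 1024,
+#    'mtime': 1600000000.0, 'ctime': 1600000000.0}
+# plus 'seuser'/'serole'/'setype'/'selevel' when SELinux is enabled.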
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ basedir = self.get_basedir(variables)
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
+ path = os.path.join(dwimmed_path, term_file)
+ display.debug("Walking '{0}'".format(path))
+ for root, dirs, files in os.walk(path, topdown=True):
+ for entry in dirs + files:
+ relpath = os.path.relpath(os.path.join(root, entry), path)
+
+ # Skip if relpath was already processed (from another root)
+ if relpath not in [entry['path'] for entry in ret]:
+ props = file_props(path, relpath)
+ if props is not None:
+ display.debug(" found '{0}'".format(os.path.join(path, relpath)))
+ ret.append(props)
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py
new file mode 100644
index 00000000..d5616670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/flattened.py
@@ -0,0 +1,84 @@
+# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: flattened
+ author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
+ short_description: return single list completely flattened
+ description:
+      - Given one or more lists, this lookup will recursively flatten any list elements found until only one list is left.
+ options:
+ _terms:
+ description: lists to flatten
+ required: True
+ notes:
+ - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore.
+ - aka highlander plugin, there can only be one (list).
+'''
+
+EXAMPLES = """
+- name: "'unnest' all elements into single list"
+ ansible.builtin.debug:
+ msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], [a,b,c], [[5,6,1,3], [34,a,b,c]])}}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - flattened list
+ type: list
+"""
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+
+ def _check_list_of_one_list(self, term):
+ # make sure term is not a list of one (list of one..) item
+ # return the final non list item if so
+
+ if isinstance(term, list) and len(term) == 1:
+ term = term[0]
+ if isinstance(term, list):
+ term = self._check_list_of_one_list(term)
+
+ return term
+
+ def _do_flatten(self, terms, variables):
+
+ ret = []
+ for term in terms:
+ term = self._check_list_of_one_list(term)
+
+ if term == 'None' or term == 'null':
+ # ignore undefined items
+ break
+
+ if isinstance(term, string_types):
+ # convert a variable to a list
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
+ # but avoid converting a plain string to a list of one string
+ if term2 != [term]:
+ term = term2
+
+ if isinstance(term, list):
+ # if it's a list, check recursively for items that are a list
+ term = self._do_flatten(term, variables)
+ ret.extend(term)
+ else:
+ ret.append(term)
+
+ return ret
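+
+    # Illustration: _do_flatten([1, [2, [3, 4]], 5], {}) returns [1, 2, 3, 4, 5];
+    # nesting depth does not matter, every list found is flattened recursively.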
+
+ def run(self, terms, variables, **kwargs):
+
+ if not isinstance(terms, list):
+ raise AnsibleError("with_flattened expects a list")
+
+ return self._do_flatten(terms, variables)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py
new file mode 100644
index 00000000..206788c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/gcp_storage_file.py
@@ -0,0 +1,156 @@
+# (c) 2019, Eric Anderson <eric.sysmin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+lookup: gcp_storage_file
+description:
+ - This lookup returns the contents from a file residing on Google Cloud Storage
+short_description: Return GC Storage content
+author: Eric Anderson (!UNKNOWN) <eanderson@avinetworks.com>
+requirements:
+ - python >= 2.6
+ - requests >= 2.18.4
+ - google-auth >= 1.3.0
+options:
+ src:
+ description:
+ - Source location of file (may be local machine or cloud depending on action).
+ required: false
+ bucket:
+ description:
+ - The name of the bucket.
+ required: false
+extends_documentation_fragment:
+- community.general._gcp
+
+'''
+
+EXAMPLES = '''
+- ansible.builtin.debug:
+ msg: |
+ the value of foo.txt is {{ lookup('community.general.gcp_storage_file',
+ bucket='gcp-bucket', src='mydir/foo.txt', project='project-name',
+ auth_kind='serviceaccount', service_account_file='/tmp/myserviceaccountfile.json') }}
+'''
+
+RETURN = '''
+_raw:
+ description:
+ - base64 encoded file content
+ type: list
+ elements: str
+'''
+
+import base64
+import json
+import mimetypes
+import os
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+try:
+ from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession
+ HAS_GOOGLE_CLOUD_COLLECTION = True
+except ImportError:
+ HAS_GOOGLE_CLOUD_COLLECTION = False
+
+
+display = Display()
+
+
+class GcpMockModule(object):
+ def __init__(self, params):
+ self.params = params
+
+ def fail_json(self, *args, **kwargs):
+ raise AnsibleError(kwargs['msg'])
+
+ def raise_for_status(self, response):
+ try:
+ response.raise_for_status()
+ except getattr(requests.exceptions, 'RequestException'):
+ self.fail_json(msg="GCP returned error: %s" % response.json())
+
+
+class GcpFileLookup():
+ def get_file_contents(self, module):
+ auth = GcpSession(module, 'storage')
+ data = auth.get(self.media_link(module))
+ return base64.b64encode(data.content.rstrip())
+
+ def fetch_resource(self, module, link, allow_not_found=True):
+ auth = GcpSession(module, 'storage')
+ return self.return_if_object(module, auth.get(link), allow_not_found)
+
+ def self_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
+
+ def media_link(self, module):
+ return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
+
+ def return_if_object(self, module, response, allow_not_found=False):
+ # If not found, return nothing.
+ if allow_not_found and response.status_code == 404:
+ return None
+ # If no content, return nothing.
+ if response.status_code == 204:
+ return None
+ try:
+ module.raise_for_status(response)
+ result = response.json()
+ except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
+ raise AnsibleError("Invalid JSON response with error: %s" % inst)
+ if navigate_hash(result, ['error', 'errors']):
+ raise AnsibleError(navigate_hash(result, ['error', 'errors']))
+ return result
+
+ def object_headers(self, module):
+ return {
+ "name": module.params['src'],
+ "Content-Type": mimetypes.guess_type(module.params['src'])[0],
+ "Content-Length": str(os.path.getsize(module.params['src'])),
+ }
+
+ def run(self, terms, variables=None, **kwargs):
+ params = {
+ 'bucket': kwargs.get('bucket', None),
+ 'src': kwargs.get('src', None),
+ 'projects': kwargs.get('projects', None),
+ 'scopes': kwargs.get('scopes', None),
+ 'zones': kwargs.get('zones', None),
+ 'auth_kind': kwargs.get('auth_kind', None),
+ 'service_account_file': kwargs.get('service_account_file', None),
+ 'service_account_email': kwargs.get('service_account_email', None),
+ }
+
+ if not params['scopes']:
+ params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
+
+ fake_module = GcpMockModule(params)
+
+ # Check if files exist.
+ remote_object = self.fetch_resource(fake_module, self.self_link(fake_module))
+ if not remote_object:
+ raise AnsibleError("File does not exist in bucket")
+
+ result = self.get_file_contents(fake_module)
+ return [result]
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_GOOGLE_CLOUD_COLLECTION:
+ raise AnsibleError("community.general.gcp_storage_file needs a supported version of the google.cloud collection installed")
+ if not HAS_REQUESTS:
+ raise AnsibleError("community.general.gcp_storage_file needs requests installed. Use `pip install requests` to install it")
+ return GcpFileLookup().run(terms, variables=variables, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py
new file mode 100644
index 00000000..a4da243a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hashi_vault.py
@@ -0,0 +1,650 @@
+# (c) 2020, Brian Scholer (@briantist)
+# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: hashi_vault
+ author:
+ - Jonathan Davila (!UNKNOWN) <jdavila(at)ansible.com>
+ - Brian Scholer (@briantist)
+ short_description: Retrieve secrets from HashiCorp's Vault
+ requirements:
+ - hvac (python library)
+ - hvac 0.7.0+ (for namespace support)
+ - hvac 0.9.6+ (to avoid all deprecation warnings)
+ - botocore (only if inferring aws params from boto)
+ - boto3 (only if using a boto profile)
+ description:
+ - Retrieve secrets from HashiCorp's Vault.
+ notes:
+ - Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
+ - As of community.general 0.2.0, only the latest version of a secret is returned when specifying a KV v2 path.
+ - As of community.general 0.2.0, all options can be supplied via term string (space delimited key=value pairs) or by parameters (see examples).
+ - As of community.general 0.2.0, when C(secret) is the first option in the term string, C(secret=) is not required (see examples).
+ options:
+ secret:
+ description: Vault path to the secret being requested in the format C(path[:field]).
+ required: True
+ token:
+ description:
+ - Vault token. If using token auth and no token is supplied, explicitly or through env, then the plugin will check
+          for a token file, as determined by C(token_path) and C(token_file).
+ env:
+ - name: VAULT_TOKEN
+ token_path:
+ description: If no token is specified, will try to read the token file from this path.
+ env:
+ - name: VAULT_TOKEN_PATH
+ version_added: 1.2.0
+ ini:
+ - section: lookup_hashi_vault
+ key: token_path
+ version_added: '0.2.0'
+ token_file:
+ description: If no token is specified, will try to read the token from this file in C(token_path).
+ env:
+ - name: VAULT_TOKEN_FILE
+ version_added: 1.2.0
+ ini:
+ - section: lookup_hashi_vault
+ key: token_file
+ default: '.vault-token'
+ version_added: '0.2.0'
+ url:
+ description: URL to the Vault service.
+ env:
+ - name: VAULT_ADDR
+ ini:
+ - section: lookup_hashi_vault
+ key: url
+ version_added: '0.2.0'
+ default: 'http://127.0.0.1:8200'
+ username:
+ description: Authentication user name.
+ password:
+ description: Authentication password.
+ role_id:
+ description: Vault Role ID. Used in approle and aws_iam_login auth methods.
+ env:
+ - name: VAULT_ROLE_ID
+ ini:
+ - section: lookup_hashi_vault
+ key: role_id
+ version_added: '0.2.0'
+ secret_id:
+ description: Secret ID to be used for Vault AppRole authentication.
+ env:
+ - name: VAULT_SECRET_ID
+ auth_method:
+ description:
+ - Authentication method to be used.
+ - C(userpass) is added in Ansible 2.8.
+ - C(aws_iam_login) is added in community.general 0.2.0.
+ - C(jwt) is added in community.general 1.3.0.
+ env:
+ - name: VAULT_AUTH_METHOD
+ ini:
+ - section: lookup_hashi_vault
+ key: auth_method
+ version_added: '0.2.0'
+ choices:
+ - token
+ - userpass
+ - ldap
+ - approle
+ - aws_iam_login
+ - jwt
+ default: token
+ return_format:
+ description:
+ - Controls how multiple key/value pairs in a path are treated on return.
+ - C(dict) returns a single dict containing the key/value pairs (same behavior as before community.general 0.2.0).
+ - C(values) returns a list of all the values only. Use when you don't care about the keys.
+ - C(raw) returns the actual API result, which includes metadata and may have the data nested in other keys.
+ choices:
+ - dict
+ - values
+ - raw
+ default: dict
+ aliases: [ as ]
+ version_added: '0.2.0'
+ mount_point:
+ description: Vault mount point, only required if you have a custom mount point. Does not apply to token authentication.
+ jwt:
+ description: The JSON Web Token (JWT) to use for JWT authentication to Vault.
+ env:
+ - name: ANSIBLE_HASHI_VAULT_JWT
+ version_added: 1.3.0
+ ca_cert:
+ description: Path to certificate to use for authentication.
+ aliases: [ cacert ]
+ validate_certs:
+ description:
+        - Controls verification and validation of SSL certificates; mostly you only want to turn this off with self-signed ones.
+ - Will be populated with the inverse of C(VAULT_SKIP_VERIFY) if that is set and I(validate_certs) is not explicitly
+ provided (added in community.general 1.3.0).
+ - Will default to C(true) if neither I(validate_certs) or C(VAULT_SKIP_VERIFY) are set.
+ type: boolean
+ namespace:
+ description:
+ - Vault namespace where secrets reside. This option requires HVAC 0.7.0+ and Vault 0.11+.
+ - Optionally, this may be achieved by prefixing the authentication mount point and/or secret path with the namespace
+ (e.g C(mynamespace/secret/mysecret)).
+ env:
+ - name: VAULT_NAMESPACE
+ version_added: 1.2.0
+ aws_profile:
+ description: The AWS profile
+ type: str
+ aliases: [ boto_profile ]
+ env:
+ - name: AWS_DEFAULT_PROFILE
+ - name: AWS_PROFILE
+ version_added: '0.2.0'
+ aws_access_key:
+ description: The AWS access key to use.
+ type: str
+ aliases: [ aws_access_key_id ]
+ env:
+ - name: EC2_ACCESS_KEY
+ - name: AWS_ACCESS_KEY
+ - name: AWS_ACCESS_KEY_ID
+ version_added: '0.2.0'
+ aws_secret_key:
+ description: The AWS secret key that corresponds to the access key.
+ type: str
+ aliases: [ aws_secret_access_key ]
+ env:
+ - name: EC2_SECRET_KEY
+ - name: AWS_SECRET_KEY
+ - name: AWS_SECRET_ACCESS_KEY
+ version_added: '0.2.0'
+ aws_security_token:
+ description: The AWS security token if using temporary access and secret keys.
+ type: str
+ env:
+ - name: EC2_SECURITY_TOKEN
+ - name: AWS_SESSION_TOKEN
+ - name: AWS_SECURITY_TOKEN
+ version_added: '0.2.0'
+ region:
+ description: The AWS region for which to create the connection.
+ type: str
+ env:
+ - name: EC2_REGION
+ - name: AWS_REGION
+ version_added: '0.2.0'
+"""
+
+EXAMPLES = """
+- ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
+
+- name: Return all secrets from a path
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200') }}"
+
+- name: Vault that requires authentication via LDAP
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas') }}"
+
+- name: Vault that requires authentication via username and password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=psw url=http://myvault:8200') }}"
+
+- name: Connect to Vault using TLS
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 validate_certs=False') }}"
+
+- name: using certificate auth
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hi:value token=xxxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem') }}"
+
+- name: Authenticate with a Vault app role
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid') }}"
+
+- name: Return all secrets from a path in a namespace
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 namespace=teama/admins') }}"
+
+# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
+# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
+- name: Return latest KV v2 secret from path
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
+
+# The following examples work in collection releases after community.general 0.2.0
+
+- name: secret= is not required if secret is first
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello token=<token> url=http://myvault_url:8200') }}"
+
+- name: options can be specified as parameters rather than put in term string
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/data/hello', token=my_token_var, url='http://myvault_url:8200') }}"
+
+# return_format (or its alias 'as') can control how secrets are returned to you
+- name: return secrets as a dict (default)
+ ansible.builtin.set_fact:
+ my_secrets: "{{ lookup('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200') }}"
+- ansible.builtin.debug:
+ msg: "{{ my_secrets['secret_key'] }}"
+- ansible.builtin.debug:
+ msg: "Secret '{{ item.key }}' has value '{{ item.value }}'"
+ loop: "{{ my_secrets | dict2items }}"
+
+- name: return secrets as values only
+ ansible.builtin.debug:
+ msg: "A secret value: {{ item }}"
+ loop: "{{ query('community.general.hashi_vault', 'secret/data/manysecrets', token=my_token_var, url='http://myvault_url:8200', return_format='values') }}"
+
+- name: return raw secret from API, including metadata
+ ansible.builtin.set_fact:
+ my_secret: "{{ lookup('community.general.hashi_vault', 'secret/data/hello:value', token=my_token_var, url='http://myvault_url:8200', as='raw') }}"
+- ansible.builtin.debug:
+ msg: "This is version {{ my_secret['metadata']['version'] }} of hello:value. The secret data is {{ my_secret['data']['data']['value'] }}"
+
+# AWS IAM authentication method
+# uses Ansible standard AWS options
+
+- name: authenticate with aws_iam_login
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='aws_iam_login', role_id='myroleid', profile=my_boto_profile) }}"
+
+# The following examples work in collection releases after community.general 1.3.0
+
+- name: Authenticate with a JWT
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hashi_vault', 'secret/hello:value', auth_method='jwt', role_id='myroleid', jwt='myjwt', url='https://myvault:8200')}}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - secret(s) requested
+ type: list
+ elements: dict
+"""
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.module_utils.parsing.convert_bool import boolean
+
+HAS_HVAC = False
+try:
+ import hvac
+ HAS_HVAC = True
+except ImportError:
+ HAS_HVAC = False
+
+HAS_BOTOCORE = False
+try:
+ # import boto3
+ import botocore
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+HAS_BOTO3 = False
+try:
+ import boto3
+ # import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+class HashiVault:
+ def get_options(self, *option_names, **kwargs):
+ ret = {}
+ include_falsey = kwargs.get('include_falsey', False)
+ for option in option_names:
+ val = self.options.get(option)
+ if val or include_falsey:
+ ret[option] = val
+ return ret
+
+ def __init__(self, **kwargs):
+ self.options = kwargs
+
+ # check early that auth method is actually available
+ self.auth_function = 'auth_' + self.options['auth_method']
+ if not (hasattr(self, self.auth_function) and callable(getattr(self, self.auth_function))):
+ raise AnsibleError(
+ "Authentication method '%s' is not implemented. ('%s' member function not found)" % (self.options['auth_method'], self.auth_function)
+ )
+
+ client_args = {
+ 'url': self.options['url'],
+ 'verify': self.options['ca_cert']
+ }
+
+ if self.options.get('namespace'):
+ client_args['namespace'] = self.options['namespace']
+
+ # this is the only auth_method-specific thing here, because if we're using a token, we need it now
+ if self.options['auth_method'] == 'token':
+ client_args['token'] = self.options.get('token')
+
+ self.client = hvac.Client(**client_args)
+
+ # Check for old version, before auth_methods class (added in 0.7.0):
+ # https://github.com/hvac/hvac/releases/tag/v0.7.0
+ #
+ # hvac is moving auth methods into the auth_methods class
+ # which lives in the client.auth member.
+ #
+ # Attempting to find which backends were moved into the class when (this is primarily for warnings):
+ # 0.7.0 -- github, ldap, mfa, azure?, gcp
+ # 0.7.1 -- okta
+ # 0.8.0 -- kubernetes
+ # 0.9.0 -- azure?, radius
+ # 0.9.3 -- aws
+ # 0.9.6 -- userpass
+ self.hvac_has_auth_methods = hasattr(self.client, 'auth')
+
+ # We've already checked to ensure a method exists for a particular auth_method, of the form:
+ #
+ # auth_<method_name>
+ #
+ def authenticate(self):
+ getattr(self, self.auth_function)()
+
+ def get(self):
+ '''gets a secret. should always return a list'''
+ secret = self.options['secret']
+ field = self.options['secret_field']
+ return_as = self.options['return_format']
+
+ try:
+ data = self.client.read(secret)
+ except hvac.exceptions.Forbidden:
+ raise AnsibleError("Forbidden: Permission Denied to secret '%s'." % secret)
+
+ if data is None:
+ raise AnsibleError("The secret '%s' doesn't seem to exist." % secret)
+
+ if return_as == 'raw':
+ return [data]
+
+ # Check response for KV v2 fields and flatten nested secret data.
+ # https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
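+ # A KV v2 read returns {'data': {'data': {...secret key/values...}, 'metadata': {...}}, ...};
+ # after unwrapping one level, data['data'] holds the secret key/value pairs just like a KV v1 response.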
+ try:
+ # sentinel field checks
+ check_dd = data['data']['data']
+ check_md = data['data']['metadata']
+ # unwrap nested data
+ data = data['data']
+ except KeyError:
+ pass
+
+ if return_as == 'values':
+ return list(data['data'].values())
+
+ # everything after here implements return_as == 'dict'
+ if not field:
+ return [data['data']]
+
+ if field not in data['data']:
+ raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (secret, field))
+
+ return [data['data'][field]]
+
+ # begin auth implementation methods
+ #
+ # To add new backends, 3 things should be added:
+ #
+ # 1. Add a new validate_auth_<method_name> method to the LookupModule, which is responsible for validating
+ # that it has the necessary options and whatever else it needs.
+ #
+ # 2. Add a new auth_<method_name> method to this class. These implementations are fairly minimal as they should
+ # already have everything they need. This is also the place to check for deprecated auth methods as hvac
+ # continues to move backends into the auth_methods class.
+ #
+ # 3. Update the avail_auth_methods list in the LookupModule's auth_methods() method (for now this is static).
+ #
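+ # As an illustration only (not an implemented backend of this plugin), a hypothetical 'okta'
+ # method would follow the same pattern, assuming hvac exposes client.auth.okta.login():
+ #
+ # def auth_okta(self):
+ # params = self.get_options('username', 'password', 'mount_point')
+ # self.client.auth.okta.login(**params)
+ #
+ # together with a validate_auth_okta() validator on LookupModule and an 'okta' entry in
+ # avail_auth_methods.
+ #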
+ def auth_token(self):
+ if not self.client.is_authenticated():
+ raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup.")
+
+ def auth_userpass(self):
+ params = self.get_options('username', 'password', 'mount_point')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.userpass, 'login'):
+ self.client.auth.userpass.login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.9.6 or higher. Deprecated method 'auth_userpass' will be used.")
+ self.client.auth_userpass(**params)
+
+ def auth_ldap(self):
+ params = self.get_options('username', 'password', 'mount_point')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.ldap, 'login'):
+ self.client.auth.ldap.login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.7.0 or higher. Deprecated method 'auth_ldap' will be used.")
+ self.client.auth_ldap(**params)
+
+ def auth_approle(self):
+ params = self.get_options('role_id', 'secret_id', 'mount_point')
+ self.client.auth_approle(**params)
+
+ def auth_aws_iam_login(self):
+ params = self.options['iam_login_credentials']
+ if self.hvac_has_auth_methods and hasattr(self.client.auth.aws, 'iam_login'):
+ self.client.auth.aws.iam_login(**params)
+ else:
+ Display().warning("HVAC should be updated to version 0.9.3 or higher. Deprecated method 'auth_aws_iam' will be used.")
+ self.client.auth_aws_iam(**params)
+
+ def auth_jwt(self):
+ params = self.get_options('role_id', 'jwt', 'mount_point')
+ params['role'] = params.pop('role_id')
+ if self.hvac_has_auth_methods and hasattr(self.client.auth, 'jwt') and hasattr(self.client.auth.jwt, 'jwt_login'):
+ response = self.client.auth.jwt.jwt_login(**params)
+ # must manually set the client token with JWT login
+ # see https://github.com/hvac/hvac/issues/644
+ self.client.token = response['auth']['client_token']
+ else:
+ raise AnsibleError("JWT authentication requires HVAC version 0.10.5 or higher.")
+
+ # end auth implementation methods
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_HVAC:
+ raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
+
+ ret = []
+
+ for term in terms:
+ opts = kwargs.copy()
+ opts.update(self.parse_term(term))
+ self.set_options(direct=opts)
+ self.process_options()
+ # FUTURE: Create one object, authenticate once, and re-use it,
+ # for gets, for better use during with_ loops.
+ client = HashiVault(**self._options)
+ client.authenticate()
+ ret.extend(client.get())
+
+ return ret
+
+ def parse_term(self, term):
+ '''parses a term string into options'''
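+ # for example, "secret/data/hello:value token=abc url=http://vault:8200" parses to
+ # {'secret': 'secret/data/hello:value', 'token': 'abc', 'url': 'http://vault:8200'}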
+ param_dict = {}
+
+ for i, param in enumerate(term.split()):
+ try:
+ key, value = param.split('=', 1)
+ except ValueError:
+ if (i == 0):
+ # allow secret to be specified as value only if it's first
+ key = 'secret'
+ value = param
+ else:
+ raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % term)
+ param_dict[key] = value
+ return param_dict
+
+ def process_options(self):
+ '''performs deep validation and value loading for options'''
+
+ # ca_cert to verify
+ self.boolean_or_cacert()
+
+ # auth methods
+ self.auth_methods()
+
+ # secret field splitter
+ self.field_ops()
+
+ # begin options processing methods
+
+ def boolean_or_cacert(self):
+ # This is needed because of this (https://hvac.readthedocs.io/en/stable/source/hvac_v1.html):
+ #
+ # # verify (Union[bool,str]) - Either a boolean to indicate whether TLS verification should
+ # # be performed when sending requests to Vault, or a string pointing at the CA bundle to use for verification.
+ #
+ ''' return a bool or cacert '''
+ ca_cert = self.get_option('ca_cert')
+
+ validate_certs = self.get_option('validate_certs')
+
+ if validate_certs is None:
+ # Validate certs option was not explicitly set
+
+ # Check if VAULT_SKIP_VERIFY is set
+ vault_skip_verify = os.environ.get('VAULT_SKIP_VERIFY')
+
+ if vault_skip_verify is not None:
+ # VAULT_SKIP_VERIFY is set
+ try:
+ # Check that we have a boolean value
+ vault_skip_verify = boolean(vault_skip_verify)
+ # Use the inverse of VAULT_SKIP_VERIFY
+ validate_certs = not vault_skip_verify
+ except TypeError:
+ # Not a boolean value; fall back to the default (True)
+ validate_certs = True
+ else:
+ validate_certs = True
+
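+ # hvac's 'verify' argument accepts either a bool or a CA bundle path; keep the supplied
+ # ca_cert path only when validation is enabled and a path was given, otherwise set
+ # ca_cert to the validate_certs boolean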
+ if not (validate_certs and ca_cert):
+ self.set_option('ca_cert', validate_certs)
+
+ def field_ops(self):
+ # split secret and field
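+ # e.g. 'secret/data/hello:value' yields secret='secret/data/hello' and secret_field='value'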
+ secret = self.get_option('secret')
+
+ s_f = secret.rsplit(':', 1)
+ self.set_option('secret', s_f[0])
+ if len(s_f) >= 2:
+ field = s_f[1]
+ else:
+ field = None
+ self.set_option('secret_field', field)
+
+ def auth_methods(self):
+ # enforce and set the list of available auth methods
+ # TODO: can this be read from the choices: field in documentation?
+ avail_auth_methods = ['token', 'approle', 'userpass', 'ldap', 'aws_iam_login', 'jwt']
+ self.set_option('avail_auth_methods', avail_auth_methods)
+ auth_method = self.get_option('auth_method')
+
+ if auth_method not in avail_auth_methods:
+ raise AnsibleError(
+ "Authentication method '%s' not supported. Available options are %r" % (auth_method, avail_auth_methods)
+ )
+
+ # run validator if available
+ auth_validator = 'validate_auth_' + auth_method
+ if hasattr(self, auth_validator) and callable(getattr(self, auth_validator)):
+ getattr(self, auth_validator)(auth_method)
+
+ # end options processing methods
+
+ # begin auth method validators
+
+ def validate_by_required_fields(self, auth_method, *field_names):
+ missing = [field for field in field_names if not self.get_option(field)]
+
+ if missing:
+ raise AnsibleError("Authentication method %s requires options %r to be set, but these are missing: %r" % (auth_method, field_names, missing))
+
+ def validate_auth_userpass(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'username', 'password')
+
+ def validate_auth_ldap(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'username', 'password')
+
+ def validate_auth_approle(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'role_id')
+
+ def validate_auth_token(self, auth_method):
+ if auth_method == 'token':
+ if not self.get_option('token_path'):
+ # generally we want env vars defined in the spec, but in this case we want
+ # the env var HOME to have lower precedence than any other value source,
+ # including ini, so we're doing it here after all other processing has taken place
+ self.set_option('token_path', os.environ.get('HOME'))
+ if not self.get_option('token') and self.get_option('token_path'):
+ token_filename = os.path.join(
+ self.get_option('token_path'),
+ self.get_option('token_file')
+ )
+ if os.path.exists(token_filename):
+ with open(token_filename) as token_file:
+ self.set_option('token', token_file.read().strip())
+
+ if not self.get_option('token'):
+ raise AnsibleError("No Vault Token specified or discovered.")
+
+ def validate_auth_aws_iam_login(self, auth_method):
+ params = {
+ 'access_key': self.get_option('aws_access_key'),
+ 'secret_key': self.get_option('aws_secret_key')
+ }
+
+ if self.get_option('role_id'):
+ params['role'] = self.get_option('role_id')
+
+ if self.get_option('region'):
+ params['region'] = self.get_option('region')
+
+ if not (params['access_key'] and params['secret_key']):
+ profile = self.get_option('aws_profile')
+ if profile:
+ # try to load boto profile
+ if not HAS_BOTO3:
+ raise AnsibleError("boto3 is required for loading a boto profile.")
+ session_credentials = boto3.session.Session(profile_name=profile).get_credentials()
+ else:
+ # try to load from IAM credentials
+ if not HAS_BOTOCORE:
+ raise AnsibleError("botocore is required for loading IAM role credentials.")
+ session_credentials = botocore.session.get_session().get_credentials()
+
+ if not session_credentials:
+ raise AnsibleError("No AWS credentials supplied or available.")
+
+ params['access_key'] = session_credentials.access_key
+ params['secret_key'] = session_credentials.secret_key
+ if session_credentials.token:
+ params['session_token'] = session_credentials.token
+
+ self.set_option('iam_login_credentials', params)
+
+ def validate_auth_jwt(self, auth_method):
+ self.validate_by_required_fields(auth_method, 'role_id', 'jwt')
+
+ # end auth method validators
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py
new file mode 100644
index 00000000..09b7c0a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/hiera.py
@@ -0,0 +1,90 @@
+# (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Juan Manuel Parrilla (@jparrill)
+ lookup: hiera
+ short_description: get info from hiera data
+ requirements:
+ - hiera (command line utility)
+ description:
+ - Retrieves data from a Puppetmaster node using Hiera as an ENC (External Node Classifier)
+ options:
+ _hiera_key:
+ description:
+ - The list of keys to lookup on the Puppetmaster
+ type: list
+ elements: string
+ required: True
+ _bin_file:
+ description:
+ - Binary file to execute Hiera
+ default: '/usr/bin/hiera'
+ env:
+ - name: ANSIBLE_HIERA_BIN
+ _hierarchy_file:
+ description:
+ - File that describes the hierarchy of Hiera
+ default: '/etc/hiera.yaml'
+ env:
+ - name: ANSIBLE_HIERA_CFG
+# FIXME: incomplete options .. _terms? environment/fqdn?
+'''
+
+EXAMPLES = """
+# All these examples depend on a hiera.yaml file that describes the hierarchy
+
+- name: "a value from Hiera 'DB'"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo') }}"
+
+- name: "a value from a Hiera 'DB' on other environment"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
+
+- name: "a value from a Hiera 'DB' for a concrete node"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - a value associated with the input key
+ type: list
+ elements: str
+"""
+
+import os
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.cmd_functions import run_cmd
+
+ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
+ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
+
+
+class Hiera(object):
+ def get(self, hiera_key):
+ # note: only the first requested key is passed to the hiera CLI; extra terms are ignored
+
+ rc, output, err = run_cmd("{0} -c {1} {2}".format(
+ ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
+
+ return output.strip()
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=''):
+ hiera = Hiera()
+ ret = []
+
+ ret.append(hiera.get(terms))
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py
new file mode 100644
index 00000000..0472dfbb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/keyring.py
@@ -0,0 +1,67 @@
+# (c) 2016, Samuel Boucher <boucher.samuel.c@gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: keyring
+ author:
+ - Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
+ requirements:
+ - keyring (python library)
+ short_description: grab secrets from the OS keyring
+ description:
+ - Allows you to access data stored in the OS provided keyring/keychain.
+'''
+
+EXAMPLES = """
+- name: output secrets to screen (BAD IDEA)
+ ansible.builtin.debug:
+ msg: "Password: {{item}}"
+ with_community.general.keyring:
+ - 'servicename username'
+
+- name: access mysql with password from keyring
+ mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe
+"""
+
+RETURN = """
+ _raw:
+ description: Secrets stored.
+ type: list
+ elements: str
+"""
+
+HAS_KEYRING = True
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+try:
+ import keyring
+except ImportError:
+ HAS_KEYRING = False
+
+from ansible.plugins.lookup import LookupBase
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+ if not HAS_KEYRING:
+ raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
+
+ display.vvvv(u"keyring: %s" % keyring.get_keyring())
+ ret = []
+ for term in terms:
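+ # each term is expected to be of the form 'servicename username'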
+ (servicename, username) = (term.split()[0], term.split()[1])
+ display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
+ password = keyring.get_password(servicename, username)
+ if password is None:
+ raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
+ ret.append(password.rstrip())
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py
new file mode 100644
index 00000000..43e99986
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lastpass.py
@@ -0,0 +1,99 @@
+# (c) 2016, Andrew Zenk <azenk@umn.edu>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: lastpass
+ author:
+ - Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
+ requirements:
+ - lpass (command line utility)
+ - must have already logged into lastpass
+ short_description: fetch data from lastpass
+ description:
+ - use the lpass command line utility to fetch specific fields from lastpass
+ options:
+ _terms:
+ description: key from which you want to retrieve the field
+ required: True
+ field:
+ description: field to return from lastpass
+ default: 'password'
+'''
+
+EXAMPLES = """
+- name: get 'custom_field' from lastpass entry 'entry-name'
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
+"""
+
+RETURN = """
+ _raw:
+ description: secrets stored
+ type: list
+ elements: str
+"""
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_bytes, to_text
+from ansible.plugins.lookup import LookupBase
+
+
+class LPassException(AnsibleError):
+ pass
+
+
+class LPass(object):
+
+ def __init__(self, path='lpass'):
+ self._cli_path = path
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ @property
+ def logged_in(self):
+ out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
+ return err.startswith("Are you sure you would like to log out?")
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(to_bytes(stdin))
+ rc = p.wait()
+ if rc != expected_rc:
+ raise LPassException(err)
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
+
+ def _build_args(self, command, args=None):
+ if args is None:
+ args = []
+ args = [command] + args
+ args += ["--color=never"]
+ return args
+
+ def get_field(self, key, field):
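+ # built-in lpass fields have dedicated flags (for example --password); any other
+ # field name is requested with the generic --field=NAME flag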
+ if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
+ out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
+ else:
+ out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
+ return out.strip()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ lp = LPass()
+
+ if not lp.logged_in:
+ raise AnsibleError("Not logged into lastpass: please run 'lpass login' first")
+
+ field = kwargs.get('field', 'password')
+ values = []
+ for term in terms:
+ values.append(lp.get_field(term, field))
+ return values
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
new file mode 100644
index 00000000..3764a43c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
@@ -0,0 +1,120 @@
+# (c) 2017-2018, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: lmdb_kv
+ author:
+ - Jan-Piet Mens (@jpmens)
+ version_added: '0.2.0'
+ short_description: fetch data from LMDB
+ description:
+ - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it
+ requirements:
+ - lmdb (python library https://lmdb.readthedocs.io/en/release/)
+ options:
+ _terms:
+ description: list of keys to query
+ db:
+ description: path to LMDB database
+ default: 'ansible.mdb'
+'''
+
+EXAMPLES = """
+- name: query LMDB for a list of country codes
+ ansible.builtin.debug:
+ msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
+
+- name: use list of values in a loop by key wildcard
+ ansible.builtin.debug:
+ msg: "Hello from {{ item.0 }} a.k.a. {{ item.1 }}"
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - "n*"
+
+- name: get an item by key
+ ansible.builtin.assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in LMDB
+ type: list
+ elements: raw
+"""
+
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_native, to_text
+HAVE_LMDB = True
+try:
+ import lmdb
+except ImportError:
+ HAVE_LMDB = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ '''
+ terms contain any number of keys to be retrieved.
+ If terms is None, all keys from the database are returned
+ with their values, and if term ends in an asterisk, we
+ start searching there
+
+ The LMDB database defaults to 'ansible.mdb' if Ansible's
+ variable 'lmdb_kv_db' is not set:
+
+ vars:
+ - lmdb_kv_db: "jp.mdb"
+ '''
+
+ if HAVE_LMDB is False:
+ raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed")
+
+ db = variables.get('lmdb_kv_db', None)
+ if db is None:
+ db = kwargs.get('db', 'ansible.mdb')
+ db = str(db)
+
+ try:
+ env = lmdb.open(db, readonly=True)
+ except Exception as e:
+ raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
+
+ ret = []
+ if len(terms) == 0:
+ with env.begin() as txn:
+ cursor = txn.cursor()
+ cursor.first()
+ for key, value in cursor:
+ ret.append((to_text(key), to_native(value)))
+
+ else:
+ for term in terms:
+ with env.begin() as txn:
+ if term.endswith('*'):
+ cursor = txn.cursor()
+ prefix = term[:-1] # strip asterisk
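+ # position the cursor at the first key >= the term, then walk forward
+ # while keys still start with the requested prefix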
+ cursor.set_range(to_text(term).encode())
+ while cursor.key().startswith(to_text(prefix).encode()):
+ for key, value in cursor:
+ ret.append((to_text(key), to_native(value)))
+ cursor.next()
+ else:
+ value = txn.get(to_text(term).encode())
+ if value is not None:
+ ret.append(to_native(value))
+
+ return ret
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py
new file mode 100644
index 00000000..f7f843a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/manifold.py
@@ -0,0 +1,278 @@
+# (c) 2018, Arigato Machine Inc.
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
+ lookup: manifold
+ short_description: get credentials from Manifold.co
+ description:
+ - Retrieves resources' credentials from Manifold.co
+ options:
+ _terms:
+ description:
+ - Optional list of resource labels to look up on Manifold.co. If no resources are specified, all
+ matched resources will be returned.
+ type: list
+ elements: string
+ required: False
+ api_token:
+ description:
+ - manifold API token
+ type: string
+ required: True
+ env:
+ - name: MANIFOLD_API_TOKEN
+ project:
+ description:
+ - The project label you want to get the resource for.
+ type: string
+ required: False
+ team:
+ description:
+ - The team label you want to get the resource for.
+ type: string
+ required: False
+'''
+
+EXAMPLES = '''
+ - name: all available resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
+ - name: all available resources for a specific project in specific team
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
+ - name: two specific resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
+ the same environment variable(s), the last one returned by the Manifold API will take precedence.
+ type: dict
+'''
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils import six
+from ansible.utils.display import Display
+from traceback import format_exception
+import json
+import sys
+import os
+
+display = Display()
+
+
+class ApiError(Exception):
+ pass
+
+
+class ManifoldApiClient(object):
+ base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
+ http_agent = 'python-manifold-ansible-1.0.0'
+
+ def __init__(self, token):
+ self._token = token
+
+ def request(self, api, endpoint, *args, **kwargs):
+ """
+ Send a request to API backend and pre-process a response.
+ :param api: API to send a request to
+ :type api: str
+ :param endpoint: API endpoint to fetch data from
+ :type endpoint: str
+ :param args: other args for open_url
+ :param kwargs: other kwargs for open_url
+ :return: server response. JSON response is automatically deserialized.
+ :rtype: dict | list | str
+ """
+
+ default_headers = {
+ 'Authorization': "Bearer {0}".format(self._token),
+ 'Accept': "*/*" # Otherwise server doesn't set content-type header
+ }
+
+ url = self.base_url.format(api=api, endpoint=endpoint)
+
+ headers = default_headers
+ arg_headers = kwargs.pop('headers', None)
+ if arg_headers:
+ headers.update(arg_headers)
+
+ try:
+ display.vvvv('manifold lookup connecting to {0}'.format(url))
+ response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
+ data = response.read()
+ if response.headers.get('content-type') == 'application/json':
+ data = json.loads(data)
+ return data
+ except ValueError:
+ raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
+ except HTTPError as e:
+ raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
+ err=str(e), url=url, response=e.read()))
+ except URLError as e:
+ raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
+ except SSLValidationError as e:
+ raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
+ except ConnectionError as e:
+ raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))
+
+ def get_resources(self, team_id=None, project_id=None, label=None):
+ """
+ Get resources list
+ :param team_id: ID of the Team to filter resources by
+ :type team_id: str
+ :param project_id: ID of the project to filter resources by
+ :type project_id: str
+ :param label: filter resources by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of resources
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'resources'
+ query_params = {}
+
+ if team_id:
+ query_params['team_id'] = team_id
+ if project_id:
+ query_params['project_id'] = project_id
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_teams(self, label=None):
+ """
+ Get teams list
+ :param label: filter teams by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of teams
+ :rtype: list
+ """
+ api = 'identity'
+ endpoint = 'teams'
+ data = self.request(api, endpoint)
+ # Label filtering is not supported by the API; however, this function provides a uniform interface
+ if label:
+ data = list(filter(lambda x: x['body']['label'] == label, data))
+ return data
+
+ def get_projects(self, label=None):
+ """
+ Get projects list
+ :param label: filter projects by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of projects
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'projects'
+ query_params = {}
+
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_credentials(self, resource_id):
+ """
+ Get resource credentials
+ :param resource_id: ID of the resource to filter credentials by
+ :type resource_id: str
+ :return:
+ """
+ api = 'marketplace'
+ endpoint = 'credentials?' + urlencode({'resource_id': resource_id})
+ return self.request(api, endpoint)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, api_token=None, project=None, team=None):
+ """
+ :param terms: a list of resources lookups to run.
+ :param variables: ansible variables active at the time of the lookup
+ :param api_token: API token
+ :param project: optional project label
+ :param team: optional team label
+ :return: a dictionary of resources credentials
+ """
+
+ if not api_token:
+ api_token = os.getenv('MANIFOLD_API_TOKEN')
+ if not api_token:
+ raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')
+
+ try:
+ labels = terms
+ client = ManifoldApiClient(api_token)
+
+ if team:
+ team_data = client.get_teams(team)
+ if len(team_data) == 0:
+ raise AnsibleError("Team '{0}' does not exist".format(team))
+ team_id = team_data[0]['id']
+ else:
+ team_id = None
+
+ if project:
+ project_data = client.get_projects(project)
+ if len(project_data) == 0:
+ raise AnsibleError("Project '{0}' does not exist".format(project))
+ project_id = project_data[0]['id']
+ else:
+ project_id = None
+
+ if len(labels) == 1: # Use server-side filtering if one resource is requested
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
+ else: # Get all resources and optionally filter labels
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id)
+ if labels:
+ resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
+
+ if labels and len(resources_data) < len(labels):
+ fetched_labels = [r['body']['label'] for r in resources_data]
+ not_found_labels = [label for label in labels if label not in fetched_labels]
+ raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))
+
+ credentials = {}
+ cred_map = {}
+ for resource in resources_data:
+ resource_credentials = client.get_credentials(resource['id'])
+ if len(resource_credentials) and resource_credentials[0]['body']['values']:
+ for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
+ label = resource['body']['label']
+ if cred_key in credentials:
+ display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
+ "with label '{new_label}'".format(cred_key=cred_key,
+ old_label=cred_map[cred_key],
+ new_label=label))
+ credentials[cred_key] = cred_val
+ cred_map[cred_key] = label
+
+ ret = [credentials]
+ return ret
+ except ApiError as e:
+ raise AnsibleError('API Error: {0}'.format(str(e)))
+ except AnsibleError as e:
+ raise e
+ except Exception:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py
new file mode 100644
index 00000000..c9a3c34b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios.py
@@ -0,0 +1,121 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios
+short_description: Query Infoblox NIOS objects
+description:
+ - Uses the Infoblox WAPI API to fetch the specified NIOS objects. This lookup
+ supports adding additional keywords to filter the returned data and specify
+ the desired set of returned fields.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+ description: The name of the object to return from NIOS
+ required: True
+ return_fields:
+ description: The list of field names to return for the specified object.
+ filter:
+ description: a dict object that is used to filter the returned objects
+ extattrs:
+ description: a dict object that is used to filter on extattrs
+'''
+
+EXAMPLES = """
+- name: fetch all networkview objects
+ ansible.builtin.set_fact:
+ networkviews: "{{ lookup('community.general.nios', 'networkview',
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: fetch the default dns view
+ ansible.builtin.set_fact:
+ dns_views: "{{ lookup('community.general.nios', 'view', filter={'name': 'default'},
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+# all of the examples below use credentials that are set using env variables
+# export INFOBLOX_HOST=nios01
+# export INFOBLOX_USERNAME=admin
+# export INFOBLOX_PASSWORD=admin
+
+- name: fetch all host records and include extended attributes
+ ansible.builtin.set_fact:
+ host_records: "{{ lookup('community.general.nios', 'record:host', return_fields=['extattrs', 'name', 'view', 'comment']}) }}"
+
+
+- name: use env variables to pass credentials
+ ansible.builtin.set_fact:
+ networkviews: "{{ lookup('community.general.nios', 'networkview') }}"
+
+- name: get a host record
+ ansible.builtin.set_fact:
+ host: "{{ lookup('community.general.nios', 'record:host', filter={'name': 'hostname.ansible.com'}) }}"
+
+- name: get the authoritative zone from a non default dns view
+ ansible.builtin.set_fact:
+ host: "{{ lookup('community.general.nios', 'zone_auth', filter={'fqdn': 'ansible.com', 'view': 'ansible-dns'}) }}"
+"""
+
+RETURN = """
+obj_type:
+ description:
+ - The object type specified in the terms argument
+ type: dictionary
+ contains:
+ obj_field:
+ description:
+ - One or more obj_type fields as specified by return_fields argument or
+ the default set of fields as per the object type
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ obj_type = terms[0]
+ except IndexError:
+ raise AnsibleError('the object_type must be specified')
+
+ return_fields = kwargs.pop('return_fields', None)
+ filter_data = kwargs.pop('filter', {})
+ extattrs = normalize_extattrs(kwargs.pop('extattrs', {}))
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+ res = wapi.get_object(obj_type, filter_data, return_fields=return_fields, extattrs=extattrs)
+ if res is not None:
+ for obj in res:
+ if 'extattrs' in obj:
+ obj['extattrs'] = flatten_extattrs(obj['extattrs'])
+ else:
+ res = []
+ return res
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py
new file mode 100644
index 00000000..20d28523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_ip.py
@@ -0,0 +1,100 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios_next_ip
+short_description: Return the next available IP address for a network
+description:
+ - Uses the Infoblox WAPI API to return the next available IP addresses
+ for a given network CIDR
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+ description: The CIDR network to retrieve the next addresses from
+ required: True
+ num:
+ description: The number of IP addresses to return
+ required: false
+ default: 1
+ exclude:
+ description: List of IP addresses to exclude from the returned IP addresses
+ required: false
+'''
+
+EXAMPLES = """
+- name: return next available IP address for network 192.168.10.0/24
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 3 available IP addresses for network 192.168.10.0/24
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
+ ansible.builtin.set_fact:
+ ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+"""
+
+RETURN = """
+_list:
+ description:
+ - The list of next IP addresses available
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ network = terms[0]
+ except IndexError:
+ raise AnsibleError('missing argument in the form of A.B.C.D/E')
+
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+
+ network_obj = wapi.get_object('network', {'network': network})
+ if network_obj is None:
+ raise AnsibleError('unable to find network object %s' % network)
+
+ num = kwargs.get('num', 1)
+ exclude_ip = kwargs.get('exclude', [])
+
+ try:
+ ref = network_obj[0]['_ref']
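+ # call the WAPI next_available_ip function on the network object reference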
+ avail_ips = wapi.call_func('next_available_ip', ref, {'num': num, 'exclude': exclude_ip})
+ return [avail_ips['ips']]
+ except Exception as exc:
+ raise AnsibleError(to_text(exc))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py
new file mode 100644
index 00000000..e76ff24b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/nios_next_network.py
@@ -0,0 +1,113 @@
+#
+# Copyright 2018 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Unknown (!UNKNOWN)
+lookup: nios_next_network
+short_description: Return the next available network range for a network-container
+description:
+ - Uses the Infoblox WAPI API to return the next available network addresses for
+ a given network CIDR
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ _terms:
+ description: The CIDR of the parent network-container from which to retrieve the next available
+ network.
+ required: True
+ cidr:
+ description:
+ - The CIDR prefix length of the networks to allocate from the container. The requested CIDR must be
+ specified and must be greater (more specific) than the parent container's CIDR.
+ required: True
+ default: 24
+ num:
+ description: The number of network addresses to return from network-container
+ required: false
+ default: 1
+ exclude:
+ description: List of network addresses to exclude from the networks returned from the network-container
+ required: false
+ default: ''
+'''
+
+EXAMPLES = """
+- name: return next available network for network-container 192.168.10.0/24
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25,
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the next 2 available network addresses for network-container 192.168.10.0/24
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, num=2,
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+
+- name: return the available network addresses for network-container 192.168.10.0/24 excluding network range '192.168.10.0/25'
+ ansible.builtin.set_fact:
+ networkaddr: "{{ lookup('community.general.nios_next_network', '192.168.10.0/24', cidr=25, exclude=['192.168.10.0/25'],
+ provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
+"""
+
+RETURN = """
+_list:
+ description:
+ - The list of next network addresses available
+ type: list
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ try:
+ network = terms[0]
+ except IndexError:
+ raise AnsibleError('missing network argument in the form of A.B.C.D/E')
+ # kwargs.get() returns its default when 'cidr' is absent, so no exception handling is needed
+ cidr = kwargs.get('cidr', 24)
+
+ provider = kwargs.pop('provider', {})
+ wapi = WapiLookup(provider)
+ network_obj = wapi.get_object('networkcontainer', {'network': network})
+
+ if network_obj is None:
+ raise AnsibleError('unable to find network-container object %s' % network)
+ num = kwargs.get('num', 1)
+ exclude_ip = kwargs.get('exclude', [])
+
+ try:
+ ref = network_obj[0]['_ref']
+ avail_nets = wapi.call_func('next_available_network', ref, {'cidr': cidr, 'num': num, 'exclude': exclude_ip})
+ return [avail_nets['networks']]
+ except Exception as exc:
+ raise AnsibleError(to_text(exc))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py
new file mode 100644
index 00000000..995037a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright: (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: onepassword
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch field values from 1Password
+ description:
+ - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
+ options:
+ _terms:
+ description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
+ required: True
+ field:
+ description: field to return from each matching item (case-insensitive).
+ default: 'password'
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+ performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
+ You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+ needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+ to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 0.5.3
+'''
+
+EXAMPLES = """
+# These examples only work when already signed in to 1Password
+- name: Retrieve password for KITT when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'KITT')
+
+- name: Retrieve password for Wintermute when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute')
+
+- name: Retrieve username for HAL when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery')
+
+- name: Retrieve password for HAL when not signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword',
+ 'HAL 9000',
+ subdomain='Discovery',
+ master_password=vault_master_password)
+
+- name: Retrieve password for HAL when never signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword',
+ 'HAL 9000',
+ subdomain='Discovery',
+ master_password=vault_master_password,
+ username='tweety@acme.com',
+ secret_key=vault_secret_key)
+"""
+
+RETURN = """
+ _raw:
+ description: field data requested
+ type: list
+ elements: str
+"""
+
+import errno
+import json
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class OnePass(object):
+
+ def __init__(self, path='op'):
+ self.cli_path = path
+ self.config_file_path = os.path.expanduser('~/.op/config')
+ self.logged_in = False
+ self.token = None
+ self.subdomain = None
+ self.username = None
+ self.secret_key = None
+ self.master_password = None
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if not self.master_password:
+ raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')
+
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.subdomain:
+ args = ['signin', self.subdomain, '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
+ self.token = out.strip()
+
+ except AnsibleLookupError:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def get_raw(self, item_id, vault=None):
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ if not self.logged_in:
+ args += [to_bytes('--session=') + self.token]
+ rc, output, dummy = self._run(args)
+ return output
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, field, section) if output != '' else ''
+
+ def full_login(self):
+ if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
+ raise AnsibleLookupError('Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.subdomain),
+ to_bytes(self.username),
+ to_bytes(self.secret_key),
+ '--output=raw',
+ ]
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
+ self.token = out.strip()
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleLookupError(to_text(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, field_name, section_title=None):
+ """
+ Retrieves the desired field from the `op` response payload
+
+ When the item is a `password` type, the password is a key within the `details` key:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "templateUuid": "005",
+ "details": {
+ "notesPlain": "",
+ "password": "foobar",
+ "passwordHistory": [],
+ "sections": [
+ {
+ "name": "linked items",
+ "title": "Related Items"
+ }
+ ]
+ },
+ [...]
+ }
+
+ However, when the item is a `login` type, the password is within a fields array:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "details": {
+ "fields": [
+ {
+ "designation": "username",
+ "name": "username",
+ "type": "T",
+ "value": "foo"
+ },
+ {
+ "designation": "password",
+ "name": "password",
+ "type": "P",
+ "value": "bar"
+ }
+ ],
+ [...]
+ },
+ [...]
+ """
+ data = json.loads(data_json)
+ if section_title is None:
+ # https://github.com/ansible-collections/community.general/pull/1610:
+ # check the details dictionary for `field_name` and return it immediately if it exists
+ # when the entry is a "password" instead of a "login" item, the password field is a key
+ # in the `details` dictionary:
+ if field_name in data['details']:
+ return data['details'][field_name]
+
+ # when the field is not found above, iterate through the fields list in the object details
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return field_data.get('value', '')
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
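+ # section fields in the op JSON output use abbreviated keys: 't' is the field title, 'v' its value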
+ if field_data.get('t', '').lower() == field_name.lower():
+ return field_data.get('v', '')
+ return ''
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ op = OnePass()
+
+ field = kwargs.get('field', 'password')
+ section = kwargs.get('section')
+ vault = kwargs.get('vault')
+ op.subdomain = kwargs.get('subdomain')
+ op.username = kwargs.get('username')
+ op.secret_key = kwargs.get('secret_key')
+ op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
+
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ values.append(op.get_field(term, field, section, vault))
+ return values
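
As a quick illustration of the field resolution described in the _parse_field() docstring above, here is a standalone sketch that mirrors the same logic; the JSON payloads are illustrative stand-ins, not real op output:

    # Standalone sketch, not part of the plugin; the JSON payloads are illustrative only.
    import json

    password_item = '{"details": {"password": "foobar", "fields": []}}'
    login_item = '{"details": {"fields": [{"name": "password", "value": "bar"}]}}'

    def find_field(data_json, field_name):
        data = json.loads(data_json)
        # "password"-type items keep the value directly under details
        if field_name in data['details']:
            return data['details'][field_name]
        # "login"-type items keep it inside the details.fields list
        for field in data['details'].get('fields', []):
            if field.get('name', '').lower() == field_name.lower():
                return field.get('value', '')
        return ''

    print(find_field(password_item, 'password'))  # foobar
    print(find_field(login_item, 'password'))     # bar
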
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
new file mode 100644
index 00000000..76423a23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright: (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: onepassword_raw
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch an entire item from 1Password
+ description:
+      - C(onepassword_raw) wraps the C(op) command line utility to fetch an entire item from 1Password
+ options:
+ _terms:
+ description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve.
+ required: True
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+      performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
+ You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+ needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+ to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 0.5.3
+'''
+
+EXAMPLES = """
+- name: Retrieve all data about Wintermute
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute')
+
+- name: Retrieve all data about Wintermute when not signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
+"""
+
+RETURN = """
+ _raw:
+    description: entire item data requested
+ type: list
+ elements: dict
+"""
+
+import json
+
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ op = OnePass()
+
+ vault = kwargs.get('vault')
+ op.subdomain = kwargs.get('subdomain')
+ op.username = kwargs.get('username')
+ op.secret_key = kwargs.get('secret_key')
+ op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
+
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ data = json.loads(op.get_raw(term, vault))
+ values.append(data)
+ return values
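
For reference, the raw lookup simply JSON-decodes the full op output for each term; a minimal sketch with a stand-in string instead of a real op call:

    # Sketch only: a stand-in for `op get item` output; the real plugin calls OnePass.get_raw().
    import json

    fake_op_output = '{"uuid": "005", "overview": {"title": "Wintermute"}, "details": {"password": "foobar"}}'
    item = json.loads(fake_op_output)
    print(item["overview"]["title"], item["details"]["password"])
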
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py
new file mode 100644
index 00000000..4d0f6461
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/passwordstore.py
@@ -0,0 +1,302 @@
+# (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ lookup: passwordstore
+ author:
+ - Patrick Deelman (!UNKNOWN) <patrick@patrickdeelman.nl>
+ short_description: manage passwords with passwordstore.org's pass utility
+ description:
+ - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
+        It can also retrieve YAML-style keys stored as multiple lines in the password file.
+ options:
+ _terms:
+ description: query key.
+ required: True
+ passwordstore:
+ description: location of the password store.
+ default: '~/.password-store'
+ directory:
+ description: The directory of the password store.
+ env:
+ - name: PASSWORD_STORE_DIR
+ create:
+ description: Create the password if it does not already exist.
+ type: bool
+ default: 'no'
+ overwrite:
+      description: Overwrite the password if it already exists.
+ type: bool
+ default: 'no'
+ umask:
+ description:
+        - Sets the umask for the created .gpg files. The first octal digit must not be greater than 3, otherwise the created files are not user readable.
+ - Note pass' default value is C('077').
+ env:
+ - name: PASSWORD_STORE_UMASK
+ version_added: 1.3.0
+ returnall:
+ description: Return all the content of the password, not only the first line.
+ type: bool
+ default: 'no'
+ subkey:
+ description: Return a specific subkey of the password. When set to C(password), always returns the first line.
+ default: password
+ userpass:
+ description: Specify a password to save, instead of a generated one.
+ length:
+ description: The length of the generated password.
+ type: integer
+ default: 16
+ backup:
+ description: Used with C(overwrite=yes). Backup the previous password in a subkey.
+ type: bool
+ default: 'no'
+ nosymbols:
+      description: Use only alphanumeric characters (no symbols).
+ type: bool
+ default: 'no'
+'''
+EXAMPLES = """
+# Debug is used for examples, BAD IDEA to show passwords on screen
+- name: Basic lookup. Fails if example/test doesn't exist
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
+
+- name: Create pass with a random 16 character password. If the password exists, just return it
+ ansible.builtin.debug:
+ var: mypassword
+ vars:
+ mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
+
+- name: Different size password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
+
+- name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
+
+- name: Create an alphanumeric password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
+
+- name: Return the value of the 'user' subkey from the KV pair 'user: username' in the pass file
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
+
+- name: Return the entire password file content
+ ansible.builtin.set_fact:
+ passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - a password
+ type: list
+ elements: str
+"""
+
+import os
+import subprocess
+import time
+
+from distutils import util
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.utils.encrypt import random_password
+from ansible.plugins.lookup import LookupBase
+from ansible import constants as C
+
+
+# backhacked check_output with input for python 2.7
+# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
+def check_output2(*popenargs, **kwargs):
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+ if 'stderr' in kwargs:
+ raise ValueError('stderr argument not allowed, it will be overridden.')
+ if 'input' in kwargs:
+ if 'stdin' in kwargs:
+ raise ValueError('stdin and input arguments may not both be used.')
+ b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
+ del kwargs['input']
+ kwargs['stdin'] = subprocess.PIPE
+ else:
+ b_inputdata = None
+ process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
+ try:
+ b_out, b_err = process.communicate(b_inputdata)
+ except Exception:
+ process.kill()
+ process.wait()
+ raise
+ retcode = process.poll()
+ if retcode != 0 or \
+ b'encryption failed: Unusable public key' in b_out or \
+ b'encryption failed: Unusable public key' in b_err:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise subprocess.CalledProcessError(
+ retcode,
+ cmd,
+ to_native(b_out + b_err, errors='surrogate_or_strict')
+ )
+ return b_out
+
+
+class LookupModule(LookupBase):
+ def parse_params(self, term):
+        # I went with the "traditional" param followed by space-separated KV pairs.
+ # Waiting for final implementation of lookup parameter parsing.
+ # See: https://github.com/ansible/ansible/issues/12255
+ params = term.split()
+ if len(params) > 0:
+ # the first param is the pass-name
+ self.passname = params[0]
+ # next parse the optional parameters in keyvalue pairs
+ try:
+ for param in params[1:]:
+ name, value = param.split('=', 1)
+ if name not in self.paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ self.paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ # check and convert values
+ try:
+ for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
+ if not isinstance(self.paramvals[key], bool):
+ self.paramvals[key] = util.strtobool(self.paramvals[key])
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ if not isinstance(self.paramvals['length'], int):
+ if self.paramvals['length'].isdigit():
+ self.paramvals['length'] = int(self.paramvals['length'])
+ else:
+ raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
+
+ # Collect pass environment variables from the plugin's parameters.
+ self.env = os.environ.copy()
+
+ # Set PASSWORD_STORE_DIR if directory is set
+ if self.paramvals['directory']:
+ if os.path.isdir(self.paramvals['directory']):
+ self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
+ else:
+ raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
+
+ # Set PASSWORD_STORE_UMASK if umask is set
+ if 'umask' in self.paramvals:
+ if len(self.paramvals['umask']) != 3:
+ raise AnsibleError('Passwordstore umask must have a length of 3.')
+ elif int(self.paramvals['umask'][0]) > 3:
+ raise AnsibleError('Passwordstore umask not allowed (password not user readable).')
+ else:
+ self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask']
+
+ def check_pass(self):
+ try:
+ self.passoutput = to_text(
+ check_output2(["pass", "show", self.passname], env=self.env),
+ errors='surrogate_or_strict'
+ ).splitlines()
+ self.password = self.passoutput[0]
+ self.passdict = {}
+ for line in self.passoutput[1:]:
+ if ':' in line:
+ name, value = line.split(':', 1)
+ self.passdict[name.strip()] = value.strip()
+ except (subprocess.CalledProcessError) as e:
+ if e.returncode != 0 and 'not in the password store' in e.output:
+                # pass returned non-zero and reported 'is not in the password store.';
+                # decide whether to create the entry or raise an error.
+ if not self.paramvals['create']:
+ raise AnsibleError('passname: {0} not found, use create=True'.format(self.passname))
+ else:
+ return False
+ else:
+ raise AnsibleError(e)
+ return True
+
+ def get_newpass(self):
+ if self.paramvals['nosymbols']:
+ chars = C.DEFAULT_PASSWORD_CHARS[:62]
+ else:
+ chars = C.DEFAULT_PASSWORD_CHARS
+
+ if self.paramvals['userpass']:
+ newpass = self.paramvals['userpass']
+ else:
+ newpass = random_password(length=self.paramvals['length'], chars=chars)
+ return newpass
+
+ def update_password(self):
+ # generate new password, insert old lines from current result and return new password
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n'
+ if self.passoutput[1:]:
+ msg += '\n'.join(self.passoutput[1:]) + '\n'
+ if self.paramvals['backup']:
+ msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+ try:
+ check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError(e)
+ return newpass
+
+ def generate_password(self):
+ # generate new file and insert lookup_pass: Generated by Ansible on {date}
+ # use pwgen to generate the password and insert values with pass -m
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+ try:
+ check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError(e)
+ return newpass
+
+ def get_passresult(self):
+ if self.paramvals['returnall']:
+ return os.linesep.join(self.passoutput)
+ if self.paramvals['subkey'] == 'password':
+ return self.password
+ else:
+ if self.paramvals['subkey'] in self.passdict:
+ return self.passdict[self.paramvals['subkey']]
+ else:
+ return None
+
+ def run(self, terms, variables, **kwargs):
+ result = []
+ self.paramvals = {
+ 'subkey': 'password',
+ 'directory': variables.get('passwordstore'),
+ 'create': False,
+ 'returnall': False,
+ 'overwrite': False,
+ 'nosymbols': False,
+ 'userpass': '',
+ 'length': 16,
+ 'backup': False,
+ }
+
+ for term in terms:
+ self.parse_params(term) # parse the input into paramvals
+ if self.check_pass(): # password exists
+ if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
+ result.append(self.update_password())
+ else:
+ result.append(self.get_passresult())
+ else: # password does not exist
+ if self.paramvals['create']:
+ result.append(self.generate_password())
+ return result
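
The parse_params() method above accepts a pass-name followed by space-separated key=value pairs; a small standalone sketch of that term format, simplified and without the type conversion the plugin performs:

    # Simplified sketch of the term format, e.g. "example/test create=true length=42".
    def parse_term(term):
        parts = term.split()
        passname, params = parts[0], {}
        for pair in parts[1:]:
            key, value = pair.split('=', 1)
            params[key] = value
        return passname, params

    print(parse_term('example/test create=true length=42'))
    # ('example/test', {'create': 'true', 'length': '42'})
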
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py
new file mode 100644
index 00000000..67d35c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/redis.py
@@ -0,0 +1,117 @@
+# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: redis
+ author:
+ - Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ - Ansible Core Team
+ short_description: fetch data from Redis
+ description:
+ - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
+ requirements:
+ - redis (python library https://github.com/andymccurdy/redis-py/)
+ options:
+ _terms:
+ description: list of keys to query
+ host:
+ description: location of Redis host
+ default: '127.0.0.1'
+ env:
+ - name: ANSIBLE_REDIS_HOST
+ ini:
+ - section: lookup_redis
+ key: host
+ port:
+      description: port on which Redis is listening
+ default: 6379
+ type: int
+ env:
+ - name: ANSIBLE_REDIS_PORT
+ ini:
+ - section: lookup_redis
+ key: port
+ socket:
+      description: path to the socket on which to query Redis. When set, this option overrides the host and port options.
+ type: path
+ env:
+ - name: ANSIBLE_REDIS_SOCKET
+ ini:
+ - section: lookup_redis
+ key: socket
+'''
+
+EXAMPLES = """
+- name: query redis for somekey (default or configured settings used)
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'somekey') }}"
+
+- name: query redis for list of keys and non-default host and port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}"
+ loop: '{{list_of_redis_keys}}'
+
+- name: use list directly
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}"
+
+- name: use list directly with a socket
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
+
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in Redis
+ type: list
+ elements: str
+"""
+
+import os
+
+HAVE_REDIS = False
+try:
+ import redis
+ HAVE_REDIS = True
+except ImportError:
+ pass
+
+from ansible.module_utils._text import to_text
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ if not HAVE_REDIS:
+ raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ # setup connection
+ host = self.get_option('host')
+ port = self.get_option('port')
+ socket = self.get_option('socket')
+ if socket is None:
+ conn = redis.Redis(host=host, port=port)
+ else:
+ conn = redis.Redis(unix_socket_path=socket)
+
+ ret = []
+ for term in terms:
+ try:
+ res = conn.get(term)
+ if res is None:
+ res = ""
+ ret.append(to_text(res))
+ except Exception as e:
+ # connection failed or key not found
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ return ret
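
Outside of Ansible, the same connection logic can be exercised directly with redis-py; a minimal sketch assuming the redis package is installed, with an illustrative host, port and socket path:

    # Sketch assuming redis-py is installed; host, port and socket path are illustrative.
    import redis

    conn = redis.Redis(host='127.0.0.1', port=6379)                # TCP connection
    # conn = redis.Redis(unix_socket_path='/var/tmp/redis.sock')  # or a unix socket
    value = conn.get('somekey')                                    # None when the key is missing
    print(value.decode('utf-8') if value is not None else '')
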
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py
new file mode 100644
index 00000000..cfeb61f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/shelvefile.py
@@ -0,0 +1,90 @@
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ lookup: shelvefile
+ author: Alejandro Guirao (!UNKNOWN) <lekumberri@gmail.com>
+ short_description: read keys from Python shelve file
+ description:
+ - Read keys from Python shelve file.
+ options:
+ _terms:
+ description: sets of key value pairs of parameters
+ key:
+ description: key to query
+ required: True
+ file:
+ description: path to shelve file
+ required: True
+'''
+
+EXAMPLES = """
+- name: retrieve a string value corresponding to a key inside a Python shelve file
+  ansible.builtin.debug: msg="{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
+"""
+
+RETURN = """
+_list:
+ description: value(s) of key(s) in shelve file(s)
+ type: list
+ elements: str
+"""
+import shelve
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def read_shelve(self, shelve_filename, key):
+ """
+ Read the value of "key" from a shelve file
+ """
+ d = shelve.open(to_bytes(shelve_filename))
+ res = d.get(key, None)
+ d.close()
+ return res
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ ret = []
+
+ for term in terms:
+ paramvals = {"file": None, "key": None}
+ params = term.split()
+
+ try:
+ for param in params:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ paramvals[name] = value
+
+ except (ValueError, AssertionError) as e:
+ # In case "file" or "key" are not present
+ raise AnsibleError(e)
+
+ key = paramvals['key']
+
+ # Search also in the role/files directory and in the playbook directory
+ shelvefile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+
+ if shelvefile:
+ res = self.read_shelve(shelvefile, key)
+ if res is None:
+ raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile))
+ # Convert the value read to string
+ ret.append(to_text(res))
+ break
+ else:
+ raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file'])
+
+ return ret
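
To try the lookup locally, a shelve file matching the example above can be created with a few lines of Python; the file name and key are illustrative only:

    # Sketch: create a shelve file the lookup could then read with "file=... key=...".
    import shelve

    with shelve.open('path_to_some_shelve_file.db') as db:
        db['key_to_retrieve'] = 'some value'
        print(db.get('key_to_retrieve'))  # some value
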
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py
new file mode 100644
index 00000000..32c0460c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/lookup/tss.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+lookup: tss
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic Secret Server
+version_added: 1.0.0
+description:
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
+ Server using token authentication with I(username) and I(password) on
+ the REST API at I(base_url).
+requirements:
+ - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
+options:
+ _terms:
+ description: The integer ID of the secret.
+ required: true
+ type: int
+ base_url:
+ description: The base URL of the server, e.g. C(https://localhost/SecretServer).
+ env:
+ - name: TSS_BASE_URL
+ ini:
+ - section: tss_lookup
+ key: base_url
+ required: true
+ username:
+ description: The username with which to request the OAuth2 Access Grant.
+ env:
+ - name: TSS_USERNAME
+ ini:
+ - section: tss_lookup
+ key: username
+ required: true
+ password:
+ description: The password associated with the supplied username.
+ env:
+ - name: TSS_PASSWORD
+ ini:
+ - section: tss_lookup
+ key: password
+ required: true
+ api_path_uri:
+ default: /api/v1
+ description: The path to append to the base URL to form a valid REST
+ API request.
+ env:
+ - name: TSS_API_PATH_URI
+ required: false
+ token_path_uri:
+ default: /oauth2/token
+ description: The path to append to the base URL to form a valid OAuth2
+ Access Grant request.
+ env:
+ - name: TSS_TOKEN_PATH_URI
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - The JSON responses to C(GET /secrets/{id}).
+ - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: "{{ lookup('community.general.tss', 1) }}"
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
+"""
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+
+sdk_is_missing = False
+
+try:
+ from thycotic.secrets.server import (
+ SecretServer,
+ SecretServerAccessError,
+ SecretServerError,
+ )
+except ImportError:
+ sdk_is_missing = True
+
+from ansible.utils.display import Display
+from ansible.plugins.lookup import LookupBase
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def Client(server_parameters):
+ return SecretServer(**server_parameters)
+
+ def run(self, terms, variables, **kwargs):
+ if sdk_is_missing:
+ raise AnsibleError("python-tss-sdk must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ secret_server = LookupModule.Client(
+ {
+ "base_url": self.get_option("base_url"),
+ "username": self.get_option("username"),
+ "password": self.get_option("password"),
+ "api_path_uri": self.get_option("api_path_uri"),
+ "token_path_uri": self.get_option("token_path_uri"),
+ }
+ )
+ result = []
+
+ for term in terms:
+ display.debug("tss_lookup term: %s" % term)
+ try:
+ id = int(term)
+ display.vvv(u"Secret Server lookup of Secret with ID %d" % id)
+ result.append(secret_server.get_secret_json(id))
+ except ValueError:
+ raise AnsibleOptionsError("Secret ID must be an integer")
+ except SecretServerError as error:
+ raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+ return result
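
The secret returned per term is the raw JSON from C(GET /secrets/{id}); its items list is commonly reshaped into a slug-to-value mapping, as the items2dict filter in the EXAMPLES section does. The same reshaping in plain Python, with illustrative data:

    # Illustrative data only; mirrors the items2dict filter used in the EXAMPLES section.
    secret = {"items": [{"slug": "username", "itemValue": "user1"},
                        {"slug": "password", "itemValue": "p4ss"}]}
    by_slug = {item["slug"]: item["itemValue"] for item in secret["items"]}
    print(by_slug["password"])  # p4ss
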
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py
new file mode 100644
index 00000000..62feb354
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_mount.py
@@ -0,0 +1,90 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is based on
+# Lib/posixpath.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+import os
+
+
+def ismount(path):
+ """Test whether a path is a mount point
+ This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround
+ until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python
+ that may not have the upstream fix.
+ https://github.com/ansible/ansible-modules-core/issues/2186
+ http://bugs.python.org/issue2466
+ """
+ try:
+ s1 = os.lstat(path)
+ except (OSError, ValueError):
+ # It doesn't exist -- so not a mount point. :-)
+ return False
+ else:
+ # A symlink can never be a mount point
+ if os.path.stat.S_ISLNK(s1.st_mode):
+ return False
+
+ if isinstance(path, bytes):
+ parent = os.path.join(path, b'..')
+ else:
+ parent = os.path.join(path, '..')
+ parent = os.path.realpath(parent)
+ try:
+ s2 = os.lstat(parent)
+ except (OSError, ValueError):
+ return False
+
+ dev1 = s1.st_dev
+ dev2 = s2.st_dev
+ if dev1 != dev2:
+ return True # path/.. on a different device as path
+ ino1 = s1.st_ino
+ ino2 = s2.st_ino
+ if ino1 == ino2:
+ return True # path/.. is the same i-node as path
+ return False
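
Usage of the vendored helper is straightforward; a minimal sketch assuming the collection is importable, with illustrative paths:

    # Sketch assuming the collection is on the Python path; the paths are illustrative.
    from ansible_collections.community.general.plugins.module_utils._mount import ismount

    print(ismount('/proc'))             # True on most Linux systems
    print(ismount('/tmp/nonexistent'))  # False: the path does not exist
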
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py
new file mode 100644
index 00000000..d80506bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_netapp.py
@@ -0,0 +1,747 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+import ssl
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+HAS_SF_SDK = False
+SF_BYTE_MAP = dict(
+ # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
+ bytes=1,
+ b=1,
+ kb=1000,
+ mb=1000 ** 2,
+ gb=1000 ** 3,
+ tb=1000 ** 4,
+ pb=1000 ** 5,
+ eb=1000 ** 6,
+ zb=1000 ** 7,
+ yb=1000 ** 8
+)
+
+POW2_BYTE_MAP = dict(
+ # Here, 1 kb = 1024
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+)
+
+try:
+ from solidfire.factory import ElementFactory
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
+def has_netapp_lib():
+ return HAS_NETAPP_LIB
+
+
+def has_sf_sdk():
+ return HAS_SF_SDK
+
+
+def na_ontap_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True),
+ https=dict(required=False, type='bool', default=False),
+ validate_certs=dict(required=False, type='bool', default=True),
+ http_port=dict(required=False, type='int'),
+ ontapi=dict(required=False, type='int'),
+ use_rest=dict(required=False, type='str', default='Auto', choices=['Never', 'Always', 'Auto'])
+ )
+
+
+def ontap_sf_host_argument_spec():
+
+ return dict(
+ hostname=dict(required=True, type='str'),
+ username=dict(required=True, type='str', aliases=['user']),
+ password=dict(required=True, type='str', aliases=['pass'], no_log=True)
+ )
+
+
+def aws_cvs_host_argument_spec():
+
+ return dict(
+ api_url=dict(required=True, type='str'),
+ validate_certs=dict(required=False, type='bool', default=True),
+ api_key=dict(required=True, type='str', no_log=True),
+ secret_key=dict(required=True, type='str', no_log=True)
+ )
+
+
+def create_sf_connection(module, port=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_SF_SDK and hostname and username and password:
+ try:
+ return_val = ElementFactory.create(hostname, username, password, port=port)
+ return return_val
+ except Exception:
+ raise Exception("Unable to create SF connection")
+ else:
+ module.fail_json(msg="the python SolidFire SDK module is required")
+
+
+def setup_na_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ https = module.params['https']
+ validate_certs = module.params['validate_certs']
+ port = module.params['http_port']
+ version = module.params['ontapi']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ if version:
+ minor = version
+ else:
+ minor = 110
+ server.set_api_version(major=1, minor=minor)
+ # default is HTTP
+ if https:
+ if port is None:
+ port = 443
+ transport_type = 'HTTPS'
+ # HACK to bypass certificate verification
+ if validate_certs is False:
+ if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ else:
+ if port is None:
+ port = 80
+ transport_type = 'HTTP'
+ server.set_transport_type(transport_type)
+ server.set_port(port)
+ server.set_server_type('FILER')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def setup_ontap_zapi(module, vserver=None):
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ if HAS_NETAPP_LIB:
+ # set up zapi
+ server = zapi.NaServer(hostname)
+ server.set_username(username)
+ server.set_password(password)
+ if vserver:
+ server.set_vserver(vserver)
+ # Todo : Replace hard-coded values with configurable parameters.
+ server.set_api_version(major=1, minor=110)
+ server.set_port(80)
+ server.set_server_type('FILER')
+ server.set_transport_type('HTTP')
+ return server
+ else:
+ module.fail_json(msg="the python NetApp-Lib module is required")
+
+
+def eseries_host_argument_spec():
+ """Retrieve a base argument specification common to all NetApp E-Series modules"""
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_username=dict(type='str', required=True),
+ api_password=dict(type='str', required=True, no_log=True),
+ api_url=dict(type='str', required=True),
+ ssid=dict(type='str', required=False, default='1'),
+ validate_certs=dict(type='bool', required=False, default=True)
+ ))
+ return argument_spec
+
+
+class NetAppESeriesModule(object):
+ """Base class for all NetApp E-Series modules.
+
+ Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
+ verification, http requests, secure http redirection for embedded web services, and logging setup.
+
+ Be sure to add the following lines in the module's documentation section:
+ extends_documentation_fragment:
+ - netapp.eseries
+
+ :param dict(dict) ansible_options: dictionary of ansible option definitions
+ :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
+ :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
+ :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
+ :param list(list) required_if: list containing list(s) containing the option, the option value, and then
+ a list of required options. (optional)
+ :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
+ :param list(list) required_together: list containing list(s) of options that are required together. (optional)
+ :param bool log_requests: controls whether to log each request (default: True)
+ """
+ DEFAULT_TIMEOUT = 60
+ DEFAULT_SECURE_PORT = "8443"
+ DEFAULT_REST_API_PATH = "devmgr/v2/"
+ DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
+ DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
+ "netapp-client-type": "Ansible-%s" % ansible_version}
+ HTTP_AGENT = "Ansible / %s" % ansible_version
+ SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
+ pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
+
+ def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
+ mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
+ log_requests=True):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(ansible_options)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive, required_if=required_if,
+ required_one_of=required_one_of, required_together=required_together)
+
+ args = self.module.params
+ self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.log_requests = log_requests
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+
+ if not self.url.endswith("/"):
+ self.url += "/"
+
+ self.is_embedded_mode = None
+ self.is_web_services_valid_cache = None
+
+ def _check_web_services_version(self):
+ """Verify proxy or embedded web services meets minimum version required for module.
+
+ The minimum required web services version is evaluated against version supplied through the web services rest
+ api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.
+
+ This helper function will update the supplied api url if secure http is not used for embedded web services
+
+ :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
+ """
+ if not self.is_web_services_valid_cache:
+
+ url_parts = urlparse(self.url)
+ if not url_parts.scheme or not url_parts.netloc:
+ self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+ if url_parts.scheme not in ["http", "https"]:
+ self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+ self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
+
+ if rc != 200:
+ self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+ self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ major, minor, other, revision = data["version"].split(".")
+ minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")
+
+ if not (major > minimum_major or
+ (major == minimum_major and minor > minimum_minor) or
+ (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+ self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
+ " Version required: [%s]." % (data["version"], self.web_services_version))
+
+ self.module.log("Web services rest api version met the minimum required version.")
+ self.is_web_services_valid_cache = True
+
+ def is_embedded(self):
+ """Determine whether web services server is the embedded web services.
+
+ If web services about endpoint fails based on an URLError then the request will be attempted again using
+ secure http.
+
+ :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
+ :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
+ """
+ self._check_web_services_version()
+
+ if self.is_embedded_mode is None:
+ about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+ try:
+ rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+ self.is_embedded_mode = not data["runningAsProxy"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+ % (self.ssid, to_native(error)))
+
+ return self.is_embedded_mode
+
+ def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+ :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
+ full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
+ :param data: data required for the request (data may be json or any python structured data)
+ :param str method: request method such as GET, POST, DELETE.
+ :param dict headers: dictionary containing request headers.
+ :param bool ignore_errors: forces the request to ignore any raised exceptions.
+ """
+ self._check_web_services_version()
+
+ if headers is None:
+ headers = self.DEFAULT_HEADERS
+
+ if not isinstance(data, str) and headers["Content-Type"] == "application/json":
+ data = json.dumps(data)
+
+ if path.startswith("/"):
+ path = path[1:]
+ request_url = self.url + self.DEFAULT_REST_API_PATH + path
+
+        if self.log_requests:
+ self.module.log(pformat(dict(url=request_url, data=data, method=method)))
+
+ return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
+ timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
+
+
+def create_multipart_formdata(files, fields=None, send_8kb=False):
+ """Create the data for a multipart/form request.
+
+ :param list(list) files: list of lists each containing (name, filename, path).
+ :param list(list) fields: list of lists each containing (key, value).
+ :param bool send_8kb: only sends the first 8kb of the files (default: False).
+ """
+ boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+ data_parts = list()
+ data = None
+
+ if six.PY2: # Generate payload for Python 2
+ newline = "\r\n"
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"' % key,
+ "",
+ value])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend(["--%s" % boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+ "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
+ "",
+ value])
+ data_parts.extend(["--%s--" % boundary, ""])
+ data = newline.join(data_parts)
+
+ else:
+ newline = six.b("\r\n")
+ if fields is not None:
+ for key, value in fields:
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"' % key),
+ six.b(""),
+ six.b(value)])
+
+ for name, filename, path in files:
+ with open(path, "rb") as fh:
+ value = fh.read(8192) if send_8kb else fh.read()
+
+ data_parts.extend([six.b("--%s" % boundary),
+ six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+ six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
+ six.b(""),
+ value])
+ data_parts.extend([six.b("--%s--" % boundary), b""])
+ data = newline.join(data_parts)
+
+ headers = {
+ "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+ "Content-Length": str(len(data))}
+
+ return headers, data
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+ force=False, last_mod_time=None, timeout=10, validate_certs=True,
+ url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+ """Issue an HTTP request to a url, retrieving an optional JSON response."""
+
+ if headers is None:
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
+ headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+ if not http_agent:
+ http_agent = "Ansible / %s" % ansible_version
+
+ try:
+ r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+ force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+ url_username=url_username, url_password=url_password, http_agent=http_agent,
+ force_basic_auth=force_basic_auth)
+ except HTTPError as err:
+ r = err.fp
+
+ try:
+ raw_data = r.read()
+ if raw_data:
+ data = json.loads(raw_data)
+ else:
+ raw_data = None
+ except Exception:
+ if ignore_errors:
+ pass
+ else:
+ raise Exception(raw_data)
+
+ resp_code = r.getcode()
+
+ if resp_code >= 400 and not ignore_errors:
+ raise Exception(resp_code, data)
+ else:
+ return resp_code, data
+
+
+def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
+ category="Information", event="setup", autosupport="false"):
+ ems_log = zapi.NaElement('ems-autosupport-log')
+ # Host name invoking the API.
+ ems_log.add_new_child("computer-name", name)
+ # ID of event. A user defined event-id, range [0..2^32-2].
+ ems_log.add_new_child("event-id", id)
+ # Name of the application invoking the API.
+ ems_log.add_new_child("event-source", source)
+ # Version of application invoking the API.
+ ems_log.add_new_child("app-version", version)
+ # Application defined category of the event.
+ ems_log.add_new_child("category", category)
+ # Description of event to log. An application defined message to log.
+ ems_log.add_new_child("event-description", event)
+ ems_log.add_new_child("log-level", "6")
+ ems_log.add_new_child("auto-support", autosupport)
+ server.invoke_successfully(ems_log, True)
+
+
+def get_cserver_zapi(server):
+ vserver_info = zapi.NaElement('vserver-get-iter')
+ query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
+ query = zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+ result = server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+ attribute_list = result.get_child_by_name('attributes-list')
+ vserver_list = attribute_list.get_child_by_name('vserver-info')
+ return vserver_list.get_child_content('vserver-name')
+
+
+def get_cserver(connection, is_rest=False):
+ if not is_rest:
+ return get_cserver_zapi(connection)
+
+ params = {'fields': 'type'}
+ api = "private/cli/vserver"
+ json, error = connection.get(api, params)
+ if json is None or error is not None:
+ # exit if there is an error or no data
+ return None
+ vservers = json.get('records')
+ if vservers is not None:
+ for vserver in vservers:
+ if vserver['type'] == 'admin': # cluster admin
+ return vserver['vserver']
+ if len(vservers) == 1: # assume vserver admin
+ return vservers[0]['vserver']
+
+ return None
+
+
+class OntapRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.username = self.module.params['username']
+ self.password = self.module.params['password']
+ self.hostname = self.module.params['hostname']
+ self.use_rest = self.module.params['use_rest']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.hostname + '/api/'
+ self.errors = list()
+ self.debug_logs = list()
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None, return_status_code=False):
+        ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+ except ValueError:
+ return None, None
+ error = json.get('error')
+ return json, error
+
+ try:
+ response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
+ content = response.content # for debug purposes
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ response.raise_for_status()
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ self.log_error(status_code, 'HTTP error: %s' % err)
+ error_details = str(err)
+ # If an error was reported in the json payload, it is handled below
+ except requests.exceptions.ConnectionError as err:
+ self.log_error(status_code, 'Connection error: %s' % err)
+ error_details = str(err)
+ except Exception as err:
+ self.log_error(status_code, 'Other error: %s' % err)
+ error_details = str(err)
+ if json_error is not None:
+ self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
+ error_details = json_error
+ self.log_debug(status_code, content)
+ if return_status_code:
+ return status_code, error_details
+ return json_dict, error_details
+
+ def get(self, api, params):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def _is_rest(self, used_unsupported_rest_properties=None):
+ if self.use_rest == "Always":
+ if used_unsupported_rest_properties:
+ error = "REST API currently does not support '%s'" % \
+ ', '.join(used_unsupported_rest_properties)
+ return True, error
+ else:
+ return True, None
+ if self.use_rest == 'Never' or used_unsupported_rest_properties:
+ # force ZAPI if requested or if some parameter requires it
+ return False, None
+ method = 'HEAD'
+ api = 'cluster/software'
+ status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
+ if status_code == 200:
+ return True, None
+ return False, None
+
+ def is_rest(self, used_unsupported_rest_properties=None):
+ ''' only return error if there is a reason to '''
+ use_rest, error = self._is_rest(used_unsupported_rest_properties)
+ if used_unsupported_rest_properties is None:
+ return use_rest
+ return use_rest, error
+
+ def log_error(self, status_code, message):
+ self.errors.append(message)
+ self.debug_logs.append((status_code, message))
+
+ def log_debug(self, status_code, content):
+ self.debug_logs.append((status_code, content))
+
+
+class AwsCvsRestAPI(object):
+ def __init__(self, module, timeout=60):
+ self.module = module
+ self.api_key = self.module.params['api_key']
+ self.secret_key = self.module.params['secret_key']
+ self.api_url = self.module.params['api_url']
+ self.verify = self.module.params['validate_certs']
+ self.timeout = timeout
+ self.url = 'https://' + self.api_url + '/v1/'
+ self.check_required_library()
+
+ def check_required_library(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'))
+
+ def send_request(self, method, api, params, json=None):
+        ''' send http request and process response, including error conditions '''
+ url = self.url + api
+ status_code = None
+ content = None
+ json_dict = None
+ json_error = None
+ error_details = None
+ headers = {
+ 'Content-type': "application/json",
+ 'api-key': self.api_key,
+ 'secret-key': self.secret_key,
+ 'Cache-Control': "no-cache",
+ }
+
+ def get_json(response):
+ ''' extract json, and error message if present '''
+ try:
+ json = response.json()
+
+ except ValueError:
+ return None, None
+ success_code = [200, 201, 202]
+ if response.status_code not in success_code:
+ error = json.get('message')
+ else:
+ error = None
+ return json, error
+ try:
+ response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
+ status_code = response.status_code
+ # If the response was successful, no Exception will be raised
+ json_dict, json_error = get_json(response)
+ except requests.exceptions.HTTPError as err:
+ __, json_error = get_json(response)
+ if json_error is None:
+ error_details = str(err)
+ except requests.exceptions.ConnectionError as err:
+ error_details = str(err)
+ except Exception as err:
+ error_details = str(err)
+        # If an error was reported in the json payload, surface it as the error details
+        if json_error is not None:
+            error_details = json_error
+
+        return json_dict, error_details
+
+ def get(self, api, params=None):
+ method = 'GET'
+ return self.send_request(method, api, params)
+
+ def post(self, api, data, params=None):
+ method = 'POST'
+ return self.send_request(method, api, params, json=data)
+
+ def patch(self, api, data, params=None):
+ method = 'PATCH'
+ return self.send_request(method, api, params, json=data)
+
+ def put(self, api, data, params=None):
+ method = 'PUT'
+ return self.send_request(method, api, params, json=data)
+
+ def delete(self, api, data, params=None):
+ method = 'DELETE'
+ return self.send_request(method, api, params, json=data)
+
+ def get_state(self, jobId):
+ """ Method to get the state of the job """
+ method = 'GET'
+ response, status_code = self.get('Jobs/%s' % jobId)
+        # poll the job endpoint until it reports completion
+        while str(response['state']) != 'done':
+ response, status_code = self.get('Jobs/%s' % jobId)
+ return 'done'
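+
+# Illustrative usage sketch (not part of the upstream class; the 'FileSystems'
+# endpoint and the 'jobs'/'jobId' response keys are assumptions): a module
+# would typically issue a request through the verb helpers above and then
+# poll the returned job, e.g.
+#
+#   rest_api = AwsCvsRestAPI(module)
+#   response, error = rest_api.post('FileSystems', data=payload)
+#   if error is None:
+#       rest_api.get_state(response['jobs'][0]['jobId'])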
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py
new file mode 100644
index 00000000..5ccd1482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/_ovirt.py
@@ -0,0 +1,871 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import inspect
+import os
+import time
+
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.cloud import CloudRetry
+from ansible.module_utils.common._collections_compat import Mapping
+
+try:
+ from enum import Enum # enum is a ovirtsdk4 requirement
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.version as sdk_version
+ import ovirtsdk4.types as otypes
+ HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.3.0')
+except ImportError:
+ HAS_SDK = False
+
+
+BYTES_MAP = {
+ 'kib': 2**10,
+ 'mib': 2**20,
+ 'gib': 2**30,
+ 'tib': 2**40,
+ 'pib': 2**50,
+}
+
+
+def check_sdk(module):
+ if not HAS_SDK:
+ module.fail_json(
+ msg='ovirtsdk4 version 4.3.0 or higher is required for this module'
+ )
+
+
+def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
+ """
+ Convert SDK Struct type into dictionary.
+ """
+ res = {}
+
+ def resolve_href(value):
+ # Fetch nested values of struct:
+ try:
+ value = connection.follow_link(value)
+ except sdk.Error:
+ value = None
+ nested_obj = dict(
+ (attr, convert_value(getattr(value, attr)))
+ for attr in attributes if getattr(value, attr, None) is not None
+ )
+ nested_obj['id'] = getattr(value, 'id', None)
+ nested_obj['href'] = getattr(value, 'href', None)
+ return nested_obj
+
+    def remove_underscore(val):
+        # Strip leading underscores from private SDK attribute names:
+        if val.startswith('_'):
+            val = remove_underscore(val[1:])
+        return val
+
+ def convert_value(value):
+ nested = False
+
+ if isinstance(value, sdk.Struct):
+ if not fetch_nested or not value.href:
+ return get_dict_of_struct(value)
+ return resolve_href(value)
+
+ elif isinstance(value, Enum) or isinstance(value, datetime):
+ return str(value)
+ elif isinstance(value, list) or isinstance(value, sdk.List):
+ if isinstance(value, sdk.List) and fetch_nested and value.href:
+ try:
+ value = connection.follow_link(value)
+ nested = True
+ except sdk.Error:
+ value = []
+
+ ret = []
+ for i in value:
+ if isinstance(i, sdk.Struct):
+ if not nested and fetch_nested and i.href:
+ ret.append(resolve_href(i))
+ elif not nested:
+ ret.append(get_dict_of_struct(i))
+ else:
+ nested_obj = dict(
+ (attr, convert_value(getattr(i, attr)))
+ for attr in attributes if getattr(i, attr, None)
+ )
+ nested_obj['id'] = getattr(i, 'id', None)
+ ret.append(nested_obj)
+ elif isinstance(i, Enum):
+ ret.append(str(i))
+ else:
+ ret.append(i)
+ return ret
+ else:
+ return value
+
+ if struct is not None:
+ for key, value in struct.__dict__.items():
+ if value is None:
+ continue
+
+ key = remove_underscore(key)
+ res[key] = convert_value(value)
+
+ return res
+
+
+def engine_version(connection):
+ """
+ Return string representation of oVirt engine version.
+ """
+ engine_api = connection.system_service().get()
+ engine_version = engine_api.product_info.version
+ return '%s.%s' % (engine_version.major, engine_version.minor)
+
+
+def create_connection(auth):
+ """
+ Create a connection to Python SDK, from task `auth` parameter.
+    If the user doesn't have an SSO token, the `auth` dictionary must contain:
+    url, username, password
+
+    If the user has an SSO token, the `auth` dictionary must contain:
+    url, token
+
+    The `ca_file` parameter is mandatory for a secure connection; for an
+    insecure connection, insecure=True must be passed instead.
+
+ :param auth: dictionary which contains needed values for connection creation
+ :return: Python SDK connection
+ """
+
+ url = auth.get('url')
+ if url is None and auth.get('hostname') is not None:
+ url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
+
+ return sdk.Connection(
+ url=url,
+ username=auth.get('username'),
+ password=auth.get('password'),
+ ca_file=auth.get('ca_file', None),
+ insecure=auth.get('insecure', False),
+ token=auth.get('token', None),
+ kerberos=auth.get('kerberos', None),
+ headers=auth.get('headers', None),
+ )
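+
+# Illustrative only (values are placeholders, not real credentials): a task's
+# `auth` dictionary commonly looks like one of the following.
+#
+#   auth = {'url': 'https://engine.example.com/ovirt-engine/api',
+#           'username': 'admin@internal', 'password': 'secret', 'insecure': True}
+#
+#   auth = {'hostname': 'engine.example.com', 'token': '<sso-token>',
+#           'ca_file': '/etc/pki/ovirt-engine/ca.pem'}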
+
+
+def convert_to_bytes(param):
+ """
+    Convert a size value with an IEC unit suffix (KiB, MiB, GiB, ...) to bytes.
+    A plain number without a suffix is treated as KiB.
+
+    :param param: value to be converted
+    """
+ if param is None:
+ return None
+
+ # Get rid of whitespaces:
+ param = ''.join(param.split())
+
+ # Convert to bytes:
+ if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
+ return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
+ elif param.isdigit():
+ return int(param) * 2**10
+ else:
+ raise ValueError(
+ "Unsupported value(IEC supported): '{value}'".format(value=param)
+ )
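+
+# Illustrative expected values (assumptions based on BYTES_MAP above):
+#
+#   convert_to_bytes('1KiB')   -> 1024
+#   convert_to_bytes('2 GiB')  -> 2 * 2**30
+#   convert_to_bytes('512')    -> 512 * 2**10   # bare numbers are treated as KiB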
+
+
+def follow_link(connection, link):
+ """
+ This method returns the entity of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: entity which link points to
+ """
+
+ if link:
+ return connection.follow_link(link)
+ else:
+ return None
+
+
+def get_link_name(connection, link):
+ """
+ This method returns the name of the element which link points to.
+
+ :param connection: connection to the Python SDK
+ :param link: link of the entity
+ :return: name of the entity, which link points to
+ """
+
+ if link:
+ return connection.follow_link(link).name
+ else:
+ return None
+
+
+def equal(param1, param2, ignore_case=False):
+ """
+    Compare two parameters and return whether they are equal.
+    The comparison is skipped if the first parameter is None, so we don't
+    compare values the user didn't specify in their task.
+
+ :param param1: user inputted parameter
+ :param param2: value of entity parameter
+ :return: True if parameters are equal or first parameter is None, otherwise False
+ """
+ if param1 is not None:
+ if ignore_case:
+ return param1.lower() == param2.lower()
+ return param1 == param2
+ return True
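+
+# Illustrative examples: equal(None, 'x') is True (nothing to compare),
+# equal('A', 'a', ignore_case=True) is True, and equal('A', 'a') is False.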
+
+
+def search_by_attributes(service, list_params=None, **kwargs):
+ """
+    Search for the entity by attributes. Nested entities don't support search
+    via REST, so when searching for a nested entity we list all entities
+    and filter them by the specified attributes.
+ """
+ list_params = list_params or {}
+    # Check if the 'list' method supports search (look for the 'search' parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+            # The name must be wrapped in double quotes, because some oVirt resources can be created with spaces in their names.
+ search=' and '.join('{0}="{1}"'.format(k, v) for k, v in kwargs.items()),
+ **list_params
+ )
+ else:
+ res = [
+ e for e in service.list(**list_params) if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
+
+
+def search_by_name(service, name, **kwargs):
+ """
+ Search for the entity by its name. Nested entities don't support search
+    via REST, so when searching for a nested entity we list all entities
+    and filter them by name.
+
+ :param service: service of the entity
+ :param name: name of the entity
+ :return: Entity object returned by Python SDK
+ """
+    # Check if the 'list' method supports search (look for the 'search' parameter):
+ if 'search' in inspect.getargspec(service.list)[0]:
+ res = service.list(
+            # The name must be wrapped in double quotes, because some oVirt resources can be created with spaces in their names.
+ search='name="{name}"'.format(name=name)
+ )
+ else:
+ res = [e for e in service.list() if e.name == name]
+
+ if kwargs:
+ res = [
+ e for e in service.list() if len([
+ k for k, v in kwargs.items() if getattr(e, k, None) == v
+ ]) == len(kwargs)
+ ]
+
+ res = res or [None]
+ return res[0]
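+
+# Illustrative sketch (the service and entity names are assumptions):
+#
+#   vms_service = connection.system_service().vms_service()
+#   vm = search_by_name(vms_service, 'my_vm')   # first match or None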
+
+
+def get_entity(service, get_params=None):
+ """
+ Ignore SDK Error in case of getting an entity from service.
+ """
+ entity = None
+ try:
+ if get_params is not None:
+ entity = service.get(**get_params)
+ else:
+ entity = service.get()
+ except sdk.Error:
+ # We can get here 404, we should ignore it, in case
+ # of removing entity for example.
+ pass
+ return entity
+
+
+def get_id_by_name(service, name, raise_error=True, ignore_case=False):
+ """
+    Search for an entity ID by its name.
+ """
+ entity = search_by_name(service, name)
+
+ if entity is not None:
+ return entity.id
+
+ if raise_error:
+ raise Exception("Entity '%s' was not found." % name)
+
+
+def wait(
+ service,
+ condition,
+ fail_condition=lambda e: False,
+ timeout=180,
+ wait=True,
+ poll_interval=3,
+):
+ """
+    Wait until the entity fulfills the expected condition.
+
+ :param service: service of the entity
+ :param condition: condition to be fulfilled
+ :param fail_condition: if this condition is true, raise Exception
+ :param timeout: max time to wait in seconds
+ :param wait: if True wait for condition, if False don't wait
+ :param poll_interval: Number of seconds we should wait until next condition check
+ """
+ # Wait until the desired state of the entity:
+ if wait:
+ start = time.time()
+ while time.time() < start + timeout:
+ # Exit if the condition of entity is valid:
+ entity = get_entity(service)
+ if condition(entity):
+ return
+ elif fail_condition(entity):
+ raise Exception("Error while waiting on result state of the entity.")
+
+ # Sleep for `poll_interval` seconds if none of the conditions apply:
+ time.sleep(float(poll_interval))
+
+ raise Exception("Timeout exceed while waiting on result state of the entity.")
+
+
+def __get_auth_dict():
+ OVIRT_URL = os.environ.get('OVIRT_URL')
+ OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
+ OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
+ OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
+ OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
+ OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
+ OVIRT_INSECURE = OVIRT_CAFILE is None
+
+ env_vars = None
+ if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
+ OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
+ if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
+ env_vars = {
+ 'url': OVIRT_URL,
+ 'username': OVIRT_USERNAME,
+ 'password': OVIRT_PASSWORD,
+ 'insecure': OVIRT_INSECURE,
+ 'token': OVIRT_TOKEN,
+ 'ca_file': OVIRT_CAFILE,
+ }
+ if env_vars is not None:
+ auth = dict(default=env_vars, type='dict')
+ else:
+ auth = dict(required=True, type='dict')
+
+ return auth
+
+
+def ovirt_info_full_argument_spec(**kwargs):
+ """
+ Extend parameters of info module with parameters which are common to all
+ oVirt info modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list()),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+# Left for third-party module compatibility
+def ovirt_facts_full_argument_spec(**kwargs):
+ """
+ This is deprecated. Please use ovirt_info_full_argument_spec instead!
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ return ovirt_info_full_argument_spec(**kwargs)
+
+
+def ovirt_full_argument_spec(**kwargs):
+ """
+ Extend parameters of module with parameters which are common to all oVirt modules.
+
+ :param kwargs: kwargs to be extended
+ :return: extended dictionary with common parameters
+ """
+ spec = dict(
+ auth=__get_auth_dict(),
+ timeout=dict(default=180, type='int'),
+ wait=dict(default=True, type='bool'),
+ poll_interval=dict(default=3, type='int'),
+ fetch_nested=dict(default=False, type='bool'),
+ nested_attributes=dict(type='list', default=list()),
+ )
+ spec.update(kwargs)
+ return spec
+
+
+def check_params(module):
+ """
+ Most modules must have either `name` or `id` specified.
+ """
+ if module.params.get('name') is None and module.params.get('id') is None:
+ module.fail_json(msg='"name" or "id" is required')
+
+
+def engine_supported(connection, version):
+ return LooseVersion(engine_version(connection)) >= LooseVersion(version)
+
+
+def check_support(version, connection, module, params):
+ """
+ Check if parameters used by user are supported by oVirt Python SDK
+ and oVirt engine.
+ """
+ api_version = LooseVersion(engine_version(connection))
+ version = LooseVersion(version)
+ for param in params:
+ if module.params.get(param) is not None:
+ return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
+
+ return True
+
+
+class BaseModule(object):
+ """
+    This is the base class for oVirt modules. oVirt modules should inherit
+    from this class and override methods to customize the specific needs of
+    the module. The only abstract method of this class is `build_entity`,
+    which must be implemented in the child class.
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, connection, module, service, changed=False):
+ self._connection = connection
+ self._module = module
+ self._service = service
+ self._changed = changed
+ self._diff = {'after': dict(), 'before': dict()}
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, changed):
+ if not self._changed:
+ self._changed = changed
+
+ @abstractmethod
+ def build_entity(self):
+ """
+ This method should return oVirt Python SDK type, which we want to
+ create or update, initialized by values passed by Ansible module.
+
+ For example if we want to create VM, we will return following:
+ types.Vm(name=self._module.params['vm_name'])
+
+ :return: Specific instance of sdk.Struct.
+ """
+ pass
+
+ def param(self, name, default=None):
+ """
+        Return a module parameter specified by its name.
+ """
+ return self._module.params.get(name, default)
+
+ def update_check(self, entity):
+ """
+        This method checks whether the entity values are the same as the values
+        passed to the Ansible module. By default we don't compare any values.
+
+ :param entity: Entity we want to compare with Ansible module values.
+        :return: True if the values are the same, so the entity doesn't need to be updated.
+ """
+ return True
+
+ def pre_create(self, entity):
+ """
+ This method is called right before entity is created.
+
+ :param entity: Entity to be created or updated.
+ """
+ pass
+
+ def post_create(self, entity):
+ """
+ This method is called right after entity is created.
+
+ :param entity: Entity which was created.
+ """
+ pass
+
+ def post_update(self, entity):
+ """
+ This method is called right after entity is updated.
+
+ :param entity: Entity which was updated.
+ """
+ pass
+
+ def diff_update(self, after, update):
+ for k, v in update.items():
+ if isinstance(v, Mapping):
+ after[k] = self.diff_update(after.get(k, dict()), v)
+ else:
+ after[k] = update[k]
+ return after
+
+ def create(
+ self,
+ entity=None,
+ result_state=None,
+ fail_condition=lambda e: False,
+ search_params=None,
+ update_params=None,
+ _wait=None,
+ force_create=False,
+ **kwargs
+ ):
+ """
+        Method which is called when the state of the entity is 'present'. If the
+        user doesn't provide the `entity` parameter, the entity is searched for
+        using the `search_params` parameter. If the entity is found, it is
+        updated; whether it should be updated is checked by the `update_check`
+        method, and the updated entity is built by the `build_entity` method.
+
+ Function executed after entity is created can optionally be specified
+ in `post_create` parameter. Function executed after entity is updated
+ can optionally be specified in `post_update` parameter.
+
+ :param entity: Entity we want to update, if exists.
+        :param result_state: State the entity should be in for the task to finish.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param update_params: The params which should be passed to update method.
+ :param kwargs: Additional parameters passed when creating entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None and not force_create:
+ entity = self.search_entity(search_params)
+
+ self.pre_create(entity)
+
+ if entity:
+ # Entity exists, so update it:
+ entity_service = self._service.service(entity.id)
+ if not self.update_check(entity):
+ new_entity = self.build_entity()
+ if not self._module.check_mode:
+ update_params = update_params or {}
+ updated_entity = entity_service.update(
+ new_entity,
+ **update_params
+ )
+ self.post_update(entity)
+
+                # Update diffs only if the user specified the --diff parameter,
+                # so we don't needlessly overload the API:
+ if self._module._diff:
+ before = get_dict_of_struct(
+ entity,
+ self._connection,
+ fetch_nested=True,
+ attributes=['name'],
+ )
+ after = before.copy()
+ self.diff_update(after, get_dict_of_struct(new_entity))
+ self._diff['before'] = before
+ self._diff['after'] = after
+
+ self.changed = True
+ else:
+            # Entity doesn't exist, so create it:
+ if not self._module.check_mode:
+ entity = self._service.add(
+ self.build_entity(),
+ **kwargs
+ )
+ self.post_create(entity)
+ self.changed = True
+
+ if not self._module.check_mode:
+ # Wait for the entity to be created and to be in the defined state:
+ entity_service = self._service.service(entity.id)
+
+ def state_condition(entity):
+ return entity
+
+ if result_state:
+
+ def state_condition(entity):
+ return entity and entity.status == result_state
+
+ wait(
+ service=entity_service,
+ condition=state_condition,
+ fail_condition=fail_condition,
+ wait=_wait if _wait is not None else self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+
+ return {
+ 'changed': self.changed,
+ 'id': getattr(entity, 'id', None),
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def pre_remove(self, entity):
+ """
+ This method is called right before entity is removed.
+
+ :param entity: Entity which we want to remove.
+ """
+ pass
+
+ def entity_name(self, entity):
+ return "{e_type} '{e_name}'".format(
+ e_type=type(entity).__name__.lower(),
+ e_name=getattr(entity, 'name', None),
+ )
+
+ def remove(self, entity=None, search_params=None, **kwargs):
+ """
+        Method which is called when the state of the entity is 'absent'. If the
+        user doesn't provide the `entity` parameter, the entity is searched for
+        using the `search_params` parameter. If the entity is found, it is removed.
+
+ Function executed before remove is executed can optionally be specified
+ in `pre_remove` parameter.
+
+ :param entity: Entity we want to remove.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed when removing entity.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ if entity is None:
+ return {
+ 'changed': self.changed,
+ 'msg': "Entity wasn't found."
+ }
+
+ self.pre_remove(entity)
+
+ entity_service = self._service.service(entity.id)
+ if not self._module.check_mode:
+ entity_service.remove(**kwargs)
+ wait(
+ service=entity_service,
+ condition=lambda entity: not entity,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ self.changed = True
+
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ }
+
+ def action(
+ self,
+ action,
+ entity=None,
+ action_condition=lambda e: e,
+ wait_condition=lambda e: e,
+ fail_condition=lambda e: False,
+ pre_action=lambda e: e,
+ post_action=lambda e: None,
+ search_params=None,
+ **kwargs
+ ):
+ """
+ This method is executed when we want to change the state of some oVirt
+ entity. The action to be executed on oVirt service is specified by
+ `action` parameter. Whether the action should be executed can be
+ specified by passing `action_condition` parameter. State which the
+ entity should be in after execution of the action can be specified
+ by `wait_condition` parameter.
+
+ Function executed before an action on entity can optionally be specified
+ in `pre_action` parameter. Function executed after an action on entity can
+ optionally be specified in `post_action` parameter.
+
+ :param action: Action which should be executed by service on entity.
+ :param entity: Entity we want to run action on.
+ :param action_condition: Function which is executed when checking if action should be executed.
+ :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
+ :param wait_condition: Function which is executed when waiting on result state.
+ :param pre_action: Function which is executed before running the action.
+ :param post_action: Function which is executed after running the action.
+ :param search_params: Dictionary of parameters to be used for search.
+ :param kwargs: Additional parameters passed to action.
+ :return: Dictionary with values returned by Ansible module.
+ """
+ if entity is None:
+ entity = self.search_entity(search_params)
+
+ entity = pre_action(entity)
+
+ if entity is None:
+ self._module.fail_json(
+ msg="Entity not found, can't run action '{0}'.".format(
+ action
+ )
+ )
+
+ entity_service = self._service.service(entity.id)
+ entity = entity_service.get()
+ if action_condition(entity):
+ if not self._module.check_mode:
+ getattr(entity_service, action)(**kwargs)
+ self.changed = True
+
+ post_action(entity)
+
+ wait(
+ service=self._service.service(entity.id),
+ condition=wait_condition,
+ fail_condition=fail_condition,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ poll_interval=self._module.params['poll_interval'],
+ )
+ return {
+ 'changed': self.changed,
+ 'id': entity.id,
+ type(entity).__name__.lower(): get_dict_of_struct(
+ struct=entity,
+ connection=self._connection,
+ fetch_nested=self._module.params.get('fetch_nested'),
+ attributes=self._module.params.get('nested_attributes'),
+ ),
+ 'diff': self._diff,
+ }
+
+ def wait_for_import(self, condition=lambda e: True):
+ if self._module.params['wait']:
+ start = time.time()
+ timeout = self._module.params['timeout']
+ poll_interval = self._module.params['poll_interval']
+ while time.time() < start + timeout:
+ entity = self.search_entity()
+ if entity and condition(entity):
+ return entity
+ time.sleep(poll_interval)
+
+ def search_entity(self, search_params=None, list_params=None):
+ """
+        Always try to search by `id` first; if no ID is specified,
+        check whether the user passed a custom search in `search_params`,
+        and otherwise search by `name`.
+ """
+ entity = None
+
+ if 'id' in self._module.params and self._module.params['id'] is not None:
+ entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
+ elif search_params is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, **search_params)
+ elif self._module.params.get('name') is not None:
+ entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
+
+ return entity
+
+ def _get_major(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.major)
+ return int(full_version.split('.')[0])
+
+ def _get_minor(self, full_version):
+ if full_version is None or full_version == "":
+ return None
+ if isinstance(full_version, otypes.Version):
+ return int(full_version.minor)
+ return int(full_version.split('.')[1])
+
+
+def _sdk4_error_maybe():
+ """
+ Allow for ovirtsdk4 not being installed.
+ """
+ if HAS_SDK:
+ return sdk.Error
+ return type(None)
+
+
+class OvirtRetry(CloudRetry):
+ base_class = _sdk4_error_maybe()
+
+ @staticmethod
+ def status_code_from_exception(error):
+ return error.code
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ # This is a list of error codes to retry.
+ retry_on = [
+ # HTTP status: Conflict
+ 409,
+ ]
+ if catch_extra_error_codes:
+ retry_on.extend(catch_extra_error_codes)
+
+ return response_code in retry_on
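+
+# Illustrative sketch (the function and service names are assumptions): the
+# retry decorator built above can wrap a call that may hit a 409 Conflict, e.g.
+#
+#   @OvirtRetry.backoff(tries=5, delay=2)
+#   def update_vm(vm_service, vm):
+#       return vm_service.update(vm)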
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
new file mode 100644
index 00000000..3c87c1ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
@@ -0,0 +1,286 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+from ansible.module_utils.basic import env_fallback
+
+try:
+ import footmark
+ import footmark.ecs
+ import footmark.slb
+ import footmark.vpc
+ import footmark.rds
+ import footmark.ess
+ import footmark.sts
+ import footmark.dns
+ import footmark.ram
+ import footmark.market
+ HAS_FOOTMARK = True
+except ImportError:
+ HAS_FOOTMARK = False
+
+
+class AnsibleACSError(Exception):
+ pass
+
+
+def acs_common_argument_spec():
+ return dict(
+ alicloud_access_key=dict(aliases=['access_key_id', 'access_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
+ alicloud_secret_key=dict(aliases=['secret_access_key', 'secret_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
+ alicloud_security_token=dict(aliases=['security_token'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
+ ecs_role_name=dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME']))
+ )
+
+
+def ecs_argument_spec():
+ spec = acs_common_argument_spec()
+ spec.update(
+ dict(
+ alicloud_region=dict(required=True, aliases=['region', 'region_id'],
+ fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
+ alicloud_assume_role_arn=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']),
+ aliases=['assume_role_arn']),
+ alicloud_assume_role_session_name=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']),
+ aliases=['assume_role_session_name']),
+ alicloud_assume_role_session_expiration=dict(type='int',
+ fallback=(env_fallback,
+ ['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']),
+ aliases=['assume_role_session_expiration']),
+ alicloud_assume_role=dict(type='dict', aliases=['assume_role']),
+ profile=dict(fallback=(env_fallback, ['ALICLOUD_PROFILE'])),
+ shared_credentials_file=dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE']))
+ )
+ )
+ return spec
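+
+# Illustrative sketch (the instance_ids parameter is a placeholder): modules
+# typically extend this spec before constructing AnsibleModule, e.g.
+#
+#   argument_spec = ecs_argument_spec()
+#   argument_spec.update(dict(instance_ids=dict(type='list')))
+#   module = AnsibleModule(argument_spec=argument_spec)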
+
+
+def get_acs_connection_info(params):
+
+ ecs_params = dict(acs_access_key_id=params.get('alicloud_access_key'),
+ acs_secret_access_key=params.get('alicloud_secret_key'),
+ security_token=params.get('alicloud_security_token'),
+ ecs_role_name=params.get('ecs_role_name'),
+ user_agent='Ansible-Provider-Alicloud')
+ return ecs_params
+
+
+def connect_to_acs(acs_module, region, **params):
+ conn = acs_module.connect_to_region(region, **params)
+ if not conn:
+ if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
+ raise AnsibleACSError(
+ "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
+ else:
+ raise AnsibleACSError(
+ "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
+ return conn
+
+
+def get_assume_role(params):
+ """ Return new params """
+ sts_params = get_acs_connection_info(params)
+ assume_role = {}
+ if params.get('assume_role'):
+ assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn')
+ assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name')
+ assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration')
+ assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy')
+
+ assume_role_params = {
+ 'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'),
+ 'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name')
+ else assume_role.get('alicloud_assume_role_session_name'),
+ 'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration')
+ else assume_role.get('alicloud_assume_role_session_expiration', 3600),
+ 'policy': assume_role.get('alicloud_assume_role_policy', {})
+ }
+
+ try:
+ sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read()
+ sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \
+ = sts['access_key_id'], sts['access_key_secret'], sts['security_token']
+ except AnsibleACSError as e:
+ params.fail_json(msg=str(e))
+ return sts_params
+
+
+def get_profile(params):
+ if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']:
+ path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json'
+ auth = {}
+ with open(path, 'r') as f:
+ for pro in json.load(f)['profiles']:
+ if params['profile'] == pro['name']:
+ auth = pro
+ if auth:
+ if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'EcsRamRole':
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_assume_role_arn'] = auth.get('ram_role_arn')
+ params['alicloud_assume_role_session_name'] = auth.get('ram_session_name')
+ params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_assume_role(params)
+ elif params.get('alicloud_assume_role_arn') or params.get('assume_role'):
+ params = get_assume_role(params)
+ else:
+ params = get_acs_connection_info(params)
+ return params
+
+
+def ecs_connect(module):
+ """ Return an ecs connection"""
+ ecs_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return ecs
+
+
+def slb_connect(module):
+ """ Return an slb connection"""
+ slb_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ slb = connect_to_acs(footmark.slb, region, **slb_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return slb
+
+
+def dns_connect(module):
+ """ Return an dns connection"""
+ dns_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ dns = connect_to_acs(footmark.dns, region, **dns_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return dns
+
+
+def vpc_connect(module):
+ """ Return an vpc connection"""
+ vpc_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return vpc
+
+
+def rds_connect(module):
+ """ Return an rds connection"""
+ rds_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ rds = connect_to_acs(footmark.rds, region, **rds_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return rds
+
+
+def ess_connect(module):
+ """ Return an ess connection"""
+ ess_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ess = connect_to_acs(footmark.ess, region, **ess_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return ess
+
+
+def sts_connect(module):
+ """ Return an sts connection"""
+ sts_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ sts = connect_to_acs(footmark.sts, region, **sts_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return sts
+
+
+def ram_connect(module):
+ """ Return an ram connection"""
+ ram_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ ram = connect_to_acs(footmark.ram, region, **ram_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return ram
+
+
+def market_connect(module):
+ """ Return an market connection"""
+ market_params = get_profile(module.params)
+ # If we have a region specified, connect to its endpoint.
+ region = module.params.get('alicloud_region')
+ if region:
+ try:
+ market = connect_to_acs(footmark.market, region, **market_params)
+ except AnsibleACSError as e:
+ module.fail_json(msg=str(e))
+    # Otherwise, no region so we fall back to the old connection method
+ return market
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py
new file mode 100644
index 00000000..33b33084
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/cloud.py
@@ -0,0 +1,208 @@
+#
+# (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+ @AWSRetry.exponential_backoff(retries=10, delay=3)
+ get_ec2_security_group_ids_from_names()
+
+ @AWSRetry.jittered_backoff()
+ get_ec2_security_group_ids_from_names()
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+ allowing developers to explicitly pass in the a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+ <function backoff_backoff at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+ # This is the base class of the exception.
+ # AWS Example botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class):
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+ else:
+                                # Status code is not retryable, so re-raise the original exception
+ raise e
+ else:
+ # Return original exception if exception is not a ClientError
+ raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+                default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py
new file mode 100644
index 00000000..db4e91b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/compat/ipaddress.py
@@ -0,0 +1,2580 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file, and this file only, is based on
+# Lib/ipaddress.py of cpython
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
+# are retained in Python alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+# Copyright 2007 Google Inc.
+# Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from __future__ import unicode_literals
+__metaclass__ = type
+
+
+import itertools
+import struct
+
+
+# The following makes it easier for us to script updates of the bundled code and is not part of
+# upstream
+_BUNDLED_METADATA = {"pypi_name": "ipaddress", "version": "1.0.22"}
+
+__version__ = "1.0.22"
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+ _compat_int_types = (int, long)
+except NameError:
+ pass
+try:
+ _compat_str = unicode
+except NameError:
+ _compat_str = str
+ assert bytes != str
+if b"\0"[0] == 0: # Python 3 semantics
+
+ def _compat_bytes_to_byte_vals(byt):
+ return byt
+
+
+else:
+
+ def _compat_bytes_to_byte_vals(byt):
+ return [struct.unpack(b"!B", b)[0] for b in byt]
+
+
+try:
+ _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+
+ def _compat_int_from_byte_vals(bytvals, endianess):
+ assert endianess == "big"
+ res = 0
+ for bv in bytvals:
+ assert isinstance(bv, _compat_int_types)
+ res = (res << 8) + bv
+ return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+ assert isinstance(intval, _compat_int_types)
+ assert endianess == "big"
+ if length == 4:
+ if intval < 0 or intval >= 2 ** 32:
+ raise struct.error("integer out of range for 'I' format code")
+ return struct.pack(b"!I", intval)
+ elif length == 16:
+ if intval < 0 or intval >= 2 ** 128:
+ raise struct.error("integer out of range for 'QQ' format code")
+ return struct.pack(b"!QQ", intval >> 64, intval & 0xFFFFFFFFFFFFFFFF)
+ else:
+ raise NotImplementedError()
+
+
+if hasattr(int, "bit_length"):
+    # Not int.bit_length, since that won't work in 2.7 where long exists
+ def _compat_bit_length(i):
+ return i.bit_length()
+
+
+else:
+
+ def _compat_bit_length(i):
+ for res in itertools.count():
+ if i >> res == 0:
+ return res
+
+
+def _compat_range(start, end, step=1):
+ assert step > 0
+ i = start
+ while i < end:
+ yield i
+ i += step
+
+
+class _TotalOrderingMixin(object):
+ __slots__ = ()
+
+ # Helper that derives the other comparison operations from
+ # __lt__ and __eq__
+ # We avoid functools.total_ordering because it doesn't handle
+ # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+ def __eq__(self, other):
+ raise NotImplementedError
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not equal
+
+ def __lt__(self, other):
+ raise NotImplementedError
+
+ def __le__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented or not less:
+ return self.__eq__(other)
+ return less
+
+ def __gt__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ equal = self.__eq__(other)
+ if equal is NotImplemented:
+ return NotImplemented
+ return not (less or equal)
+
+ def __ge__(self, other):
+ less = self.__lt__(other)
+ if less is NotImplemented:
+ return NotImplemented
+ return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+ """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+ """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Address or IPv6Address object.
+
+ Raises:
+ ValueError: if the *address* passed isn't either a v4 or a v6
+ address
+
+ """
+ try:
+ return IPv4Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Address(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 address" % address
+ )
+
+
+def ip_network(address, strict=True):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP network. Either IPv4 or
+ IPv6 networks may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Network or IPv6Network object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address. Or if the network has host bits set.
+
+ """
+ try:
+ return IPv4Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Network(address, strict)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ if isinstance(address, bytes):
+ raise AddressValueError(
+ "%r does not appear to be an IPv4 or IPv6 network. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?" % address
+ )
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 network" % address
+ )
+
+
+def ip_interface(address):
+ """Take an IP string/int and return an object of the correct type.
+
+ Args:
+ address: A string or integer, the IP address. Either IPv4 or
+ IPv6 addresses may be supplied; integers less than 2**32 will
+ be considered to be IPv4 by default.
+
+ Returns:
+ An IPv4Interface or IPv6Interface object.
+
+ Raises:
+ ValueError: if the string passed isn't either a v4 or a v6
+ address.
+
+ Notes:
+ The IPv?Interface classes describe an Address on a particular
+ Network, so they're basically a combination of both the Address
+ and Network classes.
+
+ """
+ try:
+ return IPv4Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ try:
+ return IPv6Interface(address)
+ except (AddressValueError, NetmaskValueError):
+ pass
+
+ raise ValueError(
+ "%r does not appear to be an IPv4 or IPv6 interface" % address
+ )
+
+
+def v4_int_to_packed(address):
+ """Represent an address as 4 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv4 IP address.
+
+ Returns:
+ The integer address packed as 4 bytes in network (big-endian) order.
+
+ Raises:
+ ValueError: If the integer is negative or too large to be an
+ IPv4 IP address.
+
+ """
+ try:
+ return _compat_to_bytes(address, 4, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+ """Represent an address as 16 packed bytes in network (big-endian) order.
+
+ Args:
+ address: An integer representation of an IPv6 IP address.
+
+ Returns:
+ The integer address packed as 16 bytes in network (big-endian) order.
+
+ """
+ try:
+ return _compat_to_bytes(address, 16, "big")
+ except (struct.error, OverflowError):
+ raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+ """Helper to split the netmask and raise AddressValueError if needed"""
+ addr = _compat_str(address).split("/")
+ if len(addr) > 2:
+ raise AddressValueError("Only one '/' permitted in %r" % address)
+ return addr
+
+
+def _find_address_range(addresses):
+ """Find a sequence of sorted deduplicated IPv#Address.
+
+ Args:
+ addresses: a list of IPv#Address objects.
+
+ Yields:
+ A tuple containing the first and last IP addresses in the sequence.
+
+ """
+ it = iter(addresses)
+ first = last = next(it) # pylint: disable=stop-iteration-return
+ for ip in it:
+ if ip._ip != last._ip + 1:
+ yield first, last
+ first = ip
+ last = ip
+ yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+ """Count the number of zero bits on the right hand side.
+
+ Args:
+ number: an integer.
+ bits: maximum number of bits to count.
+
+ Returns:
+ The number of zero bits on the right hand side of the number.
+
+ """
+ if number == 0:
+ return bits
+ return min(bits, _compat_bit_length(~number & (number - 1)))
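+
+# Illustrative example: _count_righthand_zero_bits(0b10100000, 8) returns 5,
+# the number of trailing zero bits in the 8-bit value.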
+
+
+def summarize_address_range(first, last):
+ """Summarize a network range given the first and last IP addresses.
+
+ Example:
+ >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+ ... IPv4Address('192.0.2.130')))
+ ... #doctest: +NORMALIZE_WHITESPACE
+ [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+ IPv4Network('192.0.2.130/32')]
+
+ Args:
+ first: the first IPv4Address or IPv6Address in the range.
+ last: the last IPv4Address or IPv6Address in the range.
+
+ Returns:
+ An iterator of the summarized IPv(4|6) network objects.
+
+ Raise:
+ TypeError:
+ If the first and last objects are not IP addresses.
+ If the first and last objects are not the same version.
+ ValueError:
+ If the last object is not greater than the first.
+ If the version of the first address is not 4 or 6.
+
+ """
+ if not (
+ isinstance(first, _BaseAddress) and isinstance(last, _BaseAddress)
+ ):
+ raise TypeError("first and last must be IP addresses, not networks")
+ if first.version != last.version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (first, last)
+ )
+ if first > last:
+ raise ValueError("last IP address must be greater than first")
+
+ if first.version == 4:
+ ip = IPv4Network
+ elif first.version == 6:
+ ip = IPv6Network
+ else:
+ raise ValueError("unknown IP version")
+
+ ip_bits = first._max_prefixlen
+ first_int = first._ip
+ last_int = last._ip
+ while first_int <= last_int:
+ nbits = min(
+ _count_righthand_zero_bits(first_int, ip_bits),
+ _compat_bit_length(last_int - first_int + 1) - 1,
+ )
+ net = ip((first_int, ip_bits - nbits))
+ yield net
+ first_int += 1 << nbits
+ if first_int - 1 == ip._ALL_ONES:
+ break
+
+
+def _collapse_addresses_internal(addresses):
+ """Loops through the addresses, collapsing concurrent netblocks.
+
+ Example:
+
+ ip1 = IPv4Network('192.0.2.0/26')
+ ip2 = IPv4Network('192.0.2.64/26')
+ ip3 = IPv4Network('192.0.2.128/26')
+ ip4 = IPv4Network('192.0.2.192/26')
+
+ _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ This shouldn't be called directly; it is called via
+ collapse_addresses().
+
+ Args:
+ addresses: A list of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of IPv4Network or IPv6Network objects, depending on
+ what we were passed.
+
+ """
+ # First merge
+ to_merge = list(addresses)
+ subnets = {}
+ while to_merge:
+ net = to_merge.pop()
+ supernet = net.supernet()
+ existing = subnets.get(supernet)
+ if existing is None:
+ subnets[supernet] = net
+ elif existing != net:
+ # Merge consecutive subnets
+ del subnets[supernet]
+ to_merge.append(supernet)
+ # Then iterate over resulting networks, skipping subsumed subnets
+ last = None
+ for net in sorted(subnets.values()):
+ if last is not None:
+ # Since they are sorted,
+ # last.network_address <= net.network_address is a given.
+ if last.broadcast_address >= net.broadcast_address:
+ continue
+ yield net
+ last = net
+
+
+def collapse_addresses(addresses):
+ """Collapse a list of IP objects.
+
+ Example:
+ collapse_addresses([IPv4Network('192.0.2.0/25'),
+ IPv4Network('192.0.2.128/25')]) ->
+ [IPv4Network('192.0.2.0/24')]
+
+ Args:
+ addresses: An iterator of IPv4Network or IPv6Network objects.
+
+ Returns:
+ An iterator of the collapsed IPv(4|6)Network objects.
+
+ Raises:
+ TypeError: If passed a list of mixed version objects.
+
+ """
+ addrs = []
+ ips = []
+ nets = []
+
+ # split IP addresses and networks
+ for ip in addresses:
+ if isinstance(ip, _BaseAddress):
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ ips.append(ip)
+ elif ip._prefixlen == ip._max_prefixlen:
+ if ips and ips[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, ips[-1])
+ )
+ try:
+ ips.append(ip.ip)
+ except AttributeError:
+ ips.append(ip.network_address)
+ else:
+ if nets and nets[-1]._version != ip._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (ip, nets[-1])
+ )
+ nets.append(ip)
+
+ # sort and dedup
+ ips = sorted(set(ips))
+
+ # find consecutive address ranges in the sorted sequence and summarize them
+ if ips:
+ for first, last in _find_address_range(ips):
+ addrs.extend(summarize_address_range(first, last))
+
+ return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+ """Return a key suitable for sorting between networks and addresses.
+
+ Address and Network objects are not sortable by default; they're
+ fundamentally different so the expression
+
+ IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+ doesn't make any sense. There are times, however, when you may wish
+ to have ipaddress sort these for you anyway. If you need to do this, you
+ can use this function as the key= argument to sorted().
+
+ Args:
+ obj: either a Network or Address object.
+ Returns:
+ appropriate key.
+
+ """
+ if isinstance(obj, _BaseNetwork):
+ return obj._get_networks_key()
+ elif isinstance(obj, _BaseAddress):
+ return obj._get_address_key()
+ return NotImplemented
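+
+# Usage sketch (hedged, illustrative values): comparing an address directly
+# with a network raises TypeError, but sorting a mixed list works with this
+# function as the key:
+#
+#     >>> sorted([IPv4Address(u'198.51.100.1'), IPv4Network(u'192.0.2.0/24')],
+#     ...        key=get_mixed_type_key)
+#     [IPv4Network('192.0.2.0/24'), IPv4Address('198.51.100.1')]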
+
+
+class _IPAddressBase(_TotalOrderingMixin):
+
+ """The mother class."""
+
+ __slots__ = ()
+
+ @property
+ def exploded(self):
+ """Return the longhand version of the IP address as a string."""
+ return self._explode_shorthand_ip_string()
+
+ @property
+ def compressed(self):
+ """Return the shorthand version of the IP address as a string."""
+ return _compat_str(self)
+
+ @property
+ def reverse_pointer(self):
+ """The name of the reverse DNS pointer for the IP address, e.g.:
+ >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+ '1.0.0.127.in-addr.arpa'
+ >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+ '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+ """
+ return self._reverse_pointer()
+
+ @property
+ def version(self):
+ msg = "%200s has no version specified" % (type(self),)
+ raise NotImplementedError(msg)
+
+ def _check_int_address(self, address):
+ if address < 0:
+ msg = "%d (< 0) is not permitted as an IPv%d address"
+ raise AddressValueError(msg % (address, self._version))
+ if address > self._ALL_ONES:
+ msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+ raise AddressValueError(
+ msg % (address, self._max_prefixlen, self._version)
+ )
+
+ def _check_packed_address(self, address, expected_len):
+ address_len = len(address)
+ if address_len != expected_len:
+ msg = (
+ "%r (len %d != %d) is not permitted as an IPv%d address. "
+ "Did you pass in a bytes (str in Python 2) instead of"
+ " a unicode object?"
+ )
+ raise AddressValueError(
+ msg % (address, address_len, expected_len, self._version)
+ )
+
+ @classmethod
+ def _ip_int_from_prefix(cls, prefixlen):
+ """Turn the prefix length into a bitwise netmask
+
+ Args:
+ prefixlen: An integer, the prefix length.
+
+ Returns:
+ An integer.
+
+ """
+ return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+ @classmethod
+ def _prefix_from_ip_int(cls, ip_int):
+ """Return prefix length from the bitwise netmask.
+
+ Args:
+ ip_int: An integer, the netmask in expanded bitwise format
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ ValueError: If the input intermingles zeroes & ones
+ """
+ trailing_zeroes = _count_righthand_zero_bits(
+ ip_int, cls._max_prefixlen
+ )
+ prefixlen = cls._max_prefixlen - trailing_zeroes
+ leading_ones = ip_int >> trailing_zeroes
+ all_ones = (1 << prefixlen) - 1
+ if leading_ones != all_ones:
+ byteslen = cls._max_prefixlen // 8
+ details = _compat_to_bytes(ip_int, byteslen, "big")
+ msg = "Netmask pattern %r mixes zeroes & ones"
+ raise ValueError(msg % details)
+ return prefixlen
+
+ @classmethod
+ def _report_invalid_netmask(cls, netmask_str):
+ msg = "%r is not a valid netmask" % netmask_str
+ raise NetmaskValueError(msg)
+
+ @classmethod
+ def _prefix_from_prefix_string(cls, prefixlen_str):
+ """Return prefix length from a numeric string
+
+ Args:
+ prefixlen_str: The string to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask
+ """
+ # int allows a leading +/- as well as surrounding whitespace,
+ # so we ensure that isn't the case
+ if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+ cls._report_invalid_netmask(prefixlen_str)
+ try:
+ prefixlen = int(prefixlen_str)
+ except ValueError:
+ cls._report_invalid_netmask(prefixlen_str)
+ if not (0 <= prefixlen <= cls._max_prefixlen):
+ cls._report_invalid_netmask(prefixlen_str)
+ return prefixlen
+
+ @classmethod
+ def _prefix_from_ip_string(cls, ip_str):
+ """Turn a netmask/hostmask string into a prefix length
+
+ Args:
+ ip_str: The netmask/hostmask to be converted
+
+ Returns:
+ An integer, the prefix length.
+
+ Raises:
+ NetmaskValueError: If the input is not a valid netmask/hostmask
+ """
+ # Parse the netmask/hostmask like an IP address.
+ try:
+ ip_int = cls._ip_int_from_string(ip_str)
+ except AddressValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+ # Note that the two ambiguous cases (all-ones and all-zeroes) are
+ # treated as netmasks.
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ pass
+
+ # Invert the bits, and try matching a /0+1+/ hostmask instead.
+ ip_int ^= cls._ALL_ONES
+ try:
+ return cls._prefix_from_ip_int(ip_int)
+ except ValueError:
+ cls._report_invalid_netmask(ip_str)
+
+ def __reduce__(self):
+ return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+ """A generic IP object.
+
+ This IP class contains the version independent methods which are
+ used by single IP addresses.
+ """
+
+ __slots__ = ()
+
+ def __int__(self):
+ return self._ip
+
+ def __eq__(self, other):
+ try:
+ return self._ip == other._ip and self._version == other._version
+ except AttributeError:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseAddress):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self._ip != other._ip:
+ return self._ip < other._ip
+ return False
+
+ # Shorthand for Integer addition and subtraction. This is not
+ # meant to ever support addition/subtraction of addresses.
+ def __add__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) + other)
+
+ def __sub__(self, other):
+ if not isinstance(other, _compat_int_types):
+ return NotImplemented
+ return self.__class__(int(self) - other)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return _compat_str(self._string_from_ip_int(self._ip))
+
+ def __hash__(self):
+ return hash(hex(int(self._ip)))
+
+ def _get_address_key(self):
+ return (self._version, self)
+
+ def __reduce__(self):
+ return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+ """A generic IP network object.
+
+ This IP class contains the version independent methods which are
+ used by networks.
+
+ """
+
+ def __init__(self, address):
+ self._cache = {}
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, _compat_str(self))
+
+ def __str__(self):
+ return "%s/%d" % (self.network_address, self.prefixlen)
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the network
+ or broadcast addresses.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast):
+ yield self._address_class(x)
+
+ def __iter__(self):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network, broadcast + 1):
+ yield self._address_class(x)
+
+ def __getitem__(self, n):
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ if n >= 0:
+ if network + n > broadcast:
+ raise IndexError("address out of range")
+ return self._address_class(network + n)
+ else:
+ n += 1
+ if broadcast + n < network:
+ raise IndexError("address out of range")
+ return self._address_class(broadcast + n)
+
+ def __lt__(self, other):
+ if not isinstance(other, _IPAddressBase):
+ return NotImplemented
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+ if self.network_address != other.network_address:
+ return self.network_address < other.network_address
+ if self.netmask != other.netmask:
+ return self.netmask < other.netmask
+ return False
+
+ def __eq__(self, other):
+ try:
+ return (
+ self._version == other._version
+ and self.network_address == other.network_address
+ and int(self.netmask) == int(other.netmask)
+ )
+ except AttributeError:
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(int(self.network_address) ^ int(self.netmask))
+
+ def __contains__(self, other):
+ # always false if one is v4 and the other is v6.
+ if self._version != other._version:
+ return False
+ # dealing with another network.
+ if isinstance(other, _BaseNetwork):
+ return False
+ # dealing with another address
+ else:
+ # address
+ return (
+ int(self.network_address)
+ <= int(other._ip)
+ <= int(self.broadcast_address)
+ )
+
+ def overlaps(self, other):
+ """Tell if self is partly contained in other."""
+ return self.network_address in other or (
+ self.broadcast_address in other
+ or (
+ other.network_address in self
+ or (other.broadcast_address in self)
+ )
+ )
+
+ @property
+ def broadcast_address(self):
+ x = self._cache.get("broadcast_address")
+ if x is None:
+ x = self._address_class(
+ int(self.network_address) | int(self.hostmask)
+ )
+ self._cache["broadcast_address"] = x
+ return x
+
+ @property
+ def hostmask(self):
+ x = self._cache.get("hostmask")
+ if x is None:
+ x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+ self._cache["hostmask"] = x
+ return x
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%d" % (self.network_address, self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self.network_address, self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self.network_address, self.hostmask)
+
+ @property
+ def num_addresses(self):
+ """Number of hosts in the current subnet."""
+ return int(self.broadcast_address) - int(self.network_address) + 1
+
+ @property
+ def _address_class(self):
+ # Returning bare address objects (rather than interfaces) allows for
+ # more consistent behaviour across the network address, broadcast
+ # address and individual host addresses.
+ msg = "%200s has no associated address class" % (type(self),)
+ raise NotImplementedError(msg)
+
+ @property
+ def prefixlen(self):
+ return self._prefixlen
+
+ def address_exclude(self, other):
+ """Remove an address from a larger block.
+
+ For example:
+
+ addr1 = ip_network('192.0.2.0/28')
+ addr2 = ip_network('192.0.2.1/32')
+ list(addr1.address_exclude(addr2)) =
+ [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+ IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+ or IPv6:
+
+ addr1 = ip_network('2001:db8::/32')
+ addr2 = ip_network('2001:db8::1/128')
+ list(addr1.address_exclude(addr2)) =
+ [ip_network('2001:db8::/128'),
+ ip_network('2001:db8::2/127'),
+ ip_network('2001:db8::4/126'),
+ ip_network('2001:db8::8/125'),
+ ...
+ ip_network('2001:db8:8000::/33')]
+
+ Args:
+ other: An IPv4Network or IPv6Network object of the same type.
+
+ Returns:
+ An iterator of the IPv(4|6)Network objects which is self
+ minus other.
+
+ Raises:
+ TypeError: If self and other are of differing address
+ versions, or if other is not a network object.
+ ValueError: If other is not completely contained by self.
+
+ """
+ if not self._version == other._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (self, other)
+ )
+
+ if not isinstance(other, _BaseNetwork):
+ raise TypeError("%s is not a network object" % other)
+
+ if not other.subnet_of(self):
+ raise ValueError("%s not contained in %s" % (other, self))
+ if other == self:
+ return
+
+ # Make sure we're comparing the network of other.
+ other = other.__class__(
+ "%s/%s" % (other.network_address, other.prefixlen)
+ )
+
+ s1, s2 = self.subnets()
+ while s1 != other and s2 != other:
+ if other.subnet_of(s1):
+ yield s2
+ s1, s2 = s1.subnets()
+ elif other.subnet_of(s2):
+ yield s1
+ s1, s2 = s2.subnets()
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+ if s1 == other:
+ yield s2
+ elif s2 == other:
+ yield s1
+ else:
+ # If we got here, there's a bug somewhere.
+ raise AssertionError(
+ "Error performing exclusion: "
+ "s1: %s s2: %s other: %s" % (s1, s2, other)
+ )
+
+ def compare_networks(self, other):
+ """Compare two IP objects.
+
+ This compares only the integer representation of the network
+ addresses; the host bits aren't considered at all in this method.
+ If you want to compare host bits, you can simply do
+ 'HostA._ip < HostB._ip'
+
+ Args:
+ other: An IP object.
+
+ Returns:
+ If the IP versions of self and other are the same, returns:
+
+ -1 if self < other:
+ eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+ IPv6Network('2001:db8::1000/124') <
+ IPv6Network('2001:db8::2000/124')
+ 0 if self == other
+ eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+ IPv6Network('2001:db8::1000/124') ==
+ IPv6Network('2001:db8::1000/124')
+ 1 if self > other
+ eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+ IPv6Network('2001:db8::2000/124') >
+ IPv6Network('2001:db8::1000/124')
+
+ Raises:
+ TypeError if the IP versions are different.
+
+ """
+ # does this need to raise a ValueError?
+ if self._version != other._version:
+ raise TypeError(
+ "%s and %s are not of the same type" % (self, other)
+ )
+ # self._version == other._version below here:
+ if self.network_address < other.network_address:
+ return -1
+ if self.network_address > other.network_address:
+ return 1
+ # self.network_address == other.network_address below here:
+ if self.netmask < other.netmask:
+ return -1
+ if self.netmask > other.netmask:
+ return 1
+ return 0
+
+ def _get_networks_key(self):
+ """Network-only key function.
+
+ Returns an object that identifies this address' network and
+ netmask. This function is a suitable "key" argument for sorted()
+ and list.sort().
+
+ """
+ return (self._version, self.network_address, self.netmask)
+
+ def subnets(self, prefixlen_diff=1, new_prefix=None):
+ """The subnets which join to make the current subnet.
+
+ In the case that self contains only one IP
+ (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+ for IPv6), yield an iterator with just ourself.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length
+ should be increased by. This should not be set if
+ new_prefix is also set.
+ new_prefix: The desired new prefix length. This must be a
+ larger number (i.e. a smaller network) than the existing prefix.
+ This should not be set if prefixlen_diff is also set.
+
+ Returns:
+ An iterator of IPv(4|6) objects.
+
+ Raises:
+ ValueError: The prefixlen_diff is too small or too large.
+ OR
+ prefixlen_diff and new_prefix are both set or new_prefix
+ is a smaller number than the current prefix (smaller
+ number means a larger network)
+
+ """
+ if self._prefixlen == self._max_prefixlen:
+ yield self
+ return
+
+ if new_prefix is not None:
+ if new_prefix < self._prefixlen:
+ raise ValueError("new prefix must be longer")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = new_prefix - self._prefixlen
+
+ if prefixlen_diff < 0:
+ raise ValueError("prefix length diff must be > 0")
+ new_prefixlen = self._prefixlen + prefixlen_diff
+
+ if new_prefixlen > self._max_prefixlen:
+ raise ValueError(
+ "prefix length diff %d is invalid for netblock %s"
+ % (new_prefixlen, self)
+ )
+
+ start = int(self.network_address)
+ end = int(self.broadcast_address) + 1
+ step = (int(self.hostmask) + 1) >> prefixlen_diff
+ for new_addr in _compat_range(start, end, step):
+ current = self.__class__((new_addr, new_prefixlen))
+ yield current
+
+ def supernet(self, prefixlen_diff=1, new_prefix=None):
+ """The supernet containing the current network.
+
+ Args:
+ prefixlen_diff: An integer, the amount the prefix length of
+ the network should be decreased by. For example, given a
+ /24 network and a prefixlen_diff of 3, a supernet with a
+ /21 netmask is returned.
+
+ Returns:
+ An IPv4 network object.
+
+ Raises:
+ ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+ a negative prefix length.
+ OR
+ If prefixlen_diff and new_prefix are both set or new_prefix is a
+ larger number than the current prefix (larger number means a
+ smaller network)
+
+ """
+ if self._prefixlen == 0:
+ return self
+
+ if new_prefix is not None:
+ if new_prefix > self._prefixlen:
+ raise ValueError("new prefix must be shorter")
+ if prefixlen_diff != 1:
+ raise ValueError("cannot set prefixlen_diff and new_prefix")
+ prefixlen_diff = self._prefixlen - new_prefix
+
+ new_prefixlen = self.prefixlen - prefixlen_diff
+ if new_prefixlen < 0:
+ raise ValueError(
+ "current prefixlen is %d, cannot have a prefixlen_diff of %d"
+ % (self.prefixlen, prefixlen_diff)
+ )
+ return self.__class__(
+ (
+ int(self.network_address)
+ & (int(self.netmask) << prefixlen_diff),
+ new_prefixlen,
+ )
+ )
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return (
+ self.network_address.is_multicast
+ and self.broadcast_address.is_multicast
+ )
+
+ @staticmethod
+ def _is_subnet_of(a, b):
+ try:
+ # Always false if one is v4 and the other is v6.
+ if a._version != b._version:
+ raise TypeError(
+ "%s and %s are not of the same version" % (a, b)
+ )
+ return (
+ b.network_address <= a.network_address
+ and b.broadcast_address >= a.broadcast_address
+ )
+ except AttributeError:
+ raise TypeError(
+ "Unable to test subnet containment "
+ "between %s and %s" % (a, b)
+ )
+
+ def subnet_of(self, other):
+ """Return True if this network is a subnet of other."""
+ return self._is_subnet_of(self, other)
+
+ def supernet_of(self, other):
+ """Return True if this network is a supernet of other."""
+ return self._is_subnet_of(other, self)
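+
+ # Usage sketch (hedged, illustrative values):
+ #
+ # >>> IPv4Network(u'192.0.2.0/28').subnet_of(IPv4Network(u'192.0.2.0/24'))
+ # True
+ # >>> IPv4Network(u'192.0.2.0/24').supernet_of(IPv4Network(u'192.0.2.0/28'))
+ # True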
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return (
+ self.network_address.is_reserved
+ and self.broadcast_address.is_reserved
+ )
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return (
+ self.network_address.is_link_local
+ and self.broadcast_address.is_link_local
+ )
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return (
+ self.network_address.is_private
+ and self.broadcast_address.is_private
+ )
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return (
+ self.network_address.is_unspecified
+ and self.broadcast_address.is_unspecified
+ )
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return (
+ self.network_address.is_loopback
+ and self.broadcast_address.is_loopback
+ )
+
+
+class _BaseV4(object):
+
+ """Base IPv4 object.
+
+ The following methods are used by IPv4 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 4
+ # Equivalent to 255.255.255.255 or 32 bits of 1's.
+ _ALL_ONES = (2 ** IPV4LENGTH) - 1
+ _DECIMAL_DIGITS = frozenset("0123456789")
+
+ # the valid octets for host and netmasks. only useful for IPv4.
+ _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+ _max_prefixlen = IPV4LENGTH
+ # There are only a handful of valid v4 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ def _explode_shorthand_ip_string(self):
+ return _compat_str(self)
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "24")
+ - a string representing the prefix netmask (e.g. "255.255.255.0")
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ try:
+ # Check for a netmask in prefix length form
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ except NetmaskValueError:
+ # Check for a netmask or hostmask in dotted-quad form.
+ # This may raise NetmaskValueError.
+ prefixlen = cls._prefix_from_ip_string(arg)
+ netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn the given IP string into an integer for comparison.
+
+ Args:
+ ip_str: A string, the IP ip_str.
+
+ Returns:
+ The IP ip_str as an integer.
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ octets = ip_str.split(".")
+ if len(octets) != 4:
+ raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+ try:
+ return _compat_int_from_byte_vals(
+ map(cls._parse_octet, octets), "big"
+ )
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_octet(cls, octet_str):
+ """Convert a decimal octet into an integer.
+
+ Args:
+ octet_str: A string, the number to parse.
+
+ Returns:
+ The octet as an integer.
+
+ Raises:
+ ValueError: if the octet isn't strictly a decimal from [0..255].
+
+ """
+ if not octet_str:
+ raise ValueError("Empty octet not permitted")
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+ msg = "Only decimal digits permitted in %r"
+ raise ValueError(msg % octet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(octet_str) > 3:
+ msg = "At most 3 characters permitted in %r"
+ raise ValueError(msg % octet_str)
+ # Convert to integer (we know digits are legal)
+ octet_int = int(octet_str, 10)
+ # Any octets that look like they *might* be written in octal,
+ # and which don't look exactly the same in both octal and
+ # decimal are rejected as ambiguous
+ if octet_int > 7 and octet_str[0] == "0":
+ msg = "Ambiguous (octal/decimal) value in %r not permitted"
+ raise ValueError(msg % octet_str)
+ if octet_int > 255:
+ raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+ return octet_int
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int):
+ """Turns a 32-bit integer into dotted decimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ The IP address as a string in dotted decimal notation.
+
+ """
+ return ".".join(
+ _compat_str(
+ struct.unpack(b"!B", b)[0] if isinstance(b, bytes) else b
+ )
+ for b in _compat_to_bytes(ip_int, 4, "big")
+ )
+
+ def _is_hostmask(self, ip_str):
+ """Test if the IP string is a hostmask (rather than a netmask).
+
+ Args:
+ ip_str: A string, the potential hostmask.
+
+ Returns:
+ A boolean, True if the IP string is a hostmask.
+
+ """
+ bits = ip_str.split(".")
+ try:
+ parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+ except ValueError:
+ return False
+ if len(parts) != len(bits):
+ return False
+ if parts[0] < parts[-1]:
+ return True
+ return False
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv4 address.
+
+ This implements the method described in RFC1035 3.5.
+
+ """
+ reverse_octets = _compat_str(self).split(".")[::-1]
+ return ".".join(reverse_octets) + ".in-addr.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+ """Represent and manipulate single IPv4 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+
+ """
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv4Address('192.0.2.1') == IPv4Address(3221225985).
+ or, more generally
+ IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+ IPv4Address('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 4)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v4_int_to_packed(self._ip)
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within the
+ reserved IPv4 Network range.
+
+ """
+ return self in self._constants._reserved_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ return (
+ self not in self._constants._public_network and not self.is_private
+ )
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is multicast.
+ See RFC 3171 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 5735 3.
+
+ """
+ return self == self._constants._unspecified_address
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback per RFC 3330.
+
+ """
+ return self in self._constants._loopback_network
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is link-local per RFC 3927.
+
+ """
+ return self in self._constants._linklocal_network
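+
+# Usage sketch (hedged, illustrative values) for the classification
+# properties above:
+#
+#     >>> IPv4Address(u'10.0.0.1').is_private
+#     True
+#     >>> IPv4Address(u'169.254.1.1').is_link_local
+#     True
+#     >>> IPv4Address(u'8.8.8.8').is_global
+#     True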
+
+
+class IPv4Interface(IPv4Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv4Address.__init__(self, address)
+ self.network = IPv4Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+
+ if isinstance(address, tuple):
+ IPv4Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+
+ self.network = IPv4Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv4Address.__init__(self, addr[0])
+
+ self.network = IPv4Network(address, strict=False)
+ self._prefixlen = self.network._prefixlen
+
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv4Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv4Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv4Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+ """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+ Attributes: [examples for IPv4Network('192.0.2.0/27')]
+ .network_address: IPv4Address('192.0.2.0')
+ .hostmask: IPv4Address('0.0.0.31')
+ .broadcast_address: IPv4Address('192.0.2.31')
+ .netmask: IPv4Address('255.255.255.224')
+ .prefixlen: 27
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv4Address
+
+ def __init__(self, address, strict=True):
+
+ """Instantiate a new IPv4 network object.
+
+ Args:
+ address: A string or integer representing the IP [& network].
+ '192.0.2.0/24'
+ '192.0.2.0/255.255.255.0'
+ '192.0.0.2/0.0.0.255'
+ are all functionally the same in IPv4. Similarly,
+ '192.0.2.1'
+ '192.0.2.1/255.255.255.255'
+ '192.0.2.1/32'
+ are also functionally equivalent. That is to say, failing to
+ provide a subnetmask will create an object with a mask of /32.
+
+ If the mask (portion after the / in the argument) is given in
+ dotted quad form, it is treated as a netmask if it starts with a
+ non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+ starts with a zero field (e.g. 0.255.255.255 == /8), with the
+ single exception of an all-zero mask which is treated as a
+ netmask == /0. If no mask is given, a default of /32 is used.
+
+ Additionally, an integer can be passed, so
+ IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+ or, more generally
+ IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+ IPv4Interface('192.0.2.1')
+
+ Raises:
+ AddressValueError: If ipaddress isn't a valid IPv4 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv4 address.
+ ValueError: If strict is True and a network address is not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Constructing from a packed address or integer
+ if isinstance(address, (_compat_int_types, bytes)):
+ self.network_address = IPv4Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ # fixme: address/network test here.
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ # We weren't given an address[1]
+ arg = self._max_prefixlen
+ self.network_address = IPv4Address(address[0])
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv4Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+ self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv4Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv4Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, True if the address is not reserved per
+ iana-ipv4-special-registry.
+
+ """
+ return (
+ not (
+ self.network_address in IPv4Network("100.64.0.0/10")
+ and self.broadcast_address in IPv4Network("100.64.0.0/10")
+ )
+ and not self.is_private
+ )
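+
+# Usage sketch (hedged, illustrative values) for the _BaseNetwork methods this
+# class inherits:
+#
+#     >>> net = IPv4Network(u'192.0.2.0/30')
+#     >>> list(net.hosts())        # network and broadcast addresses omitted
+#     [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]
+#     >>> list(net.subnets(prefixlen_diff=1))
+#     [IPv4Network('192.0.2.0/31'), IPv4Network('192.0.2.2/31')]
+#     >>> net.supernet(prefixlen_diff=1)
+#     IPv4Network('192.0.2.0/29')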
+
+
+class _IPv4Constants(object):
+
+ _linklocal_network = IPv4Network("169.254.0.0/16")
+
+ _loopback_network = IPv4Network("127.0.0.0/8")
+
+ _multicast_network = IPv4Network("224.0.0.0/4")
+
+ _public_network = IPv4Network("100.64.0.0/10")
+
+ _private_networks = [
+ IPv4Network("0.0.0.0/8"),
+ IPv4Network("10.0.0.0/8"),
+ IPv4Network("127.0.0.0/8"),
+ IPv4Network("169.254.0.0/16"),
+ IPv4Network("172.16.0.0/12"),
+ IPv4Network("192.0.0.0/29"),
+ IPv4Network("192.0.0.170/31"),
+ IPv4Network("192.0.2.0/24"),
+ IPv4Network("192.168.0.0/16"),
+ IPv4Network("198.18.0.0/15"),
+ IPv4Network("198.51.100.0/24"),
+ IPv4Network("203.0.113.0/24"),
+ IPv4Network("240.0.0.0/4"),
+ IPv4Network("255.255.255.255/32"),
+ ]
+
+ _reserved_network = IPv4Network("240.0.0.0/4")
+
+ _unspecified_address = IPv4Address("0.0.0.0")
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+ """Base IPv6 object.
+
+ The following methods are used by IPv6 objects in both single IP
+ addresses and networks.
+
+ """
+
+ __slots__ = ()
+ _version = 6
+ _ALL_ONES = (2 ** IPV6LENGTH) - 1
+ _HEXTET_COUNT = 8
+ _HEX_DIGITS = frozenset("0123456789ABCDEFabcdef")
+ _max_prefixlen = IPV6LENGTH
+
+ # There are only a bunch of valid v6 netmasks, so we cache them all
+ # when constructed (see _make_netmask()).
+ _netmask_cache = {}
+
+ @classmethod
+ def _make_netmask(cls, arg):
+ """Make a (netmask, prefix_len) tuple from the given argument.
+
+ Argument can be:
+ - an integer (the prefix length)
+ - a string representing the prefix length (e.g. "64")
+ (a dotted-quad netmask string is not accepted for IPv6)
+ """
+ if arg not in cls._netmask_cache:
+ if isinstance(arg, _compat_int_types):
+ prefixlen = arg
+ else:
+ prefixlen = cls._prefix_from_prefix_string(arg)
+ netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+ cls._netmask_cache[arg] = netmask, prefixlen
+ return cls._netmask_cache[arg]
+
+ @classmethod
+ def _ip_int_from_string(cls, ip_str):
+ """Turn an IPv6 ip_str into an integer.
+
+ Args:
+ ip_str: A string, the IPv6 ip_str.
+
+ Returns:
+ An int, the IPv6 address
+
+ Raises:
+ AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+ """
+ if not ip_str:
+ raise AddressValueError("Address cannot be empty")
+
+ parts = ip_str.split(":")
+
+ # An IPv6 address needs at least 2 colons (3 parts).
+ _min_parts = 3
+ if len(parts) < _min_parts:
+ msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+ raise AddressValueError(msg)
+
+ # If the address has an IPv4-style suffix, convert it to hexadecimal.
+ if "." in parts[-1]:
+ try:
+ ipv4_int = IPv4Address(parts.pop())._ip
+ except AddressValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+ parts.append("%x" % ((ipv4_int >> 16) & 0xFFFF))
+ parts.append("%x" % (ipv4_int & 0xFFFF))
+
+ # An IPv6 address can't have more than 8 colons (9 parts).
+ # The extra colon comes from using the "::" notation for a single
+ # leading or trailing zero part.
+ _max_parts = cls._HEXTET_COUNT + 1
+ if len(parts) > _max_parts:
+ msg = "At most %d colons permitted in %r" % (
+ _max_parts - 1,
+ ip_str,
+ )
+ raise AddressValueError(msg)
+
+ # Disregarding the endpoints, find '::' with nothing in between.
+ # This indicates that a run of zeroes has been skipped.
+ skip_index = None
+ for i in _compat_range(1, len(parts) - 1):
+ if not parts[i]:
+ if skip_index is not None:
+ # Can't have more than one '::'
+ msg = "At most one '::' permitted in %r" % ip_str
+ raise AddressValueError(msg)
+ skip_index = i
+
+ # parts_hi is the number of parts to copy from above/before the '::'
+ # parts_lo is the number of parts to copy from below/after the '::'
+ if skip_index is not None:
+ # If we found a '::', then check if it also covers the endpoints.
+ parts_hi = skip_index
+ parts_lo = len(parts) - skip_index - 1
+ if not parts[0]:
+ parts_hi -= 1
+ if parts_hi:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ parts_lo -= 1
+ if parts_lo:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+ if parts_skipped < 1:
+ msg = "Expected at most %d other parts with '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+ else:
+ # Otherwise, allocate the entire address to parts_hi. The
+ # endpoints could still be empty, but _parse_hextet() will check
+ # for that.
+ if len(parts) != cls._HEXTET_COUNT:
+ msg = "Exactly %d parts expected without '::' in %r"
+ raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+ if not parts[0]:
+ msg = "Leading ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # ^: requires ^::
+ if not parts[-1]:
+ msg = "Trailing ':' only permitted as part of '::' in %r"
+ raise AddressValueError(msg % ip_str) # :$ requires ::$
+ parts_hi = len(parts)
+ parts_lo = 0
+ parts_skipped = 0
+
+ try:
+ # Now, parse the hextets into a 128-bit integer.
+ ip_int = 0
+ for i in range(parts_hi):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ ip_int <<= 16 * parts_skipped
+ for i in range(-parts_lo, 0):
+ ip_int <<= 16
+ ip_int |= cls._parse_hextet(parts[i])
+ return ip_int
+ except ValueError as exc:
+ raise AddressValueError("%s in %r" % (exc, ip_str))
+
+ @classmethod
+ def _parse_hextet(cls, hextet_str):
+ """Convert an IPv6 hextet string into an integer.
+
+ Args:
+ hextet_str: A string, the number to parse.
+
+ Returns:
+ The hextet as an integer.
+
+ Raises:
+ ValueError: if the input isn't strictly a hex number from
+ [0..FFFF].
+
+ """
+ # Whitelist the characters, since int() allows a lot of bizarre stuff.
+ if not cls._HEX_DIGITS.issuperset(hextet_str):
+ raise ValueError("Only hex digits permitted in %r" % hextet_str)
+ # We do the length check second, since the invalid character error
+ # is likely to be more informative for the user
+ if len(hextet_str) > 4:
+ msg = "At most 4 characters permitted in %r"
+ raise ValueError(msg % hextet_str)
+ # Length check means we can skip checking the integer value
+ return int(hextet_str, 16)
+
+ @classmethod
+ def _compress_hextets(cls, hextets):
+ """Compresses a list of hextets.
+
+ Compresses a list of strings, replacing the longest continuous
+ sequence of "0" in the list with "" and adding empty strings at
+ the beginning or at the end of the string such that subsequently
+ calling ":".join(hextets) will produce the compressed version of
+ the IPv6 address.
+
+ Args:
+ hextets: A list of strings, the hextets to compress.
+
+ Returns:
+ A list of strings.
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+ for index, hextet in enumerate(hextets):
+ if hextet == "0":
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (
+ best_doublecolon_start + best_doublecolon_len
+ )
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += [""]
+ hextets[best_doublecolon_start:best_doublecolon_end] = [""]
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [""] + hextets
+
+ return hextets
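+
+ # Worked example (illustrative): the longest run of zero hextets is
+ # replaced by an empty string so that ":".join() produces "::" there:
+ #
+ # >>> _BaseV6._compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+ # ['2001', 'db8', '', '1']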
+
+ @classmethod
+ def _string_from_ip_int(cls, ip_int=None):
+ """Turns a 128-bit integer into hexadecimal notation.
+
+ Args:
+ ip_int: An integer, the IP address.
+
+ Returns:
+ A string, the hexadecimal representation of the address.
+
+ Raises:
+ ValueError: The address is bigger than 128 bits of all ones.
+
+ """
+ if ip_int is None:
+ ip_int = int(cls._ip)
+
+ if ip_int > cls._ALL_ONES:
+ raise ValueError("IPv6 address is too large")
+
+ hex_str = "%032x" % ip_int
+ hextets = ["%x" % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+ hextets = cls._compress_hextets(hextets)
+ return ":".join(hextets)
+
+ def _explode_shorthand_ip_string(self):
+ """Expand a shortened IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if isinstance(self, IPv6Network):
+ ip_str = _compat_str(self.network_address)
+ elif isinstance(self, IPv6Interface):
+ ip_str = _compat_str(self.ip)
+ else:
+ ip_str = _compat_str(self)
+
+ ip_int = self._ip_int_from_string(ip_str)
+ hex_str = "%032x" % ip_int
+ parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+ if isinstance(self, (_BaseNetwork, IPv6Interface)):
+ return "%s/%d" % (":".join(parts), self._prefixlen)
+ return ":".join(parts)
+
+ def _reverse_pointer(self):
+ """Return the reverse DNS pointer name for the IPv6 address.
+
+ This implements the method described in RFC3596 2.5.
+
+ """
+ reverse_chars = self.exploded[::-1].replace(":", "")
+ return ".".join(reverse_chars) + ".ip6.arpa"
+
+ @property
+ def max_prefixlen(self):
+ return self._max_prefixlen
+
+ @property
+ def version(self):
+ return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+ """Represent and manipulate single IPv6 Addresses."""
+
+ __slots__ = ("_ip", "__weakref__")
+
+ def __init__(self, address):
+ """Instantiate a new IPv6 address object.
+
+ Args:
+ address: A string or integer representing the IP
+
+ Additionally, an integer can be passed, so
+ IPv6Address('2001:db8::') ==
+ IPv6Address(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Address(int(IPv6Address('2001:db8::'))) ==
+ IPv6Address('2001:db8::')
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+
+ """
+ # Efficient constructor from integer.
+ if isinstance(address, _compat_int_types):
+ self._check_int_address(address)
+ self._ip = address
+ return
+
+ # Constructing from a packed address
+ if isinstance(address, bytes):
+ self._check_packed_address(address, 16)
+ bvs = _compat_bytes_to_byte_vals(address)
+ self._ip = _compat_int_from_byte_vals(bvs, "big")
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP string.
+ addr_str = _compat_str(address)
+ if "/" in addr_str:
+ raise AddressValueError("Unexpected '/' in %r" % address)
+ self._ip = self._ip_int_from_string(addr_str)
+
+ @property
+ def packed(self):
+ """The binary representation of this address."""
+ return v6_int_to_packed(self._ip)
+
+ @property
+ def is_multicast(self):
+ """Test if the address is reserved for multicast use.
+
+ Returns:
+ A boolean, True if the address is a multicast address.
+ See RFC 2373 2.7 for details.
+
+ """
+ return self in self._constants._multicast_network
+
+ @property
+ def is_reserved(self):
+ """Test if the address is otherwise IETF reserved.
+
+ Returns:
+ A boolean, True if the address is within one of the
+ reserved IPv6 Network ranges.
+
+ """
+ return any(self in x for x in self._constants._reserved_networks)
+
+ @property
+ def is_link_local(self):
+ """Test if the address is reserved for link-local.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 4291.
+
+ """
+ return self in self._constants._linklocal_network
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return self in self._constants._sitelocal_network
+
+ @property
+ def is_private(self):
+ """Test if this address is allocated for private networks.
+
+ Returns:
+ A boolean, True if the address is reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return any(self in net for net in self._constants._private_networks)
+
+ @property
+ def is_global(self):
+ """Test if this address is allocated for public networks.
+
+ Returns:
+ A boolean, true if the address is not reserved per
+ iana-ipv6-special-registry.
+
+ """
+ return not self.is_private
+
+ @property
+ def is_unspecified(self):
+ """Test if the address is unspecified.
+
+ Returns:
+ A boolean, True if this is the unspecified address as defined in
+ RFC 2373 2.5.2.
+
+ """
+ return self._ip == 0
+
+ @property
+ def is_loopback(self):
+ """Test if the address is a loopback address.
+
+ Returns:
+ A boolean, True if the address is a loopback address as defined in
+ RFC 2373 2.5.3.
+
+ """
+ return self._ip == 1
+
+ @property
+ def ipv4_mapped(self):
+ """Return the IPv4 mapped address.
+
+ Returns:
+ If the IPv6 address is a v4 mapped address, return the
+ IPv4 mapped address. Return None otherwise.
+
+ """
+ if (self._ip >> 32) != 0xFFFF:
+ return None
+ return IPv4Address(self._ip & 0xFFFFFFFF)
+
+ @property
+ def teredo(self):
+ """Tuple of embedded teredo IPs.
+
+ Returns:
+ Tuple of the (server, client) IPs or None if the address
+ doesn't appear to be a teredo address (doesn't start with
+ 2001::/32)
+
+ """
+ if (self._ip >> 96) != 0x20010000:
+ return None
+ return (
+ IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+ IPv4Address(~self._ip & 0xFFFFFFFF),
+ )
+
+ @property
+ def sixtofour(self):
+ """Return the IPv4 6to4 embedded address.
+
+ Returns:
+ The IPv4 6to4-embedded address if present or None if the
+ address doesn't appear to contain a 6to4 embedded address.
+
+ """
+ if (self._ip >> 112) != 0x2002:
+ return None
+ return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
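+
+# Usage sketch (hedged, illustrative values) for the transition-mechanism
+# properties above:
+#
+#     >>> IPv6Address(u'::ffff:192.0.2.1').ipv4_mapped
+#     IPv4Address('192.0.2.1')
+#     >>> IPv6Address(u'2002:c000:201::1').sixtofour
+#     IPv4Address('192.0.2.1')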
+
+
+class IPv6Interface(IPv6Address):
+ def __init__(self, address):
+ if isinstance(address, (bytes, _compat_int_types)):
+ IPv6Address.__init__(self, address)
+ self.network = IPv6Network(self._ip)
+ self._prefixlen = self._max_prefixlen
+ return
+ if isinstance(address, tuple):
+ IPv6Address.__init__(self, address[0])
+ if len(address) > 1:
+ self._prefixlen = int(address[1])
+ else:
+ self._prefixlen = self._max_prefixlen
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self.hostmask = self.network.hostmask
+ return
+
+ addr = _split_optional_netmask(address)
+ IPv6Address.__init__(self, addr[0])
+ self.network = IPv6Network(address, strict=False)
+ self.netmask = self.network.netmask
+ self._prefixlen = self.network._prefixlen
+ self.hostmask = self.network.hostmask
+
+ def __str__(self):
+ return "%s/%d" % (
+ self._string_from_ip_int(self._ip),
+ self.network.prefixlen,
+ )
+
+ def __eq__(self, other):
+ address_equal = IPv6Address.__eq__(self, other)
+ if not address_equal or address_equal is NotImplemented:
+ return address_equal
+ try:
+ return self.network == other.network
+ except AttributeError:
+ # An interface with an associated network is NOT the
+ # same as an unassociated address. That's why the hash
+ # takes the extra info into account.
+ return False
+
+ def __lt__(self, other):
+ address_less = IPv6Address.__lt__(self, other)
+ if address_less is NotImplemented:
+ return NotImplemented
+ try:
+ return (
+ self.network < other.network
+ or self.network == other.network
+ and address_less
+ )
+ except AttributeError:
+ # We *do* allow addresses and interfaces to be sorted. The
+ # unassociated address is considered less than all interfaces.
+ return False
+
+ def __hash__(self):
+ return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+ __reduce__ = _IPAddressBase.__reduce__
+
+ @property
+ def ip(self):
+ return IPv6Address(self._ip)
+
+ @property
+ def with_prefixlen(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self._prefixlen)
+
+ @property
+ def with_netmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.netmask)
+
+ @property
+ def with_hostmask(self):
+ return "%s/%s" % (self._string_from_ip_int(self._ip), self.hostmask)
+
+ @property
+ def is_unspecified(self):
+ return self._ip == 0 and self.network.is_unspecified
+
+ @property
+ def is_loopback(self):
+ return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+ """This class represents and manipulates 128-bit IPv6 networks.
+
+ Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+ .network_address: IPv6Address('2001:db8::1000')
+ .hostmask: IPv6Address('::f')
+ .broadcast_address: IPv6Address('2001:db8::100f')
+ .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+ .prefixlen: 124
+
+ """
+
+ # Class to use when creating address objects
+ _address_class = IPv6Address
+
+ def __init__(self, address, strict=True):
+ """Instantiate a new IPv6 Network object.
+
+ Args:
+ address: A string or integer representing the IPv6 network or the
+ IP and prefix/netmask.
+ '2001:db8::/128'
+ '2001:db8:0000:0000:0000:0000:0000:0000/128'
+ '2001:db8::'
+ are all functionally the same in IPv6. That is to say,
+ failing to provide a subnetmask will create an object with
+ a mask of /128.
+
+ Additionally, an integer can be passed, so
+ IPv6Network('2001:db8::') ==
+ IPv6Network(42540766411282592856903984951653826560)
+ or, more generally
+ IPv6Network(int(IPv6Network('2001:db8::'))) ==
+ IPv6Network('2001:db8::')
+
+ strict: A boolean. If true, ensure that we have been passed
+ a true network address, e.g. 2001:db8::1000/124, and not an
+ IP address on a network, e.g. 2001:db8::1/124.
+
+ Raises:
+ AddressValueError: If address isn't a valid IPv6 address.
+ NetmaskValueError: If the netmask isn't valid for
+ an IPv6 address.
+ ValueError: If strict was True and a network address was not
+ supplied.
+
+ """
+ _BaseNetwork.__init__(self, address)
+
+ # Efficient constructor from integer or packed address
+ if isinstance(address, (bytes, _compat_int_types)):
+ self.network_address = IPv6Address(address)
+ self.netmask, self._prefixlen = self._make_netmask(
+ self._max_prefixlen
+ )
+ return
+
+ if isinstance(address, tuple):
+ if len(address) > 1:
+ arg = address[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+ self.network_address = IPv6Address(address[0])
+ packed = int(self.network_address)
+ if packed & int(self.netmask) != packed:
+ if strict:
+ raise ValueError("%s has host bits set" % self)
+ else:
+ self.network_address = IPv6Address(
+ packed & int(self.netmask)
+ )
+ return
+
+ # Assume input argument to be string or any object representation
+ # which converts into a formatted IP prefix string.
+ addr = _split_optional_netmask(address)
+
+ self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+ if len(addr) == 2:
+ arg = addr[1]
+ else:
+ arg = self._max_prefixlen
+ self.netmask, self._prefixlen = self._make_netmask(arg)
+
+ if strict:
+ if (
+ IPv6Address(int(self.network_address) & int(self.netmask))
+ != self.network_address
+ ):
+ raise ValueError("%s has host bits set" % self)
+ self.network_address = IPv6Address(
+ int(self.network_address) & int(self.netmask)
+ )
+
+ if self._prefixlen == (self._max_prefixlen - 1):
+ self.hosts = self.__iter__
+
+ def hosts(self):
+ """Generate Iterator over usable hosts in a network.
+
+ This is like __iter__ except it doesn't return the
+ Subnet-Router anycast address.
+
+ """
+ network = int(self.network_address)
+ broadcast = int(self.broadcast_address)
+ for x in _compat_range(network + 1, broadcast + 1):
+ yield self._address_class(x)
+
+ @property
+ def is_site_local(self):
+ """Test if the address is reserved for site-local.
+
+ Note that the site-local address space has been deprecated by RFC 3879.
+ Use is_private to test if this address is in the space of unique local
+ addresses as defined by RFC 4193.
+
+ Returns:
+ A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+ """
+ return (
+ self.network_address.is_site_local
+ and self.broadcast_address.is_site_local
+ )
+
+
+class _IPv6Constants(object):
+
+ _linklocal_network = IPv6Network("fe80::/10")
+
+ _multicast_network = IPv6Network("ff00::/8")
+
+ _private_networks = [
+ IPv6Network("::1/128"),
+ IPv6Network("::/128"),
+ IPv6Network("::ffff:0:0/96"),
+ IPv6Network("100::/64"),
+ IPv6Network("2001::/23"),
+ IPv6Network("2001:2::/48"),
+ IPv6Network("2001:db8::/32"),
+ IPv6Network("2001:10::/28"),
+ IPv6Network("fc00::/7"),
+ IPv6Network("fe80::/10"),
+ ]
+
+ _reserved_networks = [
+ IPv6Network("::/8"),
+ IPv6Network("100::/8"),
+ IPv6Network("200::/7"),
+ IPv6Network("400::/6"),
+ IPv6Network("800::/5"),
+ IPv6Network("1000::/4"),
+ IPv6Network("4000::/3"),
+ IPv6Network("6000::/3"),
+ IPv6Network("8000::/3"),
+ IPv6Network("A000::/3"),
+ IPv6Network("C000::/3"),
+ IPv6Network("E000::/4"),
+ IPv6Network("F000::/5"),
+ IPv6Network("F800::/6"),
+ IPv6Network("FE00::/9"),
+ ]
+
+ _sitelocal_network = IPv6Network("fec0::/10")
+
+
+IPv6Address._constants = _IPv6Constants
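As a point of reference, a minimal usage sketch of the IPv6Network behaviour documented above; the standard library's ipaddress module exposes the same API as this vendored backport, so it is used here purely for illustration:

    import ipaddress

    net = ipaddress.IPv6Network('2001:db8::1000/124')
    print(net.network_address)   # 2001:db8::1000
    print(net.netmask)           # ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0
    print(net.prefixlen)         # 124

    # strict=True (the default) rejects addresses with host bits set
    try:
        ipaddress.IPv6Network('2001:db8::1001/124')
    except ValueError as exc:
        print(exc)               # 2001:db8::1001/124 has host bits set

    # hosts() skips the Subnet-Router anycast address (the network address)
    print(list(ipaddress.IPv6Network('2001:db8::1000/126').hosts()))
    # [IPv6Address('2001:db8::1001'), IPv6Address('2001:db8::1002'), IPv6Address('2001:db8::1003')]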
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py
new file mode 100644
index 00000000..67850308
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/database.py
@@ -0,0 +1,189 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+# Input patterns for is_input_dangerous function:
+#
+# 1. '"' in string and '--' in string or
+# "'" in string and '--' in string
+PATTERN_1 = re.compile(r'(\'|\").*--')
+
+# 2. union \ intersect \ except + select
+PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)
+
+# 3. ';' and any KEY_WORDS
+PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
+
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
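A short illustration of the two quoting helpers, assuming the collection is installed so the module_utils import path resolves:

    from ansible_collections.community.general.plugins.module_utils.database import (
        mysql_quote_identifier,
        pg_quote_identifier,
    )

    print(pg_quote_identifier('public.my_table', 'table'))   # "public"."my_table"
    print(pg_quote_identifier('strange"name', 'table'))      # "strange""name"  (embedded quotes are doubled)
    print(mysql_quote_identifier('mydb.*', 'table'))         # `mydb`.*  (the quoted wildcard is special-cased)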
+
+
+def is_input_dangerous(string):
+ """Check if the passed string is potentially dangerous.
+ Can be used to prevent SQL injections.
+
+ Note: use this function only when you can't use
+ psycopg2's cursor.execute method parametrized
+ (typically with DDL queries).
+ """
+ if not string:
+ return False
+
+ for pattern in (PATTERN_1, PATTERN_2, PATTERN_3):
+ if re.search(pattern, string):
+ return True
+
+ return False
+
+
+def check_input(module, *args):
+ """Wrapper for is_input_dangerous function."""
+ needs_to_check = args
+
+ dangerous_elements = []
+
+ for elem in needs_to_check:
+ if isinstance(elem, str):
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ elif isinstance(elem, list):
+ for e in elem:
+ if is_input_dangerous(e):
+ dangerous_elements.append(e)
+
+ elif elem is None or isinstance(elem, bool):
+ pass
+
+ else:
+ elem = str(elem)
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ if dangerous_elements:
+ module.fail_json(msg="Passed input '%s' is "
+ "potentially dangerous" % ', '.join(dangerous_elements))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
new file mode 100644
index 00000000..bcb02e84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Mark Maglana <mmaglana@gmail.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# Common functionality to be used by various module components
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import configparser
+from os.path import expanduser
+from uuid import UUID
+
+LIBCLOUD_IMP_ERR = None
+try:
+ from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
+ from libcloud.compute.base import Node, NodeLocation
+ from libcloud.compute.providers import get_driver
+ from libcloud.compute.types import Provider
+
+ import libcloud.security
+
+ HAS_LIBCLOUD = True
+except ImportError:
+ LIBCLOUD_IMP_ERR = traceback.format_exc()
+ HAS_LIBCLOUD = False
+
+# MCP 2.x version pattern for location (datacenter) names.
+#
+# Note that this is not a totally reliable way of determining MCP version.
+# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
+# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
+# by specifying it in the module parameters.
+MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
+
+
+class DimensionDataModule(object):
+ """
+ The base class containing common functionality used by Dimension Data modules for Ansible.
+ """
+
+ def __init__(self, module):
+ """
+ Create a new DimensionDataModule.
+
+ Will fail if Apache libcloud is not present.
+
+ :param module: The underlying Ansible module.
+ :type module: AnsibleModule
+ """
+
+ self.module = module
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
+
+ # Credentials are common to all Dimension Data modules.
+ credentials = self.get_credentials()
+ self.user_id = credentials['user_id']
+ self.key = credentials['key']
+
+ # Region and location are common to all Dimension Data modules.
+ region = self.module.params['region']
+ self.region = 'dd-{0}'.format(region)
+ self.location = self.module.params['location']
+
+ libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
+
+ self.driver = get_driver(Provider.DIMENSIONDATA)(
+ self.user_id,
+ self.key,
+ region=self.region
+ )
+
+ # Determine the MCP API version (this depends on the target datacenter).
+ self.mcp_version = self.get_mcp_version(self.location)
+
+ # Optional "wait-for-completion" arguments
+ if 'wait' in self.module.params:
+ self.wait = self.module.params['wait']
+ self.wait_time = self.module.params['wait_time']
+ self.wait_poll_interval = self.module.params['wait_poll_interval']
+ else:
+ self.wait = False
+ self.wait_time = 0
+ self.wait_poll_interval = 0
+
+ def get_credentials(self):
+ """
+ Get user_id and key from module configuration, environment, or dotfile.
+ Order of priority is module, environment, dotfile.
+
+ To set in environment:
+
+ export MCP_USER='myusername'
+ export MCP_PASSWORD='mypassword'
+
+ To set in dot file place a file at ~/.dimensiondata with
+ the following contents:
+
+ [dimensiondatacloud]
+ MCP_USER: myusername
+ MCP_PASSWORD: mypassword
+ """
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg='libcloud is required for this module.')
+
+ user_id = None
+ key = None
+
+ # First, try the module configuration
+ if 'mcp_user' in self.module.params:
+ if 'mcp_password' not in self.module.params:
+ self.module.fail_json(
+ msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
+ )
+
+ user_id = self.module.params['mcp_user']
+ key = self.module.params['mcp_password']
+
+ # Fall back to environment
+ if not user_id or not key:
+ user_id = os.environ.get('MCP_USER', None)
+ key = os.environ.get('MCP_PASSWORD', None)
+
+ # Finally, try dotfile (~/.dimensiondata)
+ if not user_id or not key:
+ home = expanduser('~')
+ config = configparser.RawConfigParser()
+ config.read("%s/.dimensiondata" % home)
+
+ try:
+ user_id = config.get("dimensiondatacloud", "MCP_USER")
+ key = config.get("dimensiondatacloud", "MCP_PASSWORD")
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ pass
+
+ # One or more credentials not found. Function can't recover from this
+ # so it has to raise an error instead of failing silently.
+ if not user_id:
+ raise MissingCredentialsError("Dimension Data user id not found")
+ elif not key:
+ raise MissingCredentialsError("Dimension Data key not found")
+
+ # Both found, return data
+ return dict(user_id=user_id, key=key)
+
+ def get_mcp_version(self, location):
+ """
+ Get the MCP version for the specified location.
+ """
+
+ location = self.driver.ex_get_location_by_id(location)
+ if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
+ return '2.0'
+
+ return '1.0'
+
+ def get_network_domain(self, locator, location):
+ """
+ Retrieve a network domain by its name or Id.
+ """
+
+ if is_uuid(locator):
+ network_domain = self.driver.ex_get_network_domain(locator)
+ else:
+ matching_network_domains = [
+ network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
+ if network_domain.name == locator
+ ]
+
+ if matching_network_domains:
+ network_domain = matching_network_domains[0]
+ else:
+ network_domain = None
+
+ if network_domain:
+ return network_domain
+
+ raise UnknownNetworkError("Network '%s' could not be found" % locator)
+
+ def get_vlan(self, locator, location, network_domain):
+ """
+ Get a VLAN object by its name or id
+ """
+ if is_uuid(locator):
+ vlan = self.driver.ex_get_vlan(locator)
+ else:
+ matching_vlans = [
+ vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
+ if vlan.name == locator
+ ]
+
+ if matching_vlans:
+ vlan = matching_vlans[0]
+ else:
+ vlan = None
+
+ if vlan:
+ return vlan
+
+ raise UnknownVLANError("VLAN '%s' could not be found" % locator)
+
+ @staticmethod
+ def argument_spec(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = dict(
+ region=dict(type='str', default='na'),
+ mcp_user=dict(type='str', required=False),
+ mcp_password=dict(type='str', required=False, no_log=True),
+ location=dict(type='str', required=True),
+ validate_certs=dict(type='bool', required=False, default=True)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def argument_spec_with_wait(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = DimensionDataModule.argument_spec(
+ wait=dict(type='bool', required=False, default=False),
+ wait_time=dict(type='int', required=False, default=600),
+ wait_poll_interval=dict(type='int', required=False, default=2)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def required_together(*additional_required_together):
+ """
+ Get the basic argument specification for Dimension Data modules indicating which arguments must be specified together.
+ :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
+ :return: An array containing the argument specifications.
+ """
+
+ required_together = [
+ ['mcp_user', 'mcp_password']
+ ]
+
+ if additional_required_together:
+ required_together.extend(additional_required_together)
+
+ return required_together
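A hedged sketch of how a module would typically combine these helpers; the 'name' option is hypothetical and only stands in for module-specific arguments:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule

    module = AnsibleModule(
        argument_spec=DimensionDataModule.argument_spec_with_wait(
            name=dict(type='str', required=True),   # hypothetical module-specific argument
        ),
        required_together=DimensionDataModule.required_together(),
    )
    dd_module = DimensionDataModule(module)   # fails early if Apache libcloud is missing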
+
+
+class LibcloudNotFound(Exception):
+ """
+ Exception raised when Apache libcloud cannot be found.
+ """
+
+ pass
+
+
+class MissingCredentialsError(Exception):
+ """
+ Exception raised when credentials for Dimension Data CloudControl cannot be found.
+ """
+
+ pass
+
+
+class UnknownNetworkError(Exception):
+ """
+ Exception raised when a network or network domain cannot be found.
+ """
+
+ pass
+
+
+class UnknownVLANError(Exception):
+ """
+ Exception raised when a VLAN cannot be found.
+ """
+
+ pass
+
+
+def get_dd_regions():
+ """
+ Get the list of available regions whose vendor is Dimension Data.
+ """
+
+ # Get endpoints
+ all_regions = API_ENDPOINTS.keys()
+
+ # Only Dimension Data endpoints (no prefix)
+ regions = [region[3:] for region in all_regions if region.startswith('dd-')]
+
+ return regions
+
+
+def is_uuid(u, version=4):
+ """
+ Test if valid v4 UUID
+ """
+ try:
+ uuid_obj = UUID(u, version=version)
+
+ return str(uuid_obj) == u
+ except ValueError:
+ return False
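Two quick examples of the UUID check, again assuming the module_utils import path resolves:

    from ansible_collections.community.general.plugins.module_utils.dimensiondata import is_uuid

    print(is_uuid('2ad99836-9b7c-4c9f-9c52-cd9a2f29d7b9'))   # True
    print(is_uuid('my-network-domain'))                      # False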
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py
new file mode 100644
index 00000000..03307250
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/common.py
@@ -0,0 +1,1022 @@
+#
+# Copyright 2016 Red Hat | Ansible
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+ # Either docker-py is no longer using requests, or docker-py isn't around either,
+ # or docker-py's dependency requests is missing. In any case, define an exception
+ # class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+ # No Docker SDK for Python. Create a placeholder client to allow
+ # instantiation of AnsibleModule and proper error handling
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition of what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
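A few examples of what the two validators above accept and reject:

    print(is_image_name_id('sha256:' + 'a' * 64))   # True   (64 hex digits after the sha256: prefix)
    print(is_image_name_id('ubuntu'))               # False  (a regular image name)
    print(is_valid_tag('v1.2.3'))                   # True
    print(is_valid_tag(''))                         # False  (unless allow_empty=True)
    print(is_valid_tag('.bad'))                     # False  (must not start with a period or dash)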
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
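For instance, SDK-specific dict and tuple subclasses are reduced to plain dicts and lists:

    print(sanitize_result({'HostConfig': {'Memory': 0}, 'Names': ('/web',)}))
    # {'HostConfig': {'Memory': 0}, 'Names': ['/web']}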
+
+
+class DockerBaseClass(object):
+
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
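A sketch of the mapping performed above for one common case (TLS with client certificates and host verification); the host and paths are illustrative only, and the Docker SDK for Python must be installed for TLSConfig to be available:

    auth = dict(
        docker_host='tcp://192.0.2.10:2376',
        tls_hostname='docker.example.com',
        api_version='auto',
        timeout=60,
        cacert_path='/certs/ca.pem',
        cert_path='/certs/cert.pem',
        key_path='/certs/key.pem',
        ssl_version=None,
        tls=False,
        tls_verify=True,
    )

    def fail(msg):
        raise SystemExit(msg)

    params = get_connect_params(auth, fail_function=fail)
    # params['base_url'] == 'https://192.0.2.10:2376'  (tcp:// is rewritten because TLS is enabled)
    # params['tls'] is a TLSConfig object carrying the client certificate, CA certificate and hostname check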
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClient(Client):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters-> environment variables-> defaults.
+
+ self.log('Getting credentials')
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = self.module.params.get(key)
+
+ if self.module.params.get('use_tls'):
+ # support use_tls option in docker_image.py. This will be deprecated.
+ use_tls = self.module.params.get('use_tls')
+ if use_tls == 'encrypt':
+ params['tls'] = True
+ if use_tls == 'verify':
+ params['validate_certs'] = True
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+ tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+ # In API <= 1.20, images pulled from Docker Hub may be reported with 'docker.io/<name>' as their name
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case: if docker.io wasn't there, it can be that
+ # the image wasn't found either (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClient, self).inspect_distribution(image, **kwargs)
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+ - ``strict`` (only compare if really equal)
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
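A few concrete comparisons illustrating the method/datatype combinations described in the docstring:

    print(compare_generic('abc', 'abc', 'strict', 'value'))                                   # True
    print(compare_generic(['a', 'b'], ['x', 'a', 'y', 'b'], 'allow_more_present', 'list'))    # True  (ordered subsequence)
    print(compare_generic(['a', 'b'], ['b', 'a', 'c'], 'allow_more_present', 'set'))          # True  (subset, order ignored)
    print(compare_generic({'k': 'v'}, {'k': 'v', 'extra': 1}, 'strict', 'dict'))              # False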
+
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+ Returns a boolean indicating whether a difference exists for name
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
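A minimal sketch of how a module records and reports differences with this tracker:

    tracker = DifferenceTracker()
    tracker.add('memory', parameter='512m', active='256m')
    tracker.add('restart_policy', parameter='always', active='no')

    print(tracker.empty)                          # False
    print(tracker.has_difference_for('memory'))   # True
    before, after = tracker.get_before_after()
    print(before)   # {'memory': '256m', 'restart_policy': 'no'}
    print(after)    # {'memory': '512m', 'restart_policy': 'always'}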
+
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
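For example, for a docker_network-style driver_options dictionary:

    opts = {'com.docker.network.bridge.enable_icc': True, 'mtu': 1450}
    print(clean_dict_booleans_for_docker_api(opts))
    # {'com.docker.network.bridge.enable_icc': 'true', 'mtu': '1450'}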
+
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return time duration in nanosecond.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
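Two examples of the accepted duration syntax and the resulting values:

    print(convert_duration_to_nanosecond('1m30s'))   # 90000000000
    print(convert_duration_to_nanosecond('500ms'))   # 500000000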
+
+
+def parse_healthcheck(healthcheck):
+ """
+ Return dictionary of healthcheck parameters and boolean if
+ healthcheck defined in image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
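An illustrative healthcheck as it might arrive in module parameters, and what the parser returns:

    healthcheck = dict(
        test='curl --fail http://localhost:8080/health',
        interval='30s',
        timeout='10s',
        retries=3,
        start_period=None,
    )
    result, disabled = parse_healthcheck(healthcheck)
    print(result)
    # {'test': ['CMD-SHELL', 'curl --fail http://localhost:8080/health'],
    #  'interval': 30000000000, 'timeout': 10000000000, 'retries': 3}
    print(disabled)   # False

    # Explicitly disabling the healthcheck defined in the image:
    print(parse_healthcheck(dict(test=['NONE'])))   # (None, True)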
+
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py
new file mode 100644
index 00000000..610ed9a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/docker/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+ Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
+ of the Docker host the module is executed on
+ :return:
+ NodeID of host or 'None' if not part of Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+ Checks if the host is part of a Docker Swarm. If 'node_id' is not provided, it reads the Docker host
+ system information and checks whether the relevant Swarm key exists in the output. If 'node_id' is provided,
+ it tries to read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method
+ handles the exception if it is not executed on a Swarm manager
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ return
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+ Checks if the node role is set as Manager in Swarm. The node is the Docker host on which the module action
+ is performed. inspect_swarm() will fail if the node is not a manager
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If host is not a swarm manager then Ansible task on this host should end with 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+ Checks if the node role is set as Worker in Swarm. The node is the Docker host on which the module action
+ is performed. Will fail, via check_if_swarm_node(), if run on a host that is not part of a Swarm
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+ Checks if the node status on the Swarm manager is 'down'. If node_id is provided, the manager is queried
+ about the specified node; otherwise the manager is queried about itself. If run on a Swarm worker node or
+ a host that is not part of a Swarm, it will fail the playbook
+
+ :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+ Returns Swarm node info as in 'docker node inspect' command about single node
+
+ :param skip_missing: if True then function will return None instead of failing the task
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+                # This is a workaround for a Docker bug where, in some cases, the Leader IP is 0.0.0.0
+                # See moby/moby#35437 for details
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+        Returns Swarm node information about all registered nodes, as shown by the 'docker node inspect' command
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+        Returns the list of nodes registered in the Swarm
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered in the Swarm;
+            if 'output' is 'long', the returned data is a list of dicts containing the attributes shown in the
+            output of the 'docker node ls' command
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+        Returns Swarm service information about a single service, as shown by the 'docker service inspect' command
+
+        :param service_id: service ID or name
+        :param skip_missing: if True, the function will return None instead of failing the task
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
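+
+# Illustrative usage sketch (hypothetical wiring; not part of the original file).
+# It assumes an AnsibleDockerSwarmClient instance created by a docker_swarm* module:
+#
+#   client = AnsibleDockerSwarmClient(argument_spec=...)    # hypothetical setup
+#   client.fail_task_if_not_swarm_manager()
+#   hostnames = client.get_all_nodes_list(output='short')   # e.g. ['manager-1', 'worker-1']
+#   details = client.get_all_nodes_list(output='long')      # dicts as in 'docker node ls'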
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py
new file mode 100644
index 00000000..1c61510f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcdns.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.dns.types import Provider
+ from libcloud.dns.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gcdns"
+USER_AGENT_VERSION = "v1"
+
+
+def gcdns_connect(module, provider=None):
+ """Return a GCP connection for Google Cloud DNS."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+
+ provider = provider or Provider.GOOGLE
+ return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return gcp_error(error)
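+
+# Illustrative usage sketch (assumed module params; not part of the original file):
+#
+#   driver = gcdns_connect(module)   # libcloud DNS driver bound to Google Cloud DNS
+#   zones = driver.list_zones()      # standard libcloud DNS driver call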
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py
new file mode 100644
index 00000000..292bb8b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gce.py
@@ -0,0 +1,39 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import unexpected_error_msg as gcp_error
+
+USER_AGENT_PRODUCT = "Ansible-gce"
+USER_AGENT_VERSION = "v1"
+
+
+def gce_connect(module, provider=None):
+ """Return a GCP connection for Google Compute Engine."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+ provider = provider or Provider.GCE
+
+ return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return gcp_error(error)
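+
+# Illustrative usage sketch (assumed module params; not part of the original file):
+#
+#   driver = gce_connect(module)     # libcloud compute driver bound to the configured zone/project
+#   nodes = driver.list_nodes()      # standard libcloud compute driver call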
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py
new file mode 100644
index 00000000..a034f3b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gcp.py
@@ -0,0 +1,799 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+# libcloud
+try:
+ import libcloud
+ HAS_LIBCLOUD_BASE = True
+except ImportError:
+ HAS_LIBCLOUD_BASE = False
+
+# google-auth
+try:
+ import google.auth
+ from google.oauth2 import service_account
+ HAS_GOOGLE_AUTH = True
+except ImportError:
+ HAS_GOOGLE_AUTH = False
+
+# google-python-api
+try:
+ import google_auth_httplib2
+ from httplib2 import Http
+ from googleapiclient.http import set_user_agent
+ from googleapiclient.errors import HttpError
+ from apiclient.discovery import build
+ HAS_GOOGLE_API_LIB = True
+except ImportError:
+ HAS_GOOGLE_API_LIB = False
+
+
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
+
+
+def _get_gcp_ansible_credentials(module):
+ """Helper to fetch creds from AnsibleModule object."""
+ service_account_email = module.params.get('service_account_email', None)
+ # Note: pem_file is discouraged and will be deprecated
+ credentials_file = module.params.get('pem_file', None) or module.params.get(
+ 'credentials_file', None)
+ project_id = module.params.get('project_id', None)
+
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_environ_var(var_name, default_value):
+ """Wrapper around os.environ.get call."""
+ return os.environ.get(
+ var_name, default_value)
+
+
+def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
+ """Helper to look in environment variables for credentials."""
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ if not service_account_email:
+ service_account_email = _get_gcp_environ_var('GCE_EMAIL', None)
+ if not credentials_file:
+ credentials_file = _get_gcp_environ_var(
+ 'GCE_CREDENTIALS_FILE_PATH', None) or _get_gcp_environ_var(
+ 'GOOGLE_APPLICATION_CREDENTIALS', None) or _get_gcp_environ_var(
+ 'GCE_PEM_FILE_PATH', None)
+ if not project_id:
+ project_id = _get_gcp_environ_var('GCE_PROJECT', None) or _get_gcp_environ_var(
+ 'GOOGLE_CLOUD_PROJECT', None)
+ return (service_account_email, credentials_file, project_id)
+
+
+def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
+ """
+ Obtain GCP credentials by trying various methods.
+
+    There are 3 ways to specify GCP credentials:
+    1. Specify via Ansible module parameters (recommended).
+    2. Specify via environment variables. Two sets of env vars are available:
+       a) GOOGLE_CLOUD_PROJECT, GOOGLE_APPLICATION_CREDENTIALS (preferred)
+       b) GCE_PROJECT, GCE_CREDENTIALS_FILE_PATH, GCE_EMAIL (legacy, not recommended; required if
+          using a p12 key)
+    3. Specify via a libcloud secrets.py file (deprecated).
+
+ There are 3 helper functions to assist in the above.
+
+ Regardless of method, the user also has the option of specifying a JSON
+ file or a p12 file as the credentials file. JSON is strongly recommended and
+ p12 will be removed in the future.
+
+ Additionally, flags may be set to require valid json and check the libcloud
+ version.
+
+ AnsibleModule.fail_json is called only if the project_id cannot be found.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param require_valid_json: If true, require credentials to be valid JSON. Default is True.
+ :type require_valid_json: ``bool``
+
+    :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :return: {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
+ :rtype: ``dict``
+ """
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_ansible_credentials(module)
+
+ # If any of the values are not given as parameters, check the appropriate
+ # environment variables.
+ (service_account_email,
+ credentials_file,
+ project_id) = _get_gcp_environment_credentials(service_account_email,
+ credentials_file, project_id)
+
+ if credentials_file is None or project_id is None or service_account_email is None:
+ if check_libcloud is True:
+ if project_id is None:
+ # TODO(supertom): this message is legacy and integration tests
+ # depend on it.
+ module.fail_json(msg='Missing GCE connection parameters in libcloud '
+ 'secrets file.')
+ else:
+ if project_id is None:
+ module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
+ 'credentials file (%s)' % (project_id, credentials_file)))
+ # Set these fields to empty strings if they are None
+ # consumers of this will make the distinction between an empty string
+ # and None.
+ if credentials_file is None:
+ credentials_file = ''
+ if service_account_email is None:
+ service_account_email = ''
+
+ # ensure the credentials file is found and is in the proper format.
+ if credentials_file:
+ _validate_credentials_file(module, credentials_file,
+ require_valid_json=require_valid_json,
+ check_libcloud=check_libcloud)
+
+ return {'service_account_email': service_account_email,
+ 'credentials_file': credentials_file,
+ 'project_id': project_id}
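+
+# Illustrative sketch of the returned structure (hypothetical values; not part of the original file):
+#
+#   creds = _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False)
+#   # e.g. {'service_account_email': 'sa@my-project.iam.gserviceaccount.com',
+#   #       'credentials_file': '/path/to/key.json',
+#   #       'project_id': 'my-project'}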
+
+
+def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
+ """
+ Check for valid credentials file.
+
+ Optionally check for JSON format and if libcloud supports JSON.
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param credentials_file: path to file on disk
+ :type credentials_file: ``str``. Complete path to file on disk.
+
+ :param require_valid_json: This argument is ignored as of Ansible 2.7.
+ :type require_valid_json: ``bool``
+
+    :param check_libcloud: If true, check the libcloud version available to see if
+ JSON creds are supported.
+ :type check_libcloud: ``bool``
+
+ :returns: True
+ :rtype: ``bool``
+ """
+ try:
+ # Try to read credentials as JSON
+ with open(credentials_file) as credentials:
+ json.loads(credentials.read())
+ # If the credentials are proper JSON and we do not have the minimum
+ # required libcloud version, bail out and return a descriptive
+ # error
+ if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
+ module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
+ 'Upgrade to libcloud>=0.17.0.')
+ return True
+ except IOError as e:
+ module.fail_json(msg='GCP Credentials File %s not found.' %
+ credentials_file, changed=False)
+ return False
+ except ValueError as e:
+ module.fail_json(
+ msg='Non-JSON credentials file provided. Please generate a new JSON key from the Google Cloud console',
+ changed=False)
+
+
+def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
+ """Return a Google libcloud driver connection."""
+ if not HAS_LIBCLOUD_BASE:
+ module.fail_json(msg='libcloud must be installed to use this module')
+
+ creds = _get_gcp_credentials(module,
+ require_valid_json=False,
+ check_libcloud=True)
+ try:
+ gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
+ datacenter=module.params.get('zone', None),
+ project=creds['project_id'])
+ gcp.connection.user_agent_append("%s/%s" % (
+ user_agent_product, user_agent_version))
+ except (RuntimeError, ValueError) as e:
+ module.fail_json(msg=str(e), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ return gcp
+
+
+def get_google_cloud_credentials(module, scopes=None):
+ """
+ Get credentials object for use with Google Cloud client.
+
+ Attempts to obtain credentials by calling _get_gcp_credentials. If those are
+ not present will attempt to connect via Application Default Credentials.
+
+ To connect via libcloud, don't use this function, use gcp_connect instead. For
+ Google Python API Client, see get_google_api_auth for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google Cloud example:
+        creds, params = get_google_cloud_credentials(module, scopes)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+    :type scopes: ``list`` of URIs
+
+ :returns: A tuple containing (google authorized) credentials object and
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_AUTH:
+ module.fail_json(msg='Please install google-auth.')
+
+ conn_params = _get_gcp_credentials(module,
+ require_valid_json=True,
+ check_libcloud=False)
+ try:
+ if conn_params['credentials_file']:
+ credentials = service_account.Credentials.from_service_account_file(
+ conn_params['credentials_file'])
+ if scopes:
+ credentials = credentials.with_scopes(scopes)
+ else:
+ (credentials, project_id) = google.auth.default(
+ scopes=scopes)
+ if project_id is not None:
+ conn_params['project_id'] = project_id
+
+ return (credentials, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
+ """
+ Authentication for use with google-python-api-client.
+
+ Function calls get_google_cloud_credentials, which attempts to assemble the credentials
+ from various locations. Next it attempts to authenticate with Google.
+
+ This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
+
+ For libcloud, don't use this function, use gcp_connect instead. For Google Cloud, See
+ get_google_cloud_credentials for how to connect.
+
+ For more information on Google's client library options for Python, see:
+ U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
+
+ Google API example:
+ http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
+ service = build('myservice', 'v1', http=http_auth)
+ ...
+
+ :param module: initialized Ansible module object
+ :type module: `class AnsibleModule`
+
+ :param scopes: list of scopes
+ :type scopes: ``list`` of URIs
+
+ :param user_agent_product: User agent product. eg: 'ansible-python-api'
+ :type user_agent_product: ``str``
+
+ :param user_agent_version: Version string to append to product. eg: 'NA' or '0.1'
+ :type user_agent_version: ``str``
+
+ :returns: A tuple containing (google authorized) httplib2 request object and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ scopes = [] if scopes is None else scopes
+
+ if not HAS_GOOGLE_API_LIB:
+ module.fail_json(msg="Please install google-api-python-client library")
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+ try:
+ (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
+ http = set_user_agent(Http(), '%s-%s' %
+ (user_agent_product, user_agent_version))
+ http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
+
+ return (http_auth, conn_params)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ return (None, None)
+
+
+def get_google_api_client(module, service, user_agent_product, user_agent_version,
+ scopes=None, api_version='v1'):
+ """
+ Get the discovery-based python client. Use when a cloud client is not available.
+
+ client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ :returns: A tuple containing the authorized client to the specified service and a
+ params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
+ :rtype: ``tuple``
+ """
+ if not scopes:
+ scopes = GCP_DEFAULT_SCOPES
+
+ http_auth, conn_params = get_google_api_auth(module, scopes=scopes,
+ user_agent_product=user_agent_product,
+ user_agent_version=user_agent_version)
+ client = build(service, api_version, http=http_auth)
+
+ return (client, conn_params)
+
+
+def check_min_pkg_version(pkg_name, minimum_version):
+ """Minimum required version is >= installed version."""
+ from pkg_resources import get_distribution
+ try:
+ installed_version = get_distribution(pkg_name).version
+ return LooseVersion(installed_version) >= minimum_version
+ except Exception as e:
+ return False
+
+
+def unexpected_error_msg(error):
+ """Create an error string based on passed in error."""
+ return 'Unexpected response: (%s). Detail: %s' % (str(error), traceback.format_exc())
+
+
+def get_valid_location(module, driver, location, location_type='zone'):
+ if location_type == 'zone':
+ l = driver.ex_get_zone(location)
+ else:
+ l = driver.ex_get_region(location)
+ if l is None:
+ link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
+ module.fail_json(msg=('%s %s is invalid. Please see the list of '
+ 'available %s at %s' % (
+ location_type, location, location_type, link)),
+ changed=False)
+ return l
+
+
+def check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+        [{'name': str, 'required': True/False, 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if 'required' in d and d['required'] is True:
+ raise ValueError(("%s is required and must be of type: %s" %
+ (d['name'], str(d['type']))))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
+ d['name'], str(d['type']), params[d['name']],
+ type(params[d['name']]))))
+ if 'values' in d:
+ if params[d['name']] not in d['values']:
+ raise ValueError(("%s must be one of: %s" % (
+ d['name'], ','.join(d['values']))))
+ if isinstance(params[d['name']], int):
+ if 'min' in d:
+ if params[d['name']] < d['min']:
+ raise ValueError(("%s must be greater than or equal to: %s" % (
+ d['name'], d['min'])))
+ if 'max' in d:
+ if params[d['name']] > d['max']:
+ raise ValueError("%s must be less than or equal to: %s" % (
+ d['name'], d['max']))
+ return True
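+
+# Illustrative sketch (hypothetical field spec; not part of the original file):
+#
+#   check_params({'name': 'web', 'port': 80},
+#                [{'name': 'name', 'required': True, 'type': str},
+#                 {'name': 'port', 'type': int, 'min': 1, 'max': 65535}])
+#   # returns True, or raises ValueError describing the first invalid field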
+
+
+class GCPUtils(object):
+ """
+ Helper utilities for GCP.
+ """
+
+ @staticmethod
+ def underscore_to_camel(txt):
+ return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
+
+ @staticmethod
+ def remove_non_gcp_params(params):
+ """
+ Remove params if found.
+ """
+ params_to_remove = ['state']
+ for p in params_to_remove:
+ if p in params:
+ del params[p]
+
+ return params
+
+ @staticmethod
+ def params_to_gcp_dict(params, resource_name=None):
+ """
+ Recursively convert ansible params to GCP Params.
+
+ Keys are converted from snake to camelCase
+ ex: default_service to defaultService
+
+ Handles lists, dicts and strings
+
+ special provision for the resource name
+ """
+ if not isinstance(params, dict):
+ return params
+ gcp_dict = {}
+ params = GCPUtils.remove_non_gcp_params(params)
+ for k, v in params.items():
+ gcp_key = GCPUtils.underscore_to_camel(k)
+ if isinstance(v, dict):
+ retval = GCPUtils.params_to_gcp_dict(v)
+ gcp_dict[gcp_key] = retval
+ elif isinstance(v, list):
+ gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
+ else:
+ if resource_name and k == resource_name:
+ gcp_dict['name'] = v
+ else:
+ gcp_dict[gcp_key] = v
+ return gcp_dict
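+    # Illustrative sketch (hypothetical params; not part of the original file):
+    #
+    #   GCPUtils.params_to_gcp_dict({'backend_service': 'web', 'state': 'present'},
+    #                               resource_name='backend_service')
+    #   # -> {'name': 'web'}   ('state' is stripped by remove_non_gcp_params)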
+
+ @staticmethod
+ def execute_api_client_req(req, client=None, raw=True,
+ operation_timeout=180, poll_interval=5,
+ raise_404=True):
+ """
+ General python api client interaction function.
+
+ For use with google-api-python-client, or clients created
+ with get_google_api_client function
+ Not for use with Google Cloud client libraries
+
+ For long-running operations, we make an immediate query and then
+ sleep poll_interval before re-querying. After the request is done
+ we rebuild the request with a get method and return the result.
+
+ """
+ try:
+ resp = req.execute()
+
+ if not resp:
+ return None
+
+ if raw:
+ return resp
+
+ if resp['kind'] == 'compute#operation':
+ resp = GCPUtils.execute_api_client_operation_req(req, resp,
+ client,
+ operation_timeout,
+ poll_interval)
+
+ if 'items' in resp:
+ return resp['items']
+
+ return resp
+ except HttpError as h:
+ # Note: 404s can be generated (incorrectly) for dependent
+ # resources not existing. We let the caller determine if
+ # they want 404s raised for their invocation.
+ if h.resp.status == 404 and not raise_404:
+ return None
+ else:
+ raise
+ except Exception:
+ raise
+
+ @staticmethod
+ def execute_api_client_operation_req(orig_req, op_resp, client,
+ operation_timeout=180, poll_interval=5):
+ """
+ Poll an operation for a result.
+ """
+ parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
+ project_id = parsed_url['project']
+ resource_name = GCPUtils.get_gcp_resource_from_methodId(
+ orig_req.methodId)
+ resource = GCPUtils.build_resource_from_name(client, resource_name)
+
+ start_time = time.time()
+
+ complete = False
+ attempts = 1
+ while not complete:
+ if start_time + operation_timeout >= time.time():
+ op_req = client.globalOperations().get(
+ project=project_id, operation=op_resp['name'])
+ op_resp = op_req.execute()
+ if op_resp['status'] != 'DONE':
+ time.sleep(poll_interval)
+ attempts += 1
+ else:
+ complete = True
+ if op_resp['operationType'] == 'delete':
+ # don't wait for the delete
+ return True
+ elif op_resp['operationType'] in ['insert', 'update', 'patch']:
+ # TODO(supertom): Isolate 'build-new-request' stuff.
+ resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
+ resource_name)
+ if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
+ parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
+ 'entity_name']
+ args = {'project': project_id,
+ resource_name_singular: parsed_url['entity_name']}
+ new_req = resource.get(**args)
+ resp = new_req.execute()
+ return resp
+ else:
+ # assuming multiple entities, do a list call.
+ new_req = resource.list(project=project_id)
+ resp = new_req.execute()
+ return resp
+ else:
+ # operation didn't complete on time.
+ raise GCPOperationTimeoutError("Operation timed out: %s" % (
+ op_resp['targetLink']))
+
+ @staticmethod
+ def build_resource_from_name(client, resource_name):
+ try:
+ method = getattr(client, resource_name)
+ return method()
+ except AttributeError:
+ raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
+ client))
+
+ @staticmethod
+ def get_gcp_resource_from_methodId(methodId):
+ try:
+ parts = methodId.split('.')
+ if len(parts) != 3:
+ return None
+ else:
+ return parts[1]
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def get_entity_name_from_resource_name(resource_name):
+ if not resource_name:
+ return None
+
+ try:
+ # Chop off global or region prefixes
+ if resource_name.startswith('global'):
+ resource_name = resource_name.replace('global', '')
+ elif resource_name.startswith('regional'):
+ resource_name = resource_name.replace('region', '')
+
+ # ensure we have a lower case first letter
+ resource_name = resource_name[0].lower() + resource_name[1:]
+
+ if resource_name[-3:] == 'ies':
+ return resource_name.replace(
+ resource_name[-3:], 'y')
+ if resource_name[-1] == 's':
+ return resource_name[:-1]
+
+ return resource_name
+
+ except AttributeError:
+ return None
+
+ @staticmethod
+ def parse_gcp_url(url):
+ """
+ Parse GCP urls and return dict of parts.
+
+ Supported URL structures:
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
+ /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
+
+ :param url: GCP-generated URL, such as a selflink or resource location.
+ :type url: ``str``
+
+    :return: dictionary of parts. Includes standard components of urlparse, plus
+ GCP-specific 'service', 'api_version', 'project' and
+ 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
+ and 'method_name', if applicable.
+ :rtype: ``dict``
+ """
+
+ p = urlparse.urlparse(url)
+ if not p:
+ return None
+ else:
+ # we add extra items such as
+ # zone, region and resource_name
+ url_parts = {}
+ url_parts['scheme'] = p.scheme
+ url_parts['host'] = p.netloc
+ url_parts['path'] = p.path
+ if p.path.find('/') == 0:
+ url_parts['path'] = p.path[1:]
+ url_parts['params'] = p.params
+ url_parts['fragment'] = p.fragment
+ url_parts['query'] = p.query
+ url_parts['project'] = None
+ url_parts['service'] = None
+ url_parts['api_version'] = None
+
+ path_parts = url_parts['path'].split('/')
+ url_parts['service'] = path_parts[0]
+ url_parts['api_version'] = path_parts[1]
+ if path_parts[2] == 'projects':
+ url_parts['project'] = path_parts[3]
+ else:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ if 'global' in path_parts:
+ url_parts['global'] = True
+ idx = path_parts.index('global')
+ if len(path_parts) - idx == 4:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+ url_parts['method_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 1]
+ url_parts['entity_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx == 2:
+ url_parts['resource_name'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx < 2:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ elif 'regions' in path_parts or 'zones' in path_parts:
+ idx = -1
+ if 'regions' in path_parts:
+ idx = path_parts.index('regions')
+ url_parts['region'] = path_parts[idx + 1]
+ else:
+ idx = path_parts.index('zones')
+ url_parts['zone'] = path_parts[idx + 1]
+
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ else:
+ # no location in URL.
+ idx = path_parts.index('projects')
+ if len(path_parts) - idx == 5:
+ # we have a resource, entity and method_name
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+ url_parts['method_name'] = path_parts[idx + 4]
+
+ if len(path_parts) - idx == 4:
+ # we have a resource and entity
+ url_parts['resource_name'] = path_parts[idx + 2]
+ url_parts['entity_name'] = path_parts[idx + 3]
+
+ if len(path_parts) - idx == 3:
+ url_parts['resource_name'] = path_parts[idx + 2]
+
+ if len(path_parts) - idx < 3:
+ # invalid URL
+ raise GCPInvalidURLError('unable to parse: %s' % url)
+
+ return url_parts
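+    # Illustrative sketch (hypothetical selflink; not part of the original file):
+    #
+    #   GCPUtils.parse_gcp_url('https://www.googleapis.com/compute/v1/projects/'
+    #                          'my-project/zones/us-central1-a/instances/my-vm')
+    #   # -> includes 'service': 'compute', 'api_version': 'v1', 'project': 'my-project',
+    #   #    'zone': 'us-central1-a', 'resource_name': 'instances', 'entity_name': 'my-vm'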
+
+ @staticmethod
+ def build_googleapi_url(project, api_version='v1', service='compute'):
+ return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
+
+ @staticmethod
+ def filter_gcp_fields(params, excluded_fields=None):
+ new_params = {}
+ if not excluded_fields:
+ excluded_fields = ['creationTimestamp', 'id', 'kind',
+ 'selfLink', 'fingerprint', 'description']
+
+ if isinstance(params, list):
+ new_params = [GCPUtils.filter_gcp_fields(
+ x, excluded_fields) for x in params]
+ elif isinstance(params, dict):
+ for k in params.keys():
+ if k not in excluded_fields:
+ new_params[k] = GCPUtils.filter_gcp_fields(
+ params[k], excluded_fields)
+ else:
+ new_params = params
+
+ return new_params
+
+ @staticmethod
+ def are_params_equal(p1, p2):
+ """
+ Check if two params dicts are equal.
+ TODO(supertom): need a way to filter out URLs, or they need to be built
+ """
+ filtered_p1 = GCPUtils.filter_gcp_fields(p1)
+ filtered_p2 = GCPUtils.filter_gcp_fields(p2)
+ if filtered_p1 != filtered_p2:
+ return False
+ return True
+
+
+class GCPError(Exception):
+ pass
+
+
+class GCPOperationTimeoutError(GCPError):
+ pass
+
+
+class GCPInvalidURLError(GCPError):
+ pass
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py
new file mode 100644
index 00000000..e13f38c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/gitlab.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+try:
+ from urllib import quote_plus # Python 2.X
+except ImportError:
+ from urllib.parse import quote_plus # Python 3+
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+
+def request(module, api_url, project, path, access_token, private_token, rawdata='', method='GET'):
+ url = "%s/v4/projects/%s%s" % (api_url, quote_plus(project), path)
+ headers = {}
+ if access_token:
+ headers['Authorization'] = "Bearer %s" % access_token
+ else:
+ headers['Private-Token'] = private_token
+
+ headers['Accept'] = "application/json"
+ headers['Content-Type'] = "application/json"
+
+ response, info = fetch_url(module=module, url=url, headers=headers, data=rawdata, method=method)
+ status = info['status']
+ content = ""
+ if response:
+ content = response.read()
+ if status == 204:
+ return True, content
+ elif status == 200 or status == 201:
+ return True, json.loads(content)
+ else:
+ return False, str(status) + ": " + content
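+
+# Illustrative sketch (hypothetical URL, project and token; not part of the original file):
+#
+#   success, data = request(module, 'https://gitlab.example.com/api', 'group/project',
+#                           '/hooks', access_token=None, private_token='secret-token')
+#   # success is True with decoded JSON on HTTP 200/201, (True, content) on 204,
+#   # and (False, "<status>: <body>") otherwise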
+
+
+def findProject(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.projects.get(identifier)
+ except Exception as e:
+ current_user = gitlab_instance.user
+ try:
+ project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def findGroup(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.groups.get(identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def gitlabAuthentication(module):
+ gitlab_url = module.params['api_url']
+ validate_certs = module.params['validate_certs']
+ gitlab_user = module.params['api_username']
+ gitlab_password = module.params['api_password']
+ gitlab_token = module.params['api_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ try:
+        # The python-gitlab library removed support for username/password authentication in 1.13.0
+        # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
+        # This condition allows us to still support older versions of the python-gitlab library
+ if StrictVersion(gitlab.__version__) < StrictVersion("1.13.0"):
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
+ private_token=gitlab_token, api_version=4)
+ else:
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, api_version=4)
+
+ gitlab_instance.auth()
+ except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
+ module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
+ except (gitlab.exceptions.GitlabHttpError) as e:
+ module.fail_json(msg="Failed to connect to GitLab server: %s. \
+ GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
+
+ return gitlab_instance
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py
new file mode 100644
index 00000000..b6e89614
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/heroku.py
@@ -0,0 +1,41 @@
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+HAS_HEROKU = False
+HEROKU_IMP_ERR = None
+try:
+ import heroku3
+ HAS_HEROKU = True
+except ImportError:
+ HEROKU_IMP_ERR = traceback.format_exc()
+
+
+class HerokuHelper():
+ def __init__(self, module):
+ self.module = module
+ self.check_lib()
+ self.api_key = module.params["api_key"]
+
+ def check_lib(self):
+ if not HAS_HEROKU:
+ self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
+
+ @staticmethod
+ def heroku_argument_spec():
+ return dict(
+ api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
+
+ def get_heroku_client(self):
+ client = heroku3.from_key(self.api_key)
+
+ if not client.is_authenticated:
+ self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
+
+ return client
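+
+# Illustrative usage sketch (assumed AnsibleModule wiring; not part of the original file):
+#
+#   argument_spec = HerokuHelper.heroku_argument_spec()
+#   module = AnsibleModule(argument_spec=argument_spec)
+#   client = HerokuHelper(module).get_heroku_client()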
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py
new file mode 100644
index 00000000..2bc3d166
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hetzner.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Felix Fontein <felix@fontein.de>, 2019
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+import time
+
+
+HETZNER_DEFAULT_ARGUMENT_SPEC = dict(
+ hetzner_user=dict(type='str', required=True),
+ hetzner_password=dict(type='str', required=True, no_log=True),
+)
+
+# The API endpoint is fixed.
+BASE_URL = "https://robot-ws.your-server.de"
+
+
+def fetch_url_json(module, url, method='GET', timeout=10, data=None, headers=None, accept_errors=None):
+ '''
+    Make a general request to Hetzner's JSON robot API.
+ '''
+ module.params['url_username'] = module.params['hetzner_user']
+ module.params['url_password'] = module.params['hetzner_password']
+ resp, info = fetch_url(module, url, method=method, timeout=timeout, data=data, headers=headers)
+ try:
+ content = resp.read()
+ except AttributeError:
+ content = info.pop('body', None)
+
+ if not content:
+ module.fail_json(msg='Cannot retrieve content from {0}'.format(url))
+
+ try:
+ result = module.from_json(content.decode('utf8'))
+ if 'error' in result:
+ if accept_errors:
+ if result['error']['code'] in accept_errors:
+ return result, result['error']['code']
+ module.fail_json(msg='Request failed: {0} {1} ({2})'.format(
+ result['error']['status'],
+ result['error']['code'],
+ result['error']['message']
+ ))
+ return result, None
+ except ValueError:
+ module.fail_json(msg='Cannot decode content retrieved from {0}'.format(url))
+
+
+class CheckDoneTimeoutException(Exception):
+ def __init__(self, result, error):
+ super(CheckDoneTimeoutException, self).__init__()
+ self.result = result
+ self.error = error
+
+
+def fetch_url_json_with_retries(module, url, check_done_callback, check_done_delay=10, check_done_timeout=180, skip_first=False, **kwargs):
+ '''
+    Make a general request to Hetzner's JSON robot API, with retries until a condition is satisfied.
+
+    The condition is tested by calling ``check_done_callback(result, error)``. If it is not satisfied,
+    the request is retried with a delay of ``check_done_delay`` (in seconds) until a total timeout of
+    ``check_done_timeout`` (in seconds), counted from the time the first request was started, is reached.
+
+    If ``skip_first`` is specified, the function assumes that a first call has already been made and will
+    start directly with waiting.
+ '''
+ start_time = time.time()
+ if not skip_first:
+ result, error = fetch_url_json(module, url, **kwargs)
+ if check_done_callback(result, error):
+ return result, error
+ while True:
+ elapsed = (time.time() - start_time)
+ left_time = check_done_timeout - elapsed
+ time.sleep(max(min(check_done_delay, left_time), 0))
+ result, error = fetch_url_json(module, url, **kwargs)
+ if check_done_callback(result, error):
+ return result, error
+ if left_time < check_done_delay:
+ raise CheckDoneTimeoutException(result, error)
+
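+# Illustrative sketch (hypothetical IP and callback; not part of the original file):
+#
+#   def _routed(result, error):
+#       return error is None and result['failover']['active_server_ip'] is not None
+#
+#   result, error = fetch_url_json_with_retries(
+#       module, "{0}/failover/1.2.3.4".format(BASE_URL), _routed,
+#       check_done_delay=10, check_done_timeout=180)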
+
+# #####################################################################################
+# ## FAILOVER IP ######################################################################
+
+def get_failover_record(module, ip):
+ '''
+ Get information record of failover IP.
+
+ See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
+ '''
+ url = "{0}/failover/{1}".format(BASE_URL, ip)
+ result, error = fetch_url_json(module, url)
+ if 'failover' not in result:
+ module.fail_json(msg='Cannot interpret result: {0}'.format(result))
+ return result['failover']
+
+
+def get_failover(module, ip):
+ '''
+ Get current routing target of failover IP.
+
+ The value ``None`` represents unrouted.
+
+ See https://robot.your-server.de/doc/webservice/en.html#get-failover-failover-ip
+ '''
+ return get_failover_record(module, ip)['active_server_ip']
+
+
+def set_failover(module, ip, value, timeout=180):
+ '''
+ Set current routing target of failover IP.
+
+ Return a pair ``(value, changed)``. The value ``None`` for ``value`` represents unrouted.
+
+ See https://robot.your-server.de/doc/webservice/en.html#post-failover-failover-ip
+ and https://robot.your-server.de/doc/webservice/en.html#delete-failover-failover-ip
+ '''
+ url = "{0}/failover/{1}".format(BASE_URL, ip)
+ if value is None:
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='DELETE',
+ timeout=timeout,
+ accept_errors=['FAILOVER_ALREADY_ROUTED']
+ )
+ else:
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(
+ active_server_ip=value,
+ )
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=timeout,
+ data=urlencode(data),
+ headers=headers,
+ accept_errors=['FAILOVER_ALREADY_ROUTED']
+ )
+ if error is not None:
+ return value, False
+ else:
+ return result['failover']['active_server_ip'], True
+
+
+def get_failover_state(value):
+ '''
+ Create result dictionary for failover IP's value.
+
+ The value ``None`` represents unrouted.
+ '''
+ return dict(
+ value=value,
+ state='routed' if value else 'unrouted'
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
new file mode 100644
index 00000000..05e0c137
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
@@ -0,0 +1,441 @@
+# Copyright (c), Google Inc, 2017
+# Simplified BSD License (see licenses/simplified_bsd.txt or
+# https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+import traceback
+
+THIRD_LIBRARIES_IMP_ERR = None
+try:
+ from keystoneauth1.adapter import Adapter
+ from keystoneauth1.identity import v3
+ from keystoneauth1 import session
+ HAS_THIRD_LIBRARIES = True
+except ImportError:
+ THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
+ HAS_THIRD_LIBRARIES = False
+
+from ansible.module_utils.basic import (AnsibleModule, env_fallback,
+ missing_required_lib)
+from ansible.module_utils._text import to_text
+
+
+class HwcModuleException(Exception):
+ def __init__(self, message):
+ super(HwcModuleException, self).__init__()
+
+ self._message = message
+
+ def __str__(self):
+ return "[HwcClientException] message=%s" % self._message
+
+
+class HwcClientException(Exception):
+ def __init__(self, code, message):
+ super(HwcClientException, self).__init__()
+
+ self._code = code
+ self._message = message
+
+ def __str__(self):
+ msg = " code=%s," % str(self._code) if self._code != 0 else ""
+ return "[HwcClientException]%s message=%s" % (
+ msg, self._message)
+
+
+class HwcClientException404(HwcClientException):
+ def __init__(self, message):
+ super(HwcClientException404, self).__init__(404, message)
+
+ def __str__(self):
+ return "[HwcClientException404] message=%s" % self._message
+
+
+def session_method_wrapper(f):
+ def _wrap(self, url, *args, **kwargs):
+ try:
+ url = self.endpoint + url
+ r = f(self, url, *args, **kwargs)
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Sending request failed, error=%s" % ex)
+
+ result = None
+ if r.content:
+ try:
+ result = r.json()
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Parsing response to json failed, error: %s" % ex)
+
+ code = r.status_code
+ if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
+ msg = ""
+ for i in ['message', 'error.message']:
+ try:
+ msg = navigate_value(result, i)
+ break
+ except Exception:
+ pass
+ else:
+ msg = str(result)
+
+ if code == 404:
+ raise HwcClientException404(msg)
+
+ raise HwcClientException(code, msg)
+
+ return result
+
+ return _wrap
+
+
+class _ServiceClient(object):
+ def __init__(self, client, endpoint, product):
+ self._client = client
+ self._endpoint = endpoint
+ self._default_header = {
+ 'User-Agent': "Huawei-Ansible-MM-%s" % product,
+ 'Accept': 'application/json',
+ }
+
+ @property
+ def endpoint(self):
+ return self._endpoint
+
+ @endpoint.setter
+ def endpoint(self, e):
+ self._endpoint = e
+
+ @session_method_wrapper
+ def get(self, url, body=None, header=None, timeout=None):
+ return self._client.get(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def post(self, url, body=None, header=None, timeout=None):
+ return self._client.post(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def delete(self, url, body=None, header=None, timeout=None):
+ return self._client.delete(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def put(self, url, body=None, header=None, timeout=None):
+ return self._client.put(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ def _header(self, header):
+ if header and isinstance(header, dict):
+ for k, v in self._default_header.items():
+ if k not in header:
+ header[k] = v
+ else:
+ header = self._default_header
+
+ return header
+
+
+class Config(object):
+ def __init__(self, module, product):
+ self._project_client = None
+ self._domain_client = None
+ self._module = module
+ self._product = product
+ self._endpoints = {}
+
+ self._validate()
+ self._gen_provider_client()
+
+ @property
+ def module(self):
+ return self._module
+
+ def client(self, region, service_type, service_level):
+ c = self._project_client
+ if service_level == "domain":
+ c = self._domain_client
+
+ e = self._get_service_endpoint(c, service_type, region)
+
+ return _ServiceClient(c, e, self._product)
+
+ def _gen_provider_client(self):
+ m = self._module
+ p = {
+ "auth_url": m.params['identity_endpoint'],
+ "password": m.params['password'],
+ "username": m.params['user'],
+ "project_name": m.params['project'],
+ "user_domain_name": m.params['domain'],
+ "reauthenticate": True
+ }
+
+ self._project_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ p.pop("project_name")
+ self._domain_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ def _get_service_endpoint(self, client, service_type, region):
+ k = "%s.%s" % (service_type, region if region else "")
+
+ if k in self._endpoints:
+ return self._endpoints.get(k)
+
+ url = None
+ try:
+ url = client.get_endpoint(service_type=service_type,
+ region_name=region, interface="public")
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Getting endpoint failed, error=%s" % ex)
+
+ if url == "":
+ raise HwcClientException(
+                0, "Cannot find the endpoint for %s" % service_type)
+
+ if url[-1] != "/":
+ url += "/"
+
+ self._endpoints[k] = url
+ return url
+
+ def _validate(self):
+ if not HAS_THIRD_LIBRARIES:
+ self.module.fail_json(
+ msg=missing_required_lib('keystoneauth1'),
+ exception=THIRD_LIBRARIES_IMP_ERR)
+
+
+class HwcModule(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ arg_spec = kwargs.setdefault('argument_spec', {})
+
+ arg_spec.update(
+ dict(
+ identity_endpoint=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
+ ),
+ user=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
+ ),
+ password=dict(
+ required=True, type='str', no_log=True,
+ fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
+ ),
+ domain=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
+ ),
+ project=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
+ ),
+ region=dict(
+ type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
+ ),
+ id=dict(type='str')
+ )
+ )
+
+ super(HwcModule, self).__init__(*args, **kwargs)
+
+
+class _DictComparison(object):
+ ''' This class takes in two dictionaries `a` and `b`.
+ These are dictionaries of arbitrary depth, but made up of standard
+ Python types only.
+ This differ will compare all values in `a` to those in `b`.
+ If value in `a` is None, always returns True, indicating
+ this value is no need to compare.
+ Note: On all lists, order does matter.
+ '''
+
+ def __init__(self, request):
+ self.request = request
+
+ def __eq__(self, other):
+ return self._compare_dicts(self.request, other.request)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _compare_dicts(self, dict1, dict2):
+ if dict1 is None:
+ return True
+
+ if set(dict1.keys()) != set(dict2.keys()):
+ return False
+
+ for k in dict1:
+ if not self._compare_value(dict1.get(k), dict2.get(k)):
+ return False
+
+ return True
+
+ def _compare_lists(self, list1, list2):
+ """Takes in two lists and compares them."""
+ if list1 is None:
+ return True
+
+ if len(list1) != len(list2):
+ return False
+
+ for i in range(len(list1)):
+ if not self._compare_value(list1[i], list2[i]):
+ return False
+
+ return True
+
+ def _compare_value(self, value1, value2):
+ """
+        return: True if value1 is the same as value2, otherwise False.
+ """
+ if value1 is None:
+ return True
+
+ if not (value1 and value2):
+ return (not value1) and (not value2)
+
+ # Can assume non-None types at this point.
+ if isinstance(value1, list) and isinstance(value2, list):
+ return self._compare_lists(value1, value2)
+
+ elif isinstance(value1, dict) and isinstance(value2, dict):
+ return self._compare_dicts(value1, value2)
+
+ # Always use to_text values to avoid unicode issues.
+ return (to_text(value1, errors='surrogate_or_strict') == to_text(
+ value2, errors='surrogate_or_strict'))
+
+
+def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
+ is_last_time = False
+ not_found_times = 0
+ wait = 0
+
+ time.sleep(delay)
+
+ end = time.time() + timeout
+ while not is_last_time:
+ if time.time() > end:
+ is_last_time = True
+
+ obj, status = refresh()
+
+ if obj is None:
+ not_found_times += 1
+
+ if not_found_times > 10:
+ raise HwcModuleException(
+ "not found the object for %d times" % not_found_times)
+ else:
+ not_found_times = 0
+
+ if status in target:
+ return obj
+
+ if pending and status not in pending:
+ raise HwcModuleException(
+                "unexpected status (%s) occurred" % status)
+
+ if not is_last_time:
+ wait *= 2
+ if wait < min_interval:
+ wait = min_interval
+ elif wait > 10:
+ wait = 10
+
+ time.sleep(wait)
+
+ raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
+
+
+def navigate_value(data, index, array_index=None):
+ if array_index and (not isinstance(array_index, dict)):
+ raise HwcModuleException("array_index must be dict")
+
+ d = data
+ for n in range(len(index)):
+ if d is None:
+ return None
+
+ if not isinstance(d, dict):
+ raise HwcModuleException(
+ "can't navigate value from a non-dict object")
+
+ i = index[n]
+ if i not in d:
+ raise HwcModuleException(
+ "navigate value failed: key(%s) is not exist in dict" % i)
+ d = d[i]
+
+ if not array_index:
+ continue
+
+ k = ".".join(index[: (n + 1)])
+ if k not in array_index:
+ continue
+
+ if d is None:
+ return None
+
+ if not isinstance(d, list):
+ raise HwcModuleException(
+ "can't navigate value from a non-list object")
+
+ j = array_index.get(k)
+ if j >= len(d):
+ raise HwcModuleException(
+ "navigate value failed: the index is out of list")
+ d = d[j]
+
+ return d
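+
+# Illustrative sketch (hypothetical data; not part of the original file):
+#
+#   data = {"server": {"addresses": [{"ip": "10.0.0.2"}, {"ip": "10.0.0.3"}]}}
+#   navigate_value(data, ["server", "addresses", "ip"],
+#                  array_index={"server.addresses": 1})
+#   # -> "10.0.0.3"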
+
+
+def build_path(module, path, kv=None):
+ if kv is None:
+ kv = dict()
+
+ v = {}
+ for p in re.findall(r"{[^/]*}", path):
+ n = p[1:][:-1]
+
+ if n in kv:
+ v[n] = str(kv[n])
+
+ else:
+ if n in module.params:
+ v[n] = str(module.params.get(n))
+ else:
+ v[n] = ""
+
+ return path.format(**v)
+
+
+def get_region(module):
+ if module.params['region']:
+ return module.params['region']
+
+ return module.params['project'].split("_")[0]
+
+
+def is_empty_value(v):
+ return (not v)
+
+
+def are_different_dicts(dict1, dict2):
+ return _DictComparison(dict1) != _DictComparison(dict2)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
new file mode 100644
index 00000000..c3ab4103
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
@@ -0,0 +1,94 @@
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from functools import wraps
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+PYXCLI_INSTALLED = True
+PYXCLI_IMP_ERR = None
+try:
+ from pyxcli import client, errors
+except ImportError:
+ PYXCLI_IMP_ERR = traceback.format_exc()
+ PYXCLI_INSTALLED = False
+
+AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
+ 'domain', 'perf_class', 'vol',
+ 'iscsi_chap_name', 'iscsi_chap_secret',
+ 'cluster', 'host', 'lun', 'override',
+ 'fcaddress', 'iscsi_name', 'max_dms',
+ 'max_cgs', 'ldap_id', 'max_mirrors',
+ 'max_pools', 'max_volumes', 'hard_capacity',
+ 'soft_capacity']
+
+
+def xcli_wrapper(func):
+ """ Catch xcli errors and return a proper message"""
+ @wraps(func)
+ def wrapper(module, *args, **kwargs):
+ try:
+ return func(module, *args, **kwargs)
+ except errors.CommandExecutionError as e:
+ module.fail_json(msg=to_native(e))
+ return wrapper
+
+
+@xcli_wrapper
+def connect_ssl(module):
+ endpoints = module.params['endpoints']
+ username = module.params['username']
+ password = module.params['password']
+ if not (username and password and endpoints):
+ module.fail_json(
+ msg="Username, password or endpoints arguments "
+ "are missing from the module arguments")
+
+ try:
+ return client.XCLIClient.connect_multiendpoint_ssl(username,
+ password,
+ endpoints)
+ except errors.CommandFailedConnectionError as e:
+ module.fail_json(
+ msg="Connection with Spectrum Accelerate system has "
+ "failed: {[0]}.".format(to_native(e)))
+
+
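+# Typical flow in an ibm_sa_* module (sketch; 'vol_create' and the extra spec
+# fields are illustrative):
+#
+#     argument_spec = spectrum_accelerate_spec()
+#     argument_spec.update(vol=dict(required=True), pool=dict(required=True))
+#     module = AnsibleModule(argument_spec)
+#     is_pyxcli_installed(module)
+#     xcli_client = connect_ssl(module)
+#     execute_pyxcli_command(module, 'vol_create', xcli_client)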
+def spectrum_accelerate_spec():
+ """ Return arguments spec for AnsibleModule """
+ return dict(
+ endpoints=dict(required=True),
+ username=dict(required=True),
+ password=dict(no_log=True, required=True),
+ )
+
+
+@xcli_wrapper
+def execute_pyxcli_command(module, xcli_command, xcli_client):
+ pyxcli_args = build_pyxcli_command(module.params)
+ getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
+ return True
+
+
+def build_pyxcli_command(fields):
+ """ Builds the args for pyxcli using the exact args from ansible"""
+ pyxcli_args = {}
+ for field in fields:
+ if not fields[field]:
+ continue
+ if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '':
+ pyxcli_args[field] = fields[field]
+ return pyxcli_args
+
+
+def is_pyxcli_installed(module):
+ if not PYXCLI_INSTALLED:
+ module.fail_json(msg=missing_required_lib('pyxcli'),
+ exception=PYXCLI_IMP_ERR)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
new file mode 100644
index 00000000..5c57e755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
@@ -0,0 +1,482 @@
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+
+URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
+URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
+URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
+URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles"
+URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
+
+URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
+URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
+URL_GROUPS = "{url}/admin/realms/{realm}/groups"
+URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
+
+
+def keycloak_argument_spec():
+ """
+ Returns argument_spec of options common to keycloak_*-modules
+
+ :return: argument_spec dict
+ """
+ return dict(
+ auth_keycloak_url=dict(type='str', aliases=['url'], required=True),
+ auth_client_id=dict(type='str', default='admin-cli'),
+ auth_realm=dict(type='str', required=True),
+ auth_client_secret=dict(type='str', default=None, no_log=True),
+ auth_username=dict(type='str', aliases=['username'], required=True),
+ auth_password=dict(type='str', aliases=['password'], required=True, no_log=True),
+ validate_certs=dict(type='bool', default=True)
+ )
+
+
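+# Converts snake_case Ansible option names to the camelCase keys used by the
+# Keycloak API, e.g. camel('redirect_uris') -> 'redirectUris'.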
+def camel(words):
+ return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
+
+
+class KeycloakError(Exception):
+ pass
+
+
+def get_token(base_url, validate_certs, auth_realm, client_id,
+ auth_username, auth_password, client_secret):
+ if not base_url.lower().startswith(('http://', 'https://')):
+ raise KeycloakError("auth_url '%s' should either start with 'http://' or 'https://'." % base_url)
+ auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
+ temp_payload = {
+ 'grant_type': 'password',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'username': auth_username,
+ 'password': auth_password,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = dict(
+ (k, v) for k, v in temp_payload.items() if v is not None)
+ try:
+ r = json.loads(to_native(open_url(auth_url, method='POST',
+ validate_certs=validate_certs,
+ data=urlencode(payload)).read()))
+ except ValueError as e:
+ raise KeycloakError(
+ 'API returned invalid JSON when trying to obtain access token from %s: %s'
+ % (auth_url, str(e)))
+ except Exception as e:
+ raise KeycloakError('Could not obtain access token from %s: %s'
+ % (auth_url, str(e)))
+
+ try:
+ return {
+ 'Authorization': 'Bearer ' + r['access_token'],
+ 'Content-Type': 'application/json'
+ }
+ except KeyError:
+ raise KeycloakError(
+ 'Could not obtain access token from %s' % auth_url)
+
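+# Typical usage from a keycloak_* module (sketch; error handling omitted, values
+# come from module.params as defined by keycloak_argument_spec()):
+#
+#     connection_header = get_token(
+#         base_url=module.params.get('auth_keycloak_url'),
+#         validate_certs=module.params.get('validate_certs'),
+#         auth_realm=module.params.get('auth_realm'),
+#         client_id=module.params.get('auth_client_id'),
+#         auth_username=module.params.get('auth_username'),
+#         auth_password=module.params.get('auth_password'),
+#         client_secret=module.params.get('auth_client_secret'))
+#     kc = KeycloakAPI(module, connection_header)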
+
+class KeycloakAPI(object):
+ """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
+ is obtained through OpenID Connect
+ """
+ def __init__(self, module, connection_header):
+ self.module = module
+ self.baseurl = self.module.params.get('auth_keycloak_url')
+ self.validate_certs = self.module.params.get('validate_certs')
+ self.restheaders = connection_header
+
+ def get_clients(self, realm='master', filter=None):
+ """ Obtains client representations for clients in a realm
+
+ :param realm: realm to be queried
+ :param filter: if defined, only the client with clientId specified in the filter is returned
+ :return: list of dicts of client representations
+ """
+ clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+ if filter is not None:
+ clientlist_url += '?clientId=%s' % filter
+
+ try:
+ return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_by_clientid(self, client_id, realm='master'):
+ """ Get client representation by clientId
+ :param client_id: The clientId to be queried
+ :param realm: realm from which to obtain the client representation
+ :return: dict with a client representation or None if none matching exist
+ """
+ r = self.get_clients(realm=realm, filter=client_id)
+ if len(r) > 0:
+ return r[0]
+ else:
+ return None
+
+ def get_client_by_id(self, id, realm='master'):
+ """ Obtain client representation by id
+
+ :param id: id (not clientId) of client to be queried
+ :param realm: client from this realm
+ :return: dict of client representation or None if none matching exist
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_id(self, client_id, realm='master'):
+ """ Obtain id of client by client_id
+
+ :param client_id: client_id of client to be queried
+ :param realm: realm from which to obtain the client
+ :return: id of client (usually a UUID)
+ """
+ result = self.get_client_by_clientid(client_id, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
+ def update_client(self, id, clientrep, realm="master"):
+ """ Update an existing client
+ :param id: id (not clientId) of client to be updated in Keycloak
+ :param clientrep: corresponding (partial/full) client representation with updates
+ :param realm: realm the client is in
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='PUT', headers=self.restheaders,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client(self, clientrep, realm="master"):
+ """ Create a client in keycloak
+ :param clientrep: Client representation of client to be created. Must at least contain field clientId
+ :param realm: realm for client to be created
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(client_url, method='POST', headers=self.restheaders,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client %s in realm %s: %s'
+ % (clientrep['clientId'], realm, str(e)))
+
+ def delete_client(self, id, realm="master"):
+ """ Delete a client from Keycloak
+
+ :param id: id (not clientId) of client to be deleted
+ :param realm: realm of client to be deleted
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_templates(self, realm='master'):
+ """ Obtains client template representations for client templates in a realm
+
+ :param realm: realm to be queried
+ :return: list of dicts of client template representations
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_template_by_id(self, id, realm='master'):
+ """ Obtain client template representation by id
+
+ :param id: id (not name) of client template to be queried
+ :param realm: client template from this realm
+ :return: dict of client template representation or None if none matching exist
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_template_by_name(self, name, realm='master'):
+ """ Obtain client template representation by name
+
+ :param name: name of client template to be queried
+ :param realm: client template from this realm
+ :return: dict of client template representation or None if none matching exist
+ """
+ result = self.get_client_templates(realm)
+ if isinstance(result, list):
+ result = [x for x in result if x['name'] == name]
+ if len(result) > 0:
+ return result[0]
+ return None
+
+ def get_client_template_id(self, name, realm='master'):
+ """ Obtain client template id by name
+
+ :param name: name of client template to be queried
+ :param realm: client template from this realm
+ :return: client template id (usually a UUID)
+ """
+ result = self.get_client_template_by_name(name, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
+ def update_client_template(self, id, clienttrep, realm="master"):
+ """ Update an existing client template
+ :param id: id (not name) of client template to be updated in Keycloak
+ :param clienttrep: corresponding (partial/full) client template representation with updates
+ :param realm: realm the client template is in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='PUT', headers=self.restheaders,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client_template(self, clienttrep, realm="master"):
+ """ Create a client in keycloak
+ :param clienttrep: Client template representation of client template to be created. Must at least contain field name
+ :param realm: realm for client template to be created in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(url, method='POST', headers=self.restheaders,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
+ % (clienttrep['name'], realm, str(e)))
+
+ def delete_client_template(self, id, realm="master"):
+ """ Delete a client template from Keycloak
+
+ :param id: id (not name) of client template to be deleted
+ :param realm: realm of client template to be deleted
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_groups(self, realm="master"):
+ """ Fetch the name and ID of all groups on the Keycloak server.
+
+ To fetch the full data of the group, make a subsequent call to
+ get_group_by_groupid, passing in the ID of the group you wish to return.
+
+ :param realm: Return the groups of this realm (default "master").
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
+ % (realm, str(e)))
+
+ def get_group_by_groupid(self, gid, realm="master"):
+ """ Fetch a keycloak group from the provided realm using the group's unique ID.
+
+ If the group does not exist, None is returned.
+
+ gid is a UUID provided by the Keycloak API
+ :param gid: UUID of the group to be returned
+ :param realm: Realm in which the group resides; default 'master'.
+ """
+ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+
+ def get_group_by_name(self, name, realm="master"):
+ """ Fetch a keycloak group within a realm based on its name.
+
+ The Keycloak API does not allow filtering of the Groups resource by name.
+ As a result, this method first retrieves the entire list of groups - name and ID -
+ then performs a second query to fetch the group.
+
+ If the group does not exist, None is returned.
+ :param name: Name of the group to fetch.
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ all_groups = self.get_groups(realm=realm)
+
+ for group in all_groups:
+ if group['name'] == name:
+ return self.get_group_by_groupid(group['id'], realm=realm)
+
+ return None
+
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (name, realm, str(e)))
+
+ def create_group(self, grouprep, realm="master"):
+ """ Create a Keycloak group.
+
+ :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(groups_url, method='POST', headers=self.restheaders,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create group %s in realm %s: %s"
+ % (grouprep['name'], realm, str(e)))
+
+ def update_group(self, grouprep, realm="master"):
+ """ Update an existing group.
+
+ :param grouprep: A GroupRepresentation of the updated group.
+ :return HTTPResponse object on success
+ """
+ group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
+
+ try:
+ return open_url(group_url, method='PUT', headers=self.restheaders,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update group %s in realm %s: %s'
+ % (grouprep['name'], realm, str(e)))
+
+ def delete_group(self, name=None, groupid=None, realm="master"):
+ """ Delete a group. One of name or groupid must be provided.
+
+ Providing the group ID is preferred as it avoids a second lookup to
+ convert a group name to an ID.
+
+ :param name: The name of the group. A lookup will be performed to retrieve the group ID.
+ :param groupid: The ID of the group (preferred to name).
+ :param realm: The realm in which this group resides, default "master".
+ """
+
+ if groupid is None and name is None:
+ # prefer an exception since this is almost certainly a programming error in the module itself.
+ raise Exception("Unable to delete group - one of group ID or name must be provided.")
+
+ # only lookup the name if groupid isn't provided.
+ # in the case that both are provided, prefer the ID, since it's one
+ # less lookup.
+ if groupid is None and name is not None:
+ for group in self.get_groups(realm=realm):
+ if group['name'] == name:
+ groupid = group['id']
+ break
+
+ # if the group doesn't exist - no problem, nothing to delete.
+ if groupid is None:
+ return None
+
+ # should have a good groupid by here.
+ group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
+ try:
+ return open_url(group_url, method='DELETE', headers=self.restheaders,
+ validate_certs=self.validate_certs)
+
+ except Exception as e:
+ self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py
new file mode 100644
index 00000000..92c78023
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/influxdb.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests.exceptions
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+INFLUXDB_IMP_ERR = None
+try:
+ from influxdb import InfluxDBClient
+ from influxdb import __version__ as influxdb_version
+ from influxdb import exceptions
+ HAS_INFLUXDB = True
+except ImportError:
+ INFLUXDB_IMP_ERR = traceback.format_exc()
+ HAS_INFLUXDB = False
+
+
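+# Typical usage in an influxdb_* module (sketch; the extra 'database_name'
+# option is illustrative):
+#
+#     argument_spec = InfluxDb.influxdb_argument_spec()
+#     argument_spec.update(database_name=dict(required=True, type='str'))
+#     module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+#     client = InfluxDb(module).connect_to_influxdb()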
+class InfluxDb():
+ def __init__(self, module):
+ self.module = module
+ self.params = self.module.params
+ self.check_lib()
+ self.hostname = self.params['hostname']
+ self.port = self.params['port']
+ self.path = self.params['path']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.database_name = self.params.get('database_name')
+
+ def check_lib(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+
+ if not HAS_INFLUXDB:
+ self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
+
+ @staticmethod
+ def influxdb_argument_spec():
+ return dict(
+ hostname=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8086),
+ path=dict(type='str', default=''),
+ username=dict(type='str', default='root', aliases=['login_username']),
+ password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
+ ssl=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=True),
+ timeout=dict(type='int'),
+ retries=dict(type='int', default=3),
+ proxies=dict(type='dict', default={}),
+ use_udp=dict(type='bool', default=False),
+ udp_port=dict(type='int', default=4444),
+ )
+
+ def connect_to_influxdb(self):
+ args = dict(
+ host=self.hostname,
+ port=self.port,
+ username=self.username,
+ password=self.password,
+ database=self.database_name,
+ ssl=self.params['ssl'],
+ verify_ssl=self.params['validate_certs'],
+ timeout=self.params['timeout'],
+ use_udp=self.params['use_udp'],
+ udp_port=self.params['udp_port'],
+ proxies=self.params['proxies'],
+ )
+ influxdb_api_version = LooseVersion(influxdb_version)
+ if influxdb_api_version >= LooseVersion('4.1.0'):
+ # retries option is added in version 4.1.0
+ args.update(retries=self.params['retries'])
+
+ if influxdb_api_version >= LooseVersion('5.1.0'):
+ # path argument is added in version 5.1.0
+ args.update(path=self.path)
+
+ return InfluxDBClient(**args)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py
new file mode 100644
index 00000000..9eb9f406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ipa.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import socket
+import uuid
+
+import re
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
+from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
+
+
+def _env_then_dns_fallback(*args, **kwargs):
+ ''' Load value from environment or DNS in that order'''
+ try:
+ result = env_fallback(*args, **kwargs)
+ if result == '':
+ raise AnsibleFallbackNotFound
+ except AnsibleFallbackNotFound:
+ # If no host was given, we try to guess it from IPA.
+ # The ipa-ca entry is a standard entry that IPA will have set for
+ # the CA.
+ try:
+ return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
+ except Exception:
+ raise AnsibleFallbackNotFound
+
+
+class IPAClient(object):
+ def __init__(self, module, host, port, protocol):
+ self.host = host
+ self.port = port
+ self.protocol = protocol
+ self.module = module
+ self.headers = None
+ self.timeout = module.params.get('ipa_timeout')
+ self.use_gssapi = False
+
+ def get_base_url(self):
+ return '%s://%s/ipa' % (self.protocol, self.host)
+
+ def get_json_url(self):
+ return '%s/session/json' % self.get_base_url()
+
+ def login(self, username, password):
+ if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
+ self.use_gssapi = True
+ elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
+ ccache = "MEMORY:" + str(uuid.uuid4())
+ os.environ['KRB5CCNAME'] = ccache
+ self.use_gssapi = True
+ else:
+ if not password:
+ if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
+ self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
+ self._fail('login', 'Password is required if not using '
+ 'GSSAPI. To use GSSAPI, please set the '
+ 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
+ 'environment variables.')
+ url = '%s/session/login_password' % self.get_base_url()
+ data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
+ headers = {'referer': self.get_base_url(),
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Accept': 'text/plain'}
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail('login', info['msg'])
+
+ self.headers = {'Cookie': info.get('set-cookie')}
+ except Exception as e:
+ self._fail('login', to_native(e))
+ if not self.headers:
+ self.headers = dict()
+ self.headers.update({
+ 'referer': self.get_base_url(),
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'})
+
+ def _fail(self, msg, e):
+ if 'message' in e:
+ err_string = e.get('message')
+ else:
+ err_string = e
+ self.module.fail_json(msg='%s: %s' % (msg, err_string))
+
+ def get_ipa_version(self):
+ response = self.ping()['summary']
+ ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
+ version_match = ipa_ver_regex.match(response)
+ ipa_version = None
+ if version_match:
+ ipa_version = version_match.groups()[0]
+ return ipa_version
+
+ def ping(self):
+ return self._post_json(method='ping', name=None)
+
+ def _post_json(self, method, name, item=None):
+ if item is None:
+ item = {}
+ url = '%s/session/json' % self.get_base_url()
+ data = dict(method=method)
+
+ # TODO: We should probably handle this a little better.
+ if method in ('ping', 'config_show'):
+ data['params'] = [[], {}]
+ elif method == 'config_mod':
+ data['params'] = [[], item]
+ else:
+ data['params'] = [[name], item]
+
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
+ headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail(method, info['msg'])
+ except Exception as e:
+ self._fail('post %s' % method, to_native(e))
+
+ if PY3:
+ charset = resp.headers.get_content_charset('latin-1')
+ else:
+ response_charset = resp.headers.getparam('charset')
+ if response_charset:
+ charset = response_charset
+ else:
+ charset = 'latin-1'
+ resp = json.loads(to_text(resp.read(), encoding=charset))
+ err = resp.get('error')
+ if err is not None:
+ self._fail('response %s' % method, err)
+
+ if 'result' in resp:
+ result = resp.get('result')
+ if 'result' in result:
+ result = result.get('result')
+ if isinstance(result, list):
+ if len(result) > 0:
+ return result[0]
+ else:
+ return {}
+ return result
+ return None
+
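+ # Example (illustrative): returns the keys whose desired (module) value differs
+ # from what IPA currently reports, normalising scalars against IPA's lists:
+ #
+ #     ipa_data = {'description': ['old'], 'gidnumber': ['1000']}
+ #     module_data = {'description': 'new'}
+ #     client.get_diff(ipa_data, module_data)   # -> ['description']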
+ def get_diff(self, ipa_data, module_data):
+ result = []
+ for key in module_data.keys():
+ mod_value = module_data.get(key, None)
+ if isinstance(mod_value, list):
+ default = []
+ else:
+ default = None
+ ipa_value = ipa_data.get(key, default)
+ if isinstance(ipa_value, list) and not isinstance(mod_value, list):
+ mod_value = [mod_value]
+ if isinstance(ipa_value, list) and isinstance(mod_value, list):
+ mod_value = sorted(mod_value)
+ ipa_value = sorted(ipa_value)
+ if mod_value != ipa_value:
+ result.append(key)
+ return result
+
+ def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ remove_method(name=name, item={item: diff})
+ else:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ add_method(name=name, item={item: diff})
+ else:
+ add_method(name=name, item=diff)
+
+ return changed
+
+
+def ipa_argument_spec():
+ return dict(
+ ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
+ ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
+ ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
+ ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
+ ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
+ ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
+ validate_certs=dict(type='bool', default=True),
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py
new file mode 100644
index 00000000..96f91ba8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/known_hosts.py
@@ -0,0 +1,180 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import hmac
+import re
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+try:
+ from hashlib import sha1
+except ImportError:
+ import sha as sha1
+
+HASHED_KEY_MAGIC = "|1|"
+
+
+def is_ssh_url(url):
+
+ """ check if url is ssh """
+
+ if "@" in url and "://" not in url:
+ return True
+ for scheme in "ssh://", "git+ssh://", "ssh+git://":
+ if url.startswith(scheme):
+ return True
+ return False
+
+
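+# Examples (illustrative):
+#
+#     get_fqdn_and_port("git@github.com:org/repo.git")            # -> ("github.com", None)
+#     get_fqdn_and_port("ssh://host.example.com:2222/repo.git")   # -> ("host.example.com", "2222")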
+def get_fqdn_and_port(repo_url):
+
+ """ chop the hostname and port out of a url """
+
+ fqdn = None
+ port = None
+ ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
+ if "@" in repo_url and "://" not in repo_url:
+ # most likely a user@host:path or user@host/path type URL
+ repo_url = repo_url.split("@", 1)[1]
+ match = ipv6_re.match(repo_url)
+ # For this type of URL, colon specifies the path, not the port
+ if match:
+ fqdn, path = match.groups()
+ elif ":" in repo_url:
+ fqdn = repo_url.split(":")[0]
+ elif "/" in repo_url:
+ fqdn = repo_url.split("/")[0]
+ elif "://" in repo_url:
+ # this should be something we can parse with urlparse
+ parts = urlparse(repo_url)
+ # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
+ # ensure we actually have a parts[1] before continuing.
+ if parts[1] != '':
+ fqdn = parts[1]
+ if "@" in fqdn:
+ fqdn = fqdn.split("@", 1)[1]
+ match = ipv6_re.match(fqdn)
+ if match:
+ fqdn, port = match.groups()
+ elif ":" in fqdn:
+ fqdn, port = fqdn.split(":")[0:2]
+ return fqdn, port
+
+
+def check_hostkey(module, fqdn):
+ return not not_in_host_file(module, fqdn)
+
+
+# this is a variant of code found in connection_plugins/paramiko.py and we should modify
+# the paramiko code to import and use this.
+
+def not_in_host_file(self, host):
+
+ if 'USER' in os.environ:
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_host_file = "~/.ssh/known_hosts"
+ user_host_file = os.path.expanduser(user_host_file)
+
+ host_file_list = []
+ host_file_list.append(user_host_file)
+ host_file_list.append("/etc/ssh/ssh_known_hosts")
+ host_file_list.append("/etc/ssh/ssh_known_hosts2")
+ host_file_list.append("/etc/openssh/ssh_known_hosts")
+
+ hfiles_not_found = 0
+ for hf in host_file_list:
+ if not os.path.exists(hf):
+ hfiles_not_found += 1
+ continue
+
+ try:
+ host_fh = open(hf)
+ except IOError:
+ hfiles_not_found += 1
+ continue
+ else:
+ data = host_fh.read()
+ host_fh.close()
+
+ for line in data.split("\n"):
+ if line is None or " " not in line:
+ continue
+ tokens = line.split()
+ if tokens[0].find(HASHED_KEY_MAGIC) == 0:
+ # this is a hashed known host entry
+ try:
+ (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
+ hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+ hash.update(host)
+ if hash.digest() == kn_host.decode('base64'):
+ return False
+ except Exception:
+ # invalid hashed host key, skip it
+ continue
+ else:
+ # standard host file entry
+ if host in tokens[0]:
+ return False
+
+ return True
+
+
+def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
+
+ """ use ssh-keyscan to add the hostkey """
+
+ keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
+
+ if 'USER' in os.environ:
+ user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_ssh_dir = "~/.ssh/"
+ user_host_file = "~/.ssh/known_hosts"
+ user_ssh_dir = os.path.expanduser(user_ssh_dir)
+
+ if not os.path.exists(user_ssh_dir):
+ if create_dir:
+ try:
+ os.makedirs(user_ssh_dir, int('700', 8))
+ except Exception:
+ module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
+ else:
+ module.fail_json(msg="%s does not exist" % user_ssh_dir)
+ elif not os.path.isdir(user_ssh_dir):
+ module.fail_json(msg="%s is not a directory" % user_ssh_dir)
+
+ if port:
+ this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
+ else:
+ this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
+
+ rc, out, err = module.run_command(this_cmd)
+ # ssh-keyscan gives a 0 exit code and prints nothing on timeout
+ if rc != 0 or not out:
+ msg = 'failed to retrieve hostkey'
+ if not out:
+ msg += '. "%s" returned no matches.' % this_cmd
+ else:
+ msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
+
+ if err:
+ msg += ' [stderr]: %s' % err
+
+ module.fail_json(msg=msg)
+
+ module.append_to_file(user_host_file, out)
+
+ return rc, out, err
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py
new file mode 100644
index 00000000..90d8d9ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/kubevirt.py
@@ -0,0 +1,465 @@
+# -*- coding: utf-8 -*-
+#
+
+# Copyright (c) 2018, KubeVirt Team <@kubevirt>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from collections import defaultdict
+from distutils.version import Version
+
+from ansible.module_utils.common import dict_transformations
+from ansible.module_utils.common._collections_compat import Sequence
+from ansible_collections.community.kubernetes.plugins.module_utils.common import list_dict_str
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+import copy
+import re
+
+MAX_SUPPORTED_API_VERSION = 'v1alpha3'
+API_GROUP = 'kubevirt.io'
+
+
+# Put all args that (can) modify 'spec:' here:
+VM_SPEC_DEF_ARG_SPEC = {
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'memory': {'type': 'str'},
+ 'memory_limit': {'type': 'str'},
+ 'cpu_cores': {'type': 'int'},
+ 'disks': {'type': 'list'},
+ 'labels': {'type': 'dict'},
+ 'interfaces': {'type': 'list'},
+ 'machine_type': {'type': 'str'},
+ 'cloud_init_nocloud': {'type': 'dict'},
+ 'bootloader': {'type': 'str'},
+ 'smbios_uuid': {'type': 'str'},
+ 'cpu_model': {'type': 'str'},
+ 'headless': {'type': 'str'},
+ 'hugepage_size': {'type': 'str'},
+ 'tablets': {'type': 'list'},
+ 'cpu_limit': {'type': 'int'},
+ 'cpu_shares': {'type': 'int'},
+ 'cpu_features': {'type': 'list'},
+ 'affinity': {'type': 'dict'},
+ 'anti_affinity': {'type': 'dict'},
+ 'node_affinity': {'type': 'dict'},
+}
+# And other common args go here:
+VM_COMMON_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'hostname': {'type': 'str'},
+ 'subdomain': {'type': 'str'},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {'type': 'list', 'choices': ['json', 'merge', 'strategic-merge']},
+ 'wait': {'type': 'bool', 'default': True},
+ 'wait_timeout': {'type': 'int', 'default': 120},
+ 'wait_sleep': {'type': 'int', 'default': 5},
+}
+VM_COMMON_ARG_SPEC.update(VM_SPEC_DEF_ARG_SPEC)
+
+
+def virtdict():
+ """
+ This function creates a dictionary whose missing keys default to nested dictionaries.
+ """
+ return defaultdict(virtdict)
+
+
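+# KubeAPIVersion orders Kubernetes-style API versions, so that, for example:
+#
+#     KubeAPIVersion('v1alpha3') < KubeAPIVersion('v1beta1') < KubeAPIVersion('v1')   # -> True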
+class KubeAPIVersion(Version):
+ component_re = re.compile(r'(\d+ | [a-z]+)', re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def parse(self, vstring):
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x]
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ errmsg = "version '{0}' does not conform to kubernetes api versioning guidelines".format(vstring)
+ c = components
+
+ if len(c) not in (2, 4) or c[0] != 'v' or not isinstance(c[1], int):
+ raise ValueError(errmsg)
+ if len(c) == 4 and (c[2] not in ('alpha', 'beta') or not isinstance(c[3], int)):
+ raise ValueError(errmsg)
+
+ self.version = components
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "KubeAPIVersion ('{0}')".format(str(self))
+
+ def _cmp(self, other):
+ if isinstance(other, str):
+ other = KubeAPIVersion(other)
+
+ myver = self.version
+ otherver = other.version
+
+ for ver in myver, otherver:
+ if len(ver) == 2:
+ ver.extend(['zeta', 9999])
+
+ if myver == otherver:
+ return 0
+ if myver < otherver:
+ return -1
+ if myver > otherver:
+ return 1
+
+ # python2 compatibility
+ def __cmp__(self, other):
+ return self._cmp(other)
+
+
+class KubeVirtRawModule(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtRawModule, self).__init__(*args, **kwargs)
+
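+ # Example (illustrative): the base dict wins on collisions, then earlier dicts
+ # in the list win over later ones:
+ #
+ #     self.merge_dicts({'a': 1}, [{'a': 2, 'b': 2}, {'b': 3, 'c': 3}])
+ #     # -> {'a': 1, 'b': 2, 'c': 3}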
+ @staticmethod
+ def merge_dicts(base_dict, merging_dicts):
+ """This function merges a base dictionary with one or more other dictionaries.
+ The base dictionary takes precedence when there is a key collision.
+ merging_dicts can be a dict or a list or tuple of dicts. In the latter case, the
+ dictionaries at the front of the list have higher precedence over the ones at the end.
+ """
+ if not merging_dicts:
+ merging_dicts = ({},)
+
+ if not isinstance(merging_dicts, Sequence):
+ merging_dicts = (merging_dicts,)
+
+ new_dict = {}
+ for d in reversed(merging_dicts):
+ new_dict = dict_transformations.dict_merge(new_dict, d)
+
+ new_dict = dict_transformations.dict_merge(new_dict, base_dict)
+
+ return new_dict
+
+ def get_resource(self, resource):
+ try:
+ existing = resource.get(name=self.name, namespace=self.namespace)
+ except Exception:
+ existing = None
+
+ return existing
+
+ def _define_datavolumes(self, datavolumes, spec):
+ """
+ Takes the datavolumes parameter of Ansible and creates the kubevirt API dataVolumeTemplates
+ structure from it.
+ """
+ if not datavolumes:
+ return
+
+ spec['dataVolumeTemplates'] = []
+ for dv in datavolumes:
+ # Add datavolume to datavolumetemplates spec:
+ dvt = virtdict()
+ dvt['metadata']['name'] = dv.get('name')
+ dvt['spec']['pvc'] = {
+ 'accessModes': dv.get('pvc').get('accessModes'),
+ 'resources': {
+ 'requests': {
+ 'storage': dv.get('pvc').get('storage'),
+ }
+ }
+ }
+ dvt['spec']['source'] = dv.get('source')
+ spec['dataVolumeTemplates'].append(dvt)
+
+ # Add datavolume to disks spec:
+ if not spec['template']['spec']['domain']['devices']['disks']:
+ spec['template']['spec']['domain']['devices']['disks'] = []
+
+ spec['template']['spec']['domain']['devices']['disks'].append(
+ {
+ 'name': dv.get('name'),
+ 'disk': dv.get('disk', {'bus': 'virtio'}),
+ }
+ )
+
+ # Add datavolume to volumes spec:
+ if not spec['template']['spec']['volumes']:
+ spec['template']['spec']['volumes'] = []
+
+ spec['template']['spec']['volumes'].append(
+ {
+ 'dataVolume': {
+ 'name': dv.get('name')
+ },
+ 'name': dv.get('name'),
+ }
+ )
+
+ def _define_cloud_init(self, cloud_init_nocloud, template_spec):
+ """
+ Takes the user's cloud_init_nocloud parameter and fills it into the kubevirt
+ API structure. The disk name is hardcoded to ansiblecloudinitdisk.
+ """
+ if cloud_init_nocloud:
+ if not template_spec['volumes']:
+ template_spec['volumes'] = []
+ if not template_spec['domain']['devices']['disks']:
+ template_spec['domain']['devices']['disks'] = []
+
+ template_spec['volumes'].append({'name': 'ansiblecloudinitdisk', 'cloudInitNoCloud': cloud_init_nocloud})
+ template_spec['domain']['devices']['disks'].append({
+ 'name': 'ansiblecloudinitdisk',
+ 'disk': {'bus': 'virtio'},
+ })
+
+ def _define_interfaces(self, interfaces, template_spec, defaults):
+ """
+ Takes the interfaces parameter of Ansible and creates the kubevirt API interfaces
+ and networks structure from it.
+ """
+ if not interfaces and defaults and 'interfaces' in defaults:
+ interfaces = copy.deepcopy(defaults['interfaces'])
+ for d in interfaces:
+ d['network'] = defaults['networks'][0]
+
+ if interfaces:
+ # Extract interfaces k8s specification from interfaces list passed to Ansible:
+ spec_interfaces = []
+ for i in interfaces:
+ spec_interfaces.append(
+ self.merge_dicts(dict((k, v) for k, v in i.items() if k != 'network'), defaults['interfaces'])
+ )
+ if 'interfaces' not in template_spec['domain']['devices']:
+ template_spec['domain']['devices']['interfaces'] = []
+ template_spec['domain']['devices']['interfaces'].extend(spec_interfaces)
+
+ # Extract networks k8s specification from interfaces list passed to Ansible:
+ spec_networks = []
+ for i in interfaces:
+ net = i['network']
+ net['name'] = i['name']
+ spec_networks.append(self.merge_dicts(net, defaults['networks']))
+ if 'networks' not in template_spec:
+ template_spec['networks'] = []
+ template_spec['networks'].extend(spec_networks)
+
+ def _define_disks(self, disks, template_spec, defaults):
+ """
+ Takes the disks parameter of Ansible and creates the kubevirt API disks and
+ volumes structure from it.
+ """
+ if not disks and defaults and 'disks' in defaults:
+ disks = copy.deepcopy(defaults['disks'])
+ for d in disks:
+ d['volume'] = defaults['volumes'][0]
+
+ if disks:
+ # Extract k8s specification from disks list passed to Ansible:
+ spec_disks = []
+ for d in disks:
+ spec_disks.append(
+ self.merge_dicts(dict((k, v) for k, v in d.items() if k != 'volume'), defaults['disks'])
+ )
+ if 'disks' not in template_spec['domain']['devices']:
+ template_spec['domain']['devices']['disks'] = []
+ template_spec['domain']['devices']['disks'].extend(spec_disks)
+
+ # Extract volumes k8s specification from disks list passed to Ansible:
+ spec_volumes = []
+ for d in disks:
+ volume = d['volume']
+ volume['name'] = d['name']
+ spec_volumes.append(self.merge_dicts(volume, defaults['volumes']))
+ if 'volumes' not in template_spec:
+ template_spec['volumes'] = []
+ template_spec['volumes'].extend(spec_volumes)
+
+ def find_supported_resource(self, kind):
+ results = self.client.resources.search(kind=kind, group=API_GROUP)
+ if not results:
+ self.fail('Failed to find resource {0} in {1}'.format(kind, API_GROUP))
+ sr = sorted(results, key=lambda r: KubeAPIVersion(r.api_version), reverse=True)
+ for r in sr:
+ if KubeAPIVersion(r.api_version) <= KubeAPIVersion(MAX_SUPPORTED_API_VERSION):
+ return r
+ self.fail("API versions {0} are too recent. Max supported is {1}/{2}.".format(
+ str([r.api_version for r in sr]), API_GROUP, MAX_SUPPORTED_API_VERSION))
+
+ def _construct_vm_definition(self, kind, definition, template, params, defaults=None):
+ self.client = self.get_api_client()
+
+ disks = params.get('disks', [])
+ memory = params.get('memory')
+ memory_limit = params.get('memory_limit')
+ cpu_cores = params.get('cpu_cores')
+ cpu_model = params.get('cpu_model')
+ cpu_features = params.get('cpu_features')
+ labels = params.get('labels')
+ datavolumes = params.get('datavolumes')
+ interfaces = params.get('interfaces')
+ bootloader = params.get('bootloader')
+ cloud_init_nocloud = params.get('cloud_init_nocloud')
+ machine_type = params.get('machine_type')
+ headless = params.get('headless')
+ smbios_uuid = params.get('smbios_uuid')
+ hugepage_size = params.get('hugepage_size')
+ tablets = params.get('tablets')
+ cpu_shares = params.get('cpu_shares')
+ cpu_limit = params.get('cpu_limit')
+ node_affinity = params.get('node_affinity')
+ vm_affinity = params.get('affinity')
+ vm_anti_affinity = params.get('anti_affinity')
+ hostname = params.get('hostname')
+ subdomain = params.get('subdomain')
+ template_spec = template['spec']
+
+ # Merge additional flat parameters:
+ if memory:
+ template_spec['domain']['resources']['requests']['memory'] = memory
+
+ if cpu_shares:
+ template_spec['domain']['resources']['requests']['cpu'] = cpu_shares
+
+ if cpu_limit:
+ template_spec['domain']['resources']['limits']['cpu'] = cpu_limit
+
+ if tablets:
+ for tablet in tablets:
+ tablet['type'] = 'tablet'
+ template_spec['domain']['devices']['inputs'] = tablets
+
+ if memory_limit:
+ template_spec['domain']['resources']['limits']['memory'] = memory_limit
+
+ if hugepage_size is not None:
+ template_spec['domain']['memory']['hugepages']['pageSize'] = hugepage_size
+
+ if cpu_features is not None:
+ template_spec['domain']['cpu']['features'] = cpu_features
+
+ if cpu_cores is not None:
+ template_spec['domain']['cpu']['cores'] = cpu_cores
+
+ if cpu_model:
+ template_spec['domain']['cpu']['model'] = cpu_model
+
+ if labels:
+ template['metadata']['labels'] = self.merge_dicts(labels, template['metadata']['labels'])
+
+ if machine_type:
+ template_spec['domain']['machine']['type'] = machine_type
+
+ if bootloader:
+ template_spec['domain']['firmware']['bootloader'] = {bootloader: {}}
+
+ if smbios_uuid:
+ template_spec['domain']['firmware']['uuid'] = smbios_uuid
+
+ if headless is not None:
+ template_spec['domain']['devices']['autoattachGraphicsDevice'] = not headless
+
+ if vm_affinity or vm_anti_affinity:
+ vms_affinity = vm_affinity or vm_anti_affinity
+ affinity_name = 'podAffinity' if vm_affinity else 'podAntiAffinity'
+ for affinity in vms_affinity.get('soft', []):
+ if not template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity'][affinity_name]['preferredDuringSchedulingIgnoredDuringExecution'].append({
+ 'weight': affinity.get('weight'),
+ 'podAffinityTerm': {
+ 'labelSelector': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ },
+ 'topologyKey': affinity.get('topology_key'),
+ },
+ })
+ for affinity in vms_affinity.get('hard', []):
+ if not template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity'][affinity_name]['requiredDuringSchedulingIgnoredDuringExecution'].append({
+ 'labelSelector': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ },
+ 'topologyKey': affinity.get('topology_key'),
+ })
+
+ if node_affinity:
+ for affinity in node_affinity.get('soft', []):
+ if not template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution']:
+ template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'] = []
+ template_spec['affinity']['nodeAffinity']['preferredDuringSchedulingIgnoredDuringExecution'].append({
+ 'weight': affinity.get('weight'),
+ 'preference': {
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ }
+ })
+ for affinity in node_affinity.get('hard', []):
+ if not template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms']:
+ template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'] = []
+ template_spec['affinity']['nodeAffinity']['requiredDuringSchedulingIgnoredDuringExecution']['nodeSelectorTerms'].append({
+ 'matchExpressions': affinity.get('term').get('match_expressions'),
+ })
+
+ if hostname:
+ template_spec['hostname'] = hostname
+
+ if subdomain:
+ template_spec['subdomain'] = subdomain
+
+ # Define disks
+ self._define_disks(disks, template_spec, defaults)
+
+ # Define cloud init disk if defined:
+ # Note that this must be called after _define_disks, so that the cloud_init
+ # disk is not first in order and is not used as the boot disk:
+ self._define_cloud_init(cloud_init_nocloud, template_spec)
+
+ # Define interfaces:
+ self._define_interfaces(interfaces, template_spec, defaults)
+
+ # Define datavolumes:
+ self._define_datavolumes(datavolumes, definition['spec'])
+
+ return self.merge_dicts(definition, self.resource_definitions[0])
+
+ def construct_vm_definition(self, kind, definition, template, defaults=None):
+ definition = self._construct_vm_definition(kind, definition, template, self.params, defaults)
+ resource = self.find_supported_resource(kind)
+ definition = self.set_defaults(resource, definition)
+ return resource, definition
+
+ def construct_vm_template_definition(self, kind, definition, template, params):
+ definition = self._construct_vm_definition(kind, definition, template, params)
+ resource = self.find_resource(kind, definition['apiVersion'], fail=True)
+
+ # Set defaults:
+ definition['kind'] = kind
+ definition['metadata']['name'] = params.get('name')
+ definition['metadata']['namespace'] = params.get('namespace')
+
+ return resource, definition
+
+ def execute_crud(self, kind, definition):
+ """ Module execution """
+ resource = self.find_supported_resource(kind)
+ definition = self.set_defaults(resource, definition)
+ return self.perform_action(resource, definition)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py
new file mode 100644
index 00000000..d49d0a97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/ldap.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+from ansible.module_utils._text import to_native
+
+try:
+ import ldap
+ import ldap.sasl
+
+ HAS_LDAP = True
+except ImportError:
+ HAS_LDAP = False
+
+
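+# Typical usage in an ldap_* module (sketch; the extra options shown are
+# illustrative):
+#
+#     module = AnsibleModule(
+#         argument_spec=gen_specs(
+#             attributes=dict(type='dict', required=True),
+#             state=dict(default='present', choices=['absent', 'present']),
+#         ),
+#         supports_check_mode=True,
+#     )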
+def gen_specs(**specs):
+ specs.update({
+ 'bind_dn': dict(),
+ 'bind_pw': dict(default='', no_log=True),
+ 'dn': dict(required=True),
+ 'server_uri': dict(default='ldapi:///'),
+ 'start_tls': dict(default=False, type='bool'),
+ 'validate_certs': dict(default=True, type='bool'),
+ })
+
+ return specs
+
+
+class LdapGeneric(object):
+ def __init__(self, module):
+ # Shortcuts
+ self.module = module
+ self.bind_dn = self.module.params['bind_dn']
+ self.bind_pw = self.module.params['bind_pw']
+ self.dn = self.module.params['dn']
+ self.server_uri = self.module.params['server_uri']
+ self.start_tls = self.module.params['start_tls']
+ self.verify_cert = self.module.params['validate_certs']
+
+ # Establish connection
+ self.connection = self._connect_to_ldap()
+
+ def fail(self, msg, exn):
+ self.module.fail_json(
+ msg=msg,
+ details=to_native(exn),
+ exception=traceback.format_exc()
+ )
+
+ def _connect_to_ldap(self):
+ if not self.verify_cert:
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+
+ connection = ldap.initialize(self.server_uri)
+
+ if self.start_tls:
+ try:
+ connection.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ if self.bind_dn is not None:
+ connection.simple_bind_s(self.bind_dn, self.bind_pw)
+ else:
+ connection.sasl_interactive_bind_s('', ldap.sasl.external())
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+
+ return connection
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py
new file mode 100644
index 00000000..53d546db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/linode.py
@@ -0,0 +1,21 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Luke Murphy @decentral1se
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def get_user_agent(module):
+ """Retrieve a user-agent to send with LinodeClient requests."""
+ try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+ except ImportError:
+ ansible_version = 'unknown'
+ return 'Ansible-%s/%s' % (module, ansible_version)
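
get_user_agent() only assembles a User-Agent string from the calling module's name and the installed ansible-core version. A one-line usage sketch (not part of this patch; the linode_api4 dependency and the module name follow how the collection's linode_v4 module consumes this helper):

    # hypothetical usage of module_utils/linode.py
    from linode_api4 import LinodeClient
    from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent

    client = LinodeClient('ACCESS-TOKEN', user_agent=get_user_agent('linode_v4_module'))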
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py
new file mode 100644
index 00000000..e835a6ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/lxd.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import socket
+import ssl
+
+from ansible.module_utils.urls import generic_urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils._text import to_text
+
+# httplib/http.client connection using unix domain socket
+HTTPConnection = http_client.HTTPConnection
+HTTPSConnection = http_client.HTTPSConnection
+
+import json
+
+
+class UnixHTTPConnection(HTTPConnection):
+ def __init__(self, path):
+ HTTPConnection.__init__(self, 'localhost')
+ self.path = path
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.path)
+ self.sock = sock
+
+
+class LXDClientException(Exception):
+ def __init__(self, msg, **kwargs):
+ self.msg = msg
+ self.kwargs = kwargs
+
+
+class LXDClient(object):
+ def __init__(self, url, key_file=None, cert_file=None, debug=False):
+ """LXD Client.
+
+ :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
+ :type url: ``str``
+ :param key_file: The path of the client certificate key file.
+ :type key_file: ``str``
+ :param cert_file: The path of the client certificate file.
+ :type cert_file: ``str``
+ :param debug: The debug flag. The request and response are stored in logs when debug is true.
+ :type debug: ``bool``
+ """
+ self.url = url
+ self.debug = debug
+ self.logs = []
+ if url.startswith('https:'):
+ self.cert_file = cert_file
+ self.key_file = key_file
+ parts = generic_urlparse(urlparse(self.url))
+ ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ ctx.load_cert_chain(cert_file, keyfile=key_file)
+ self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
+ elif url.startswith('unix:'):
+ unix_socket_path = url[len('unix:'):]
+ self.connection = UnixHTTPConnection(unix_socket_path)
+ else:
+ raise LXDClientException('URL scheme must be unix: or https:')
+
+ def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+ resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
+ if resp_json['type'] == 'async':
+ url = '{0}/wait'.format(resp_json['operation'])
+ resp_json = self._send_request('GET', url)
+ if resp_json['metadata']['status'] != 'Success':
+ self._raise_err_from_json(resp_json)
+ return resp_json
+
+ def authenticate(self, trust_password):
+ body_json = {'type': 'client', 'password': trust_password}
+ return self._send_request('POST', '/1.0/certificates', body_json=body_json)
+
+ def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+ try:
+ body = json.dumps(body_json)
+ self.connection.request(method, url, body=body)
+ resp = self.connection.getresponse()
+ resp_data = resp.read()
+ resp_data = to_text(resp_data, errors='surrogate_or_strict')
+ resp_json = json.loads(resp_data)
+ self.logs.append({
+ 'type': 'sent request',
+ 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
+ 'response': {'json': resp_json}
+ })
+ resp_type = resp_json.get('type', None)
+ if resp_type == 'error':
+ if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
+ return resp_json
+ if resp_json['error'] == "Certificate already in trust store":
+ return resp_json
+ self._raise_err_from_json(resp_json)
+ return resp_json
+ except socket.error as e:
+ raise LXDClientException('cannot connect to the LXD server', err=e)
+
+ def _raise_err_from_json(self, resp_json):
+ err_params = {}
+ if self.debug:
+ err_params['logs'] = self.logs
+ raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
+
+ @staticmethod
+ def _get_err_from_resp_json(resp_json):
+ err = None
+ metadata = resp_json.get('metadata', None)
+ if metadata is not None:
+ err = metadata.get('err', None)
+ if err is None:
+ err = resp_json.get('error', None)
+ return err
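
LXDClient hides the two transports behind a single do() call: synchronous responses are returned as-is, 'async' operations are followed with a GET on '<operation>/wait' before returning, and error responses either raise LXDClientException or are passed through when their error_code is listed in ok_error_codes. A short sketch against a local socket (not part of this patch; the socket path and container name are placeholders):

    # hypothetical usage of module_utils/lxd.py
    from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException

    try:
        client = LXDClient('unix:/var/lib/lxd/unix.socket', debug=True)
        resp = client.do('GET', '/1.0/containers/mycontainer', ok_error_codes=[404])
        container_exists = resp.get('error_code') != 404
    except LXDClientException as e:
        # e.kwargs carries extra context, e.g. the request/response logs when debug=True
        print(e.msg, e.kwargs.get('logs'))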
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py
new file mode 100644
index 00000000..7038fac8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/manageiq.py
@@ -0,0 +1,156 @@
+#
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+
+CLIENT_IMP_ERR = None
+try:
+ from manageiq_client.api import ManageIQClient
+ HAS_CLIENT = True
+except ImportError:
+ CLIENT_IMP_ERR = traceback.format_exc()
+ HAS_CLIENT = False
+
+
+def manageiq_argument_spec():
+ options = dict(
+ url=dict(default=os.environ.get('MIQ_URL', None)),
+ username=dict(default=os.environ.get('MIQ_USERNAME', None)),
+ password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
+ token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']),
+ )
+
+ return dict(
+ manageiq_connection=dict(type='dict',
+ apply_defaults=True,
+ options=options),
+ )
+
+
+def check_client(module):
+ if not HAS_CLIENT:
+ module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
+
+
+def validate_connection_params(module):
+ params = module.params['manageiq_connection']
+ error_str = "missing required argument: manageiq_connection[{}]"
+ url = params['url']
+ token = params['token']
+ username = params['username']
+ password = params['password']
+
+ if (url and username and password) or (url and token):
+ return params
+ for arg in ['url', 'username', 'password']:
+ if params[arg] in (None, ''):
+ module.fail_json(msg=error_str.format(arg))
+
+
+def manageiq_entities():
+ return {
+ 'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
+ 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
+ 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
+ 'service template': 'service_templates', 'template': 'templates',
+ 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
+ }
+
+
+class ManageIQ(object):
+ """
+ class encapsulating ManageIQ API client.
+ """
+
+ def __init__(self, module):
+ # handle import errors
+ check_client(module)
+
+ params = validate_connection_params(module)
+
+ url = params['url']
+ username = params['username']
+ password = params['password']
+ token = params['token']
+ verify_ssl = params['validate_certs']
+ ca_bundle_path = params['ca_cert']
+
+ self._module = module
+ self._api_url = url + '/api'
+ self._auth = dict(user=username, password=password, token=token)
+ try:
+ self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
+ except Exception as e:
+ self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))
+
+ @property
+ def module(self):
+ """ Ansible module module
+
+ Returns:
+ the ansible module
+ """
+ return self._module
+
+ @property
+ def api_url(self):
+ """ Base ManageIQ API
+
+ Returns:
+ the base ManageIQ API
+ """
+ return self._api_url
+
+ @property
+ def client(self):
+ """ ManageIQ client
+
+ Returns:
+ the ManageIQ client
+ """
+ return self._client
+
+ def find_collection_resource_by(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, None otherwise.
+ """
+ try:
+ entity = self.client.collections.__getattribute__(collection_name).get(**params)
+ except ValueError:
+ return None
+ except Exception as e:
+ self.module.fail_json(msg="failed to find resource {error}".format(error=e))
+ return vars(entity)
+
+ def find_collection_resource_or_fail(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, Fail otherwise.
+ """
+ resource = self.find_collection_resource_by(collection_name, **params)
+ if resource:
+ return resource
+ else:
+ msg = "{collection_name} where {params} does not exist in manageiq".format(
+ collection_name=collection_name, params=str(params))
+ self.module.fail_json(msg=msg)
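
The manageiq_* modules share a common pattern: merge manageiq_argument_spec() into their own spec, hand the resulting AnsibleModule to ManageIQ (which validates the connection parameters and opens the client), and then use find_collection_resource_by()/find_collection_resource_or_fail() for lookups. An illustrative skeleton, not part of this patch; the 'name' option and the provider lookup are assumptions:

    # hypothetical skeleton of a manageiq_* module
    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec

    def main():
        argument_spec = dict(name=dict(required=True))
        argument_spec.update(manageiq_argument_spec())
        module = AnsibleModule(argument_spec=argument_spec)

        manageiq = ManageIQ(module)
        provider = manageiq.find_collection_resource_or_fail('providers', name=module.params['name'])
        module.exit_json(changed=False, provider=provider)

    if __name__ == '__main__':
        main()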
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py
new file mode 100644
index 00000000..357fded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/memset.py
@@ -0,0 +1,137 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.basic import json
+
+
+class Response(object):
+ '''
+ Create a response object to mimic that of requests.
+ '''
+
+ def __init__(self):
+ self.content = None
+ self.status_code = None
+
+ def json(self):
+ return json.loads(self.content)
+
+
+def memset_api_call(api_key, api_method, payload=None):
+ '''
+ Generic function which returns results back to calling function.
+
+ Requires an API key and an API method to assemble the API URL.
+ Returns response text to be analysed.
+ '''
+ # instantiate a response object
+ response = Response()
+
+ # if we've already started preloading the payload then copy it
+    # and use that, otherwise we need to instantiate it.
+ if payload is None:
+ payload = dict()
+ else:
+ payload = payload.copy()
+
+ # set some sane defaults
+ has_failed = False
+ msg = None
+
+ data = urlencode(payload)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ api_uri_base = 'https://api.memset.com/v1/json/'
+    api_uri = '{0}{1}/'.format(api_uri_base, api_method)
+
+ try:
+ resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
+ response.content = resp.read().decode('utf-8')
+ response.status_code = resp.getcode()
+ except urllib_error.HTTPError as e:
+ try:
+ errorcode = e.code
+ except AttributeError:
+ errorcode = None
+
+ has_failed = True
+ response.content = e.read().decode('utf8')
+ response.status_code = errorcode
+
+ if response.status_code is not None:
+ msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
+ else:
+ msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
+
+ if msg is None:
+ msg = response.json()
+
+ return(has_failed, msg, response)
+
+
+def check_zone_domain(data, domain):
+ '''
+ Returns true if domain already exists, and false if not.
+ '''
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone_domain in data.json():
+ if zone_domain['domain'] == domain:
+ exists = True
+
+ return(exists)
+
+
+def check_zone(data, name):
+ '''
+ Returns true if zone already exists, and false if not.
+ '''
+ counter = 0
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone in data.json():
+ if zone['nickname'] == name:
+ counter += 1
+ if counter == 1:
+ exists = True
+
+ return(exists, counter)
+
+
+def get_zone_id(zone_name, current_zones):
+ '''
+ Returns the zone's id if it exists and is unique
+ '''
+ zone_exists = False
+ zone_id, msg = None, None
+ zone_list = []
+
+ for zone in current_zones:
+ if zone['nickname'] == zone_name:
+ zone_list.append(zone['id'])
+
+ counter = len(zone_list)
+
+ if counter == 0:
+ msg = 'No matching zone found'
+ elif counter == 1:
+ zone_id = zone_list[0]
+ zone_exists = True
+ elif counter > 1:
+ zone_id = None
+ msg = 'Zone ID could not be returned as duplicate zone names were detected'
+
+ return(zone_exists, msg, counter, zone_id)
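
memset_api_call() never raises on API errors; it always returns the (has_failed, msg, response) triple so the calling module decides whether to fail_json. A brief sketch of listing zones and reusing the check helpers (not part of this patch; the API key is a placeholder and 'dns.zone_list' is the method name used by the collection's memset_zone module):

    # hypothetical usage of module_utils/memset.py
    from ansible_collections.community.general.plugins.module_utils.memset import check_zone, get_zone_id, memset_api_call

    has_failed, msg, response = memset_api_call(api_key='REDACTED', api_method='dns.zone_list')
    if has_failed:
        raise SystemExit(msg)

    exists, counter = check_zone(data=response, name='example-zone')
    zone_exists, msg, counter, zone_id = get_zone_id(zone_name='example-zone', current_zones=response.json())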
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py
new file mode 100644
index 00000000..0e52db7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/module_helper.py
@@ -0,0 +1,302 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import partial, wraps
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ArgFormat(object):
+ """
+ Argument formatter
+ """
+ BOOLEAN = 0
+ PRINTF = 1
+ FORMAT = 2
+
+ @staticmethod
+ def stars_deco(num):
+ if num == 1:
+ def deco(f):
+ return lambda v: f(*v)
+ return deco
+ elif num == 2:
+ def deco(f):
+ return lambda v: f(**v)
+ return deco
+
+ return lambda f: f
+
+ def __init__(self, name, fmt=None, style=FORMAT, stars=0):
+ """
+ Creates a new formatter
+ :param name: Name of the argument to be formatted
+        :param fmt: Either a str to be formatted (printf-style or str.format-style) or a callable that performs the formatting
+        :param style: Which formatting style to apply when fmt is a str (BOOLEAN, PRINTF or FORMAT).
+                      Ignored if fmt is None or not a str (i.e. when it is a callable).
+        :param stars: An int with value 0, 1 or 2, indicating whether to format the value as: value, *value or **value
+ """
+ def printf_fmt(_fmt, v):
+ try:
+ return [_fmt % v]
+ except TypeError as e:
+ if e.args[0] != 'not all arguments converted during string formatting':
+ raise
+ return [_fmt]
+
+ _fmts = {
+ ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
+ ArgFormat.PRINTF: printf_fmt,
+ ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
+ }
+
+ self.name = name
+ self.stars = stars
+
+ if fmt is None:
+ fmt = "{0}"
+ style = ArgFormat.FORMAT
+
+ if isinstance(fmt, str):
+ func = _fmts[style]
+ self.arg_format = partial(func, fmt)
+ elif isinstance(fmt, list) or isinstance(fmt, tuple):
+ self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
+ elif hasattr(fmt, '__call__'):
+ self.arg_format = fmt
+ else:
+ raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
+ 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
+
+ if stars:
+ self.arg_format = (self.stars_deco(stars))(self.arg_format)
+
+ def to_text(self, value):
+ func = self.arg_format
+ return [str(p) for p in func(value)]
+
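# Illustrative sketch, not part of this patch: how the three ArgFormat styles
# render a module parameter into CLI arguments (parameter names are invented):
#
#   ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN).to_text(True)   -> ['--force']
#   ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN).to_text(False)  -> []
#   ArgFormat('name', fmt='--name=%s', style=ArgFormat.PRINTF).to_text('x')    -> ['--name=x']
#   ArgFormat('count', fmt='--count={0}', style=ArgFormat.FORMAT).to_text(3)   -> ['--count=3']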
+
+def cause_changes(func, on_success=True, on_failure=False):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ try:
+            func(self, *args, **kwargs)
+ if on_success:
+ self.changed = True
+ except Exception as e:
+ if on_failure:
+ self.changed = True
+ raise
+ return wrapper
+
+
+def module_fails_on_exception(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ try:
+ func(self, *args, **kwargs)
+ except SystemExit:
+ raise
+ except Exception as e:
+ self.vars.msg = "Module failed with exception: {0}".format(str(e).strip())
+ self.vars.exception = traceback.format_exc()
+ self.module.fail_json(changed=False, msg=self.vars.msg, exception=self.vars.exception, output=self.output, vars=self.vars)
+ return wrapper
+
+
+class DependencyCtxMgr(object):
+ def __init__(self, name, msg=None):
+ self.name = name
+ self.msg = msg
+ self.has_it = False
+ self.exc_type = None
+ self.exc_val = None
+ self.exc_tb = None
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.has_it = exc_type is None
+ self.exc_type = exc_type
+ self.exc_val = exc_val
+ self.exc_tb = exc_tb
+ return not self.has_it
+
+ @property
+ def text(self):
+ return self.msg or str(self.exc_val)
+
+
+class ModuleHelper(object):
+ _dependencies = []
+ module = {}
+ facts_name = None
+
+ class AttrDict(dict):
+ def __getattr__(self, item):
+ return self[item]
+
+ def __init__(self, module=None):
+ self.vars = ModuleHelper.AttrDict()
+ self.output_dict = dict()
+ self.facts_dict = dict()
+ self._changed = False
+
+ if module:
+ self.module = module
+
+ if not isinstance(module, AnsibleModule):
+ self.module = AnsibleModule(**self.module)
+
+ def update_output(self, **kwargs):
+ if kwargs:
+ self.output_dict.update(kwargs)
+
+ def update_facts(self, **kwargs):
+ if kwargs:
+ self.facts_dict.update(kwargs)
+
+ def __init_module__(self):
+ pass
+
+ def __run__(self):
+ raise NotImplementedError()
+
+ @property
+ def changed(self):
+ return self._changed
+
+ @changed.setter
+ def changed(self, value):
+ self._changed = value
+
+ @property
+ def output(self):
+ result = dict(self.vars)
+ result.update(self.output_dict)
+ if self.facts_name:
+ result['ansible_facts'] = {self.facts_name: self.facts_dict}
+ return result
+
+ @module_fails_on_exception
+ def run(self):
+ self.fail_on_missing_deps()
+ self.__init_module__()
+ self.__run__()
+ self.module.exit_json(changed=self.changed, **self.output_dict)
+
+ @classmethod
+ def dependency(cls, name, msg):
+ cls._dependencies.append(DependencyCtxMgr(name, msg))
+ return cls._dependencies[-1]
+
+ def fail_on_missing_deps(self):
+ for d in self._dependencies:
+ if not d.has_it:
+ self.module.fail_json(changed=False,
+                                          exception=''.join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
+ msg=d.text,
+ **self.output_dict)
+
+
+class StateMixin(object):
+ state_param = 'state'
+ default_state = None
+
+ def _state(self):
+ state = self.module.params.get(self.state_param)
+ return self.default_state if state is None else state
+
+ def __run__(self):
+ state = self._state()
+ self.vars.state = state
+
+ # resolve aliases
+ if state not in self.module.params:
+ aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
+ if aliased:
+ state = aliased[0]
+ self.vars.effective_state = state
+
+ method = "state_{0}".format(state)
+ if not hasattr(self, method):
+ return self.__state_fallback__()
+ func = getattr(self, method)
+ return func()
+
+ def __state_fallback__(self):
+ raise ValueError("Cannot find method for state: {0}".format(self._state()))
+
+
+class CmdMixin(object):
+ """
+ Mixin for mapping module options to running a CLI command with its arguments.
+ """
+ command = None
+ command_args_formats = dict()
+ check_rc = False
+ force_lang = "C"
+
+ @property
+ def module_formats(self):
+ result = {}
+ for param in self.module.params.keys():
+ result[param] = ArgFormat(param)
+ return result
+
+ @property
+ def custom_formats(self):
+ result = {}
+ for param, fmt_spec in self.command_args_formats.items():
+ result[param] = ArgFormat(param, **fmt_spec)
+ return result
+
+ def _calculate_args(self, extra_params=None, params=None):
+ def add_arg_formatted_param(_cmd_args, arg_format, _value):
+ args = [x for x in arg_format.to_text(_value)]
+ return _cmd_args + args
+
+ def find_format(_param):
+ return self.custom_formats.get(_param, self.module_formats.get(_param))
+
+ extra_params = extra_params or dict()
+ cmd_args = [self.module.get_bin_path(self.command)]
+ param_list = params if params else self.module.params.keys()
+
+ for param in param_list:
+ if param in self.module.argument_spec:
+ if param not in self.module.params:
+ continue
+ fmt = find_format(param)
+ value = self.module.params[param]
+ else:
+ if param not in extra_params:
+ continue
+ fmt = find_format(param)
+ value = extra_params[param]
+ self.cmd_args = cmd_args
+ cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
+
+ return cmd_args
+
+ def process_command_output(self, rc, out, err):
+ return rc, out, err
+
+ def run_command(self, extra_params=None, params=None, *args, **kwargs):
+ self.vars['cmd_args'] = self._calculate_args(extra_params, params)
+ env_update = kwargs.get('environ_update', {})
+ check_rc = kwargs.get('check_rc', self.check_rc)
+ if self.force_lang:
+ env_update.update({'LANGUAGE': self.force_lang})
+ self.update_output(force_lang=self.force_lang)
+ rc, out, err = self.module.run_command(self.vars['cmd_args'],
+ environ_update=env_update,
+ check_rc=check_rc, *args, **kwargs)
+ self.update_output(rc=rc, stdout=out, stderr=err)
+ return self.process_command_output(rc, out, err)
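
A module built on this helper declares its AnsibleModule arguments in the class-level 'module' dict, optionally mixes in CmdMixin to map parameters onto a command line (via command_args_formats) or StateMixin to dispatch to state_* methods, and implements __run__(). A condensed sketch, not part of this patch; the 'xyz' command, its options and their formats are invented for illustration:

    # hypothetical module built on module_utils/module_helper.py
    from ansible_collections.community.general.plugins.module_utils.module_helper import (
        ArgFormat, CmdMixin, ModuleHelper
    )

    class XyzTool(CmdMixin, ModuleHelper):
        command = 'xyz'
        module = dict(
            argument_spec=dict(
                name=dict(type='str', required=True),
                force=dict(type='bool', default=False),
            ),
            supports_check_mode=True,
        )
        command_args_formats = dict(
            name=dict(fmt='--name={0}'),
            force=dict(fmt='--force', style=ArgFormat.BOOLEAN),
        )

        def __run__(self):
            # builds [<path to xyz>, optionally '--force', '--name=<name>'] and runs it
            rc, dummy_out, dummy_err = self.run_command(params=['force', 'name'])
            self.changed = (rc == 0)

    def main():
        XyzTool().run()

    if __name__ == '__main__':
        main()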
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py
new file mode 100644
index 00000000..6cded8e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/net_tools/nios/api.py
@@ -0,0 +1,590 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Red Hat Inc.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+from functools import partial
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+
+try:
+ from infoblox_client.connector import Connector
+ from infoblox_client.exceptions import InfobloxException
+ HAS_INFOBLOX_CLIENT = True
+except ImportError:
+ HAS_INFOBLOX_CLIENT = False
+
+# defining nios constants
+NIOS_DNS_VIEW = 'view'
+NIOS_NETWORK_VIEW = 'networkview'
+NIOS_HOST_RECORD = 'record:host'
+NIOS_IPV4_NETWORK = 'network'
+NIOS_IPV6_NETWORK = 'ipv6network'
+NIOS_ZONE = 'zone_auth'
+NIOS_PTR_RECORD = 'record:ptr'
+NIOS_A_RECORD = 'record:a'
+NIOS_AAAA_RECORD = 'record:aaaa'
+NIOS_CNAME_RECORD = 'record:cname'
+NIOS_MX_RECORD = 'record:mx'
+NIOS_SRV_RECORD = 'record:srv'
+NIOS_NAPTR_RECORD = 'record:naptr'
+NIOS_TXT_RECORD = 'record:txt'
+NIOS_NSGROUP = 'nsgroup'
+NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
+NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
+NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
+NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
+NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
+NIOS_MEMBER = 'member'
+
+NIOS_PROVIDER_SPEC = {
+ 'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
+ 'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
+ 'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
+ 'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
+ 'silent_ssl_warnings': dict(type='bool', default=True),
+ 'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
+ 'http_pool_connections': dict(type='int', default=10),
+ 'http_pool_maxsize': dict(type='int', default=10),
+ 'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
+ 'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
+    'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RESULTS']))
+}
+
+
+def get_connector(*args, **kwargs):
+ ''' Returns an instance of infoblox_client.connector.Connector
+ :params args: positional arguments are silently ignored
+ :params kwargs: dict that is passed to Connector init
+ :returns: Connector
+ '''
+ if not HAS_INFOBLOX_CLIENT:
+ raise Exception('infoblox-client is required but does not appear '
+ 'to be installed. It can be installed using the '
+ 'command `pip install infoblox-client`')
+
+ if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
+ raise Exception('invalid or unsupported keyword argument for connector')
+ for key, value in iteritems(NIOS_PROVIDER_SPEC):
+ if key not in kwargs:
+ # apply default values from NIOS_PROVIDER_SPEC since we cannot just
+ # assume the provider values are coming from AnsibleModule
+ if 'default' in value:
+ kwargs[key] = value['default']
+
+ # override any values with env variables unless they were
+ # explicitly set
+ env = ('INFOBLOX_%s' % key).upper()
+ if env in os.environ:
+ kwargs[key] = os.environ.get(env)
+
+ if 'validate_certs' in kwargs.keys():
+ kwargs['ssl_verify'] = kwargs['validate_certs']
+ kwargs.pop('validate_certs', None)
+
+ return Connector(kwargs)
+
+
+def normalize_extattrs(value):
+ ''' Normalize extattrs field to expected format
+ The module accepts extattrs as key/value pairs. This method will
+ transform the key/value pairs into a structure suitable for
+ sending across WAPI in the format of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ '''
+ return dict([(k, {'value': v}) for k, v in iteritems(value)])
+
+
+def flatten_extattrs(value):
+ ''' Flatten the key/value struct for extattrs
+ WAPI returns extattrs field as a dict in form of:
+ extattrs: {
+ key: {
+ value: <value>
+ }
+ }
+ This method will flatten the structure to:
+ extattrs: {
+ key: value
+ }
+ '''
+ return dict([(k, v['value']) for k, v in iteritems(value)])
+
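# Illustrative round trip, not part of this patch:
#   normalize_extattrs({'Site': 'HQ'})           -> {'Site': {'value': 'HQ'}}
#   flatten_extattrs({'Site': {'value': 'HQ'}})  -> {'Site': 'HQ'}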
+
+def member_normalize(member_spec):
+ ''' Transforms the member module arguments into a valid WAPI struct
+ This function will transform the arguments into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ key: <value>,
+ }
+ It will remove any arguments that are set to None since WAPI will error on
+ that condition.
+ The remainder of the value validation is performed by WAPI
+ Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
+ In this function, they are converted to dictionary.
+ '''
+ member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
+ 'pre_provisioning', 'network_setting', 'v6_network_setting',
+ 'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
+ 'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
+ for key in list(member_spec.keys()):
+ if key in member_elements and member_spec[key] is not None:
+ member_spec[key] = member_spec[key][0]
+ if isinstance(member_spec[key], dict):
+ member_spec[key] = member_normalize(member_spec[key])
+ elif isinstance(member_spec[key], list):
+ for x in member_spec[key]:
+ if isinstance(x, dict):
+ x = member_normalize(x)
+ elif member_spec[key] is None:
+ del member_spec[key]
+ return member_spec
+
+
+class WapiBase(object):
+ ''' Base class for implementing Infoblox WAPI API '''
+ provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
+
+ def __init__(self, provider):
+ self.connector = get_connector(**provider)
+
+ def __getattr__(self, name):
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ if name.startswith('_'):
+ raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
+ return partial(self._invoke_method, name)
+
+ def _invoke_method(self, name, *args, **kwargs):
+ try:
+ method = getattr(self.connector, name)
+ return method(*args, **kwargs)
+ except InfobloxException as exc:
+ if hasattr(self, 'handle_exception'):
+ self.handle_exception(name, exc)
+ else:
+ raise
+
+
+class WapiLookup(WapiBase):
+ ''' Implements WapiBase for lookup plugins '''
+ def handle_exception(self, method_name, exc):
+ if ('text' in exc.response):
+ raise Exception(exc.response['text'])
+ else:
+ raise Exception(exc)
+
+
+class WapiInventory(WapiBase):
+ ''' Implements WapiBase for dynamic inventory script '''
+ pass
+
+
+class WapiModule(WapiBase):
+ ''' Implements WapiBase for executing a NIOS module '''
+ def __init__(self, module):
+ self.module = module
+ provider = module.params['provider']
+ try:
+ super(WapiModule, self).__init__(provider)
+ except Exception as exc:
+ self.module.fail_json(msg=to_text(exc))
+
+ def handle_exception(self, method_name, exc):
+ ''' Handles any exceptions raised
+ This method will be called if an InfobloxException is raised for
+        any call to the instance of Connector, as well as for generic
+        exceptions. This method will then gracefully fail the module.
+ :args exc: instance of InfobloxException
+ '''
+ if ('text' in exc.response):
+ self.module.fail_json(
+ msg=exc.response['text'],
+ type=exc.response['Error'].split(':')[0],
+ code=exc.response.get('code'),
+ operation=method_name
+ )
+ else:
+ self.module.fail_json(msg=to_native(exc))
+
+ def run(self, ib_obj_type, ib_spec):
+        ''' Runs the module and performs configuration tasks
+ :args ib_obj_type: the WAPI object type to operate against
+ :args ib_spec: the specification for the WAPI object as a dict
+ :returns: a results dict
+ '''
+
+ update = new_name = None
+ state = self.module.params['state']
+ if state not in ('present', 'absent'):
+ self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
+
+ result = {'changed': False}
+
+ obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+
+ # get object reference
+ ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
+ proposed_object = {}
+ for key, value in iteritems(ib_spec):
+ if self.module.params[key] is not None:
+ if 'transform' in value:
+ proposed_object[key] = value['transform'](self.module)
+ else:
+ proposed_object[key] = self.module.params[key]
+
+        # If configure_for_dns is set to False, delete the default DNS view from the proposed object; fail if a non-default view is set
+ if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ del proposed_object['view']
+ elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
+ and ib_obj_type == NIOS_HOST_RECORD:
+ self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
+
+ if ib_obj_ref:
+ if len(ib_obj_ref) > 1:
+ for each in ib_obj_ref:
+ # To check for existing A_record with same name with input A_record by IP
+ if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
+ current_object = each
+ # To check for existing Host_record with same name with input Host_record by IP
+ elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
+ == proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
+ current_object = each
+ # Else set the current_object with input value
+ else:
+ current_object = obj_filter
+ ref = None
+ else:
+ current_object = ib_obj_ref[0]
+ if 'extattrs' in current_object:
+ current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
+ if current_object.get('_ref'):
+ ref = current_object.pop('_ref')
+ else:
+ current_object = obj_filter
+ ref = None
+ # checks if the object type is member to normalize the attributes being passed
+ if (ib_obj_type == NIOS_MEMBER):
+ proposed_object = member_normalize(proposed_object)
+
+ # checks if the name's field has been updated
+ if update and new_name:
+ proposed_object['name'] = new_name
+
+ check_remove = []
+ if (ib_obj_type == NIOS_HOST_RECORD):
+            # this check is for idempotency: if the same IP address is passed,
+            # the 'add' param is removed; the same holds true for the 'remove' case.
+            if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
+ for each in current_object['ipv4addrs']:
+ if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['add']
+ break
+ check_remove += each.values()
+ if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ del proposed_object['ipv4addrs'][0]['remove']
+
+ res = None
+ modified = not self.compare_objects(current_object, proposed_object)
+ if 'extattrs' in proposed_object:
+ proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
+
+ # Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
+ proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
+
+ if state == 'present':
+ if ref is None:
+ if not self.module.check_mode:
+ self.create_object(ib_obj_type, proposed_object)
+ result['changed'] = True
+ # Check if NIOS_MEMBER and the flag to call function create_token is set
+ elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
+ proposed_object = None
+ # the function creates a token that can be used by a pre-provisioned member to join the grid
+ result['api_results'] = self.call_func('create_token', ref, proposed_object)
+ result['changed'] = True
+ elif modified:
+ if 'ipv4addrs' in proposed_object:
+ if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
+ self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
+
+ if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
+ run_update = True
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ if 'ipv4addrs' in proposed_object:
+                        if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
+ run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
+ if run_update:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ else:
+ res = ref
+ if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
+ # popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ del proposed_object['view']
+ if not self.module.check_mode:
+ res = self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif 'network_view' in proposed_object:
+ proposed_object.pop('network_view')
+ result['changed'] = True
+ if not self.module.check_mode and res is None:
+ proposed_object = self.on_update(proposed_object, ib_spec)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+
+ elif state == 'absent':
+ if ref is not None:
+ if 'ipv4addrs' in proposed_object:
+ if 'remove' in proposed_object['ipv4addrs'][0]:
+ self.check_if_add_remove_ip_arg_exists(proposed_object)
+ self.update_object(ref, proposed_object)
+ result['changed'] = True
+ elif not self.module.check_mode:
+ self.delete_object(ref)
+ result['changed'] = True
+
+ return result
+
+ def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
+        ''' Send POST request if the host record input name and the retrieved ref name are the same,
+            but the input IP and the retrieved IP differ'''
+
+ if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
+ obj_host_name = obj_filter['name']
+ ref_host_name = ib_obj_ref[0]['name']
+ if 'ipv4addrs' in (current_object and proposed_object):
+ current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
+ proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
+ elif 'ipv6addrs' in (current_object and proposed_object):
+ current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
+ proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
+
+ if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
+ self.create_object(ib_obj_type, proposed_object)
+
+ def check_if_nios_next_ip_exists(self, proposed_object):
+        ''' Check whether the nios_next_ip argument is passed in ipaddr while creating
+            a host record; if so, format the proposed object's ipv4addrs to pass
+            func:nextavailableip and the ipaddr range, so the host record is created with
+            the next available IP in a single call, avoiding any race condition '''
+
+ if 'ipv4addrs' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+ elif 'ipv4addr' in proposed_object:
+ if 'nios_next_ip' in proposed_object['ipv4addr']:
+ ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
+ proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
+
+ return proposed_object
+
+ def check_if_add_remove_ip_arg_exists(self, proposed_object):
+ '''
+        Check whether the add/remove param is passed in the args and set to true;
+        if so, update the proposed dictionary to add/remove the IP to/from the
+        existing host_record. If the user passes the argument with a false value,
+        nothing is done.
+ :returns: True if param is changed based on add/remove, and also the
+ changed proposed_object.
+ '''
+ update = False
+ if 'add' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['add']:
+ proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs+'][0]['add']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['add']
+ elif 'remove' in proposed_object['ipv4addrs'][0]:
+ if proposed_object['ipv4addrs'][0]['remove']:
+ proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs']
+ del proposed_object['ipv4addrs-'][0]['remove']
+ update = True
+ else:
+ del proposed_object['ipv4addrs'][0]['remove']
+ return update, proposed_object
+
+ def issubset(self, item, objects):
+ ''' Checks if item is a subset of objects
+ :args item: the subset item to validate
+ :args objects: superset list of objects to validate against
+ :returns: True if item is a subset of one entry in objects otherwise
+ this method will return None
+ '''
+ for obj in objects:
+ if isinstance(item, dict):
+ if all(entry in obj.items() for entry in item.items()):
+ return True
+ else:
+ if item in obj:
+ return True
+
+ def compare_objects(self, current_object, proposed_object):
+ for key, proposed_item in iteritems(proposed_object):
+ current_item = current_object.get(key)
+
+ # if proposed has a key that current doesn't then the objects are
+ # not equal and False will be immediately returned
+ if current_item is None:
+ return False
+
+ elif isinstance(proposed_item, list):
+ if key == 'aliases':
+ if set(current_item) != set(proposed_item):
+ return False
+ for subitem in proposed_item:
+ if not self.issubset(subitem, current_item):
+ return False
+
+ elif isinstance(proposed_item, dict):
+ return self.compare_objects(current_item, proposed_item)
+
+ else:
+ if current_item != proposed_item:
+ return False
+
+ return True
+
+ def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
+ ''' this function gets the reference object of pre-existing nios objects '''
+
+ update = False
+ old_name = new_name = None
+ if ('name' in obj_filter):
+ # gets and returns the current object based on name/old_name passed
+ try:
+ name_obj = self.module._check_type_dict(obj_filter['name'])
+ old_name = name_obj['old_name']
+ new_name = name_obj['new_name']
+ except TypeError:
+ name = obj_filter['name']
+
+ if old_name and new_name:
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
+ elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
+ test_obj_filter = obj_filter
+ else:
+ test_obj_filter = dict([('name', old_name)])
+ # get the object reference
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ if ib_obj:
+ obj_filter['name'] = new_name
+ else:
+ test_obj_filter['name'] = new_name
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
+ update = True
+ return ib_obj, update, new_name
+ if (ib_obj_type == NIOS_HOST_RECORD):
+ # to check only by name if dns bypassing is set
+ if not obj_filter['configure_for_dns']:
+ test_obj_filter = dict([('name', name)])
+ else:
+ test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
+ elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
+ test_obj_filter = dict([['mac', obj_filter['mac']]])
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where a_record with uppercase name was returning null and was failing
+ test_obj_filter = obj_filter
+ test_obj_filter['name'] = test_obj_filter['name'].lower()
+ # resolves issue where multiple a_records with same name and different IP address
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+            # if none of the cases above set test_obj_filter, fall back to the passed obj_filter
+ else:
+ test_obj_filter = obj_filter
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_A_RECORD):
+ # resolves issue where multiple a_records with same name and different IP address
+ test_obj_filter = obj_filter
+ try:
+ ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
+ ipaddr = ipaddr_obj['old_ipv4addr']
+ except TypeError:
+ ipaddr = obj_filter['ipv4addr']
+ test_obj_filter['ipv4addr'] = ipaddr
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_TXT_RECORD):
+ # resolves issue where multiple txt_records with same name and different text
+ test_obj_filter = obj_filter
+ try:
+ text_obj = self.module._check_type_dict(obj_filter['text'])
+ txt = text_obj['old_text']
+ except TypeError:
+ txt = obj_filter['text']
+ test_obj_filter['text'] = txt
+ ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
+ elif (ib_obj_type == NIOS_ZONE):
+ # del key 'restart_if_needed' as nios_zone get_object fails with the key present
+ temp = ib_spec['restart_if_needed']
+ del ib_spec['restart_if_needed']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ # reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
+ if not ib_obj:
+ ib_spec['restart_if_needed'] = temp
+ elif (ib_obj_type == NIOS_MEMBER):
+ # del key 'create_token' as nios_member get_object fails with the key present
+ temp = ib_spec['create_token']
+ del ib_spec['create_token']
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ if temp:
+ # reinstate 'create_token' key
+ ib_spec['create_token'] = temp
+ else:
+ ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
+ return ib_obj, update, new_name
+
+ def on_update(self, proposed_object, ib_spec):
+        ''' Event called before the update is sent to the API endpoint
+ This method will allow the final proposed object to be changed
+ and/or keys filtered before it is sent to the API endpoint to
+ be processed.
+ :args proposed_object: A dict item that will be encoded and sent
+            to the API endpoint with the updated data structure
+ :returns: updated object to be sent to API endpoint
+ '''
+ keys = set()
+ for key, value in iteritems(proposed_object):
+ update = ib_spec[key].get('update', True)
+ if not update:
+ keys.add(key)
+ return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
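
A nios_* module ties this together by defining an ib_spec (fields flagged with ib_req form the object filter), merging it with WapiModule.provider_spec into the argument_spec, and delegating all create/update/delete logic to WapiModule.run(). An illustrative outline mirroring the structure of the collection's nios_a_record module (not part of this patch):

    # hypothetical skeleton of a nios_* module for A records
    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule, NIOS_A_RECORD

    def main():
        ib_spec = dict(
            name=dict(required=True, ib_req=True),
            view=dict(default='default', aliases=['dns_view'], ib_req=True),
            ipv4addr=dict(aliases=['ipv4'], ib_req=True),
            ttl=dict(type='int'),
            extattrs=dict(type='dict'),
            comment=dict(),
        )
        argument_spec = dict(state=dict(default='present', choices=['present', 'absent']))
        argument_spec.update(ib_spec)
        argument_spec.update(WapiModule.provider_spec)

        module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
        wapi = WapiModule(module)
        result = wapi.run(NIOS_A_RECORD, ib_spec)
        module.exit_json(**result)

    if __name__ == '__main__':
        main()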
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py
new file mode 100644
index 00000000..466d2665
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneandone.py
@@ -0,0 +1,263 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+
+
+class OneAndOneResources:
+ firewall_policy = 'firewall_policy'
+ load_balancer = 'load_balancer'
+ monitoring_policy = 'monitoring_policy'
+ private_network = 'private_network'
+ public_ip = 'public_ip'
+ role = 'role'
+ server = 'server'
+ user = 'user'
+ vpn = 'vpn'
+
+
+def get_resource(oneandone_conn, resource_type, resource_id):
+ switcher = {
+ 'firewall_policy': oneandone_conn.get_firewall,
+ 'load_balancer': oneandone_conn.get_load_balancer,
+ 'monitoring_policy': oneandone_conn.get_monitoring_policy,
+ 'private_network': oneandone_conn.get_private_network,
+ 'public_ip': oneandone_conn.get_public_ip,
+ 'role': oneandone_conn.get_role,
+ 'server': oneandone_conn.get_server,
+ 'user': oneandone_conn.get_user,
+ 'vpn': oneandone_conn.get_vpn,
+ }
+
+ return switcher.get(resource_type, None)(resource_id)
+
+
+def get_datacenter(oneandone_conn, datacenter, full_object=False):
+ """
+ Validates the datacenter exists by ID or country code.
+ Returns the datacenter ID.
+ """
+ for _datacenter in oneandone_conn.list_datacenters():
+ if datacenter in (_datacenter['id'], _datacenter['country_code']):
+ if full_object:
+ return _datacenter
+ return _datacenter['id']
+
+
+def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
+ """
+ Validates the fixed instance size exists by ID or name.
+ Return the instance size ID.
+ """
+ for _fixed_instance_size in oneandone_conn.fixed_server_flavors():
+ if fixed_instance_size in (_fixed_instance_size['id'],
+ _fixed_instance_size['name']):
+ if full_object:
+ return _fixed_instance_size
+ return _fixed_instance_size['id']
+
+
+def get_appliance(oneandone_conn, appliance, full_object=False):
+ """
+ Validates the appliance exists by ID or name.
+ Return the appliance ID.
+ """
+ for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
+ if appliance in (_appliance['id'], _appliance['name']):
+ if full_object:
+ return _appliance
+ return _appliance['id']
+
+
+def get_private_network(oneandone_conn, private_network, full_object=False):
+ """
+ Validates the private network exists by ID or name.
+ Return the private network ID.
+ """
+ for _private_network in oneandone_conn.list_private_networks():
+ if private_network in (_private_network['name'],
+ _private_network['id']):
+ if full_object:
+ return _private_network
+ return _private_network['id']
+
+
+def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
+ """
+ Validates the monitoring policy exists by ID or name.
+ Return the monitoring policy ID.
+ """
+ for _monitoring_policy in oneandone_conn.list_monitoring_policies():
+ if monitoring_policy in (_monitoring_policy['name'],
+ _monitoring_policy['id']):
+ if full_object:
+ return _monitoring_policy
+ return _monitoring_policy['id']
+
+
+def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
+ """
+ Validates the firewall policy exists by ID or name.
+ Return the firewall policy ID.
+ """
+ for _firewall_policy in oneandone_conn.list_firewall_policies():
+ if firewall_policy in (_firewall_policy['name'],
+ _firewall_policy['id']):
+ if full_object:
+ return _firewall_policy
+ return _firewall_policy['id']
+
+
+def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
+ """
+ Validates the load balancer exists by ID or name.
+ Return the load balancer ID.
+ """
+ for _load_balancer in oneandone_conn.list_load_balancers():
+ if load_balancer in (_load_balancer['name'],
+ _load_balancer['id']):
+ if full_object:
+ return _load_balancer
+ return _load_balancer['id']
+
+
+def get_server(oneandone_conn, instance, full_object=False):
+ """
+ Validates that the server exists whether by ID or name.
+ Returns the server if one was found.
+ """
+ for server in oneandone_conn.list_servers(per_page=1000):
+ if instance in (server['id'], server['name']):
+ if full_object:
+ return server
+ return server['id']
+
+
+def get_user(oneandone_conn, user, full_object=False):
+ """
+ Validates that the user exists by ID or a name.
+ Returns the user if one was found.
+ """
+ for _user in oneandone_conn.list_users(per_page=1000):
+ if user in (_user['id'], _user['name']):
+ if full_object:
+ return _user
+ return _user['id']
+
+
+def get_role(oneandone_conn, role, full_object=False):
+ """
+ Given a name, validates that the role exists
+ whether it is a proper ID or a name.
+ Returns the role if one was found, else None.
+ """
+ for _role in oneandone_conn.list_roles(per_page=1000):
+ if role in (_role['id'], _role['name']):
+ if full_object:
+ return _role
+ return _role['id']
+
+
+def get_vpn(oneandone_conn, vpn, full_object=False):
+ """
+ Validates that the vpn exists by ID or a name.
+ Returns the vpn if one was found.
+ """
+ for _vpn in oneandone_conn.list_vpns(per_page=1000):
+ if vpn in (_vpn['id'], _vpn['name']):
+ if full_object:
+ return _vpn
+ return _vpn['id']
+
+
+def get_public_ip(oneandone_conn, public_ip, full_object=False):
+ """
+ Validates that the public ip exists by ID or a name.
+ Returns the public ip if one was found.
+ """
+ for _public_ip in oneandone_conn.list_public_ips(per_page=1000):
+ if public_ip in (_public_ip['id'], _public_ip['ip']):
+ if full_object:
+ return _public_ip
+ return _public_ip['id']
+
+
+def wait_for_resource_creation_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource create operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the resource info
+ resource = get_resource(oneandone_conn, resource_type, resource_id)
+
+ if resource_type == OneAndOneResources.server:
+ resource_state = resource['status']['state']
+ else:
+ resource_state = resource['state']
+
+ if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
+ (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
+ return
+ elif resource_state.lower() == 'failed':
+ raise Exception('%s creation failed for %s' % (resource_type, resource_id))
+ elif resource_state.lower() in ('active',
+ 'enabled',
+ 'deploying',
+ 'configuring'):
+ continue
+ else:
+ raise Exception(
+ 'Unknown %s state %s' % (resource_type, resource_state))
+
+ raise Exception(
+ 'Timed out waiting for %s completion for %s' % (resource_type, resource_id))
+
+
+def wait_for_resource_deletion_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource delete operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the operation info
+ logs = oneandone_conn.list_logs(q='DELETE',
+ period='LAST_HOUR',
+ sort='-start_date')
+
+ if resource_type == OneAndOneResources.server:
+ _type = 'VM'
+ elif resource_type == OneAndOneResources.private_network:
+ _type = 'PRIVATENETWORK'
+ else:
+ raise Exception(
+ 'Unsupported wait_for delete operation for %s resource' % resource_type)
+
+ for log in logs:
+ if (log['resource']['id'] == resource_id and
+ log['action'] == 'DELETE' and
+ log['type'] == _type and
+ log['status']['state'] == 'OK'):
+ return
+ raise Exception(
+ 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id))
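
These lookup and wait helpers expect an authenticated 1&1 SDK connection and are polled by the oneandone_* modules after each create or delete request. A brief sketch (not part of this patch; the token, server name and timeouts are placeholders):

    # hypothetical usage of module_utils/oneandone.py
    import oneandone.client
    from ansible_collections.community.general.plugins.module_utils.oneandone import (
        OneAndOneResources, get_server, wait_for_resource_creation_completion
    )

    conn = oneandone.client.OneAndOneService(api_token='REDACTED')
    server = get_server(conn, 'my-server', full_object=True)
    if server:
        wait_for_resource_creation_completion(
            conn, OneAndOneResources.server, server['id'],
            wait_timeout=600, wait_interval=5)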
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py
new file mode 100644
index 00000000..bfa5f091
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oneview.py
@@ -0,0 +1,485 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import json
+import os
+import traceback
+
+HPE_ONEVIEW_IMP_ERR = None
+try:
+ from hpOneView.oneview_client import OneViewClient
+ HAS_HPE_ONEVIEW = True
+except ImportError:
+ HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
+ HAS_HPE_ONEVIEW = False
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+def transform_list_to_dict(list_):
+ """
+ Transforms a list into a dictionary, putting values as keys.
+
+ :arg list list_: List of values
+ :return: dict: dictionary built
+ """
+
+ ret = {}
+
+ if not list_:
+ return ret
+
+ for value in list_:
+ if isinstance(value, Mapping):
+ ret.update(value)
+ else:
+ ret[to_native(value, errors='surrogate_or_strict')] = True
+
+ return ret
+
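For orientation, a small example of the transformation (illustrative only):

    # Illustrative only: plain values become keys mapped to True, mappings are merged in.
    options = transform_list_to_dict(['name', 'uri', {'scope': 'all'}])
    # -> {'name': True, 'uri': True, 'scope': 'all'}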
+
+def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
+ """
+ Merge two lists by the key. It basically:
+
+ 1. Adds the items that are present on updated_list and are absent on original_list.
+
+ 2. Removes items that are absent on updated_list and are present on original_list.
+
+ 3. For all items that are in both lists, overwrites the values from the original item by the updated item.
+
+ :arg list original_list: original list.
+ :arg list updated_list: list with changes.
+ :arg str key: unique identifier.
+ :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
+ if its values are null.
+ :return: list: Lists merged.
+ """
+ ignore_when_null = [] if ignore_when_null is None else ignore_when_null
+
+ if not original_list:
+ return updated_list
+
+ items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
+
+ merged_items = collections.OrderedDict()
+
+ for item in updated_list:
+ item_key = item[key]
+ if item_key in items_map:
+ for ignored_key in ignore_when_null:
+ if ignored_key in item and item[ignored_key] is None:
+ item.pop(ignored_key)
+ merged_items[item_key] = items_map[item_key]
+ merged_items[item_key].update(item)
+ else:
+ merged_items[item_key] = item
+
+ return list(merged_items.values())
+
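A short example of the merge semantics (illustrative only; the 'id' and 'port' keys are invented for the example):

    # Illustrative only: items are matched on 'id'; a null 'port' in the update is ignored.
    original = [{'id': 1, 'port': 80, 'state': 'on'}, {'id': 2, 'port': 22, 'state': 'on'}]
    updated = [{'id': 1, 'port': None, 'state': 'off'}, {'id': 3, 'port': 443, 'state': 'on'}]
    merge_list_by_key(original, updated, key='id', ignore_when_null=['port'])
    # -> [{'id': 1, 'port': 80, 'state': 'off'}, {'id': 3, 'port': 443, 'state': 'on'}]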
+
+def _str_sorted(obj):
+ if isinstance(obj, Mapping):
+ return json.dumps(obj, sort_keys=True)
+ else:
+ return str(obj)
+
+
+def _standardize_value(value):
+ """
+ Convert value to string to enhance the comparison.
+
+ :arg value: Any object type.
+
+ :return: str: Converted value.
+ """
+ if isinstance(value, float) and value.is_integer():
+ # Workaround to avoid erroneous comparison between int and float
+ # Strips the trailing .0 from integer-valued floats
+ value = int(value)
+
+ return str(value)
+
+
+class OneViewModuleException(Exception):
+ """
+ OneView base Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ oneview_response (dict): OneView rest response.
+ """
+
+ def __init__(self, data):
+ self.msg = None
+ self.oneview_response = None
+
+ if isinstance(data, six.string_types):
+ self.msg = data
+ else:
+ self.oneview_response = data
+
+ if data and isinstance(data, dict):
+ self.msg = data.get('message')
+
+ if self.oneview_response:
+ Exception.__init__(self, self.msg, self.oneview_response)
+ else:
+ Exception.__init__(self, self.msg)
+
+
+class OneViewModuleTaskError(OneViewModuleException):
+ """
+ OneView Task Error Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ error_code (str): A code which uniquely identifies the specific error.
+ """
+
+ def __init__(self, msg, error_code=None):
+ super(OneViewModuleTaskError, self).__init__(msg)
+ self.error_code = error_code
+
+
+class OneViewModuleValueError(OneViewModuleException):
+ """
+ OneView Value Error.
+ The exception is raised when the data contains an inappropriate value.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+class OneViewModuleResourceNotFound(OneViewModuleException):
+ """
+ OneView Resource Not Found Exception.
+ The exception is raised when an associated resource was not found.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OneViewModuleBase(object):
+ MSG_CREATED = 'Resource created successfully.'
+ MSG_UPDATED = 'Resource updated successfully.'
+ MSG_DELETED = 'Resource deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Resource is already present.'
+ MSG_ALREADY_ABSENT = 'Resource is already absent.'
+ MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
+
+ ONEVIEW_COMMON_ARGS = dict(
+ config=dict(type='path'),
+ hostname=dict(type='str'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ api_version=dict(type='int'),
+ image_streamer_hostname=dict(type='str')
+ )
+
+ ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
+
+ resource_client = None
+
+ def __init__(self, additional_arg_spec=None, validate_etag_support=False):
+ """
+ OneViewModuleBase constructor.
+
+ :arg dict additional_arg_spec: Additional argument spec definition.
+ :arg bool validate_etag_support: Enables support to eTag validation.
+ """
+ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ self._check_hpe_oneview_sdk()
+ self._create_oneview_client()
+
+ self.state = self.module.params.get('state')
+ self.data = self.module.params.get('data')
+
+ # Preload params for get_all - used by facts
+ self.facts_params = self.module.params.get('params') or {}
+
+ # Preload options as dict - used by facts
+ self.options = transform_list_to_dict(self.module.params.get('options'))
+
+ self.validate_etag_support = validate_etag_support
+
+ def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
+
+ if validate_etag_support:
+ merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
+
+ if additional_arg_spec:
+ merged_arg_spec.update(additional_arg_spec)
+
+ return merged_arg_spec
+
+ def _check_hpe_oneview_sdk(self):
+ if not HAS_HPE_ONEVIEW:
+ self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
+
+ def _create_oneview_client(self):
+ if self.module.params.get('hostname'):
+ config = dict(ip=self.module.params['hostname'],
+ credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
+ api_version=self.module.params['api_version'],
+ image_streamer_ip=self.module.params['image_streamer_hostname'])
+ self.oneview_client = OneViewClient(config)
+ elif not self.module.params['config']:
+ self.oneview_client = OneViewClient.from_environment_variables()
+ else:
+ self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
+
+ @abc.abstractmethod
+ def execute_module(self):
+ """
+ Abstract method, must be implemented by the inheritor.
+
+ This method is called from the run method. It should contain the module logic.
+
+ :return: dict: It must return a dictionary with the attributes for the module result,
+ such as ansible_facts, msg and changed.
+ """
+ pass
+
+ def run(self):
+ """
+ Common implementation of the OneView run modules.
+
+ It calls the inheritor's 'execute_module' function and sends the result back to Ansible.
+
+ It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
+
+ """
+ try:
+ if self.validate_etag_support:
+ if not self.module.params.get('validate_etag'):
+ self.oneview_client.connection.disable_etag_validation()
+
+ result = self.execute_module()
+
+ if "changed" not in result:
+ result['changed'] = False
+
+ self.module.exit_json(**result)
+
+ except OneViewModuleException as exception:
+ error_msg = '; '.join(to_native(e) for e in exception.args)
+ self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+ def resource_absent(self, resource, method='delete'):
+ """
+ Generic implementation of the absent state for the OneView resources.
+
+ It checks if the resource needs to be removed.
+
+ :arg dict resource: Resource to delete.
+ :arg str method: Function of the OneView client that will be called for resource deletion.
+ Usually delete or remove.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if resource:
+ getattr(self.resource_client, method)(resource)
+
+ return {"changed": True, "msg": self.MSG_DELETED}
+ else:
+ return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
+
+ def get_by_name(self, name):
+ """
+ Generic get by name implementation.
+
+ :arg str name: Resource name to search for.
+
+ :return: The resource found or None.
+ """
+ result = self.resource_client.get_by('name', name)
+ return result[0] if result else None
+
+ def resource_present(self, resource, fact_name, create_method='create'):
+ """
+ Generic implementation of the present state for the OneView resources.
+
+ It checks if the resource needs to be created or updated.
+
+ :arg dict resource: Resource to create or update.
+ :arg str fact_name: Name of the fact returned to Ansible.
+ :arg str create_method: Function of the OneView client that will be called for resource creation.
+ Usually create or add.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+
+ changed = False
+ if "newName" in self.data:
+ self.data["name"] = self.data.pop("newName")
+
+ if not resource:
+ resource = getattr(self.resource_client, create_method)(self.data)
+ msg = self.MSG_CREATED
+ changed = True
+
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ if self.compare(resource, merged_data):
+ msg = self.MSG_ALREADY_PRESENT
+ else:
+ resource = self.resource_client.update(merged_data)
+ changed = True
+ msg = self.MSG_UPDATED
+
+ return dict(
+ msg=msg,
+ changed=changed,
+ ansible_facts={fact_name: resource}
+ )
+
+ def resource_scopes_set(self, state, fact_name, scope_uris):
+ """
+ Generic implementation of the scopes update PATCH for the OneView resources.
+ It checks if the resource needs to be updated with the current scopes.
+ This method is meant to be run after ensuring the present state.
+ :arg dict state: Dict containing the data from the last state results in the resource.
+ It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
+ :arg str fact_name: Name of the fact returned to Ansible.
+ :arg list scope_uris: List with all the scope URIs to be added to the resource.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if scope_uris is None:
+ scope_uris = []
+ resource = state['ansible_facts'][fact_name]
+ operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
+
+ if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
+ state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
+ state['changed'] = True
+ state['msg'] = self.MSG_UPDATED
+
+ return state
+
+ def compare(self, first_resource, second_resource):
+ """
+ Recursively compares dictionary contents for equivalence, ignoring types and element order.
+ Particularities of the comparison:
+ - A nonexistent key is treated as a key with value None
+ - These values are considered equal: None, empty, False
+ - Lists are compared value by value after a sort, if they have the same size.
+ - Each element is converted to str before the comparison.
+ :arg dict first_resource: first dictionary
+ :arg dict second_resource: second dictionary
+ :return: bool: True when equal, False when different.
+ """
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The first resource is True / Not Null and the second resource is False / Null
+ if resource1 and not resource2:
+ self.module.log("resource1 and not resource2. " + debug_resources)
+ return False
+
+ # Checks all keys in first dict against the second dict
+ for key in resource1:
+ if key not in resource2:
+ if resource1[key] is not None:
+ # A nonexistent key is equivalent to a key with value None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ # If both values are null, empty or False it will be considered equal.
+ elif not resource1[key] and not resource2[key]:
+ continue
+ elif isinstance(resource1[key], Mapping):
+ # recursive call
+ if not self.compare(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif isinstance(resource1[key], list):
+ # change comparison function to compare_list
+ if not self.compare_list(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ # Checks all keys in the second dict, looking for missing elements
+ for key in resource2.keys():
+ if key not in resource1:
+ if resource2[key] is not None:
+ # A nonexistent key is equivalent to a key with value None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ return True
+
+ def compare_list(self, first_resource, second_resource):
+ """
+ Recursively compares list contents for equivalence, ignoring types and element order.
+ Lists with same size are compared value by value after a sort,
+ each element is converted to str before the comparison.
+ :arg list first_resource: first list
+ :arg list second_resource: second list
+ :return: True when equal; False when different.
+ """
+
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The second list is null / empty / False
+ if not resource2:
+ self.module.log("resource 2 is null. " + debug_resources)
+ return False
+
+ if len(resource1) != len(resource2):
+ self.module.log("resources have different length. " + debug_resources)
+ return False
+
+ resource1 = sorted(resource1, key=_str_sorted)
+ resource2 = sorted(resource2, key=_str_sorted)
+
+ for i, val in enumerate(resource1):
+ if isinstance(val, Mapping):
+ # change comparison function to compare dictionaries
+ if not self.compare(val, resource2[i]):
+ self.module.log("resources are different. " + debug_resources)
+ return False
+ elif isinstance(val, list):
+ # recursive call
+ if not self.compare_list(val, resource2[i]):
+ self.module.log("lists are different. " + debug_resources)
+ return False
+ elif _standardize_value(val) != _standardize_value(resource2[i]):
+ self.module.log("values are different. " + debug_resources)
+ return False
+
+ # no differences found
+ return True
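As a rough orientation (illustrative only, not part of the patch), a module built on OneViewModuleBase usually subclasses it, points resource_client at one of the hpOneView clients, and implements execute_module(); the resource choice and fact name below are examples:

    # Illustrative sketch only: assumes the hpOneView SDK and a valid OneView config.
    class FcNetworkModule(OneViewModuleBase):
        def __init__(self):
            additional_arg_spec = dict(
                state=dict(required=True, choices=['present', 'absent']),
                data=dict(required=True, type='dict'),
            )
            super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
                                                  validate_etag_support=True)
            # resource_client drives get_by_name/resource_present/resource_absent.
            self.resource_client = self.oneview_client.fc_networks

        def execute_module(self):
            resource = self.get_by_name(self.data.get('name'))
            if self.state == 'present':
                return self.resource_present(resource, fact_name='fc_network')
            return self.resource_absent(resource)


    if __name__ == '__main__':
        FcNetworkModule().run()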
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py
new file mode 100644
index 00000000..464e4542
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/online.py
@@ -0,0 +1,121 @@
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+
+
+def online_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
+
+class OnlineException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Online(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'Authorization': "Bearer %s" % self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+ if not results.ok:
+ raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.module.params.get('api_url'), path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+ def patch(self, path, data=None, headers=None):
+ return self.send("PATCH", path, data, headers)
+
+ def update(self, path, data=None, headers=None):
+ return self.send("UPDATE", path, data, headers)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py
new file mode 100644
index 00000000..0b95c618
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/opennebula.py
@@ -0,0 +1,310 @@
+#
+# Copyright 2018 www.privaz.io Valletech AB
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import time
+import ssl
+from os import environ
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import AnsibleModule
+
+
+HAS_PYONE = True
+
+try:
+ from pyone import OneException
+ from pyone.server import OneServer
+except ImportError:
+ OneException = Exception
+ HAS_PYONE = False
+
+
+class OpenNebulaModule:
+ """
+ Base class for all OpenNebula Ansible Modules.
+ This is basically a wrapper of the common arguments, the pyone client and
+ some utility methods.
+ """
+
+ common_args = dict(
+ api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
+ api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
+ api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
+ validate_certs=dict(default=True, type='bool'),
+ wait_timeout=dict(type='int', default=300),
+ )
+
+ def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None):
+
+ module_args = OpenNebulaModule.common_args
+ module_args.update(argument_spec)
+
+ self.module = AnsibleModule(argument_spec=module_args,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive)
+ self.result = dict(changed=False,
+ original_message='',
+ message='')
+ self.one = self.create_one_client()
+
+ self.resolved_parameters = self.resolve_parameters()
+
+ def create_one_client(self):
+ """
+ Creates an XML-RPC client to OpenNebula.
+
+ Returns: the new xmlrpc client.
+
+ """
+
+ # context required for not validating SSL, old python versions won't validate anyway.
+ if hasattr(ssl, '_create_unverified_context'):
+ no_ssl_validation_context = ssl._create_unverified_context()
+ else:
+ no_ssl_validation_context = None
+
+ # Check if the module can run
+ if not HAS_PYONE:
+ self.fail("pyone is required for this module")
+
+ if self.module.params.get("api_url"):
+ url = self.module.params.get("api_url")
+ else:
+ self.fail("Either api_url or the environment variable ONE_URL must be provided")
+
+ if self.module.params.get("api_username"):
+ username = self.module.params.get("api_username")
+ else:
+ self.fail("Either api_username or the environment vairable ONE_USERNAME must be provided")
+
+ if self.module.params.get("api_password"):
+ password = self.module.params.get("api_password")
+ else:
+ self.fail("Either api_password or the environment vairable ONE_PASSWORD must be provided")
+
+ session = "%s:%s" % (username, password)
+
+ if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
+ return OneServer(url, session=session, context=no_ssl_validation_context)
+ else:
+ return OneServer(url, session)
+
+ def close_one_client(self):
+ """
+ Close the pyone session.
+ """
+ self.one.server_close()
+
+ def fail(self, msg):
+ """
+ Utility failure method, will ensure pyone is properly closed before failing.
+ Args:
+ msg: human readable failure reason.
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.fail_json(msg=msg)
+
+ def exit(self):
+ """
+ Utility exit method, will ensure pyone is properly closed before exiting.
+
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.exit_json(**self.result)
+
+ def resolve_parameters(self):
+ """
+ This method resolves parameters provided by a secondary ID to the primary ID.
+ For example, if cluster_name is present, cluster_id will be added by performing
+ the required resolution.
+
+ Returns: a copy of the parameters that includes the resolved parameters.
+
+ """
+
+ resolved_params = dict(self.module.params)
+
+ if 'cluster_name' in self.module.params:
+ clusters = self.one.clusterpool.info()
+ for cluster in clusters.CLUSTER:
+ if cluster.NAME == self.module.params.get('cluster_name'):
+ resolved_params['cluster_id'] = cluster.ID
+
+ return resolved_params
+
+ def is_parameter(self, name):
+ """
+ Utility method to check if a parameter was provided or is resolved
+ Args:
+ name: the parameter to check
+ """
+ if name in self.resolved_parameters:
+ return self.get_parameter(name) is not None
+ else:
+ return False
+
+ def get_parameter(self, name):
+ """
+ Utility method for accessing parameters that includes resolved ID
+ parameters from provided Name parameters.
+ """
+ return self.resolved_parameters.get(name)
+
+ def get_host_by_name(self, name):
+ '''
+ Returns a host given its name.
+ Args:
+ name: the name of the host
+
+ Returns: the host object or None if the host is absent.
+
+ '''
+ hosts = self.one.hostpool.info()
+ for h in hosts.HOST:
+ if h.NAME == name:
+ return h
+ return None
+
+ def get_cluster_by_name(self, name):
+ """
+ Returns a cluster given its name.
+ Args:
+ name: the name of the cluster
+
+ Returns: the cluster object or None if the cluster is absent.
+ """
+
+ clusters = self.one.clusterpool.info()
+ for c in clusters.CLUSTER:
+ if c.NAME == name:
+ return c
+ return None
+
+ def get_template_by_name(self, name):
+ '''
+ Returns a template given its name.
+ Args:
+ name: the name of the template
+
+ Returns: the template object or None if the template is absent.
+
+ '''
+ templates = self.one.templatepool.info()
+ for t in templates.TEMPLATE:
+ if t.NAME == name:
+ return t
+ return None
+
+ def cast_template(self, template):
+ """
+ OpenNebula handles all template elements as strings.
+ At some point a cast is performed on the types provided by the user.
+ This function mimics that transformation so that required template updates are detected properly;
+ additionally, an array will be converted to a comma-separated list,
+ which works for labels and hopefully for other fields as well.
+
+ Args:
+ template: the template to transform
+
+ Returns: the transformed template with data casts applied.
+ """
+
+ # TODO: check formally available data types in templates
+ # TODO: some arrays might be converted to space separated
+
+ for key in template:
+ value = template[key]
+ if isinstance(value, dict):
+ self.cast_template(template[key])
+ elif isinstance(value, list):
+ template[key] = ', '.join(value)
+ elif not isinstance(value, string_types):
+ template[key] = str(value)
+
+ def requires_template_update(self, current, desired):
+ """
+ This function will help decide if a template update is required or not
+ If a desired key is missing from the current dictionary, an update is required.
+ If the intersection of both dictionaries is not deeply equal, an update is required.
+ Args:
+ current: current template as a dictionary
+ desired: desired template as a dictionary
+
+ Returns: True if a template update is required
+ """
+
+ if not desired:
+ return False
+
+ self.cast_template(desired)
+ intersection = dict()
+ for dkey in desired.keys():
+ if dkey in current.keys():
+ intersection[dkey] = current[dkey]
+ else:
+ return True
+ return not (desired == intersection)
+
+ def wait_for_state(self, element_name, state, state_name, target_states,
+ invalid_states=None, transition_states=None,
+ wait_timeout=None):
+ """
+ Args:
+ element_name: the name of the object we are waiting for: HOST, VM, etc.
+ state: lambda that returns the current state, will be queried until target state is reached
+ state_name: lambda that returns the readable form of a given state
+ target_states: states expected to be reached
+ invalid_states: if any of these states is reached, fail
+ transition_states: when used, these are the valid states during the transition.
+ wait_timeout: timeout period in seconds. Defaults to the provided parameter.
+ """
+
+ if not wait_timeout:
+ wait_timeout = self.module.params.get("wait_timeout")
+
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ current_state = state()
+
+ if invalid_states and current_state in invalid_states:
+ self.fail('invalid %s state %s' % (element_name, state_name(current_state)))
+
+ if transition_states:
+ if current_state not in transition_states:
+ self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))
+
+ if current_state in target_states:
+ return True
+
+ time.sleep(self.one.server_retry_interval())
+
+ self.fail(msg="Wait timeout has expired!")
+
+ def run_module(self):
+ """
+ trigger the start of the execution of the module.
+ Returns:
+
+ """
+ try:
+ self.run(self.one, self.module, self.result)
+ except OneException as e:
+ self.fail(msg="OpenNebula Exception: %s" % e)
+
+ def run(self, one, module, result):
+ """
+ to be implemented by subclass with the actual module actions.
+ Args:
+ one: the OpenNebula XMLRPC client
+ module: the Ansible Module object
+ result: the Ansible result
+ """
+ raise NotImplementedError("Method requires implementation")
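A minimal subclass sketch (illustrative only; the host lookup/delete logic is invented for the example) showing the expected pattern of extending common_args, implementing run(), and entering through run_module():

    # Illustrative sketch only: assumes pyone is installed and ONE_URL/ONE_USERNAME/
    # ONE_PASSWORD (or the api_* options) point at a reachable OpenNebula frontend.
    class HostModule(OpenNebulaModule):
        def __init__(self):
            argument_spec = dict(
                name=dict(type='str', required=True),
                state=dict(choices=['present', 'absent'], default='present'),
            )
            super(HostModule, self).__init__(argument_spec, supports_check_mode=True)

        def run(self, one, module, result):
            host = self.get_host_by_name(self.get_parameter('name'))
            if self.get_parameter('state') == 'absent' and host is not None:
                one.host.delete(host.ID)   # XML-RPC one.host.delete (assumed pyone call)
                result['changed'] = True
            self.exit()


    if __name__ == '__main__':
        HostModule().run_module()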
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
new file mode 100644
index 00000000..72a872fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
@@ -0,0 +1,1962 @@
+# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import logging.config
+import os
+import tempfile
+from datetime import datetime
+from operator import eq
+
+import time
+
+try:
+ import yaml
+
+ import oci
+ from oci.constants import HEADER_NEXT_PAGE
+
+ from oci.exceptions import (
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ConfigFileNotFound,
+ ServiceError,
+ MaximumWaitTimeExceeded,
+ )
+ from oci.identity.identity_client import IdentityClient
+ from oci.object_storage.models import CreateBucketDetails
+ from oci.object_storage.models import UpdateBucketDetails
+ from oci.retry import RetryStrategyBuilder
+ from oci.util import to_dict, Sentinel
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import iteritems
+
+__version__ = "1.6.0-dev"
+
+MAX_WAIT_TIMEOUT_IN_SECONDS = 1200
+
+# If a resource is in one of these states it would be considered inactive
+DEAD_STATES = [
+ "TERMINATING",
+ "TERMINATED",
+ "FAULTY",
+ "FAILED",
+ "DELETING",
+ "DELETED",
+ "UNKNOWN_ENUM_VALUE",
+ "DETACHING",
+ "DETACHED",
+]
+
+# If a resource is in one of these states it would be considered available
+DEFAULT_READY_STATES = [
+ "AVAILABLE",
+ "ACTIVE",
+ "RUNNING",
+ "PROVISIONED",
+ "ATTACHED",
+ "ASSIGNED",
+ "SUCCEEDED",
+ "PENDING_PROVIDER",
+]
+
+# If a resource is in one of these states, it would be considered deleted
+DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"]
+
+
+def get_common_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Return the common set of module arguments for all OCI cloud modules.
+ :param supports_create: Variable to decide whether to add options related to idempotency of create operation.
+ :param supports_wait: Variable to decide whether to add options related to waiting for completion.
+ :return: A dict with applicable module options.
+ """
+ # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ common_args = dict(
+ config_file_location=dict(type="str"),
+ config_profile_name=dict(type="str", default="DEFAULT"),
+ api_user=dict(type="str"),
+ api_user_fingerprint=dict(type="str", no_log=True),
+ api_user_key_file=dict(type="str"),
+ api_user_key_pass_phrase=dict(type="str", no_log=True),
+ auth_type=dict(
+ type="str",
+ required=False,
+ choices=["api_key", "instance_principal"],
+ default="api_key",
+ ),
+ tenancy=dict(type="str"),
+ region=dict(type="str"),
+ )
+
+ if supports_create:
+ common_args.update(
+ key_by=dict(type="list"),
+ force_create=dict(type="bool", default=False),
+ )
+
+ if supports_wait:
+ common_args.update(
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(
+ type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ wait_until=dict(type="str"),
+ )
+
+ return common_args
+
+
+def get_facts_module_arg_spec(filter_by_name=False):
+ # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ facts_module_arg_spec = get_common_arg_spec()
+ if filter_by_name:
+ facts_module_arg_spec.update(name=dict(type="str"))
+ else:
+ facts_module_arg_spec.update(display_name=dict(type="str"))
+ return facts_module_arg_spec
+
+
+def get_oci_config(module, service_client_class=None):
+ """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging
+ any overrides specified for configuration attributes through Ansible module options or environment variables. The
+ order of precedence for deriving the effective configuration dict is:
+ 1. If a config file is provided, use that to setup the initial config dict.
+ 2. If a config profile is specified, use that config profile to setup the config dict.
+ 3. For each authentication attribute, check if an override is provided either through
+ a. Ansible Module option
+ b. Environment variable
+ and override the value in the config dict in that order."""
+ config = {}
+
+ config_file = module.params.get("config_file_location")
+ _debug("Config file through module options - {0} ".format(config_file))
+ if not config_file:
+ if "OCI_CONFIG_FILE" in os.environ:
+ config_file = os.environ["OCI_CONFIG_FILE"]
+ _debug(
+ "Config file through OCI_CONFIG_FILE environment variable - {0}".format(
+ config_file
+ )
+ )
+ else:
+ config_file = "~/.oci/config"
+ _debug("Config file (fallback) - {0} ".format(config_file))
+
+ config_profile = module.params.get("config_profile_name")
+ if not config_profile:
+ if "OCI_CONFIG_PROFILE" in os.environ:
+ config_profile = os.environ["OCI_CONFIG_PROFILE"]
+ else:
+ config_profile = "DEFAULT"
+ try:
+ config = oci.config.from_file(
+ file_location=config_file, profile_name=config_profile
+ )
+ except (
+ ConfigFileNotFound,
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ) as ex:
+ if not _is_instance_principal_auth(module):
+ # When auth_type is not instance_principal, config file is required
+ module.fail_json(msg=str(ex))
+ else:
+ _debug(
+ "Ignore {0} as the auth_type is set to instance_principal".format(
+ str(ex)
+ )
+ )
+ # if instance_principal auth is used, an empty 'config' map is used below.
+
+ config["additional_user_agent"] = "Oracle-Ansible/{0}".format(__version__)
+ # Merge any overrides through other IAM options
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user",
+ env_var_name="OCI_USER_ID",
+ config_attr_name="user",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_fingerprint",
+ env_var_name="OCI_USER_FINGERPRINT",
+ config_attr_name="fingerprint",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_file",
+ env_var_name="OCI_USER_KEY_FILE",
+ config_attr_name="key_file",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_pass_phrase",
+ env_var_name="OCI_USER_KEY_PASS_PHRASE",
+ config_attr_name="pass_phrase",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="tenancy",
+ env_var_name="OCI_TENANCY",
+ config_attr_name="tenancy",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="region",
+ env_var_name="OCI_REGION",
+ config_attr_name="region",
+ )
+
+ # Redirect calls to home region for IAM service.
+ do_not_redirect = module.params.get(
+ "do_not_redirect_to_home_region", False
+ ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
+ if service_client_class == IdentityClient and not do_not_redirect:
+ _debug("Region passed for module invocation - {0} ".format(config["region"]))
+ identity_client = IdentityClient(config)
+ region_subscriptions = identity_client.list_region_subscriptions(
+ config["tenancy"]
+ ).data
+ # Replace the region in the config with the home region.
+ [config["region"]] = [
+ rs.region_name for rs in region_subscriptions if rs.is_home_region is True
+ ]
+ _debug(
+ "Setting region in the config to home region - {0} ".format(
+ config["region"]
+ )
+ )
+
+ return config
+
+
+def create_service_client(module, service_client_class):
+ """
+ Creates a service client using the common module options provided by the user.
+ :param module: An AnsibleModule that represents user provided options for a Task
+ :param service_client_class: A class that represents a client to an OCI Service
+ :return: A fully configured client
+ """
+ config = get_oci_config(module, service_client_class)
+ kwargs = {}
+
+ if _is_instance_principal_auth(module):
+ try:
+ signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
+ except Exception as ex:
+ message = (
+ "Failed retrieving certificates from localhost. Instance principal based authentication is only"
+ "possible from within OCI compute instances. Exception: {0}".format(
+ str(ex)
+ )
+ )
+ module.fail_json(msg=message)
+
+ kwargs["signer"] = signer
+
+ # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation
+ try:
+ oci.config.validate_config(config, **kwargs)
+ except oci.exceptions.InvalidConfig as ic:
+ module.fail_json(
+ msg="Invalid OCI configuration. Exception: {0}".format(str(ic))
+ )
+
+ # Create service client class with the signer
+ client = service_client_class(config, **kwargs)
+
+ return client
+
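For orientation, a minimal sketch (illustrative only) of how a resource module typically combines the argument-spec helper with create_service_client; VirtualNetworkClient is just an example service client:

    # Illustrative sketch only: assumes the OCI Python SDK is installed and either an
    # ~/.oci/config profile or instance-principal authentication is available.
    from ansible.module_utils.basic import AnsibleModule
    from oci.core.virtual_network_client import VirtualNetworkClient

    module = AnsibleModule(argument_spec=get_common_arg_spec(supports_create=True,
                                                             supports_wait=True))
    virtual_network_client = create_service_client(module, VirtualNetworkClient)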
+
+def _is_instance_principal_auth(module):
+ # check if auth type is overridden via module params
+ instance_principal_auth = (
+ "auth_type" in module.params
+ and module.params["auth_type"] == "instance_principal"
+ )
+ if not instance_principal_auth:
+ instance_principal_auth = (
+ "OCI_ANSIBLE_AUTH_TYPE" in os.environ
+ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
+ )
+ return instance_principal_auth
+
+
+def _merge_auth_option(
+ config, module, module_option_name, env_var_name, config_attr_name
+):
+ """Merge the values for an authentication attribute from ansible module options and
+ environment variables with the values specified in a configuration file"""
+ _debug("Merging {0}".format(module_option_name))
+
+ auth_attribute = module.params.get(module_option_name)
+ _debug(
+ "\t Ansible module option {0} = {1}".format(module_option_name, auth_attribute)
+ )
+ if not auth_attribute:
+ if env_var_name in os.environ:
+ auth_attribute = os.environ[env_var_name]
+ _debug(
+ "\t Environment variable {0} = {1}".format(env_var_name, auth_attribute)
+ )
+
+ # An authentication attribute has been provided through an env-variable or an ansible
+ # option and must override the corresponding attribute's value specified in the
+ # config file [profile].
+ if auth_attribute:
+ _debug(
+ "Updating config attribute {0} -> {1} ".format(
+ config_attr_name, auth_attribute
+ )
+ )
+ config.update({config_attr_name: auth_attribute})
+
+
+def bucket_details_factory(bucket_details_type, module):
+ bucket_details = None
+ if bucket_details_type == "create":
+ bucket_details = CreateBucketDetails()
+ elif bucket_details_type == "update":
+ bucket_details = UpdateBucketDetails()
+
+ bucket_details.compartment_id = module.params["compartment_id"]
+ bucket_details.name = module.params["name"]
+ bucket_details.public_access_type = module.params["public_access_type"]
+ bucket_details.metadata = module.params["metadata"]
+
+ return bucket_details
+
+
+def filter_resources(all_resources, filter_params):
+ if not filter_params:
+ return all_resources
+ filtered_resources = []
+ filtered_resources.extend(
+ [
+ resource
+ for resource in all_resources
+ for key, value in filter_params.items()
+ if getattr(resource, key) == value
+ ]
+ )
+ return filtered_resources
+
+
+def list_all_resources(target_fn, **kwargs):
+ """
+ Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is
+ provided as a kwarg, then only resources matching the specified name are returned.
+ :param target_fn: The target OCI SDK paged function to call
+ :param kwargs: All arguments that the OCI SDK paged function expects
+ :return: List of all objects returned by target_fn
+ :raises ServiceError: When the Service returned an Error response
+ :raises MaximumWaitTimeExceededError: When maximum wait time is exceeded while invoking target_fn
+ """
+ filter_params = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ except ValueError as ex:
+ if "unknown kwargs" in str(ex):
+ if "display_name" in kwargs:
+ if kwargs["display_name"]:
+ filter_params = {"display_name": kwargs["display_name"]}
+ del kwargs["display_name"]
+ elif "name" in kwargs:
+ if kwargs["name"]:
+ filter_params = {"name": kwargs["name"]}
+ del kwargs["name"]
+ response = call_with_backoff(target_fn, **kwargs)
+
+ existing_resources = response.data
+ while response.has_next_page:
+ kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE))
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resources += response.data
+
+ # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the resources
+ # and return the matching list of resources
+ return filter_resources(existing_resources, filter_params)
+
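Continuing the sketch above (illustrative only), listing every VCN in a compartment while letting list_all_resources handle paging and the optional name filter:

    # Illustrative only: pages through all results; list_all_resources falls back to
    # client-side filtering if the SDK list call rejects the display_name kwarg.
    kwargs = dict(compartment_id=module.params['compartment_id'])
    if module.params.get('display_name') is not None:
        kwargs['display_name'] = module.params['display_name']
    vcns = list_all_resources(virtual_network_client.list_vcns, **kwargs)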
+
+def _debug(s):
+ get_logger("oci_utils").debug(s)
+
+
+def get_logger(module_name):
+ oci_logging = setup_logging()
+ return oci_logging.getLogger(module_name)
+
+
+def setup_logging(
+ default_level="INFO",
+):
+ """Setup logging configuration"""
+ env_log_path = "LOG_PATH"
+ env_log_level = "LOG_LEVEL"
+
+ default_log_path = tempfile.gettempdir()
+ log_path = os.getenv(env_log_path, default_log_path)
+ log_level_str = os.getenv(env_log_level, default_level)
+ log_level = logging.getLevelName(log_level_str)
+ log_file_path = os.path.join(log_path, "oci_ansible_module.log")
+ logging.basicConfig(filename=log_file_path, filemode="a", level=log_level)
+ return logging
+
+
+def check_and_update_attributes(
+ target_instance, attr_name, input_value, existing_value, changed
+):
+ """
+ This function checks the difference between two resource attributes of literal types and sets the attribute
+ value in the target instance type holding the attribute.
+ :param target_instance: The instance which contains the attribute whose value is to be compared
+ :param attr_name: Name of the attribute whose value is to be compared
+ :param input_value: The value of the attribute provided by user
+ :param existing_value: The value of the attribute in the existing resource
+ :param changed: Flag to indicate whether there is any difference between the values
+ :return: Returns a boolean value indicating whether there is any difference between the values
+ """
+ if input_value is not None and not eq(input_value, existing_value):
+ changed = True
+ target_instance.__setattr__(attr_name, input_value)
+ else:
+ target_instance.__setattr__(attr_name, existing_value)
+ return changed
+
+
+def check_and_update_resource(
+ resource_type,
+ get_fn,
+ kwargs_get,
+ update_fn,
+ primitive_params_update,
+ kwargs_non_primitive_update,
+ module,
+ update_attributes,
+ client=None,
+ sub_attributes_of_update_model=None,
+ wait_applicable=True,
+ states=None,
+):
+
+ """
+ This function handles update operation on a resource. It checks whether update is required and accordingly returns
+ the resource and the changed status.
+ :param wait_applicable: Indicates if the resource support wait
+ :param client: The resource Client class to use to perform the wait checks. This param must be specified if
+ wait_applicable is True
+ :param resource_type: The type of the resource. e.g. "private_ip"
+ :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip
+ :param kwargs_get: Dictionary containing the arguments to be used to call get function.
+ e.g. {"private_ip_id": module.params["private_ip_id"]}
+ :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip
+ :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id']
+ :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get
+ function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed
+ to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"}
+ :param module: Instance of AnsibleModule
+ :param update_attributes: Attributes in update model.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example,
+ {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails.
+ :return: Returns a dictionary containing the "changed" status and the resource.
+ """
+ try:
+ result = dict(changed=False)
+ attributes_to_update, resource = get_attr_to_update(
+ get_fn, kwargs_get, module, update_attributes
+ )
+
+ if attributes_to_update:
+ kwargs_update = get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model,
+ )
+ resource = call_with_backoff(update_fn, **kwargs_update).data
+ if wait_applicable:
+ if client is None:
+ module.fail_json(
+ msg="wait_applicable is True, but client is not specified."
+ )
+ resource = wait_for_resource_lifecycle_state(
+ client, module, True, kwargs_get, get_fn, None, resource, states
+ )
+ result["changed"] = True
+ result[resource_type] = to_dict(resource)
+ return result
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model=None,
+):
+ kwargs_update = dict()
+ for param in primitive_params_update:
+ kwargs_update[param] = module.params[param]
+ for param in kwargs_non_primitive_update:
+ update_object = param()
+ for key in update_object.attribute_map:
+ if key in attributes_to_update:
+ if (
+ sub_attributes_of_update_model
+ and key in sub_attributes_of_update_model
+ ):
+ setattr(update_object, key, sub_attributes_of_update_model[key])
+ else:
+ setattr(update_object, key, module.params[key])
+ kwargs_update[kwargs_non_primitive_update[param]] = update_object
+ return kwargs_update
+
+
+def is_dictionary_subset(sub, super_dict):
+ """
+ This function checks if `sub` dictionary is a subset of `super` dictionary.
+ :param sub: subset dictionary, for example user_provided_attr_value.
+ :param super_dict: super dictionary, for example resources_attr_value.
+ :return: True if sub is contained in super.
+ """
+ for key in sub:
+ if sub[key] != super_dict[key]:
+ return False
+ return True
+
+
+def are_lists_equal(s, t):
+ if s is None and t is None:
+ return True
+
+ if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)):
+ return False
+
+ if len(s) == 0:
+ return True
+
+ s = to_dict(s)
+ t = to_dict(t)
+
+ if type(s[0]) == dict:
+ # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
+ # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
+ # `service_name` which is not provided in the list of `services` by a user while making an update call; only
+ # `service_id` is provided by the user in the update call.
+ sorted_s = sort_list_of_dictionary(s)
+ sorted_t = sort_list_of_dictionary(t)
+ for index, d in enumerate(sorted_s):
+ if not is_dictionary_subset(d, sorted_t[index]):
+ return False
+ return True
+ else:
+ # Handle lists of primitive types.
+ try:
+ for elem in s:
+ t.remove(elem)
+ except ValueError:
+ return False
+ return not t
+
+
+def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
+ try:
+ resource = call_with_backoff(get_fn, **kwargs_get).data
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ attributes_to_update = []
+
+ for attr in update_attributes:
+ resources_attr_value = getattr(resource, attr, None)
+ user_provided_attr_value = module.params.get(attr, None)
+
+ unequal_list_attr = (
+ type(resources_attr_value) == list or type(user_provided_attr_value) == list
+ ) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
+ unequal_attr = type(resources_attr_value) != list and to_dict(
+ resources_attr_value
+ ) != to_dict(user_provided_attr_value)
+ if unequal_list_attr or unequal_attr:
+ # only update if the user has explicitly provided a value for this attribute
+ # otherwise, no update is necessary because the user hasn't expressed a particular
+ # value for that attribute
+ if module.params.get(attr, None):
+ attributes_to_update.append(attr)
+
+ return attributes_to_update, resource
+
+
+def get_taggable_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Returns an arg_spec that is valid for taggable OCI resources.
+ :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and
+ defined tags.
+ """
+ tag_arg_spec = get_common_arg_spec(supports_create, supports_wait)
+ tag_arg_spec.update(
+ dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))
+ )
+ return tag_arg_spec
+
+
+def add_tags_to_model_from_module(model, module):
+ """
+ Adds free-form and defined tags from an ansible module to a resource model
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: The updated model class with the tags specified by the user.
+ """
+ freeform_tags = module.params.get("freeform_tags", None)
+ defined_tags = module.params.get("defined_tags", None)
+ return add_tags_to_model_class(model, freeform_tags, defined_tags)
+
+
+def add_tags_to_model_class(model, freeform_tags, defined_tags):
+ """
+ Add free-form and defined tags to a resource model.
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param freeform_tags: A dict representing the freeform_tags to be applied to the model
+ :param defined_tags: A dict representing the defined_tags to be applied to the model
+ :return: The updated model class with the tags specified by the user
+ """
+ try:
+ if freeform_tags is not None:
+ _debug("Model {0} set freeform tags to {1}".format(model, freeform_tags))
+ model.__setattr__("freeform_tags", freeform_tags)
+
+ if defined_tags is not None:
+ _debug("Model {0} set defined tags to {1}".format(model, defined_tags))
+ model.__setattr__("defined_tags", defined_tags)
+ except AttributeError as ae:
+ _debug("Model {0} doesn't support tags. Error {1}".format(model, ae))
+
+ return model
+
+
+def check_and_create_resource(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ list_fn,
+ kwargs_list,
+ module,
+ model,
+ existing_resources=None,
+ exclude_attributes=None,
+ dead_states=None,
+ default_attribute_values=None,
+ supports_sort_by_time_created=True,
+):
+ """
+ This function checks whether there is a resource with same attributes as specified in the module options. If not,
+ it creates and returns the resource.
+ :param resource_type: Type of the resource to be created.
+ :param create_fn: Function used in the module to handle create operation. The function should return a dict with
+ keys as resource & changed.
+ :param kwargs_create: Dictionary of parameters for create operation.
+ :param list_fn: List function in sdk to list all the resources of type resource_type.
+ :param kwargs_list: Dictionary of parameters for list operation.
+ :param module: Instance of AnsibleModule
+ :param model: Model used to create a resource.
+ :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name,
+ dns_label.
+ :param dead_states: List of states which can't transition to any of the usable states of the resource. This defaults
+ to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"]
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ if module.params.get("force_create", None):
+ _debug("Force creating {0}".format(resource_type))
+ result = call_with_backoff(create_fn, **kwargs_create)
+ return result
+
+ # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource
+ # in case of multiple resource matches.
+ if exclude_attributes is None:
+ exclude_attributes = {}
+ if default_attribute_values is None:
+ default_attribute_values = {}
+ try:
+ if existing_resources is None:
+ if supports_sort_by_time_created:
+ kwargs_list["sort_by"] = "TIMECREATED"
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ except ValueError:
+ # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry
+ kwargs_list.pop("sort_by", None)
+ try:
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ # Handle errors like 404 due to bad arguments to the list_all_resources call.
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ result = dict()
+
+ attributes_to_consider = _get_attributes_to_consider(
+ exclude_attributes, model, module
+ )
+ if "defined_tags" not in default_attribute_values:
+ default_attribute_values["defined_tags"] = {}
+ resource_matched = None
+ _debug(
+ "Trying to find a match within {0} existing resources".format(
+ len(existing_resources)
+ )
+ )
+
+ for resource in existing_resources:
+ if _is_resource_active(resource, dead_states):
+ _debug(
+ "Comparing user specified values {0} against an existing resource's "
+ "values {1}".format(module.params, to_dict(resource))
+ )
+ if does_existing_resource_match_user_inputs(
+ to_dict(resource),
+ module,
+ attributes_to_consider,
+ exclude_attributes,
+ default_attribute_values,
+ ):
+ resource_matched = to_dict(resource)
+ break
+
+ if resource_matched:
+ _debug("Resource with same attributes found: {0}.".format(resource_matched))
+ result[resource_type] = resource_matched
+ result["changed"] = False
+ else:
+ _debug("No matching resource found. Attempting to create a new resource.")
+ result = call_with_backoff(create_fn, **kwargs_create)
+
+ return result
+
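A rough sketch (illustrative only) of the idempotent-create pattern this helper supports; create_vcn below stands for a hypothetical module-local function that builds CreateVcnDetails from the module params and calls the SDK create call:

    # Illustrative sketch only: 'create_vcn' is a hypothetical helper, not part of this file.
    from oci.core.models import CreateVcnDetails

    result = check_and_create_resource(
        resource_type='vcn',
        create_fn=create_vcn,          # hypothetical: returns {'vcn': ..., 'changed': True}
        kwargs_create={'virtual_network_client': virtual_network_client, 'module': module},
        list_fn=virtual_network_client.list_vcns,
        kwargs_list={'compartment_id': module.params['compartment_id']},
        module=module,
        model=CreateVcnDetails(),
    )
    module.exit_json(**result)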
+
+def _get_attributes_to_consider(exclude_attributes, model, module):
+ """
+ Determine the attributes to detect if an existing resource already matches the requested resource state
+ :param exclude_attributes: Attributes to not consider for matching
+ :param model: The model class used to create the Resource
+ :param module: An instance of AnsibleModule that contains user's desires around a resource's state
+ :return: A list of attributes that needs to be matched
+ """
+
+ # If a user explicitly requests us to match only against a set of resources (using 'key_by'), use that as the list
+ # of attributes to consider for matching.
+ if "key_by" in module.params and module.params["key_by"] is not None:
+ attributes_to_consider = module.params["key_by"]
+ else:
+ # Consider all attributes except freeform_tags as freeform tags do not distinguish a resource.
+ attributes_to_consider = list(model.attribute_map)
+ if "freeform_tags" in attributes_to_consider:
+ attributes_to_consider.remove("freeform_tags")
+ # Temporarily removing node_count as the existing resource does not reflect it
+ if "node_count" in attributes_to_consider:
+ attributes_to_consider.remove("node_count")
+ _debug("attributes to consider: {0}".format(attributes_to_consider))
+ return attributes_to_consider
+
+
+def _is_resource_active(resource, dead_states):
+ if dead_states is None:
+ dead_states = DEAD_STATES
+
+ if "lifecycle_state" not in resource.attribute_map:
+ return True
+ return resource.lifecycle_state not in dead_states
+
+
+def is_attr_assigned_default(default_attribute_values, attr, assigned_value):
+ if not default_attribute_values:
+ return False
+
+ if attr in default_attribute_values:
+ default_val_for_attr = default_attribute_values.get(attr, None)
+ if isinstance(default_val_for_attr, dict):
+ # When default value for a resource's attribute is empty dictionary, check if the corresponding value of the
+ # existing resource's attribute is also empty.
+ if not default_val_for_attr:
+ return not assigned_value
+ # only compare keys that are in default_attribute_values[attr]
+ # this is to ensure forward compatibility when the API returns new keys that are not known during
+ # the time when the module author provided default values for the attribute
+ keys = {}
+ for k, v in iteritems(assigned_value):
+ if k in default_val_for_attr:
+ keys[k] = v
+
+ return default_val_for_attr == keys
+ # non-dict, normal comparison
+ return default_val_for_attr == assigned_value
+ else:
+ # module author has not provided a default value for attr
+ return True
+
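+# Illustrative sketch (hypothetical values, not part of the original module): for a dict-typed
+# attribute only the keys known to the module author's defaults are compared, so extra keys
+# returned by newer API versions are ignored.
+#
+#   defaults = {"defined_tags": {}, "options": {"a": 1}}
+#   is_attr_assigned_default(defaults, "defined_tags", {})            # True
+#   is_attr_assigned_default(defaults, "options", {"a": 1, "b": 2})   # True ("b" is ignored)
+#   is_attr_assigned_default(defaults, "options", {"a": 2})           # False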
+
+def create_resource(resource_type, create_fn, kwargs_create, module):
+ """
+ Create an OCI resource
+ :param resource_type: Type of the resource to be created. e.g.: "vcn"
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn
+ :param module: Instance of AnsibleModule
+ """
+ result = dict(changed=False)
+ try:
+ resource = to_dict(call_with_backoff(create_fn, **kwargs_create).data)
+ _debug("Created {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+ result[resource_type] = resource
+ return result
+ except (ServiceError, TypeError) as ex:
+ module.fail_json(msg=str(ex))
+
+
+def does_existing_resource_match_user_inputs(
+ existing_resource,
+ module,
+ attributes_to_compare,
+ exclude_attributes,
+ default_attribute_values=None,
+):
+ """
+ Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'.
+ :param existing_resource: A dictionary representing an existing resource's values.
+ :param module: The AnsibleModule representing the options provided by the user.
+ :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource
+ matches the desired state of the resource expressed by the user in 'module'.
+ :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the
+ resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values
+ like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like
+ 'dns_label' in VCNs. The attributes are the keys of this dictionary, and 'True' is the value for each key present.
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: True if the values for the list of attributes are the same in the existing_resource and module instances.
+ """
+ if not default_attribute_values:
+ default_attribute_values = {}
+ for attr in attributes_to_compare:
+ attribute_with_default_metadata = None
+ if attr in existing_resource:
+ resources_value_for_attr = existing_resource[attr]
+ # Check if the user has explicitly provided the value for attr.
+ user_provided_value_for_attr = _get_user_provided_value(module, attr)
+ if user_provided_value_for_attr is not None:
+ res = [True]
+ check_if_user_value_matches_resources_attr(
+ attr,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ if not res[0]:
+ _debug(
+ "Mismatch on attribute '{0}'. User provided value is {1} & existing resource's value"
+ "is {2}.".format(
+ attr, user_provided_value_for_attr, resources_value_for_attr
+ )
+ )
+ return False
+ else:
+ # If the user has not explicitly provided a value for attr and attr is in the exclude list, we can
+ # consider this a 'pass'. For example, if the attribute 'display_name' is not specified by the user and
+ # that attribute is in the 'exclude_list' according to the module author (not the user), then exclude it
+ # from the comparison.
+ if (
+ exclude_attributes.get(attr) is None
+ and resources_value_for_attr is not None
+ ):
+ if module.argument_spec.get(attr):
+ attribute_with_default_metadata = module.argument_spec.get(attr)
+ default_attribute_value = attribute_with_default_metadata.get(
+ "default", None
+ )
+ if default_attribute_value is not None:
+ if existing_resource[attr] != default_attribute_value:
+ return False
+ # Check if attr has a value that is not default. For example, a custom `security_list_id`
+ # is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a
+ # value that is not the default, then it must be considered a mismatch and false returned.
+ elif not is_attr_assigned_default(
+ default_attribute_values, attr, existing_resource[attr]
+ ):
+ return False
+
+ else:
+ _debug(
+ "Attribute {0} is in the create model of resource {1}"
+ "but doesn't exist in the get model of the resource".format(
+ attr, existing_resource.__class__
+ )
+ )
+ return True
+
+
+def tuplize(d):
+ """
+ This function takes a dictionary and converts it to a list of tuples recursively.
+ :param d: A dictionary.
+ :return: List of tuples.
+ """
+ list_of_tuples = []
+ key_list = sorted(list(d.keys()))
+ for key in key_list:
+ if type(d[key]) == list:
+ # Convert a value which is itself a list of dict to a list of tuples.
+ if d[key] and type(d[key][0]) == dict:
+ sub_tuples = []
+ for sub_dict in d[key]:
+ sub_tuples.append(tuplize(sub_dict))
+ # To handle comparing two None values, while creating a tuple for a {key: value}, make the first element
+ # in the tuple a boolean `True` if value is None so that attributes with None value are put at last
+ # in the sorted list.
+ list_of_tuples.append((sub_tuples is None, key, sub_tuples))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ elif type(d[key]) == dict:
+ tupled_value = tuplize(d[key])
+ list_of_tuples.append((tupled_value is None, key, tupled_value))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ return list_of_tuples
+
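+# Illustrative example (hypothetical values): tuplize turns a dict into a sorted list of
+# (value_is_none, key, value) tuples, recursing into nested dicts, so that dicts can be
+# compared and ordered deterministically.
+#
+#   tuplize({"b": 1, "a": None})
+#   # -> [(True, 'a', None), (False, 'b', 1)]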
+
+def get_key_for_comparing_dict(d):
+ tuple_form_of_d = tuplize(d)
+ return tuple_form_of_d
+
+
+def sort_dictionary(d):
+ """
+ This function sorts values of a dictionary recursively.
+ :param d: A dictionary.
+ :return: Dictionary with sorted elements.
+ """
+ sorted_d = {}
+ for key in d:
+ if type(d[key]) == list:
+ if d[key] and type(d[key][0]) == dict:
+ sorted_value = sort_list_of_dictionary(d[key])
+ sorted_d[key] = sorted_value
+ else:
+ sorted_d[key] = sorted(d[key])
+ elif type(d[key]) == dict:
+ sorted_d[key] = sort_dictionary(d[key])
+ else:
+ sorted_d[key] = d[key]
+ return sorted_d
+
+
+def sort_list_of_dictionary(list_of_dict):
+ """
+ This function sorts a list of dictionaries. It first sorts each value of every dictionary and then sorts the list of
+ individually sorted dictionaries. For sorting, each dictionary's tuple equivalent is used.
+ :param list_of_dict: List of dictionaries.
+ :return: A sorted list of dictionaries.
+ """
+ list_with_sorted_dict = []
+ for d in list_of_dict:
+ sorted_d = sort_dictionary(d)
+ list_with_sorted_dict.append(sorted_d)
+ return sorted(list_with_sorted_dict, key=get_key_for_comparing_dict)
+
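+# Illustrative example (hypothetical values): each dictionary is sorted internally, then the
+# list is ordered by the tuple form of each dictionary, giving a stable order for comparisons.
+#
+#   sort_list_of_dictionary([{"name": "b", "ids": [2, 1]}, {"name": "a", "ids": [1]}])
+#   # -> [{'name': 'a', 'ids': [1]}, {'name': 'b', 'ids': [1, 2]}]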
+
+def check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+):
+ if isinstance(default_attribute_values.get(attribute_name), dict):
+ default_attribute_values = default_attribute_values.get(attribute_name)
+
+ if isinstance(exclude_attributes.get(attribute_name), dict):
+ exclude_attributes = exclude_attributes.get(attribute_name)
+
+ if isinstance(resources_value_for_attr, list) or isinstance(
+ user_provided_value_for_attr, list
+ ):
+ # Perform a deep equivalence check for a List attribute
+ if exclude_attributes.get(attribute_name):
+ return
+ if (
+ user_provided_value_for_attr is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ user_provided_value_for_attr = default_attribute_values.get(attribute_name)
+
+ if resources_value_for_attr is None and user_provided_value_for_attr is None:
+ return
+
+ if (
+ resources_value_for_attr is None
+ and len(user_provided_value_for_attr) >= 0
+ or user_provided_value_for_attr is None
+ and len(resources_value_for_attr) >= 0
+ ):
+ res[0] = False
+ return
+
+ if (
+ resources_value_for_attr is not None
+ and user_provided_value_for_attr is not None
+ and len(resources_value_for_attr) != len(user_provided_value_for_attr)
+ ):
+ res[0] = False
+ return
+
+ if (
+ user_provided_value_for_attr
+ and type(user_provided_value_for_attr[0]) == dict
+ ):
+ # Process a list of dict
+ sorted_user_provided_value_for_attr = sort_list_of_dictionary(
+ user_provided_value_for_attr
+ )
+ sorted_resources_value_for_attr = sort_list_of_dictionary(
+ resources_value_for_attr
+ )
+
+ else:
+ sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr)
+ sorted_resources_value_for_attr = sorted(resources_value_for_attr)
+
+ # Walk through the sorted list values of the resource's value for this attribute, and compare against user
+ # provided values.
+ for index, resources_value_for_attr_part in enumerate(
+ sorted_resources_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr_part,
+ sorted_user_provided_value_for_attr[index],
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+
+ elif isinstance(resources_value_for_attr, dict):
+ # Perform a deep equivalence check for dict typed attributes
+
+ if not resources_value_for_attr and user_provided_value_for_attr:
+ res[0] = False
+ for key in resources_value_for_attr:
+ if (
+ user_provided_value_for_attr is not None
+ and user_provided_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr.get(key),
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ if exclude_attributes.get(key) is None:
+ if default_attribute_values.get(key) is not None:
+ user_provided_value_for_attr = default_attribute_values.get(key)
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ res[0] = is_attr_assigned_default(
+ default_attribute_values,
+ attribute_name,
+ resources_value_for_attr.get(key),
+ )
+
+ elif resources_value_for_attr != user_provided_value_for_attr:
+ if (
+ exclude_attributes.get(attribute_name) is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ # As the user has not specified a value for an optional attribute, if the existing resource's
+ # current state has a DEFAULT value for that attribute, we must not consider this incongruence
+ # an issue and continue with other checks. If the existing resource's value for the attribute
+ # is not the default value, then the existing resource is not a match.
+ if not is_attr_assigned_default(
+ default_attribute_values, attribute_name, resources_value_for_attr
+ ):
+ res[0] = False
+ elif user_provided_value_for_attr is not None:
+ res[0] = False
+
+
+def are_dicts_equal(
+ option_name,
+ existing_resource_dict,
+ user_provided_dict,
+ exclude_list,
+ default_attribute_values,
+):
+ if not user_provided_dict:
+ # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around
+ # this optional attribute. Check if existing_resource_dict matches default.
+ # For example, source_details attribute in volume is optional and does not have any defaults.
+ return is_attr_assigned_default(
+ default_attribute_values, option_name, existing_resource_dict
+ )
+
+ # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal
+ if not existing_resource_dict and user_provided_dict:
+ return False
+
+ # check if all keys of an existing resource's dict attribute matches user-provided dict's entries
+ for sub_attr in existing_resource_dict:
+ # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource.
+ if sub_attr in user_provided_dict:
+ if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]:
+ _debug(
+ "Failed to match: Existing resource's attr {0} sub-attr {1} value is {2}, while user "
+ "provided value is {3}".format(
+ option_name,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ user_provided_dict.get(sub_attr, None),
+ )
+ )
+ return False
+
+ # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value.
+ else:
+ if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
+ default_value_for_dict_attr = default_attribute_values.get(
+ option_name, None
+ )
+ if default_value_for_dict_attr:
+ # if a default value for the sub-attr was provided by the module author, fail if the existing
+ # resource's value for the sub-attr is not the default
+ if not is_attr_assigned_default(
+ default_value_for_dict_attr,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ ):
+ return False
+ else:
+ # No default value specified by module author for sub_attr
+ _debug(
+ "Consider as match: Existing resource's attr {0} sub-attr {1} value is {2}, while user did"
+ "not provide a value for it. The module author also has not provided a default value for it"
+ "or marked it for exclusion. So ignoring this attribute during matching and continuing with"
+ "other checks".format(
+ option_name, sub_attr, existing_resource_dict[sub_attr]
+ )
+ )
+
+ return True
+
+
+def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
+ """An entry for the Exclude list for excluding a map's key is specifed as a dict with the map option name as the
+ key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map
+ option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
+ for exclude_item in exclude_list:
+ if isinstance(exclude_item, dict):
+ if map_option_name in exclude_item:
+ if option_key in exclude_item[map_option_name]:
+ return True
+ return False
+
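+# Illustrative example (hypothetical values): exclude the keys "k1" and "k2" of a map option "m1".
+#
+#   exclude_list = ["display_name", {"m1": ["k1", "k2"]}]
+#   should_dict_attr_be_excluded("m1", "k1", exclude_list)   # True
+#   should_dict_attr_be_excluded("m1", "k3", exclude_list)   # False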
+
+def create_and_wait(
+ resource_type,
+ client,
+ create_fn,
+ kwargs_create,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to create a resource and wait for the resource to get into the state as specified in the module
+ options.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def update_and_wait(
+ resource_type,
+ client,
+ update_fn,
+ kwargs_update,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to update a resource and wait for the resource to get into the state as specified in the module
+ options. It mirrors create_and_wait: apart from the update function and its arguments, everything else is the same.
+ :param wait_applicable: Specifies if wait for update is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn
+ :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param states: List of lifecycle states to watch for while waiting after update_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ update_fn,
+ kwargs_update,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get=kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def create_or_update_resource_and_wait(
+ resource_type,
+ function,
+ kwargs_function,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ update_target_resource_id_in_get_param=False,
+ kwargs_get=None,
+):
+ """
+ A utility function to create or update a resource and wait for the resource to get into the state as specified in
+ the module options.
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param function: Function in the SDK to create or update the resource.
+ :param kwargs_function: Dictionary containing arguments to be used to call the create or update function
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ result = create_resource(resource_type, function, kwargs_function, module)
+ resource = result[resource_type]
+ result[resource_type] = wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type,
+ )
+ return result
+
+
+def wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type=None,
+):
+ """
+ A utility function to wait for the resource to get into the state as specified in
+ the module options.
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: The resource, refreshed after waiting for it to reach one of the given lifecycle states.
+ """
+ if wait_applicable and module.params.get("wait", None):
+ if resource_type == "compartment":
+ # An immediate attempt to retrieve a compartment after a compartment is created fails with
+ # 'Authorization failed or requested resource not found' (status 404).
+ # This is because it takes a few seconds for the permissions on a compartment to be ready.
+ # Wait for a few seconds before attempting a get call on the compartment.
+ _debug(
+ "Pausing execution for permission on the newly created compartment to be ready."
+ )
+ time.sleep(15)
+ if kwargs_get:
+ _debug(
+ "Waiting for resource to reach READY state. get_args: {0}".format(
+ kwargs_get
+ )
+ )
+ response_get = call_with_backoff(get_fn, **kwargs_get)
+ else:
+ _debug(
+ "Waiting for resource with id {0} to reach READY state.".format(
+ resource["id"]
+ )
+ )
+ response_get = call_with_backoff(get_fn, **{get_param: resource["id"]})
+ if states is None:
+ states = module.params.get("wait_until") or DEFAULT_READY_STATES
+ resource = to_dict(
+ oci.wait_until(
+ client,
+ response_get,
+ evaluate_response=lambda r: r.data.lifecycle_state in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ ).data
+ )
+ return resource
+
+
+def wait_on_work_request(client, response, module):
+ try:
+ if module.params.get("wait", None):
+ _debug(
+ "Waiting for work request with id {0} to reach SUCCEEDED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "SUCCEEDED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ else:
+ _debug(
+ "Waiting for work request with id {0} to reach ACCEPTED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "ACCEPTED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ except MaximumWaitTimeExceeded as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ return wait_response.data
+
+
+def delete_and_wait(
+ resource_type,
+ client,
+ get_fn,
+ kwargs_get,
+ delete_fn,
+ kwargs_delete,
+ module,
+ states=None,
+ wait_applicable=True,
+ process_work_request=False,
+):
+ """A utility function to delete a resource and wait for the resource to get into the state as specified in the
+ module options.
+ :param wait_applicable: Specifies if wait for delete is applicable for this resource
+ :param resource_type: Type of the resource to be deleted. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]}
+ :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn
+ :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]}
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed,
+ defaults to ["TERMINATED", "DETACHED", "DELETED"].
+ :param process_work_request: Whether a work request is generated on an API call and if it needs to be handled.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ states_set = set(["DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED"])
+ result = dict(changed=False)
+ result[resource_type] = dict()
+ try:
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ if resource:
+ if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set:
+ response = call_with_backoff(delete_fn, **kwargs_delete)
+ if process_work_request:
+ wr_id = response.headers.get("opc-work-request-id")
+ get_wr_response = call_with_backoff(
+ client.get_work_request, work_request_id=wr_id
+ )
+ result["work_request"] = to_dict(
+ wait_on_work_request(client, get_wr_response, module)
+ )
+ # Set changed to True as work request has been created to delete the resource.
+ result["changed"] = True
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ else:
+ _debug("Deleted {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+
+ if wait_applicable and module.params.get("wait", None):
+ if states is None:
+ states = (
+ module.params.get("wait_until")
+ or DEFAULT_TERMINATED_STATES
+ )
+ try:
+ wait_response = oci.wait_until(
+ client,
+ get_fn(**kwargs_get),
+ evaluate_response=lambda r: r.data.lifecycle_state
+ in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ succeed_on_not_found=True,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+ else:
+ # While waiting for resource to get into terminated state, if the resource is not found.
+ _debug(
+ "API returned Status:404(Not Found) while waiting for resource to get into"
+ " terminated state."
+ )
+ resource["lifecycle_state"] = "DELETED"
+ result[resource_type] = resource
+ return result
+ # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found.
+ if type(wait_response) is not Sentinel:
+ resource = to_dict(wait_response.data)
+ else:
+ resource["lifecycle_state"] = "DELETED"
+
+ result[resource_type] = resource
+ else:
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ except ServiceError as ex:
+ # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
+ # resource is not available, instead of the expected 404. So working around this for now.
+ if type(client) == oci.dns.DnsClient:
+ if ex.status == 400 and ex.code == "InvalidParameter":
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ elif ex.status != 404:
+ module.fail_json(msg=ex.message)
+ result[resource_type] = dict()
+ return result
+
+
+def are_attrs_equal(current_resource, module, attributes):
+ """
+ Check if the specified attributes are equal in the specified 'current_resource' and 'module'. This is used to check if an OCI
+ Model instance already has the values specified by an Ansible user while invoking an OCI Ansible module and if a
+ resource needs to be updated.
+ :param current_resource: A resource model instance
+ :param module: The AnsibleModule representing the options provided by the user
+ :param attributes: A list of attributes that would need to be compared in the model and the module instances.
+ :return: True if the values for the list of attributes are the same in the model and module instances
+ """
+ for attr in attributes:
+ curr_value = getattr(current_resource, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if user_provided_value is not None:
+ if curr_value != user_provided_value:
+ _debug(
+ "are_attrs_equal - current resource's attribute "
+ + attr
+ + " value is "
+ + str(curr_value)
+ + " and this doesn't match user provided value of "
+ + str(user_provided_value)
+ )
+ return False
+ return True
+
+
+def _get_user_provided_value(module, attribute_name):
+ """
+ Returns the user provided value for "attribute_name". We consider aliases in the module.
+ """
+ user_provided_value = module.params.get(attribute_name, None)
+ if user_provided_value is None:
+ # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using
+ # option X, then user provided value for attribute_name is equal to value for X.
+ # Get option name for attribute_name from module.aliases.
+ # module.aliases is a dictionary with key as alias name and its value as option name.
+ option_alias_for_attribute = module.aliases.get(attribute_name, None)
+ if option_alias_for_attribute is not None:
+ user_provided_value = module.params.get(option_alias_for_attribute, None)
+ return user_provided_value
+
+
+def update_model_with_user_options(curr_model, update_model, module):
+ """
+ Update the 'update_model' with user provided values from 'module' for the attributes of the update model, if they
+ differ from the values in the 'curr_model'.
+ :param curr_model: A resource model instance representing the state of the current resource
+ :param update_model: An instance of the update resource model for the current resource's type
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: An updated 'update_model' instance filled with values that would need to be updated in the current resource
+ state to satisfy the user's requested state.
+ """
+ attributes = update_model.attribute_map.keys()
+ for attr in attributes:
+ curr_value_for_attr = getattr(curr_model, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if curr_value_for_attr != user_provided_value:
+ if user_provided_value is not None:
+ # Only update if a user has specified a value for an option
+ _debug(
+ "User requested {0} for attribute {1}, whereas the current value is {2}. So adding it "
+ "to the update model".format(
+ user_provided_value, attr, curr_value_for_attr
+ )
+ )
+ setattr(update_model, attr, user_provided_value)
+ else:
+ # Always set current values of the resource in the update model if there is no request for change in
+ # values
+ setattr(update_model, attr, curr_value_for_attr)
+ return update_model
+
+
+def _get_retry_strategy():
+ retry_strategy_builder = RetryStrategyBuilder(
+ max_attempts_check=True,
+ max_attempts=10,
+ retry_max_wait_between_calls_seconds=30,
+ retry_base_sleep_time_seconds=3,
+ backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE,
+ )
+ retry_strategy_builder.add_service_error_check(
+ service_error_retry_config={
+ 429: [],
+ 400: ["QuotaExceeded", "LimitExceeded"],
+ 409: ["Conflict"],
+ },
+ service_error_retry_on_any_5xx=True,
+ )
+ return retry_strategy_builder.get_retry_strategy()
+
+
+def call_with_backoff(fn, **kwargs):
+ if "retry_strategy" not in kwargs:
+ kwargs["retry_strategy"] = _get_retry_strategy()
+ try:
+ return fn(**kwargs)
+ except TypeError as te:
+ if "unexpected keyword argument" in str(te):
+ # to handle older SDKs that did not support retry_strategy
+ del kwargs["retry_strategy"]
+ return fn(**kwargs)
+ else:
+ # A validation error raised by the SDK, throw it back
+ raise
+
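+# Illustrative usage (hypothetical client and OCID values): any SDK call can be routed through
+# call_with_backoff so that throttling (429) and transient 5xx errors are retried with the
+# strategy built by _get_retry_strategy().
+#
+#   response = call_with_backoff(virtual_network_client.get_vcn, vcn_id=vcn_id)
+#   vcn = to_dict(response.data)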
+
+def generic_hash(obj):
+ """
+ Compute a hash of all the fields in the object
+ :param obj: Object whose hash needs to be computed
+ :return: a hash value for the object
+ """
+ sum = 0
+ for field in obj.attribute_map.keys():
+ field_value = getattr(obj, field)
+ if isinstance(field_value, list):
+ for value in field_value:
+ sum = sum + hash(value)
+ elif isinstance(field_value, dict):
+ for k, v in field_value.items():
+ sum = sum + hash(hash(k) + hash(":") + hash(v))
+ else:
+ sum = sum + hash(getattr(obj, field))
+ return sum
+
+
+def generic_eq(s, other):
+ if other is None:
+ return False
+ return s.__dict__ == other.__dict__
+
+
+def generate_subclass(parent_class):
+ """Make a class hash-able by generating a subclass with a __hash__ method that returns the sum of all fields within
+ the parent class"""
+ dict_of_method_in_subclass = {
+ "__init__": parent_class.__init__,
+ "__hash__": generic_hash,
+ "__eq__": generic_eq,
+ }
+ subclass_name = "GeneratedSub" + parent_class.__name__
+ generated_sub_class = type(
+ subclass_name, (parent_class,), dict_of_method_in_subclass
+ )
+ return generated_sub_class
+
+
+def create_hashed_instance(class_type):
+ hashed_class = generate_subclass(class_type)
+ return hashed_class()
+
+
+def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None):
+ if object_with_values is None:
+ return None
+ hashed_class_instances = []
+ for object_with_value in object_with_values:
+ hashed_class_instances.append(
+ get_hashed_object(class_type, object_with_value, attributes_class_type)
+ )
+ return hashed_class_instances
+
+
+def get_hashed_object(
+ class_type, object_with_value, attributes_class_type=None, supported_attributes=None
+):
+ """
+ Convert any class instance into a hashable instance so that the
+ instances are eligible for the various comparison
+ operations available on set() objects.
+ :param class_type: Any class type whose instances need to be hashable
+ :param object_with_value: Instance of the class type with values which
+ would be set in the resulting instance
+ :param attributes_class_type: A list of class types of attributes, if attribute is a custom class instance
+ :param supported_attributes: A list of attributes which should be considered while populating the instance
+ with the values in the object. This helps in avoiding new attributes of the class_type which are still not
+ supported by the current implementation.
+ :return: A hashable instance with same state of the provided object_with_value
+ """
+ if object_with_value is None:
+ return None
+
+ HashedClass = generate_subclass(class_type)
+ hashed_class_instance = HashedClass()
+
+ if supported_attributes:
+ class_attributes = list(
+ set(hashed_class_instance.attribute_map) & set(supported_attributes)
+ )
+ else:
+ class_attributes = hashed_class_instance.attribute_map
+
+ for attribute in class_attributes:
+ attribute_value = getattr(object_with_value, attribute)
+ if attributes_class_type:
+ for attribute_class_type in attributes_class_type:
+ if isinstance(attribute_value, attribute_class_type):
+ attribute_value = get_hashed_object(
+ attribute_class_type, attribute_value
+ )
+ hashed_class_instance.__setattr__(attribute, attribute_value)
+
+ return hashed_class_instance
+
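+# Illustrative sketch (hypothetical model and variable names): wrapping SDK model instances in a
+# generated hashable subclass allows set-based comparison of nested attributes, e.g. to check
+# whether a list of option objects has changed.
+#
+#   hashed_existing = get_hashed_object_list(OptionModel, existing_options)
+#   hashed_requested = get_hashed_object_list(OptionModel, requested_options)
+#   changed = set(hashed_existing or []) != set(hashed_requested or [])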
+
+def update_class_type_attr_difference(
+ update_class_details, existing_instance, attr_name, attr_class, input_attr_value
+):
+ """
+ Checks the difference and updates an attribute which is represented by a class
+ instance. Not aplicable if the attribute type is a primitive value.
+ For example, if a class name is A with an attribute x, then if A.x = X(), then only
+ this method works.
+ :param update_class_details The instance which should be updated if there is change in
+ attribute value
+ :param existing_instance The instance whose attribute value is compared with input
+ attribute value
+ :param attr_name Name of the attribute whose value should be compared
+ :param attr_class Class type of the attribute
+ :param input_attr_value The value of the input attribute which should replace the current
+ value in case of mismatch
+ :return: A boolean value indicating whether attribute value has been replaced
+ """
+ changed = False
+ # Here existing attribute values is an instance
+ existing_attr_value = get_hashed_object(
+ attr_class, getattr(existing_instance, attr_name)
+ )
+ if input_attr_value is None:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+ else:
+ changed = not input_attr_value.__eq__(existing_attr_value)
+ if changed:
+ update_class_details.__setattr__(attr_name, input_attr_value)
+ else:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+
+ return changed
+
+
+def get_existing_resource(target_fn, module, **kwargs):
+ """
+ Returns the requested resource if it exists based on the input arguments.
+ :param target_fn The function which should be used to find the requested resource
+ :param module Instance of AnsibleModule attribute value
+ :param kwargs A map of arguments consisting of values based on which requested resource should be searched
+ :return: Instance of requested resource
+ """
+ existing_resource = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resource = response.data
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+
+ return existing_resource
+
+
+def get_attached_instance_info(
+ module, lookup_attached_instance, list_attachments_fn, list_attachments_args
+):
+ config = get_oci_config(module)
+ identity_client = create_service_client(module, IdentityClient)
+
+ volume_attachments = []
+
+ if lookup_attached_instance:
+ # Get all the compartments in the tenancy
+ compartments = to_dict(
+ identity_client.list_compartments(
+ config.get("tenancy"), compartment_id_in_subtree=True
+ ).data
+ )
+ # For each compartment, get the volume attachments for the compartment_id with the other args in
+ # list_attachments_args.
+ for compartment in compartments:
+ list_attachments_args["compartment_id"] = compartment["id"]
+ try:
+ volume_attachments += list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ # Pass ServiceError due to authorization issue in accessing volume attachments of a compartment
+ except ServiceError as ex:
+ if ex.status == 404:
+ pass
+
+ else:
+ volume_attachments = list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ volume_attachments = to_dict(volume_attachments)
+ # volume_attachments has attachments in DETACHING or DETACHED state. Return the volume attachment in ATTACHING or
+ # ATTACHED state
+
+ return next(
+ (
+ volume_attachment
+ for volume_attachment in volume_attachments
+ if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]
+ ),
+ None,
+ )
+
+
+def check_mode(fn):
+ def wrapper(*args, **kwargs):
+ if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None):
+ return fn(*args, **kwargs)
+ return None
+
+ return wrapper
+
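+# Illustrative usage (hypothetical function name): a function decorated with @check_mode only
+# runs when the OCI_ANSIBLE_EXPERIMENTAL environment variable is set; otherwise it returns None.
+#
+#   @check_mode
+#   def handle_tag_updates(module, resource):
+#       ...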
+
+def check_and_return_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if input_component_list:
+ existing_components, changed = get_component_list_difference(
+ input_component_list,
+ existing_components,
+ purge_components,
+ delete_components,
+ )
+ else:
+ existing_components = []
+ changed = True
+ return existing_components, changed
+
+
+def get_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if delete_components:
+ if existing_components is None:
+ return None, False
+ component_differences = set(existing_components).intersection(
+ set(input_component_list)
+ )
+ if component_differences:
+ return list(set(existing_components) - component_differences), True
+ else:
+ return None, False
+ if existing_components is None:
+ return input_component_list, True
+ if purge_components:
+ components_differences = set(input_component_list).symmetric_difference(
+ set(existing_components)
+ )
+
+ if components_differences:
+ return input_component_list, True
+
+ components_differences = set(input_component_list).difference(
+ set(existing_components)
+ )
+ if components_differences:
+ return list(components_differences) + existing_components, True
+ return None, False
+
+
+def write_to_file(path, content):
+ with open(to_bytes(path), "wb") as dest_file:
+ dest_file.write(content)
+
+
+def get_target_resource_from_list(
+ module, list_resource_fn, target_resource_id=None, **kwargs
+):
+ """
+ Returns a resource filtered by identifier from a list of resources. This method should be
+ used as an alternative to a 'get resource' method when the resource API does not provide one.
+ This method returns a wrapper of the response object, but it should not be
+ used as an input to the 'wait_until' utility as it is only a partial wrapper of a response object.
+ :param module The AnsibleModule representing the options provided by the user
+ :param list_resource_fn The function which lists all the resources
+ :param target_resource_id The identifier of the resource which should be filtered from the list
+ :param kwargs A map of arguments consisting of values based on which requested resource should be searched
+ :return: A custom wrapper which partially wraps a response object where the data field contains the target
+ resource, if found.
+ """
+
+ class ResponseWrapper:
+ def __init__(self, data):
+ self.data = data
+
+ try:
+ resources = list_all_resources(list_resource_fn, **kwargs)
+ if resources is not None:
+ for resource in resources:
+ if resource.id == target_resource_id:
+ # Returning an object that mimics an OCI response, as oci_utils methods assume a Response-like
+ # object
+ return ResponseWrapper(data=resource)
+ return ResponseWrapper(data=None)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py
new file mode 100644
index 00000000..f0d6f88e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/postgres.py
@@ -0,0 +1,314 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
+# Most of this was originally added by other creators in the postgresql_user module.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+psycopg2 = None  # This line is needed for unit tests
+try:
+ import psycopg2
+ HAS_PSYCOPG2 = True
+except ImportError:
+ HAS_PSYCOPG2 = False
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+from distutils.version import LooseVersion
+
+
+def postgres_common_argument_spec():
+ """
+ Return a dictionary with connection options.
+
+ The options are commonly used by most of PostgreSQL modules.
+ """
+ return dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+
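+# Illustrative usage (a sketch; the 'db' option name is only an example): a module merges these
+# common connection options with its own argument spec before creating the AnsibleModule.
+#
+#   argument_spec = postgres_common_argument_spec()
+#   argument_spec.update(db=dict(type='str', aliases=['login_db']))
+#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)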
+
+def ensure_required_libs(module):
+ """Check required libraries."""
+ if not HAS_PSYCOPG2:
+ module.fail_json(msg=missing_required_lib('psycopg2'))
+
+ if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
+ module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+
+def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
+ """Connect to a PostgreSQL database.
+
+ Return psycopg2 connection object.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ conn_params (dict) -- dictionary with connection parameters
+
+ Kwargs:
+ autocommit (bool) -- commit automatically (default False)
+ fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
+ """
+ ensure_required_libs(module)
+
+ db_connection = None
+ try:
+ db_connection = psycopg2.connect(**conn_params)
+ if autocommit:
+ if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+
+ # Switch role, if specified:
+ if module.params.get('session_role'):
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ try:
+ cursor.execute('SET ROLE "%s"' % module.params['session_role'])
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e))
+ finally:
+ cursor.close()
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least '
+ 'version 8.4 to support sslrootcert')
+
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ except Exception as e:
+ if fail_on_conn:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e))
+ else:
+ module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
+ db_connection = None
+
+ return db_connection
+
+
+def exec_sql(obj, query, query_params=None, return_bool=False, add_to_executed=True, dont_exec=False):
+ """Execute SQL.
+
+ Auxiliary function for PostgreSQL user classes.
+
+ Returns a query result if possible or a boolean value.
+
+ Args:
+ obj (obj) -- must be an object of a user class.
+ The object must have module (AnsibleModule class object) and
+ cursor (psycopg cursor object) attributes
+ query (str) -- SQL query to execute
+
+ Kwargs:
+ query_params (dict or tuple) -- Query parameters to prevent SQL injections,
+ could be a dict or tuple
+ return_bool (bool) -- return True instead of rows if a query was successfully executed.
+ It's necessary for statements that don't return any result like DDL queries (default False).
+ add_to_executed (bool) -- append the query to obj.executed_queries attribute
+ dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
+ to obj.executed_queries list and return True (default False)
+ """
+
+ if dont_exec:
+ # This is usually needed to return queries in check_mode
+ # without execution
+ query = obj.cursor.mogrify(query, query_params)
+ if add_to_executed:
+ obj.executed_queries.append(query)
+
+ return True
+
+ try:
+ if query_params is not None:
+ obj.cursor.execute(query, query_params)
+ else:
+ obj.cursor.execute(query)
+
+ if add_to_executed:
+ if query_params is not None:
+ obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
+ else:
+ obj.executed_queries.append(query)
+
+ if not return_bool:
+ res = obj.cursor.fetchall()
+ return res
+ return True
+ except Exception as e:
+ obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ return False
+
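+# Illustrative flow (a sketch; 'query_obj' stands for any object exposing 'module', 'cursor' and
+# 'executed_queries' attributes): build connection parameters, connect, then run queries.
+#
+#   conn_params = get_conn_params(module, module.params)
+#   db_connection = connect_to_db(module, conn_params, autocommit=True)
+#   cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+#   rows = exec_sql(query_obj, "SELECT version()", add_to_executed=False)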
+
+def get_conn_params(module, params_dict, warn_db_default=True):
+ """Get connection parameters from the passed dictionary.
+
+ Return a dictionary with parameters to connect to PostgreSQL server.
+
+ Args:
+ module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
+ params_dict (dict) -- dictionary with variables
+
+ Kwargs:
+ warn_db_default (bool) -- warn that the default DB is used (default True)
+ """
+ # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include them in the return dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ # Might be different in the modules:
+ if params_dict.get('db'):
+ params_map['db'] = 'database'
+ elif params_dict.get('database'):
+ params_map['database'] = 'database'
+ elif params_dict.get('login_db'):
+ params_map['login_db'] = 'database'
+ else:
+ if warn_db_default:
+ module.warn('Database name has not been passed, '
+ 'using the default database to connect to.')
+
+ kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
+ if is_localhost and params_dict["login_unix_socket"] != "":
+ kw["host"] = params_dict["login_unix_socket"]
+
+ return kw
+
+
+class PgMembership(object):
+ def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
+ self.module = module
+ self.cursor = cursor
+ self.target_roles = [r.strip() for r in target_roles]
+ self.groups = [r.strip() for r in groups]
+ self.executed_queries = []
+ self.granted = {}
+ self.revoked = {}
+ self.fail_on_role = fail_on_role
+ self.non_existent_roles = []
+ self.changed = False
+ self.__check_roles_exist()
+
+ def grant(self):
+ for group in self.groups:
+ self.granted[group] = []
+
+ for role in self.target_roles:
+ # If role is in a group now, pass:
+ if self.__check_membership(group, role):
+ continue
+
+ query = 'GRANT "%s" TO "%s"' % (group, role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ if self.changed:
+ self.granted[group].append(role)
+
+ return self.changed
+
+ def revoke(self):
+ for group in self.groups:
+ self.revoked[group] = []
+
+ for role in self.target_roles:
+ # If role is not in a group now, pass:
+ if not self.__check_membership(group, role):
+ continue
+
+ query = 'REVOKE "%s" FROM "%s"' % (group, role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ if self.changed:
+ self.revoked[group].append(role)
+
+ return self.changed
+
+ def __check_membership(self, src_role, dst_role):
+ query = ("SELECT ARRAY(SELECT b.rolname FROM "
+ "pg_catalog.pg_auth_members m "
+ "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(dst_role)s")
+
+ res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
+ membership = []
+ if res:
+ membership = res[0][0]
+
+ if not membership:
+ return False
+
+ if src_role in membership:
+ return True
+
+ return False
+
+ def __check_roles_exist(self):
+ existent_groups = self.__roles_exist(self.groups)
+ existent_roles = self.__roles_exist(self.target_roles)
+
+ for group in self.groups:
+ if group not in existent_groups:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % group)
+ else:
+ self.module.warn("Role %s does not exist, pass" % group)
+ self.non_existent_roles.append(group)
+
+ for role in self.target_roles:
+ if role not in existent_roles:
+ if self.fail_on_role:
+ self.module.fail_json(msg="Role %s does not exist" % role)
+ else:
+ self.module.warn("Role %s does not exist, pass" % role)
+
+ if role not in self.groups:
+ self.non_existent_roles.append(role)
+
+ else:
+ if self.fail_on_role:
+ self.module.exit_json(msg="Role role '%s' is a member of role '%s'" % (role, role))
+ else:
+ self.module.warn("Role role '%s' is a member of role '%s', pass" % (role, role))
+
+ # Update role lists, excluding non existent roles:
+ self.groups = [g for g in self.groups if g not in self.non_existent_roles]
+
+ self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
+
+ def __roles_exist(self, roles):
+ tmp = ["'" + x + "'" for x in roles]
+ query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
+ return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
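+
+
+# Illustrative usage (a sketch; role and group names are hypothetical): grant membership of the
+# target roles in the given groups and report what changed.
+#
+#   membership = PgMembership(module, cursor, groups=['reader'], target_roles=['alice'])
+#   changed = membership.grant()
+#   module.exit_json(changed=changed, granted=membership.granted,
+#                    queries=membership.executed_queries)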
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py
new file mode 100644
index 00000000..666f8777
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/proxmox.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import time
+import re
+import traceback
+
+PROXMOXER_IMP_ERR = None
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+ PROXMOXER_IMP_ERR = traceback.format_exc()
+
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+
+def proxmox_auth_argument_spec():
+ return dict(
+ api_host=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_HOST'])
+ ),
+ api_user=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_USER'])
+ ),
+ api_password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['PROXMOX_PASSWORD'])
+ ),
+ api_token_id=dict(type='str',
+ no_log=False
+ ),
+ api_token_secret=dict(type='str',
+ no_log=True
+ ),
+ validate_certs=dict(type='bool',
+ default=False
+ ),
+ )
+
+
+def proxmox_to_ansible_bool(value):
+ '''Convert Proxmox representation of a boolean to be ansible-friendly'''
+ return True if value == 1 else False
+
+
+class ProxmoxAnsible(object):
+ """Base class for Proxmox modules"""
+ def __init__(self, module):
+ self.module = module
+ self.proxmox_api = self._connect()
+ # Test token validity
+ try:
+ self.proxmox_api.version.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e, exception=traceback.format_exc())
+
+ def _connect(self):
+ api_host = self.module.params['api_host']
+ api_user = self.module.params['api_user']
+ api_password = self.module.params['api_password']
+ api_token_id = self.module.params['api_token_id']
+ api_token_secret = self.module.params['api_token_secret']
+ validate_certs = self.module.params['validate_certs']
+
+ auth_args = {'user': api_user}
+ if api_password:
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ except Exception as e:
+ self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
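+
+
+# Illustrative usage (a sketch): a Proxmox module builds its spec from proxmox_auth_argument_spec()
+# and hands the resulting AnsibleModule to ProxmoxAnsible, which authenticates with either a
+# password or an API token.
+#
+#   module = AnsibleModule(argument_spec=proxmox_auth_argument_spec())
+#   proxmox = ProxmoxAnsible(module)
+#   version = proxmox.proxmox_api.version.get()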
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py
new file mode 100644
index 00000000..ebd41b1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/pure.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest
+except ImportError:
+ HAS_PURITY_FB = False
+
+from functools import wraps
+from os import environ
+from os import path
+import platform
+
+VERSION = 1.2
+USER_AGENT_BASE = 'Ansible'
+API_AGENT_VERSION = 1.5
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ array_name = module.params['fa_url']
+ api = module.params['api_token']
+
+ if array_name and api:
+ system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
+ elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'):
+ system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent)
+ else:
+ module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
+ return system
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ blade_name = module.params['fb_url']
+ api = module.params['api_token']
+
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
+ blade = PurityFb(environ.get('PUREFB_URL'))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get('PUREFB_API'))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ else:
+ module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
+ return blade
+
+
+def purefa_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fa_url=dict(),
+ api_token=dict(no_log=True),
+ )
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
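+
+
+# Minimal usage sketch (illustrative): with fa_url/api_token set, or the
+# PUREFA_URL/PUREFA_API environment variables exported, a module would do:
+#
+#   module = AnsibleModule(argument_spec=purefa_argument_spec())
+#   array = get_system(module)   # returns a purestorage.FlashArray handle
+#   array.get()                  # same call used above to validate the session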
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py
new file mode 100644
index 00000000..e8c455e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/rax.py
@@ -0,0 +1,315 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their own
+# license to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+from uuid import UUID
+
+from ansible.module_utils.six import text_type, binary_type
+
+FINAL_STATUSES = ('ACTIVE', 'ERROR')
+VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
+ 'error', 'error_deleting')
+
+CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
+ 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
+CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
+ 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
+ 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
+
+NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
+PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
+SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
+
+
+def rax_slugify(value):
+ """Prepend a key with rax_ and normalize the key name"""
+ return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
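+ # e.g. (illustrative): rax_slugify('OS-EXT-STS:task_state') -> 'rax_os-ext-sts_task_state'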
+
+
+def rax_clb_node_to_dict(obj):
+ """Function to convert a CLB Node object to a dict"""
+ if not obj:
+ return {}
+ node = obj.to_dict()
+ node['id'] = obj.id
+ node['weight'] = obj.weight
+ return node
+
+
+def rax_to_dict(obj, obj_type='standard'):
+ """Generic function to convert a pyrax object to a dict
+
+ obj_type values:
+ standard
+ clb
+ server
+
+ """
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if obj_type == 'clb' and key == 'nodes':
+ instance[key] = []
+ for node in value:
+ instance[key].append(rax_clb_node_to_dict(node))
+ elif (isinstance(value, list) and len(value) > 0 and
+ not isinstance(value[0], NON_CALLABLES)):
+ instance[key] = []
+ for item in value:
+ instance[key].append(rax_to_dict(item))
+ elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
+ if obj_type == 'server':
+ if key == 'image':
+ if not value:
+ instance['rax_boot_source'] = 'volume'
+ else:
+ instance['rax_boot_source'] = 'local'
+ key = rax_slugify(key)
+ instance[key] = value
+
+ if obj_type == 'server':
+ for attr in ['id', 'accessIPv4', 'name', 'status']:
+ instance[attr] = instance.get(rax_slugify(attr))
+
+ return instance
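+ # Illustrative result shape for obj_type='server' (values hypothetical):
+ # {'rax_id': '...', 'rax_name': 'web01', 'rax_status': 'ACTIVE',
+ # 'rax_boot_source': 'local', 'id': '...', 'accessIPv4': '203.0.113.10',
+ # 'name': 'web01', 'status': 'ACTIVE', ...}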
+
+
+ """Find a server's bootable volume"""
+ """Find a servers bootable volume"""
+ cs = rax_module.cloudservers
+ cbs = rax_module.cloud_blockstorage
+ server_id = rax_module.utils.get_id(server)
+ volumes = cs.volumes.get_server_volumes(server_id)
+ bootable_volumes = []
+ for volume in volumes:
+ vol = cbs.get(volume)
+ if module.boolean(vol.bootable):
+ bootable_volumes.append(vol)
+ if not bootable_volumes:
+ if exit:
+ module.fail_json(msg='No bootable volumes could be found for '
+ 'server %s' % server_id)
+ else:
+ return False
+ elif len(bootable_volumes) > 1:
+ if exit:
+ module.fail_json(msg='Multiple bootable volumes found for server '
+ '%s' % server_id)
+ else:
+ return False
+
+ return bootable_volumes[0]
+
+
+def rax_find_image(module, rax_module, image, exit=True):
+ """Find a server image by ID or Name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(image)
+ except ValueError:
+ try:
+ image = cs.images.find(human_id=image)
+ except(cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ try:
+ image = cs.images.find(name=image)
+ except (cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ if exit:
+ module.fail_json(msg='No matching image found (%s)' %
+ image)
+ else:
+ return False
+
+ return rax_module.utils.get_id(image)
+
+
+def rax_find_volume(module, rax_module, name):
+ """Find a Block storage volume by ID or name"""
+ cbs = rax_module.cloud_blockstorage
+ try:
+ UUID(name)
+ volume = cbs.get(name)
+ except ValueError:
+ try:
+ volume = cbs.find(name=name)
+ except rax_module.exc.NotFound:
+ volume = None
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ return volume
+
+
+def rax_find_network(module, rax_module, network):
+ """Find a cloud network by ID or name"""
+ cnw = rax_module.cloud_networks
+ try:
+ UUID(network)
+ except ValueError:
+ if network.lower() == 'public':
+ return cnw.get_server_networks(PUBLIC_NET_ID)
+ elif network.lower() == 'private':
+ return cnw.get_server_networks(SERVICE_NET_ID)
+ else:
+ try:
+ network_obj = cnw.find_network_by_label(network)
+ except (rax_module.exceptions.NetworkNotFound,
+ rax_module.exceptions.NetworkLabelNotUnique):
+ module.fail_json(msg='No matching network found (%s)' %
+ network)
+ else:
+ return cnw.get_server_networks(network_obj)
+ else:
+ return cnw.get_server_networks(network)
+
+
+def rax_find_server(module, rax_module, server):
+ """Find a Cloud Server by ID or name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(server)
+ server = cs.servers.get(server)
+ except ValueError:
+ servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
+ if not servers:
+ module.fail_json(msg='No Server was matched by name, '
+ 'try using the Server ID instead')
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers matched by name, '
+ 'try using the Server ID instead')
+
+ # We made it this far, grab the first and hopefully only server
+ # in the list
+ server = servers[0]
+ return server
+
+
+def rax_find_loadbalancer(module, rax_module, loadbalancer):
+ """Find a Cloud Load Balancer by ID or name"""
+ clb = rax_module.cloud_loadbalancers
+ try:
+ found = clb.get(loadbalancer)
+ except Exception:
+ found = []
+ for lb in clb.list():
+ if loadbalancer == lb.name:
+ found.append(lb)
+
+ if not found:
+ module.fail_json(msg='No loadbalancer was matched')
+
+ if len(found) > 1:
+ module.fail_json(msg='Multiple loadbalancers matched')
+
+ # We made it this far, grab the first and hopefully only item
+ # in the list
+ found = found[0]
+
+ return found
+
+
+def rax_argument_spec():
+ """Return standard base dictionary used for the argument_spec
+ argument in AnsibleModule
+
+ """
+ return dict(
+ api_key=dict(type='str', aliases=['password'], no_log=True),
+ auth_endpoint=dict(type='str'),
+ credentials=dict(type='path', aliases=['creds_file']),
+ env=dict(type='str'),
+ identity_type=dict(type='str', default='rackspace'),
+ region=dict(type='str'),
+ tenant_id=dict(type='str'),
+ tenant_name=dict(type='str'),
+ username=dict(type='str'),
+ validate_certs=dict(type='bool', aliases=['verify_ssl']),
+ )
+
+
+def rax_required_together():
+ """Return the default list used for the required_together argument to
+ AnsibleModule"""
+ return [['api_key', 'username']]
+
+
+def setup_rax_module(module, rax_module, region_required=True):
+ """Set up pyrax in a standard way for all modules"""
+ rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
+ rax_module.USER_AGENT)
+
+ api_key = module.params.get('api_key')
+ auth_endpoint = module.params.get('auth_endpoint')
+ credentials = module.params.get('credentials')
+ env = module.params.get('env')
+ identity_type = module.params.get('identity_type')
+ region = module.params.get('region')
+ tenant_id = module.params.get('tenant_id')
+ tenant_name = module.params.get('tenant_name')
+ username = module.params.get('username')
+ verify_ssl = module.params.get('validate_certs')
+
+ if env is not None:
+ rax_module.set_environment(env)
+
+ rax_module.set_setting('identity_type', identity_type)
+ if verify_ssl is not None:
+ rax_module.set_setting('verify_ssl', verify_ssl)
+ if auth_endpoint is not None:
+ rax_module.set_setting('auth_endpoint', auth_endpoint)
+ if tenant_id is not None:
+ rax_module.set_setting('tenant_id', tenant_id)
+ if tenant_name is not None:
+ rax_module.set_setting('tenant_name', tenant_name)
+
+ try:
+ username = username or os.environ.get('RAX_USERNAME')
+ if not username:
+ username = rax_module.get_setting('keyring_username')
+ if username:
+ api_key = 'USE_KEYRING'
+ if not api_key:
+ api_key = os.environ.get('RAX_API_KEY')
+ credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
+ os.environ.get('RAX_CREDS_FILE'))
+ region = (region or os.environ.get('RAX_REGION') or
+ rax_module.get_setting('region'))
+ except KeyError as e:
+ module.fail_json(msg='Unable to load %s' % e)
+
+ try:
+ if api_key and username:
+ if api_key == 'USE_KEYRING':
+ rax_module.keyring_auth(username, region=region)
+ else:
+ rax_module.set_credentials(username, api_key=api_key,
+ region=region)
+ elif credentials:
+ credentials = os.path.expanduser(credentials)
+ rax_module.set_credential_file(credentials, region=region)
+ else:
+ raise Exception('No credentials supplied!')
+ except Exception as e:
+ msg = str(e) or repr(e)
+ module.fail_json(msg=msg)
+
+ if region_required and region not in rax_module.regions:
+ module.fail_json(msg='%s is not a valid region, must be one of: %s' %
+ (region, ','.join(rax_module.regions)))
+
+ return rax_module
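+
+
+# Typical wiring for a rax module (sketch only; assumes pyrax is importable;
+# the module-specific 'name' option is hypothetical):
+#
+#   import pyrax
+#   argument_spec = rax_argument_spec()
+#   argument_spec.update(dict(name=dict()))
+#   module = AnsibleModule(argument_spec=argument_spec,
+#                          required_together=rax_required_together())
+#   setup_rax_module(module, pyrax)
+#   server = rax_find_server(module, pyrax, module.params['name'])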
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
new file mode 100644
index 00000000..8f14dbad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
@@ -0,0 +1,2694 @@
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+from ansible.module_utils.urls import open_url
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+
+DEPRECATE_MSG = 'Issuing a data modification command without specifying the '\
+ 'ID of the target %(resource)s resource when there is more '\
+ 'than one %(resource)s will use the first one in the '\
+ 'collection. Use the `resource_id` option to specify the '\
+ 'target %(resource)s ID'
+
+
+class RedfishUtils(object):
+
+ def __init__(self, creds, root_uri, timeout, module, resource_id=None,
+ data_modification=False):
+ self.root_uri = root_uri
+ self.creds = creds
+ self.timeout = timeout
+ self.module = module
+ self.service_root = '/redfish/v1/'
+ self.resource_id = resource_id
+ self.data_modification = data_modification
+ self._init_session()
+
+ # The following functions are to send GET/POST/PATCH/DELETE requests
+ def get_request(self, uri):
+ try:
+ resp = open_url(uri, method="GET", headers=GET_HEADERS,
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ data = json.loads(to_native(resp.read()))
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers}
+
+ def post_request(self, uri, pyld):
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=POST_HEADERS, method="POST",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ def patch_request(self, uri, pyld):
+ headers = PATCH_HEADERS
+ r = self.get_request(uri)
+ if r['ret']:
+ # Get etag from etag header or @odata.etag property
+ etag = r['headers'].get('etag')
+ if not etag:
+ etag = r['data'].get('@odata.etag')
+ if etag:
+ # Make copy of headers and add If-Match header
+ headers = dict(headers)
+ headers['If-Match'] = etag
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=headers, method="PATCH",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ def delete_request(self, uri, pyld=None):
+ try:
+ data = json.dumps(pyld) if pyld else None
+ resp = open_url(uri, data=data,
+ headers=DELETE_HEADERS, method="DELETE",
+ url_username=self.creds['user'],
+ url_password=self.creds['pswd'],
+ force_basic_auth=True, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ @staticmethod
+ def _get_extended_message(error):
+ """
+ Get Redfish ExtendedInfo message from response payload if present
+ :param error: an HTTPError exception
+ :type error: HTTPError
+ :return: the ExtendedInfo message if present, else standard HTTP error
+ """
+ msg = http_client.responses.get(error.code, '')
+ if error.code >= 400:
+ try:
+ body = error.read().decode('utf-8')
+ data = json.loads(body)
+ ext_info = data['error']['@Message.ExtendedInfo']
+ msg = ext_info[0]['Message']
+ except Exception:
+ pass
+ return msg
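+ # Shape of the Redfish error body parsed above (illustrative):
+ # {"error": {"@Message.ExtendedInfo": [{"Message": "..."}]}}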
+
+ def _init_session(self):
+ pass
+
+ def _find_accountservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'AccountService' not in data:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+ else:
+ account_service = data["AccountService"]["@odata.id"]
+ response = self.get_request(self.root_uri + account_service)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ accounts = data['Accounts']['@odata.id']
+ if accounts[-1:] == '/':
+ accounts = accounts[:-1]
+ self.accounts_uri = accounts
+ return {'ret': True}
+
+ def _find_sessionservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'SessionService' not in data:
+ return {'ret': False, 'msg': "SessionService resource not found"}
+ else:
+ session_service = data["SessionService"]["@odata.id"]
+ response = self.get_request(self.root_uri + session_service)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ sessions = data['Sessions']['@odata.id']
+ if sessions[-1:] == '/':
+ sessions = sessions[:-1]
+ self.sessions_uri = sessions
+ return {'ret': True}
+
+ def _get_resource_uri_by_id(self, uris, id_prop):
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ if id_prop == data.get('Id'):
+ return uri
+ return None
+
+ def _find_systems_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Systems' not in data:
+ return {'ret': False, 'msg': "Systems resource not found"}
+ response = self.get_request(self.root_uri + data['Systems']['@odata.id'])
+ if response['ret'] is False:
+ return response
+ self.systems_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.systems_uris:
+ return {
+ 'ret': False,
+ 'msg': "ComputerSystem's Members array is either empty or missing"}
+ self.systems_uri = self.systems_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.systems_uri = self._get_resource_uri_by_id(self.systems_uris,
+ self.resource_id)
+ if not self.systems_uri:
+ return {
+ 'ret': False,
+ 'msg': "System resource %s not found" % self.resource_id}
+ elif len(self.systems_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'System'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _find_updateservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'UpdateService' not in data:
+ return {'ret': False, 'msg': "UpdateService resource not found"}
+ else:
+ update = data["UpdateService"]["@odata.id"]
+ self.update_uri = update
+ response = self.get_request(self.root_uri + update)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ self.firmware_uri = self.software_uri = None
+ if 'FirmwareInventory' in data:
+ self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
+ if 'SoftwareInventory' in data:
+ self.software_uri = data['SoftwareInventory'][u'@odata.id']
+ return {'ret': True}
+
+ def _find_chassis_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Chassis' not in data:
+ return {'ret': False, 'msg': "Chassis resource not found"}
+ chassis = data["Chassis"]["@odata.id"]
+ response = self.get_request(self.root_uri + chassis)
+ if response['ret'] is False:
+ return response
+ self.chassis_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.chassis_uris:
+ return {'ret': False,
+ 'msg': "Chassis Members array is either empty or missing"}
+ self.chassis_uri = self.chassis_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris,
+ self.resource_id)
+ if not self.chassis_uri:
+ return {
+ 'ret': False,
+ 'msg': "Chassis resource %s not found" % self.resource_id}
+ elif len(self.chassis_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'Chassis'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _find_managers_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Managers' not in data:
+ return {'ret': False, 'msg': "Manager resource not found"}
+ manager = data["Managers"]["@odata.id"]
+ response = self.get_request(self.root_uri + manager)
+ if response['ret'] is False:
+ return response
+ self.manager_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.manager_uris:
+ return {'ret': False,
+ 'msg': "Managers Members array is either empty or missing"}
+ self.manager_uri = self.manager_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.manager_uri = self._get_resource_uri_by_id(self.manager_uris,
+ self.resource_id)
+ if not self.manager_uri:
+ return {
+ 'ret': False,
+ 'msg': "Manager resource %s not found" % self.resource_id}
+ elif len(self.manager_uris) > 1:
+ self.module.deprecate(DEPRECATE_MSG % {'resource': 'Manager'},
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+ return {'ret': True}
+
+ def _get_all_action_info_values(self, action):
+ """Retrieve all parameter values for an Action from ActionInfo.
+ Fall back to AllowableValues annotations if no ActionInfo is found.
+ Return the result in an ActionInfo-like dictionary, keyed
+ by the name of the parameter. """
+ ai = {}
+ if '@Redfish.ActionInfo' in action:
+ ai_uri = action['@Redfish.ActionInfo']
+ response = self.get_request(self.root_uri + ai_uri)
+ if response['ret'] is True:
+ data = response['data']
+ if 'Parameters' in data:
+ params = data['Parameters']
+ ai = dict((p['Name'], p)
+ for p in params if 'Name' in p)
+ if not ai:
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in action.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ return ai
+
+ def _get_allowable_values(self, action, name, default_values=None):
+ if default_values is None:
+ default_values = []
+ ai = self._get_all_action_info_values(action)
+ allowable_values = ai.get(name, {}).get('AllowableValues')
+ # fallback to default values
+ if allowable_values is None:
+ allowable_values = default_values
+ return allowable_values
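+ # Illustrative fallback in _get_all_action_info_values(): with no ActionInfo, an
+ # action like {'target': '...', 'ResetType@Redfish.AllowableValues': ['On', 'ForceOff']}
+ # is returned as {'ResetType': {'AllowableValues': ['On', 'ForceOff']}}.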
+
+ def get_logs(self):
+ log_svcs_uri_list = []
+ list_of_logs = []
+ properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat',
+ 'Message', 'MessageId', 'MessageArgs']
+
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for log_svcs_entry in data.get('Members', []):
+ response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ if 'Entries' in _data:
+ log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
+
+ # For each entry in LogServices, get log name and all log entries
+ for log_svcs_uri in log_svcs_uri_list:
+ logs = {}
+ list_of_log_entries = []
+ response = self.get_request(self.root_uri + log_svcs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ logs['Description'] = data.get('Description',
+ 'Collection of log entries')
+ # Get all log entries for each type of log found
+ for logEntry in data.get('Members', []):
+ entry = {}
+ for prop in properties:
+ if prop in logEntry:
+ entry[prop] = logEntry.get(prop)
+ if entry:
+ list_of_log_entries.append(entry)
+ log_name = log_svcs_uri.split('/')[-1]
+ logs[log_name] = list_of_log_entries
+ list_of_logs.append(logs)
+
+ # list_of_logs[logs{list_of_log_entries[entry{}]}]
+ return {'ret': True, 'entries': list_of_logs}
+
+ def clear_logs(self):
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for log_svcs_entry in data[u'Members']:
+ response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ # Check to make sure option is available, otherwise error is ugly
+ if "Actions" in _data:
+ if "#LogService.ClearLog" in _data[u"Actions"]:
+ response = self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def aggregate(self, func, uri_list, uri_name):
+ ret = True
+ entries = []
+ for uri in uri_list:
+ inventory = func(uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({uri_name: uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
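+ # Aggregated result shape (illustrative):
+ # {'ret': True, 'entries': [({'system_uri': '/redfish/v1/Systems/1'}, [...]), ...]}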
+
+ def aggregate_chassis(self, func):
+ return self.aggregate(func, self.chassis_uris, 'chassis_uri')
+
+ def aggregate_managers(self, func):
+ return self.aggregate(func, self.manager_uris, 'manager_uri')
+
+ def aggregate_systems(self, func):
+ return self.aggregate(func, self.systems_uris, 'system_uri')
+
+ def get_storage_controller_inventory(self, systems_uri):
+ result = {}
+ controller_list = []
+ controller_results = []
+ # Get these entries, but do not fail if they are not found
+ properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
+ 'Location', 'Manufacturer', 'Model', 'Name', 'Id',
+ 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
+ key = "StorageControllers"
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'Storage' not in data:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data['Storage']["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Loop through Members and their StorageControllers
+ # and gather properties from each StorageController
+ if data[u'Members']:
+ for storage_member in data[u'Members']:
+ storage_member_uri = storage_member[u'@odata.id']
+ response = self.get_request(self.root_uri + storage_member_uri)
+ data = response['data']
+
+ if key in data:
+ controller_list = data[key]
+ for controller in controller_list:
+ controller_result = {}
+ for property in properties:
+ if property in controller:
+ controller_result[property] = controller[property]
+ controller_results.append(controller_result)
+ result['entries'] = controller_results
+ return result
+ else:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ def get_multi_storage_controller_inventory(self):
+ return self.aggregate_systems(self.get_storage_controller_inventory)
+
+ def get_disk_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ # Get these entries, but do not fail if they are not found
+ properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
+ 'EncryptionAbility', 'EncryptionStatus',
+ 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
+ 'Manufacturer', 'MediaType', 'Model', 'Name',
+ 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
+ 'RotationSpeedRPM', 'SerialNumber', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data[u'Members']:
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ if 'Drives' in data:
+ for device in data[u'Drives']:
+ disk_uri = self.root_uri + device[u'@odata.id']
+ response = self.get_request(disk_uri)
+ data = response['data']
+
+ drive_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ drive_result[property] = data[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ if 'SimpleStorage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data["SimpleStorage"]["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Name' in data:
+ controller_name = data['Name']
+ else:
+ sc_id = data.get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ for device in data[u'Devices']:
+ drive_result = {}
+ for property in properties:
+ if property in device:
+ drive_result[property] = device[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ return result
+
+ def get_multi_disk_inventory(self):
+ return self.aggregate_systems(self.get_disk_inventory)
+
+ def get_volume_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ volume_list = []
+ # Get these entries, but do not fail if they are not found
+ properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes',
+ 'Capacity', 'CapacityBytes', 'CapacitySources',
+ 'Encrypted', 'EncryptionTypes', 'Identifiers',
+ 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities',
+ 'AllocatedPools', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data.get('Members'):
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ volume_results = []
+ if 'Volumes' in data:
+ # Get a list of all volumes and build respective URIs
+ volumes_uri = data[u'Volumes'][u'@odata.id']
+ response = self.get_request(self.root_uri + volumes_uri)
+ data = response['data']
+
+ if data.get('Members'):
+ for volume in data[u'Members']:
+ volume_list.append(volume[u'@odata.id'])
+ for v in volume_list:
+ uri = self.root_uri + v
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ volume_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ volume_result[property] = data[property]
+
+ # Get related Drives Id
+ drive_id_list = []
+ if 'Links' in data:
+ if 'Drives' in data[u'Links']:
+ for link in data[u'Links'][u'Drives']:
+ drive_id_link = link[u'@odata.id']
+ drive_id = drive_id_link.split("/")[-1]
+ drive_id_list.append({'Id': drive_id})
+ volume_result['Linked_drives'] = drive_id_list
+ volume_results.append(volume_result)
+ volumes = {'Controller': controller_name,
+ 'Volumes': volume_results}
+ result["entries"].append(volumes)
+ else:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ return result
+
+ def get_multi_volume_inventory(self):
+ return self.aggregate_systems(self.get_volume_inventory)
+
+ def manage_indicator_led(self, command):
+ result = {}
+ key = 'IndicatorLED'
+
+ payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', 'IndicatorLedBlink': 'Blinking'}
+
+ response = self.get_request(self.root_uri + self.chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ if command in payloads.keys():
+ payload = {'IndicatorLED': payloads[command]}
+ response = self.patch_request(self.root_uri + self.chassis_uri, payload)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command'}
+
+ return result
+
+ def _map_reset_type(self, reset_type, allowable_values):
+ equiv_types = {
+ 'On': 'ForceOn',
+ 'ForceOn': 'On',
+ 'ForceOff': 'GracefulShutdown',
+ 'GracefulShutdown': 'ForceOff',
+ 'GracefulRestart': 'ForceRestart',
+ 'ForceRestart': 'GracefulRestart'
+ }
+
+ if reset_type in allowable_values:
+ return reset_type
+ if reset_type not in equiv_types:
+ return reset_type
+ mapped_type = equiv_types[reset_type]
+ if mapped_type in allowable_values:
+ return mapped_type
+ return reset_type
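+ # Example (illustrative): if the service only allows ['ForceRestart'] and the caller
+ # asked for 'GracefulRestart', the equivalent 'ForceRestart' is returned; anything
+ # without an equivalent in the table is passed through unchanged.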
+
+ def manage_system_power(self, command):
+ return self.manage_power(command, self.systems_uri,
+ '#ComputerSystem.Reset')
+
+ def manage_manager_power(self, command):
+ return self.manage_power(command, self.manager_uri,
+ '#Manager.Reset')
+
+ def manage_power(self, command, resource_uri, action_name):
+ key = "Actions"
+ reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
+ 'GracefulRestart', 'ForceRestart', 'Nmi',
+ 'ForceOn', 'PushPowerButton', 'PowerCycle']
+
+ # command should be PowerOn, PowerForceOff, etc.
+ if not command.startswith('Power'):
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+ reset_type = command[5:]
+
+ # map Reboot to a ResetType that does a reboot
+ if reset_type == 'Reboot':
+ reset_type = 'GracefulRestart'
+
+ if reset_type not in reset_type_values:
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+
+ # read the resource and get the current power state
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ power_state = data.get('PowerState')
+
+ # if power is already in target state, nothing to do
+ if power_state == "On" and reset_type in ['On', 'ForceOn']:
+ return {'ret': True, 'changed': False}
+ if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
+ return {'ret': True, 'changed': False}
+
+ # get the reset Action and target URI
+ if key not in data or action_name not in data[key]:
+ return {'ret': False, 'msg': 'Action %s not found' % action_name}
+ reset_action = data[key][action_name]
+ if 'target' not in reset_action:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action_name}
+ action_uri = reset_action['target']
+
+ # get AllowableValues
+ ai = self._get_all_action_info_values(reset_action)
+ allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
+
+ # map ResetType to an allowable value if needed
+ if reset_type not in allowable_values:
+ reset_type = self._map_reset_type(reset_type, allowable_values)
+
+ # define payload
+ payload = {'ResetType': reset_type}
+
+ # POST to Action URI
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
+
+ def _find_account_uri(self, username=None, acct_id=None):
+ if not any((username, acct_id)):
+ return {'ret': False, 'msg':
+ 'Must provide either account_id or account_username'}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
+ if username:
+ if username == data.get('UserName'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+ if acct_id:
+ if acct_id == data.get('Id'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No account with the given account_id or account_username found'}
+
+ def _find_empty_account_slot(self):
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ if uris:
+ # first slot may be reserved, so move to end of list
+ uris += [uris.pop(0)]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
+ if data.get('UserName') == "" and not data.get('Enabled', True):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No empty account slot found'}
+
+ def list_users(self):
+ result = {}
+ # listing all users is typically slower than other operations
+ user_list = []
+ users_results = []
+ # Get these entries, but do not fail if they are not found
+ properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for users in data.get('Members', []):
+ user_list.append(users[u'@odata.id']) # user_list[] are URIs
+
+ # for each user, get details
+ for uri in user_list:
+ user = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ user[property] = data[property]
+
+ users_results.append(user)
+ result["entries"] = users_results
+ return result
+
+ def add_user_via_patch(self, user):
+ if user.get('account_id'):
+ # If Id slot specified, use it
+ response = self._find_account_uri(acct_id=user.get('account_id'))
+ else:
+ # Otherwise find first empty slot
+ response = self._find_empty_account_slot()
+
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def add_user(self, user):
+ if not user.get('account_username'):
+ return {'ret': False, 'msg':
+ 'Must provide account_username for AddUser command'}
+
+ response = self._find_account_uri(username=user.get('account_username'))
+ if response['ret']:
+ # account_username already exists, nothing to do
+ return {'ret': True, 'changed': False}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if not response['ret']:
+ return response
+ headers = response['headers']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'POST' not in methods:
+ # if Allow header present and POST not listed, add via PATCH
+ return self.add_user_via_patch(user)
+
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+
+ response = self.post_request(self.root_uri + self.accounts_uri, payload)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if POST returned a 405, try to add via PATCH
+ return self.add_user_via_patch(user)
+ else:
+ return response
+ return {'ret': True}
+
+ def enable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data.get('Enabled', True):
+ # account already enabled, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'Enabled': True}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def delete_user_via_patch(self, user, uri=None, data=None):
+ if not uri:
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data and data.get('UserName') == '' and not data.get('Enabled', False):
+ # account UserName already cleared, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'UserName': ''}
+ if data.get('Enabled', False):
+ payload['Enabled'] = False
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def delete_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ if response.get('no_match'):
+ # account does not exist, nothing to do
+ return {'ret': True, 'changed': False}
+ else:
+ # some error encountered
+ return response
+
+ uri = response['uri']
+ headers = response['headers']
+ data = response['data']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'DELETE' not in methods:
+ # if Allow header present and DELETE not listed, del via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+
+ response = self.delete_request(self.root_uri + uri)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if DELETE returned a 405, try to delete via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+ else:
+ return response
+ return {'ret': True}
+
+ def disable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if not data.get('Enabled'):
+ # account already disabled, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'Enabled': False}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_role(self, user):
+ if not user.get('account_roleid'):
+ return {'ret': False, 'msg':
+ 'Must provide account_roleid for UpdateUserRole command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ if data.get('RoleId') == user.get('account_roleid'):
+ # account already has this RoleId, nothing to do
+ return {'ret': True, 'changed': False}
+
+ payload = {'RoleId': user.get('account_roleid')}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_password(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {'Password': user['account_password']}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_user_name(self, user):
+ if not user.get('account_updatename'):
+ return {'ret': False, 'msg':
+ 'Must provide account_updatename for UpdateUserName command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {'UserName': user['account_updatename']}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def update_accountservice_properties(self, user):
+ if user.get('account_properties') is None:
+ return {'ret': False, 'msg':
+ 'Must provide account_properties for UpdateAccountServiceProperties command'}
+ account_properties = user.get('account_properties')
+
+ # Find AccountService
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'AccountService' not in data:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+ accountservice_uri = data["AccountService"]["@odata.id"]
+
+ # Check support or not
+ response = self.get_request(self.root_uri + accountservice_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for property_name in account_properties.keys():
+ if property_name not in data:
+ return {'ret': False, 'msg':
+ 'property %s not supported' % property_name}
+
+ # if the properties already match, nothing to do
+ need_change = False
+ for property_name in account_properties.keys():
+ if account_properties[property_name] != data[property_name]:
+ need_change = True
+ break
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"}
+
+ payload = account_properties
+ response = self.patch_request(self.root_uri + accountservice_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"}
+
+ def get_sessions(self):
+ result = {}
+ # listing all sessions is typically slower than other operations
+ session_list = []
+ sessions_results = []
+ # Get these entries, but do not fail if they are not found
+ properties = ['Description', 'Id', 'Name', 'UserName']
+
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for sessions in data[u'Members']:
+ session_list.append(sessions[u'@odata.id']) # session_list[] are URIs
+
+ # for each session, get details
+ for uri in session_list:
+ session = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ session[property] = data[property]
+
+ sessions_results.append(session)
+ result["entries"] = sessions_results
+ return result
+
+ def clear_sessions(self):
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # if no active sessions, return as success
+ if data['Members@odata.count'] == 0:
+ return {'ret': True, 'changed': False, 'msg': "There are no active sessions"}
+
+ # loop to delete every active session
+ for session in data[u'Members']:
+ response = self.delete_request(self.root_uri + session[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"}
+
+ def get_firmware_update_capabilities(self):
+ result = {}
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+
+ result['entries'] = {}
+
+ data = response['data']
+
+ if "Actions" in data:
+ actions = data['Actions']
+ if len(actions) > 0:
+ for key in actions.keys():
+ action = actions.get(key)
+ if 'title' in action:
+ title = action['title']
+ else:
+ title = key
+ result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues',
+ ["Key TransferProtocol@Redfish.AllowableValues not found"])
+ else:
+ return {'ret': "False", 'msg': "Actions list is empty."}
+ else:
+ return {'ret': "False", 'msg': "Key Actions not found."}
+ return result
+
+ def _software_inventory(self, uri):
+ result = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ result['entries'] = []
+ for member in data[u'Members']:
+ uri = self.root_uri + member[u'@odata.id']
+ # Get details for each software or firmware member
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ software = {}
+ # Get these standard properties if present
+ for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
+ 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
+ 'ReleaseDate']:
+ if key in data:
+ software[key] = data.get(key)
+ result['entries'].append(software)
+ return result
+
+ def get_firmware_inventory(self):
+ if self.firmware_uri is None:
+ return {'ret': False, 'msg': 'No FirmwareInventory resource found'}
+ else:
+ return self._software_inventory(self.firmware_uri)
+
+ def get_software_inventory(self):
+ if self.software_uri is None:
+ return {'ret': False, 'msg': 'No SoftwareInventory resource found'}
+ else:
+ return self._software_inventory(self.software_uri)
+
+ def simple_update(self, update_opts):
+ image_uri = update_opts.get('update_image_uri')
+ protocol = update_opts.get('update_protocol')
+ targets = update_opts.get('update_targets')
+ creds = update_opts.get('update_creds')
+
+ if not image_uri:
+ return {'ret': False, 'msg':
+ 'Must specify update_image_uri for the SimpleUpdate command'}
+
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Actions' not in data:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ if '#UpdateService.SimpleUpdate' not in data['Actions']:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ action = data['Actions']['#UpdateService.SimpleUpdate']
+ if 'target' not in action:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ update_uri = action['target']
+ if protocol:
+ default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF',
+ 'SCP', 'TFTP', 'OEM', 'NFS']
+ allowable_values = self._get_allowable_values(action,
+ 'TransferProtocol',
+ default_values)
+ if protocol not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified update_protocol (%s) not supported '
+ 'by service. Supported protocols: %s' %
+ (protocol, allowable_values)}
+ if targets:
+ allowable_values = self._get_allowable_values(action, 'Targets')
+ if allowable_values:
+ for target in targets:
+ if target not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified target (%s) not supported '
+ 'by service. Supported targets: %s' %
+ (target, allowable_values)}
+
+ payload = {
+ 'ImageURI': image_uri
+ }
+ if protocol:
+ payload["TransferProtocol"] = protocol
+ if targets:
+ payload["Targets"] = targets
+ if creds:
+ if creds.get('username'):
+ payload["Username"] = creds.get('username')
+ if creds.get('password'):
+ payload["Password"] = creds.get('password')
+ response = self.post_request(self.root_uri + update_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "SimpleUpdate requested"}
+
+ def get_bios_attributes(self, systems_uri):
+ result = {}
+ bios_attributes = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ for attribute in data[u'Attributes'].items():
+ bios_attributes[attribute[0]] = attribute[1]
+ result["entries"] = bios_attributes
+ return result
+
+ def get_multi_bios_attributes(self):
+ return self.aggregate_systems(self.get_bios_attributes)
+
+ def _get_boot_options_dict(self, boot):
+ # Get these entries from BootOption, if present
+ properties = ['DisplayName', 'BootOptionReference']
+
+ # Retrieve BootOptions if present
+ if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']:
+ boot_options_uri = boot['BootOptions']["@odata.id"]
+ # Get BootOptions resource
+ response = self.get_request(self.root_uri + boot_options_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+
+ # Retrieve Members array
+ if 'Members' not in data:
+ return {}
+ members = data['Members']
+ else:
+ members = []
+
+ # Build dict of BootOptions keyed by BootOptionReference
+ boot_options_dict = {}
+ for member in members:
+ if '@odata.id' not in member:
+ return {}
+ boot_option_uri = member['@odata.id']
+ response = self.get_request(self.root_uri + boot_option_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+ if 'BootOptionReference' not in data:
+ return {}
+ boot_option_ref = data['BootOptionReference']
+
+ # fetch the props to display for this boot device
+ boot_props = {}
+ for prop in properties:
+ if prop in data:
+ boot_props[prop] = data[prop]
+
+ boot_options_dict[boot_option_ref] = boot_props
+
+ return boot_options_dict
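+
+ # Illustrative shape of the returned dict (hypothetical boot option references):
+ # {
+ # 'Boot0001': {'DisplayName': 'Hard Drive', 'BootOptionReference': 'Boot0001'},
+ # 'Boot0002': {'DisplayName': 'PXE Network', 'BootOptionReference': 'Boot0002'}
+ # }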
+
+ def get_boot_order(self, systems_uri):
+ result = {}
+
+ # Retrieve System resource
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # Build boot device list
+ boot_device_list = []
+ for ref in boot_order:
+ boot_device_list.append(
+ boot_options_dict.get(ref, {'BootOptionReference': ref}))
+
+ result["entries"] = boot_device_list
+ return result
+
+ def get_multi_boot_order(self):
+ return self.aggregate_systems(self.get_boot_order)
+
+ def get_boot_override(self, systems_uri):
+ result = {}
+
+ properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget",
+ "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"]
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if 'Boot' not in data:
+ return {'ret': False, 'msg': "Key Boot not found"}
+
+ boot = data['Boot']
+
+ boot_overrides = {}
+ if "BootSourceOverrideEnabled" in boot:
+ if boot["BootSourceOverrideEnabled"] is not False:
+ for property in properties:
+ if property in boot:
+ if boot[property] is not None:
+ boot_overrides[property] = boot[property]
+ else:
+ return {'ret': False, 'msg': "No boot override is enabled."}
+
+ result['entries'] = boot_overrides
+ return result
+
+ def get_multi_boot_override(self):
+ return self.aggregate_systems(self.get_boot_override)
+
+ def set_bios_default_settings(self):
+ result = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"]
+
+ response = self.post_request(self.root_uri + reset_bios_settings_uri, {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
+
+ def set_boot_override(self, boot_opts):
+ result = {}
+ key = "Boot"
+
+ bootdevice = boot_opts.get('bootdevice')
+ uefi_target = boot_opts.get('uefi_target')
+ boot_next = boot_opts.get('boot_next')
+ override_enabled = boot_opts.get('override_enabled')
+
+ if not bootdevice and override_enabled != 'Disabled':
+ return {'ret': False,
+ 'msg': "bootdevice option required for temporary boot override"}
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ boot = data[key]
+
+ annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues'
+ if annotation in boot:
+ allowable_values = boot[annotation]
+ if isinstance(allowable_values, list) and bootdevice not in allowable_values:
+ return {'ret': False,
+ 'msg': "Boot device %s not in list of allowable values (%s)" %
+ (bootdevice, allowable_values)}
+
+ # read existing values
+ cur_enabled = boot.get('BootSourceOverrideEnabled')
+ target = boot.get('BootSourceOverrideTarget')
+ cur_uefi_target = boot.get('UefiTargetBootSourceOverride')
+ cur_boot_next = boot.get('BootNext')
+
+ if override_enabled == 'Disabled':
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled
+ }
+ }
+ elif bootdevice == 'UefiTarget':
+ if not uefi_target:
+ return {'ret': False,
+ 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
+ if override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'UefiTargetBootSourceOverride': uefi_target
+ }
+ }
+ elif bootdevice == 'UefiBootNext':
+ if not boot_next:
+ return {'ret': False,
+ 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
+ if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'BootNext': boot_next
+ }
+ }
+ else:
+ if cur_enabled == override_enabled and target == bootdevice:
+ # If properties are already set, no changes needed
+ return {'ret': True, 'changed': False}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice
+ }
+ }
+
+ response = self.patch_request(self.root_uri + self.systems_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
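+
+ # Illustrative example (hypothetical request): asking for a one-time PXE boot
+ # with override_enabled='Once' and bootdevice='Pxe' results in a PATCH payload:
+ # {'Boot': {'BootSourceOverrideEnabled': 'Once',
+ # 'BootSourceOverrideTarget': 'Pxe'}}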
+
+ def set_bios_attributes(self, attributes):
+ result = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Make a copy of the attributes dict
+ attrs_to_patch = dict(attributes)
+
+ # Check the attributes
+ for attr in attributes:
+ if attr not in data[u'Attributes']:
+ return {'ret': False, 'msg': "BIOS attribute %s not found" % attr}
+ # If already set to requested value, remove it from PATCH payload
+ if data[u'Attributes'][attr] == attributes[attr]:
+ del attrs_to_patch[attr]
+
+ # Return success w/ changed=False if no attrs need to be changed
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "BIOS attributes already set"}
+
+ # Get the SettingsObject URI
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
+
+ # Construct payload and issue PATCH command
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified BIOS attribute"}
+
+ def set_boot_order(self, boot_list):
+ if not boot_list:
+ return {'ret': False,
+ 'msg': "boot_order list required for SetBootOrder command"}
+
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # validate boot_list against BootOptionReferences if available
+ if boot_options_dict:
+ boot_option_references = boot_options_dict.keys()
+ for ref in boot_list:
+ if ref not in boot_option_references:
+ return {'ret': False,
+ 'msg': "BootOptionReference %s not found in BootOptions" % ref}
+
+ # If requested BootOrder is already set, nothing to do
+ if boot_order == boot_list:
+ return {'ret': True, 'changed': False,
+ 'msg': "BootOrder already set to %s" % boot_list}
+
+ payload = {
+ 'Boot': {
+ 'BootOrder': boot_list
+ }
+ }
+ response = self.patch_request(self.root_uri + systems_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "BootOrder set"}
+
+ def set_default_boot_order(self):
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # get the #ComputerSystem.SetDefaultBootOrder Action and target URI
+ action = '#ComputerSystem.SetDefaultBootOrder'
+ if 'Actions' not in data or action not in data['Actions']:
+ return {'ret': False, 'msg': 'Action %s not found' % action}
+ if 'target' not in data['Actions'][action]:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action}
+ action_uri = data['Actions'][action]['target']
+
+ # POST to Action URI
+ payload = {}
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "BootOrder set to default"}
+
+ def get_chassis_inventory(self):
+ result = {}
+ chassis_results = []
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag',
+ 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ chassis_result = {}
+ for property in properties:
+ if property in data:
+ chassis_result[property] = data[property]
+ chassis_results.append(chassis_result)
+
+ result["entries"] = chassis_results
+ return result
+
+ def get_fan_inventory(self):
+ result = {}
+ fan_results = []
+ key = "Thermal"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ # match: found a 'Thermal' entry, which holds the fan information
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for device in data[u'Fans']:
+ fan = {}
+ for property in properties:
+ if property in device:
+ fan[property] = device[property]
+ fan_results.append(fan)
+ result["entries"] = fan_results
+ return result
+
+ def get_chassis_power(self):
+ result = {}
+ key = "Power"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PowerAllocatedWatts',
+ 'PowerAvailableWatts', 'PowerCapacityWatts',
+ 'PowerConsumedWatts', 'PowerMetrics',
+ 'PowerRequestedWatts', 'RelatedItem', 'Status']
+
+ chassis_power_results = []
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ chassis_power_result = {}
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ response = self.get_request(self.root_uri + data[key]['@odata.id'])
+ data = response['data']
+ if 'PowerControl' in data:
+ if len(data['PowerControl']) > 0:
+ data = data['PowerControl'][0]
+ for property in properties:
+ if property in data:
+ chassis_power_result[property] = data[property]
+ else:
+ return {'ret': False, 'msg': 'Key PowerControl not found.'}
+ chassis_power_results.append(chassis_power_result)
+ else:
+ return {'ret': False, 'msg': 'Key Power not found.'}
+
+ result['entries'] = chassis_power_results
+ return result
+
+ def get_chassis_thermals(self):
+ result = {}
+ sensors = []
+ key = "Thermal"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical',
+ 'UpperThresholdFatal', 'UpperThresholdNonCritical',
+ 'LowerThresholdCritical', 'LowerThresholdFatal',
+ 'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
+ 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
+ 'SensorNumber']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if "Temperatures" in data:
+ for sensor in data[u'Temperatures']:
+ sensor_result = {}
+ for property in properties:
+ if property in sensor:
+ if sensor[property] is not None:
+ sensor_result[property] = sensor[property]
+ sensors.append(sensor_result)
+
+ if not sensors:
+ return {'ret': False, 'msg': 'Key Temperatures was not found.'}
+
+ result['entries'] = sensors
+ return result
+
+ def get_cpu_inventory(self, systems_uri):
+ result = {}
+ cpu_list = []
+ cpu_results = []
+ key = "Processors"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz',
+ 'TotalCores', 'TotalThreads', 'Status']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ processors_uri = data[key]["@odata.id"]
+
+ # Get a list of all CPUs and build respective URIs
+ response = self.get_request(self.root_uri + processors_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for cpu in data[u'Members']:
+ cpu_list.append(cpu[u'@odata.id'])
+
+ for c in cpu_list:
+ cpu = {}
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ cpu[property] = data[property]
+
+ cpu_results.append(cpu)
+ result["entries"] = cpu_results
+ return result
+
+ def get_multi_cpu_inventory(self):
+ return self.aggregate_systems(self.get_cpu_inventory)
+
+ def get_memory_inventory(self, systems_uri):
+ result = {}
+ memory_list = []
+ memory_results = []
+ key = "Memory"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber',
+ 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ memory_uri = data[key]["@odata.id"]
+
+ # Get a list of all DIMMs and build respective URIs
+ response = self.get_request(self.root_uri + memory_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for dimm in data[u'Members']:
+ memory_list.append(dimm[u'@odata.id'])
+
+ for m in memory_list:
+ dimm = {}
+ uri = self.root_uri + m
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if "Status" in data:
+ if "State" in data["Status"]:
+ if data["Status"]["State"] == "Absent":
+ continue
+ else:
+ continue
+
+ for property in properties:
+ if property in data:
+ dimm[property] = data[property]
+
+ memory_results.append(dimm)
+ result["entries"] = memory_results
+ return result
+
+ def get_multi_memory_inventory(self):
+ return self.aggregate_systems(self.get_memory_inventory)
+
+ def get_nic_inventory(self, resource_uri):
+ result = {}
+ nic_list = []
+ nic_results = []
+ key = "EthernetInterfaces"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
+ 'NameServers', 'MACAddress', 'PermanentMACAddress',
+ 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ ethernetinterfaces_uri = data[key]["@odata.id"]
+
+ # Get a list of all network controllers and build respective URIs
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for nic in data[u'Members']:
+ nic_list.append(nic[u'@odata.id'])
+
+ for n in nic_list:
+ nic = {}
+ uri = self.root_uri + n
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ nic[property] = data[property]
+
+ nic_results.append(nic)
+ result["entries"] = nic_results
+ return result
+
+ def get_multi_nic_inventory(self, resource_type):
+ ret = True
+ entries = []
+
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uris = self.systems_uris
+ elif resource_type == 'Manager':
+ resource_uris = self.manager_uris
+
+ for resource_uri in resource_uris:
+ inventory = self.get_nic_inventory(resource_uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({'resource_uri': resource_uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
+
+ def get_virtualmedia(self, resource_uri):
+ result = {}
+ virtualmedia_list = []
+ virtualmedia_results = []
+ key = "VirtualMedia"
+ # Get these entries, but do not fail if not found
+ properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes',
+ 'Image', 'ImageName', 'Name', 'WriteProtected',
+ 'TransferMethod', 'TransferProtocolType']
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ virtualmedia_uri = data[key]["@odata.id"]
+
+ # Get a list of all virtual media and build respective URIs
+ response = self.get_request(self.root_uri + virtualmedia_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for virtualmedia in data[u'Members']:
+ virtualmedia_list.append(virtualmedia[u'@odata.id'])
+
+ for n in virtualmedia_list:
+ virtualmedia = {}
+ uri = self.root_uri + n
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ virtualmedia[property] = data[property]
+
+ virtualmedia_results.append(virtualmedia)
+ result["entries"] = virtualmedia_results
+ return result
+
+ def get_multi_virtualmedia(self):
+ ret = True
+ entries = []
+
+ resource_uris = self.manager_uris
+
+ for resource_uri in resource_uris:
+ virtualmedia = self.get_virtualmedia(resource_uri)
+ ret = virtualmedia.pop('ret') and ret
+ if 'entries' in virtualmedia:
+ entries.append(({'resource_uri': resource_uri},
+ virtualmedia['entries']))
+ return dict(ret=ret, entries=entries)
+
+ @staticmethod
+ def _find_empty_virt_media_slot(resources, media_types,
+ media_match_strict=True):
+ for uri, data in resources.items():
+ # check MediaTypes
+ if 'MediaTypes' in data and media_types:
+ if not set(media_types).intersection(set(data['MediaTypes'])):
+ continue
+ else:
+ if media_match_strict:
+ continue
+ # if ejected, 'Inserted' should be False and 'ImageName' cleared
+ if (not data.get('Inserted', False) and
+ not data.get('ImageName')):
+ return uri, data
+ return None, None
+
+ @staticmethod
+ def _virt_media_image_inserted(resources, image_url):
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ if data.get('Inserted', False) and data.get('ImageName'):
+ return True
+ return False
+
+ @staticmethod
+ def _find_virt_media_to_eject(resources, image_url):
+ matched_uri, matched_data = None, None
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ matched_uri, matched_data = uri, data
+ if data.get('Inserted', True) and data.get('ImageName', 'x'):
+ return uri, data, True
+ return matched_uri, matched_data, False
+
+ def _read_virt_media_resources(self, uri_list):
+ resources = {}
+ headers = {}
+ for uri in uri_list:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ resources[uri] = response['data']
+ headers[uri] = response['headers']
+ return resources, headers
+
+ @staticmethod
+ def _insert_virt_media_payload(options, param_map, data, ai):
+ payload = {
+ 'Image': options.get('image_url')
+ }
+ for param, option in param_map.items():
+ if options.get(option) is not None and param in data:
+ allowable = ai.get(param, {}).get('AllowableValues', [])
+ if allowable and options.get(option) not in allowable:
+ return {'ret': False,
+ 'msg': "Value '%s' specified for option '%s' not "
+ "in list of AllowableValues %s" % (
+ options.get(option), option,
+ allowable)}
+ payload[param] = options.get(option)
+ return payload
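+
+ # Illustrative example (hypothetical image URL, assuming the resource exposes
+ # the Inserted and WriteProtected properties): with options
+ # {'image_url': 'http://media.example.com/boot.iso', 'inserted': True,
+ # 'write_protected': True}
+ # the constructed payload would be
+ # {'Image': 'http://media.example.com/boot.iso', 'Inserted': True,
+ # 'WriteProtected': True}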
+
+ def virtual_media_insert_via_patch(self, options, param_map, uri, data):
+ # get AllowableValues
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in data.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ if 'Inserted' not in payload:
+ payload['Inserted'] = True
+ # PATCH the resource
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def virtual_media_insert(self, options):
+ param_map = {
+ 'Inserted': 'inserted',
+ 'WriteProtected': 'write_protected',
+ 'UserName': 'username',
+ 'Password': 'password',
+ 'TransferProtocolType': 'transfer_protocol_type',
+ 'TransferMethod': 'transfer_method'
+ }
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaInsert"}
+ media_types = options.get('media_types')
+
+ # locate and read the VirtualMedia resources
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # see if image already inserted; if so, nothing to do
+ if self._virt_media_image_inserted(resources, image_url):
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia '%s' already inserted" % image_url}
+
+ # find an empty slot to insert the media
+ # try first with strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=True)
+ if not uri:
+ # if not found, try without strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=False)
+ if not uri:
+ return {'ret': False,
+ 'msg': "Unable to find an available VirtualMedia resource "
+ "%s" % ('supporting ' + str(media_types)
+ if media_types else '')}
+
+ # confirm InsertMedia action found
+ if ('Actions' not in data or
+ '#VirtualMedia.InsertMedia' not in data['Actions']):
+ # try to insert via PATCH if no InsertMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.InsertMedia'}
+ return self.virtual_media_insert_via_patch(options, param_map,
+ uri, data)
+
+ # get the action property
+ action = data['Actions']['#VirtualMedia.InsertMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI missing from Action "
+ "#VirtualMedia.InsertMedia"}
+ action_uri = action['target']
+ # get ActionInfo or AllowableValues
+ ai = self._get_all_action_info_values(action)
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def virtual_media_eject_via_patch(self, uri):
+ # construct payload
+ payload = {
+ 'Inserted': False,
+ 'Image': None
+ }
+ # PATCH resource
+ response = self.patch_request(self.root_uri + uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+
+ def virtual_media_eject(self, options):
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaEject"}
+
+ # locate and read the VirtualMedia resources
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # find the VirtualMedia resource to eject
+ uri, data, eject = self._find_virt_media_to_eject(resources, image_url)
+ if uri and eject:
+ if ('Actions' not in data or
+ '#VirtualMedia.EjectMedia' not in data['Actions']):
+ # try to eject via PATCH if no EjectMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.EjectMedia'}
+ return self.virtual_media_eject_via_patch(uri)
+ else:
+ # POST to the EjectMedia Action
+ action = data['Actions']['#VirtualMedia.EjectMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI property missing from Action "
+ "#VirtualMedia.EjectMedia"}
+ action_uri = action['target']
+ # empty payload for Eject action
+ payload = {}
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri,
+ payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+ elif uri and not eject:
+ # already ejected: return success but changed=False
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia image '%s' already ejected" %
+ image_url}
+ else:
+ # return failure (no resources matching image_url found)
+ return {'ret': False, 'changed': False,
+ 'msg': "No VirtualMedia resource found with image '%s' "
+ "inserted" % image_url}
+
+ def get_psu_inventory(self):
+ result = {}
+ psu_list = []
+ psu_results = []
+ key = "PowerSupplies"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer',
+ 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType',
+ 'Status']
+
+ # Get a list of all Chassis and build URIs, then get all PowerSupplies
+ # from each Power entry in the Chassis
+ chassis_uri_list = self.chassis_uris
+ for chassis_uri in chassis_uri_list:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+ data = response['data']
+
+ if 'Power' in data:
+ power_uri = data[u'Power'][u'@odata.id']
+ else:
+ continue
+
+ response = self.get_request(self.root_uri + power_uri)
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ psu_list = data[key]
+ for psu in psu_list:
+ psu_not_present = False
+ psu_data = {}
+ for property in properties:
+ if property in psu:
+ if psu[property] is not None:
+ if property == 'Status':
+ if 'State' in psu[property]:
+ if psu[property]['State'] == 'Absent':
+ psu_not_present = True
+ psu_data[property] = psu[property]
+ if psu_not_present:
+ continue
+ psu_results.append(psu_data)
+
+ result["entries"] = psu_results
+ if not result["entries"]:
+ return {'ret': False, 'msg': "No PowerSupply objects found"}
+ return result
+
+ def get_multi_psu_inventory(self):
+ return self.aggregate_systems(self.get_psu_inventory)
+
+ def get_system_inventory(self, systems_uri):
+ result = {}
+ inventory = {}
+ # Get these entries, but do not fail if not found
+ properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
+ 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
+ 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
+ 'ProcessorSummary', 'TrustedModules', 'Name', 'Id']
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ inventory[property] = data[property]
+
+ result["entries"] = inventory
+ return result
+
+ def get_multi_system_inventory(self):
+ return self.aggregate_systems(self.get_system_inventory)
+
+ def get_network_protocols(self):
+ result = {}
+ service_result = {}
+ # Find NetworkProtocol
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'NetworkProtocol' not in data:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+ networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
+
+ response = self.get_request(self.root_uri + networkprotocol_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ for protocol_service in protocol_services:
+ if protocol_service in data.keys():
+ service_result[protocol_service] = data[protocol_service]
+
+ result['ret'] = True
+ result["entries"] = service_result
+ return result
+
+ def set_network_protocols(self, manager_services):
+ # Check input data validity
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ protocol_state_onlist = ['true', 'True', True, 'on', 1]
+ protocol_state_offlist = ['false', 'False', False, 'off', 0]
+ payload = {}
+ for service_name in manager_services.keys():
+ if service_name not in protocol_services:
+ return {'ret': False, 'msg': "Service name %s is invalid" % service_name}
+ payload[service_name] = {}
+ for service_property in manager_services[service_name].keys():
+ value = manager_services[service_name][service_property]
+ if service_property in ['ProtocolEnabled', 'protocolenabled']:
+ if value in protocol_state_onlist:
+ payload[service_name]['ProtocolEnabled'] = True
+ elif value in protocol_state_offlist:
+ payload[service_name]['ProtocolEnabled'] = False
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ elif service_property in ['port', 'Port']:
+ if isinstance(value, int):
+ payload[service_name]['Port'] = value
+ elif isinstance(value, str) and value.isdigit():
+ payload[service_name]['Port'] = int(value)
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ else:
+ payload[service_name][service_property] = value
+
+ # Find NetworkProtocol
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'NetworkProtocol' not in data:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+ networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
+
+ # Check whether each requested service property is supported
+ response = self.get_request(self.root_uri + networkprotocol_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for service_name in payload.keys():
+ if service_name not in data:
+ return {'ret': False, 'msg': "%s service not supported" % service_name}
+ for service_property in payload[service_name].keys():
+ if service_property not in data[service_name]:
+ return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)}
+
+ # if the protocol is already set, nothing to do
+ need_change = False
+ for service_name in payload.keys():
+ for service_property in payload[service_name].keys():
+ value = payload[service_name][service_property]
+ if value != data[service_name][service_property]:
+ need_change = True
+ break
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"}
+
+ response = self.patch_request(self.root_uri + networkprotocol_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"}
+
+ @staticmethod
+ def to_singular(resource_name):
+ if resource_name.endswith('ies'):
+ resource_name = resource_name[:-3] + 'y'
+ elif resource_name.endswith('s'):
+ resource_name = resource_name[:-1]
+ return resource_name
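+
+ # Illustrative examples: 'PowerSupplies' -> 'PowerSupply', 'Fans' -> 'Fan',
+ # 'Memory' -> 'Memory' (no trailing 's', so it is returned unchanged).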
+
+ def get_health_resource(self, subsystem, uri, health, expanded):
+ status = 'Status'
+
+ if expanded:
+ d = expanded
+ else:
+ r = self.get_request(self.root_uri + uri)
+ if r.get('ret'):
+ d = r.get('data')
+ else:
+ return
+
+ if 'Members' in d: # collections case
+ for m in d.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ p = r.get('data')
+ if p:
+ e = {self.to_singular(subsystem.lower()) + '_uri': u,
+ status: p.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+ else: # non-collections case
+ e = {self.to_singular(subsystem.lower()) + '_uri': uri,
+ status: d.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+
+ def get_health_subsystem(self, subsystem, data, health):
+ if subsystem in data:
+ sub = data.get(subsystem)
+ if isinstance(sub, list):
+ for r in sub:
+ if '@odata.id' in r:
+ uri = r.get('@odata.id')
+ expanded = None
+ if '#' in uri and len(r) > 1:
+ expanded = r
+ self.get_health_resource(subsystem, uri, health, expanded)
+ elif isinstance(sub, dict):
+ if '@odata.id' in sub:
+ uri = sub.get('@odata.id')
+ self.get_health_resource(subsystem, uri, health, None)
+ elif 'Members' in data:
+ for m in data.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ d = r.get('data')
+ self.get_health_subsystem(subsystem, d, health)
+
+ def get_health_report(self, category, uri, subsystems):
+ result = {}
+ health = {}
+ status = 'Status'
+
+ # Get health status of top level resource
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ health[category] = {status: data.get(status, "Status not available")}
+
+ # Get health status of subsystems
+ for sub in subsystems:
+ d = None
+ if sub.startswith('Links.'): # ex: Links.PCIeDevices
+ sub = sub[len('Links.'):]
+ d = data.get('Links', {})
+ elif '.' in sub: # ex: Thermal.Fans
+ p, sub = sub.split('.')
+ u = data.get(p, {}).get('@odata.id')
+ if u:
+ r = self.get_request(self.root_uri + u)
+ if r['ret']:
+ d = r['data']
+ if not d:
+ continue
+ else: # ex: Memory
+ d = data
+ health[sub] = []
+ self.get_health_subsystem(sub, d, health)
+ if not health[sub]:
+ del health[sub]
+
+ result["entries"] = health
+ return result
+
+ def get_system_health_report(self, systems_uri):
+ subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage',
+ 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts',
+ 'NetworkInterfaces.NetworkDeviceFunctions']
+ return self.get_health_report('System', systems_uri, subsystems)
+
+ def get_multi_system_health_report(self):
+ return self.aggregate_systems(self.get_system_health_report)
+
+ def get_chassis_health_report(self, chassis_uri):
+ subsystems = ['Power.PowerSupplies', 'Thermal.Fans',
+ 'Links.PCIeDevices']
+ return self.get_health_report('Chassis', chassis_uri, subsystems)
+
+ def get_multi_chassis_health_report(self):
+ return self.aggregate_chassis(self.get_chassis_health_report)
+
+ def get_manager_health_report(self, manager_uri):
+ subsystems = []
+ return self.get_health_report('Manager', manager_uri, subsystems)
+
+ def get_multi_manager_health_report(self):
+ return self.aggregate_managers(self.get_manager_health_report)
+
+ def set_manager_nic(self, nic_addr, nic_config):
+ # Get EthernetInterface collection
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'EthernetInterfaces' not in data:
+ return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
+ ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+
+ # Find target EthernetInterface
+ target_ethernet_uri = None
+ target_ethernet_current_setting = None
+ if nic_addr == 'null':
+ # When nic_addr is not specified, fall back to the host part of root_uri to find the matching EthernetInterface
+ nic_addr = (self.root_uri).split('/')[-1]
+ nic_addr = nic_addr.split(':')[0] # split port if existing
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if '"' + nic_addr + '"' in str(data) or "'" + nic_addr + "'" in str(data):
+ target_ethernet_uri = uri
+ target_ethernet_current_setting = data
+ break
+ if target_ethernet_uri is None:
+ return {'ret': False, 'msg': "No matched EthernetInterface found under Manager"}
+
+ # Convert input to payload and check validity
+ payload = {}
+ for property in nic_config.keys():
+ value = nic_config[property]
+ if property not in target_ethernet_current_setting:
+ return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property}
+ if isinstance(value, dict):
+ if isinstance(target_ethernet_current_setting[property], dict):
+ payload[property] = value
+ elif isinstance(target_ethernet_current_setting[property], list):
+ payload[property] = list()
+ payload[property].append(value)
+ else:
+ return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property}
+ else:
+ payload[property] = value
+
+ # If no change is needed, there is nothing to do. If an error is detected, report it
+ need_change = False
+ for property in payload.keys():
+ set_value = payload[property]
+ cur_value = target_ethernet_current_setting[property]
+ # type is simple(not dict/list)
+ if not isinstance(set_value, dict) and not isinstance(set_value, list):
+ if set_value != cur_value:
+ need_change = True
+ # type is dict
+ if isinstance(set_value, dict):
+ for subprop in payload[property].keys():
+ if subprop not in target_ethernet_current_setting[property]:
+ return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
+ sub_set_value = payload[property][subprop]
+ sub_cur_value = target_ethernet_current_setting[property][subprop]
+ if sub_set_value != sub_cur_value:
+ need_change = True
+ # type is list
+ if isinstance(set_value, list):
+ for i in range(len(set_value)):
+ for subprop in payload[property][i].keys():
+ if subprop not in target_ethernet_current_setting[property][i]:
+ return {'ret': False, 'msg': "Sub-property %s in nic_config is invalid" % subprop}
+ sub_set_value = payload[property][i][subprop]
+ sub_cur_value = target_ethernet_current_setting[property][i][subprop]
+ if sub_set_value != sub_cur_value:
+ need_change = True
+
+ if not need_change:
+ return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"}
+
+ response = self.patch_request(self.root_uri + target_ethernet_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py
new file mode 100644
index 00000000..0fb6e9b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/redhat.py
@@ -0,0 +1,270 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), James Laska
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+import shutil
+import tempfile
+import types
+
+from ansible.module_utils.six.moves import configparser
+
+
+class RegistrationBase(object):
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ redhat_repo = '/etc/yum.repos.d/redhat.repo'
+ if os.path.isfile(redhat_repo):
+ os.unlink(redhat_repo)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if os.path.isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.config = self._read_config()
+ self.module = module
+
+ def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
+ '''
+ Load RHSM configuration from /etc/rhsm/rhsm.conf.
+ Returns:
+ * ConfigParser object
+ '''
+
+ # Read RHSM defaults ...
+ cp = configparser.ConfigParser()
+ cp.read(rhsm_conf)
+
+ # Add support for specifying a default value without having to stand up extra configuration
+ # Yeah, I know this should be subclassed ... but, oh well
+ def get_option_default(self, key, default=''):
+ sect, opt = key.split('.', 1)
+ if self.has_section(sect) and self.has_option(sect, opt):
+ return self.get(sect, opt)
+ else:
+ return default
+
+ cp.get_option = types.MethodType(get_option_default, cp)
+
+ return cp
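+
+ # Illustrative use of the patched helper (hypothetical option and default):
+ # cp = self._read_config()
+ # hostname = cp.get_option('server.hostname', default='subscription.example.com')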
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHN
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'system_hostname' becomes '--system.hostname'.
+ for k, v in kwargs.items():
+ if re.search(r'^(system|rhsm)_', k):
+ args.append('--%s=%s' % (k.replace('_', '.'), v))
+
+ self.module.run_command(args, check_rc=True)
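+
+ # Illustrative example (hypothetical values): configure(system_hostname='rhsm.example.com',
+ # rhsm_baseurl='https://cdn.example.com/pulp/repos') would run
+ # subscription-manager config --system.hostname=rhsm.example.com --rhsm.baseurl=https://cdn.example.com/pulp/repos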
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered to RHN.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHN.
+ '''
+ args = ['subscription-manager', 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, autosubscribe, activationkey):
+ '''
+ Register the current system to the provided RHN server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'register']
+
+ # Generate command arguments
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if autosubscribe:
+ args.append('--autosubscribe')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+
+ # Do the needful...
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unsubscribe(self):
+ '''
+ Unsubscribe a system from all subscribed channels
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unsubscribe', '--all']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ for pool in available_pools.filter(regexp):
+ pool.subscribe()
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def subscribe(self):
+ args = "subscription-manager subscribe --pool %s" % self.PoolId
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+ def __init__(self, module):
+ self.module = module
+ self.products = self._load_product_list()
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self):
+ """
+ Loads the list of all pools available to the system into a data structure
+ """
+ args = "subscription-manager list --available"
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "")  # remove spaces so the key can be used as an attribute name
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter(self, regexp='^$'):
+ '''
+ Return a list of RhsmPools whose name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
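+
+
+# Illustrative usage (hypothetical module object): subscribe the system to every
+# available pool whose name mentions 'Red Hat Enterprise Linux':
+#
+# pools = RhsmPools(module)
+# for pool in pools.filter(r'Red Hat Enterprise Linux'):
+#     pool.subscribe()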
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py
new file mode 100644
index 00000000..93d3bfcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/dellemc_idrac.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.0
+# Copyright (C) 2018 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from omsdk.sdkinfra import sdkinfra
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare, file_share_manager
+ from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
+ from omsdk.http.sdkwsmanbase import WsManOptions
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+class iDRACConnection:
+
+ def __init__(self, module_params):
+ if not HAS_OMSDK:
+ raise ImportError("Dell EMC OMSDK library is required for this module")
+ self.idrac_ip = module_params['idrac_ip']
+ self.idrac_user = module_params['idrac_user']
+ self.idrac_pwd = module_params['idrac_password']
+ self.idrac_port = module_params['idrac_port']
+ if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
+ raise ValueError("hostname, username and password required")
+ self.handle = None
+ self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
+ self.pOp = WsManOptions(port=self.idrac_port)
+ self.sdk = sdkinfra()
+ if self.sdk is None:
+ msg = "Could not initialize iDRAC drivers."
+ raise RuntimeError(msg)
+
+ def __enter__(self):
+ self.sdk.importPath()
+ self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds, pOptions=self.pOp)
+ if self.handle is None:
+ msg = "Could not find device driver for iDRAC with IP Address: {0}".format(self.idrac_ip)
+ raise RuntimeError(msg)
+ return self.handle
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.handle.disconnect()
+ return False
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py
new file mode 100644
index 00000000..9d02e550
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/dellemc/ome.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 1.3
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+SESSION_RESOURCE_COLLECTION = {
+ "SESSION": "SessionService/Sessions",
+ "SESSION_ID": "SessionService/Sessions('{Id}')",
+}
+
+
+class OpenURLResponse(object):
+ """Handles HTTPResponse"""
+
+ def __init__(self, resp):
+ self.body = None
+ self.resp = resp
+ if self.resp:
+ self.body = self.resp.read()
+
+ @property
+ def json_data(self):
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ raise ValueError("Unable to parse json")
+
+ @property
+ def status_code(self):
+ return self.resp.getcode()
+
+ @property
+ def success(self):
+ return self.status_code in (200, 201, 202, 204)
+
+ @property
+ def token_header(self):
+ return self.resp.headers.get('X-Auth-Token')
+
+
+class RestOME(object):
+ """Handles OME API requests"""
+
+ def __init__(self, module_params=None, req_session=False):
+ self.module_params = module_params
+ self.hostname = self.module_params["hostname"]
+ self.username = self.module_params["username"]
+ self.password = self.module_params["password"]
+ self.port = self.module_params["port"]
+ self.req_session = req_session
+ self.session_id = None
+ self.protocol = 'https'
+ self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+
+ def _get_base_url(self):
+ """builds base url"""
+ return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)
+
+ def _build_url(self, path, query_param=None):
+ """builds complete url"""
+ url = path
+ base_uri = self._get_base_url()
+ if path:
+ url = '{0}/{1}'.format(base_uri, path)
+ if query_param:
+ url += "?{0}".format(urlencode(query_param))
+ return url
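+
+ # Illustrative example (hypothetical host): with hostname='ome.example.com' and
+ # port=443, _build_url('DeviceService/Devices', query_param={'$top': 10})
+ # returns 'https://ome.example.com:443/api/DeviceService/Devices?%24top=10'.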
+
+ def _url_common_args_spec(self, method, api_timeout, headers=None):
+ """Creates an argument common spec"""
+ req_header = self._headers
+ if headers:
+ req_header.update(headers)
+ url_kwargs = {
+ "method": method,
+ "validate_certs": False,
+ "use_proxy": True,
+ "headers": req_header,
+ "timeout": api_timeout,
+ "follow_redirects": 'all',
+ }
+ return url_kwargs
+
+ def _args_without_session(self, method, api_timeout=30, headers=None):
+ """Creates an argument spec in case of basic authentication"""
+ req_header = self._headers
+ if headers:
+ req_header.update(headers)
+ url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
+ url_kwargs["url_username"] = self.username
+ url_kwargs["url_password"] = self.password
+ url_kwargs["force_basic_auth"] = True
+ return url_kwargs
+
+ def _args_with_session(self, method, api_timeout=30, headers=None):
+ """Creates an argument spec, in case of authentication with session"""
+ url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
+ url_kwargs["force_basic_auth"] = False
+ return url_kwargs
+
+ def invoke_request(self, method, path, data=None, query_param=None, headers=None,
+ api_timeout=30, dump=True):
+ """
+ Sends a request via open_url
+ Returns :class:`OpenURLResponse` object.
+ :arg method: HTTP verb to use for the request
+ :arg path: path to request without query parameter
+ :arg data: (optional) Payload to send with the request
+ :arg query_param: (optional) Dictionary of query parameters to send with the request
+ :arg headers: (optional) Dictionary of HTTP Headers to send with the
+ request
+ :arg api_timeout: (optional) How long to wait for the server to send
+ data before giving up
+ :arg dump: (optional) whether to serialize the payload to JSON before sending.
+ :returns: OpenURLResponse
+ """
+ try:
+ if 'X-Auth-Token' in self._headers:
+ url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
+ else:
+ url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
+ if data and dump:
+ data = json.dumps(data)
+ url = self._build_url(path, query_param=query_param)
+ resp = open_url(url, data=data, **url_kwargs)
+ resp_data = OpenURLResponse(resp)
+ except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
+ raise err
+ return resp_data
+
+ def __enter__(self):
+ """Creates a session and stores its token in the request headers"""
+ if self.req_session:
+ payload = {'UserName': self.username,
+ 'Password': self.password,
+ 'SessionType': 'API', }
+ path = SESSION_RESOURCE_COLLECTION["SESSION"]
+ resp = self.invoke_request('POST', path, data=payload)
+ if resp and resp.success:
+ self.session_id = resp.json_data.get("Id")
+ self._headers["X-Auth-Token"] = resp.token_header
+ else:
+ msg = "Could not create the session"
+ raise ConnectionError(msg)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ """Deletes the session that was created for these requests"""
+ if self.session_id:
+ path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
+ self.invoke_request('DELETE', path)
+ return False
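
For reference, a minimal usage sketch of this helper, assuming hypothetical connection parameters and an illustrative endpoint path (a real module would take these from its AnsibleModule params):

    params = {"hostname": "192.0.2.10", "username": "admin",
              "password": "secret", "port": 443}
    # req_session=True makes __enter__ create an OME session and __exit__ delete it.
    with RestOME(module_params=params, req_session=True) as ome:
        resp = ome.invoke_request("GET", "DeviceService/Devices")
        if resp.success:
            devices = resp.json_data.get("value", [])
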
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
new file mode 100644
index 00000000..297397e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
@@ -0,0 +1,78 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their
+# own license to the complete work.
+#
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+#
+# Contains LXCA common class
+# Lenovo xClarity Administrator (LXCA)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+try:
+ from pylxca import connect, disconnect
+ HAS_PYLXCA = True
+except ImportError:
+ HAS_PYLXCA = False
+
+
+PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
+
+
+def has_pylxca(module):
+ """
+ Check whether pylxca is installed and fail the module if it is not
+ :param module:
+ """
+ if not HAS_PYLXCA:
+ module.fail_json(msg=PYLXCA_REQUIRED)
+
+
+LXCA_COMMON_ARGS = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ auth_url=dict(required=True),
+)
+
+
+class connection_object:
+ def __init__(self, module):
+ self.module = module
+
+ def __enter__(self):
+ return setup_conn(self.module)
+
+ def __exit__(self, type, value, traceback):
+ close_conn()
+
+
+def setup_conn(module):
+ """
+ This function creates a connection to LXCA.
+ :param module:
+ :return: lxca connection
+ """
+ lxca_con = None
+ try:
+ lxca_con = connect(module.params['auth_url'],
+ module.params['login_user'],
+ module.params['login_password'],
+ "True")
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+ return lxca_con
+
+
+def close_conn():
+ """
+ This function closes the connection to LXCA.
+ :return: None
+ """
+ disconnect()
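
A short sketch of how a module typically consumes these helpers (the AnsibleModule construction is abbreviated and purely illustrative):

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(argument_spec=dict(LXCA_COMMON_ARGS))
    has_pylxca(module)  # fail early with PYLXCA_REQUIRED if pylxca is missing
    with connection_object(module) as lxca_con:
        # issue pylxca commands through lxca_con here; the connection is
        # closed automatically when the block exits
        pass
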
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py
new file mode 100644
index 00000000..3e16c716
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/saslprep.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from stringprep import (
+ in_table_a1,
+ in_table_b1,
+ in_table_c3,
+ in_table_c4,
+ in_table_c5,
+ in_table_c6,
+ in_table_c7,
+ in_table_c8,
+ in_table_c9,
+ in_table_c12,
+ in_table_c21_c22,
+ in_table_d1,
+ in_table_d2,
+)
+from unicodedata import normalize
+
+from ansible.module_utils.six import text_type
+
+
+def is_unicode_str(string):
+ return isinstance(string, text_type)
+
+
+def mapping_profile(string):
+ """RFC4013 Mapping profile implementation."""
+ # Regarding RFC4013,
+ # This profile specifies:
+ # - non-ASCII space characters [StringPrep, C.1.2] that can be
+ # mapped to SPACE (U+0020), and
+ # - the "commonly mapped to nothing" characters [StringPrep, B.1]
+ # that can be mapped to nothing.
+
+ tmp = []
+ for c in string:
+ # If not the "commonly mapped to nothing"
+ if not in_table_b1(c):
+ if in_table_c12(c):
+ # map non-ASCII space characters
+ # (that can be mapped) to Unicode space
+ tmp.append(u' ')
+ else:
+ tmp.append(c)
+
+ return u"".join(tmp)
+
+
+def is_ral_string(string):
+ """RFC3454 Check bidirectional category of the string"""
+ # Regarding RFC3454,
+ # Table D.1 lists the characters that belong
+ # to Unicode bidirectional categories "R" and "AL".
+ # If a string contains any RandALCat character, a RandALCat
+ # character MUST be the first character of the string, and a
+ # RandALCat character MUST be the last character of the string.
+ if in_table_d1(string[0]):
+ if not in_table_d1(string[-1]):
+ raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
+ return True
+ return False
+
+
+def prohibited_output_profile(string):
+ """RFC4013 Prohibited output profile implementation."""
+ # Implements:
+ # RFC4013, 2.3. Prohibited Output.
+ # This profile specifies the following characters as prohibited input:
+ # - Non-ASCII space characters [StringPrep, C.1.2]
+ # - ASCII control characters [StringPrep, C.2.1]
+ # - Non-ASCII control characters [StringPrep, C.2.2]
+ # - Private Use characters [StringPrep, C.3]
+ # - Non-character code points [StringPrep, C.4]
+ # - Surrogate code points [StringPrep, C.5]
+ # - Inappropriate for plain text characters [StringPrep, C.6]
+ # - Inappropriate for canonical representation characters [StringPrep, C.7]
+ # - Change display properties or deprecated characters [StringPrep, C.8]
+ # - Tagging characters [StringPrep, C.9]
+ # RFC4013, 2.4. Bidirectional Characters.
+ # RFC4013, 2.5. Unassigned Code Points.
+
+ # Determine how to handle bidirectional characters (RFC3454):
+ if is_ral_string(string):
+ # If a string contains any RandALCat characters,
+ # The string MUST NOT contain any LCat character:
+ is_prohibited_bidi_ch = in_table_d2
+ bidi_table = 'D.2'
+ else:
+ # Forbid RandALCat characters in LCat string:
+ is_prohibited_bidi_ch = in_table_d1
+ bidi_table = 'D.1'
+
+ RFC = 'RFC4013'
+ for c in string:
+ # RFC4013 2.3. Prohibited Output:
+ if in_table_c12(c):
+ raise ValueError('%s: prohibited non-ASCII space characters '
+ 'that cannot be replaced (C.1.2).' % RFC)
+ if in_table_c21_c22(c):
+ raise ValueError('%s: prohibited control characters (C.2.1, C.2.2).' % RFC)
+ if in_table_c3(c):
+ raise ValueError('%s: prohibited Private Use characters (C.3).' % RFC)
+ if in_table_c4(c):
+ raise ValueError('%s: prohibited non-character code points (C.4).' % RFC)
+ if in_table_c5(c):
+ raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC)
+ if in_table_c6(c):
+ raise ValueError('%s: prohibited inappropriate for plain text '
+ 'characters (C.6).' % RFC)
+ if in_table_c7(c):
+ raise ValueError('%s: prohibited inappropriate for canonical '
+ 'representation characters (C.7).' % RFC)
+ if in_table_c8(c):
+ raise ValueError('%s: prohibited change display properties / '
+ 'deprecated characters (C.8).' % RFC)
+ if in_table_c9(c):
+ raise ValueError('%s: prohibited tagging characters (C.9).' % RFC)
+
+ # RFC4013, 2.4. Bidirectional Characters:
+ if is_prohibited_bidi_ch(c):
+ raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table))
+
+ # RFC4013, 2.5. Unassigned Code Points:
+ if in_table_a1(c):
+ raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC)
+
+
+def saslprep(string):
+ """RFC4013 implementation.
+ Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454)
+ to prepare Unicode strings representing user names and passwords for comparison.
+ According to RFC4013, the "SASLprep" profile is intended to be used by
+ Simple Authentication and Security Layer (SASL) mechanisms
+ (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols
+ exchanging simple user names and/or passwords.
+
+ Args:
+ string (unicode string): Unicode string to validate and prepare.
+
+ Returns:
+ Prepared unicode string.
+ """
+ # RFC4013: "The algorithm assumes all strings are
+ # comprised of characters from the Unicode [Unicode] character set."
+ # Validate the string is a Unicode string
+ # (text_type is the string type if PY3 and unicode otherwise):
+ if not is_unicode_str(string):
+ raise TypeError('input must be of type %s, not %s' % (text_type, type(string)))
+
+ # RFC4013: 2.1. Mapping.
+ string = mapping_profile(string)
+
+ # RFC4013: 2.2. Normalization.
+ # "This profile specifies using Unicode normalization form KC."
+ string = normalize('NFKC', string)
+ if not string:
+ return u''
+
+ # RFC4013: 2.3. Prohibited Output.
+ # RFC4013: 2.4. Bidirectional Characters.
+ # RFC4013: 2.5. Unassigned Code Points.
+ prohibited_output_profile(string)
+
+ return string
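
A few worked examples of the behaviour described above (inputs chosen for illustration; the soft-hyphen case mirrors the examples in RFC 4013):

    saslprep(u'user\u00a0name')  # -> u'user name' (non-ASCII space mapped to SPACE)
    saslprep(u'I\u00adX')        # -> u'IX' (soft hyphen is "commonly mapped to nothing")
    saslprep(u'bad\u0007pwd')    # raises ValueError (prohibited ASCII control character)
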
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py
new file mode 100644
index 00000000..f5107fed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/scaleway.py
@@ -0,0 +1,195 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import re
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+def scaleway_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ query_parameters=dict(type='dict', default={}),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
+
+def payload_from_object(scw_object):
+ return dict(
+ (k, v)
+ for k, v in scw_object.items()
+ if k != 'id' and v is not None
+ )
+
+
+class ScalewayException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+# Specify a complete Link header, for validation purposes
+R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
+ (,<[^>]+>;\srel="(first|previous|next|last)")*'''
+# Specify a single relation, for iteration and string extraction purposes
+R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
+
+
+def parse_pagination_link(header):
+ if not re.match(R_LINK_HEADER, header, re.VERBOSE):
+ raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
+ else:
+ relations = header.split(',')
+ parsed_relations = {}
+ rc_relation = re.compile(R_RELATION)
+ for relation in relations:
+ match = rc_relation.match(relation)
+ if not match:
+ raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
+ data = match.groupdict()
+ parsed_relations[data['relation']] = data['target_IRI']
+ return parsed_relations
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Scaleway(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'X-Auth-Token': self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-Type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+
+ if not results.ok:
+ raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json.get(self.name)
+
+ def _url_builder(self, path, params):
+ d = self.module.params.get('query_parameters')
+ if params is not None:
+ d.update(params)
+ query_string = urlencode(d, doseq=True)
+
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
+
+ def send(self, method, path, data=None, headers=None, params=None):
+ url = self._url_builder(path=path, params=params)
+ self.warn(url)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ if self.headers['Content-Type'] == "application/json":
+ data = self.module.jsonify(data)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ')[0])
+
+ def get(self, path, data=None, headers=None, params=None):
+ return self.send(method='GET', path=path, data=data, headers=headers, params=params)
+
+ def put(self, path, data=None, headers=None, params=None):
+ return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
+
+ def post(self, path, data=None, headers=None, params=None):
+ return self.send(method='POST', path=path, data=data, headers=headers, params=params)
+
+ def delete(self, path, data=None, headers=None, params=None):
+ return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
+
+ def patch(self, path, data=None, headers=None, params=None):
+ return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
+
+ def update(self, path, data=None, headers=None, params=None):
+ return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)
+
+ def warn(self, x):
+ self.module.warn(str(x))
+
+
+SCALEWAY_LOCATION = {
+ 'par1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
+ 'EMEA-FR-PAR1': {'name': 'Paris 1', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-1'},
+
+ 'par2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},
+ 'EMEA-FR-PAR2': {'name': 'Paris 2', 'country': 'FR', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/fr-par-2'},
+
+ 'ams1': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
+ 'EMEA-NL-EVS': {'name': 'Amsterdam 1', 'country': 'NL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/nl-ams-1'},
+
+ 'waw1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
+ 'EMEA-PL-WAW1': {'name': 'Warsaw 1', 'country': 'PL', "api_endpoint": 'https://api.scaleway.com/instance/v1/zones/pl-waw-1'},
+}
+
+SCALEWAY_ENDPOINT = "https://api.scaleway.com"
+
+SCALEWAY_REGIONS = [
+ "fr-par",
+ "nl-ams",
+ "pl-waw",
+]
+
+SCALEWAY_ZONES = [
+ "fr-par-1",
+ "fr-par-2",
+ "nl-ams-1",
+ "pl-waw-1",
+]
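
For orientation, a sketch of how a concrete module builds on this base class, plus a worked pagination example (the subclass name, resource name and URL are illustrative):

    class ScalewayServerInfo(Scaleway):
        def __init__(self, module):
            super(ScalewayServerInfo, self).__init__(module)
            self.name = 'servers'  # collection fetched by get_resources()

    links = parse_pagination_link('<https://api.scaleway.com/servers?page=2>; rel="next"')
    # links == {'next': 'https://api.scaleway.com/servers?page=2'}
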
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
new file mode 100644
index 00000000..c17dcb1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+
+
+class BitbucketHelper:
+ BITBUCKET_API_URL = 'https://api.bitbucket.org'
+
+ error_messages = {
+ 'required_client_id': '`client_id` must be specified as a parameter or '
+ 'BITBUCKET_CLIENT_ID environment variable',
+ 'required_client_secret': '`client_secret` must be specified as a parameter or '
+ 'BITBUCKET_CLIENT_SECRET environment variable',
+ }
+
+ def __init__(self, module):
+ self.module = module
+ self.access_token = None
+
+ @staticmethod
+ def bitbucket_argument_spec():
+ return dict(
+ client_id=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
+ client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
+ )
+
+ def check_arguments(self):
+ if self.module.params['client_id'] is None:
+ self.module.fail_json(msg=self.error_messages['required_client_id'])
+
+ if self.module.params['client_secret'] is None:
+ self.module.fail_json(msg=self.error_messages['required_client_secret'])
+
+ def fetch_access_token(self):
+ self.check_arguments()
+
+ headers = {
+ 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret'])
+ }
+
+ info, content = self.request(
+ api_url='https://bitbucket.org/site/oauth2/access_token',
+ method='POST',
+ data='grant_type=client_credentials',
+ headers=headers,
+ )
+
+ if info['status'] == 200:
+ self.access_token = content['access_token']
+ else:
+ self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
+
+ def request(self, api_url, method, data=None, headers=None):
+ headers = headers or {}
+
+ if self.access_token:
+ headers.update({
+ 'Authorization': 'Bearer {0}'.format(self.access_token),
+ })
+
+ if isinstance(data, dict):
+ data = self.module.jsonify(data)
+ headers.update({
+ 'Content-type': 'application/json',
+ })
+
+ response, info = fetch_url(
+ module=self.module,
+ url=api_url,
+ method=method,
+ headers=headers,
+ data=data,
+ force=True,
+ )
+
+ content = {}
+
+ if response is not None:
+ body = to_text(response.read())
+ if body:
+ content = json.loads(body)
+
+ return info, content
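
A hedged sketch of the intended call flow (the repository endpoint and workspace/repo names are illustrative, not part of this file):

    bitbucket = BitbucketHelper(module)
    bitbucket.fetch_access_token()  # exchanges client_id/client_secret for a bearer token
    info, content = bitbucket.request(
        api_url=BitbucketHelper.BITBUCKET_API_URL + '/2.0/repositories/my-team/my-repo',
        method='GET',
    )
    if info['status'] == 200:
        full_name = content.get('full_name')
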
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
new file mode 100644
index 00000000..afb1b697
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
@@ -0,0 +1,20 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# (c) 2018 Luca 'remix_tj' Lorenzetto
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+emc_vnx_argument_spec = {
+ 'sp_address': dict(type='str', required=True),
+ 'sp_user': dict(type='str', required=False, default='sysadmin'),
+ 'sp_password': dict(type='str', required=False, default='sysadmin',
+ no_log=True),
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
new file mode 100644
index 00000000..47868a4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
@@ -0,0 +1,94 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils import basic
+
+
+def convert_to_binary_multiple(size_with_unit):
+ if size_with_unit is None:
+ return -1
+ valid_units = ['MiB', 'GiB', 'TiB']
+ valid_unit = False
+ for unit in valid_units:
+ if size_with_unit.strip().endswith(unit):
+ valid_unit = True
+ size = size_with_unit.split(unit)[0]
+ if float(size) < 0:
+ return -1
+ if not valid_unit:
+ raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))
+
+ # human_to_bytes() returns a value in bytes; convert it to MiB.
+ size = size_with_unit.replace(" ", "").split('iB')[0]
+ size_bytes = basic.human_to_bytes(size)
+ return int(size_bytes / (1024 * 1024))
+
+
+storage_system_spec = {
+ "storage_system_ip": {
+ "required": True,
+ "type": "str"
+ },
+ "storage_system_username": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "storage_system_password": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "secure": {
+ "type": "bool",
+ "default": False
+ }
+}
+
+
+def cpg_argument_spec():
+ spec = {
+ "state": {
+ "required": True,
+ "choices": ['present', 'absent'],
+ "type": 'str'
+ },
+ "cpg_name": {
+ "required": True,
+ "type": "str"
+ },
+ "domain": {
+ "type": "str"
+ },
+ "growth_increment": {
+ "type": "str",
+ },
+ "growth_limit": {
+ "type": "str",
+ },
+ "growth_warning": {
+ "type": "str",
+ },
+ "raid_type": {
+ "required": False,
+ "type": "str",
+ "choices": ['R0', 'R1', 'R5', 'R6']
+ },
+ "set_size": {
+ "required": False,
+ "type": "int"
+ },
+ "high_availability": {
+ "type": "str",
+ "choices": ['PORT', 'CAGE', 'MAG']
+ },
+ "disk_type": {
+ "type": "str",
+ "choices": ['FC', 'NL', 'SSD']
+ }
+ }
+ spec.update(storage_system_spec)
+ return spec
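
For example (a sketch only, assuming the usual AnsibleModule setup), a CPG module would combine the spec and the size helper roughly like this:

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(argument_spec=cpg_argument_spec())
    growth_limit_mib = convert_to_binary_multiple('32 GiB')  # -> 32768
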
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py
new file mode 100644
index 00000000..c1d8b777
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/univention_umc.py
@@ -0,0 +1,278 @@
+# -*- coding: UTF-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""Univention Corporate Server (UCS) access module.
+
+ Provides the following functions for working with a UCS server.
+
+ - ldap_search(filter, base=None, attr=None)
+ Search the LDAP via Univention's LDAP wrapper (ULDAP)
+
+ - config_registry()
+ Return the UCR registry object
+
+ - base_dn()
+ Return the configured Base DN according to the UCR
+
+ - uldap()
+ Return a handle to the ULDAP LDAP wrapper
+
+ - umc_module_for_add(module, container_dn, superordinate=None)
+ Return a UMC module for creating a new object of the given type
+
+ - umc_module_for_edit(module, object_dn, superordinate=None)
+ Return a UMC module for editing an existing object of the given type
+
+
+Any other module is not part of the "official" API and may change at any time.
+"""
+
+import re
+
+
+__all__ = [
+ 'ldap_search',
+ 'config_registry',
+ 'base_dn',
+ 'uldap',
+ 'umc_module_for_add',
+ 'umc_module_for_edit',
+]
+
+
+_singletons = {}
+
+
+def ldap_module():
+ import ldap as orig_ldap
+ return orig_ldap
+
+
+def _singleton(name, constructor):
+ if name in _singletons:
+ return _singletons[name]
+ _singletons[name] = constructor()
+ return _singletons[name]
+
+
+def config_registry():
+
+ def construct():
+ import univention.config_registry
+ ucr = univention.config_registry.ConfigRegistry()
+ ucr.load()
+ return ucr
+
+ return _singleton('config_registry', construct)
+
+
+def base_dn():
+ return config_registry()['ldap/base']
+
+
+def uldap():
+ "Return a configured univention uldap object"
+
+ def construct():
+ try:
+ secret_file = open('/etc/ldap.secret', 'r')
+ bind_dn = 'cn=admin,{0}'.format(base_dn())
+ except IOError: # pragma: no cover
+ secret_file = open('/etc/machine.secret', 'r')
+ bind_dn = config_registry()["ldap/hostdn"]
+ pwd_line = secret_file.readline()
+ pwd = re.sub('\n', '', pwd_line)
+
+ import univention.admin.uldap
+ return univention.admin.uldap.access(
+ host=config_registry()['ldap/master'],
+ base=base_dn(),
+ binddn=bind_dn,
+ bindpw=pwd,
+ start_tls=1,
+ )
+
+ return _singleton('uldap', construct)
+
+
+def config():
+ def construct():
+ import univention.admin.config
+ return univention.admin.config.config()
+ return _singleton('config', construct)
+
+
+def init_modules():
+ def construct():
+ import univention.admin.modules
+ univention.admin.modules.update()
+ return True
+ return _singleton('modules_initialized', construct)
+
+
+def position_base_dn():
+ def construct():
+ import univention.admin.uldap
+ return univention.admin.uldap.position(base_dn())
+ return _singleton('position_base_dn', construct)
+
+
+def ldap_dn_tree_parent(dn, count=1):
+ dn_array = dn.split(',')
+ dn_array[0:count] = []
+ return ','.join(dn_array)
+
+
+def ldap_search(filter, base=None, attr=None):
+ """Replaces uldap's search and uses a generator.
+ Note: the arguments are not the same as uldap's."""
+
+ if base is None:
+ base = base_dn()
+ msgid = uldap().lo.lo.search(
+ base,
+ ldap_module().SCOPE_SUBTREE,
+ filterstr=filter,
+ attrlist=attr
+ )
+ # There used to be a try/finally here, but a Python bug swallows the
+ # KeyboardInterrupt inside that construct, so it was removed; the abandon()
+ # call at the end is therefore mostly a best-effort cleanup.
+ while True:
+ result_type, result_data = uldap().lo.lo.result(msgid, all=0)
+ if not result_data:
+ break
+ if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover
+ break
+ else:
+ if result_type is ldap_module().RES_SEARCH_ENTRY:
+ for res in result_data:
+ yield res
+ uldap().lo.lo.abandon(msgid)
+
+
+def module_by_name(module_name_):
+ """Returns an initialized UMC module, identified by the given name.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ If the module does not exist, a KeyError is raised.
+
+ The modules are cached, so they won't be re-initialized
+ in subsequent calls.
+ """
+
+ def construct():
+ import univention.admin.modules
+ init_modules()
+ module = univention.admin.modules.get(module_name_)
+ univention.admin.modules.init(uldap(), position_base_dn(), module)
+ return module
+
+ return _singleton('module/%s' % module_name_, construct)
+
+
+def get_umc_admin_objects():
+ """Convenience accessor for getting univention.admin.objects.
+
+ This implements delayed importing, so the univention.* modules
+ are not loaded until this function is called.
+ """
+ import univention.admin
+ return univention.admin.objects
+
+
+def umc_module_for_add(module, container_dn, superordinate=None):
+ """Returns a UMC module object prepared for creating a new entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The container_dn MUST be the dn of the container (not of the object to
+ be created itself!).
+ """
+ mod = module_by_name(module)
+
+ position = position_base_dn()
+ position.setDn(container_dn)
+
+ # config, ldap objects from common module
+ obj = mod.object(config(), uldap(), position, superordinate=superordinate)
+ obj.open()
+
+ return obj
+
+
+def umc_module_for_edit(module, object_dn, superordinate=None):
+ """Returns a UMC module object prepared for editing an existing entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The object_dn MUST be the dn of the object itself, not the container!
+ """
+ mod = module_by_name(module)
+
+ objects = get_umc_admin_objects()
+
+ position = position_base_dn()
+ position.setDn(ldap_dn_tree_parent(object_dn))
+
+ obj = objects.get(
+ mod,
+ config(),
+ uldap(),
+ position=position,
+ superordinate=superordinate,
+ dn=object_dn
+ )
+ obj.open()
+
+ return obj
+
+
+def create_containers_and_parents(container_dn):
+ """Create a container and, if needed, its parent containers"""
+ import univention.admin.uexceptions as uexcp
+ if not container_dn.startswith("cn="):
+ raise AssertionError()
+ try:
+ parent = ldap_dn_tree_parent(container_dn)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
+ except uexcp.ldapError:
+ create_containers_and_parents(parent)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
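
A brief sketch of the typical edit flow (the DN is a hypothetical example; modify() belongs to the UDM object API, not to this file):

    obj = umc_module_for_edit('users/user', 'uid=jdoe,cn=users,dc=example,dc=com')
    obj['description'] = 'updated by ansible'
    obj.modify()
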
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py
new file mode 100644
index 00000000..0966dc50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/utm_utils.py
@@ -0,0 +1,216 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class UTMModuleConfigurationError(Exception):
+
+ def __init__(self, msg, **args):
+ super(UTMModuleConfigurationError, self).__init__(msg)
+ self.msg = msg
+ self.module_fail_args = args
+
+ def do_fail(self, module):
+ module.fail_json(msg=self.msg, other=self.module_fail_args)
+
+
+class UTMModule(AnsibleModule):
+ """
+ This is a helper class for constructing any UTM module. It automatically adds the utm_host, utm_port, utm_token,
+ utm_protocol, validate_certs and state fields to the module. To implement your own Sophos UTM module,
+ initialize this UTMModule class and define the payload fields that are needed for your module.
+ See other modules such as utm_aaa_group for an example.
+ """
+
+ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
+ supports_check_mode=False, required_if=None):
+ default_specs = dict(
+ headers=dict(type='dict', required=False, default={}),
+ utm_host=dict(type='str', required=True),
+ utm_port=dict(type='int', default=4444),
+ utm_token=dict(type='str', required=True, no_log=True),
+ utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
+ validate_certs=dict(type='bool', required=False, default=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
+ mutually_exclusive, required_together, required_one_of,
+ add_file_common_args, supports_check_mode, required_if)
+
+ def _merge_specs(self, default_specs, custom_specs):
+ result = default_specs.copy()
+ result.update(custom_specs)
+ return result
+
+
+class UTM:
+
+ def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
+ """
+ Initialize UTM Class
+ :param module: The Ansible module
+ :param endpoint: The corresponding endpoint to the module
+ :param change_relevant_keys: The keys of the object to check for changes
+ :param info_only: When implementing an info module, set this to true. Will allow access to the info method only
+ """
+ self.info_only = info_only
+ self.module = module
+ self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
+ module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
+
+ """
+ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
+ """
+ self.change_relevant_keys = change_relevant_keys
+ self.module.params['url_username'] = 'token'
+ self.module.params['url_password'] = module.params.get('utm_token')
+ if not all(elem in module.params.keys() for elem in self.change_relevant_keys):
+ raise UTMModuleConfigurationError(
+ "The keys " + to_native(
+ self.change_relevant_keys) + " to check are not in the module's keys:\n" + to_native(
+ module.params.keys()))
+
+ def execute(self):
+ try:
+ if not self.info_only:
+ if self.module.params.get('state') == 'present':
+ self._add()
+ elif self.module.params.get('state') == 'absent':
+ self._remove()
+ else:
+ self._info()
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def _info(self):
+ """
+ Returns the info for an object in UTM
+ """
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+ self.module.fail_json(result=json.loads(info["body"]))
+ else:
+ if result is None:
+ self.module.exit_json(changed=False)
+ else:
+ self.module.exit_json(result=result, changed=False)
+
+ def _add(self):
+ """
+ Adds or updates an object on the UTM
+ """
+
+ combined_headers = self._combine_headers()
+
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+ self.module.fail_json(result=json.loads(info["body"]))
+ else:
+ data_as_json_string = self.module.jsonify(self.module.params)
+ if result is None:
+ response, info = fetch_url(self.module, self.request_url, method="POST",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ else:
+ if self._is_object_changed(self.change_relevant_keys, self.module, result):
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info['status'] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ self.module.exit_json(result=result, changed=is_changed)
+
+ def _combine_headers(self):
+ """
+ This will combine a header default with headers that come from the module declaration
+ :return: A combined headers dict
+ """
+ default_headers = {"Accept": "application/json", "Content-type": "application/json"}
+ if self.module.params.get('headers') is not None:
+ result = default_headers.copy()
+ result.update(self.module.params.get('headers'))
+ else:
+ result = default_headers
+ return result
+
+ def _remove(self):
+ """
+ removes an object from utm
+ """
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if result is not None:
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
+ headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
+ data=self.module.jsonify(self.module.params))
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ else:
+ is_changed = True
+ self.module.exit_json(changed=is_changed)
+
+ def _lookup_entry(self, module, request_url):
+ """
+ Lookup for existing entry
+ :param module:
+ :param request_url:
+ :return:
+ """
+ response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
+ result = None
+ if response is not None:
+ results = json.loads(response.read())
+ result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
+ return info, result
+
+ def _clean_result(self, result):
+ """
+ Will clean the result from irrelevant fields
+ :param result: The result from the query
+ :return: The modified result
+ """
+ del result['utm_host']
+ del result['utm_port']
+ del result['utm_token']
+ del result['utm_protocol']
+ del result['validate_certs']
+ del result['url_username']
+ del result['url_password']
+ del result['state']
+ return result
+
+ def _is_object_changed(self, keys, module, result):
+ """
+ Check if my object is changed
+ :param keys: The keys that will determine if an object is changed
+ :param module: The module
+ :param result: The result from the query
+ :return:
+ """
+ for key in keys:
+ if module.params.get(key) != result[key]:
+ return True
+ return False
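
A compact sketch of how a concrete module wires these pieces together, modelled on the utm_aaa_group pattern mentioned in the class docstring (endpoint and payload keys are illustrative):

    def main():
        endpoint = "network/host"
        key_to_check_for_changes = ["address", "comment"]
        module = UTMModule(argument_spec=dict(
            name=dict(type='str', required=True),
            address=dict(type='str', required=True),
            comment=dict(type='str', required=False, default=""),
        ))
        UTM(module, endpoint, key_to_check_for_changes).execute()
    # a real module would call main() under the usual __main__ guard
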
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py
new file mode 100644
index 00000000..e5c9bdb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/vexata.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+HAS_VEXATAPI = True
+try:
+ from vexatapi.vexata_api_proxy import VexataAPIProxy
+except ImportError:
+ HAS_VEXATAPI = False
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import env_fallback
+
+VXOS_VERSION = None
+
+
+def get_version(iocs_json):
+ if not iocs_json:
+ raise Exception('Invalid IOC json')
+ # On Python 3, filter() returns an iterator, so build a list to allow
+ # the emptiness check and indexing below.
+ active = [ioc for ioc in iocs_json if ioc['mgmtRole']]
+ if not active:
+ raise Exception('Unable to detect active IOC')
+ active = active[0]
+ ver = active['swVersion']
+ if ver[0] != 'v':
+ raise Exception('Illegal version string')
+ ver = ver[1:ver.find('-')]
+ ver = map(int, ver.split('.'))
+ return tuple(ver)
+
+
+def get_array(module):
+ """Return storage array object or fail"""
+ global VXOS_VERSION
+ array = module.params['array']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+ validate = module.params.get('validate_certs')
+
+ if not HAS_VEXATAPI:
+ module.fail_json(msg='vexatapi library is required for this module. '
+ 'To install, use `pip install vexatapi`')
+
+ if user and password:
+ system = VexataAPIProxy(array, user, password, verify_cert=validate)
+ else:
+ module.fail_json(msg='The user/password are required to be passed in to '
+ 'the module as arguments or by setting the '
+ 'VEXATA_USER and VEXATA_PASSWORD environment variables.')
+ try:
+ if system.test_connection():
+ VXOS_VERSION = get_version(system.iocs())
+ return system
+ else:
+ module.fail_json(msg='Test connection to array failed.')
+ except Exception as e:
+ module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
+
+
+def argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+ return dict(
+ array=dict(type='str',
+ required=True),
+ user=dict(type='str',
+ fallback=(env_fallback, ['VEXATA_USER'])),
+ password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['VEXATA_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=False),
+ )
+
+
+def required_together():
+ """Return the default list used for the required_together argument to AnsibleModule"""
+ return [['user', 'password']]
+
+
+def size_to_MiB(size):
+ """Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
+ quant = size[:-1]
+ exponent = size[-1]
+ if not quant.isdigit() or exponent not in 'MGT':
+ return -1
+ quant = int(quant)
+ if exponent == 'G':
+ quant <<= 10
+ elif exponent == 'T':
+ quant <<= 20
+ return quant
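
Worked examples of the size conversion (values chosen purely for illustration):

    size_to_MiB('500M')  # -> 500
    size_to_MiB('8G')    # -> 8192
    size_to_MiB('2T')    # -> 2097152
    size_to_MiB('8GB')   # -> -1 (only a single M/G/T suffix is accepted)
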
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py
new file mode 100644
index 00000000..dbc6a0ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/module_utils/xenserver.py
@@ -0,0 +1,862 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import time
+import re
+import traceback
+
+XENAPI_IMP_ERR = None
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ HAS_XENAPI = False
+ XENAPI_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+
+def xenserver_common_argument_spec():
+ return dict(
+ hostname=dict(type='str',
+ aliases=['host', 'pool'],
+ required=False,
+ default='localhost',
+ fallback=(env_fallback, ['XENSERVER_HOST']),
+ ),
+ username=dict(type='str',
+ aliases=['user', 'admin'],
+ required=False,
+ default='root',
+ fallback=(env_fallback, ['XENSERVER_USER'])),
+ password=dict(type='str',
+ aliases=['pass', 'pwd'],
+ required=False,
+ no_log=True,
+ fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=True,
+ fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
+ )
+
+
+def xapi_to_module_vm_power_state(power_state):
+ """Maps XAPI VM power states to module VM power states."""
+ module_power_state_map = {
+ "running": "poweredon",
+ "halted": "poweredoff",
+ "suspended": "suspended",
+ "paused": "paused"
+ }
+
+ return module_power_state_map.get(power_state)
+
+
+def module_to_xapi_vm_power_state(power_state):
+ """Maps module VM power states to XAPI VM power states."""
+ vm_power_state_map = {
+ "poweredon": "running",
+ "poweredoff": "halted",
+ "restarted": "running",
+ "suspended": "suspended",
+ "shutdownguest": "halted",
+ "rebootguest": "running",
+ }
+
+ return vm_power_state_map.get(power_state)
+
+
+def is_valid_ip_addr(ip_addr):
+ """Validates given string as an IPv4 address.
+
+ Args:
+ ip_addr (str): string to validate as IPv4 address.
+
+ Returns:
+ bool: True if string is valid IPv4 address, else False.
+ """
+ ip_addr_split = ip_addr.split('.')
+
+ if len(ip_addr_split) != 4:
+ return False
+
+ for ip_addr_octet in ip_addr_split:
+ if not ip_addr_octet.isdigit():
+ return False
+
+ ip_addr_octet_int = int(ip_addr_octet)
+
+ if ip_addr_octet_int < 0 or ip_addr_octet_int > 255:
+ return False
+
+ return True
+
+
+def is_valid_ip_netmask(ip_netmask):
+ """Validates given string as IPv4 netmask.
+
+ Args:
+ ip_netmask (str): string to validate as IPv4 netmask.
+
+ Returns:
+ bool: True if string is valid IPv4 netmask, else False.
+ """
+ ip_netmask_split = ip_netmask.split('.')
+
+ if len(ip_netmask_split) != 4:
+ return False
+
+ valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
+
+ for ip_netmask_octet in ip_netmask_split:
+ if ip_netmask_octet not in valid_octet_values:
+ return False
+
+ if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
+ return False
+
+ return True
+
+
+def is_valid_ip_prefix(ip_prefix):
+ """Validates given string as IPv4 prefix.
+
+ Args:
+ ip_prefix (str): string to validate as IPv4 prefix.
+
+ Returns:
+ bool: True if string is valid IPv4 prefix, else False.
+ """
+ if not ip_prefix.isdigit():
+ return False
+
+ ip_prefix_int = int(ip_prefix)
+
+ if ip_prefix_int < 0 or ip_prefix_int > 32:
+ return False
+
+ return True
+
+
+def ip_prefix_to_netmask(ip_prefix, skip_check=False):
+ """Converts IPv4 prefix to netmask.
+
+ Args:
+ ip_prefix (str): IPv4 prefix to convert.
+ skip_check (bool): Skip validation of IPv4 prefix
+ (default: False). Use if you are sure IPv4 prefix is valid.
+
+ Returns:
+ str: IPv4 netmask equivalent to given IPv4 prefix if
+ IPv4 prefix is valid, else an empty string.
+ """
+ if skip_check:
+ ip_prefix_valid = True
+ else:
+ ip_prefix_valid = is_valid_ip_prefix(ip_prefix)
+
+ if ip_prefix_valid:
+ return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])
+ else:
+ return ""
+
+
+def ip_netmask_to_prefix(ip_netmask, skip_check=False):
+ """Converts IPv4 netmask to prefix.
+
+ Args:
+ ip_netmask (str): IPv4 netmask to convert.
+ skip_check (bool): Skip validation of IPv4 netmask
+ (default: False). Use if you are sure IPv4 netmask is valid.
+
+ Returns:
+ str: IPv4 prefix equivalent to given IPv4 netmask if
+ IPv4 netmask is valid, else an empty string.
+ """
+ if skip_check:
+ ip_netmask_valid = True
+ else:
+ ip_netmask_valid = is_valid_ip_netmask(ip_netmask)
+
+ if ip_netmask_valid:
+ return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")]))
+ else:
+ return ""
+
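+# Worked example (annotation, not part of the original module code):
+# ip_prefix_to_netmask('24') returns '255.255.255.0', and
+# ip_netmask_to_prefix('255.255.255.0') returns '24'.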
+
+def is_valid_ip6_addr(ip6_addr):
+ """Validates given string as IPv6 address.
+
+ Args:
+ ip6_addr (str): string to validate as IPv6 address.
+
+ Returns:
+ bool: True if string is valid IPv6 address, else False.
+ """
+ ip6_addr = ip6_addr.lower()
+ ip6_addr_split = ip6_addr.split(':')
+
+ if ip6_addr_split[0] == "":
+ ip6_addr_split.pop(0)
+
+ if ip6_addr_split[-1] == "":
+ ip6_addr_split.pop(-1)
+
+ if len(ip6_addr_split) > 8:
+ return False
+
+ if ip6_addr_split.count("") > 1:
+ return False
+ elif ip6_addr_split.count("") == 1:
+ ip6_addr_split.remove("")
+ else:
+ if len(ip6_addr_split) != 8:
+ return False
+
+ ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')
+
+ for ip6_addr_hextet in ip6_addr_split:
+ if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
+ return False
+
+ return True
+
+
+def is_valid_ip6_prefix(ip6_prefix):
+ """Validates given string as IPv6 prefix.
+
+ Args:
+ ip6_prefix (str): string to validate as IPv6 prefix.
+
+ Returns:
+ bool: True if string is valid IPv6 prefix, else False.
+ """
+ if not ip6_prefix.isdigit():
+ return False
+
+ ip6_prefix_int = int(ip6_prefix)
+
+ if ip6_prefix_int < 0 or ip6_prefix_int > 128:
+ return False
+
+ return True
+
+
+def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
+ """Finds and returns a reference to arbitrary XAPI object.
+
+ An object is searched by using either name (name_label) or UUID
+ with UUID taken precedence over name.
+
+ Args:
+ module: Reference to Ansible module object.
+ name (str): Name (name_label) of an object to search for.
+ uuid (str): UUID of an object to search for.
+ obj_type (str): Any valid XAPI object type. See XAPI docs.
+ fail (bool): Should function fail with error message if object
+ is not found or exit silently (default: True). The function
+ always fails if multiple objects with same name are found.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ XAPI reference to found object or None if object is not found
+ and fail=False.
+ """
+ xapi_session = XAPI.connect(module)
+
+ if obj_type in ["template", "snapshot"]:
+ real_obj_type = "VM"
+ elif obj_type == "home server":
+ real_obj_type = "host"
+ elif obj_type == "ISO image":
+ real_obj_type = "VDI"
+ else:
+ real_obj_type = obj_type
+
+ obj_ref = None
+
+ # UUID has precedence over name.
+ if uuid:
+ try:
+ # Find object by UUID. If no object is found using given UUID,
+ # an exception will be generated.
+ obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
+ except XenAPI.Failure as f:
+ if fail:
+ module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
+ elif name:
+ try:
+ # Find object by name (name_label).
+ obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ # If obj_ref_list is empty.
+ if not obj_ref_list:
+ if fail:
+ module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
+ # If obj_ref_list contains multiple object references.
+ elif len(obj_ref_list) > 1:
+ module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
+ # The obj_ref_list contains only one object reference.
+ else:
+ obj_ref = obj_ref_list[0]
+ else:
+ module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))
+
+ return obj_ref
+
+
+def gather_vm_params(module, vm_ref):
+ """Gathers all VM parameters available in XAPI database.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+
+ Returns:
+ dict: VM parameters.
+ """
+ # We silently return empty vm_params if bad vm_ref was supplied.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ try:
+ vm_params = xapi_session.xenapi.VM.get_record(vm_ref)
+
+ # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.
+
+ # Affinity.
+ if vm_params['affinity'] != "OpaqueRef:NULL":
+ vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
+ vm_params['affinity'] = vm_affinity
+ else:
+ vm_params['affinity'] = {}
+
+ # VBDs.
+ vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]
+
+ # List of VBDs is usually sorted by userdevice but we sort just
+ # in case. We need this list sorted by userdevice so that we can
+ # make positional pairing with module.params['disks'].
+ vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
+ vm_params['VBDs'] = vm_vbd_params_list
+
+ # VDIs.
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
+ vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
+ else:
+ vm_vdi_params = {}
+
+ vm_vbd_params['VDI'] = vm_vdi_params
+
+ # VIFs.
+ vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]
+
+ # List of VIFs is usually sorted by device but we sort just
+ # in case. We need this list sorted by device so that we can
+ # make positional pairing with module.params['networks'].
+ vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
+ vm_params['VIFs'] = vm_vif_params_list
+
+ # Networks.
+ for vm_vif_params in vm_params['VIFs']:
+ if vm_vif_params['network'] != "OpaqueRef:NULL":
+ vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
+ else:
+ vm_network_params = {}
+
+ vm_vif_params['network'] = vm_network_params
+
+ # Guest metrics.
+ if vm_params['guest_metrics'] != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
+ vm_params['guest_metrics'] = vm_guest_metrics
+ else:
+ vm_params['guest_metrics'] = {}
+
+ # Detect customization agent.
+ xenserver_version = get_xenserver_version(module)
+
+ if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
+ "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
+ vm_params['customization_agent'] = "native"
+ else:
+ vm_params['customization_agent'] = "custom"
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_params
+
+
+def gather_vm_facts(module, vm_params):
+ """Gathers VM facts.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+
+ Returns:
+ dict: VM facts.
+ """
+ # We silently return empty vm_facts if no vm_params are available.
+ if not vm_params:
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ # Gather facts.
+ vm_facts = {
+ "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
+ "name": vm_params['name_label'],
+ "name_desc": vm_params['name_description'],
+ "uuid": vm_params['uuid'],
+ "is_template": vm_params['is_a_template'],
+ "folder": vm_params['other_config'].get('folder', ''),
+ "hardware": {
+ "num_cpus": int(vm_params['VCPUs_max']),
+ "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
+ "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
+ },
+ "disks": [],
+ "cdrom": {},
+ "networks": [],
+ "home_server": vm_params['affinity'].get('name_label', ''),
+ "domid": vm_params['domid'],
+ "platform": vm_params['platform'],
+ "other_config": vm_params['other_config'],
+ "xenstore_data": vm_params['xenstore_data'],
+ "customization_agent": vm_params['customization_agent'],
+ }
+
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['type'] == "Disk":
+ vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])
+
+ vm_disk_params = {
+ "size": int(vm_vbd_params['VDI']['virtual_size']),
+ "name": vm_vbd_params['VDI']['name_label'],
+ "name_desc": vm_vbd_params['VDI']['name_description'],
+ "sr": vm_disk_sr_params['name_label'],
+ "sr_uuid": vm_disk_sr_params['uuid'],
+ "os_device": vm_vbd_params['device'],
+ "vbd_userdevice": vm_vbd_params['userdevice'],
+ }
+
+ vm_facts['disks'].append(vm_disk_params)
+ elif vm_vbd_params['type'] == "CD":
+ if vm_vbd_params['empty']:
+ vm_facts['cdrom'].update(type="none")
+ else:
+ vm_facts['cdrom'].update(type="iso")
+ vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])
+
+ for vm_vif_params in vm_params['VIFs']:
+ vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})
+
+ vm_network_params = {
+ "name": vm_vif_params['network']['name_label'],
+ "mac": vm_vif_params['MAC'],
+ "vif_device": vm_vif_params['device'],
+ "mtu": vm_vif_params['MTU'],
+ "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
+ "prefix": "",
+ "netmask": "",
+ "gateway": "",
+ "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
+ vm_vif_params['device'])],
+ "prefix6": "",
+ "gateway6": "",
+ }
+
+ if vm_params['customization_agent'] == "native":
+ if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])
+
+ vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']
+
+ if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]
+
+ vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
+
+ elif vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = vm_params['xenstore_data']
+
+ for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
+ vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")
+
+ vm_facts['networks'].append(vm_network_params)
+
+ return vm_facts
+
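+# Illustrative only (not part of the original code): a caller would typically
+# resolve a VM reference first and then chain the two helpers above, e.g.
+#
+#   vm_params = gather_vm_params(module, vm_ref)
+#   vm_facts = gather_vm_facts(module, vm_params)
+#
+# gather_vm_facts() operates on the already dereferenced record produced by
+# gather_vm_params(), so no extra VM lookup is needed.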
+
+def set_vm_power_state(module, vm_ref, power_state, timeout=300):
+ """Controls VM power state.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ power_state (str): Power state to put VM into. Accepted values:
+
+ - poweredon
+ - poweredoff
+ - restarted
+ - suspended
+ - shutdownguest
+ - rebootguest
+
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ tuple (bool, str): Bool element is True if VM power state has
+ changed by calling this function, else False. Str element carries
+ a value of resulting power state as defined by XAPI - 'running',
+ 'halted' or 'suspended'.
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ power_state = power_state.replace('_', '').replace('-', '').lower()
+ vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)
+
+ state_changed = False
+
+ try:
+ # Get current state of the VM.
+ vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state_current != power_state:
+ if power_state == "poweredon":
+ if not module.check_mode:
+ # VM can be in either halted, suspended, paused or running state.
+ # For VM to be in running state, start has to be called on halted,
+ # resume on suspended and unpause on paused VM.
+ if vm_power_state_current == "poweredoff":
+ xapi_session.xenapi.VM.start(vm_ref, False, False)
+ elif vm_power_state_current == "suspended":
+ xapi_session.xenapi.VM.resume(vm_ref, False, False)
+ elif vm_power_state_current == "paused":
+ xapi_session.xenapi.VM.unpause(vm_ref)
+ elif power_state == "poweredoff":
+ if not module.check_mode:
+ # hard_shutdown will halt VM regardless of current state.
+ xapi_session.xenapi.VM.hard_shutdown(vm_ref)
+ elif power_state == "restarted":
+ # hard_reboot will restart VM only if VM is in paused or running state.
+ if vm_power_state_current in ["paused", "poweredon"]:
+ if not module.check_mode:
+ xapi_session.xenapi.VM.hard_reboot(vm_ref)
+ else:
+ module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "suspended":
+ # running state is required for suspend.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ xapi_session.xenapi.VM.suspend(vm_ref)
+ else:
+ module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "shutdownguest":
+ # running state is required for guest shutdown.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_shutdown(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
+ elif power_state == "rebootguest":
+ # running state is required for guest reboot.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_reboot(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
+ else:
+ module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)
+
+ state_changed = True
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return (state_changed, vm_power_state_resulting)
+
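+# Illustrative usage sketch (assumed, not part of the original code):
+#
+#   changed, final_state = set_vm_power_state(module, vm_ref, "poweredoff", timeout=300)
+#
+# The bool typically feeds exit_json(changed=...), while the string reports the
+# resulting XAPI power state ('running', 'halted' or 'suspended').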
+
+def wait_for_task(module, task_ref, timeout=300):
+ """Waits for async XAPI task to finish.
+
+ Args:
+ module: Reference to Ansible module object.
+ task_ref (str): XAPI reference to task.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ str: failure message on failure, else an empty string.
+ """
+ # Fail if we don't have a valid task reference.
+ if not task_ref or task_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ interval = 2
+
+ result = ""
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ try:
+ while time_left > 0:
+ task_status = xapi_session.xenapi.task.get_status(task_ref).lower()
+
+ if task_status == "pending":
+ # Task is still running.
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+
+ continue
+ elif task_status == "success":
+ # Task is done.
+ break
+ else:
+ # Task failed.
+ result = task_status
+ break
+ else:
+ # We timed out.
+ result = "timeout"
+
+ xapi_session.xenapi.task.destroy(task_ref)
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return result
+
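+# Illustrative only, mirroring the pattern used in set_vm_power_state() above:
+#
+#   task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
+#   task_result = wait_for_task(module, task_ref, timeout)
+#   if task_result:
+#       module.fail_json(msg="Task failed: '%s'!" % task_result)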
+
+def wait_for_vm_ip_address(module, vm_ref, timeout=300):
+ """Waits for VM to acquire an IP address.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ dict: VM guest metrics as retrieved by
+ VM_guest_metrics.get_record() XAPI method with info
+ on IP address acquired.
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ vm_guest_metrics = {}
+
+ try:
+ # We translate VM power state string so that error message can be
+ # consistent with module VM power states.
+ vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state != 'poweredon':
+ module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)
+
+ interval = 2
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ while time_left > 0:
+ vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)
+
+ if vm_guest_metrics_ref != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
+ vm_ips = vm_guest_metrics['networks']
+
+ if "0/ip" in vm_ips:
+ break
+
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+ else:
+ # We timed out.
+ module.fail_json(msg="Timed out waiting for VM IP address!")
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_guest_metrics
+
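+# Illustrative only: the acquired IPv4 address can be read from the returned
+# guest metrics, e.g.
+#
+#   vm_guest_metrics = wait_for_vm_ip_address(module, vm_ref, timeout=300)
+#   vm_ip = vm_guest_metrics['networks'].get('0/ip')
+#
+# '0/ip' is the key the loop above waits for (the address on the first VIF).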
+
+def get_xenserver_version(module):
+ """Returns XenServer version.
+
+ Args:
+ module: Reference to Ansible module object.
+
+ Returns:
+ list: Element [0] is major version. Element [1] is minor version.
+ Element [2] is update number.
+ """
+ xapi_session = XAPI.connect(module)
+
+ host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
+
+ try:
+ xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')]
+ except ValueError:
+ xenserver_version = [0, 0, 0]
+
+ return xenserver_version
+
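+# Illustrative only: callers compare the parsed components positionally, e.g.
+#
+#   xenserver_version = get_xenserver_version(module)
+#   if xenserver_version[0] >= 7:
+#       pass  # XenServer 7.0 or newer
+#
+# The [0, 0, 0] fallback keeps such comparisons safe when the product version
+# string cannot be parsed.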
+
+class XAPI(object):
+ """Class for XAPI session management."""
+ _xapi_session = None
+
+ @classmethod
+ def connect(cls, module, disconnect_atexit=True):
+ """Establishes XAPI connection and returns session reference.
+
+ If no existing session is available, establishes a new one
+ and returns it, else returns existing one.
+
+ Args:
+ module: Reference to Ansible module object.
+ disconnect_atexit (bool): Controls if method should
+ register atexit handler to disconnect from XenServer
+ on module exit (default: True).
+
+ Returns:
+ XAPI session reference.
+ """
+ if cls._xapi_session is not None:
+ return cls._xapi_session
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ ignore_ssl = not module.params['validate_certs']
+
+ if hostname == 'localhost':
+ cls._xapi_session = XenAPI.xapi_local()
+ username = ''
+ password = ''
+ else:
+ # If scheme is not specified we default to http:// because https://
+ # is problematic in most setups.
+ if not hostname.startswith("http://") and not hostname.startswith("https://"):
+ hostname = "http://%s" % hostname
+
+ try:
+ # ignore_ssl is supported in XenAPI library from XenServer 7.2
+ # SDK onward but there is no way to tell which version we
+ # are using. TypeError will be raised if ignore_ssl is not
+ # supported. Additionally, ignore_ssl requires Python 2.7.9
+ # or newer.
+ cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
+ except TypeError:
+ # Try without ignore_ssl.
+ cls._xapi_session = XenAPI.Session(hostname)
+
+ if not password:
+ password = ''
+
+ try:
+ cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
+ except XenAPI.Failure as f:
+ module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))
+
+ # Disabling atexit should be used in special cases only.
+ if disconnect_atexit:
+ atexit.register(cls._xapi_session.logout)
+
+ return cls._xapi_session
+
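+# Illustrative only: the session is cached on the class, so repeated calls are
+# cheap and all helpers above share a single login, e.g.
+#
+#   xapi_session = XAPI.connect(module)
+#   vm_ref = xapi_session.xenapi.VM.get_by_uuid(some_uuid)
+#
+# VM.get_by_uuid is a standard XAPI call; 'some_uuid' is just a placeholder.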
+
+class XenServerObject(object):
+ """Base class for all XenServer objects.
+
+ This class contains active XAPI session reference and common
+ attributes with useful info about XenServer host/pool.
+
+ Attributes:
+ module: Reference to Ansible module object.
+ xapi_session: Reference to XAPI session.
+ pool_ref (str): XAPI reference to a pool currently connected to.
+ default_sr_ref (str): XAPI reference to a pool default
+ Storage Repository.
+        host_ref (str): XAPI reference to a host currently connected to.
+        xenserver_version (list of int): Contains XenServer major, minor and
+            update version numbers.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerObject using common module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ if not HAS_XENAPI:
+ module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)
+
+ self.module = module
+ self.xapi_session = XAPI.connect(module)
+
+ try:
+ self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
+ self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
+ self.xenserver_version = get_xenserver_version(module)
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
new file mode 100644
index 00000000..27bfc1a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright: (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+ - This can be used to check for migrations in a cluster.
+ This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+    - If waiting for migrations is not desired, simply poll until
+      port 3000 is available or C(asinfo -v status) returns ok.
+author: "Albert Autin (@Alb0t)"
+options:
+ host:
+ description:
+ - Which host do we use as seed for info connection
+ required: False
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port)
+ required: False
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds)
+ required: False
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations"
+ consecutively before returning OK back to ansible?
+ required: False
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: False
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: False
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node
+ before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: True
+ type: bool
+ min_cluster_size:
+ description:
+            - The check will keep returning bad until the cluster size is met
+              or until the tries are exhausted.
+ required: False
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+            - Fail if the cluster key changes.
+              If something else is changing the cluster, we may want to fail.
+ required: False
+ type: bool
+ default: True
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+            - When all Aerospike builds in the cluster are version 4.3 or
+              greater, the C(cluster-stable) info command will be used.
+ Inside this command, you can optionally specify what the target
+ cluster size is - but it is not necessary. You can still rely on
+ min_cluster_size if you don't want to use this option.
+ - If this option is specified on a cluster that has at least 1
+ host <4.3 then it will be ignored until the min version reaches
+ 4.3.
+ required: False
+ type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+ community.general.aerospike_migrations:
+ host: "localhost"
+ connect_timeout: 2000
+ consecutive_good_checks: 5
+ sleep_between_checks: 15
+ tries_limit: 600
+ local_only: False
+
+# example playbook:
+---
+- name: Upgrade aerospike
+ hosts: all
+ become: true
+ serial: 1
+ tasks:
+ - name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - python
+ - python-pip
+ - python-setuptools
+ state: latest
+ - name: Setup aerospike
+ ansible.builtin.pip:
+ name: aerospike
+# check for migrations every (sleep_between_checks)
+# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
+# Will exit if any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be roughly:
+# retries * (delay + tries_limit * sleep_between_checks)
+ - name: Wait for aerospike migrations
+ community.general.aerospike_migrations:
+ local_only: True
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
+ register: migrations_check
+ until: migrations_check is succeeded
+ changed_when: false
+ delay: 60
+ retries: 120
+ - name: Another thing
+ ansible.builtin.shell: |
+ echo foo
+ - name: Reboot
+ ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+ import aerospike
+ from time import sleep
+ import re
+except ImportError as ie:
+ LIB_FOUND = False
+ LIB_FOUND_ERR = traceback.format_exc()
+else:
+ LIB_FOUND = True
+
+
+def run_module():
+ """run ansible module"""
+ module_args = dict(
+ host=dict(type='str', required=False, default='localhost'),
+ port=dict(type='int', required=False, default=3000),
+ connect_timeout=dict(type='int', required=False, default=1000),
+ consecutive_good_checks=dict(type='int', required=False, default=3),
+ sleep_between_checks=dict(type='int', required=False, default=60),
+ tries_limit=dict(type='int', required=False, default=300),
+ local_only=dict(type='bool', required=True),
+ min_cluster_size=dict(type='int', required=False, default=1),
+ target_cluster_size=dict(type='int', required=False, default=None),
+ fail_on_cluster_change=dict(type='bool', required=False, default=True),
+ migrate_tx_key=dict(type='str', required=False,
+ default="migrate_tx_partitions_remaining"),
+ migrate_rx_key=dict(type='str', required=False,
+ default="migrate_rx_partitions_remaining")
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not LIB_FOUND:
+ module.fail_json(msg=missing_required_lib('aerospike'),
+ exception=LIB_FOUND_ERR)
+
+ try:
+ if module.check_mode:
+ has_migrations, skip_reason = False, None
+ else:
+ migrations = Migrations(module)
+ has_migrations, skip_reason = migrations.has_migs(
+ module.params['local_only']
+ )
+
+ if has_migrations:
+ module.fail_json(msg="Failed.", skip_reason=skip_reason)
+ except Exception as e:
+ module.fail_json(msg="Error: {0}".format(e))
+
+ module.exit_json(**result)
+
+
+class Migrations:
+ """ Check or wait for migrations between nodes """
+
+ def __init__(self, module):
+ self.module = module
+ self._client = self._create_client().connect()
+ self._nodes = {}
+ self._update_nodes_list()
+ self._cluster_statistics = {}
+ self._update_cluster_statistics()
+ self._namespaces = set()
+ self._update_cluster_namespace_list()
+ self._build_list = set()
+ self._update_build_list()
+ self._start_cluster_key = \
+ self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+ def _create_client(self):
+ """ TODO: add support for auth, tls, and other special features
+ I won't use those features, so I'll wait until somebody complains
+ or does it for me (Cross fingers)
+ create the client object"""
+ config = {
+ 'hosts': [
+ (self.module.params['host'], self.module.params['port'])
+ ],
+ 'policies': {
+ 'timeout': self.module.params['connect_timeout']
+ }
+ }
+ return aerospike.client(config)
+
+ def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+ """delimiter is for separate stats that come back, NOT for kv
+ separation which is ="""
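+        # Illustrative response shape this parsing assumes (example values only):
+        #   "statistics\tcluster_key=abc123;migrate_allowed=true;..."
+        # It is split on '\t' into [command, output], then on the delimiter into
+        # 'key=value' pairs whenever '=' is present.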
+ if node is None: # If no node passed, use the first one (local)
+ node = self._nodes[0]
+ data = self._client.info_node(cmd, node)
+ data = data.split("\t")
+ if len(data) != 1 and len(data) != 2:
+ self.module.fail_json(
+ msg="Unexpected number of values returned in info command: " +
+ str(len(data))
+ )
+        # data will be in format 'command\toutput'
+ data = data[-1]
+ data = data.rstrip("\n\r")
+ data_arr = data.split(delimiter)
+
+ # some commands don't return in kv format
+        # so we don't want a dict from those.
+ if '=' in data:
+ retval = dict(
+ metric.split("=", 1) for metric in data_arr
+ )
+ else:
+ # if only 1 element found, and not kv, return just the value.
+ if len(data_arr) == 1:
+ retval = data_arr[0]
+ else:
+ retval = data_arr
+ return retval
+
+ def _update_build_list(self):
+ """creates self._build_list which is a unique list
+ of build versions."""
+ self._build_list = set()
+ for node in self._nodes:
+ build = self._info_cmd_helper('build', node)
+ self._build_list.add(build)
+
+ # just checks to see if the version is 4.3 or greater
+ def _can_use_cluster_stable(self):
+ # if version <4.3 we can't use cluster-stable info cmd
+ # regex hack to check for versions beginning with 0-3 or
+ # beginning with 4.0,4.1,4.2
+ if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+ return False
+ return True
+
+ def _update_cluster_namespace_list(self):
+ """ make a unique list of namespaces
+ TODO: does this work on a rolling namespace add/deletion?
+        thankfully if it doesn't, we don't need this on builds >=4.3"""
+ self._namespaces = set()
+ for node in self._nodes:
+ namespaces = self._info_cmd_helper('namespaces', node)
+ for namespace in namespaces:
+ self._namespaces.add(namespace)
+
+ def _update_cluster_statistics(self):
+ """create a dict of nodes with their related stats """
+ self._cluster_statistics = {}
+ for node in self._nodes:
+ self._cluster_statistics[node] = \
+ self._info_cmd_helper('statistics', node)
+
+ def _update_nodes_list(self):
+ """get a fresh list of all the nodes"""
+ self._nodes = self._client.get_nodes()
+ if not self._nodes:
+ self.module.fail_json("Failed to retrieve at least 1 node.")
+
+ def _namespace_has_migs(self, namespace, node=None):
+ """returns a True or False.
+ Does the namespace have migrations for the node passed?
+ If no node passed, uses the local node or the first one in the list"""
+ namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+ try:
+ namespace_tx = \
+ int(namespace_stats[self.module.params['migrate_tx_key']])
+ namespace_rx = \
+                int(namespace_stats[self.module.params['migrate_rx_key']])
+ except KeyError:
+ self.module.fail_json(
+ msg="Did not find partition remaining key:" +
+ self.module.params['migrate_tx_key'] +
+ " or key:" +
+ self.module.params['migrate_rx_key'] +
+ " in 'namespace/" +
+ namespace +
+ "' output."
+ )
+ except TypeError:
+ self.module.fail_json(
+ msg="namespace stat returned was not numerical"
+ )
+ return namespace_tx != 0 or namespace_rx != 0
+
+ def _node_has_migs(self, node=None):
+ """just calls namespace_has_migs and
+ if any namespace has migs returns true"""
+ migs = 0
+ self._update_cluster_namespace_list()
+ for namespace in self._namespaces:
+ if self._namespace_has_migs(namespace, node):
+ migs += 1
+ return migs != 0
+
+ def _cluster_key_consistent(self):
+ """create a dictionary to store what each node
+ returns the cluster key as. we should end up with only 1 dict key,
+ with the key being the cluster key."""
+ cluster_keys = {}
+ for node in self._nodes:
+ cluster_key = self._cluster_statistics[node][
+ 'cluster_key']
+ if cluster_key not in cluster_keys:
+ cluster_keys[cluster_key] = 1
+ else:
+ cluster_keys[cluster_key] += 1
+ if len(cluster_keys.keys()) == 1 and \
+ self._start_cluster_key in cluster_keys:
+ return True
+ return False
+
+ def _cluster_migrates_allowed(self):
+ """ensure all nodes have 'migrate_allowed' in their stats output"""
+ for node in self._nodes:
+ node_stats = self._info_cmd_helper('statistics', node)
+ allowed = node_stats['migrate_allowed']
+ if allowed == "false":
+ return False
+ return True
+
+ def _cluster_has_migs(self):
+ """calls node_has_migs for each node"""
+ migs = 0
+ for node in self._nodes:
+ if self._node_has_migs(node):
+ migs += 1
+ if migs == 0:
+ return False
+ return True
+
+ def _has_migs(self, local):
+ if local:
+ return self._local_node_has_migs()
+ return self._cluster_has_migs()
+
+ def _local_node_has_migs(self):
+ return self._node_has_migs(None)
+
+ def _is_min_cluster_size(self):
+ """checks that all nodes in the cluster are returning the
+ minimum cluster size specified in their statistics output"""
+ sizes = set()
+ for node in self._cluster_statistics:
+ sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+ if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no
+ return False
+ if (min(sizes)) >= self.module.params['min_cluster_size']:
+ return True
+ return False
+
+ def _cluster_stable(self):
+ """Added 4.3:
+ cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
+ Returns the current 'cluster_key' when the following are satisfied:
+
+ If 'size' is specified then the target node's 'cluster-size'
+ must match size.
+ If 'ignore-migrations' is either unspecified or 'false' then
+ the target node's migrations counts must be zero for the provided
+ 'namespace' or all namespaces if 'namespace' is not provided."""
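+        # Illustrative request built below (assuming target_cluster_size=4):
+        #   "cluster-stable:size=4;"
+        # Each node either returns the current cluster key or raises a ServerError
+        # containing 'unstable-cluster'.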
+ cluster_key = set()
+ cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+ cmd = "cluster-stable:"
+ target_cluster_size = self.module.params['target_cluster_size']
+ if target_cluster_size is not None:
+ cmd = cmd + "size=" + str(target_cluster_size) + ";"
+ for node in self._nodes:
+ try:
+ cluster_key.add(self._info_cmd_helper(cmd, node))
+ except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception
+ if 'unstable-cluster' in e.msg:
+ return False
+ raise e
+ if len(cluster_key) == 1:
+ return True
+ return False
+
+ def _cluster_good_state(self):
+ """checks a few things to make sure we're OK to say the cluster
+        has no migs. It could be in an unhealthy condition that does not allow
+ migs, or a split brain"""
+ if self._cluster_key_consistent() is not True:
+ return False, "Cluster key inconsistent."
+ if self._is_min_cluster_size() is not True:
+ return False, "Cluster min size not reached."
+ if self._cluster_migrates_allowed() is not True:
+ return False, "migrate_allowed is false somewhere."
+ return True, "OK."
+
+ def has_migs(self, local=True):
+ """returns a boolean, False if no migrations otherwise True"""
+ consecutive_good = 0
+ try_num = 0
+ skip_reason = list()
+ while \
+ try_num < int(self.module.params['tries_limit']) and \
+ consecutive_good < \
+ int(self.module.params['consecutive_good_checks']):
+
+ self._update_nodes_list()
+ self._update_cluster_statistics()
+
+            # These checks run on every iteration because
+            # we probably want to skip & sleep instead of failing entirely
+ stable, reason = self._cluster_good_state()
+ if stable is not True:
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + reason
+ )
+ else:
+ if self._can_use_cluster_stable():
+ if self._cluster_stable():
+ consecutive_good += 1
+ else:
+ consecutive_good = 0
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " cluster_stable"
+ )
+ elif self._has_migs(local):
+ # print("_has_migs")
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " migrations"
+ )
+ consecutive_good = 0
+ else:
+ consecutive_good += 1
+ if consecutive_good == self.module.params[
+ 'consecutive_good_checks']:
+ break
+ try_num += 1
+ sleep(self.module.params['sleep_between_checks'])
+ # print(skip_reason)
+ if consecutive_good == self.module.params['consecutive_good_checks']:
+ return False, None
+ return True, skip_reason
+
+
+def main():
+ """main method for ansible module"""
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
new file mode 100644
index 00000000..3e7938bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+  - Parameter I(token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+options:
+ project_id:
+ description:
+ - Airbrake PROJECT_ID
+ required: false
+ type: str
+ version_added: '0.2.0'
+ project_key:
+ description:
+ - Airbrake PROJECT_KEY.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ type: str
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ type: str
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ type: str
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision from version control was deployed
+ required: false
+ type: str
+ version:
+ description:
+ - A string identifying what version was deployed
+ required: false
+ type: str
+ version_added: '1.0.0'
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://api.airbrake.io/api/v4/projects/"
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ token:
+ description:
+ - This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+ required: false
+ type: str
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify airbrake about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: '4.2'
+
+- name: Notify airbrake about an app deployment, using git hash as revision
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+ version: '0.2.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=False, no_log=True, type='str'),
+ project_id=dict(required=False, no_log=True, type='str'),
+ project_key=dict(required=False, no_log=True, type='str'),
+ environment=dict(required=True, type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ version=dict(required=False, type='str'),
+ url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ required_together=[('project_id', 'project_key')],
+ mutually_exclusive=[('project_id', 'token')],
+ )
+
+    # Build dict of params
+ params = {}
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if module.params["token"]:
+ # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
+ if module.params["environment"]:
+ params["deploy[rails_env]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[scm_repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[scm_revision]"] = module.params["revision"]
+
+ # version not supported in v2 API; omit
+
+ module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
+ "it and use 'project_id' and 'project_key' instead",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ params["api_key"] = module.params["token"]
+
+ # Allow sending to Airbrake compliant v2 APIs
+ if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
+ url = 'https://api.airbrake.io/deploys.txt'
+ else:
+ url = module.params["url"]
+
+ # Send the data to airbrake
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+
+ if module.params["project_id"] and module.params["project_key"]:
+ # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+ if module.params["environment"]:
+ params["environment"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["username"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["repository"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["revision"] = module.params["revision"]
+
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+
+ # Build deploy url
+ url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
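+        # Illustrative resulting URL (hypothetical values from the examples above):
+        #   https://api.airbrake.io/api/v4/projects/12345/deploys?key=AAAAAA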
+ json_body = module.jsonify(params)
+
+ # Build header
+ headers = {'Content-Type': 'application/json'}
+
+ # Notify Airbrake of deploy
+ response, info = fetch_url(module, url, data=json_body,
+ headers=headers, method='POST')
+
+ if info['status'] == 200 or info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py
new file mode 100644
index 00000000..89468059
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_devices.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+options:
+ attributes:
+ description:
+ - A list of device attributes.
+ type: dict
+ device:
+ description:
+ - The name of the device.
+    - C(all) is valid to rescan all C(available) devices (AIX cfgmgr command).
+ type: str
+ force:
+ description:
+ - Forces action.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Removes or defines a device and children devices.
+ type: bool
+ default: no
+ state:
+ description:
+ - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes the device to the Defined state.
+ type: str
+ choices: [ available, defined, removed ]
+ default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+ community.general.aix_devices:
+ device: all
+ state: available
+
+- name: Scan new virtual devices (vio0)
+ community.general.aix_devices:
+ device: vio0
+ state: available
+
+- name: Removing IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+ community.general.aix_devices:
+ device: ent2
+ state: removed
+
+- name: Put device en2 in Defined
+ community.general.aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent).
+ community.general.aix_devices:
+ device: ent4
+ state: removed
+
+- name: Put device en4 in Defined (nonexistent)
+ community.general.aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+      mtu: 9000
+      arp: "off"
+ state: available
+
+- name: Configure IP, netmask and set en1 up.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: available
+
+- name: Adding IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+ """
+ Check if device already exists and the state.
+ Args:
+ module: Ansible module.
+ device: device to be checked.
+
+ Returns: bool, device state
+
+ """
+ lsdev_cmd = module.get_bin_path('lsdev', True)
+ rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+ if lsdev_out:
+ device_state = lsdev_out.split()[1]
+ return True, device_state
+
+ device_state = None
+ return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+ """
+
+ Args:
+ module: Ansible module.
+ device: device to check attributes.
+ attr: attribute to be checked.
+
+    Returns: current attribute value, None if the attribute is invalid,
+        or '' for hidden attributes.
+
+ """
+ lsattr_cmd = module.get_bin_path('lsattr', True)
+ rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr])
+
+ hidden_attrs = ['delalias4', 'delalias6']
+
+ if rc == 255:
+
+ if attr in hidden_attrs:
+ current_param = ''
+ else:
+ current_param = None
+
+ return current_param
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+ current_param = lsattr_out.split()[1]
+ return current_param
+
+
+def discover_device(module, device):
+ """ Discover AIX devices."""
+ cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+ if device is not None:
+ device = "-l %s" % device
+
+ else:
+ device = ''
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device])
+ changed = True
+ msg = cfgmgr_out
+
+ return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+ """ Change AIX device attribute. """
+
+ attr_changed = []
+ attr_not_changed = []
+ attr_invalid = []
+ chdev_cmd = module.get_bin_path('chdev', True)
+
+ for attr in list(attributes.keys()):
+ new_param = attributes[attr]
+ current_param = _check_device_attr(module, device, attr)
+
+ if current_param is None:
+ attr_invalid.append(attr)
+
+ elif current_param != new_param:
+ if force:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force]
+ else:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])]
+
+ if not module.check_mode:
+ rc, chdev_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.exit_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+ attr_changed.append(attributes[attr])
+ else:
+ attr_not_changed.append(attributes[attr])
+
+ if len(attr_changed) > 0:
+ changed = True
+ attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+ else:
+ changed = False
+ attr_changed_msg = ''
+
+ if len(attr_not_changed) > 0:
+ attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+ else:
+ attr_not_changed_msg = ''
+
+ if len(attr_invalid) > 0:
+ attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+ else:
+ attr_invalid_msg = ''
+
+ msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+ return changed, msg
+
+
+def remove_device(module, device, force, recursive, state):
+ """ Puts device in defined state or removes device. """
+
+ state_opt = {
+ 'removed': '-d',
+ 'absent': '-d',
+ 'defined': ''
+ }
+
+ recursive_opt = {
+ True: '-R',
+ False: ''
+ }
+
+ recursive = recursive_opt[recursive]
+ state = state_opt[state]
+
+ changed = True
+ msg = ''
+ rmdev_cmd = module.get_bin_path('rmdev', True)
+
+ if not module.check_mode:
+ if state:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force])
+ else:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
+
+ msg = rmdev_out
+
+ return changed, msg
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ attributes=dict(type='dict'),
+ device=dict(type='str'),
+ force=dict(type='bool', default=False),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
+ ),
+ supports_check_mode=True,
+ )
+
+ force_opt = {
+ True: '-f',
+ False: '',
+ }
+
+ attributes = module.params['attributes']
+ device = module.params['device']
+ force = force_opt[module.params['force']]
+ recursive = module.params['recursive']
+ state = module.params['state']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'available' or state == 'present':
+ if attributes:
+ # change attributes on device
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+            # discover devices (cfgmgr)
+ if device and device != 'all':
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ # run cfgmgr on specific device
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ elif state == 'removed' or state == 'absent' or state == 'defined':
+ if not device:
+ result['msg'] = "device is required to removed or defined state."
+
+ else:
+ # Remove device
+ check_device, device_state = _check_device(module, device)
+ if check_device:
+ if state == 'defined' and device_state == 'Defined':
+ result['changed'] = False
+ result['msg'] = 'Device %s already in Defined' % device
+
+ else:
+ result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py
new file mode 100644
index 00000000..58a5c25d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_filesystem.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file
+    systems on AIX using C(/etc/filesystems).
+  - For LVM file systems, it is possible to resize a file system.
+options:
+ account_subsystem:
+ description:
+ - Specifies whether the file system is to be processed by the accounting subsystem.
+ type: bool
+ default: no
+ attributes:
+ description:
+      - Specifies attributes for the file system, separated by commas.
+ type: list
+ elements: str
+ default: agblksize='4096',isnapshot='no'
+ auto_mount:
+ description:
+ - File system is automatically mounted at system restart.
+ type: bool
+ default: yes
+ device:
+ description:
+ - Logical volume (LV) device name or remote export device to create a NFS file system.
+ - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+      - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
+ type: str
+ fs_type:
+ description:
+ - Specifies the virtual file system type.
+ type: str
+ default: jfs2
+ permissions:
+ description:
+ - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ type: str
+ choices: [ ro, rw ]
+ default: rw
+ mount_group:
+ description:
+ - Specifies the mount group.
+ type: str
+ filesystem:
+ description:
+ - Specifies the mount point, which is the directory where the file system will be mounted.
+ type: str
+ required: true
+ nfs_server:
+ description:
+ - Specifies a Network File System (NFS) server.
+ type: str
+ rm_mount_point:
+ description:
+ - Removes the mount point directory when used with state C(absent).
+ type: bool
+ default: no
+ size:
+ description:
+ - Specifies the file system size.
+      - If the file system already exists (C(present)), it will be resized.
+      - The value is in 512-byte blocks, Megabytes or Gigabytes. If the value has M specified
+        it will be in Megabytes. If the value has G specified it will be in
+        Gigabytes.
+      - If neither M nor G is specified, the value will be in 512-byte blocks.
+      - If "+" is specified at the beginning of the value, that amount will be added.
+      - If "-" is specified at the beginning of the value, that amount will be removed.
+      - If neither "+" nor "-" is specified, the value is the new total size.
+      - Size respects the AIX LVM standards.
+ type: str
+ state:
+ description:
+ - Controls the file system state.
+      - C(present) checks if the file system exists, and creates or resizes it.
+      - C(absent) removes an existing file system if it is already C(unmounted).
+      - C(mounted) checks if the file system is mounted or mounts the file system.
+      - C(unmounted) checks if the file system is unmounted or unmounts the file system.
+ type: str
+ choices: [ absent, mounted, present, unmounted ]
+ default: present
+ vg:
+ description:
+ - Specifies an existing volume group (VG).
+ type: str
+notes:
+  - For more C(attributes), please check the AIX "crfs" manual.
+'''
+
+EXAMPLES = r'''
+- name: Create filesystem in a previously defined logical volume.
+ community.general.aix_filesystem:
+ device: testlv
+    filesystem: /testfs
+ state: present
+
+- name: Creating NFS filesystem from nfshost.
+ community.general.aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+    filesystem: /home/ftp
+ state: present
+
+- name: Creating a new file system without a previously logical volume.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+- name: Unmounting /testfs.
+ community.general.aix_filesystem:
+    filesystem: /testfs
+ state: unmounted
+
+- name: Resizing /mksysb to +512M.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Remove NFS filesystem /home/ftp.
+ community.general.aix_filesystem:
+    filesystem: /home/ftp
+ rm_mount_point: yes
+ state: absent
+
+- name: Remove /newfs.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Return changed for aix_filesystems actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._mount import ismount
+import re
+
+
+def _fs_exists(module, filesystem):
+ """
+ Check if file system already exists on /etc/filesystems.
+
+ :param module: Ansible module.
+    :param filesystem: filesystem name.
+ :return: True or False.
+ """
+ lsfs_cmd = module.get_bin_path('lsfs', True)
+ rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
+ if rc == 1:
+ if re.findall("No record matching", err):
+ return False
+
+ else:
+ module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
+
+ else:
+
+ return True
+
+
+def _check_nfs_device(module, nfs_host, device):
+ """
+ Validate if NFS server is exporting the device (remote export).
+
+ :param module: Ansible module.
+ :param nfs_host: nfs_host parameter, NFS server.
+ :param device: device parameter, remote export.
+ :return: True or False.
+ """
+ showmount_cmd = module.get_bin_path('showmount', True)
+ rc, showmount_out, err = module.run_command(
+ "%s -a %s" % (showmount_cmd, nfs_host))
+ if rc != 0:
+ module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
+ else:
+ showmount_data = showmount_out.splitlines()
+ for line in showmount_data:
+ if line.split(':')[1] == device:
+ return True
+
+ return False
+
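+# Illustrative 'showmount -a <nfs_host>' output the loop above parses
+# (example values): "client.example.com:/home/ftp"
+# i.e. "client:exported_path", so split(':')[1] is compared against the
+# requested device.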
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group %s is in varyoff state." % vg
+ return False, msg
+ elif vg in current_active_vgs:
+ msg = "Volume group %s is in varyon state." % vg
+ return True, msg
+ else:
+ msg = "Volume group %s does not exist." % vg
+ return None, msg
+
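+# Illustrative only: create_fs() below consumes this as
+#   vg_state, msg = _validate_vg(module, vg)
+# and proceeds only when vg_state is True (varyon); False or None short-circuit
+# with the returned message.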
+
+def resize_fs(module, filesystem, size):
+ """ Resize LVM file system. """
+
+ chfs_cmd = module.get_bin_path('chfs', True)
+ if not module.check_mode:
+ rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem))
+
+ if rc == 28:
+ changed = False
+ return changed, chfs_out
+ elif rc != 0:
+ if re.findall('Maximum allocation for logical', err):
+ changed = False
+ return changed, err
+ else:
+ module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
+
+ else:
+ if re.findall('The filesystem size is already', chfs_out):
+ changed = False
+ else:
+ changed = True
+
+ return changed, chfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
+ account_subsystem, permissions, nfs_server, attributes):
+ """ Create LVM file system or NFS remote mount point. """
+
+ attributes = ' -a '.join(attributes)
+
+ # Parameters definition.
+ account_subsys_opt = {
+ True: '-t yes',
+ False: '-t no'
+ }
+
+ if nfs_server is not None:
+ auto_mount_opt = {
+ True: '-A',
+ False: '-a'
+ }
+
+ else:
+ auto_mount_opt = {
+ True: '-A yes',
+ False: '-A no'
+ }
+
+ if size is None:
+ size = ''
+ else:
+ size = "-a size=%s" % size
+
+ if device is None:
+ device = ''
+ else:
+ device = "-d %s" % device
+
+ if vg is None:
+ vg = ''
+ else:
+ vg_state, msg = _validate_vg(module, vg)
+ if vg_state:
+ vg = "-g %s" % vg
+ else:
+ changed = False
+
+ return changed, msg
+
+ if mount_group is None:
+ mount_group = ''
+
+ else:
+ mount_group = "-u %s" % mount_group
+
+ auto_mount = auto_mount_opt[auto_mount]
+ account_subsystem = account_subsys_opt[account_subsystem]
+
+ if nfs_server is not None:
+ # Creates a NFS file system.
+ mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
+ if not module.check_mode:
+ rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % (
+ mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "NFS file system %s created." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+ else:
+ # Creates a LVM file system.
+ crfs_cmd = module.get_bin_path('crfs', True)
+ if not module.check_mode:
+ cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
+ crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)
+ rc, crfs_out, err = module.run_command(cmd)
+
+ if rc == 10:
+ module.exit_json(
+ msg="Using an existing previously defined logical volume; the "
+ "volume group needs to be empty. %s" % err)
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+
+ else:
+ changed = True
+ return changed, crfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def remove_fs(module, filesystem, rm_mount_point):
+ """ Remove an LVM file system or NFS entry. """
+
+ # Command parameters.
+ rm_mount_point_opt = {
+ True: '-r',
+ False: ''
+ }
+
+ rm_mount_point = rm_mount_point_opt[rm_mount_point]
+
+ rmfs_cmd = module.get_bin_path('rmfs', True)
+ if not module.check_mode:
+ # The -r flag is only added when rm_mount_point=True (via rm_mount_point_opt).
+ cmd = "%s %s %s" % (rmfs_cmd, rm_mount_point, filesystem)
+ rc, rmfs_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+ else:
+ changed = True
+ msg = rmfs_out
+ if not rmfs_out:
+ msg = "File system %s removed." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def mount_fs(module, filesystem):
+ """ Mount a file system. """
+ mount_cmd = module.get_bin_path('mount', True)
+
+ if not module.check_mode:
+ rc, mount_out, err = module.run_command(
+ "%s %s" % (mount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s mounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def unmount_fs(module, filesystem):
+ """ Unmount a file system."""
+ unmount_cmd = module.get_bin_path('unmount', True)
+
+ if not module.check_mode:
+ rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s unmounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_subsystem=dict(type='bool', default=False),
+ attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ auto_mount=dict(type='bool', default=True),
+ device=dict(type='str'),
+ filesystem=dict(type='str', required=True),
+ fs_type=dict(type='str', default='jfs2'),
+ permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
+ mount_group=dict(type='str'),
+ nfs_server=dict(type='str'),
+ rm_mount_point=dict(type='bool', default=False),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
+ vg=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ account_subsystem = module.params['account_subsystem']
+ attributes = module.params['attributes']
+ auto_mount = module.params['auto_mount']
+ device = module.params['device']
+ fs_type = module.params['fs_type']
+ permissions = module.params['permissions']
+ mount_group = module.params['mount_group']
+ filesystem = module.params['filesystem']
+ nfs_server = module.params['nfs_server']
+ rm_mount_point = module.params['rm_mount_point']
+ size = module.params['size']
+ state = module.params['state']
+ vg = module.params['vg']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'present':
+ fs_mounted = ismount(filesystem)
+ fs_exists = _fs_exists(module, filesystem)
+
+ # Check if fs is mounted or exists.
+ if fs_mounted or fs_exists:
+ result['msg'] = "File system %s already exists." % filesystem
+ result['changed'] = False
+
+ # If parameter size was passed, resize fs.
+ if size is not None:
+ result['changed'], result['msg'] = resize_fs(module, filesystem, size)
+
+ # If fs doesn't exist, create it.
+ else:
+ # Check if fs will be a NFS device.
+ if nfs_server is not None:
+ if device is None:
+ result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
+ module.fail_json(**result)
+ else:
+ # Create a fs from NFS export.
+ if _check_nfs_device(module, nfs_server, device):
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is None:
+ if vg is None:
+ result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+ module.fail_json(**result)
+ else:
+ # Create a fs using the volume group.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is not None and nfs_server is None:
+ # Create a fs from a previously defined logical volume device.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ elif state == 'absent':
+ if ismount(filesystem):
+ result['msg'] = "File system %s is mounted; unmount it before removing." % filesystem
+
+ else:
+ fs_status = _fs_exists(module, filesystem)
+ if not fs_status:
+ result['msg'] = "File system %s does not exist." % filesystem
+ else:
+ result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+ elif state == 'mounted':
+ if ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already mounted." % filesystem
+ else:
+ result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+ elif state == 'unmounted':
+ if not ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already unmounted." % filesystem
+ else:
+ result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+ else:
+ # Unreachable codeblock
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py
new file mode 100644
index 00000000..c2daface
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_inittab.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Joris Weijters <joris.weijters@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+ - Manages the inittab on AIX.
+options:
+ name:
+ description:
+ - Name of the inittab entry.
+ type: str
+ required: yes
+ aliases: [ service ]
+ runlevel:
+ description:
+ - Runlevel of the entry.
+ type: str
+ required: yes
+ action:
+ description:
+ - The action that init should take for this entry.
+ type: str
+ choices:
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
+ command:
+ description:
+ - The command to run.
+ type: str
+ required: yes
+ insertafter:
+ description:
+ - The inittab entry after which the new entry should be inserted.
+ type: str
+ state:
+ description:
+ - Whether the entry should be present or absent in the inittab file.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The changes are persistent across reboots.
+ - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+ - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 4
+ action: once
+ command: echo hello
+ insertafter: existingservice
+ state: present
+ become: yes
+
+# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
+- name: Change startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: present
+ become: yes
+
+- name: Remove startmyservice from inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: absent
+ become: yes
+'''
+
+RETURN = '''
+name:
+ description: Name of the adjusted inittab entry
+ returned: always
+ type: str
+ sample: startmyservice
+msg:
+ description: Action done with the inittab entry
+ returned: changed
+ type: str
+ sample: changed inittab entry startmyservice
+changed:
+ description: Whether the inittab changed or not
+ returned: always
+ type: bool
+ sample: true
+'''
+
+# Import necessary libraries
+try:
+ # python 2
+ from itertools import izip
+except ImportError:
+ izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+ # Check if the entry exists. If not, return a dict with 'exist': False;
+ # if it does, return 'exist': True along with the entry's fields.
+ existsdict = {'exist': False}
+ lsitab = module.get_bin_path('lsitab')
+ (rc, out, err) = module.run_command([lsitab, module.params['name']])
+ if rc == 0:
+ keys = ('name', 'runlevel', 'action', 'command')
+ values = out.split(":")
+ # strip non-readable characters such as \n
+ values = map(lambda s: s.strip(), values)
+ existsdict = dict(izip(keys, values))
+ existsdict.update({'exist': True})
+ return existsdict
+
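+# Illustrative sketch (assumption, not part of the original module): for an
+# inittab line such as "startmyservice:4:once:echo hello", check_current_entry()
+# would return {'exist': True, 'name': 'startmyservice', 'runlevel': '4',
+# 'action': 'once', 'command': 'echo hello'}.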
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['service']),
+ runlevel=dict(type='str', required=True),
+ action=dict(type='str', choices=[
+ 'boot',
+ 'bootwait',
+ 'hold',
+ 'initdefault',
+ 'off',
+ 'once',
+ 'ondemand',
+ 'powerfail',
+ 'powerwait',
+ 'respawn',
+ 'sysinit',
+ 'wait',
+ ]),
+ command=dict(type='str', required=True),
+ insertafter=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ result = {
+ 'name': module.params['name'],
+ 'changed': False,
+ 'msg': ""
+ }
+
+ # Find commandline strings
+ mkitab = module.get_bin_path('mkitab')
+ rmitab = module.get_bin_path('rmitab')
+ chitab = module.get_bin_path('chitab')
+ rc = 0
+
+ # check if the new entry exists
+ current_entry = check_current_entry(module)
+
+ # if action is install or change,
+ if module.params['state'] == 'present':
+
+ # create new entry string
+ new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+ ":" + module.params['action'] + ":" + module.params['command']
+
+ # If the entry does not exist, or its fields differ from the requested
+ # values, the entry will be created or changed below.
+ if (not current_entry['exist']) or (
+ module.params['runlevel'] != current_entry['runlevel'] or
+ module.params['action'] != current_entry['action'] or
+ module.params['command'] != current_entry['command']):
+
+ # If the entry does exist then change the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command([chitab, new_entry])
+ if rc != 0:
+ module.fail_json(
+ msg="could not change inittab", rc=rc, err=err)
+ result['msg'] = "changed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ # If the entry does not exist create the entry
+ elif not current_entry['exist']:
+ if module.params['insertafter']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, '-i', module.params['insertafter'], new_entry])
+ else:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, new_entry])
+
+ if rc != 0:
+ module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+ result['msg'] = "added inittab entry" + " " + module.params['name']
+ result['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # If the action is remove and the entry exists then remove the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [rmitab, module.params['name']])
+ if rc != 0:
+ module.fail_json(
+ msg="could not remove entry from inittab", rc=rc, err=err)
+ result['msg'] = "removed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py
new file mode 100644
index 00000000..569711f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvg.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+- This module creates, removes or resizes volume groups on AIX LVM.
+options:
+ force:
+ description:
+ - Force volume group creation.
+ type: bool
+ default: no
+ pp_size:
+ description:
+ - The size of the physical partition in megabytes.
+ type: int
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or extending (C(present) state) the volume group.
+ - If not specified when reducing (C(absent) state), the whole volume group will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - Control whether the volume group exists, and whether it is in the AIX varyonvg C(varyon) or varyoffvg C(varyoff) state.
+ type: str
+ choices: [ absent, present, varyoff, varyon ]
+ default: present
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ vg_type:
+ description:
+ - The type of the volume group.
+ type: str
+ choices: [ big, normal, scalable ]
+ default: normal
+notes:
+- AIX will only permit removing a VG when none of its LVs/filesystems are busy.
+- The module does not modify the PP size of an already existing volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ pp_size: 128
+ vg_type: scalable
+ state: present
+
+- name: Removing a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: absent
+
+- name: Extending rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: present
+
+- name: Reducing rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: absent
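+
+# Illustrative example (not part of the original module documentation): the state
+# parameter also accepts varyon/varyoff to change the volume group availability state.
+- name: Varyoff the volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: varyoff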
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _validate_pv(module, vg, pvs):
+ """
+ Validate that the physical volume (PV) is not already in use by
+ another volume group or by Oracle ASM.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume group name.
+ :param pvs: Physical volume list.
+ :return: [bool, message] or module.fail_json for errors.
+ """
+
+ lspv_cmd = module.get_bin_path('lspv', True)
+ rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
+
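+ # Illustrative 'lspv' output lines (assumed format: PV name, PVID, VG, state):
+ # hdisk0 00f84c7d8a8b1c2d rootvg active
+ # hdisk1 00f84c7d9e9f0a1b None
+ # The parsing below keeps pv_data[0] (PV name) and pv_data[2] (owning VG or 'None').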
+ for pv in pvs:
+ # Get pv list.
+ lspv_list = {}
+ for line in current_lspv.splitlines():
+ pv_data = line.split()
+ lspv_list[pv_data[0]] = pv_data[2]
+
+ # Check if pv exists and is free.
+ if pv not in lspv_list.keys():
+ module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
+
+ if lspv_list[pv] == 'None':
+ # Disk None, looks free.
+ # Check if PV is not already in use by Oracle ASM.
+ lquerypv_cmd = module.get_bin_path('lquerypv', True)
+ rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
+ if rc != 0:
+ module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
+
+ if 'ORCLDISK' in current_lquerypv:
+ module.fail_json(msg="Physical volume '%s' is already used by Oracle ASM." % pv)
+
+ msg = "Physical volume '%s' is ok to be used." % pv
+ return True, msg
+
+ # Check if PV is already in use for the same vg.
+ elif vg != lspv_list[pv]:
+ module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
+
+ msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
+ return False, msg
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group '%s' is in varyoff state." % vg
+ return False, msg
+
+ if vg in current_active_vgs:
+ msg = "Volume group '%s' is in varyon state." % vg
+ return True, msg
+
+ msg = "Volume group '%s' does not exist." % vg
+ return None, msg
+
+
+def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
+ """ Creates or extend a volume group. """
+
+ # Command option parameters.
+ force_opt = {
+ True: '-f',
+ False: ''
+ }
+
+ vg_opt = {
+ 'normal': '',
+ 'big': '-B',
+ 'scalable': '-S',
+ }
+
+ # Validate if PV are not already in use.
+ pv_state, msg = _validate_pv(module, vg, pvs)
+ if not pv_state:
+ changed = False
+ return changed, msg
+
+ vg_state, msg = vg_validation
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is True:
+ # Volume group extension.
+ changed = True
+ msg = ""
+
+ if not module.check_mode:
+ extendvg_cmd = module.get_bin_path('extendvg', True)
+ rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Extending volume group '%s' has failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' extended." % vg
+ return changed, msg
+
+ elif vg_state is None:
+ # Volume group creation.
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ mkvg_cmd = module.get_bin_path('mkvg', True)
+ rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Creating volume group '%s' failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' created." % vg
+ return changed, msg
+
+
+def reduce_vg(module, vg, pvs, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is None:
+ changed = False
+ return changed, msg
+
+ # Define pvs_to_remove (list of physical volumes to be removed).
+ if pvs is None:
+ # Remove the VG if pvs is not specified.
+ # Remark: AIX will only permit removal if the VG has no LVs.
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ pvs_to_remove = []
+ for line in current_pvs.splitlines()[2:]:
+ pvs_to_remove.append(line.split()[0])
+
+ reduce_msg = "Volume group '%s' removed." % vg
+ else:
+ pvs_to_remove = pvs
+ reduce_msg = ("Physical volume(s) '%s' removed from volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+ # Reduce volume group.
+ if len(pvs_to_remove) <= 0:
+ changed = False
+ msg = "No physical volumes to remove."
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ reducevg_cmd = module.get_bin_path('reducevg', True)
+ rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
+ if rc != 0:
+ module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+ msg = reduce_msg
+ return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is None:
+ module.fail_json(msg=msg)
+
+ if state == 'varyon':
+ if vg_state is True:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyonvg', True)
+ rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+ msg = "Varyon volume group %s completed." % vg
+ return changed, msg
+
+ elif state == 'varyoff':
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
+ rc, varyoffvg_out, stderr = module.run_command("%s %s" % (varyoffvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
+
+ msg = "Varyoff volume group %s completed." % vg
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', default=False),
+ pp_size=dict(type='int'),
+ pvs=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+ vg=dict(type='str', required=True),
+ vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+ ),
+ supports_check_mode=True,
+ )
+
+ force = module.params['force']
+ pp_size = module.params['pp_size']
+ pvs = module.params['pvs']
+ state = module.params['state']
+ vg = module.params['vg']
+ vg_type = module.params['vg_type']
+
+ if pp_size is None:
+ pp_size = ''
+ else:
+ pp_size = "-s %s" % pp_size
+
+ vg_validation = _validate_vg(module, vg)
+
+ if state == 'present':
+ if not pvs:
+ changed = False
+ msg = "pvs is required for state 'present'."
+ module.fail_json(msg=msg)
+ else:
+ changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
+
+ elif state == 'absent':
+ changed, msg = reduce_vg(module, vg, pvs, vg_validation)
+
+ elif state == 'varyon' or state == 'varyoff':
+ changed, msg = state_vg(module, vg, state, vg_validation)
+
+ else:
+ changed = False
+ msg = "Unexpected state"
+
+ module.exit_json(changed=changed, msg=msg, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py
new file mode 100644
index 00000000..02b4f06c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/aix_lvol.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alain Dejoux (@adejoux)
+module: aix_lvol
+short_description: Configure AIX LVM logical volumes
+description:
+ - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ type: str
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ type: str
+ required: true
+ lv_type:
+ description:
+ - The type of the logical volume.
+ type: str
+ default: jfs2
+ size:
+ description:
+ - The size of the logical volume with one of the [MGT] units.
+ type: str
+ copies:
+ description:
+ - The number of copies of the logical volume.
+ - Maximum copies are 3.
+ type: int
+ default: 1
+ policy:
+ description:
+ - Sets the interphysical volume allocation policy.
+ - C(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ type: str
+ choices: [ maximum, minimum ]
+ default: maximum
+ state:
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ opts:
+ description:
+ - Free-form options to be passed to the mklv command.
+ type: str
+ pvs:
+ description:
+ - A list of physical volumes e.g. C(hdisk1,hdisk2).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+- name: Create a logical volume of 512M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+
+- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test2lv
+ size: 512M
+ pvs: [ hdisk1, hdisk2 ]
+
+- name: Create a logical volume of 512M mirrored
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test3lv
+ size: 512M
+ copies: 2
+
+- name: Create a logical volume of 1G with a minimum placement policy
+ community.general.aix_lvol:
+ vg: rootvg
+ lv: test4lv
+ size: 1G
+ policy: minimum
+
+- name: Create a logical volume with special options like mirror pool
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+ opts: -p copy1=poolA -p copy2=poolB
+
+- name: Extend the logical volume to 1200M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test4lv
+ size: 1200M
+
+- name: Remove the logical volume
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ type: str
+ description: A friendly message describing the task result.
+ returned: always
+ sample: Logical volume testlv created.
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def convert_size(module, size):
+ unit = size[-1].upper()
+ units = ['M', 'G', 'T']
+ try:
+ multiplier = 1024 ** units.index(unit)
+ except ValueError:
+ module.fail_json(msg="No valid size unit specified.")
+
+ return int(size[:-1]) * multiplier
+
+
+def round_ppsize(x, base=16):
+ new_size = int(base * round(float(x) / base))
+ if new_size < x:
+ new_size += base
+ return new_size
+
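+# Worked example (illustrative, not part of the original module): with a volume
+# group PP size of 16 MB, convert_size(module, '1G') returns 1024 and
+# round_ppsize(1000, base=16) rounds up to 1008, so requested sizes are always
+# aligned to whole physical partitions.
+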
+
+def parse_lv(data):
+ name = None
+
+ for line in data.splitlines():
+ match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ vg = match.group(2)
+ continue
+ match = re.search(r"LPs:\s+(\d+).*PPs", line)
+ if match is not None:
+ lps = int(match.group(1))
+ continue
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+ match = re.search(r"INTER-POLICY:\s+(\w+)", line)
+ if match is not None:
+ policy = match.group(1)
+ continue
+
+ if not name:
+ return None
+
+ size = lps * pp_size
+
+ return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+
+
+def parse_vg(data):
+
+ for line in data.splitlines():
+
+ match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ continue
+
+ match = re.search(r"TOTAL PP.*\((\d+)", line)
+ if match is not None:
+ size = int(match.group(1))
+ continue
+
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+
+ match = re.search(r"FREE PP.*\((\d+)", line)
+ if match is not None:
+ free = int(match.group(1))
+ continue
+
+ return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str', required=True),
+ lv_type=dict(type='str', default='jfs2'),
+ size=dict(type='str'),
+ opts=dict(type='str', default=''),
+ copies=dict(type='int', default=1),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
+ pvs=dict(type='list', elements='str', default=list())
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ lv_type = module.params['lv_type']
+ size = module.params['size']
+ opts = module.params['opts']
+ copies = module.params['copies']
+ policy = module.params['policy']
+ state = module.params['state']
+ pvs = module.params['pvs']
+
+ pv_list = ' '.join(pvs)
+
+ if policy == 'maximum':
+ lv_policy = 'x'
+ else:
+ lv_policy = 'm'
+
+ # Add echo command when running in check-mode
+ if module.check_mode:
+ test_opt = 'echo '
+ else:
+ test_opt = ''
+
+ # check if system commands are available
+ lsvg_cmd = module.get_bin_path("lsvg", required=True)
+ lslv_cmd = module.get_bin_path("lslv", required=True)
+
+ # Get information on volume group requested
+ rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+ this_vg = parse_vg(vg_info)
+
+ if size is not None:
+ # Calculate pp size and round it up based on pp size.
+ lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+ # Get information on logical volume requested
+ rc, lv_info, err = module.run_command(
+ "%s %s" % (lslv_cmd, lv))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+ changed = False
+
+ this_lv = parse_lv(lv_info)
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ if this_lv is None:
+ if state == 'present':
+ if lv_size > this_vg['free']:
+ module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+ # create LV
+ mklv_cmd = module.get_bin_path("mklv", required=True)
+
+ cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+ else:
+ module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ rmlv_cmd = module.get_bin_path("rmlv", required=True)
+ rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+ else:
+ if this_lv['policy'] != policy:
+ # change lv allocation policy
+ chlv_cmd = module.get_bin_path("chlv", required=True)
+ rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+ else:
+ module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+ if vg != this_lv['vg']:
+ module.fail_json(msg="Logical volume %s already exists in volume group %s." % (lv, this_lv['vg']))
+
+ # From here on, the only remaining action is to resize the LV; if no size parameter was passed, we do nothing.
+ if not size:
+ module.exit_json(changed=False, msg="Logical volume %s already exists." % lv)
+
+ # resize LV based on absolute values
+ if int(lv_size) > this_lv['size']:
+ extendlv_cmd = module.get_bin_path("extendlv", required=True)
+ cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+ else:
+ module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+ elif lv_size < this_lv['size']:
+ module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
+ else:
+ module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py
new file mode 100644
index 00000000..09754ccd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance.py
@@ -0,0 +1,1013 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
+description:
+ - Create, start, stop, restart, modify or terminate ecs instances.
+ - Add or remove ecs instances to/from security group.
+options:
+ state:
+ description:
+ - The state of the instance after operating.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance.
+ If it is not specified, it will be allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
+ It cannot begin with http:// or https://.
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
+ Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name.
+ The sequential suffix ranges from 001 to 999.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+ - The password used to log in to the instance. A modified password will take effect after the instance is rebooted.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+ - Size of the system disk, in GB. The valid values are 40~500.
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+ - The number of new instances. An integer value which indicates how many instances matching I(count_tag)
+ should be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+ - I(count) determines how many instances matching this tag criterion should be present.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ The specified count_tag must already exist or be passed in as the I(tags) option.
+ If it is not specified, it will be replaced by I(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+ - Whether to allocate a public IP for the new instance.
+ default: False
+ aliases: [ 'assign_public_ip' ]
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+ - The charge duration of the instance, in months. Required when C(instance_charge_type=PrePaid).
+ - The valid values are [1-9, 12, 24, 36].
+ default: 1
+ type: int
+ auto_renew:
+ description:
+ - Whether to automatically renew the charge of the instance.
+ type: bool
+ default: False
+ auto_renew_period:
+ description:
+ - The duration of the automatic renewal of the instance charge. Required when C(auto_renew=True).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+ - A list of instance IDs. It is required when operating on existing instances.
+ If it is specified, I(count) will be ignored.
+ type: list
+ elements: str
+ force:
+ description:
+ - Whether the current operation needs to be executed forcibly.
+ default: False
+ type: bool
+ tags:
+ description:
+ - A hash/dictionary of instance tags, used to add tags to a new instance or to start/stop instances by tag. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+ If True, it means you have to specify all the desired tags on each task affecting an instance.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+ - The name of key pair which is used to access ECS instance in SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
+ It will only take effect when launching new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
+ places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+ - The unit of the duration for which you purchase the resource. It is valid when C(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+ - If I(dry_run=True), only a dry-run request is sent and no instance is created. The system checks whether the
+ required parameters are set, and validates the request format, service permissions, and available ECS instances.
+ If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+ - If I(dry_run=False), a request is sent. If the validation succeeds, the instance is created.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+ - Whether to also change the charge type of the instance's disks when changing the instance charge type.
+ default: True
+ type: bool
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.19.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# basic provisioning example vpc network
+- name: Basic provisioning example
+ hosts: localhost
+ vars:
+ alicloud_access_key: <your-alicloud-access-key-id>
+ alicloud_secret_key: <your-alicloud-access-secret-key>
+ alicloud_region: cn-beijing
+ image: ubuntu1404_64_40G_cloudinit_20160727.raw
+ instance_type: ecs.n4.small
+ vswitch_id: vsw-abcd1234
+ assign_public_ip: True
+ max_bandwidth_out: 10
+ host_name: myhost
+ password: mypassword
+ system_disk_category: cloud_efficiency
+ system_disk_size: 100
+ internet_charge_type: PayByBandwidth
+ security_groups: ["sg-f2rwnfh23r"]
+
+ instance_ids: ["i-abcd12346", "i-abcd12345"]
+ force: True
+
+ tasks:
+ - name: Launch ECS instance in VPC network
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ vswitch_id: '{{ vswitch_id }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: With count and count_tag to create a number of instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ security_groups: '{{ security_groups }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ Version: 0.1
+ count: 2
+ count_tag:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: Start instance
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'running'
+
+ - name: Reboot instance forcibly
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'restarted'
+ force: '{{ force }}'
+
+ - name: Add instances to a security group
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ security_groups: '{{ security_groups }}'
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+ description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import re
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def get_instances_info(connection, ids):
+ result = []
+ instances = connection.describe_instances(instance_ids=ids)
+ if len(instances) > 0:
+ for inst in instances:
+ volumes = connection.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ result.append(inst.read())
+ return result
+
+
+def run_instance(module, ecs, exact_count):
+ if exact_count <= 0:
+ return None
+ zone_id = module.params['availability_zone']
+ image_id = module.params['image_id']
+ instance_type = module.params['instance_type']
+ security_groups = module.params['security_groups']
+ vswitch_id = module.params['vswitch_id']
+ instance_name = module.params['instance_name']
+ description = module.params['description']
+ internet_charge_type = module.params['internet_charge_type']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ max_bandwidth_in = module.params['max_bandwidth_in']
+ host_name = module.params['host_name']
+ password = module.params['password']
+ system_disk_category = module.params['system_disk_category']
+ system_disk_size = module.params['system_disk_size']
+ system_disk_name = module.params['system_disk_name']
+ system_disk_description = module.params['system_disk_description']
+ allocate_public_ip = module.params['allocate_public_ip']
+ period = module.params['period']
+ auto_renew = module.params['auto_renew']
+ instance_charge_type = module.params['instance_charge_type']
+ auto_renew_period = module.params['auto_renew_period']
+ user_data = module.params['user_data']
+ key_name = module.params['key_name']
+ ram_role_name = module.params['ram_role_name']
+ spot_price_limit = module.params['spot_price_limit']
+ spot_strategy = module.params['spot_strategy']
+ unique_suffix = module.params['unique_suffix']
+ # check whether the required parameters were passed or not
+ if not image_id:
+ module.fail_json(msg='image_id is required for new instance')
+ if not instance_type:
+ module.fail_json(msg='instance_type is required for new instance')
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ if len(security_groups) <= 0:
+ module.fail_json(msg='The parameter security_groups must be non-empty when creating new ECS instances, aborting')
+
+ client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
+
+ try:
+ # call the run_instances method from footmark
+ instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
+ zone_id=zone_id, instance_name=instance_name, description=description,
+ internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
+ internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
+ io_optimized='optimized', system_disk_category=system_disk_category,
+ system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
+ system_disk_description=system_disk_description, vswitch_id=vswitch_id,
+ amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
+ auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
+ user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
+ spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+
+ except Exception as e:
+ module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+ return instances
+
+
+def modify_instance(module, instance):
+ # Modify some of the instance's attributes according to the requested state
+ state = module.params["state"]
+ name = module.params['instance_name']
+ unique_suffix = module.params['unique_suffix']
+ if not name:
+ name = instance.name
+
+ description = module.params['description']
+ if not description:
+ description = instance.description
+
+ host_name = module.params['host_name']
+ if unique_suffix and host_name:
+ suffix = instance.host_name[-3:]
+ host_name = host_name + suffix
+
+ if not host_name:
+ host_name = instance.host_name
+
+ # password can be modified only when restarting the instance
+ password = ""
+ if state == "restarted":
+ password = module.params['password']
+
+ # userdata can be modified only when instance is stopped
+ setattr(instance, "user_data", instance.describe_user_data())
+ user_data = instance.user_data
+ if state == "stopped":
+ user_data = module.params['user_data'].encode()
+
+ try:
+ return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+ except Exception as e:
+ module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
+
+
+def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
+ """
+ Wait until the instance charge type matches the expected value after modifying it.
+ """
+ try:
+ while True:
+ instances = ecs.describe_instances(instance_ids=instance_ids)
+ flag = True
+ for inst in instances:
+ if inst and inst.instance_charge_type != charge_type:
+ flag = False
+ if flag:
+ return
+ timeout -= delay
+ time.sleep(delay)
+ if timeout <= 0:
+ raise Exception("Timeout Error: Waiting for instance charge type to change to {0}.".format(charge_type))
+ except Exception as e:
+ raise e
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ security_groups=dict(type='list', elements='str', aliases=['group_ids']),
+ availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
+ instance_type=dict(type='str', aliases=['type']),
+ image_id=dict(type='str', aliases=['image']),
+ count=dict(type='int', default=1),
+ count_tag=dict(type='str'),
+ vswitch_id=dict(type='str', aliases=['subnet_id']),
+ instance_name=dict(type='str', aliases=['name']),
+ host_name=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+ max_bandwidth_in=dict(type='int', default=200),
+ max_bandwidth_out=dict(type='int', default=0),
+ system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+ system_disk_size=dict(type='int', default=40),
+ system_disk_name=dict(type='str'),
+ system_disk_description=dict(type='str'),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+ description=dict(type='str'),
+ allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+ instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+ period=dict(type='int', default=1),
+ auto_renew=dict(type='bool', default=False),
+ instance_ids=dict(type='list', elements='str'),
+ auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+ key_name=dict(type='str', aliases=['keypair']),
+ user_data=dict(type='str'),
+ ram_role_name=dict(type='str'),
+ spot_price_limit=dict(type='float'),
+ spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
+ unique_suffix=dict(type='bool', default=False),
+ period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
+ dry_run=dict(type='bool', default=False),
+ include_data_disks=dict(type='bool', default=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+ host_name = module.params['host_name']
+ state = module.params['state']
+ instance_ids = module.params['instance_ids']
+ count_tag = module.params['count_tag']
+ count = module.params['count']
+ instance_name = module.params['instance_name']
+ force = module.params['force']
+ zone_id = module.params['availability_zone']
+ key_name = module.params['key_name']
+ tags = module.params['tags']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ instance_charge_type = module.params['instance_charge_type']
+ if instance_charge_type == "PrePaid":
+ module.params['spot_strategy'] = ''
+ changed = False
+
+ instances = []
+ if instance_ids:
+ if not isinstance(instance_ids, list):
+ module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+ instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
+ if not instances:
+ module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+ "Please check it and try again.".format(instance_ids))
+ elif count_tag:
+ instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag))
+ elif instance_name:
+ instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
+
+ ids = []
+ if state == 'absent':
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify the ECS instances that you want to operate on by using '
+ 'the parameters instance_ids, tags or instance_name, aborting')
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
+ targets.append(inst.id)
+ if ecs.delete_instances(instance_ids=targets, force=force):
+ changed = True
+ ids.extend(targets)
+
+ module.exit_json(changed=changed, ids=ids, instances=[])
+ except Exception as e:
+ module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+ if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+ module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
+ if not module.params['allocate_public_ip']:
+ module.params['max_bandwidth_out'] = 0
+
+ if state == 'present':
+ if not instance_ids:
+ if len(instances) > count:
+ for i in range(0, len(instances) - count):
+ inst = instances[len(instances) - 1]
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
+ "and please stop it or set 'force' as True.".format(inst.id))
+ try:
+ if inst.terminate(force=force):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+ instances.pop(len(instances) - 1)
+ else:
+ try:
+ if host_name and re.search(r"-\[\d+,\d+\]-", host_name):
+ module.fail_json(msg='Ordered hostname is not supported. If you want to add an ordered '
+ 'suffix to the hostname, set unique_suffix to True')
+ new_instances = run_instance(module, ecs, count - len(instances))
+ if new_instances:
+ changed = True
+ instances.extend(new_instances)
+ except Exception as e:
+ module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+ # Security Group join/leave begin
+ security_groups = module.params['security_groups']
+ if security_groups:
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ for inst in instances:
+ existing = inst.security_group_ids['security_group_id']
+ remove = list(set(existing).difference(set(security_groups)))
+ add = list(set(security_groups).difference(set(existing)))
+ for sg in remove:
+ if inst.leave_security_group(sg):
+ changed = True
+ for sg in add:
+ if inst.join_security_group(sg):
+ changed = True
+ # Security Group join/leave ends here
+
+ # Attach/Detach key pair
+ inst_ids = []
+ for inst in instances:
+ if key_name is not None and key_name != inst.key_name:
+ if key_name == "":
+ if inst.detach_key_pair():
+ changed = True
+ else:
+ inst_ids.append(inst.id)
+ if inst_ids:
+ if ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name):
+ changed = True
+
+ # Modify instance attribute
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.id not in ids:
+ ids.append(inst.id)
+
+ # Modify instance charge type
+ ids = []
+ for inst in instances:
+ if inst.instance_charge_type != instance_charge_type:
+ ids.append(inst.id)
+ if ids:
+ params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
+ "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
+ "auto_pay": True}
+ if instance_charge_type == 'PrePaid':
+ params['period'] = module.params['period']
+ params['period_unit'] = module.params['period_unit']
+
+ if ecs.modify_instance_charge_type(**params):
+ changed = True
+ wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
+
+ else:
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify the ECS instances that you want to operate on by using '
+ 'the parameters instance_ids, tags or instance_name, aborting')
+ if state == 'running':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.status != "running":
+ targets.append(inst.id)
+ ids.append(inst.id)
+ if targets and ecs.start_instances(instance_ids=targets):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Start instances got an error: {0}'.format(e))
+ elif state == 'stopped':
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != "stopped":
+ targets.append(inst.id)
+ if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
+ changed = True
+ ids.extend(targets)
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+ elif state == 'restarted':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ targets.append(inst.id)
+ if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+
+ tags = module.params['tags']
+ if module.params['purge_tags']:
+ for inst in instances:
+ if not tags:
+ tags = inst.tags
+ try:
+ if inst.remove_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+ if tags:
+ for inst in instances:
+ try:
+ if inst.add_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+ - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A hash/dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+ any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+ Filter keys can be the same as the request parameter names, or lower case with underscores ("_") or dashes ("-")
+ connecting the words in one parameter. 'InstanceIds' should be a list and will be appended to
+ I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict; use I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
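+
+# A minimal sketch of the filters option; the filter keys below (vpc_id, status)
+# are illustrative DescribeInstances request parameters and may need to be
+# adjusted for your account and region.
+- name: Find running instances in a given VPC using filters
+  community.general.ali_instance_info:
+    filters:
+      vpc_id: "vpc-0011223344"
+      status: "Running"
+  register: running_instances_in_vpc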
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+ description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address or EIP address assigned to the instance.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a non-empty list of instance IDs, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a non-empty list of instance names, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for inst_id in value:
+ if inst_id not in ids:
+ ids.append(inst_id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ali_instance_info.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+ - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A hash/dictionary of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+ any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+ Filter keys can be the same as the request parameter names, or lower case with underscores ("_") or dashes ("-")
+ connecting the words in one parameter. 'InstanceIds' should be a list and will be appended to
+ I(instance_ids) automatically. 'Tag.n.Key' and 'Tag.n.Value' should be a dict; use I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
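+
+# A minimal sketch of the filters option; the filter keys below (vpc_id, status)
+# are illustrative DescribeInstances request parameters and may need to be
+# adjusted for your account and region.
+- name: Find running instances in a given VPC using filters
+  community.general.ali_instance_info:
+    filters:
+      vpc_id: "vpc-0011223344"
+      status: "Running"
+  register: running_instances_in_vpc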
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+ description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+ description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+ description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+ description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+ description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+ description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+ description: The public IPv4 address or EIP address assigned to the instance.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a non-empty list of instance IDs, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a non-empty list of instance names, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for inst_id in value:
+ if inst_id not in ids:
+ ids.append(inst_id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py
new file mode 100644
index 00000000..56db6dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/alternatives.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+author:
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
+options:
+ name:
+ description:
+ - The generic name of the link.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
+ required when the alternative I(name) is unknown to the system.
+ type: path
+ priority:
+ description:
+ - The priority of the alternative.
+ type: int
+ default: 50
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = r'''
+- name: Correct java version selected
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: Alternatives link created
+ community.general.alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: Make java 32 bit an alternative with low priority
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
+'''
+
+import os
+import re
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path'),
+ priority=dict(type='int', default=50),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+ priority = params['priority']
+
+ UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
+
+ current_path = None
+ all_alternatives = []
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, _) = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
+ )
+
+ if rc == 0:
+ # Alternatives already exist for this link group
+ # Parse the output to determine the current path of the symlink and
+ # available alternatives
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+ re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
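+ # For reference (a sketch of the Debian `update-alternatives --display` output
+ # these regexes are meant to match, abbreviated):
+ #   link currently points to /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ #   /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - priority 1071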
+
+ match = current_path_regex.search(display_output)
+ if match:
+ current_path = match.group(1)
+ all_alternatives = alternative_regex.findall(display_output)
+
+ if not link:
+ # Read the current symlink target from `update-alternatives --query`
+ # in case we need to install the new alternative before setting it.
+ #
+ # This is only compatible on Debian-based systems, as the other
+ # alternatives don't have --query available
+ rc, query_output, _ = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
+ )
+ if rc == 0:
+ for line in query_output.splitlines():
+ if line.startswith('Link:'):
+ link = line.split()[1]
+ break
+
+ if current_path != path:
+ if module.check_mode:
+ module.exit_json(changed=True, current_path=current_path)
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ if not os.path.exists(path):
+ module.fail_json(msg="Specified path %s does not exist" % path)
+ if not link:
+ module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError as cpe:
+ module.fail_json(msg=str(cpe))
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
new file mode 100644
index 00000000..dcf1656f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+author: Olivier Boukili (@oboukili)
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+ this page. This module supports Ansible check mode and requires the BeautifulSoup
+ Python module.
+options:
+ balancer_url_suffix:
+ type: str
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ default: /balancer-manager/
+ balancer_vhost:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, the apache2_mod_proxy module will return a list of
+ dictionaries with the attributes of all current balancer pool members.
+ state:
+ type: str
+ description:
+ - Desired state of the member host.
+ (absent|disabled),drained,hot_standby,ignore_errors can be
+ simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
+ - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ tls:
+ description:
+ - Use https to access balancer management page.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - Validate ssl/tls certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Get all current balancer pool members attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+- name: Get a specific member attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+ balancer_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- name: Get attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+
+- name: Enable all balancer pool members
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- name: Step 1
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 2
+ ansible.builtin.wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 3
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
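+
+# The sketch below is illustrative: per the state option description, several
+# non-conflicting states can be combined in one comma-separated string.
+- name: Drain a member and ignore its errors in one call
+  community.general.apache2_mod_proxy:
+    balancer_vhost: '{{ vhost_host }}'
+    member_host: '{{ member.host }}'
+    state: drained,ignore_errors
+  delegate_to: myloadbalancernode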
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+import traceback
+
+BEAUTIFUL_SOUP_IMP_ERR = None
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
+
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
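+
+# Illustrative only: given a management URL of the assumed shape
+# 'http://lb.example/balancer-manager/?b=mycluster&w=http://10.0.0.20:8080/ws&nonce=abc',
+# regexp_extraction(url, EXPRESSION, 4) would return '10.0.0.20' (the member host)
+# and group 5 would return '8080' (the member port).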
+
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ if balancer_member_page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(soup))
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled': 'Dis',
+ 'drained': 'Drn',
+ 'hot_standby': 'Stby',
+ 'ignore_errors': 'Ign'}
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
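+
+ # Illustration (sketch): a raw 'Status' attribute of "Init Drn Ok " would map to
+ # {'disabled': False, 'drained': True, 'hot_standby': False, 'ignore_errors': False}.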
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled': '&w_status_D',
+ 'drained': '&w_status_N',
+ 'hot_standby': '&w_status_H',
+ 'ignore_errors': '&w_status_I'}
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ if response[1]['status'] != 200:
+ self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+
+ def __init__(self, host, suffix, module, members=None, tls=False):
+ if tls:
+ self.base_url = 'https://' + str(host)
+ self.url = 'https://' + str(host) + str(suffix)
+ else:
+ self.base_url = 'http://' + str(host)
+ self.url = 'http://' + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ if page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if apache_version:
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+ else:
+ self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ if not balancer_member_suffix:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_BEAUTIFULSOUP is False:
+ module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+
+ if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py
new file mode 100644
index 00000000..4cc0ef8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apache2_module.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver.
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+options:
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M).
+ This is optional and usually determined automatically by the common convention of
+ appending C(_module) to I(name) as well as custom exceptions for popular modules.
+ required: False
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: False
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+ - Ignore configuration checks about inconsistent module configuration, especially for mpm_* modules.
+ type: bool
+ default: False
+requirements: ["a2enmod","a2dismod"]
+'''
+
+EXAMPLES = '''
+- name: Enable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: present
+ name: wsgi
+
+- name: Disable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: absent
+ name: wsgi
+
+- name: Disable default modules for Debian
+ community.general.apache2_module:
+ state: absent
+ name: autoindex
+ force: True
+
+- name: Disable mpm_worker and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ state: absent
+ name: mpm_worker
+ ignore_configcheck: True
+
+- name: Enable dump_io module, which is identified as dumpio_module inside apache2
+ community.general.apache2_module:
+ state: present
+ name: dump_io
+ identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+ description: message about action taken
+ returned: always
+ type: str
+warnings:
+ description: list of warning messages
+ returned: when needed
+ type: list
+rc:
+ description: return code of underlying command
+ returned: failed
+ type: int
+stdout:
+ description: stdout of underlying command
+ returned: failed
+ type: str
+stderr:
+ description: stderr of underlying command
+ returned: failed
+ type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _run_threaded(module):
+ control_binary = _get_ctl_binary(module)
+
+ result, stdout, stderr = module.run_command("%s -V" % control_binary)
+
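+ # 'apachectl -V' output includes a line such as 'threaded:     yes (fixed thread count)' for threaded MPMs (worker/event).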
+ return bool(re.search(r'threaded:[ ]*yes', stdout))
+
+
+def _get_ctl_binary(module):
+ for command in ['apache2ctl', 'apachectl']:
+ ctl_binary = module.get_bin_path(command)
+ if ctl_binary is not None:
+ return ctl_binary
+
+ module.fail_json(
+ msg="Neither of apache2ctl nor apachctl found."
+ " At least one apache control binary is necessary."
+ )
+
+
+def _module_is_enabled(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command("%s -M" % control_binary)
+
+ if result != 0:
+ error_msg = "Error executing %s: %s" % (control_binary, stderr)
+ if module.params['ignore_configcheck']:
+ if 'AH00534' in stderr and 'mpm_' in module.params['name']:
+ module.warnings.append(
+ "No MPM module loaded! apache2 reload AND other module actions"
+ " will fail if no MPM module is loaded immediately."
+ )
+ else:
+ module.warnings.append(error_msg)
+ return False
+ else:
+ module.fail_json(msg=error_msg)
+
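+ # 'apache2ctl -M' lists loaded modules as lines like ' wsgi_module (shared)'; the leading space avoids partial name matches.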
+ searchstring = ' ' + module.params['identifier']
+ return searchstring in stdout
+
+
+def create_apache_identifier(name):
+ """
+ By convention if a module is loaded via name, it appears in apache2ctl -M as
+ name_module.
+
+ Some modules don't follow this convention and we use replacements for those."""
+
+ # a2enmod name replacement to apache2ctl -M names
+ text_workarounds = [
+ ('shib', 'mod_shib'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ]
+
+ # re expressions to extract subparts of names
+ re_workarounds = [
+ ('php', r'^(php\d)\.'),
+ ]
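+ # For example, name 'php7.4' matches r'^(php\d)\.' and resolves to 'php7_module'.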
+
+ for a2enmod_spelling, module_name in text_workarounds:
+ if a2enmod_spelling in name:
+ return module_name
+
+ for search, reexpr in re_workarounds:
+ if search in name:
+ try:
+ rematch = re.search(reexpr, name)
+ return rematch.group(1) + '_module'
+ except AttributeError:
+ pass
+
+ return name + '_module'
+
+
+def _set_state(module, state):
+ name = module.params['name']
+ force = module.params['force']
+
+ want_enabled = state == 'present'
+ state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
+ a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+ success_msg = "Module %s %s" % (name, state_string)
+
+ if _module_is_enabled(module) != want_enabled:
+ if module.check_mode:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+
+ a2mod_binary_name = a2mod_binary
+ a2mod_binary = module.get_bin_path(a2mod_binary)
+ if a2mod_binary is None:
+ module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary_name, a2mod_binary_name))
+
+ if not want_enabled and force:
+ # force exists only for a2dismod on debian
+ a2mod_binary += ' -f'
+
+ result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
+
+ if _module_is_enabled(module) == want_enabled:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+ else:
+ msg = (
+ 'Failed to set module {name} to {state}:\n'
+ '{stdout}\n'
+ 'Maybe the module identifier ({identifier}) was guessed incorrectly. '
+ 'Consider setting the "identifier" option.'
+ ).format(
+ name=name,
+ state=state_string,
+ stdout=stdout,
+ identifier=module.params['identifier']
+ )
+ module.fail_json(msg=msg,
+ rc=result,
+ stdout=stdout,
+ stderr=stderr)
+ else:
+ module.exit_json(changed=False,
+ result=success_msg,
+ warnings=module.warnings)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ identifier=dict(required=False, type='str'),
+ force=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ignore_configcheck=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.warnings = []
+
+ name = module.params['name']
+ if name == 'cgi' and _run_threaded(module):
+ module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
+
+ if not module.params['identifier']:
+ module.params['identifier'] = create_apache_identifier(module.params['name'])
+
+ if module.params['state'] in ['present', 'absent']:
+ _set_state(module, module.params['state'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py
new file mode 100644
index 00000000..74b738de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apk.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+author: "Kevin Brebanov (@kbrebanov)"
+options:
+ available:
+ description:
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
+ if the currently installed package is no longer available from any repository.
+ type: bool
+ default: no
+ name:
+ description:
+ - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ type: list
+ elements: str
+ no_cache:
+ description:
+ - Do not use any local cache path.
+ type: bool
+ default: no
+ version_added: 1.0.0
+ repository:
+ description:
+ - A package repository or multiple repositories.
+ Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
+ - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ default: present
+ choices: [ "present", "absent", "latest", "installed", "removed" ]
+ type: str
+ update_cache:
+ description:
+ - Update repository indexes. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ type: bool
+ default: no
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
+ - When used with a `loop:`, each package will be processed individually; it is much more efficient to pass the list directly to the `name` option, as shown in the examples.
+'''
+
+EXAMPLES = '''
+- name: Update repositories and install foo package
+ community.general.apk:
+ name: foo
+ update_cache: yes
+
+- name: Update repositories and install foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ update_cache: yes
+
+- name: Remove foo package
+ community.general.apk:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ state: absent
+
+- name: Install the package foo
+ community.general.apk:
+ name: foo
+ state: present
+
+- name: Install the packages foo and bar
+ community.general.apk:
+ name: foo,bar
+ state: present
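+
+# The list form below is equivalent to the comma-separated form above and avoids the per-package overhead of a loop (see notes).
+- name: Install the packages foo and bar given as a list
+ community.general.apk:
+ name:
+ - foo
+ - bar
+ state: present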
+
+- name: Update repositories and update package foo to latest version
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Update repositories and update packages foo and bar to latest versions
+ community.general.apk:
+ name: foo,bar
+ state: latest
+ update_cache: yes
+
+- name: Update all installed packages to the latest versions
+ community.general.apk:
+ upgrade: yes
+
+- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
+ community.general.apk:
+ available: yes
+ upgrade: yes
+
+- name: Update repositories as a separate step
+ community.general.apk:
+ update_cache: yes
+
+- name: Install package from a specific repository
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+ repository: http://dl-3.alpinelinux.org/alpine/edge/main
+
+- name: Install package without using cache
+ community.general.apk:
+ name: foo
+ state: latest
+ no_cache: yes
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when packages have changed
+ type: list
+ sample: ['package', 'other-package']
+'''
+
+import re
+# Import module snippets.
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_for_packages(stdout):
+ packages = []
+ data = stdout.split('\n')
+ regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
+ for l in data:
+ p = regex.search(l)
+ if p:
+ packages.append(p.group(1))
+ return packages
+
+
+def update_package_db(module, exit):
+ cmd = "%s update" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
+ elif exit:
+ module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
+ else:
+ return True
+
+
+def query_toplevel(module, name):
+ # /etc/apk/world contains a list of top-level packages separated by ' ' or \n
+ # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
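+ # e.g. for name 'busybox' this matches 'busybox', 'busybox=1.35.0-r17' or 'busybox@edge' world entries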
+ with open('/etc/apk/world') as f:
+ content = f.read().split()
+ for p in content:
+ if regex.search(p):
+ return True
+ return False
+
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
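+ # 'apk version foo' prints lines like 'foo-1.0-r0 < 1.1-r0'; group(2) captures the comparison operator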
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
+
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"^%s: virtual meta package" % (re.escape(name))
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+
+def upgrade_packages(module, available):
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ if available:
+ cmd = "%s --available" % cmd
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
+ if re.search(r'^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def install_packages(module, names, state):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_toplevel(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install + to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ # Check to see if packages are still present because of dependencies
+ for name in installed:
+ if query_package(module, name):
+ rc = 1
+ break
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name=dict(type='list', elements='str'),
+ no_cache=dict(default=False, type='bool'),
+ repository=dict(type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ available=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ if p['no_cache']:
+ APK_PATH = "%s --no-cache" % (APK_PATH, )
+
+ # add repositories to the APK_PATH
+ if p['repository']:
+ for r in p['repository']:
+ APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module, not p['name'] and not p['upgrade'])
+
+ if p['upgrade']:
+ upgrade_packages(module, p['available'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py
new file mode 100644
index 00000000..d196e03b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_repo.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Mikhail Gordeev
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_repo
+short_description: Manage APT repositories via apt-repo
+description:
+ - Manages APT repositories using apt-repo tool.
+ - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo
+notes:
+ - This module works on ALT-based distros.
+ - Does NOT support check mode, due to a limitation of the apt-repo tool.
+options:
+ repo:
+ description:
+ - Name of the repository to add or remove.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicates the desired repository state.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ remove_others:
+ description:
+ - Remove all other repositories apart from the one being added.
+ - Used if I(state=present).
+ type: bool
+ default: no
+ update:
+ description:
+ - Update the package database after changing repositories.
+ type: bool
+ default: no
+author:
+- Mikhail Gordeev (@obirvalger)
+'''
+
+EXAMPLES = '''
+- name: Remove all repositories
+ community.general.apt_repo:
+ repo: all
+ state: absent
+
+- name: Add repository `Sisyphus` and remove other repositories
+ community.general.apt_repo:
+ repo: Sisyphus
+ state: present
+ remove_others: yes
+
+- name: Add local repository `/space/ALT/Sisyphus` and update package cache
+ community.general.apt_repo:
+ repo: copy:///space/ALT/Sisyphus
+ state: present
+ update: yes
+'''
+
+RETURN = ''' # '''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_REPO_PATH = "/usr/bin/apt-repo"
+
+
+def apt_repo(module, *args):
+ """run apt-repo with args and return its output"""
+ # make args list to use in concatenation
+ args = list(args)
+ rc, out, err = module.run_command([APT_REPO_PATH] + args)
+
+ if rc != 0:
+ module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
+
+ return out
+
+
+def add_repo(module, repo):
+ """add a repository"""
+ apt_repo(module, 'add', repo)
+
+
+def rm_repo(module, repo):
+ """remove a repository"""
+ apt_repo(module, 'rm', repo)
+
+
+def set_repo(module, repo):
+ """add a repository and remove other repositories"""
+ # first add to validate repository
+ apt_repo(module, 'add', repo)
+ apt_repo(module, 'rm', 'all')
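+ # add the repository again so it is the only one left after removing all others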
+ apt_repo(module, 'add', repo)
+
+
+def update(module):
+ """update package cache"""
+ apt_repo(module, 'update')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ remove_others=dict(type='bool', default=False),
+ update=dict(type='bool', default=False),
+ ),
+ )
+
+ if not os.path.exists(APT_REPO_PATH):
+ module.fail_json(msg='cannot find /usr/bin/apt-repo')
+
+ params = module.params
+ repo = params['repo']
+ state = params['state']
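+ # Record the repository list before and after the change so 'changed' reflects an actual difference.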
+ old_repositories = apt_repo(module)
+
+ if state == 'present':
+ if params['remove_others']:
+ set_repo(module, repo)
+ else:
+ add_repo(module, repo)
+ elif state == 'absent':
+ rm_repo(module, repo)
+
+ if params['update']:
+ update(module)
+
+ new_repositories = apt_repo(module)
+ changed = old_repositories != new_repositories
+ module.exit_json(changed=changed, repo=repo, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py
new file mode 100644
index 00000000..6b6bb7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/apt_rpm.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov <evg@altlinux.org>
+# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+ - Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
+options:
+ package:
+ description:
+ - list of packages to install, upgrade or remove.
+ required: true
+ aliases: [ name, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update the package database first, using C(apt-get update).
+ aliases: [ 'update-cache' ]
+ type: bool
+ default: no
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: present
+
+- name: Install packages foo and bar
+ community.general.apt_rpm:
+ pkg:
+ - foo
+ - bar
+ state: present
+
+- name: Remove package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+ community.general.apt_rpm:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+import json
+import os
+import shlex
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name):
+ # rpm -q --provides returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ return rc == 0
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command("%s update" % APT_PATH)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db: %s" % err)
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ # Using a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+
+ rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+
+ installed = True
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ installed = False
+
+ # apt-rpm always has exit code 0 if --force is used
+ if rc or not installed:
+ module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
+ ),
+ )
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package']
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, packages)
+
+ elif p['state'] in ['absent', 'removed']:
+ remove_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py
new file mode 100644
index 00000000..2872b5ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/archive.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
+# Sponsored by Oomph, Inc. http://www.oomphinc.com
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: archive
+short_description: Creates a compressed archive of one or more files or trees
+extends_documentation_fragment: files
+description:
+ - Creates or extends an archive.
+ - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
+ - Source files can be deleted after archival by specifying I(remove=True).
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ type: list
+ elements: path
+ required: true
+ format:
+ description:
+ - The type of compression to use.
+ - Support for xz was added in Ansible 2.5.
+ type: str
+ choices: [ bz2, gz, tar, xz, zip ]
+ default: gz
+ dest:
+ description:
+ - The file name of the destination archive. The parent directory must exist on the remote host.
+ - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ type: path
+ exclude_path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
+ type: list
+ elements: path
+ force_archive:
+ description:
+ - Allow you to force the module to treat this as an archive even if only a single file is specified.
+ - By default the behaviour is maintained, i.e. when a single file is specified it is compressed only (not archived).
+ type: bool
+ default: false
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ default: no
+notes:
+ - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
+ - Requires lzma or backports.lzma if using xz format.
+ - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
+seealso:
+- module: ansible.builtin.unarchive
+author:
+- Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+ community.general.archive:
+ path: /path/to/foo
+ remove: yes
+
+- name: Create a zip archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+ community.general.archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/bar
+ - /path/to/foo/baz
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/ba*
+ format: bz2
+
+- name: Use gzip to compress a single archive (i.e don't archive it first with tar)
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.gz
+ format: gz
+
+- name: Create a tar.gz archive of a single file.
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.tar.gz
+ format: gz
+ force_archive: true
+'''
+
+RETURN = r'''
+state:
+ description:
+ The current state of the archived file.
+ If 'absent', then no source files were found and the archive does not exist.
+ If 'compress', then the source file is in the compressed state.
+ If 'archive', then the source file or paths are currently archived.
+ If 'incomplete', then an archive was created, but not all source paths were found.
+ type: str
+ returned: always
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: str
+ returned: always
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
+expanded_exclude_paths:
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+'''
+
+import bz2
+import filecmp
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+LZMA_IMP_ERR = None
+if PY3:
+ try:
+ import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+else:
+ try:
+ from backports import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ check_mode = module.check_mode
+ paths = params['path']
+ dest = params['dest']
+ b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
+ exclude_paths = params['exclude_path']
+ remove = params['remove']
+
+ b_expanded_paths = []
+ b_expanded_exclude_paths = []
+ fmt = params['format']
+ b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
+ force_archive = params['force_archive']
+ globby = False
+ changed = False
+ state = 'absent'
+
+ # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+ archive = False
+ b_successes = []
+
+ # Fail early
+ if not HAS_LZMA and fmt == 'xz':
+ module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
+ exception=LZMA_IMP_ERR)
+ module.fail_json(msg="lzma or backports.lzma is required when using xz format.")
+
+ for path in paths:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_path or b'?' in b_path):
+ b_expanded_paths.extend(glob.glob(b_path))
+ globby = True
+
+ # If there are no glob characters the path is added to the expanded paths
+ # whether the path exists or not
+ else:
+ b_expanded_paths.append(b_path)
+
+ # Only attempt to expand the exclude paths if it exists
+ if exclude_paths:
+ for exclude_path in exclude_paths:
+ b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_exclude_path or b'?' in b_exclude_path):
+ b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
+
+ # If there are no glob character the exclude path is added to the expanded
+ # exclude paths whether the path exists or not.
+ else:
+ b_expanded_exclude_paths.append(b_exclude_path)
+
+ if not b_expanded_paths:
+ return module.fail_json(
+ path=', '.join(paths),
+ expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'),
+ msg='Error, no source paths were found'
+ )
+
+ # Only try to determine if we are working with an archive or not if we haven't set archive to true
+ if not force_archive:
+ # If we actually matched multiple files or TRIED to, then
+ # treat this as a multi-file archive
+ archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1
+ else:
+ archive = True
+
+ # Default created file name (for single-file archives) to
+ # <file>.<format>
+ if not b_dest and not archive:
+ b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt)
+
+ # Force archives to specify 'dest'
+ if archive and not b_dest:
+ module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
+
+ b_sep = to_bytes(os.sep, errors='surrogate_or_strict')
+
+ b_archive_paths = []
+ b_missing = []
+ b_arcroot = b''
+
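+ # Example: paths /data/app/logs and /data/app/conf would share /data/app/ as the archive root.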
+ for b_path in b_expanded_paths:
+ # Use the longest common directory name among all the files
+ # as the archive root path
+ if b_arcroot == b'':
+ b_arcroot = os.path.dirname(b_path) + b_sep
+ else:
+ for i in range(len(b_arcroot)):
+ if b_path[i] != b_arcroot[i]:
+ break
+
+ if i < len(b_arcroot):
+ b_arcroot = os.path.dirname(b_arcroot[0:i + 1])
+
+ b_arcroot += b_sep
+
+ # Don't allow archives to be created anywhere within paths to be removed
+ if remove and os.path.isdir(b_path):
+ b_path_dir = b_path
+ if not b_path.endswith(b'/'):
+ b_path_dir += b'/'
+
+ if b_dest.startswith(b_path_dir):
+ module.fail_json(
+ path=', '.join(paths),
+ msg='Error, created archive can not be contained in source paths when remove=True'
+ )
+
+ if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths:
+ b_archive_paths.append(b_path)
+ else:
+ b_missing.append(b_path)
+
+ # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
+ if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
+ # Just check the filename to know if it's an archive or simple compressed file
+ if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
+ state = 'archive'
+ else:
+ state = 'compress'
+
+ # Multiple files, or globbiness
+ elif archive:
+ if not b_archive_paths:
+ # No source files were found, but the archive is there.
+ if os.path.lexists(b_dest):
+ state = 'archive'
+ elif b_missing:
+ # SOME source files were found, but not all of them
+ state = 'incomplete'
+
+ archive = None
+ size = 0
+ errors = []
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ if state != 'archive':
+ if check_mode:
+ changed = True
+
+ else:
+ try:
+ # Slightly more difficult (and less efficient!) compression using zipfile module
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+
+ # Easier compression using tarfile module
+ elif fmt == 'gz' or fmt == 'bz2':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)
+
+ # python3 tarfile module allows xz format but for python2 we have to create the tarfile
+ # in memory and then compress it with lzma.
+ elif fmt == 'xz':
+ arcfileIO = io.BytesIO()
+ arcfile = tarfile.open(fileobj=arcfileIO, mode='w')
+
+ # Or plain tar archiving
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+
+ b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
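+ # b_match_root strips the archive root prefix so members are stored with paths relative to arcroot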
+ for b_path in b_archive_paths:
+ if os.path.isdir(b_path):
+ # Recurse into directories
+ for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True):
+ if not b_dirpath.endswith(b_sep):
+ b_dirpath += b_sep
+
+ for b_dirname in b_dirnames:
+ b_fullpath = b_dirpath + b_dirname
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ except Exception as e:
+ errors.append('%s: %s' % (n_fullpath, to_native(e)))
+
+ for b_filename in b_filenames:
+ b_fullpath = b_dirpath + b_filename
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ b_successes.append(b_fullpath)
+ except Exception as e:
+ errors.append('Adding %s: %s' % (to_native(b_path), to_native(e)))
+ else:
+ path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
+ arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
+ if fmt == 'zip':
+ arcfile.write(path, arcname)
+ else:
+ arcfile.add(path, arcname, recursive=False)
+
+ b_successes.append(b_path)
+
+ except Exception as e:
+ expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt)
+ module.fail_json(
+ msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)),
+ exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ state = 'archive'
+
+ if fmt == 'xz':
+ with lzma.open(b_dest, 'wb') as f:
+ f.write(arcfileIO.getvalue())
+ arcfileIO.close()
+
+ if errors:
+ module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))
+
+ if state in ['archive', 'incomplete'] and remove:
+ for b_path in b_successes:
+ try:
+ if os.path.isdir(b_path):
+ shutil.rmtree(b_path)
+ elif not check_mode:
+ os.remove(b_path)
+ except OSError as e:
+ errors.append(to_native(b_path))
+
+ if errors:
+ module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors)
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if not check_mode and os.path.getsize(b_dest) != size:
+ changed = True
+
+ if b_successes and state != 'incomplete':
+ state = 'archive'
+
+ # Simple, single-file compression
+ else:
+ b_path = b_expanded_paths[0]
+
+ # No source or compressed file
+ if not (os.path.exists(b_path) or os.path.lexists(b_dest)):
+ state = 'absent'
+
+ # if it already exists and the source file isn't there, consider this done
+ elif not os.path.lexists(b_path) and os.path.lexists(b_dest):
+ state = 'compress'
+
+ else:
+ if module.check_mode:
+ if not os.path.exists(b_dest):
+ changed = True
+ else:
+ size = 0
+ f_in = f_out = arcfile = None
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ try:
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+ arcfile.write(
+ to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
+ to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
+ )
+ arcfile.close()
+ state = 'archive' # because all zip files are archives
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+ arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
+ arcfile.close()
+ else:
+ f_in = open(b_path, 'rb')
+
+ n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
+ if fmt == 'gz':
+ f_out = gzip.open(n_dest, 'wb')
+ elif fmt == 'bz2':
+ f_out = bz2.BZ2File(n_dest, 'wb')
+ elif fmt == 'xz':
+ f_out = lzma.LZMAFile(n_dest, 'wb')
+ else:
+ raise OSError("Invalid format")
+
+ shutil.copyfileobj(f_in, f_out)
+
+ b_successes.append(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ dest=dest,
+ msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ if f_in:
+ f_in.close()
+ if f_out:
+ f_out.close()
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(b_dest) != size:
+ changed = True
+
+ state = 'compress'
+
+ if remove and not check_mode:
+ try:
+ os.remove(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()
+ )
+
+ try:
+ file_args = module.load_file_common_arguments(params, path=b_dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ params['path'] = b_dest
+ file_args = module.load_file_common_arguments(params)
+
+ if not check_mode:
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(
+ archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
+ dest=dest,
+ changed=changed,
+ state=state,
+ arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
+ missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
+ expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
+ expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py
new file mode 100644
index 00000000..1364a42c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_container.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+ - Manage the containers on the atomic host platform.
+ - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+ - Host should support C(atomic) command
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: True
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: True
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: True
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+ - Define if it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+ - This option is permitted only with mode 'user' or 'system'.
+ - The values specified here will be used at installation time as --set arguments for atomic install.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+
+- name: Install the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: latest
+ mode: system
+ values:
+ - ETCD_NAME=etcd.server
+
+- name: Uninstall the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: absent
+ mode: system
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+ system_list = ["--system"] if mode == 'system' else []
+ user_list = ["--user"] if mode == 'user' else []
+ rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+ args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, image, values_list):
+ args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+ args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ mode = module.params['mode']
+ name = module.params['name']
+ image = module.params['image']
+ rootfs = module.params['rootfs']
+ values = module.params['values']
+ backend = module.params['backend']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
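+ # Ask 'atomic containers list' (filtered by backend and container name) whether the container already exists.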
+ args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+ present = name in out
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, mode, rootfs, name, image, values_list, backend)
+ elif state == 'latest':
+ do_update(module, name, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="The container is not present", changed=False)
+ else:
+ do_uninstall(module, name, backend)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mode=dict(default=None, choices=['user', 'system']),
+ name=dict(required=True),
+ image=dict(required=True),
+ rootfs=dict(default=None),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ backend=dict(required=True, choices=['docker', 'ostree']),
+ values=dict(type='list', default=[], elements='str'),
+ ),
+ )
+
+ if module.params['values'] and module.params['mode'] is None:
+ module.fail_json(msg="values is supported only with user or system mode")
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py
new file mode 100644
index 00000000..993933e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_host.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform.
+ - Rebooting the Atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing C(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [ version ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+ community.general.atomic_host:
+ revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+ community.general.atomic_host:
+ revision: 23.130
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ args = []
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = ['atomic', 'host', 'upgrade']
+ else:
+ args = ['atomic', 'host', 'deploy', revision]
+
+ out = {}
+ err = {}
+ rc = 0
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py
new file mode 100644
index 00000000..c915ed0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/atomic_image.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+ - Allows executing the commands specified by the RUN label in the container image when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should support C(atomic) command.
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: [ 'docker', 'ostree' ]
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: True
+ type: str
+ state:
+ description:
+ - The state of the container image.
+ - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
+ choices: [ 'absent', 'latest', 'present' ]
+ default: 'latest'
+ type: str
+ started:
+ description:
+ - Start or Stop the container.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_upgrade(module, image):
+ args = ['atomic', 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = ['atomic', 'pull', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = ['atomic', 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = ['atomic', 'run', image]
+ else:
+ args = ['atomic', 'install', image]
+ elif state == 'absent':
+ args = ['atomic', 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+ module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py
new file mode 100644
index 00000000..260c7ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/awall.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
+description:
+ - This module allows for enabling, disabling and activating I(awall) policies.
+ - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ and activates the configuration on the system.
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [ disabled, enabled ]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policies
+ community.general.awall:
+ name: [ foo, bar ]
+ state: enabled
+
+- name: Disable "foo" and "bar" policies and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+ activate: yes
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: yes
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
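+ # 'awall list' prints one policy per line as '<name> <status> ...'; a line starting
+ # with the policy name followed by 'enabled' indicates the policy is enabled
+ # (assumption reflected in the regex below).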
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
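+ # In check mode, run the harmless 'list' command in place of 'enable' so a command
+ # is still executed without changing anything.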
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py
new file mode 100644
index 00000000..ab53d066
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/beadm.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems.
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: True
+ aliases: [ "be" ]
+ snapshot:
+ description:
+ - If specified, the new boot environment will be cloned from the given
+ snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is
+ available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [ absent, activated, mounted, present, unmounted ]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+ community.general.beadm:
+ name: upgrade-be
+ snapshot: be@old
+ state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+ community.general.beadm:
+ name: upgrade-be
+ options: "compression=on"
+ description: upgrade
+ state: present
+
+- name: Delete ZFS boot environment
+ community.general.beadm:
+ name: old-be
+ state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+ community.general.beadm:
+ name: BE
+ mountpoint: /tmp/be
+ state: mounted
+
+- name: Unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+
+- name: Activate ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: activated
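+
+# Sketch using the documented 'force' option; 'BE' is an example boot environment name
+- name: Forcibly unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+ force: yes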
+'''
+
+RETURN = r'''
+name:
+ description: BE name
+ returned: always
+ type: str
+ sample: pre-upgrade
+snapshot:
+ description: ZFS snapshot to create BE from
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
+description:
+ description: BE description
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
+options:
+ description: BE additional options
+ returned: always
+ type: str
+ sample: compression=on
+mountpoint:
+ description: BE mountpoint
+ returned: always
+ type: str
+ sample: /mnt/be
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+force:
+ description: Whether forced action was requested
+ returned: always
+ type: bool
+ sample: False
+'''
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.snapshot = module.params['snapshot']
+ self.description = module.params['description']
+ self.options = module.params['options']
+ self.mountpoint = module.params['mountpoint']
+ self.state = module.params['state']
+ self.force = module.params['force']
+ self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+ def _beadm_list(self):
+ cmd = [self.module.get_bin_path('beadm')]
+ cmd.append('list')
+ cmd.append('-H')
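+ # A name containing '@' refers to a snapshot, so ask beadm to include snapshots in the listing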
+ if '@' in self.name:
+ cmd.append('-s')
+ return self.module.run_command(cmd)
+
+ def _find_be_by_name(self, out):
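+ # 'beadm list -H' output is whitespace-separated on FreeBSD and ';'-separated on
+ # Solarish systems, hence the different split calls below.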
+ if '@' in self.name:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if not check:
+ continue
+ full_name = check[0].split('/')
+ if not full_name:
+ continue
+ check[0] = full_name[len(full_name) - 1]
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ else:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ return None
+
+ def exists(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ if self._find_be_by_name(out):
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def is_activated(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ if 'R' in line[1]:
+ return True
+ else:
+ if 'R' in line[2]:
+ return True
+
+ return False
+
+ def activate_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('activate')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('create')
+
+ if self.snapshot:
+ cmd.append('-e')
+ cmd.append(self.snapshot)
+
+ if not self.is_freebsd:
+ if self.description:
+ cmd.append('-d')
+ cmd.append(self.description)
+
+ if self.options:
+ cmd.append('-o')
+ cmd.append(self.options)
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def destroy_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('destroy')
+ cmd.append('-F')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def is_mounted(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ # On FreeBSD, we exclude currently mounted BE on /, as it is
+ # special and can be activated even if it is mounted. That is not
+ # possible with non-root BEs.
+ if line[2] != '-' and line[2] != '/':
+ return True
+ else:
+ if line[3]:
+ return True
+
+ return False
+
+ def mount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('mount')
+ cmd.append(self.name)
+
+ if self.mountpoint:
+ cmd.append(self.mountpoint)
+
+ return self.module.run_command(cmd)
+
+ def unmount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('unmount')
+ if self.force:
+ cmd.append('-f')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['be']),
+ snapshot=dict(type='str'),
+ description=dict(type='str'),
+ options=dict(type='str'),
+ mountpoint=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ be = BE(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = be.name
+ result['state'] = be.state
+
+ if be.snapshot:
+ result['snapshot'] = be.snapshot
+
+ if be.description:
+ result['description'] = be.description
+
+ if be.options:
+ result['options'] = be.options
+
+ if be.mountpoint:
+ result['mountpoint'] = be.mountpoint
+
+ if be.state == 'absent':
+ # beadm on FreeBSD and Solarish systems differs in delete behaviour in
+ # that we are not allowed to delete activated BE on FreeBSD while on
+ # Solarish systems we cannot delete BE if it is mounted. We add mount
+ # check for both platforms as BE should be explicitly unmounted before
+ # being deleted. On FreeBSD, we also check if the BE is activated.
+ if be.exists():
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if be.is_freebsd:
+ if be.is_activated():
+ module.fail_json(msg='Unable to remove active BE!')
+
+ (rc, out, err) = be.destroy_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while destroying BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ else:
+ module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+ elif be.state == 'present':
+ if not be.exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.create_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while creating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'activated':
+ if not be.is_activated():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+ # an explicit check for that case.
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py
new file mode 100644
index 00000000..4c907ea6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bearychat.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: bearychat
+short_description: Send BearyChat notifications
+description:
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
+ via the Incoming Robot integration.
+author: "Jiangge Zhang (@tonyseek)"
+options:
+ url:
+ type: str
+ description:
+ - BearyChat WebHook URL. This authenticates you to the BearyChat
+ service. It looks like
+ C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ required: true
+ text:
+ type: str
+ description:
+ - Message to send.
+ markdown:
+ description:
+ - If C(yes), text will be parsed as markdown.
+ default: 'yes'
+ type: bool
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the
+ default channel selected by the I(url).
+ attachments:
+ type: list
+ elements: dict
+ description:
+ - Define a list of attachments. For more information, see
+ https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments
+'''
+
+EXAMPLES = """
+- name: Send notification message via BearyChat
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via BearyChat all options
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+ markdown: no
+ channel: "#ansible"
+ attachments:
+ - title: "Ansible on {{ inventory_hostname }}"
+ text: "May the Force be with you."
+ color: "#ffffff"
+ images:
+ - http://example.com/index.png
+"""
+
+RETURN = """
+msg:
+ description: execution result
+ returned: success
+ type: str
+ sample: "OK"
+"""
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def build_payload_for_bearychat(module, text, markdown, channel, attachments):
+ payload = {}
+ if text is not None:
+ payload['text'] = text
+ if markdown is not None:
+ payload['markdown'] = markdown
+ if channel is not None:
+ payload['channel'] = channel
+ if attachments is not None:
+ payload.setdefault('attachments', []).extend(
+ build_payload_for_bearychat_attachment(
+ module, item.get('title'), item.get('text'), item.get('color'),
+ item.get('images'))
+ for item in attachments)
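+ # Encode the JSON document as a 'payload' form field, the format this module sends to the incoming webhook.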
+ payload = 'payload=%s' % module.jsonify(payload)
+ return payload
+
+
+def build_payload_for_bearychat_attachment(module, title, text, color, images):
+ attachment = {}
+ if title is not None:
+ attachment['title'] = title
+ if text is not None:
+ attachment['text'] = text
+ if color is not None:
+ attachment['color'] = color
+ if images is not None:
+ target_images = attachment.setdefault('images', [])
+ if not isinstance(images, (list, tuple)):
+ images = [images]
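+ # Each image may be a dict with a 'url' key or a bare URL string; anything else is rejected.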
+ for image in images:
+ if isinstance(image, dict) and 'url' in image:
+ image = {'url': image['url']}
+ elif hasattr(image, 'startswith') and image.startswith('http'):
+ image = {'url': image}
+ else:
+ module.fail_json(
+ msg="BearyChat doesn't have support for this kind of "
+ "attachment image")
+ target_images.append(image)
+ return attachment
+
+
+def do_notify_bearychat(module, url, payload):
+ response, info = fetch_url(module, url, data=payload)
+ if info['status'] != 200:
+ url_info = urlparse(url)
+ obscured_incoming_webhook = urlunparse(
+ (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+ module.fail_json(
+ msg=" failed to send %s to %s: %s" % (
+ payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+ module = AnsibleModule(argument_spec={
+ 'url': dict(type='str', required=True, no_log=True),
+ 'text': dict(type='str'),
+ 'markdown': dict(default=True, type='bool'),
+ 'channel': dict(type='str'),
+ 'attachments': dict(type='list', elements='dict'),
+ })
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ url = module.params['url']
+ text = module.params['text']
+ markdown = module.params['markdown']
+ channel = module.params['channel']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_bearychat(
+ module, text, markdown, channel, attachments)
+ do_notify_bearychat(module, url, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py
new file mode 100644
index 00000000..ea693eb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bigpanda.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ type: str
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+ - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
+ required: False
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ deployment_message:
+ type: str
+ description:
+ - Message about the deployment.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ aliases: ['message']
+ version_added: '0.2.0'
+ source_system:
+ type: str
+ description:
+ - Source system used in the requests to the API
+ default: ansible
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
+ register: deployment
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ deployment_message=dict(required=False, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default=True, type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+ if body.get('hosts') is None:
+ body['hosts'] = [socket.gethostname()]
+
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+ message = module.params['deployment_message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
new file mode 100644
index 00000000..80c1c493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_access_key
+short_description: Manages Bitbucket repository access keys
+description:
+ - Manages Bitbucket repository access keys (also called deploy keys).
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public key.
+ type: str
+ label:
+ description:
+ - The key label.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the access key.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Bitbucket OAuth consumer should have permissions to read and administrate account repositories.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create access key
+ community.general.bitbucket_access_key:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ label: 'Bitbucket'
+ state: present
+
+- name: Delete access key
+ community.general.bitbucket_access_key:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ label: Bitbucket
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_key': '`key` is required when the `state` is `present`',
+ 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
+ 'invalid_username_or_repo': 'Invalid `repository` or `username`',
+ 'invalid_key': 'Invalid SSH key or key is already in use',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_deploy_key(module, bitbucket):
+ """
+ Search for an existing deploy key on Bitbucket
+ with the label specified in module param `label`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing deploy key or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all the response pages in search of the deploy key we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
+
+ res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)
+
+ if res is not None:
+ return res
+
+ return None
+
+
+def create_deploy_key(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['key'],
+ 'label': module.params['label'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] == 400:
+ module.fail_json(msg=error_messages['invalid_key'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def delete_deploy_key(module, bitbucket, key_id):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ key_id=key_id,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ key=dict(type='str'),
+ label=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ key = module.params['key']
+ state = module.params['state']
+
+ # Check parameters
+ if (key is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_key'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing deploy key (if any)
+ existing_deploy_key = get_existing_deploy_key(module, bitbucket)
+ changed = False
+
+ # Create a new deploy key in case it doesn't exist
+ if not existing_deploy_key and (state == 'present'):
+ if not module.check_mode:
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Update deploy key if the old value does not match the new one
+ elif existing_deploy_key and (state == 'present'):
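+ # Compare by prefix: the supplied key may carry a trailing comment that is not part
+ # of the stored key (assumption reflected in the startswith() check below).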
+ if not key.startswith(existing_deploy_key.get('key')):
+ if not module.check_mode:
+ # Bitbucket doesn't support update key for the same label,
+ # so we need to delete the old one first
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Delete deploy key
+ elif existing_deploy_key and (state == 'absent'):
+ if not module.check_mode:
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
new file mode 100644
index 00000000..ab3b7ec4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_key_pair
+short_description: Manages Bitbucket pipeline SSH key pair
+description:
+ - Manages Bitbucket pipeline SSH key pair.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ public_key:
+ description:
+ - The public key.
+ type: str
+ private_key:
+ description:
+ - The private key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the key pair.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ public_key: '{{lookup("file", "bitbucket.pub") }}'
+ private_key: '{{lookup("file", "bitbucket") }}'
+ state: present
+
+- name: Remove SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account, repository or SSH key pair was not found',
+ 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+ """
+ Retrieves an existing ssh key pair from repository
+ specified in module param `repository`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing key pair or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+ "type": "pipeline_ssh_key_pair"
+ }
+ """
+ api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+
+ info, content = bitbucket.request(
+ api_url=api_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ # Account, repository or SSH key pair was not found.
+ return None
+
+ return content
+
+
+def update_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='PUT',
+ data={
+ 'private_key': module.params['private_key'],
+ 'public_key': module.params['public_key'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))
+
+
+def delete_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ public_key=dict(type='str'),
+ private_key=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ state = module.params['state']
+ public_key = module.params['public_key']
+ private_key = module.params['private_key']
+
+ # Check parameters
+ if ((public_key is None) or (private_key is None)) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_keys'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing ssh key
+ key_pair = get_existing_ssh_key_pair(module, bitbucket)
+ changed = False
+
+ # Create or update key pair
+ if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
+ if not module.check_mode:
+ update_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ # Delete key pair
+ elif key_pair and (state == 'absent'):
+ if not module.check_mode:
+ delete_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
new file mode 100644
index 00000000..dba9f9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_known_host
+short_description: Manages Bitbucket pipeline known hosts
+description:
+ - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
+ - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
+author:
+ - Evgeniy Krysanov (@catcombo)
+requirements:
+ - paramiko
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The FQDN of the known host.
+ type: str
+ required: true
+ key:
+ description:
+ - The public key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the record.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create known hosts from the list
+ community.general.bitbucket_pipeline_known_host:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - bitbucket.org
+ - example.com
+
+- name: Remove known host
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ state: absent
+
+- name: Specify public key file
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import socket
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account or repository was not found',
+ 'unknown_key_type': 'Public key type is unknown',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'known-host-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'known-host-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_known_host(module, bitbucket):
+ """
+ Search for a host in Bitbucket pipelines known hosts
+ with the name specified in module param `name`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing host or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}'
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all the response pages in search of the hostname we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches public key for specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
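+ # Run just enough of an SSH handshake with paramiko to obtain the server's host key.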
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing known host
+ existing_host = get_existing_known_host(module, bitbucket)
+ state = module.params['state']
+ changed = False
+
+ # Create a new host in case it doesn't exist
+ if not existing_host and (state == 'present'):
+ if not module.check_mode:
+ create_known_host(module, bitbucket)
+ changed = True
+
+ # Delete host
+ elif existing_host and (state == 'absent'):
+ if not module.check_mode:
+ delete_known_host(module, bitbucket, existing_host['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
new file mode 100644
index 00000000..33457fca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_variable
+short_description: Manages Bitbucket pipeline variables
+description:
+ - Manages Bitbucket pipeline variables.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The pipeline variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - The pipeline variable value.
+ type: str
+ secured:
+ description:
+ - Whether to encrypt the variable value.
+ type: bool
+ default: no
+ state:
+ description:
+ - Indicates desired state of the variable.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+ - For secured values, the return parameter C(changed) is always C(True).
+'''
+
+EXAMPLES = r'''
+- name: Create or update pipeline variables from the list
+ community.general.bitbucket_pipeline_variable:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item.name }}'
+ value: '{{ item.value }}'
+ secured: '{{ item.secured }}'
+ state: present
+ with_items:
+ - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False }
+ - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True }
+
+- name: Remove pipeline variable
+ community.general.bitbucket_pipeline_variable:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: AWS_ACCESS_KEY
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, _load_params
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_value': '`value` is required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'pipeline-variable-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'pipeline-variable-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_pipeline_variable(module, bitbucket):
+ """
+ Search for a pipeline variable
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing variable or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'name': 'AWS_ACCESS_OBKEY_ID',
+ 'value': 'x7HU80-a2',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
+ }
+
+ The `value` key is absent from the dict in the case of a secured variable.
+ """
+ variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ # Look through all the response pages in search of the variable we need
+ page = 1
+ while True:
+ next_url = "%s?page=%s" % (variables_base_url, page)
+ info, content = bitbucket.request(
+ api_url=next_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
+
+ # We are at the end of list
+ if 'pagelen' in content and content['pagelen'] == 0:
+ return None
+
+ page += 1
+ var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None)
+
+ if var is not None:
+ var['name'] = var.pop('key')
+ return var
+
+ return None
+
+
+def create_pipeline_variable(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['name'],
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def update_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='PUT',
+ data={
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+class BitBucketPipelineVariable(AnsibleModule):
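+ # Peek at the raw module parameters before the argument spec is processed so that,
+ # when 'secured' is true, the 'value' option is marked no_log and kept out of the logs.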
+ def __init__(self, *args, **kwargs):
+ params = _load_params() or {}
+ if params.get('secured'):
+ kwargs['argument_spec']['value'].update({'no_log': True})
+ super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ secured=dict(type='bool', default=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = BitBucketPipelineVariable(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ value = module.params['value']
+ state = module.params['state']
+ secured = module.params['secured']
+
+ # Check parameters
+ if (value is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_value'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing pipeline variable (if any)
+ existing_variable = get_existing_pipeline_variable(module, bitbucket)
+ changed = False
+
+ # Create a new variable if it doesn't exist
+ if not existing_variable and (state == 'present'):
+ if not module.check_mode:
+ create_pipeline_variable(module, bitbucket)
+ changed = True
+
+ # Update the variable if its secured flag changed or the old value does not match the new one (a secured variable's value cannot be read back, so it is always updated)
+ elif existing_variable and (state == 'present'):
+ if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
+ if not module.check_mode:
+ update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ # Delete variable
+ elif existing_variable and (state == 'absent'):
+ if not module.check_mode:
+ delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
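For illustration, a simplified, self-contained version of the paginated lookup that get_existing_pipeline_variable() performs; the response pages below are invented stand-ins for what the Bitbucket API returns through BitbucketHelper.request():

    # Invented pages; the real module requests them one by one via next_url.
    pages = [
        {'pagelen': 2, 'values': [
            {'key': 'SOME_VAR', 'value': '1', 'secured': False, 'uuid': '{1111}'},
            {'key': 'SECRET_VAR', 'secured': True, 'uuid': '{2222}'},
        ]},
        {'pagelen': 0, 'values': []},  # an empty page ends the search
    ]

    def find_variable(name):
        for content in pages:  # stands in for the while loop over page numbers
            if content.get('pagelen') == 0:
                return None  # end of the list reached without a match
            var = next((v for v in content['values'] if v['key'] == name), None)
            if var is not None:
                var = dict(var)
                var['name'] = var.pop('key')  # the module reports 'name' rather than 'key'
                return var
        return None

    print(find_variable('SECRET_VAR'))  # a secured variable carries no 'value' key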
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py
new file mode 100644
index 00000000..911d99b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bower.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower
+author: "Michael Warkentin (@mwarkentin)"
+options:
+ name:
+ type: str
+ description:
+ - The name of a bower package to install
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ type: bool
+ default: 'no'
+ production:
+ description:
+ - Install with --production flag
+ type: bool
+ default: 'no'
+ path:
+ type: path
+ description:
+ - The base path where to install the bower packages
+ required: true
+ relative_execpath:
+ type: path
+ description:
+ - Relative path to bower executable from install path
+ state:
+ type: str
+ description:
+ - The state of the bower package
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ type: str
+ description:
+ - The version to be installed
+'''
+
+EXAMPLES = '''
+- name: Install "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ community.general.bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ community.general.bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ community.general.bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- npm:
+ path: /app/location
+ name: bower
+ global: no
+- community.general.bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
+'''
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default=False, type='bool'),
+ production=dict(default=False, type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+ state=dict(default='present', choices=['present', 'absent', 'latest', ]),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = module.params['path']
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if missing:
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if missing or outdated:
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
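For illustration, the bucketing that Bower.list() applies to 'bower list --json' output, reduced to a standalone snippet; the JSON document below is invented:

    import json

    sample = json.loads('''
    {"dependencies": {
        "bootstrap": {"pkgMeta": {"version": "3.1.1"}, "update": {"latest": "3.4.1"}},
        "jquery":    {"missing": true},
        "lodash":    {"pkgMeta": {"version": "4.17.21"}, "update": {"latest": "4.17.21"}}
    }}
    ''')

    installed, missing, outdated = [], [], []
    for dep, dep_data in sample['dependencies'].items():
        if dep_data.get('missing', False):
            missing.append(dep)
        elif ('version' in dep_data['pkgMeta'] and 'update' in dep_data
                and dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
            outdated.append(dep)
        elif dep_data.get('incompatible', False):
            outdated.append(dep)
        else:
            installed.append(dep)

    print(installed, missing, outdated)  # ['lodash'] ['jquery'] ['bootstrap']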
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py
new file mode 100644
index 00000000..8be17d6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bundler.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+options:
+ executable:
+ type: str
+ description:
+ - The path to the bundler executable
+ state:
+ type: str
+ description:
+ - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
+ choices: [present, latest]
+ default: present
+ chdir:
+ type: path
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ - If not specified, it will default to the temporary working directory
+ exclude_groups:
+ type: list
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ clean:
+ description:
+ - Only applies if state is C(present). If set removes any gems on the
+ target host that are not in the gemfile
+ type: bool
+ default: 'no'
+ gemfile:
+ type: path
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - If not specified, it will default to the Gemfile in the current directory
+ local:
+ description:
+ - If set only installs gems from the cache on the target host
+ type: bool
+ default: 'no'
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set it will install gems in
+ ./vendor/bundle instead of the default location. Requires a Gemfile.lock
+ file to have been created prior
+ type: bool
+ default: 'no'
+ user_install:
+ description:
+ - Only applies if state is C(present). Installs gems in the local user's cache or for all users
+ type: bool
+ default: 'yes'
+ gem_path:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ - If not specified the default RubyGems gem paths will be used.
+ binstub_directory:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bins files to. When executed the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ extra_args:
+ type: str
+ description:
+ - A space separated string of additional commands that can be applied to
+ the Bundler command. Refer to the Bundler documentation for more
+ information
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+- name: Install gems from a Gemfile in the current directory
+ community.general.bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
+
+- name: Exclude the production group from installing
+ community.general.bundler:
+ state: present
+ exclude_groups: production
+
+- name: Install gems into ./vendor/bundle
+ community.general.bundler:
+ state: present
+ deployment_mode: yes
+
+- name: Install gems using a Gemfile in another directory
+ community.general.bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
+
+- name: Update Gemfile in another directory
+ community.general.bundler:
+ state: latest
+ chdir: ~/rails_project
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ result = module.params.get('executable').split(' ')
+ else:
+ result = [module.get_bin_path('bundle', True)]
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
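For illustration, how the bundler module assembles its command line for state=present, extracted into a standalone helper; the parameter values here are invented and the real module runs the result with module.run_command():

    def build_install_cmd(executable=None, exclude_groups=None, clean=False, gemfile=None,
                          local=False, deployment_mode=False, user_install=True,
                          gem_path=None, binstub_directory=None, extra_args=None):
        cmd = executable.split(' ') if executable else ['bundle']
        cmd.append('install')
        if exclude_groups:
            cmd.extend(['--without', ':'.join(exclude_groups)])
        if clean:
            cmd.append('--clean')
        if gemfile:
            cmd.extend(['--gemfile', gemfile])
        if local:
            cmd.append('--local')
        if deployment_mode:
            cmd.append('--deployment')
        if not user_install:
            cmd.append('--system')
        if gem_path:
            cmd.extend(['--path', gem_path])
        if binstub_directory:
            cmd.extend(['--binstubs', binstub_directory])
        if extra_args:
            cmd.extend(extra_args.split(' '))
        return cmd

    print(build_install_cmd('~/.rvm/gems/2.1.5/bin/bundle',
                            exclude_groups=['development', 'test'],
                            deployment_mode=True))
    # ['~/.rvm/gems/2.1.5/bin/bundle', 'install', '--without', 'development:test', '--deployment']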
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py
new file mode 100644
index 00000000..7af3f279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/bzr.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: bzr
+author:
+- André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+options:
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [ parent ]
+ required: yes
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: yes
+ version:
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ default: head
+ force:
+ description:
+ - If C(yes), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was C(yes).
+ type: bool
+ default: 'no'
+ executable:
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+'''
+
+EXAMPLES = '''
+- name: Checkout
+ community.general.bzr:
+ name: bzr+ssh://foosball.example.org/path/to/branch
+ dest: /srv/checkout
+ version: 22
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''samples the version of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
+ lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]  # list comprehension so len() below also works on Python 3
+ return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Resets the index and working tree to head.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', required=True),
+ name=dict(type='str', required=True, aliases=['parent']),
+ version=dict(type='str', default='head'),
+ force=dict(type='bool', default=False),
+ executable=dict(type='str'),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err = (0, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
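For illustration, the local-modification test from has_local_mods() as a standalone snippet: lines starting with '??' (unknown files) are ignored, anything else counts as a modification. The status output below is invented:

    import re

    sample_status = ' M  README\n?? build/\n?? tmp.log\n'

    lines = sample_status.splitlines()
    modified = [line for line in lines if not re.search('^\\?\\?.*$', line)]
    print(bool(modified))  # True: README is modified, the '??' entries are ignored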
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py
new file mode 100644
index 00000000..c6848238
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/campfire.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+options:
+ subscription:
+ type: str
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ notify:
+ type: str
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
+
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
+'''
+
+try:
+ from html import escape as html_escape
+except ImportError:
+ # html.escape is only available on Python 3.2+; fall back to cgi.escape on older Pythons
+ import cgi
+
+ def html_escape(text, quote=True):
+ return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+ response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+ main()
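For illustration, the payload construction in the campfire module reduced to a few lines: the body is HTML-escaped and wrapped either in a SoundMessage element (for the notify sound) or in a plain message element. Values are invented:

    from html import escape as html_escape  # the module falls back to cgi.escape on old Pythons

    NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
    MSTR = "<message><body>%s</body></message>"

    notify = 'loggins'
    msg = 'Deploy finished: <ok> & ready'

    print(NSTR % html_escape(notify))
    print(MSTR % html_escape(msg))  # '<', '>' and '&' are escaped before posting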
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py
new file mode 100644
index 00000000..ac6dde67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/capabilities.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+ - This module manipulates file privileges using the Linux capabilities(7) system.
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: yes
+ aliases: [ key ]
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ type: str
+ required: yes
+ aliases: [ cap ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags into the effective set,
+ so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare,
+ so you will want to ensure that your capabilities argument matches the final capabilities.
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Set cap_sys_chroot+ep on /foo
+ community.general.capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
+
+- name: Remove cap_net_bind_service from /bar
+ community.general.capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+OPS = ('=', '-', '+')
+
+
+class CapabilitiesModule(object):
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [cap[0] for cap in current]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append(self.capability_tup)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+ # If the file does not exist, the stderr will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or stderr != "":
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append((subcap, op, flags))
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([''.join(cap) for cap in caps])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except Exception:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+
+# ==============================================================
+# main
+
+def main():
+ # defining module
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='str', required=True, aliases=['key']),
+ capability=dict(type='str', required=True, aliases=['cap']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+ main()
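For illustration, the operator/flag parsing done by _parse_cap(), adapted into a standalone function; the capability strings are examples only:

    OPS = ('=', '-', '+')

    def parse_cap(cap, op_required=True):
        opind = -1
        for op_char in OPS:
            opind = cap.find(op_char)
            if opind != -1:
                break
        if opind == -1:
            # the module calls fail_json() here when an operator is required
            return None if op_required else (cap, None, None)
        op = cap[opind]
        name, flags = cap.split(op)
        return (name, op, flags)

    print(parse_cap('cap_sys_chroot+ep'))                        # ('cap_sys_chroot', '+', 'ep')
    print(parse_cap('cap_net_bind_service', op_required=False))  # ('cap_net_bind_service', None, None)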
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py
new file mode 100644
index 00000000..13833620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/catapult.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jonathan Mainguy <jon@soh.re>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# basis of code taken from the ansible twillio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send a sms / mms using the catapult bandwidth api
+description:
+ - Allows notifications to be sent using sms / mms via the catapult bandwidth api.
+options:
+ src:
+ type: str
+ description:
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ required: true
+ dest:
+ type: list
+ elements: str
+ description:
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true
+ msg:
+ type: str
+ description:
+ - The contents of the text message (must be 2048 characters or less).
+ required: true
+ media:
+ type: str
+ description:
+ - For MMS messages, a media url to the location of the media to be sent with the message.
+ user_id:
+ type: str
+ description:
+ - User Id from Api account page.
+ required: true
+ api_token:
+ type: str
+ description:
+ - Api Token from Api account page.
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Api Secret from Api account page.
+ required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+ - Will return changed even if the media url is wrong.
+ - Will return changed if the destination number is invalid.
+
+'''
+
+EXAMPLES = '''
+- name: Send a mms to multiple users
+ community.general.catapult:
+ src: "+15035555555"
+ dest:
+ - "+12525089000"
+ - "+12018994225"
+ media: "http://example.com/foobar.jpg"
+ msg: "Task is complete"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+- name: Send a sms to a single user
+ community.general.catapult:
+ src: "+15035555555"
+ dest: "+12018994225"
+ msg: "Consider yourself notified"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+'''
+
+RETURN = '''
+changed:
+ description: Whether the api accepted the message.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+ """
+ Send the message
+ """
+ AGENT = "Ansible"
+ URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+ data = {'from': src, 'to': dest, 'text': msg}
+ if media:
+ data['media'] = media
+
+ headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = api_token.replace('\n', '')
+ module.params['url_password'] = api_secret.replace('\n', '')
+
+ return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(required=True),
+ dest=dict(required=True, type='list', elements='str'),
+ msg=dict(required=True),
+ user_id=dict(required=True),
+ api_token=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ media=dict(default=None, required=False),
+ ),
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ msg = module.params['msg']
+ media = module.params['media']
+ user_id = module.params['user_id']
+ api_token = module.params['api_token']
+ api_secret = module.params['api_secret']
+
+ for number in dest:
+ rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+ if info["status"] != 201:
+ body = json.loads(info["body"])
+ fail_msg = body["message"]
+ module.fail_json(msg=fail_msg)
+
+ changed = True
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
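For illustration, the JSON body that send() posts once per destination number, extracted into a helper; the phone numbers and media URL are placeholders:

    import json

    def build_payload(src, dest, msg, media=None):
        data = {'from': src, 'to': dest, 'text': msg}
        if media:
            data['media'] = media
        return json.dumps(data)

    for number in ['+12525089000', '+12018994225']:
        print(build_payload('+15035555555', number, 'Task is complete',
                            media='http://example.com/foobar.jpg'))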
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py
new file mode 100644
index 00000000..27d23168
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/circonus_annotation.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: create an annotation in circonus
+description:
+ - Create an annotation event with a given category, title and description. Optionally, start, end or duration can be provided
+author: "Nick Harring (@NickatEpic)"
+requirements:
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+ - Check mode isn't supported.
+options:
+ api_key:
+ type: str
+ description:
+ - Circonus API key
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start
+ - If not specified, it defaults to I(now).
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end
+ - If not specified, it defaults to I(now) + I(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation
+ default: 0
+'''
+EXAMPLES = '''
+- name: Create a simple annotation event with a source, defaults to start and end time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ duration: 300
+
+- name: Create an annotation with a start_time and end_time
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ start_time: 1395940006
+ end_time: 1395954407
+'''
+
+RETURN = '''
+annotation:
+ description: details about the created annotation
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: annotation identifier
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: creation timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: last modification timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: last modified by
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: category of the created annotation
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: title of the created annotation
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: description of the created annotation
+ returned: success
+ type: str
+ sample: Host is down.
+ start:
+ description: Unix timestamp at which the annotation starts
+ returned: success
+ type: int
+ sample: 1502236928
+ stop:
+ description: Unix timestamp at which the annotation ends
+ returned: success
+ type: int
+ sample: 1502237228
+ rel_metrics:
+ description: Array of metrics related to this annotation, each metrics is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+'''
+import json
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def check_requests_dep(module):
+ """Check if an adequate requests version is available"""
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ else:
+ required_version = '2.0.0' if PY3 else '1.0.0'
+ if LooseVersion(requests.__version__) < LooseVersion(required_version):
+ module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+ annotate_post_endpoint = '/annotation'
+ resp = requests.post(base_url + annotate_post_endpoint,
+ headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+ duration = module.params['duration']
+ if module.params['start'] is not None:
+ start = module.params['start']
+ else:
+ start = int(time.time())
+ if module.params['stop'] is not None:
+ stop = module.params['stop']
+ else:
+ stop = int(time.time()) + duration
+ annotation['start'] = start
+ annotation['stop'] = stop
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
+
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(type='int'),
+ stop=dict(type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(default=0, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+
+ check_requests_dep(module)
+
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as e:
+ module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+ main()
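For illustration, how create_annotation() derives the start and stop timestamps: start defaults to the current time and stop defaults to start plus duration. The field values below are invented:

    import time

    def build_annotation(category, title, description, start=None, stop=None, duration=0):
        now = int(time.time())
        return {
            'start': start if start is not None else now,
            'stop': stop if stop is not None else now + duration,
            'category': category,
            'title': title,
            'description': description,
        }

    print(build_annotation('deploys', 'App Config Change',
                           'Detailed description of the change', duration=300))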
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_spark.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cisco_webex.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token required to validate the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
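For illustration, the request that webex_msg() builds outside of check mode: the payload is keyed by the chosen recipient_type and msg_type. The token and room ID below are placeholders:

    import json

    def build_webex_request(recipient_type, recipient_id, msg_type, msg, token):
        headers = {
            'Authorization': 'Bearer {0}'.format(token),
            'content-type': 'application/json',
        }
        payload = {recipient_type: recipient_id, msg_type: msg}
        return 'https://webexapis.com/v1/messages', headers, json.dumps(payload)

    url, headers, body = build_webex_request('roomId', 'EXAMPLE_ROOM_ID',
                                             'markdown', '**Deploy complete**', 'EXAMPLE_TOKEN')
    print(body)  # {"roomId": "EXAMPLE_ROOM_ID", "markdown": "**Deploy complete**"}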
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
new file mode 100644
index 00000000..a2750937
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ type: str
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - This option does nothing and will be removed in community.general 3.0.0.
+ type: bool
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
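+# For illustration only (not used by the module): a simplified sketch of the credential
+# precedence applied by _set_clc_credentials_from_env() below. An API token together with
+# an account alias takes priority over a username/password pair; the names match the
+# environment variables listed in the notes above.
+#
+#   import os
+#   env = os.environ
+#   if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
+#       auth = 'token'          # clc._LOGIN_TOKEN_V2 and clc.ALIAS are set
+#   elif env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
+#       auth = 'credentials'    # clc.v2.SetCredentials(...) is called
+#   else:
+#       auth = None             # the module fails with an explanatory message
+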
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk:
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'),
+ exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+ :param p: datacenter to get policies from
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+ :param p: datacenter to create policy in
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+ :param p: datacenter to delete a policy from
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+ :return: the policy if it exists, otherwise False
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: dictionary of policy name
+ :return: tuple of if a deletion occurred and the name of the policy that was deleted
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: dictionary of a policy name
+ :return: tuple of if an addition occurred and the name of the policy that was added
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
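The notes above list two ways to authenticate: an API token plus account alias, or the username/password pair, and _set_clc_credentials_from_env applies them in that order of precedence. Below is a minimal standalone sketch of the same lookup, using only the standard library; resolve_clc_credentials is a hypothetical helper name, not part of the module.

    import os

    def resolve_clc_credentials(env=os.environ):
        # Hypothetical helper mirroring the precedence in _set_clc_credentials_from_env:
        # token + alias wins, then username + password, otherwise fail.
        token = env.get('CLC_V2_API_TOKEN')
        alias = env.get('CLC_ACCT_ALIAS')
        user = env.get('CLC_V2_API_USERNAME')
        passwd = env.get('CLC_V2_API_PASSWD')
        if token and alias:
            return {'method': 'token', 'token': token, 'alias': alias}
        if user and passwd:
            return {'method': 'password', 'username': user, 'password': passwd}
        raise RuntimeError('You must set the CLC_V2_API_USERNAME and '
                           'CLC_V2_API_PASSWD environment variables')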
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
new file mode 100644
index 00000000..7a10c0b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ name:
+ description:
+ - The name of the alert policy. This is mutually exclusive with id
+ type: str
+ id:
+ description:
+ - The alert policy id. This is mutually exclusive with name
+ type: str
+ alert_recipients:
+ description:
+ - A list of recipient email addresses to notify for the alert.
+ This is required for state 'present'
+ type: list
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ type: str
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time in minutes that the condition must exceed the threshold.
+ This is required for state 'present'
+ type: str
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+ This is required for state 'present'
+ This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0
+ type: int
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the environment variables below, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ id=dict(),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list'),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk'],
+ default=None),
+ duration=dict(type='str'),
+ threshold=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+ self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+ :return: the policy dict if it exists, otherwise False
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ retrieves the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
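The threshold documentation above requires a multiple of 5.0 between 5.0 and 95.0, but the argument spec only declares type='int' and the code shown here does not appear to validate that constraint. A playbook author could pre-check the value with a small helper like the sketch below; valid_alert_threshold is a hypothetical name, not part of the module.

    def valid_alert_threshold(threshold):
        # True if the value satisfies the documented constraint:
        # between 5.0 and 95.0 inclusive and a multiple of 5.0.
        value = float(threshold)
        return 5.0 <= value <= 95.0 and value % 5.0 == 0.0

    assert valid_alert_threshold(80)       # the value used in the example playbook above
    assert not valid_alert_threshold(82)   # not a multiple of 5
    assert not valid_alert_threshold(100)  # above the allowed range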
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
new file mode 100644
index 00000000..c45ca919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+ - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - A list of server Ids on which to deploy the blueprint package.
+ type: list
+ required: True
+ package_id:
+ description:
+ - The package id of the blueprint.
+ type: str
+ required: True
+ package_params:
+ description:
+ - The dictionary of arguments required to deploy the blueprint.
+ type: dict
+ default: {}
+ required: False
+ state:
+ description:
+ - Whether to install or uninstall the package. Currently it supports only "present" for install action.
+ type: str
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: str
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the environment variables below, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ community.general.clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True), # @FIXME should be bool?
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+ :param package_id: The blueprint package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+
+if __name__ == '__main__':
+ main()
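define_argument_spec above keeps wait=dict(default=True) with an @FIXME noting it should probably be type='bool'. The guard in _wait_for_requests_to_complete is plain truthiness, so if the parameter ever reaches the module as a non-empty string such as "False" it would still wait. A standalone illustration of that truthiness follows; would_wait is a hypothetical stand-in for the guard, not module code.

    def would_wait(wait):
        # Mirrors the check `if not self.module.params['wait']: return`:
        # any truthy value, including the non-empty string "False", waits.
        return bool(wait)

    for value in (True, False, "True", "False", ""):
        print(repr(value), '-> waits' if would_wait(value) else '-> returns early')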
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
new file mode 100644
index 00000000..105d793c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
@@ -0,0 +1,584 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+ - Create, delete, or update firewall policies on Centurylink Cloud
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present'
+ type: list
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ type: list
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ type: list
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ type: str
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ type: str
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ type: str
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: str
+ default: 'True'
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ type: str
+ choices: [True, False]
+ default: True
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the environment variables below, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete a Firewall Policy at CenturyLink Cloud
+ community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+ description: The firewall policy id
+ returned: success
+ type: str
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+ description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(),
+ firewall_policy_id=dict(),
+ ports=dict(type='list'),
+ source=dict(type='list'),
+ destination=dict(type='list'),
+ wait=dict(default=True), # @FIXME type=bool
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
+ url = response.get('links')[0]['href']
+ path = urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id : {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
+
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+
+if __name__ == '__main__':
+ main()
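_get_policy_id_from_response above derives the new policy id from the last path segment of the self link returned by the create call. Applied to the sample href from the RETURN block, the same parsing looks like the standalone sketch below (the module itself imports urlparse through ansible.module_utils.six.moves for Python 2 compatibility).

    import os
    from urllib.parse import urlparse

    # Sample self link taken from the RETURN block above.
    href = "http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05"

    # Same approach as _get_policy_id_from_response: keep the last path segment.
    policy_id = os.path.split(urlparse(href).path)[-1]
    print(policy_id)  # fc36f1bfd47242e488a9c44346438c05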
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py
new file mode 100644
index 00000000..a80cc400
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_group.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at Centurylink Cloud
+description:
+ - Create or delete Server Groups at Centurylink Cloud
+options:
+ name:
+ description:
+ - The name of the Server Group
+ type: str
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ type: str
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, it creates the group at top level.
+ type: str
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ type: str
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the environment variables below, which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+# Delete a Server Group
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
+ if state == "absent":
+ changed, group, requests = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests:
+ self._wait_for_requests_to_complete(requests)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ location=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: changed, group
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+            changed: Boolean - whether a change was made,
+ group: A clc group object for the group
+ """
+ if not self.root_group:
+ raise AssertionError("Implementation Error: Root Group not set")
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
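+        # Tags outgoing SDK requests so they can be identified server-side; the
+        # resulting User-Agent ends with "ClcAnsibleModule/<version>" (the exact
+        # value depends on __version__, which is templated at build time).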
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
new file mode 100644
index 00000000..2a8d2e9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
@@ -0,0 +1,935 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create, delete shared load balancers in CenturyLink Cloud.
+description:
+  - An Ansible module to create and delete shared load balancers in CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ type: str
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ type: str
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ location:
+ description:
+      - The location of the datacenter where the load balancer resides
+ type: str
+ required: True
+ method:
+ description:
+      - The balancing method for the load balancer pool
+ type: str
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ type: str
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ type: str
+ choices: [80, 443]
+ nodes:
+ description:
+      - A list of nodes that need to be added to the load balancer pool
+ type: list
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ type: str
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ type: str
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      Centurylink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
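+# For illustration only, the variables could be exported in the shell before the
+# playbook run (values below are hypothetical):
+#   export CLC_V2_API_USERNAME=myuser
+#   export CLC_V2_API_PASSWD=mypassword
+# or, when using a pre-generated token:
+#   export CLC_V2_API_TOKEN=<token from https://api.ctl.io/v2/authentication/login>
+#   export CLC_ACCT_ALIAS=MYALIAS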
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+        :return: (changed, result, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "LB Doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool
+        and sets the nodes if any in the list do not exist
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ id = lb.get('id')
+ return id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ result = False
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ result = True
+ else:
+ result = False
+ return result
+
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py
new file mode 100644
index 00000000..3c1b08cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_modify_server.py
@@ -0,0 +1,965 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+      - A list of server IDs to modify.
+ type: list
+ required: True
+ cpu:
+ description:
+ - How many CPUs to update on the server
+ type: str
+ memory:
+ description:
+      - Memory (in GB) to set on the server.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ type: str
+ alert_policy_id:
+ description:
+      - The alert policy id to be associated with the server.
+ This is mutually exclusive with 'alert_policy_name'
+ type: str
+ alert_policy_name:
+ description:
+      - The alert policy name to be associated with the server.
+ This is mutually exclusive with 'alert_policy_id'
+ type: str
+ state:
+ description:
+      - The state to ensure the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      Centurylink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: Set the memory to 8GB on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: Set the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: Remove the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: Add the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: Remove the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+ modify the servers configuration on the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+        retrieves the anti affinity policy id based on the policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        add the alert policy to the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+        retrieves the alert policy id for the account based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+        :return: True if the given alert policy id is associated with the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py
new file mode 100644
index 00000000..e31546b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_publicip.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public ips on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ type: str
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ ports:
+ description:
+ - A list of ports to expose. This is required when state is 'present'
+ type: list
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ type: list
+ required: True
+ state:
+ description:
+      - Determine whether to create or delete public IPs. If 'present', the module will not create a second public ip
+        if one already exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables which enable access to the
+    CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
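+# Illustrative sketch only (the values below are placeholders, not real credentials):
+# the environment variables described in the notes above are read via os.environ in
+# _set_clc_credentials_from_env(), so a wrapper script could provide them like this:
+#
+#   import os
+#   os.environ['CLC_V2_API_USERNAME'] = 'my.login'
+#   os.environ['CLC_V2_API_PASSWD'] = 'my-password'
+#   # or, token based access:
+#   # os.environ['CLC_V2_API_TOKEN'] = '<token from https://api.ctl.io/v2/authentication/login>'
+#   # os.environ['CLC_ACCT_ALIAS'] = 'MYALIAS'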
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+        Ensures the given server ids have a public ip available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
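+        # Only servers that do not already have a public ip are changed, keeping the module idempotent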
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+        Ensures any public ips are removed from the given server ids
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
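+        # Only servers that currently have at least one public ip are changed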
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
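+        # An API token together with an account alias takes precedence over username/password credentials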
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+        Gets a list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py
new file mode 100644
index 00000000..6b7e9c4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server.py
@@ -0,0 +1,1557 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ type: list
+ default: []
+ add_public_ip:
+ description:
+ - Whether to add a public ip to the server
+ type: bool
+ default: 'no'
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ type: str
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ default: 1
+ type: int
+ count_group:
+ description:
+      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ type: str
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ type: int
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ type: str
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ type: list
+ default: []
+ description:
+ description:
+ - The description to set for the server.
+ type: str
+ exact_count:
+ description:
+      - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ type: int
+ group:
+ description:
+ - The Server Group to create servers under.
+ type: str
+ default: 'Default Group'
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ type: str
+ location:
+ description:
+ - The Datacenter to create servers in.
+ type: str
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ type: bool
+ default: 'no'
+ required: False
+ memory:
+ description:
+ - Memory in GB.
+ type: int
+ default: 1
+ name:
+ description:
+ - A 1 to 6 character identifier to use for the server. This is required when state is 'present'
+ type: str
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ type: str
+ packages:
+ description:
+      - The list of blueprint packages to run on the server after it is created.
+ type: list
+ default: []
+ password:
+ description:
+ - Password for the administrator / root user
+ type: str
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ type: str
+ public_ip_protocol:
+ description:
+ - The protocol to use for the public ip if add_public_ip is set to True.
+ type: str
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ public_ip_ports:
+ description:
+      - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True.
+ type: list
+ default: []
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ type: str
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+        A list of server Ids to ensure are started, stopped, or absent.
+ type: list
+ default: []
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+ This is required when state is 'present'
+ type: str
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ type: str
+ type:
+ description:
+ - The type of server to create.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ type: str
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ type: str
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables which enable access to the
+    CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1, type='int'),
+ memory=dict(default=1, type='int'),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None, no_log=True),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
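+        # An API token together with an account alias takes precedence over username/password credentials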
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+ Validate the module params, and lookup default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException:
+ module.fail_json(msg="Unable to find location: {0}".format(location))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ type = module.params.get('type')
+ result = None
+
+ if state == 'present' and type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+        Validate that the anti affinity policy exists for the given name and fail if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: aa_policy_id: the anti affinity policy id of the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that the alert policy exists for the given name and fail if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: alert_policy_id: the alert policy id of the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+ msg='No alert policy exist with name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+ changed = False if count == 0 else True
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
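+                # Server() is the lookup-by-UUID closure patched onto the request in _create_clc_server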
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+        Enforce that the right number of servers is running in the provided group.
+        Creates or deletes servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+        :return: failed_servers: the list of servers for which adding a public ip failed
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+ 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
+ server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+ if result and hasattr(result, 'requests') and result.requests[0]:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the top level groups, so fall back to a recursive search of the subgroups
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+        :return: the matching clc-sdk.Group instance, or None if not found
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+ #
+ # Patch the Request object so that it returns a valid server
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+ # Change the request server method to a _find_server_by_uuid closure so
+ # that it will work
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+    # This is the function that gets patched onto the Request's Server method using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+        :param retries: the number of retry attempts to make before failing. default is 5
+        :param back_out: the initial delay in seconds between retries; doubled after each failed attempt
+        :param alias: the Account Alias to search
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+ msg='Unable to reach the CLC API after 5 attempts')
+ time.sleep(back_out)
+ back_out *= 2
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
new file mode 100644
index 00000000..1d289f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+            - The list of CLC server IDs.
+ type: list
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ type: int
+ default: 7
+ required: False
+ state:
+ description:
+            - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ type: str
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
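+# For example (illustrative values only):
+#   export CLC_V2_API_USERNAME=clc_user
+#   export CLC_V2_API_PASSWD='********'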
+
+- name: Create server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+    description: The list of server IDs that were changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to create snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to delete snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+        :param server_ids: The list of server_ids to restore the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+                 changed_servers: The list of server IDs that were modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to restore snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the dictionary object required for
+        the snapshot module
+        :return: the argument spec dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ expiration_days=dict(default=7, type='int'),
+ wait=dict(default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
new file mode 100644
index 00000000..09754ccd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
@@ -0,0 +1,1013 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
+description:
+ - Create, start, stop, restart, modify or terminate ecs instances.
+ - Add or remove ecs instances to/from security group.
+options:
+ state:
+ description:
+ - The state of the instance after operating.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance.
+              If it is not specified, it will be allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
+ It cannot begin with http:// or https://.
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
+ Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name.
+ The sequential suffix ranges from 001 to 999.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+ - The password to login instance. After rebooting instances, modified password will take effect.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+ - Size of the system disk, in GB. The valid values are 40~500.
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+            - The number of new instances. An integer value which indicates how many instances matching I(count_tag)
+              should be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+ - I(count) determines how many instances based on a specific tag criteria should be present.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ The specified count_tag must already exist or be passed in as the I(tags) option.
+ If it is not specified, it will be replaced by I(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+            - Whether to allocate a public IP for the new instance.
+ default: False
+ aliases: [ 'assign_public_ip' ]
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+            - The charge duration of the instance, in months. Required when C(instance_charge_type=PrePaid).
+            - The valid values are [1-9, 12, 24, 36].
+ default: 1
+ type: int
+ auto_renew:
+ description:
+            - Whether to automatically renew the charge of the instance.
+ type: bool
+ default: False
+ auto_renew_period:
+ description:
+            - The duration of the automatic renewal of the instance charge. Required when C(auto_renew=True).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+            - A list of instance ids. It is required when operating on existing instances.
+              If it is specified, I(count) will be ignored.
+ type: list
+ elements: str
+ force:
+ description:
+            - Whether the current operation needs to be executed forcibly.
+ default: False
+ type: bool
+ tags:
+ description:
+ - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+ If True, it means you have to specify all the desired tags on each task affecting an instance.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+ - The name of key pair which is used to access ECS instance in SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
+              It only takes effect when launching new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
+ places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+            - The duration unit in which you will buy the resource. It is only valid when C(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+            - If I(dry_run=True), only a dry-run request is sent and no instance is created. The system checks whether the
+ required parameters are set, and validates the request format, service permissions, and available ECS instances.
+ If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+            - If I(dry_run=False), a request is sent. If the validation succeeds, the instance is created.
+ default: False
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+ - Whether to change instance disks charge type when changing instance charge type.
+ default: True
+ type: bool
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.19.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# basic provisioning example vpc network
+- name: Basic provisioning example
+ hosts: localhost
+ vars:
+ alicloud_access_key: <your-alicloud-access-key-id>
+ alicloud_secret_key: <your-alicloud-access-secret-key>
+ alicloud_region: cn-beijing
+ image: ubuntu1404_64_40G_cloudinit_20160727.raw
+ instance_type: ecs.n4.small
+ vswitch_id: vsw-abcd1234
+ assign_public_ip: True
+ max_bandwidth_out: 10
+ host_name: myhost
+ password: mypassword
+ system_disk_category: cloud_efficiency
+ system_disk_size: 100
+ internet_charge_type: PayByBandwidth
+ security_groups: ["sg-f2rwnfh23r"]
+
+ instance_ids: ["i-abcd12346", "i-abcd12345"]
+ force: True
+
+ tasks:
+ - name: Launch ECS instance in VPC network
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ vswitch_id: '{{ vswitch_id }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: With count and count_tag to create a number of instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ security_groups: '{{ security_groups }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ Version: 0.1
+ count: 2
+ count_tag:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: Start instance
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'running'
+
+ - name: Reboot instance forcibly
+ ecs:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'restarted'
+ force: '{{ force }}'
+
+  - name: Add instances to a security group
+ ecs:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ security_groups: '{{ security_groups }}'
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+            description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+                    description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+            description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+            description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+            description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+            description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import re
+import time
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def get_instances_info(connection, ids):
+ result = []
+ instances = connection.describe_instances(instance_ids=ids)
+ if len(instances) > 0:
+ for inst in instances:
+ volumes = connection.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ result.append(inst.read())
+ return result
+
+
+def run_instance(module, ecs, exact_count):
+ if exact_count <= 0:
+ return None
+ zone_id = module.params['availability_zone']
+ image_id = module.params['image_id']
+ instance_type = module.params['instance_type']
+ security_groups = module.params['security_groups']
+ vswitch_id = module.params['vswitch_id']
+ instance_name = module.params['instance_name']
+ description = module.params['description']
+ internet_charge_type = module.params['internet_charge_type']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ max_bandwidth_in = module.params['max_bandwidth_in']
+ host_name = module.params['host_name']
+ password = module.params['password']
+ system_disk_category = module.params['system_disk_category']
+ system_disk_size = module.params['system_disk_size']
+ system_disk_name = module.params['system_disk_name']
+ system_disk_description = module.params['system_disk_description']
+ allocate_public_ip = module.params['allocate_public_ip']
+ period = module.params['period']
+ auto_renew = module.params['auto_renew']
+ instance_charge_type = module.params['instance_charge_type']
+ auto_renew_period = module.params['auto_renew_period']
+ user_data = module.params['user_data']
+ key_name = module.params['key_name']
+ ram_role_name = module.params['ram_role_name']
+ spot_price_limit = module.params['spot_price_limit']
+ spot_strategy = module.params['spot_strategy']
+ unique_suffix = module.params['unique_suffix']
+ # check whether the required parameter passed or not
+ if not image_id:
+ module.fail_json(msg='image_id is required for new instance')
+ if not instance_type:
+ module.fail_json(msg='instance_type is required for new instance')
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ if len(security_groups) <= 0:
+ module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting')
+
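+    # The client token below is sent with the request so that the API can treat retried
+    # calls carrying the same token as one request (an illustrative value would look like
+    # "Ansible-Alicloud-123456789-1612345678.9")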
+ client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
+
+ try:
+ # call to create_instance method from footmark
+ instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
+ zone_id=zone_id, instance_name=instance_name, description=description,
+ internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
+ internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
+ io_optimized='optimized', system_disk_category=system_disk_category,
+ system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
+ system_disk_description=system_disk_description, vswitch_id=vswitch_id,
+ amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
+ auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
+ user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
+ spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+
+ except Exception as e:
+ module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+ return instances
+
+
+def modify_instance(module, instance):
+ # According to state to modify instance's some special attribute
+ state = module.params["state"]
+ name = module.params['instance_name']
+ unique_suffix = module.params['unique_suffix']
+ if not name:
+ name = instance.name
+
+ description = module.params['description']
+ if not description:
+ description = instance.description
+
+ host_name = module.params['host_name']
+ if unique_suffix and host_name:
+ suffix = instance.host_name[-3:]
+ host_name = host_name + suffix
+
+ if not host_name:
+ host_name = instance.host_name
+
+ # password can be modified only when restart instance
+ password = ""
+ if state == "restarted":
+ password = module.params['password']
+
+ # userdata can be modified only when instance is stopped
+ setattr(instance, "user_data", instance.describe_user_data())
+ user_data = instance.user_data
+ if state == "stopped":
+ user_data = module.params['user_data'].encode()
+
+ try:
+ return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+ except Exception as e:
+ module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
+
+
+def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
+ """
+ To verify instance charge type has become expected after modify instance charge type
+ """
+ try:
+ while True:
+ instances = ecs.describe_instances(instance_ids=instance_ids)
+ flag = True
+ for inst in instances:
+ if inst and inst.instance_charge_type != charge_type:
+ flag = False
+ if flag:
+ return
+ timeout -= delay
+ time.sleep(delay)
+ if timeout <= 0:
+ raise Exception("Timeout Error: Waiting for instance to {0}. ".format(charge_type))
+ except Exception as e:
+ raise e
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ security_groups=dict(type='list', elements='str', aliases=['group_ids']),
+ availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
+ instance_type=dict(type='str', aliases=['type']),
+ image_id=dict(type='str', aliases=['image']),
+ count=dict(type='int', default=1),
+ count_tag=dict(type='str'),
+ vswitch_id=dict(type='str', aliases=['subnet_id']),
+ instance_name=dict(type='str', aliases=['name']),
+ host_name=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+ max_bandwidth_in=dict(type='int', default=200),
+ max_bandwidth_out=dict(type='int', default=0),
+ system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+ system_disk_size=dict(type='int', default=40),
+ system_disk_name=dict(type='str'),
+ system_disk_description=dict(type='str'),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+ description=dict(type='str'),
+ allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+ instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+ period=dict(type='int', default=1),
+ auto_renew=dict(type='bool', default=False),
+ instance_ids=dict(type='list', elements='str'),
+ auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+ key_name=dict(type='str', aliases=['keypair']),
+ user_data=dict(type='str'),
+ ram_role_name=dict(type='str'),
+ spot_price_limit=dict(type='float'),
+ spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
+ unique_suffix=dict(type='bool', default=False),
+ period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
+ dry_run=dict(type='bool', default=False),
+ include_data_disks=dict(type='bool', default=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+ host_name = module.params['host_name']
+ state = module.params['state']
+ instance_ids = module.params['instance_ids']
+ count_tag = module.params['count_tag']
+ count = module.params['count']
+ instance_name = module.params['instance_name']
+ force = module.params['force']
+ zone_id = module.params['availability_zone']
+ key_name = module.params['key_name']
+ tags = module.params['tags']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ instance_charge_type = module.params['instance_charge_type']
+ if instance_charge_type == "PrePaid":
+ module.params['spot_strategy'] = ''
+ changed = False
+
+ instances = []
+ if instance_ids:
+ if not isinstance(instance_ids, list):
+ module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+ instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
+ if not instances:
+ module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+ "Please check it and try again.".format(instance_ids))
+ elif count_tag:
+ instances = ecs.describe_instances(zone_id=zone_id, tags=eval(count_tag))
+ elif instance_name:
+ instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
+
+ ids = []
+ if state == 'absent':
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify ECS instances that you want to operate by using '
+ 'parameters instance_ids, tags or instance_name, aborting')
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
+ targets.append(inst.id)
+ if ecs.delete_instances(instance_ids=targets, force=force):
+ changed = True
+ ids.extend(targets)
+
+ module.exit_json(changed=changed, ids=ids, instances=[])
+ except Exception as e:
+ module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+ if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+ module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
+ if not module.params['allocate_public_ip']:
+ module.params['max_bandwidth_out'] = 0
+
+ if state == 'present':
+ if not instance_ids:
+ if len(instances) > count:
+ for i in range(0, len(instances) - count):
+ inst = instances[len(instances) - 1]
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
+ "and please stop it or set 'force' as True.".format(inst.id))
+ try:
+ if inst.terminate(force=force):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+ instances.pop(len(instances) - 1)
+ else:
+ try:
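+                    # A hostname with an ordered pattern such as "ecs-[1,3]-node" (illustrative)
+                    # is rejected here; set unique_suffix=True to get sequential suffixes instead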
+ if re.search(r"-\[\d+,\d+\]-", host_name):
+                        module.fail_json(msg='Ordered hostnames are not supported. If you want to add an ordered '
+                                             'suffix to the hostname, set unique_suffix to True')
+ new_instances = run_instance(module, ecs, count - len(instances))
+ if new_instances:
+ changed = True
+ instances.extend(new_instances)
+ except Exception as e:
+ module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+ # Security Group join/leave begin
+ security_groups = module.params['security_groups']
+ if security_groups:
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ for inst in instances:
+ existing = inst.security_group_ids['security_group_id']
+ remove = list(set(existing).difference(set(security_groups)))
+ add = list(set(security_groups).difference(set(existing)))
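+            # e.g. existing=['sg-a', 'sg-b'] and security_groups=['sg-b', 'sg-c'] (illustrative IDs)
+            # gives remove=['sg-a'] and add=['sg-c']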
+ for sg in remove:
+ if inst.leave_security_group(sg):
+ changed = True
+ for sg in add:
+ if inst.join_security_group(sg):
+ changed = True
+ # Security Group join/leave ends here
+
+ # Attach/Detach key pair
+ inst_ids = []
+ for inst in instances:
+ if key_name is not None and key_name != inst.key_name:
+ if key_name == "":
+ if inst.detach_key_pair():
+ changed = True
+ else:
+ inst_ids.append(inst.id)
+ if inst_ids:
+ changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name)
+
+ # Modify instance attribute
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.id not in ids:
+ ids.append(inst.id)
+
+ # Modify instance charge type
+ ids = []
+ for inst in instances:
+ if inst.instance_charge_type != instance_charge_type:
+ ids.append(inst.id)
+ if ids:
+ params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
+ "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
+ "auto_pay": True}
+ if instance_charge_type == 'PrePaid':
+ params['period'] = module.params['period']
+ params['period_unit'] = module.params['period_unit']
+
+ if ecs.modify_instance_charge_type(**params):
+ changed = True
+ wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
+
+ else:
+ if len(instances) < 1:
+ module.fail_json(msg='Please specify ECS instances that you want to operate by using '
+ 'parameters instance_ids, tags or instance_name, aborting')
+ if state == 'running':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.status != "running":
+ targets.append(inst.id)
+ ids.append(inst.id)
+ if targets and ecs.start_instances(instance_ids=targets):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Start instances got an error: {0}'.format(e))
+ elif state == 'stopped':
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != "stopped":
+ targets.append(inst.id)
+ if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
+ changed = True
+ ids.extend(targets)
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+ elif state == 'restarted':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ targets.append(inst.id)
+ if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+
+ tags = module.params['tags']
+ if module.params['purge_tags']:
+ for inst in instances:
+ if not tags:
+ tags = inst.tags
+ try:
+ if inst.remove_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+ if tags:
+ for inst in instances:
+ try:
+ if inst.add_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+            - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A hash/dictionaries of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+            - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+              any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+              Filter keys can be the same as the request parameter name, or be lower case and use underscore ("_") or dash ("-") to
+              connect different words in one parameter. 'InstanceIds' should be a list and it will be appended to
+              I(instance_ids) automatically. For 'Tag.n.Key' and 'Tag.n.Value', use the I(tags) option instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+            description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+                    description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+            description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+            description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+            description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+            description: The public IPv4 address assigned to the instance, or the EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a list of instances, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for id in value:
+ if id not in ids:
+                    ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
new file mode 100644
index 00000000..33b3f8a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS.
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+options:
+ availability_zone:
+ description:
+ - (Deprecated) Aliyun availability zone ID in which to launch the instance. Please use filter item 'zone_id' instead.
+ aliases: ['alicloud_zone']
+ type: str
+ instance_names:
+ description:
+ - (Deprecated) A list of ECS instance names. Please use filter item 'instance_name' instead.
+ aliases: ["names"]
+ type: list
+ elements: str
+ instance_ids:
+ description:
+ - A list of ECS instance ids.
+ aliases: ["ids"]
+ type: list
+ elements: str
+ name_prefix:
+ description:
+      - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+      - A hash/dictionary of instance tags. C({"key":"value"}).
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+        any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+        Filter keys can be the same as the request parameter name, or be lower case and use underscore ("_") or dash ("-")
+        to connect different words in one parameter. 'InstanceIds' should be a list and will be appended to
+        I(instance_ids) automatically. For 'Tag.n.Key' and 'Tag.n.Value', use the I(tags) parameter instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+ instance_ids:
+ - "i-35b333d9"
+ - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
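+
+# The following task is an illustrative sketch and not part of the upstream examples. It shows how
+# the filters option can be combined with instance_ids; page_size is an assumed DescribeInstances
+# request parameter written in the lower-case/underscore form described in the filters option.
+- name: Find instances based on ids combined with a raw API filter
+  community.general.ali_instance_info:
+    instance_ids:
+      - "i-35b333d9"
+    filters:
+      page_size: 50
+  register: instances_by_filter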
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+            description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+                    description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+            description: The attributes of the EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+            description: The attributes of the instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+            description: The instance type family to which the instance belongs.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+            description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+            description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+            description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+            description: The public IPv4 address assigned to the instance or EIP address.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
+
+HAS_FOOTMARK = False
+FOOTMARK_IMP_ERR = None
+try:
+ from footmark.exception import ECSResponseError
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ availability_zone=dict(aliases=['alicloud_zone']),
+ instance_ids=dict(type='list', elements='str', aliases=['ids']),
+ instance_names=dict(type='list', elements='str', aliases=['names']),
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+ if module._name in ('ali_instance_facts', 'community.general.ali_instance_facts'):
+ module.deprecate("The 'ali_instance_facts' module has been renamed to 'ali_instance_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = module.params['instance_ids']
+ name_prefix = module.params['name_prefix']
+ names = module.params['instance_names']
+ zone_id = module.params['availability_zone']
+ if ids and (not isinstance(ids, list) or len(ids) < 1):
+ module.fail_json(msg='instance_ids should be a list of instances, aborting')
+
+ if names and (not isinstance(names, list) or len(names) < 1):
+ module.fail_json(msg='instance_names should be a list of instances, aborting')
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+ if not ids:
+ ids = []
+ for key, value in list(filters.items()):
+ if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
+ for id in value:
+ if id not in ids:
+                    ids.append(id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+ if zone_id:
+ filters['zone_id'] = zone_id
+ if names:
+ filters['instance_name'] = names[0]
+
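+    # At this point 'filters' may look like, for example (illustrative values only):
+    #   {'instance_ids': ['i-35b333d9'], 'tags': {'Test': 'add'}, 'zone_id': 'cn-beijing-a'}
+    # and is expanded into keyword arguments for footmark's describe_instances() call below.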
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
new file mode 100644
index 00000000..1364a42c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+ - Manage the containers on the atomic host platform.
+    - Allows one to manage the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+    - Host should support C(atomic) command.
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: True
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: True
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: True
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+        - Define if it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+ - This option is permitted only with mode 'user' or 'system'.
+ - The values specified here will be used at installation time as --set arguments for atomic install.
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+
+- name: Install the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: latest
+ mode: system
+ values:
+ - ETCD_NAME=etcd.server
+
+- name: Uninstall the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: absent
+ mode: system
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+ system_list = ["--system"] if mode == 'system' else []
+ user_list = ["--user"] if mode == 'user' else []
+ rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+ args = ['atomic', 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
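+    # For example (illustrative only), mode=system, backend=ostree and one value would build:
+    #   atomic install --storage=ostree --name=etcd --system --set=ETCD_NAME=etcd.server rhel/etcd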
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_update(module, container, image, values_list):
+ args = ['atomic', 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+ args = ['atomic', 'uninstall', "--storage=%s" % backend, name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+ args = ['atomic', 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ mode = module.params['mode']
+ name = module.params['name']
+ image = module.params['image']
+ rootfs = module.params['rootfs']
+ values = module.params['values']
+ backend = module.params['backend']
+ state = module.params['state']
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = ['atomic', 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+ present = name in out
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, mode, rootfs, name, image, values_list, backend)
+ elif state == 'latest':
+ do_update(module, name, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="The container is not present", changed=False)
+ else:
+ do_uninstall(module, name, backend)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mode=dict(default=None, choices=['user', 'system']),
+ name=dict(required=True),
+ image=dict(required=True),
+ rootfs=dict(default=None),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ backend=dict(required=True, choices=['docker', 'ostree']),
+ values=dict(type='list', default=[], elements='str'),
+ ),
+ )
+
+    # 'values' is only meaningful when a container mode ('user' or 'system') is given
+    if module.params['values'] and module.params['mode'] is None:
+        module.fail_json(msg="values is supported only with user or system mode")
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
new file mode 100644
index 00000000..993933e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform.
+ - Rebooting of Atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing C(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [ version ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+ community.general.atomic_host:
+ revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+ community.general.atomic_host:
+ revision: 23.130
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ args = []
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = ['atomic', 'host', 'upgrade']
+ else:
+ args = ['atomic', 'host', 'deploy', revision]
+
+ out = {}
+ err = {}
+ rc = 0
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
new file mode 100644
index 00000000..c915ed0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+    - Allows one to execute the commands specified by the RUN label in the container image when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should support C(atomic) command.
+requirements:
+ - atomic
+ - python >= 2.6
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: [ 'docker', 'ostree' ]
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: True
+ type: str
+ state:
+ description:
+ - The state of the container image.
+ - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
+ choices: [ 'absent', 'latest', 'present' ]
+ default: 'latest'
+ type: str
+ started:
+ description:
+        - Start or stop the container.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
+'''
+
+RETURN = r'''
+msg:
+ description: The command standard output
+ returned: always
+ type: str
+ sample: [u'Using default tag: latest ...']
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def do_upgrade(module, image):
+ args = ['atomic', 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong, emit the error message
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ out = {}
+ err = {}
+ rc = 0
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = ['atomic', 'pull', "--storage=%s" % backend, image]
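+            # Example command built here (illustrative): atomic pull --storage=ostree busybox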
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = ['atomic', 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = ['atomic', 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = ['atomic', 'run', image]
+ else:
+ args = ['atomic', 'install', image]
+ elif state == 'absent':
+ args = ['atomic', 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc < 0:
+ module.fail_json(rc=rc, msg=err)
+ elif rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+ elif started and 'Container is running' in out:
+ module.exit_json(result=out, changed=is_upgraded)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ rc, out, err = module.run_command('atomic -v', check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="Error in running atomic command", err=err)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
new file mode 100644
index 00000000..a2750937
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ type: str
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - This option does nothing and will be removed in community.general 3.0.0.
+ type: bool
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
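+#
+# For example (illustrative placeholders only):
+#   export CLC_V2_API_USERNAME='my_clc_login'
+#   export CLC_V2_API_PASSWD='my_clc_password'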
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk:
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'),
+ exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ wait=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+ :param p: datacenter to get policies from
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+ :param p: datacenter to create policy in
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+ :param p: datacenter to delete a policy from
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+ :return: boolean of if the policy exists
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: dictionary of policy name
+ :return: tuple of if a deletion occurred and the name of the policy that was deleted
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: dictionary of a policy name
+ :return: tuple of if an addition occurred and the name of the policy that was added
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
new file mode 100644
index 00000000..7a10c0b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ name:
+ description:
+        - The name of the alert policy. This is mutually exclusive with I(id).
+ type: str
+ id:
+ description:
+        - The alert policy id. This is mutually exclusive with I(name).
+ type: str
+ alert_recipients:
+ description:
+        - A list of recipient email ids to notify about the alert.
+          This is required for state 'present'.
+ type: list
+ metric:
+ description:
+        - The metric on which to measure the condition that will trigger the alert.
+          This is required for state 'present'.
+ type: str
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+        - The length of time in minutes that the condition must exceed the threshold.
+          This is required for state 'present'.
+ type: str
+ threshold:
+ description:
+        - The threshold that will trigger the alert when the metric equals or exceeds it.
+          This is required for state 'present'.
+          This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
+ type: int
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
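+
+# Illustrative sketch (not part of the upstream examples): re-running the module with the same
+# policy name but different settings updates the existing alert policy in place.
+- name: Update Alert Policy Example
+  hosts: localhost
+  gather_facts: False
+  connection: local
+  tasks:
+    - name: Raise the disk alert threshold to 90%
+      community.general.clc_alert_policy:
+        alias: wfad
+        name: 'alert for disk > 80%'
+        alert_recipients:
+          - test1@centurylink.com
+        metric: 'disk'
+        duration: '00:05:00'
+        threshold: 90
+        state: present
+      register: policy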
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ id=dict(),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list'),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk'],
+ default=None),
+ duration=dict(type='str'),
+ threshold=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+            self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the updated the alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+ :return: boolean of if the policy exists
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ retrieves the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
new file mode 100644
index 00000000..c45ca919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: deploys a blueprint package on a set of servers in CenturyLink Cloud.
+description:
+    - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+            - A list of server IDs to deploy the blueprint package.
+ type: list
+ required: True
+ package_id:
+ description:
+            - The package id of the blueprint.
+ type: str
+ required: True
+ package_params:
+ description:
+            - The dictionary of arguments required to deploy the blueprint.
+ type: dict
+ default: {}
+ required: False
+ state:
+ description:
+            - Whether to install or uninstall the package. Currently it supports only "present" for the install action.
+ type: str
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: str
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ community.general.clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True), # @FIXME should be bool?
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
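+        # The package is executed on every listed server on each run; nothing checks for a prior install, so this is not idempotent.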
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+        :param package_id: The blueprint package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
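+        # An API token together with an account alias takes precedence over username/password credentials.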
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
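+        # Append an identifying Api-Client / User-Agent string to the SDK's requests session so API calls made by this module are attributable.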
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
new file mode 100644
index 00000000..105d793c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
@@ -0,0 +1,584 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+    - Create, delete, or update firewall policies on CenturyLink Cloud
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present'
+ type: list
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ type: list
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ type: list
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ type: str
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ type: str
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ type: str
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: str
+ default: 'True'
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ type: str
+ choices: [True, False]
+ default: True
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, you must set the environment variables below, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+      community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+    - name: Delete a Firewall Policy at CenturyLink Cloud
+      community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+    description: The firewall policy id
+ returned: success
+ type: str
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+    description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(),
+ firewall_policy_id=dict(),
+ ports=dict(type='list'),
+ source=dict(type='list'),
+ destination=dict(type='list'),
+ wait=dict(default=True), # @FIXME type=bool
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
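+        # The policy id is the last path segment of the href in the first link of the creation response.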
+ url = response.get('links')[0]['href']
+ path = urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
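+        # Without a policy id the policy is created; otherwise the existing policy is fetched, compared with the request, and updated only when something differs.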
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id : {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
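+        # A change is reported when any of the destination account alias, enabled flag, source, destination or ports present on the existing policy differ from the requested values.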
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
+
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
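+        # Poll the policy every 2 seconds until its status is 'active' or wait_limit polls have been made.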
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
new file mode 100644
index 00000000..a80cc400
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at Centurylink Cloud
+description:
+    - Create or delete Server Groups at CenturyLink Cloud
+options:
+ name:
+ description:
+ - The name of the Server Group
+ type: str
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ type: str
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, it creates the group at top level.
+ type: str
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ type: str
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, you must set the environment variables below, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+# Delete a Server Group
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
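+        # The datacenter's group tree is loaded once as a name -> (group, parent) map, and the requested state is reconciled against it.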
+ if state == "absent":
+ changed, group, requests = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests:
+ self._wait_for_requests_to_complete(requests)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ parent=dict(default=None),
+ location=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: changed, group
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+ changed: Boolean- whether a change was made,
+ group: A clc group object for the group
+ """
+ if not self.root_group:
+ raise AssertionError("Implementation Error: Root Group not set")
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
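+        # Three outcomes: the group already exists under the parent (no change), the parent exists but the group does not (create it), or the parent is missing (fail).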
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
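+        # Maps each group name to a (group, parent) tuple; subgroups whose type is not 'default' are skipped entirely.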
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
new file mode 100644
index 00000000..2a8d2e9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
@@ -0,0 +1,935 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create or delete shared load balancers in CenturyLink Cloud.
+description:
+    - An Ansible module to create or delete shared load balancers in CenturyLink Cloud.
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ type: str
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ type: str
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: True
+ location:
+ description:
+            - The location of the datacenter where the load balancer resides
+ type: str
+ required: True
+ method:
+ description:
+            - The balancing method for the load balancer pool
+ type: str
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ type: str
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ type: str
+ choices: [80, 443]
+ nodes:
+ description:
+            - A list of nodes that need to be added to the load balancer pool
+ type: list
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ type: str
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ type: str
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, you must set the environment variables below, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from time import sleep
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
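+        # Dispatch on state: 'present' creates the balancer (and optionally its pool and nodes), 'absent' deletes it, 'port_absent' removes a pool, and the 'nodes_*' states adjust pool membership.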
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, group, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided list of nodes exists for the pool
+        and sets the pool's nodes to that list if any of them are missing
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided nodes exist for the pool and adds any missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+        Checks to see if the provided nodes exist for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+            result = "Load balancer doesn't exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
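+            # Brief pause after the create call, presumably so the new balancer is visible to follow-up requests.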
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ id = lb.get('id')
+ return id
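+    # Note: self.lb_dict is presumably populated from _get_loadbalancer_list()
+    # below and holds the raw JSON entries returned by the sharedLoadBalancers
+    # API; only the 'id' and 'name' keys are used by these lookups, e.g.
+    # (illustrative shape only):
+    #   [{"id": "ab1c2d3e...", "name": "my-lb", ...}, ...]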
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ result = False
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ result = True
+ else:
+ result = False
+ return result
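+    # The membership checks above (and in add_lbpool_nodes / remove_lbpool_nodes
+    # below) compare whole node dictionaries against the list returned by
+    # _get_lbpool_nodes, so supplied nodes must match the API representation
+    # exactly; 'status' is the only key that is defaulted (to 'enabled') when
+    # missing. Illustrative shape only, assuming the usual CLC pool-node fields:
+    #   {"ipAddress": "10.1.1.1", "privatePort": 80, "status": "enabled"}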
+
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
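+    # Both helpers below operate on a full copy of the pool's current node list:
+    # they fetch it via _get_lbpool_nodes, append or remove the requested entries
+    # locally, and then PUT the complete list back through set_loadbalancernodes.
+    # In check mode the PUT is skipped, but 'changed' is still reported.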
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
new file mode 100644
index 00000000..3c1b08cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
@@ -0,0 +1,965 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: Modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - A list of server Ids to modify.
+ type: list
+ required: True
+ cpu:
+ description:
+ - How many CPUs to update on the server
+ type: str
+ memory:
+ description:
+      - Memory (in GB) to set on the server.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: Set the memory to 8GB on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: Set the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: Remove the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: Add the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: Remove the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+        Modify the configuration of the servers in the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
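+    # Only explicitly supplied parameters can trigger a modification here:
+    # missing cpu/memory fall back to the server's current values, so the
+    # comparison above is a no-op for them. In check mode 'changed' is still
+    # reported, but _modify_clc_server is not called.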
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+        retrieves the anti affinity policy id from the policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
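+    # For reference, the lookup above assumes a response shaped roughly like
+    # (illustrative only):
+    #   {"items": [{"id": "ab1c2d3e...", "name": "aa_policy", ...}, ...]}
+    # Duplicate policy names are treated as an error rather than picking one.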
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        add the alert policy to the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+        retrieves the alert policy id from the policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+        :return: True if the given alert policy id is associated with the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
new file mode 100644
index 00000000..e31546b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public ips on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ type: str
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ ports:
+ description:
+ - A list of ports to expose. This is required when state is 'present'
+ type: list
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ type: list
+ required: True
+ state:
+ description:
+      - Determine whether to create or delete public IPs. If present, the module will not create a second public ip if one
+ already exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+        Ensures the given server ids have a public ip available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
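+    # For example, protocol='TCP' and ports=[80, 443] produce
+    #   [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}]
+    # which is handed to the clc-sdk server.PublicIPs().Add() call below; only
+    # servers that currently have no public ip are touched.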
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+        Ensures any public ips are removed from the given server ids
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+        Gets the list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
new file mode 100644
index 00000000..6b7e9c4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
@@ -0,0 +1,1557 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ type: list
+ default: []
+ add_public_ip:
+ description:
+ - Whether to add a public ip to the server
+ type: bool
+ default: 'no'
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ type: str
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ default: 1
+ type: int
+ count_group:
+ description:
+      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ type: str
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ type: int
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ type: str
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ type: list
+ default: []
+ description:
+ description:
+ - The description to set for the server.
+ type: str
+ exact_count:
+ description:
+      - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ type: int
+ group:
+ description:
+ - The Server Group to create servers under.
+ type: str
+ default: 'Default Group'
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ type: str
+ location:
+ description:
+ - The Datacenter to create servers in.
+ type: str
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ type: bool
+ default: 'no'
+ required: False
+ memory:
+ description:
+ - Memory in GB.
+ type: int
+ default: 1
+ name:
+ description:
+ - A 1 to 6 character identifier to use for the server. This is required when state is 'present'
+ type: str
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ type: str
+ packages:
+ description:
+      - The list of blueprint packages to run on the server after it is created.
+ type: list
+ default: []
+ password:
+ description:
+ - Password for the administrator / root user
+ type: str
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ type: str
+ public_ip_protocol:
+ description:
+ - The protocol to use for the public ip if add_public_ip is set to True.
+ type: str
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ public_ip_ports:
+ description:
+      - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True.
+ type: list
+ default: []
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ type: str
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+        A list of server Ids to ensure are started, stopped, or absent.
+ type: list
+ default: []
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+ This is required when state is 'present'
+ type: str
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ type: str
+ type:
+ description:
+ - The type of server to create.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ type: str
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ type: str
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: 'yes'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables which enable access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1, type='int'),
+ memory=dict(default=1, type='int'),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None, no_log=True),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+ Validate the module params, and lookup default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException:
+ module.fail_json(msg="Unable to find location: {0}".format(location))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ type = module.params.get('type')
+ result = None
+
+ if state == 'present' and type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+        Validate that the anti affinity policy exists for the given name and throw an error if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: aa_policy_id: the anti affinity policy id of the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that the alert policy exists for the given name and throw an error if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: alert_policy_id: the alert policy id of the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+ msg='No alert policy exist with name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+ changed = False if count == 0 else True
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+ Enforce that there is the right number of servers in the provided group.
+        Creates or deletes servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+ 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
+ server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+ if result and hasattr(result, 'requests') and result.requests[0]:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the top-level group; fall back to a recursive search of its subgroups
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+        :return: the matching clc-sdk.Group instance, or None if no match is found
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+ #
+ # Patch the Request object so that it returns a valid server
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+ # Change the request server method to a _find_server_by_uuid closure so
+ # that it will work
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+        Retrieve the anti affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+    # This is the function that gets patched to the Request.Server method using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+        :param retries: the number of retry attempts to make before failing. Default is 5
+ :param alias: the Account Alias to search
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+ msg='Unable to reach the CLC API after 5 attempts')
+ time.sleep(back_out)
+ back_out *= 2
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
new file mode 100644
index 00000000..1d289f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ type: list
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ type: int
+ default: 7
+ required: False
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ type: str
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, you must set the environment variables below, which enable access to the
+      CenturyLink Cloud
+          - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+          - CLC_V2_API_PASSWORD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call @ https://api.ctl.io/v2/authentication/login
+          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+          - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+    - Users can set CLC_V2_API_URL to specify an endpoint that points to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Create server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
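+
+# Illustrative sketch only: the credentials described in the notes can also be
+# supplied per task through Ansible's environment keyword instead of exporting
+# them beforehand; the values below are placeholders.
+- name: Create server snapshot with credentials passed via the task environment
+  community.general.clc_server_snapshot:
+    server_ids:
+      - UC1TEST-SVR01
+    expiration_days: 10
+    state: present
+  environment:
+    CLC_V2_API_USERNAME: my_clc_user
+    CLC_V2_API_PASSWD: my_clc_password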
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+        :param server_ids: The list of server_ids to restore the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec dictionary required by
+        the server snapshot module
+        :return: the argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ expiration_days=dict(default=7, type='int'),
+ wait=dict(default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
new file mode 100644
index 00000000..2187ceaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_network
+short_description: Create, update, and delete MCP 1.0 & 2.0 networks
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Create, update, and delete MCP 1.0 & 2.0 networks
+author: 'Aimon Bustardo (@aimonb)'
+options:
+ name:
+ description:
+ - The name of the network domain to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Additional description of the network domain.
+ required: false
+ type: str
+ service_plan:
+ description:
+ - The service plan, either "ESSENTIALS" or "ADVANCED".
+ - MCP 2.0 Only.
+ choices: [ESSENTIALS, ADVANCED]
+ default: ESSENTIALS
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an MCP 1.0 network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA5
+ name: mynet
+
+- name: Create an MCP 2.0 network
+ community.general.dimensiondata_network:
+ region: na
+ mcp_user: my_user
+ mcp_password: my_password
+ location: NA9
+ name: mynet
+ service_plan: ADVANCED
+
+- name: Delete a network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA1
+ name: mynet
+ state: absent
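+
+# Illustrative sketch only: the wait behaviour comes from the shared
+# community.general.dimensiondata_wait documentation fragment, and the
+# credential and location values below are placeholders.
+- name: Create an MCP 2.0 network and wait until it reaches the NORMAL state
+  community.general.dimensiondata_network:
+    region: na
+    mcp_user: my_user
+    mcp_password: my_password
+    location: NA9
+    name: mynet
+    description: Network managed by Ansible
+    service_plan: ADVANCED
+    wait: yes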
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only)
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only)
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only)
+ type: bool
+ sample: false
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
+from ansible.module_utils._text import to_native
+
+if HAS_LIBCLOUD:
+ from libcloud.compute.base import NodeLocation
+ from libcloud.common.dimensiondata import DimensionDataAPIException
+
+
+class DimensionDataNetworkModule(DimensionDataModule):
+ """
+ The dimensiondata_network module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data network module.
+ """
+
+ super(DimensionDataNetworkModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.service_plan = self.module.params['service_plan']
+ self.state = self.module.params['state']
+
+ def state_present(self):
+ network = self._get_network()
+
+ if network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network already exists',
+ network=self._network_to_dict(network)
+ )
+
+ network = self._create_network()
+
+ self.module.exit_json(
+ changed=True,
+ msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
+ network=self._network_to_dict(network)
+ )
+
+ def state_absent(self):
+ network = self._get_network()
+
+ if not network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network "%s" does not exist' % self.name,
+ network=self._network_to_dict(network)
+ )
+
+ self._delete_network(network)
+
+ def _get_network(self):
+ if self.mcp_version == '1.0':
+ networks = self.driver.list_networks(location=self.location)
+ else:
+ networks = self.driver.ex_list_network_domains(location=self.location)
+
+ matched_network = [network for network in networks if network.name == self.name]
+ if matched_network:
+ return matched_network[0]
+
+ return None
+
+ def _network_to_dict(self, network):
+ network_dict = dict(
+ id=network.id,
+ name=network.name,
+ description=network.description
+ )
+
+ if isinstance(network.location, NodeLocation):
+ network_dict['location'] = network.location.id
+ else:
+ network_dict['location'] = network.location
+
+ if self.mcp_version == '1.0':
+ network_dict['private_net'] = network.private_net
+ network_dict['multicast'] = network.multicast
+ network_dict['status'] = None
+ else:
+ network_dict['private_net'] = None
+ network_dict['multicast'] = None
+ network_dict['status'] = network.status
+
+ return network_dict
+
+ def _create_network(self):
+
+ # Make sure service_plan argument is defined
+ if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
+ self.module.fail_json(
+ msg='service_plan required when creating network and location is MCP 2.0'
+ )
+
+ # Create network
+ try:
+ if self.mcp_version == '1.0':
+ network = self.driver.ex_create_network(
+ self.location,
+ self.name,
+ description=self.description
+ )
+ else:
+ network = self.driver.ex_create_network_domain(
+ self.location,
+ self.name,
+ self.module.params['service_plan'],
+ description=self.description
+ )
+ except DimensionDataAPIException as e:
+
+ self.module.fail_json(
+ msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ if self.module.params['wait'] is True:
+ network = self._wait_for_network_state(network.id, 'NORMAL')
+
+ return network
+
+ def _delete_network(self, network):
+ try:
+ if self.mcp_version == '1.0':
+ deleted = self.driver.ex_delete_network(network)
+ else:
+ deleted = self.driver.ex_delete_network_domain(network)
+
+ if deleted:
+ self.module.exit_json(
+ changed=True,
+ msg="Deleted network with id %s" % network.id
+ )
+
+ self.module.fail_json(
+                msg="Unexpected failure deleting network with id %s" % network.id
+ )
+
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ def _wait_for_network_state(self, net_id, state_to_wait_for):
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_network_domain,
+ self.module.params['wait_poll_interval'],
+ self.module.params['wait_time'],
+ net_id
+ )
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+ exception=traceback.format_exc()
+ )
+
+
+def main():
+ module = DimensionDataNetworkModule()
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
new file mode 100644
index 00000000..26c621f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_vlan
+short_description: Manage a VLAN in a Cloud Control network domain.
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Manage VLANs in Cloud Control network domains.
+author: 'Adam Friedman (@tintoy)'
+options:
+ name:
+ description:
+ - The name of the target VLAN.
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the VLAN.
+ type: str
+ network_domain:
+ description:
+      - The ID or name of the target network domain.
+ required: true
+ type: str
+ private_ipv4_base_address:
+ description:
+ - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ type: str
+ private_ipv4_prefix_size:
+ description:
+      - The size of the IPv4 address space, e.g. 24.
+ - Required, if C(private_ipv4_base_address) is specified.
+ type: int
+ state:
+ description:
+ - The desired state for the target VLAN.
+ - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ choices: [present, absent, readonly]
+ default: present
+ type: str
+ allow_expand:
+ description:
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
+ - If C(False), the module will fail under these conditions.
+ - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Add or update VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ description: A test VLAN
+ private_ipv4_base_address: 192.168.23.0
+ private_ipv4_prefix_size: 24
+ state: present
+ wait: yes
+
+- name: Read / get VLAN details
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ state: readonly
+ wait: yes
+
+- name: Delete a VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan_1
+ state: absent
+ wait: yes
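+
+# Illustrative example (not part of the original module documentation): expanding an
+# existing VLAN's private IPv4 network is irreversible, so allow_expand must be set.
+- name: Expand the private IPv4 network of an existing VLAN
+  community.general.dimensiondata_vlan:
+    region: na
+    location: NA5
+    network_domain: test_network
+    name: my_vlan1
+    private_ipv4_base_address: 192.168.23.0
+    private_ipv4_prefix_size: 23
+    state: present
+    allow_expand: yes
+    wait: yes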
+'''
+
+RETURN = '''
+vlan:
+ description: Dictionary describing the VLAN.
+ returned: On success when I(state) is 'present'
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+ from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+ HAS_LIBCLOUD = True
+
+except ImportError:
+ DimensionDataVlan = None
+
+ HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+ """
+ The dimensiondata_vlan module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data VLAN module.
+ """
+
+ super(DimensionDataVlanModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ network_domain=dict(required=True, type='str'),
+ private_ipv4_base_address=dict(default='', type='str'),
+ private_ipv4_prefix_size=dict(default=0, type='int'),
+ allow_expand=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'readonly'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.network_domain_selector = self.module.params['network_domain']
+ self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+ self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+ self.state = self.module.params['state']
+ self.allow_expand = self.module.params['allow_expand']
+
+ if self.wait and self.state != 'present':
+ self.module.fail_json(
+ msg='The wait parameter is only supported when state is "present".'
+ )
+
+ def state_present(self):
+ """
+ Ensure that the target VLAN is present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ vlan = self._create_vlan(network_domain)
+ self.module.exit_json(
+ msg='Created VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+ else:
+ diff = VlanDiff(vlan, self.module.params)
+ if not diff.has_changes():
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+
+ return
+
+ try:
+ diff.ensure_legal_change()
+ except InvalidVlanChangeError as invalid_vlan_change:
+ self.module.fail_json(
+ msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
+ self.name, self.network_domain_selector, invalid_vlan_change
+ )
+ )
+
+ if diff.needs_expand() and not self.allow_expand:
+ self.module.fail_json(
+ msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
+ self.private_ipv4_prefix_size
+ ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
+ vlan.private_ipv4_range_size
+ ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
+ )
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ if diff.needs_edit():
+ vlan.name = self.name
+ vlan.description = self.description
+
+ self.driver.ex_update_vlan(vlan)
+
+ if diff.needs_expand():
+ vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
+ self.driver.ex_expand_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Updated VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ def state_readonly(self):
+ """
+ Read the target VLAN's state.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if vlan:
+ self.module.exit_json(
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+ else:
+ self.module.fail_json(
+ msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ )
+ )
+
+ def state_absent(self):
+ """
+ Ensure that the target VLAN is not present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=False
+ )
+
+ return
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ self._delete_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ def _get_vlan(self, network_domain):
+ """
+ Retrieve the target VLAN details from CloudControl.
+
+ :param network_domain: The target network domain.
+ :return: The VLAN, or None if the target VLAN was not found.
+ :rtype: DimensionDataVlan
+ """
+
+ vlans = self.driver.ex_list_vlans(
+ location=self.location,
+ network_domain=network_domain
+ )
+ matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
+ if matching_vlans:
+ return matching_vlans[0]
+
+ return None
+
+ def _create_vlan(self, network_domain):
+ vlan = self.driver.ex_create_vlan(
+ network_domain,
+ self.name,
+ self.private_ipv4_base_address,
+ self.description,
+ self.private_ipv4_prefix_size
+ )
+
+ if self.wait:
+ vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
+
+ return vlan
+
+ def _delete_vlan(self, vlan):
+ try:
+ self.driver.ex_delete_vlan(vlan)
+
+ # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
+ if self.wait:
+ self._wait_for_vlan_state(vlan, 'NOT_FOUND')
+
+ except DimensionDataAPIException as api_exception:
+ self.module.fail_json(
+ msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
+ vlan.id, api_exception.msg
+ )
+ )
+
+ def _wait_for_vlan_state(self, vlan, state_to_wait_for):
+ network_domain = self._get_network_domain()
+
+ wait_poll_interval = self.module.params['wait_poll_interval']
+ wait_time = self.module.params['wait_time']
+
+ # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
+
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_vlan,
+ wait_poll_interval,
+ wait_time,
+ vlan
+ )
+
+ except DimensionDataAPIException as api_exception:
+ if api_exception.code != 'RESOURCE_NOT_FOUND':
+ raise
+
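+            # RESOURCE_NOT_FOUND while polling typically means the VLAN has already
+            # been deleted; return a synthetic VLAN object with a NOT_FOUND status.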
+ return DimensionDataVlan(
+ id=vlan.id,
+ status='NOT_FOUND',
+ name='',
+ description='',
+ private_ipv4_range_address='',
+ private_ipv4_range_size=0,
+ ipv4_gateway='',
+ ipv6_range_address='',
+ ipv6_range_size=0,
+ ipv6_gateway='',
+ location=self.location,
+ network_domain=network_domain
+ )
+
+ def _get_network_domain(self):
+ """
+ Retrieve the target network domain from the Cloud Control API.
+
+ :return: The network domain.
+ """
+
+ try:
+ return self.get_network_domain(
+ self.network_domain_selector, self.location
+ )
+ except UnknownNetworkError:
+ self.module.fail_json(
+ msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
+ self.network_domain_selector, self.location
+ )
+ )
+
+ return None
+
+
+class InvalidVlanChangeError(Exception):
+ """
+ Error raised when an illegal change to VLAN state is attempted.
+ """
+
+ pass
+
+
+class VlanDiff(object):
+ """
+ Represents differences between VLAN information (from CloudControl) and module parameters.
+ """
+
+ def __init__(self, vlan, module_params):
+ """
+        Create a new VlanDiff.
+
+ :param vlan: The VLAN information from CloudControl.
+ :type vlan: DimensionDataVlan
+ :param module_params: The module parameters.
+ :type module_params: dict
+ """
+
+ self.vlan = vlan
+ self.module_params = module_params
+
+ self.name_changed = module_params['name'] != vlan.name
+ self.description_changed = module_params['description'] != vlan.description
+ self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
+ self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
+
+ # Is configured prefix size greater than or less than the actual prefix size?
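+        # A larger prefix size means a smaller network, so an increase would shrink
+        # the VLAN (not allowed), while a decrease expands it.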
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+ self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+ self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
+
+ def has_changes(self):
+ """
+ Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+        :return: True, if there are changes; otherwise, False.
+ """
+
+ return self.needs_edit() or self.needs_expand()
+
+ def ensure_legal_change(self):
+ """
+ Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+ - private_ipv4_base_address cannot be changed
+ - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
+
+ :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+ """
+
+ # Cannot change base address for private IPv4 network.
+ if self.private_ipv4_base_address_changed:
+ raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
+
+ # Cannot shrink private IPv4 network (by increasing prefix size).
+ if self.private_ipv4_prefix_size_increased:
+ raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
+
+ def needs_edit(self):
+ """
+ Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+ :return: True, if an Edit operation is required; otherwise, False.
+ """
+
+ return self.name_changed or self.description_changed
+
+ def needs_expand(self):
+ """
+ Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+ The VLAN's network is expanded by reducing the size of its network prefix.
+
+ :return: True, if an Expand operation is required; otherwise, False.
+ """
+
+ return self.private_ipv4_prefix_size_decreased
+
+
+def vlan_to_dict(vlan):
+ return {
+ 'id': vlan.id,
+ 'name': vlan.name,
+ 'description': vlan.description,
+ 'location': vlan.location.id,
+ 'private_ipv4_base_address': vlan.private_ipv4_range_address,
+ 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
+ 'private_ipv4_gateway_address': vlan.ipv4_gateway,
+ 'ipv6_base_address': vlan.ipv6_range_address,
+ 'ipv6_prefix_size': vlan.ipv6_range_size,
+ 'ipv6_gateway_address': vlan.ipv6_gateway,
+ 'status': vlan.status
+ }
+
+
+def main():
+ module = DimensionDataVlanModule()
+
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'readonly':
+ module.state_readonly()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_compose.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+ - Uses Docker Compose to start, shutdown and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), or C(docker-compose stop) when I(stopped) is set, or C(docker-compose restart)
+        when I(restarted) is set.
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart) (with I(restarted))
+        on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
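+# Maps the module's Docker connection parameters to the equivalent docker-compose
+# command-line options so they can be merged into the options passed to the project.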
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
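+# docker-compose writes progress and error messages directly to stdout/stderr.
+# These context managers temporarily redirect both streams to temporary files so
+# the module can capture and parse that output (and keep it out of the module's
+# JSON response to Ansible).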
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+ finally:
+ sys.stderr = old_fh
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+ # assume either the exception body (if present) or the last warning was the 'most'
+ # fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
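+        # Copy every module parameter onto this instance, overriding the None
+        # placeholders declared above.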
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
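+        # Translate the module's 'recreate' choice into the docker-compose flags
+        # that determine the convergence strategy.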
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
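+                    # Positive delta means containers will be added, negative means removed.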
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+            self.client.fail("Error scaling %s - expected int, got %s"
+                             % (service_name, str(type(self.scale[service_name]))))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py
new file mode 100644
index 00000000..5e7e426c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_config.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs.
+
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds to the metadata of new configs 'ansible_key', a hash representation of the data, which is then used
+ in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
+ unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+options:
+ data:
+ description:
+ - The value of the config. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+      - "A map of key:value metadata, where both the I(key) and I(value) are expected to be strings."
+      - If new metadata is provided, or existing metadata is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.general.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+  # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the config data
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove config foo
+ community.general.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
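+            # The SHA-224 digest of the data is stored as the 'ansible_key' label on
+            # the config so later runs can detect data changes without reading it back.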
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ for config in configs:
+ if config['Spec']['Name'] == self.name:
+ return config
+ return None
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ config_id = self.client.create_config(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ config = self.get_config()
+ if config:
+ self.results['config_id'] = config['ID']
+ data_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ config = self.get_config()
+ if config:
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
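+# A minimal sketch (illustrative, not part of the module) of the idempotency check above:
+# Docker does not expose a config's payload after creation, so ConfigManager stores a
+# SHA-224 digest of the data in an 'ansible_key' label and compares it on later runs.
+#
+#   import hashlib
+#   data_key = hashlib.sha224(b'Goodnight everyone!').hexdigest()
+#   existing = config['Spec'].get('Labels', {}).get('ansible_key')
+#   changed = existing is not None and existing != data_key  # mismatch (or force=yes) triggers re-create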
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py
new file mode 100644
index 00000000..30033ebf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container.py
@@ -0,0 +1,3563 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+
+notes:
+ - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify *all* options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+ Please note that several options have default values; if the container to be restarted uses different values for
+ these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
+ I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
+ can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
+ community.general 3.0.0 on.
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: no
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ type: raw
+ comparisons:
+ description:
+ - Allows specifying how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as C(networks).
+ - Must be a dictionary mapping each option to one of the values C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which isn't present in the
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to *all* comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems with
+ containers which use different values for these options.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 3.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(container_default_behavior) to an explicit
+ value.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: '0.2.0'
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ version_added: '0.2.0'
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_requests:
+ description:
+ - Allows requesting additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+ - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+ - If a variable is also present in I(env), then the I(env) value will override it.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: no
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not the container is "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+ - Consecutive number of failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+ - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+ container to the requested configuration. The evaluation includes the image version. If the image
+ version in the registry does not match the container, the container will be recreated. You can
+ stop this behavior by setting I(ignore_image) to C(True).
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ type: bool
+ default: no
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - This option requires Docker API >= 1.25.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: yes
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+ - Setting this will force the container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+ - If not set, the value remains the same if the container exists and will be inherited
+ from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+ - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from community.general 3.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+ - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - Note that as opposed to C(docker run ...), M(community.general.docker_container) does not remove the default
+ network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
+ Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from community.general 2.0.0 on.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "When networks are provided to the module via the I(networks) option, the module
+ behaves differently than C(docker run --network): C(docker run --network other)
+ will create a container with network C(other) attached, but the default network
+ not attached. This module with I(networks: {name: other}) will create a container
+ with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
+ the C(default) network will be removed afterwards."
+ - "If I(networks_cli_compatible) is set to C(yes), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for community.general 3.0.0: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ - The current default value is C(no). A new default of C(yes) will be set in community.general 2.0.0.
+ type: bool
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+ - If set to true, output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+ type: bool
+ default: no
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+ Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ privileged:
+ description:
+ - Give extended privileges to the container.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+ is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+ to resolve hostnames."
+ - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+ - If I(networks) parameter is provided, will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image
+ when missing.
+ - "*Note:* images are only pulled when specified by name. If the image is specified
+ as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: no
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+ - When removing an existing container, the docker daemon API call returns after the container
+ is scheduled for removal. Removal is usually very fast, but it can happen that during high I/O
+ load, removal can take longer. By default, the module will wait until the container has been
+ removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ version_added: '0.2.0'
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+ - Place quotes around the C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+ - Use with I(restart_policy) to control the maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option.
+ - Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+ - When the container is stopped, this value will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ trust_image_content:
+ description:
+ - If C(yes), skip image verification.
+ - The option has never been used by the module. It will be removed in community.general 3.0.0.
+ type: bool
+ default: no
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key,value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.general.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.general.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.general.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.general.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.general.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.general.docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.general.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.general.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag"; for
+ # older docker installs, use "syslog-tag" instead
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.general.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.general.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ community.general.docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ community.general.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or timeouts, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ - # Add nVidia GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # We have one OR condition: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_container). Note that the returned fact will be removed in
+ community.general 2.0.0.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+ - Empty if I(state) is C(absent)
+ - If I(detach) is C(false), will include C(Output) attribute containing any output from container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+ clean_dict_booleans_for_docker_api,
+ omit_none_from_dict,
+ parse_healthcheck,
+ DOCKER_COMMON_ARGS,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version
+ if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+ from docker.types import Ulimit, LogConfig
+ from docker import types as docker_types
+ else:
+ from docker.utils.types import Ulimit, LogConfig
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'kernel_memory',
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
+
+def is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
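+# Illustrative usage:
+#   is_volume_permissions('ro,z')  -> True
+#   is_volume_permissions('/data') -> False (a container path, not a list of modes)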
+
+
+def parse_port_range(range_or_port, client):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+ Returns a list of integers for each port in the list.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ client.fail('Invalid port: "{0}"'.format(range_or_port))
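+# Illustrative usage (client is any object exposing fail(), e.g. AnsibleDockerClient):
+#   parse_port_range('8080', client)      -> [8080]
+#   parse_port_range('8000-8002', client) -> [8000, 8001, 8002]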
+
+
+def split_colon_ipv6(text, client):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ if k == len(text):
+ result.append('')
+ break
+ start = k + 1
+ return result
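+# Illustrative usage: bracketed IPv6 bind addresses survive the split as a single component:
+#   split_colon_ipv6('127.0.0.1:8081:9001/udp', client) -> ['127.0.0.1', '8081', '9001/udp']
+#   split_colon_ipv6('[::1]:8080:80', client)           -> ['[::1]', '8080', '80']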
+
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.auto_remove = None
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cap_drop = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpus = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.detach = None
+ self.debug = None
+ self.devices = None
+ self.device_read_bps = None
+ self.device_write_bps = None
+ self.device_read_iops = None
+ self.device_write_iops = None
+ self.device_requests = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.domainname = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.healthcheck = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.init = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.output_logs = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.mounts = None
+ self.name = None
+ self.network_mode = None
+ self.userns_mode = None
+ self.networks = None
+ self.networks_cli_compatible = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.pids_limit = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.removal_wait_timeout = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.runtime = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.tmpfs = None
+ self.trust_image_content = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+ self.working_dir = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+ self.comparisons = client.comparisons
+
+ # If state is 'absent', parameters do not have to be parsed or interpreted.
+ # Only the container's name is needed.
+ if self.state == 'absent':
+ return
+
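+ # The Docker API expresses CPU limits as nano-CPUs (1 CPU == 1e9 units), so the
+ # float from the cpus option is converted here and later passed to
+ # create_host_config() as nano_cpus.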
+ if self.cpus is not None:
+ self.cpus = int(round(self.cpus * 1E9))
+
+ if self.groups:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports in ('all', 'ALL'):
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.tmpfs = self._parse_tmpfs()
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.sysctls = self._parse_sysctls()
+ self.log_config = self._parse_log_config()
+ try:
+ self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
+ except ValueError as e:
+ self.fail(to_native(e))
+
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+ self.pid_mode = self._replace_container_names(self.pid_mode)
+ self.ipc_mode = self._replace_container_names(self.ipc_mode)
+ self.network_mode = self._replace_container_names(self.network_mode)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ if self.mac_address:
+ # Ensure the MAC address uses colons instead of hyphens for later comparison
+ self.mac_address = self.mac_address.replace('-', ':')
+
+ if self.entrypoint:
+ # convert from list to str.
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])
+
+ if self.command:
+ # convert from list to str
+ if isinstance(self.command, list):
+ self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])
+
+ self.mounts_opt, self.expected_mounts = self._process_mounts()
+
+ self._check_mount_target_collisions()
+
+ for param_name in ["device_read_bps", "device_write_bps"]:
+ if client.module.params.get(param_name):
+ self._process_rate_bps(option=param_name)
+
+ for param_name in ["device_read_iops", "device_write_iops"]:
+ if client.module.params.get(param_name):
+ self._process_rate_iops(option=param_name)
+
+ if self.device_requests:
+ for dr_index, dr in enumerate(self.device_requests):
+ # Make sure that capabilities are lists of lists of strings
+ if dr['capabilities']:
+ for or_index, or_list in enumerate(dr['capabilities']):
+ for and_index, and_term in enumerate(or_list):
+ if not isinstance(and_term, string_types):
+ self.fail(
+ "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
+ dr_index, or_index, and_index))
+ or_list[and_index] = to_native(and_term)
+ # Make sure that options is a dictionary mapping strings to strings
+ if dr['options']:
+ dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ cpuset_mems='cpuset_mems',
+ mem_limit='memory',
+ mem_reservation='memory_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory',
+ restart_policy='restart_policy',
+ )
+
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
+ restart_policy = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+ result[key] = restart_policy
+ elif self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+ return result
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ domainname='domainname',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ working_dir='working_dir',
+ stop_timeout='stop_timeout',
+ healthcheck='healthcheck',
+ )
+
+ if self.client.docker_py_version < LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ create_params['cpu_shares'] = 'cpu_shares'
+ create_params['volume_driver'] = 'volume_driver'
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+
+ if self.disable_healthcheck:
+ # Make sure image's health check is overridden
+ result['healthcheck'] = {'test': ['NONE']}
+
+ if self.networks_cli_compatible and self.networks:
+ network = self.networks[0]
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if network.get(para):
+ params[para] = network[para]
+ network_config = dict()
+ network_config[network['name']] = self.client.create_endpoint_config(**params)
+ result['networking_config'] = self.client.create_networking_config(network_config)
+ return result
+
+ def _expand_host_paths(self):
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
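+ # Illustrative behavior (assuming HOME=/home/user): host paths starting with '.' or '~'
+ # are expanded to absolute paths (an 'rw' mode is appended if none was given), e.g.
+ #   '~/data:/data'  -> '/home/user/data:/data:rw'
+ #   '/srv/www:/www' -> '/srv/www:/www'  (already absolute, left untouched)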
+
+ def _get_mounts(self):
+ '''
+ Return a list of container mounts.
+ :return:
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ # Only pass anonymous volumes to create container
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
+
+ host_config_params = dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ dns='dns_servers',
+ dns_opt='dns_opts',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ userns_mode='userns_mode',
+ cap_add='capabilities',
+ cap_drop='cap_drop',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ sysctls='sysctls',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ oom_kill_disable='oom_killer',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode',
+ tmpfs='tmpfs',
+ init='init',
+ uts_mode='uts',
+ runtime='runtime',
+ auto_remove='auto_remove',
+ device_read_bps='device_read_bps',
+ device_write_bps='device_write_bps',
+ device_read_iops='device_read_iops',
+ device_write_iops='device_write_iops',
+ pids_limit='pids_limit',
+ mounts='mounts',
+ nano_cpus='cpus',
+ )
+
+ if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
+ # blkio_weight can always be updated, but can only be set on creation
+ # when Docker SDK for Python and Docker API are new enough
+ host_config_params['blkio_weight'] = 'blkio_weight'
+
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ host_config_params['cpu_shares'] = 'cpu_shares'
+ host_config_params['volume_driver'] = 'volume_driver'
+
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ if 'mounts' in params:
+ params['mounts'] = self.mounts_opt
+
+ if self.device_requests is not None:
+ params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
+
+ return self.client.create_host_config(**params)
+
+ @property
+ def default_host_ip(self):
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ try:
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ except NotFound as nfe:
+ self.client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
+ exception=traceback.format_exc()
+ )
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
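+ # Accepted forms (illustrative): '80' binds container port 80 to the default host IP,
+ # '8080:80' binds host port 8080 to container port 80, and '127.0.0.1:8080:80/tcp'
+ # additionally pins the bind address and protocol.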
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ return 'all'
+
+ default_ip = self.default_host_ip
+
+ binds = {}
+ for port in self.published_ports:
+ parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = parse_port_range(container_port, self.client)
+
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(default_ip,)]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(default_ip, parts[0])]
+ else:
+ port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ return binds
+
+ def _get_volume_binds(self, volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
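+ # Illustrative example: '/srv/data:/data:ro' yields {'/srv/data': {'bind': '/data', 'mode': 'ro'}};
+ # with '/srv/data:/data' the mode defaults to 'rw'.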
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
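+ # Illustrative examples: '8080/udp' becomes ('8080', 'udp'); a bare '80' defaults to ('80', 'tcp').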
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+ Turn links into a list of (link, alias) tuples
+ '''
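+ # Illustrative examples: 'db:database' becomes ('db', 'database'); a bare 'db' becomes ('db', 'db').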
+ if links is None:
+ return None
+
+ result = []
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result.append((parsed_link[0], parsed_link[1]))
+ else:
+ result.append((parsed_link[0], parsed_link[0]))
+ return result
+
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
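+ # Illustrative example: 'nofile:1024:2048' becomes Ulimit(name='nofile', soft=1024, hard=2048);
+ # 'nofile:1024' uses 1024 for both the soft and the hard limit.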
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
+ def _parse_sysctls(self):
+ '''
+ Return sysctls unchanged; they are already a plain dict of key/value pairs
+ '''
+ return self.sysctls
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
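+ # Illustrative example: log_driver='json-file' with log_options={'max-size': '10m'} yields a
+ # LogConfig with Type='json-file' and Config={'max-size': '10m'}; option values must be strings.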
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config=dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = dict()
+ for k, v in self.log_options.items():
+ if not isinstance(v, string_types):
+ self.client.module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ self.log_options[k] = v
+ options['Config'][k] = v
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _parse_tmpfs(self):
+ '''
+ Turn the tmpfs list into a dict mapping mount points to their option strings
+ '''
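+ # Illustrative example: '/run:rw,size=64m' becomes {'/run': 'rw,size=64m'}; a bare '/run' maps to ''.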
+ result = dict()
+ if self.tmpfs is None:
+ return result
+
+ for tmpfs_spec in self.tmpfs:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return result
+
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if self.env:
+ for name, value in self.env.items():
+ if not isinstance(value, string_types):
+ self.fail("Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+ return network_id
+
+ def _process_mounts(self):
+ if self.mounts is None:
+ return None, None
+ mounts_list = []
+ mounts_expected = []
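+ # Each entry of self.mounts is a dict such as (illustrative)
+ # {'type': 'volume', 'source': 'mydata', 'target': '/data'}; type-specific keys are validated below.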
+ for mount in self.mounts:
+ target = mount['target']
+ datatype = mount['type']
+ mount_dict = dict(mount)
+ # Sanity checks (so we don't wait for docker-py to barf on input)
+ if mount_dict.get('source') is None and datatype != 'tmpfs':
+ self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+ mount_option_types = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+ )
+ for option, req_datatype in mount_option_types.items():
+ if mount_dict.get(option) is not None and datatype != req_datatype:
+ self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+ # Handle volume_driver and volume_options
+ volume_driver = mount_dict.pop('volume_driver')
+ volume_options = mount_dict.pop('volume_options')
+ if volume_driver:
+ if volume_options:
+ volume_options = clean_dict_booleans_for_docker_api(volume_options)
+ mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict.get('tmpfs_size') is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+ if mount_dict.get('tmpfs_mode') is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+ self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+ # Fill expected mount dict
+ mount_expected = dict(mount)
+ mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+ mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+ # Add result to lists
+ mounts_list.append(docker_types.Mount(**mount_dict))
+ mounts_expected.append(omit_none_from_dict(mount_expected))
+ return mounts_list, mounts_expected
+
+ def _process_rate_bps(self, option):
+ """
+ Format device_read_bps and device_write_bps option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _process_rate_iops(self, option):
+ """
+ Format device_read_iops and device_write_iops option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
+
+ def _check_mount_target_collisions(self):
+ last = dict()
+
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ self.parameters.expected_device_requests = None
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+ self.parameters_map['expected_device_requests'] = 'device_requests'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return True if self.container else False
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return False
+
+ def _compare(self, a, b, compare):
+ '''
+ Compare values a and b as described in compare.
+ '''
+ return compare_generic(a, b, compare['comparison'], compare['type'])
+
+ def _decode_mounts(self, mounts):
+ if not mounts:
+ return mounts
+ result = []
+ empty_dict = dict()
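+ # Translates docker inspect keys into module option style, e.g. (illustrative)
+ # {'Type': 'bind', 'Source': '/srv', 'Target': '/data'} becomes
+ # {'type': 'bind', 'source': '/srv', 'target': '/data', ...}.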
+ for mount in mounts:
+ res = dict()
+ res['type'] = mount.get('Type')
+ res['source'] = mount.get('Source')
+ res['target'] = mount.get('Target')
+ res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
+ res['consistency'] = mount.get('Consistency')
+ res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
+ res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
+ res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
+ res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
+ res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
+ res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
+ res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
+ result.append(res)
+ return result
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_config_diff: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API 1.25 already supports it is unclear.
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')),
+ cpus=host_config.get('NanoCpus'),
+ )
+ # Options which don't make sense without their accompanying option
+ if self.parameters.log_driver:
+ config_mapping['log_driver'] = log_config.get('Type')
+ config_mapping['log_options'] = log_config.get('Config')
+
+ if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
+ # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
+ # it has a default value, that's why we have to jump through the hoops here
+ config_mapping['auto_remove'] = host_config.get('AutoRemove')
+
+ if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
+ # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
+ # stop_timeout has a hybrid role, in that it used to be something only used
+ # for stopping containers, and is now also used as a container property.
+ # That's why it needs special handling here.
+ config_mapping['stop_timeout'] = config.get('StopTimeout')
+
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # For docker API < 1.22, update_container() is not supported. Thus
+ # we need to handle all limits which are usually handled by
+ # update_container() as configuration changes which require a container
+ # restart.
+ restart_policy = host_config.get('RestartPolicy', dict())
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ config_mapping.update(dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ ))
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
+ if not minimal_version.get('supported', True):
+ continue
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
+ if getattr(self.parameters, key, None) is not None:
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ p = getattr(self.parameters, key)
+ c = value
+ if compare['type'] == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif compare['type'] == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if key == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
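+ # Illustrative example: '/dev/sda:/dev/xvda:r' becomes
+ # dict(CgroupPermissions='r', PathInContainer='/dev/xvda', PathOnHost='/dev/sda').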
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+ def _get_expected_entrypoint(self):
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if self.parameters.published_ports is None:
+ return None
+ expected_bound_ports = {}
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ return expected_bound_ports
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
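+ # Links are rendered as '/<link>:/<container name>/<alias>', e.g. (illustrative)
+ # ('db', 'database') with container name 'web' becomes '/db:/web/database'.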
+ for link, alias in self.parameters.links:
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = parts + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_device_requests(self):
+ if self.parameters.device_requests is None:
+ return None
+ device_requests = []
+ for dr in self.parameters.device_requests:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
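+ # Illustrative example: {'/host': {'bind': '/data', 'mode': 'ro'}} yields ['/host:/data:ro'].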
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
+ expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = dict()
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Env'):
+ for env_var in image[self.parameters.client.image_inspect_source]['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
+ image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_sysctls(self, config_sysctls):
+ self.log('_get_expected_sysctls')
+ if config_sysctls is None:
+ return None
+ result = dict()
+ for key, value in config_sysctls.items():
+ result[key] = to_text(value, errors='surrogate_or_strict')
+ return result
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
+
+ def _normalize_port(self, port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
+
+ def _get_expected_healthcheck(self):
+ self.log('_get_expected_healthcheck')
+ expected_healthcheck = dict()
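+ # Keys are converted to Docker's CamelCase form, e.g. (illustrative) 'start_period'
+ # becomes 'StartPeriod' and 'test' becomes 'Test'.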
+
+ if self.parameters.healthcheck:
+ expected_healthcheck.update([(k.title().replace("_", ""), v)
+ for k, v in self.parameters.healthcheck.items()])
+
+ return expected_healthcheck
+
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
+ client.module.warn('log_options is ignored when log_driver is not specified')
+ if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
+ client.module.warn('healthcheck is ignored when test is not specified')
+ if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
+ client.module.warn('restart_retries is ignored when restart_policy is not specified')
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['ansible_facts'] = {'docker_container': self.facts}
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; in the former case, if the
+ # container needs to be restarted, we use the existing container's
+ # image ID.
+ image = self._get_image()
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.parameters.image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if self.parameters.comparisons['image']['comparison'] == 'strict':
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.parameters.image
+ if not image_to_use and container and container.Image:
+ image_to_use = container.Image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(image_to_use, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.Id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
+ self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.parameters.paused:
+ self.client.pause(container=container.Id)
+ else:
+ self.client.unpause(container=container.Id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
+ ))
+ container = self._get_container(container.Id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.parameters.paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.Id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.fail(msg, **kwargs)
+
+ def _output_logs(self, msg):
+ self.client.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ if is_image_name_id(self.parameters.image):
+ image = self.client.find_image_by_id(self.parameters.image)
+ else:
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not image or self.parameters.pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.client.pull_image(repository, tag)
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
+ has_network_differences, network_differences = container.has_network_differences()
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if diff['parameter'].get(para):
+ params[para] = diff['parameter'][para]
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ self.client.report_warnings(new_container)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.parameters.detach is False:
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ status = self.client.wait(container_id)['StatusCode']
+ else:
+ status = self.client.wait(container_id)
+ if self.parameters.auto_remove:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output, status=status)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ dummy = self.client.restart(container_id)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+
+def detect_ipvX_address_usage(client):
+ '''
+ Helper function to detect whether any specified network uses ipv4_address or ipv6_address
+ '''
+ for network in client.module.params.get("networks") or []:
+ if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None:
+ return True
+ return False
+
+
+class AnsibleDockerClientContainer(AnsibleDockerClient):
+ # A list of module options which are not docker container properties
+ __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
+ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
+ 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
+ 'output_logs', 'paused', 'removal_wait_timeout'
+ ] + list(DOCKER_COMMON_ARGS.keys()))
+
+ def _parse_comparisons(self):
+ comparisons = {}
+ comp_aliases = {}
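+ # The comparisons module option maps option names (or '*') to a comparison mode, e.g.
+ # (illustrative) comparisons={'image': 'ignore', 'env': 'strict', '*': 'ignore'}.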
+ # Put in defaults
+ explicit_types = dict(
+ command='list',
+ devices='set(dict)',
+ device_requests='set(dict)',
+ dns_search_domains='list',
+ dns_servers='list',
+ env='set',
+ entrypoint='list',
+ etc_hosts='set',
+ mounts='set(dict)',
+ networks='set(dict)',
+ ulimits='set(dict)',
+ device_read_bps='set(dict)',
+ device_write_bps='set(dict)',
+ device_read_iops='set(dict)',
+ device_write_iops='set(dict)',
+ )
+ all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
+ default_values = dict(
+ stop_timeout='ignore',
+ )
+ for option, data in self.module.argument_spec.items():
+ all_options.add(option)
+ for alias in data.get('aliases', []):
+ all_options.add(alias)
+ # Ignore options which aren't used as container properties
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+                    if key in all_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
+
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
+ self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
+
+ def __init__(self, **kwargs):
+ option_minimal_versions = dict(
+ # internal options
+ log_config=dict(),
+ publish_all_ports=dict(),
+ ports=dict(),
+ volume_binds=dict(),
+ name=dict(),
+ # normal options
+ device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
+ dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
+ ipc_mode=dict(docker_api_version='1.25'),
+ mac_address=dict(docker_api_version='1.25'),
+ oom_score_adj=dict(docker_api_version='1.22'),
+ shm_size=dict(docker_api_version='1.22'),
+ stop_signal=dict(docker_api_version='1.21'),
+ tmpfs=dict(docker_api_version='1.22'),
+ volume_driver=dict(docker_api_version='1.21'),
+ memory_reservation=dict(docker_api_version='1.21'),
+ kernel_memory=dict(docker_api_version='1.21'),
+ auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
+ init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
+ userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
+ pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
+ # specials
+ ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
+ detect_usage=detect_ipvX_address_usage,
+ usage_msg='ipv4_address or ipv6_address in networks'),
+ stop_timeout=dict(), # see _get_additional_minimal_versions()
+ )
+
+ super(AnsibleDockerClientContainer, self).__init__(
+ option_minimal_versions=option_minimal_versions,
+ option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
+ **kwargs
+ )
+
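+        # Key of the image inspection result that holds the image configuration:
+        # 'Config' on current daemons, 'ContainerConfig' on very old ones (API < 1.21).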
+ self.image_inspect_source = 'Config'
+ if self.docker_api_version < LooseVersion('1.21'):
+ self.image_inspect_source = 'ContainerConfig'
+
+ self._get_additional_minimal_versions()
+ self._parse_comparisons()
+
+ if self.module.params['container_default_behavior'] is None:
+ self.module.params['container_default_behavior'] = 'compatibility'
+ self.module.deprecate(
+ 'The container_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 3.0.0. To remove this warning, please specify an explicit value for it now',
+ version='3.0.0', collection_name='community.general' # was Ansible 2.14
+ )
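+        # In 'compatibility' mode, options the user left unset fall back to the
+        # defaults used by older versions of this module, so existing playbooks
+        # keep behaving the same way.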
+ if self.module.params['container_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory="0",
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+
+def main():
+ argument_spec = dict(
+ auto_remove=dict(type='bool'),
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='raw'),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpus=dict(type='float'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ detach=dict(type='bool'),
+ devices=dict(type='list', elements='str'),
+ device_read_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_write_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_read_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_write_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_requests=dict(type='list', elements='dict', options=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ dns_servers=dict(type='list', elements='str'),
+ dns_opts=dict(type='list', elements='str'),
+ dns_search_domains=dict(type='list', elements='str'),
+ domainname=dict(type='str'),
+ entrypoint=dict(type='list', elements='str'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ init=dict(type='bool'),
+ interactive=dict(type='bool'),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list', elements='str'),
+ log_driver=dict(type='str'),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ mounts=dict(type='list', elements='dict', options=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ )),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ )),
+ networks_cli_compatible=dict(type='bool'),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pid_mode=dict(type='str'),
+ pids_limit=dict(type='int'),
+ privileged=dict(type='bool'),
+ published_ports=dict(type='list', elements='str', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int'),
+ runtime=dict(type='str'),
+ security_opts=dict(type='list', elements='str'),
+ shm_size=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ sysctls=dict(type='dict'),
+ tmpfs=dict(type='list', elements='str'),
+ trust_image_content=dict(type='bool', default=False, removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ ulimits=dict(type='list', elements='str'),
+ user=dict(type='str'),
+ userns_mode=dict(type='str'),
+ uts=dict(type='str'),
+ volume_driver=dict(type='str'),
+ volumes=dict(type='list', elements='str'),
+ volumes_from=dict(type='list', elements='str'),
+ working_dir=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClientContainer(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']:
+ client.module.deprecate(
+ 'Please note that docker_container handles networks slightly different than docker CLI. '
+ 'If you specify networks, the default network will still be attached as the first network. '
+ '(You can specify purge_networks to remove all networks not explicitly listed.) '
+ 'This behavior will change in community.general 2.0.0. You can change the behavior now by setting '
+ 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting '
+ 'it to `no`',
+ version='2.0.0', collection_name='community.general', # was Ansible 2.12
+ )
+ if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
+ client.module.deprecate(
+ 'Please note that the default value for `network_mode` will change from not specified '
+ '(which is equal to `default`) to the name of the first network in `networks` if '
+ '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
+ 'change the behavior now by explicitly setting `network_mode` to the name of the first '
+ 'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
+ 'Please make sure that the value you set to `network_mode` equals the inspection result '
+ 'for existing containers, otherwise the module will recreate them. You can find out the '
+ 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+ version='3.0.0', collection_name='community.general', # was Ansible 2.14
+ )
+
+ try:
+ cm = ContainerManager(client)
+ client.module.exit_json(**sanitize_result(cm.results))
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py
new file mode 100644
index 00000000..80025067
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.general.docker_container)
+ returns for a non-absent container.
+
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+ community.general.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+    - Will be C(none) if the container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
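+        # get_container() accepts a container name or a long/short container ID and
+        # returns the inspection result, or None if no matching container exists.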
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(container),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py
new file mode 100644
index 00000000..674f8ad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists of objects (containers, images, networks, volumes).
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+  - The module also allows listing object names for containers, images, networks and volumes.
+    It also allows querying information on disk usage.
+  - The output differs depending on the API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+      - Summary information on the disk space used by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+      - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+        the output will contain verbose information about objects matching the full output of the API method.
+        For details see the documentation of your version of the Docker API at L(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the corresponding
+        I(_info) module for each object type.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.general.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.general.docker_host_info:
+ images: yes
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.general.docker_host_info:
+ images: yes
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.general.docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.general.docker_host_info:
+ disk_usage: yes
+ register: result
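+
+# The *_filters options accept the filters understood by the corresponding Docker
+# list API. Illustrative example: only list running containers.
+- name: Get info on docker host and list running containers
+  community.general.docker_host_info:
+    containers: yes
+    containers_filters:
+      status: running
+  register: result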
+
+- ansible.builtin.debug:
+ var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+      Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(yes)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+      Keys match the C(docker network ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(yes)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+      Keys match the C(docker container ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(yes)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+      Keys match the C(docker image ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(images) is C(yes)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+    - Summary information on disk usage by images, containers and volumes on the docker host,
+      unless I(verbose_output=yes). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(yes)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
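+        # For each requested object type, fetch its list, applying any user-supplied
+        # filters (boolean filter values are normalized for the API first).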
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+ items = None
+ items_list = []
+
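+        # Column subsets used for non-verbose output; they correspond to the default
+        # columns of the matching 'docker ... ls' commands.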
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ items = self.client.containers(**filter_arg)
+ elif docker_object == 'networks':
+ items = self.client.networks(**filter_arg)
+ elif docker_object == 'images':
+ items = self.client.images(**filter_arg)
+ elif docker_object == 'volumes':
+ items = self.client.volumes(**filter_arg)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ if docker_object != 'volumes':
+ return items
+ else:
+ return items['Volumes']
+
+ if docker_object == 'volumes':
+ items = items['Volumes']
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ option_minimal_versions = dict(
+        networks_filters=dict(docker_py_version='2.0.2'),
+ disk_usage=dict(docker_py_version='2.2.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
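+    # Constructing the client above already contacted the daemon, so any later
+    # failure can report can_talk_to_docker=True in its result.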
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py
new file mode 100644
index 00000000..1e2976be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images.
+
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+ image into a repository and archiving an image to a .tar file.
+ - Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
+ C(load), C(pull) or C(local)). This will be required from community.general 2.0.0 on.
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon, i.e. do not try to build, pull or load the image."
+ - "Before community.general 2.0.0, the value of this option will be auto-detected
+ to be backwards compatible, but a warning will be issued if it is not
+ explicitly specified. From community.general 2.0.0 on, auto-detection will be disabled
+ and this option will be made mandatory."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+          - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: yes
+ pull:
+ description:
+          - When building an image, download any updates to the FROM image in the Dockerfile.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: yes
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: no
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ - Needs Docker SDK for Python >= 3.7.0.
+ type: bool
+ target:
+ description:
+          - When building an image, specifies an intermediate build stage by
+            name to use as the final stage for the resulting image.
+ type: str
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a .tar file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a .tar file.
+ - Set I(source) to C(load) if you want to load the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used (except
+ if I(path) is specified as well, in which case building will take precedence).
+ From community.general 2.0.0 on, you have to set I(source) to C(load).
+ type: path
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ - Please use I(build.dockerfile) instead. This option will be removed in community.general 2.0.0.
+ type: str
+ force:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name. Use with state
+ C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
+ to force tagging an image.
+ - Please stop using this option, and use the more specialized force options
+ I(force_source), I(force_absent) and I(force_tag) instead.
+ - This option will be removed in community.general 2.0.0.
+ type: bool
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ - Please use I(build.http_timeout) instead. This option will be removed in community.general 2.0.0.
+ type: int
+ name:
+ description:
+ - "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
+ When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
+ - Note that image IDs (hashes) are not supported.
+ type: str
+ required: yes
+ path:
+ description:
+      - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ - Set I(source) to C(build) if you want to build the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used. From community.general 2.0.0
+ on, you have to set I(source) to C(build).
+ - Please use I(build.path) instead. This option will be removed in community.general 2.0.0.
+ type: path
+ aliases:
+ - build_path
+ pull:
+ description:
+      - When building an image, download any updates to the FROM image in the Dockerfile.
+ - Please use I(build.pull) instead. This option will be removed in community.general 2.0.0.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: no
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ - Please use I(build.rm) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: yes
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ - Please use I(build.nocache) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: no
+ repository:
+ description:
+ - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
+ format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+ - When C(absent) an image will be removed. Use the force option to un-tag and remove all images
+ matching the provided name.
+ - When C(present) check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ - By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
+ this will change in community.general 2.0.0, so to make sure that you are pulling, set I(source) to C(pull). To build
+ the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
+ to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
+ a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
+ - "*Note:* C(state=build) is DEPRECATED and will be removed in community.general 2.0.0. Specifying C(build) will behave the
+ same as C(present)."
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ - build
+ tag:
+ description:
+ - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+ I(latest).
+ - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
+ type: str
+ default: latest
+ buildargs:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ - Please use I(build.args) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ - Please use I(build.container_limits) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_tls:
+ description:
+      - "DEPRECATED. Whether to use TLS to connect to the docker daemon. Set to
+        C(encrypt) to use TLS, or to C(verify) to use TLS and verify that
+        the server's certificate is valid for the server."
+ - "*Note:* If you specify this option, it will set the value of the I(tls) or
+ I(validate_certs) parameters if not set to C(no)."
+ - Will be removed in community.general 2.0.0.
+ type: str
+ choices:
+ - 'no'
+ - 'encrypt'
+ - 'verify'
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.general.docker_image:
+ name: pacur/centos-7
+ source: pull
+
+- name: Tag and push to docker hub
+ community.general.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: yes
+ source: local
+
+- name: Tag and push to local registry
+ community.general.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: yes
+ source: local
+
+- name: Add tag latest to image
+ community.general.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+    # As 'latest' is usually already present, we need to enable overwriting of existing tags:
+ force_tag: yes
+ source: local
+
+- name: Remove image
+ community.general.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.general.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: yes
+ source: build
+
+- name: Archive image
+ community.general.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.general.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: yes
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build an image with build args
+ community.general.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.general.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.3.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ clean_dict_booleans_for_docker_api,
+ docker_version,
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+ try:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.auth import resolve_repository_name
+ else:
+ from docker.auth.auth import resolve_repository_name
+ from docker.utils.utils import parse_repository_tag
+ from docker.errors import DockerException
+ except ImportError:
+ # missing Docker SDK for Python handled in module_utils.docker.common
+ pass
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ self.archive_path = parameters.get('archive_path')
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters.get('force_source')
+ self.force_absent = parameters.get('force_absent')
+ self.force_tag = parameters.get('force_tag')
+ self.load_path = parameters.get('load_path')
+ self.name = parameters.get('name')
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters.get('repository')
+ self.rm = build.get('rm', True)
+ self.state = parameters.get('state')
+ self.tag = parameters.get('tag')
+ self.http_timeout = build.get('http_timeout')
+ self.push = parameters.get('push')
+ self.buildargs = build.get('args')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+ depending on user provided parameters.
+
+ :returns None
+ '''
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.remove_image(name, force=self.force_absent)
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, str(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+ :param name - name of the image. Type: str
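+        :param tag - tag of the image. Type: str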
+ :return None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ image = self.client.find_image(name=name, tag=tag)
+ if not image:
+ self.log("archive image: image %s:%s not found" % (name, tag))
+ return
+
+ image_name = "%s:%s" % (name, tag)
+ self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ image = self.client.get_image(image_name)
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, str(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
+ if self.client.docker_py_version >= LooseVersion('3.0.0'):
+ for chunk in image:
+ fd.write(chunk)
+ else:
+ for chunk in image.stream(2048, decode_content=False):
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
+
+ image = self.client.find_image(name=name, tag=tag)
+ if image:
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
+ registry, repo_name = resolve_repository_name(repository)
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
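+                    # Only report a change if at least one layer was actually pushed,
+                    # which the daemon signals with a 'Pushing' status line.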
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if re.search('unauthorized', str(exc)):
+ if re.search('authentication required', str(exc)):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, str(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, str(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ if not self.check_mode:
+ try:
+                    # Finding the image does not always work, especially when running a localhost registry. In those
+ # cases, if we don't set force=True, it errors.
+ image_name = name
+ if tag and not re.search(tag, name):
+ image_name = "%s:%s" % (name, tag)
+ tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
+ if not tag_status:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % str(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ params = dict(
+ path=self.build_path,
+ tag=self.name,
+ rm=self.rm,
+ nocache=self.nocache,
+ timeout=self.http_timeout,
+ pull=self.pull,
+ forcerm=self.rm,
+ dockerfile=self.dockerfile,
+ decode=True,
+ )
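+        # Older versions of the Docker SDK for Python (before 3.0.0) need stream=True
+        # to return the build output as a generator of status lines.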
+ if self.client.docker_py_version < LooseVersion('3.0.0'):
+ params['stream'] = True
+
+ if self.tag:
+ params['tag'] = "%s:%s" % (self.name, self.tag)
+ if self.container_limits:
+ params['container_limits'] = self.container_limits
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ self.buildargs[key] = to_native(value)
+ params['buildargs'] = self.buildargs
+ if self.cache_from:
+ params['cache_from'] = self.cache_from
+ if self.network:
+ params['network_mode'] = self.network
+ if self.extra_hosts:
+ params['extra_hosts'] = self.extra_hosts
+ if self.use_config_proxy:
+ params['use_config_proxy'] = self.use_config_proxy
+ # Due to a bug in docker-py, it will crash if
+ # use_config_proxy is True and buildargs is None
+ if 'buildargs' not in params:
+ params['buildargs'] = {}
+ if self.target:
+ params['target'] = self.target
+
+ build_output = []
+ for line in self.client.build(**params):
+ # line = json.loads(line)
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ build_line = line.get("stream") or line.get("status") or ''
+ build_output.append(build_line)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {"stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag)}
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ output = self.client.load_image(image_tar)
+ if output is not None:
+ # Old versions of Docker SDK of Python (before version 2.5.0) do not return anything.
+ # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+ # Note that before that commit, something else than None was returned, but that was also
+ # only introduced in a commit that first appeared in 2.5.0 (see
+ # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+ # So the above check works for every released version of Docker SDK for Python.
+ has_output = True
+ for line in output:
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ load_line = line.get("stream") or line.get("status") or ''
+ load_output.append(load_line)
+ else:
+ if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+ self.client.module.warn(
+ 'The installed version of the Docker SDK for Python does not return the loading results'
+ ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+                            ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+ ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
+ ' (2.5.0 was released in August 2017).'
+ )
+ else:
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+                            ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+            # We can only do this when we actually got some output from the Docker daemon
+ loaded_images = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ expected_image = '%s:%s' % (self.name, self.tag)
+ if expected_image not in loaded_images:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(["'%s'" % image for image in sorted(loaded_images)]), ))
+
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool'),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ )),
+ archive_path=dict(type='path'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ dockerfile=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ http_timeout=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ nocache=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ path=dict(type='path', aliases=['build_path'], removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ pull=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ rm=dict(type='bool', default=True, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
+ tag=dict(type='str', default='latest'),
+ use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ buildargs=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ )
+
+ required_if = [
+ # ('state', 'present', ['source']), -- enable in community.general 2.0.0
+ # ('source', 'build', ['build']), -- enable in community.general 2.0.0
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_build_cache_from(client):
+ return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
+
+ def detect_build_network(client):
+ return client.module.params['build'] and client.module.params['build'].get('network') is not None
+
+ def detect_build_target(client):
+ return client.module.params['build'] and client.module.params['build'].get('target') is not None
+
+ def detect_use_config_proxy(client):
+ return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
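+    # Minimum Docker SDK for Python / Docker API versions per option; each entry is only
+    # enforced when its detect_usage callback reports that the option is actually used.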
+ option_minimal_versions = dict()
+ option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
+ option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
+ option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
+ option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
+ option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.20',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if client.module.params['state'] == 'build':
+ client.module.deprecate('The "build" state has been deprecated for a long time. '
+ 'Please use "present", which has the same meaning as "build".',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+ client.module.params['state'] = 'present'
+ if client.module.params['use_tls']:
+ client.module.deprecate('The "use_tls" option has been deprecated for a long time. '
+ 'Please use the "tls" and "validate_certs" options instead.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ build_options = dict(
+ container_limits='container_limits',
+ dockerfile='dockerfile',
+ http_timeout='http_timeout',
+ nocache='nocache',
+ path='path',
+ pull='pull',
+ rm='rm',
+ buildargs='args',
+ )
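+    # Copy deprecated top-level options onto the corresponding build.* suboptions,
+    # failing if both forms were given with non-default values.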
+ for option, build_option in build_options.items():
+ default_value = None
+ if option in ('rm', ):
+ default_value = True
+ elif option in ('nocache', ):
+ default_value = False
+ if client.module.params[option] != default_value:
+ if client.module.params['build'] is None:
+ client.module.params['build'] = dict()
+ if client.module.params['build'].get(build_option, default_value) != default_value:
+ client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
+ client.module.params['build'][build_option] = client.module.params[option]
+ client.module.deprecate('Please specify build.%s instead of %s. The %s option '
+ 'has been renamed' % (build_option, option, option),
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if client.module.params['source'] == 'build':
+ if (not client.module.params['build'] or not client.module.params['build'].get('path')):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+ if client.module.params['build'].get('pull') is None:
+ client.module.deprecate("The default for build.pull is currently 'yes', but will be changed to "
+ "'no' in community.general 2.0.0. Please set build.pull explicitly to the value you need",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ client.module.params['build']['pull'] = True # TODO: change to False in community.general 2.0.0
+
+ if client.module.params['state'] == 'present' and client.module.params['source'] is None:
+ # Autodetection. To be removed in community.general 2.0.0.
+ if (client.module.params['build'] or dict()).get('path'):
+ client.module.params['source'] = 'build'
+ elif client.module.params['load_path']:
+ client.module.params['source'] = 'load'
+ else:
+ client.module.params['source'] = 'pull'
+ client.module.deprecate('The value of the "source" option was determined to be "%s". '
+ 'Please set the "source" option explicitly. Autodetection will '
+ 'be removed in community.general 2.0.0.' % client.module.params['source'],
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ if client.module.params['force']:
+ client.module.params['force_source'] = True
+ client.module.params['force_absent'] = True
+ client.module.params['force_tag'] = True
+ client.module.deprecate('The "force" option will be removed in community.general 2.0.0. Please '
+ 'use the "force_source", "force_absent" or "force_tag" option '
+ 'instead, depending on what you want to force.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_facts.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
+ image IDs can be used.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+        Look up and inspect each image name found in the names parameter.
+
+        :return: array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_image_info.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
+ image IDs can be used.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
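+
+# Not part of the upstream examples: a sketch of the "does this image exist locally?"
+# check described above, based on the length of the returned list.
+- name: Check whether the image pacur/centos-7 exists locally
+  community.general.docker_image_info:
+    name: pacur/centos-7
+  register: single_image
+
+- name: Show whether the image is present
+  ansible.builtin.debug:
+    msg: "pacur/centos-7 is {{ 'present' if single_image.images | length == 1 else 'absent' }} locally"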
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+        Look up and inspect each image name found in the names parameter.
+
+        :return: array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py
new file mode 100644
index 00000000..6522e642
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_login.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+ - Provides functionality similar to the "docker login" command.
+  - Authenticate with a docker registry and add the credentials to your local Docker config file or to the
+    credential store associated with the registry. Adding the credentials to the config file or the credential
+    store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+    and the Docker SDK for Python without needing to provide credentials.
+ - Running in check mode will perform the authentication without updating the config file.
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ email:
+ description:
+ - Does nothing, do not use.
+ - Will be removed in community.general 3.0.0.
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: no
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+      - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+      - To log out you only need the registry server, which defaults to DockerHub.
+      - Before Ansible 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "L(Python bindings for docker credentials store API) >= 0.2.1
+ (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+ - "Docker API >= 1.20"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.general.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.general.docker_login:
+ state: absent
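+
+# Not part of the upstream examples: a sketch showing that check mode authenticates
+# against the registry without updating the config file (see the description above).
+- name: Verify registry credentials without updating the Docker config file
+  community.general.docker_login:
+    registry_url: your.private.registry.io
+    username: yourself
+    password: secrets3
+  check_mode: yes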
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when state='present'
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from docker.errors import DockerException
+ from docker import auth
+
+ # Earlier versions of docker/docker-py put decode_auth
+ # in docker.auth.auth instead of docker.auth
+ if hasattr(auth, 'decode_auth'):
+ from docker.auth import decode_auth
+ else:
+ from docker.auth.auth import decode_auth
+
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ HAS_DOCKER_PY,
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+ EMAIL_REGEX,
+ RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+ try:
+ from docker.credentials.errors import StoreError, CredentialsNotFound
+ from docker.credentials import Store
+ except ImportError:
+ try:
+ from dockerpycreds.errors import StoreError, CredentialsNotFound
+ from dockerpycreds.store import Store
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+    # docker-pycreds missing, so we need to create some placeholder classes
+ # to allow instantiation.
+
+ class StoreError(Exception):
+ pass
+
+ class CredentialsNotFound(Exception):
+ pass
+
+
+class DockerFileStore(object):
+ '''
+ A custom credential store class that implements only the functionality we need to
+ update the docker config file when no credential helpers is provided.
+    update the docker config file when no credential helper is provided.
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found or an invalid config found so we'll ignore it.
+ config = dict()
+
+        # Update our internal config with whatever was loaded.
+ self._config.update(config)
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+        Otherwise raise a `CredentialsNotFound` error.
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+ # Make sure directory exists
+ dir = os.path.dirname(self._config_path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+        # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+        Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.email = parameters.get('email')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+        Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ if self.email and not re.match(EMAIL_REGEX, self.email):
+ self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+ "/%s/" % (EMAIL_REGEX))
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=self.reauthorize,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+ # If user is already logged in, then response contains password for user
+ if 'password' in response:
+ # This returns correct password if user is logged in and wrong password is given.
+            # So if it returns a password other than the one we passed, and the user did not
+            # request to reauthorize, reauthorize anyway.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=True,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+ If the authorization is not stored attempt to store authorization values via
+ the appropriate credential helper or to the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+        Return an instance of docker.credentials.Store used by the given registry,
+        or a DockerFileStore fallback if no credential helper is configured.
+
+        :return: A Store or DockerFileStore instance
+        :rtype: Union[docker.credentials.Store, DockerFileStore]
+ '''
+
+ # Older versions of docker-py don't have this feature.
+ try:
+ credstore_env = self.client.credstore_env
+ except AttributeError:
+ credstore_env = None
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ if hasattr(auth, 'get_credential_store'):
+ store_name = auth.get_credential_store(config, registry)
+ elif 'credsStore' in config:
+ store_name = config['credsStore']
+ else:
+ store_name = None
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py
new file mode 100644
index 00000000..f70cc67d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the "docker network" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: yes
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: no
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: no
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_options:
+ description:
+ - Dictionary of IPAM options.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
+ options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
+ the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
+ parameter.
+ type: dict
+ suboptions:
+ subnet:
+ description:
+          - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+          - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ options to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+    connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+    network, use the M(community.general.docker_container) module in a loop over your containers to make sure they are connected properly.
+ - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.general.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.general.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: yes
+
+- name: Create a network with driver options
+ community.general.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.general.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+
+- name: Create a network with labels
+ community.general.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.general.docker_network:
+ name: network_one
+ state: absent
+ force: yes
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ docker_version,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.types import IPAMPool, IPAMConfig
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/(12[0-8]|1[01][0-9]|[1-9]?[0-9])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
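+
+    Illustrative behaviour (a sketch based on the regexes above)::
+
+        validate_cidr('192.168.0.0/24')       # -> 'ipv4'
+        validate_cidr('fdd1:ac8c:0557::/64')  # -> 'ipv6'
+        validate_cidr('192.168.0.0')          # raises ValueError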
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+    :return: Ansible module key
+    :rtype: str
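+
+    For example (illustrative): ``'AuxiliaryAddresses'`` becomes ``'aux_addresses'``,
+    while other keys such as ``'Subnet'`` or ``'Gateway'`` are simply lower-cased.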
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
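+    # Illustrative (not from the original code): {'subnet': '10.0.0.0/8', 'gateway': None} is
+    # "essentially equal" to {'subnet': '10.0.0.0/8', 'gateway': '10.0.0.1'} because the None
+    # entry is ignored; swapping the arguments makes the comparison fail.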
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
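+        # Fold the deprecated ipam_options dict into ipam_config as a single entry
+        # if any of its values were provided.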
+ if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
+ self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
+ self.parameters.ipam_config = [self.parameters.ipam_options]
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(str(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+ if self.parameters.ipam_config is not None and self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ params = dict(
+ driver=self.parameters.driver,
+ options=self.parameters.driver_options,
+ )
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ ipam_pools.append(IPAMPool(**ipam_pool))
+ else:
+ ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add ipam parameter if a driver was specified or if IPAM parameters
+                # were specified. Omitting this parameter can significantly speed up
+                # creation; on my machine creation with this option needs ~15 seconds,
+                # and without it just a few seconds.
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools,
+ options=self.parameters.ipam_driver_options)
+ else:
+ params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools)
+
+ if self.parameters.enable_ipv6 is not None:
+ params['enable_ipv6'] = self.parameters.enable_ipv6
+ if self.parameters.internal is not None:
+ params['internal'] = self.parameters.internal
+ if self.parameters.scope is not None:
+ params['scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ params['attachable'] = self.parameters.attachable
+ if self.parameters.labels:
+ params['labels'] = self.parameters.labels
+
+ if not self.check_mode:
+ resp = self.client.create_network(self.parameters.name, **params)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.remove_network(self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ self.client.connect_container_to_network(name, self.parameters.name)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name),
+ parameter=True,
+ active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ self.client.disconnect_container_from_network(container_name, self.parameters.name)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['ansible_facts'] = {u'docker_network': network_facts}
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_options=dict(type='dict', default={}, options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ mutually_exclusive = [
+ ('ipam_config', 'ipam_options')
+ ]
+
+ option_minimal_versions = dict(
+ scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
+ labels=dict(docker_api_version='1.23'),
+ ipam_driver_options=dict(docker_py_version='2.0.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.22',
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py
new file mode 100644
index 00000000..feeff6a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about a docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.general.docker_network)
+ returns for a non-absent network.
+
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+      - When identifying an existing network, the value may be the network name or a long or short network ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on network
+ community.general.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
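+
+# The I(name) option also accepts a long or short network ID instead of a
+# name; the short ID below is only a placeholder for illustration.
+- name: Get info on network by short ID
+  community.general.docker_network_info:
+    name: 0856968545f2
+  register: result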
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if network does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(network),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py
new file mode 100644
index 00000000..12980e5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+  - Manages Docker nodes via a Swarm Manager.
+  - This module allows you to change the node's role and availability, and to modify, add or remove node labels.
+options:
+ hostname:
+ description:
+ - The hostname or ID of node as registered in Swarm.
+      - If more than one node is registered using the same hostname, the ID must be used,
+        otherwise the module will fail.
+ type: str
+ required: yes
+ labels:
+ description:
+      - User-defined key/value metadata that will be assigned as a node attribute.
+      - Label operations in this module apply to the docker swarm node specified by I(hostname).
+        Use the M(community.general.docker_swarm) module to add/modify/remove swarm cluster labels.
+      - The actual state of labels assigned to the node when the module completes its work depends on the
+        I(labels_state) and I(labels_to_remove) parameter values. See description below.
+ type: dict
+ labels_state:
+ description:
+      - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If the label provided on the list is not assigned to the node, the entry is ignored.
+ - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.general.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.general.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.general.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.general.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
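+
+# A hedged illustration of combining label operations: with the default
+# I(labels_state=merge), 'key3' is added or updated while 'key1' is removed.
+# A key listed both in I(labels) and I(labels_to_remove) keeps the value from
+# I(labels). All label names here are placeholders.
+- name: Merge a new label and remove another one in a single task
+  community.general.docker_node:
+    hostname: mynode
+    labels:
+      key3: value3
+    labels_to_remove:
+      - key1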
+'''
+
+RETURN = '''
+node:
+ description: Information about node after 'update' operation
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+        if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+            if node_info['Spec']['Role'] != self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+ changed = True
+
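+        # Label handling, mirroring the documented semantics: 'replace' swaps the
+        # whole label set (an empty I(labels) clears everything), while 'merge' only
+        # adds or updates the keys given in I(labels); I(labels_to_remove) is then
+        # applied, unless a key is also present in I(labels).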
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py
new file mode 100644
index 00000000..c01edadc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a docker swarm node from the Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the node to inspect.
+      - The list of node names to inspect.
+      - If empty then return information on all nodes in the Swarm cluster.
+      - When identifying the node, use either the hostname of the node (as registered in Swarm) or the node ID.
+ - If I(self) is C(true) then this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.general.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.general.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.general.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.general.docker_node_info:
+ self: true
+ register: result
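+
+# The returned 'nodes' value is a list even when a single node is queried; a
+# minimal, hedged sketch of consuming it (it assumes the usual C(docker node
+# inspect) layout with C(Description.Hostname)).
+- name: Show the hostname reported by each inspected node
+  ansible.builtin.debug:
+    msg: "{{ item.Description.Hostname }}"
+  loop: "{{ result.nodes }}"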
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+    - Can contain multiple entries if more than one node is provided in I(name), or if I(name) is not provided.
+ - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+ at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm
+ managers and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
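+    # Resolution order, mirroring the documented behaviour: 'self: true' wins and
+    # inspects only the node the daemon runs on; without 'name' every swarm node
+    # is inspected; otherwise each listed name/ID is inspected, and unknown nodes
+    # are skipped (skip_missing=True) instead of failing.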
+ results = []
+
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py
new file mode 100644
index 00000000..025c6130
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_prune.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows pruning of various docker objects
+
+description:
+  - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+    and C(docker volume prune) via the Docker API.
+
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ - Requires version 3.3.0 of the Docker SDK for Python or newer.
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.general.docker_prune:
+ containers: yes
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ images_filters:
+ dangling: false
+ networks: yes
+ volumes: yes
+ builder_cache: yes
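+
+# Filters are passed through to the Docker API. A hedged sketch using the
+# 'label' filter documented by Docker; the label name and value below are
+# placeholders.
+- name: Prune only volumes carrying a specific label
+  community.general.docker_prune:
+    volumes: yes
+    volumes_filters:
+      label: my_label=my_value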
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: '0'
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: '0'
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: '0'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+try:
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ min_docker_api_version='1.25',
+ min_docker_version='2.1.0',
+ )
+
+ # Version checks
+ cache_min_version = '3.3.0'
+ if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+ msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
+ client.fail(msg % (docker_version, cache_min_version))
+
+ try:
+ result = dict()
+
+ if client.module.params['containers']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+ res = client.prune_containers(filters=filters)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ res = client.prune_images(filters=filters)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ res = client.prune_networks(filters=filters)
+ result['networks'] = res.get('NetworksDeleted') or []
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ res = client.prune_volumes(filters=filters)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['builder_cache']:
+ res = client.prune_builds()
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py
new file mode 100644
index 00000000..b6ce7f28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets.
+
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds to the metadata of new secrets an 'ansible_key' label containing a hash of the secret data, which is then used
+    in future runs to test if a secret has changed. If 'ansible_key' is not present, then a secret will not be updated
+    unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+options:
+ data:
+ description:
+ - The value of the secret. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+      - "A map of key:value metadata, where both key and value are expected to be strings."
+      - If new metadata is provided, or existing metadata is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.general.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+ # file directly after reading it prevents this to happen.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the secret data
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove secret foo
+ community.general.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
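+            # A SHA-224 hex digest of the secret data; stored in the 'ansible_key'
+            # label so that later runs can detect data changes without being able
+            # to read the secret back from the swarm.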
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ for secret in secrets:
+ if secret['Spec']['Name'] == self.name:
+ return secret
+ return None
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ secret = self.get_secret()
+ if secret:
+ self.results['secret_id'] = secret['ID']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ secret = self.get_secret()
+ if secret:
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.1.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_service.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart)
+        (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart) (with I(restarted))
+        on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
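+
+# A hedged sketch of the I(files) option: the listed Compose files are loaded
+# and merged in the order given, overriding the default docker-compose.yml.
+# The file name below is a placeholder.
+- name: Run using alternate Compose files from the project directory
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - community.general.docker_compose:
+        project_src: flask
+        files:
+          - docker-compose.prod.yml
+      register: output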
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
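+# Maps the module's authentication parameters to the docker-compose CLI options
+# understood by project_from_options(); presumably consumed by
+# _get_auth_options() below when building the compose project options.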
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
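+# The two redirectors below temporarily point stdout/stderr at temp files while
+# compose runs, so that its console output can later be read back by
+# get_redirected_output() and mined for ERROR:/WARNING: lines in
+# attempt_extract_errors().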
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+ finally:
+ sys.stderr = old_fh
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+ # assume either the exception body (if present) or the last warning was the 'most'
+ # fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
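+
+# Illustrative sketch only (not called anywhere in this module): the helpers
+# above are meant to be combined so that docker-compose's console output is
+# captured and turned into a failure report. A minimal, hypothetical usage:
+#
+#   out_name, err_name = make_redirection_tempfiles()
+#   try:
+#       with stdout_redirector(out_name):
+#           with stderr_redirector(err_name):
+#               some_compose_operation()   # hypothetical callable that may raise
+#   except Exception as exc:
+#       fail_reason = get_failure_info(exc, out_name, err_name,
+#                                      msg_format="Error doing something - %s")
+#       # fail_reason carries msg, errors, warnings, module_stdout, module_stderr
+#   else:
+#       cleanup_redirection_tempfiles(out_name, err_name)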
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
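+
+    # For illustration only (values are made up): given auth_params such as
+    #   {'docker_host': 'tcp://127.0.0.1:2376', 'tls_verify': True, 'cacert_path': None}
+    # the AUTH_PARAM_MAPPING table turns them into compose CLI-style options like
+    #   {u'--host': 'tcp://127.0.0.1:2376', u'--tlsverify': True}
+    # entries whose value is None are skipped.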
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+            self.client.fail("Error scaling %s - expected int, got %s" %
+                             (service_name, str(type(self.scale[service_name]))))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py
new file mode 100644
index 00000000..d3089e20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: Manage docker stacks
+description:
+ - Manage docker stacks using the 'docker stack' command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name
+ type: str
+ required: yes
+ state:
+ description:
+      - State of the stack.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+      - List of compose definitions. Any element may be a string
+        referring to the path of the compose file on the target host,
+        or the YAML contents of a compose file nested as a dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+      - If true, the module will add the C(--prune) option to the C(docker stack deploy) command.
+        This will have docker remove the services not present in the
+        current stack definition.
+ type: bool
+ default: no
+ with_registry_auth:
+ description:
+      - If true, the module will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+        This will have docker send registry authentication details to the Swarm agents.
+ type: bool
+ default: no
+ resolve_image:
+ description:
+      - If set, the module will add the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have docker query the registry to resolve the image digest and
+        supported platforms. If not set, Docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+      - If C(>0) and I(state) is C(absent), the module will retry up to
+        I(absent_retries) times to delete the stack until all the
+        resources have been effectively deleted.
+        If the last try still reports the stack as not completely
+        removed, the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+
+notes:
+ - Return values I(out) and I(err) have been deprecated and will be removed in community.general 3.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+    Dictionary containing the differences between the C(Spec) field
+    of the stack services before and after applying the new stack
+    definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.general.docker_stack:
+ name: mystack
+ state: absent
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
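+
+# Illustrative note (names are made up): with the Go template "{{.Name}}",
+# `docker stack services mystack` prints one service name per line, e.g.
+# "mystack_web\nmystack_db\n", which the helper above turns into
+# ['mystack_web', 'mystack_db']. If the stack does not exist, docker prints
+# "Nothing found in stack: mystack" on stderr and the helper returns [].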
+
+
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
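+
+# Illustrative note (paths and names are hypothetical): with prune=true,
+# with_registry_auth=false, resolve_image unset and a single compose file,
+# the helper above runs a command equivalent to:
+#   docker stack deploy --prune --compose-file /opt/docker-compose.yml mystack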
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
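+
+# Illustrative note (stack name is hypothetical): with absent_retries=3 and
+# absent_retries_interval=1 the helper above re-runs `docker stack rm mystack`
+# up to three more times, one second apart, until docker reports
+# "Nothing found in stack: mystack", and returns the rc/stdout/stderr of the
+# last attempt.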
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
+ compose_files = []
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+                module.fail_json(msg="compose element '%s' must be a "
+                                     "string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+            module.fail_json(msg="'docker stack deploy' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
+ for k in before_after_differences.keys():
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
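+
+        # Illustration with made-up data: if the jsondiff result only contained
+        # administrative fields, e.g. roughly
+        #   {'mystack_web': {'Version': {...}, 'UpdatedAt': '...'}}
+        # the loop above strips 'Version' and 'UpdatedAt' and then drops the now
+        # empty 'mystack_web' entry, so such changes alone do not make the
+        # module report changed=True.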
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+                module.fail_json(msg="'docker stack rm' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ err=err, # Deprecated
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py
new file mode 100644
index 00000000..74a3648d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+version_added: "1.0.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of stacks present
+    on the target node.
+ sample: >
+ "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
+ returned: always
+ type: list
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
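+
+# Illustrative note: with --format={{json .}} each stack is printed as one JSON
+# object per line (see the sample under RETURN above). main() below feeds every
+# non-empty line through json.loads() to build the 'results' list returned by
+# the module.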
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=False
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py
new file mode 100644
index 00000000..966a4266
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_stack_task_info.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information of the tasks on a docker stack
+description:
+ - Retrieve information on docker stacks tasks using the C(docker stack) command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: yes
+version_added: "1.1.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of tasks associated
+    with a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=False
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py
new file mode 100644
index 00000000..52f37643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm.py
@@ -0,0 +1,675 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+ - Amount of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+ - Amount of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+      - Automatic expiry for node certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(community.general.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(community.general.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.general.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.general.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.general.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.general.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.general.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.general.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ community.general.docker_swarm:
+ state: inspect
+ register: swarm_info
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description: Token to create a new *worker* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description: Token to create a new *manager* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
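+
+    # Illustration (values are made up): if the playbook sets election_tick=5
+    # while the running swarm reports 10, compare_to_active() records
+    #   differences.add('election_tick', parameter=5, active=10)
+    # and SwarmManager later turns the tracker into a before/after view via
+    # DifferenceTracker.get_before_after() when diff mode or debug is enabled.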
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+            self.client.fail("Can not remove the node. The node status is ready, not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py
new file mode 100644
index 00000000..f6d5fad1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster
+
+description:
+ - Retrieves facts about a Docker Swarm.
+  - Returns lists of the names of swarm objects - nodes, services, and tasks.
+ - The output differs depending on API version available on docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does, however, return boolean flags on both error and success which indicate whether
+    the docker daemon can be communicated with, whether it is in Swarm mode, and
+    whether it is a Swarm Manager node.
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: no
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: no
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+      - Whether to list the swarm tasks.
+ type: bool
+ default: no
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+      - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
+        contain verbose information about objects matching the full output of the API method.
+      - For details see the documentation of your version of the Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the I(_info)
+        modules for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.general.docker_swarm_info:
+ ignore_errors: yes
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.general.docker_swarm_info:
+ unlock_key: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+ - The module can only run successfully if this one is C(true).
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+ - Contains tokens to connect to the Swarm.
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+ - List of dict objects containing the basic information about each node.
+ Keys match the C(docker node ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(yes)
+ type: list
+ elements: dict
+services:
+ description:
+ - List of dict objects containing the basic information about each service.
+ Keys match the C(docker service ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(services) is C(yes)
+ type: list
+ elements: dict
+tasks:
+ description:
+ - List of dict objects containing the basic information about each task.
+ Keys match the C(docker service ps) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(yes)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
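
As an illustration of the shape this helper produces, here is a sketch of one non-verbose node record; all values below are invented, and the keys mirror the columns of `docker node ls`:

    # Hypothetical output of get_essential_facts_nodes() for a leader node
    # (values are made up for illustration only).
    node_record = {
        'ID': 'abcd1234wxyz',
        'Hostname': 'swarm-manager-1',
        'Status': 'ready',
        'Availability': 'active',
        'ManagerStatus': 'Leader',    # overrides the plain Reachability value for the leader
        'EngineVersion': '19.03.5',
    }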
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Returning container ID to not trigger another connection to host
+ # Container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+ # Number of replicas has to be updated in the calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py
new file mode 100644
index 00000000..7c6f23a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service.py
@@ -0,0 +1,3004 @@
+#!/usr/bin/python
+#
+# (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+short_description: Docker swarm service
+description:
+ - Manages docker services via a swarm manager node.
+options:
+ args:
+ description:
+ - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+ - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(placement.constraints) instead.
+ type: list
+ elements: str
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+ - If passed a list, each item needs to be in the format C(KEY=VALUE).
+ - If passed a dictionary, values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+ - If a variable is also present in I(env), then the I(env) value will override it.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: bool
+ default: no
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Requires API version >= 1.25.
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+ - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ version_added: '0.2.0'
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ limit_cpu:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.cpus) instead.
+ type: float
+ limit_memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.memory) instead.
+ type: str
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ log_driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.driver) instead.
+ type: str
+ log_driver_options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.options) instead.
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+ - List of the service network names or dictionaries.
+ - When passed as dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read-only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ reserve_cpu:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.cpus) instead.
+ type: float
+ reserve_memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.memory) instead.
+ type: str
+ resolve_image:
+ description:
+ - Whether the current image digest should be resolved from the registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ restart_policy:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.condition) instead.
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ restart_policy_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.max_attempts) instead.
+ type: int
+ restart_policy_delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.delay) instead.
+ type: raw
+ restart_policy_window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.window) instead.
+ type: raw
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ update_delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(10).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.delay) instead.
+ type: raw
+ update_parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(1).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.parallelism) instead.
+ type: int
+ update_failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.failure_action) instead.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ update_monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.monitor) instead.
+ type: raw
+ update_max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.max_failure_ratio) instead.
+ type: float
+ update_order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.order) instead.
+ type: str
+ choices:
+ - stop-first
+ - start-first
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.general.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or timeouts, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
+- name: Remove service
+ community.general.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import operator
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+ Will return a list of "KEY=VALUE" items. Supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
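
A minimal standalone sketch of the precedence rule implemented above; docker's parse_env_file() is replaced by a plain dict so the snippet runs on its own, and the file contents are invented:

    # Stand-in for parse_env_file('/path/to/app.env'); assume the file defines these two variables.
    parsed_env_file = {'FOO': 'from_file', 'BAR': '1'}
    explicit_env = {'FOO': 'explicit'}          # value of the module's 'env' option, given as a dict

    merged = dict(parsed_env_file)
    merged.update((k, str(v)) for k, v in explicit_env.items())   # explicit variables win

    env_list = sorted('%s=%s' % (k, v) for k, v in merged.items())
    print(env_list)   # ['BAR=1', 'FOO=explicit']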
+
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
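
A small standalone sketch of how the helper above normalises the networks option, using an invented name-to-id mapping; in the module the real mapping is built from the daemon's network list:

    # Assumed to have been resolved from the docker daemon beforehand.
    network_ids = {'mynetwork': 'net-12345'}

    networks_option = [
        'mynetwork',                                                            # plain name
        {'name': 'mynetwork', 'aliases': ['web'], 'options': {'foo': 'bar'}},   # dictionary form
    ]

    parsed = []
    for network in networks_option:
        entry = {'name': network} if isinstance(network, str) else dict(network)
        name = entry.pop('name')
        entry['id'] = network_ids[name]    # an unknown name would raise a KeyError here
        parsed.append(entry)
    print(parsed)
    # [{'id': 'net-12345'}, {'aliases': ['web'], 'options': {'foo': 'bar'}, 'id': 'net-12345'}]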
+
+
+def get_nanoseconds_from_raw_option(name, value):
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
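
A rough standalone illustration of the three input forms this helper accepts. convert_duration_to_nanosecond() lives in the docker module_utils, so a deliberately simplified stand-in that only understands minutes and seconds is used here:

    def to_ns(value):
        # Simplified stand-in for convert_duration_to_nanosecond(); the real helper also handles us, ms and h.
        units, total, digits = {'m': 60 * 10**9, 's': 10**9}, 0, ''
        for char in value:
            if char.isdigit():
                digits += char
            else:
                total += int(digits) * units[char]
                digits = ''
        return total

    for raw in (5000000000, '5000000000', '1m30s'):
        ns = raw if isinstance(raw, int) else (int(raw) if raw.isdigit() else to_ns(raw))
        print(raw, '->', ns)
    # 5000000000 -> 5000000000
    # 5000000000 -> 5000000000
    # 1m30s -> 90000000000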
+
+
+def get_value(key, values, default=None):
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict while
+ ignoring keys in old_dict which are None in new_dict.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
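
Two illustrative calls with made-up data, assuming the helper above is in scope: keys that are None in the desired dictionary are treated as "keep whatever is there", so only explicitly set values can trigger a change.

    print(has_dict_changed({'uid': None, 'mode': 0o444}, {'uid': '0', 'mode': 0o444}))   # False
    print(has_dict_changed({'uid': '33', 'mode': 0o444}, {'uid': '0', 'mode': 0o444}))   # True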
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check whether two lists have differences. Sort lists by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting list'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
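
A few illustrative calls with made-up data, assuming the helper above is in scope; ordering is ignored by default, and dictionaries are compared with has_dict_changed(), so None values in the new list do not count as differences:

    print(has_list_changed(['b', 'a'], ['a', 'b']))        # False - sorted before comparison
    print(has_list_changed(['a'], ['a', 'b']))             # True  - different lengths
    print(has_list_changed([{'name': 'x', 'uid': None}],
                           [{'name': 'x', 'uid': '0'}],
                           sort_key='name'))               # False - None entries are ignored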
+
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return (
+ self.docker_api_version >= LooseVersion('1.25') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ default=params['restart_policy']
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ default=params['restart_policy_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ default=params['restart_policy_attempts']
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ default=params['restart_policy_window']
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ default=params['update_parallelism']
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ default=params['update_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ default=params['update_failure_action']
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ default=params['update_monitor']
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ default=params['update_max_failure_ratio']
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ default=params['update_order']
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ default=params['log_driver']
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ default=params['log_driver_options']
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ default=params['limit_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ default=params['limit_memory']
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ default=params['reserve_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ default=params['reserve_memory']
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement,
+ default=params['constraints']
+ )
+
+ preferences = placement.get('preferences')
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
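+        # 'command' may be given either as a single string (split with shlex) or as a list of strings.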
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
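+        # Any change to the TaskTemplate's force_update value triggers a redeploy even when
+        # nothing else changed; the current timestamp (dot stripped) serves as an always-changing integer.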
+ if ap['force_update']:
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
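+        # replicas defaults to -1, meaning "keep the current replica count" (or 1 for a new service).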
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+    def has_healthcheck_changed(self, old_service):
+        if self.healthcheck_disabled is False and self.healthcheck is None:
+            return False
+        if self.healthcheck_disabled:
+            if old_service.healthcheck is None:
+                return False
+            if old_service.healthcheck.get('test') == ['NONE']:
+                return False
+        return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
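+        # If the requested image is not pinned to a digest, compare repository:tag only
+        # and strip the digest from the currently deployed image.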
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.placement_preferences is not None:
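+            # Convert e.g. {'spread': 'node.labels.rack'} into the API form
+            # {'Spread': {'SpreadDescriptor': 'node.labels.rack'}}.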
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ resources_args = {}
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
+ if self.mode == 'global':
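+            # Global services do not take a replica count.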
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'PublishedPort': port['published_port'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
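+            # Docker may report extra hosts either as "hostname:IP" or as "IP hostname";
+            # normalize both forms to (IP, hostname) pairs.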
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': int(port['PublishedPort']),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
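+        # Networks may be attached to the TaskTemplate (newer API versions) or, for older
+        # services, directly to the service spec.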
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+        # Prior to Docker SDK 4.0.0, update_service() did not return warnings, so there is nothing to report in that case.
+ # (see https://github.com/docker/docker-py/pull/2272)
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
+ def get_image_digest(self, name, resolve=False):
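+        # When resolve is requested, pin the image to its digest form (repo:tag@digest)
+        # using the registry's distribution data.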
+ if (
+ not name
+ or not resolve
+ ):
+ return name
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret ids by looking them up by name
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config ids by looking them up by name
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
+ def run(self):
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, e)
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], e)
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % e
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ update_failure_action = client.module.params['update_failure_action']
+ failure_action = rollback_config_failure_action or update_failure_action
+ return failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', options=dict(
+ secret_id=dict(type='str'),
+ secret_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ log_driver=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ log_driver_options=dict(type='dict', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=True),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ )),
+ constraints=dict(type='list', elements='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ limit_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ limit_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ reserve_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ reserve_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ restart_policy=dict(
+ type='str',
+ choices=['none', 'on-failure', 'any'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ restart_policy_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_attempts=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_window=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_parallelism=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ update_monitor=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_max_failure_ratio=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_order=dict(
+ type='str',
+ choices=['stop-first', 'start-first'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ )
+
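+    # Minimum Docker SDK for Python / Docker API versions required for each option; the
+    # 'specials' entries additionally detect whether the option is actually in use.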
+ option_minimal_versions = dict(
+ constraints=dict(docker_py_version='2.4.0'),
+ dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ # specials
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ docker_api_version='1.25',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py
new file mode 100644
index 00000000..130be7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_swarm_service_info.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+  - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.general.docker_swarm_service_info:
+ name: myservice
+ register: result
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+    - Will be C(none) if the service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py
new file mode 100644
index 00000000..dca92df5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: yes
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+ type: dict
+
+ labels:
+ description:
+      - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ force:
+ description:
+ - With state C(present) causes the volume to be deleted and recreated if the volume already
+        exists and the driver, driver options or labels differ. This will cause any data in the existing
+ volume to be lost.
+ - Deprecated. Will be removed in community.general 2.0.0. Set I(recreate) to C(options-changed) instead
+        for the same behavior as setting I(force) to C(yes).
+ type: bool
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause **any data in the existing volume
+ to be lost!** The volume will be deleted and a new volume with the same name will be
+ created.
+ - The value C(always) forces the volume to be always recreated.
+ - The value C(never) makes sure the volume will not be recreated.
+ - The value C(options-changed) makes sure the volume will be recreated if the volume
+        already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.9.0"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.general.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.general.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.general.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
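+
+# A minimal sketch (not part of the original examples), using the recreate and labels
+# options documented above; the volume name and label values are placeholders, and
+# recreating a volume discards any data stored in it.
+- name: Recreate a labelled volume whenever its driver options or labels change
+  community.general.docker_volume:
+    name: volume_three
+    labels:
+      environment: test
+    recreate: options-changed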
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_volume). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ AnsibleDockerClient,
+ DifferenceTracker,
+ RequestException,
+)
+from ansible.module_utils.six import iteritems, text_type
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.force = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
+ if self.force is not None:
+ if self.recreate != 'never':
+ client.fail('Cannot use the deprecated "force" '
+ 'option when "recreate" is set. Please stop '
+ 'using the force option.')
+ client.module.warn('The "force" option of docker_volume has been deprecated '
+ 'in Ansible 2.8. Please use the "recreate" '
+ 'option, which provides the same functionality as "force".')
+ self.recreate = 'options-changed' if self.force else 'never'
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.volumes()
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing volume.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
+ existing_labels = self.existing_volume.get('Labels', {})
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ params = dict(
+ driver=self.parameters.driver,
+ driver_opts=self.parameters.driver_options,
+ )
+
+ if self.parameters.labels is not None:
+ params['labels'] = self.parameters.labels
+
+ resp = self.client.create_volume(self.parameters.volume_name, **params)
+ self.existing_volume = self.client.inspect_volume(resp['Name'])
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.remove_volume(self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['ansible_facts'] = {u'docker_volume': volume_facts}
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ # "The docker server >= 1.9.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py
new file mode 100644
index 00000000..c00c2425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/docker/docker_volume_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: yes
+ aliases:
+ - volume_name
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+ community.general.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Will be C(none) if volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.inspect_volume(volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % exc)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if volume else False),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py
new file mode 100644
index 00000000..52ca18fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: This module manages objects/buckets in Google Cloud Storage.
+description:
+ - This module lets users manage their objects/buckets in Google Cloud Storage. It supports upload and download operations, can set some
+ canned permissions, and can also retrieve object URLs for use in playbooks as well as the string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+ - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+ - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only
+ available when the ACL on the object is public-read.
+ aliases: [expiry]
+ mode:
+ type: str
+ description:
+ - Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and
+ delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use else where in your playbook
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.general.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.general.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.general.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
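+# A hedged sketch (not part of the original examples): retrieve a time-limited
+# download URL for an existing object using the documented get_url mode and the
+# expiration option; the bucket and object names are placeholders.
+- name: Get a download URL that expires after 10 minutes
+  community.general.gc_storage:
+    bucket: mybucket
+    object: key.txt
+    mode: get_url
+    expiration: 600
+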
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def grant_check(module, gs, obj):
+ try:
+ acp = obj.get_acl()
+ if module.params.get('permission') == 'public-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ if not grant:
+ obj.set_acl('public-read')
+ module.exit_json(changed=True, result="The object's permission has been set to public-read")
+ if module.params.get('permission') == 'authenticated-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ if not grant:
+ obj.set_acl('authenticated-read')
+ module.exit_json(changed=True, result="The object's permission has been set to authenticated-read")
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ return True
+
+
+def key_check(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if key_check:
+ grant_check(module, gs, key_check)
+ return True
+ else:
+ return False
+
+
+def keysum(module, gs, bucket, obj):
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+ module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
+ return md5_remote
+
+
+def bucket_check(module, gs, bucket):
+ try:
+ result = gs.lookup(bucket)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if result:
+ grant_check(module, gs, result)
+ return True
+ else:
+ return False
+
+
+def create_bucket(module, gs, bucket):
+ try:
+ bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
+ bucket.set_acl(module.params.get('permission'))
+ bucket.configure_versioning(module.params.get('versioning'))
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if bucket:
+ return True
+
+
+def delete_bucket(module, gs, bucket):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket_contents = bucket.list()
+ for key in bucket_contents:
+ bucket.delete_key(key.name)
+ bucket.delete()
+ return True
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_key(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket.delete_key(obj)
+ module.exit_json(msg="Object deleted from bucket ", changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def create_dirkey(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_string('')
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def transform_headers(headers):
+ """
+ Boto url-encodes values unless we convert the value to `str`, so doing
+ this prevents 'max-age=100000' from being converted to "max-age%3D100000".
+
+ :param headers: Headers to convert
+ :type headers: dict
+ :rtype: dict
+
+ """
+
+ for key, value in headers.items():
+ headers[key] = str(value)
+ return headers
+
+
+def upload_gsfile(module, gs, bucket, obj, src, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_filename(
+ filename=src,
+ headers=transform_headers(module.params.get('headers'))
+ )
+ key.set_acl(module.params.get('permission'))
+ url = key.generate_url(expiry)
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsfile(module, gs, bucket, obj, dest):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ key.get_contents_to_filename(dest)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsstr(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ contents = key.get_contents_as_string()
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def get_download_url(module, gs, bucket, obj, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ url = key.generate_url(expiry)
+ module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def handle_get(module, gs, bucket, obj, overwrite, dest):
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(dest)
+ if md5_local == md5_remote:
+ module.exit_json(changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+ else:
+ download_gsfile(module, gs, bucket, obj, dest)
+
+
+def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
+ # Let's check to see if the bucket exists to get ground truth.
+ bucket_rc = bucket_check(module, gs, bucket)
+ key_rc = key_check(module, gs, bucket, obj)
+
+ # Let's check key state. If it exists, compute the etag md5sum.
+ if bucket_rc and key_rc:
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(src)
+ if md5_local == md5_remote:
+ module.exit_json(msg="Local and remote object are identical", changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+ else:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ if not bucket_rc:
+ create_bucket(module, gs, bucket)
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ # If bucket exists but key doesn't, just upload.
+ if bucket_rc and not key_rc:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+
+def handle_delete(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ if bucket and obj:
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, obj):
+ module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
+ else:
+ module.exit_json(msg="Object does not exist.", changed=False)
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ else:
+ module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
+def handle_create(module, gs, bucket, obj):
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ create_dirkey(module, gs, bucket, dirobj)
+ else:
+ create_bucket(module, gs, bucket)
+ create_dirkey(module, gs, bucket, dirobj)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default=False, type='bool')
+ ),
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.9+ is required for this module. Try: pip install --upgrade boto')
+
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
+ gs_secret_key = module.params.get('gs_secret_key')
+ gs_access_key = module.params.get('gs_access_key')
+ overwrite = module.params.get('overwrite')
+
+ if mode == 'put':
+ if not src or not obj:
+ module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
+ if mode == 'get':
+ if not dest or not obj:
+ module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
+
+ try:
+ gs = boto.connect_gs(gs_access_key, gs_secret_key)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if mode == 'get':
+ if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
+ module.fail_json(msg="Target bucket/key cannot be found", failed=True)
+ if not path_check(dest):
+ download_gsfile(module, gs, bucket, obj, dest)
+ else:
+ handle_get(module, gs, bucket, obj, overwrite, dest)
+
+ if mode == 'put':
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist", failed=True)
+ handle_put(module, gs, bucket, obj, overwrite, src, expiry)
+
+ # Support for deleting an object if we have both params.
+ if mode == 'delete':
+ handle_delete(module, gs, bucket, obj)
+
+ if mode == 'create':
+ handle_create(module, gs, bucket, obj)
+
+ if mode == 'get_url':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ get_download_url(module, gs, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+ # --------------------------- Get the String contents of an Object -------------------------
+ if mode == 'get_str':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ download_gsstr(module, gs, bucket, obj)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py
new file mode 100644
index 00000000..b97377b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_record.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_resource_record_set) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given resource record should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ type: str
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ zone_id:
+ type: str
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ type:
+ type: str
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ type: list
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(False), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+ field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ type: int
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ type: bool
+ default: 'no'
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_zone).
+ - This module's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+- name: Create an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+- name: Update an existing record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+- name: Remove an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+- name: Create a CNAME record. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.'
+
+- name: Create an MX record with a custom TTL. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.'
+
+- name: Create multiple A records with the same name
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Change the value of an existing record with multiple record_data
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Safely remove a multi-line record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Unconditionally remove a record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+- name: Create an AAAA record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+- name: Create a PTR record
+ community.general.gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+- name: Create an NS record
+ community.general.gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+- name: Create a TXT record
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+ description: Whether the module was allowed to overwrite the record
+ returned: success
+ type: bool
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: str
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: str
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: str
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+ description: The dns name of the zone
+ returned: success
+ type: str
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: str
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+ """Creates or overwrites a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ record_name = module.params['record']
+ record_type = module.params['type']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+ data = dict(ttl=ttl, rrdatas=record_data)
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # If we found a record, we need to check if the values match.
+ if record is not None:
+ # If the record matches, we obviously don't have to change anything.
+ if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ return False
+
+ # The record doesn't match, so we need to check if we can overwrite it.
+ if not overwrite:
+ module.fail_json(
+ msg='cannot overwrite existing record, overwrite protection enabled',
+ changed=False
+ )
+
+ # The record either doesn't exist, or it exists and we can overwrite it.
+ if record is None and not module.check_mode:
+ # There's no existing record, so we'll just create it.
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The resource record name and type are valid by themselves, but
+ # not when combined (e.g., an 'A' record with "www.example.com"
+ # as its value).
+ module.fail_json(
+ msg='value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed=False
+ )
+
+ elif error.code == 'cnameResourceRecordSetConflict':
+ # We're attempting to create a CNAME resource record when we
+ # already have another type of resource record with the name
+ # domain name.
+ module.fail_json(
+ msg="non-CNAME resource record already exists: %s" % record_name,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+ elif record is not None and not module.check_mode:
+ # The Google provider in libcloud doesn't support updating a record in
+ # place, so if the record already exists, we need to delete it and
+ # recreate it using the new information.
+ gcdns.delete_record(record)
+
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError:
+ # Something blew up when creating the record. This will usually be a
+ # result of invalid value data in the new record. Unfortunately, we
+ # already changed the state of the record by deleting the old one,
+ # so we'll try to roll back before failing out.
+ try:
+ gcdns.create_record(record.name, record.zone, record.type, record.data)
+ module.fail_json(
+ msg='error updating record, the original record was restored',
+ changed=False
+ )
+ except LibcloudError:
+ # We deleted the old record, couldn't create the new record, and
+ # couldn't roll back. That really sucks. We'll dump the original
+ # record to the failure output so the user can restore it if
+ # necessary.
+ module.fail_json(
+ msg='error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed=True)
+
+ return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg='cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed=False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+ # To create a zone, we need to supply a domain name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on domain
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg='TTL cannot be less than zero, got: %d' % ttl,
+ changed=False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid A record value, got: %s' % value,
+ changed=False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid AAAA record value, got: %s' % value,
+ changed=False
+ )
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg='CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg="wildcard NS records not allowed, got: %s" % record_name,
+ changed=False
+ )
+
+ # Values for txt records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+ if value[0] != '"' and value[-1] != '"':
+ module.fail_json(
+ msg='TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed=False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg='CNAME records cannot match the zone name',
+ changed=False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg='cannot delete root NS records',
+ changed=False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg='cannot update existing root NS records',
+ changed=False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg='non-root SOA records are not permitted, got: %s' % record_name,
+ changed=False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ record=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(type='str'),
+ zone_id=dict(type='str'),
+ type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data=dict(aliases=['value'], type='list'),
+ ttl=dict(default=300, type='int'),
+ overwrite=dict(default=False, type='bool'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ required_if=[
+ ('state', 'present', ['record_data']),
+ ('overwrite', False, ['record_data'])
+ ],
+ required_one_of=[['zone', 'zone_id']],
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
+
+ json_output = dict(
+ state=state,
+ record=record_name,
+ zone=zone_name,
+ zone_id=zone_id,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl,
+ overwrite=module.boolean(module.params['overwrite'])
+ )
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if zone_name is not None and zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # Build a connection object that we can use to connect with Google Cloud
+ # DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check that the zone we're creating a record for actually
+ # exists.
+ zone = _get_zone(gcdns, zone_name, zone_id)
+ if zone is None and zone_name is not None:
+ module.fail_json(
+ msg='zone name was not found: %s' % zone_name,
+ changed=False
+ )
+ elif zone is None and zone_id is not None:
+ module.fail_json(
+ msg='zone id was not found: %s' % zone_id,
+ changed=False
+ )
+
+ # Populate the returns with the actual zone information.
+ json_output['zone'] = zone.domain
+ json_output['zone_id'] = zone.id
+
+ # We also need to check if the record we want to create or remove actually
+ # exists.
+ try:
+ record = _get_record(gcdns, zone, record_type, record_name)
+ except InvalidRequestError:
+ # We gave Google Cloud DNS an invalid DNS record name.
+ module.fail_json(
+ msg='record name is invalid: %s' % record_name,
+ changed=False
+ )
+
+ _additional_sanity_checks(module, zone)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if record is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ record=record.data['name'],
+ type=record.data['type'],
+ record_data=record.data['rrdatas'],
+ ttl=record.data['ttl']
+ )
+ diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+ # Create, remove, or modify the record.
+ if state == 'present':
+ diff['after'] = dict(
+ record=record_name,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl
+ )
+ diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+ changed = create_record(module, gcdns, zone, record)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_record(module, gcdns, record)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py
new file mode 100644
index 00000000..6f66b5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcdns_zone.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_managed_zone) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given zone should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ type: str
+ description:
+ - An arbitrary text string to use for the zone description.
+ default: ""
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters.
+ community.general.gcdns_zone: zone=example.com
+
+# Zone removal example.
+- name: Remove a zone.
+ community.general.gcdns_zone: zone=example.com state=absent
+
+# Zone creation with description
+- name: Creating a zone with a description
+ community.general.gcdns_zone: zone=example.com description="This is an awesome zone"
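+
+# A hedged sketch (not in the original examples): creating a zone while passing the
+# documented credential options explicitly. The service account e-mail, credentials
+# file path and project ID below are placeholders.
+- name: Create a zone with explicit credentials
+  community.general.gcdns_zone:
+    zone: example.com
+    service_account_email: account@example-project.iam.gserviceaccount.com
+    credentials_file: /path/to/credentials.json
+    project_id: example-project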
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: str
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: str
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: str
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+
+def create_zone(module, gcdns, zone):
+ """Creates a new Google Cloud DNS zone."""
+
+ description = module.params['description']
+ extra = dict(description=description)
+ zone_name = module.params['zone']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ # If we got a zone back, then the domain exists.
+ if zone is not None:
+ return False
+
+ # The zone doesn't exist yet.
+ try:
+ if not module.check_mode:
+ gcdns.create_zone(domain=zone_name, extra=extra)
+ return True
+
+ except ResourceExistsError:
+ # The zone already exists. We checked for this already, so either
+ # Google is lying, or someone was a ninja and created the zone
+ # within milliseconds of us checking for its existence. In any case,
+ # the zone has already been created, so we have nothing more to do.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The zone name or a parameter might be completely invalid. This is
+ # typically caused by an illegal DNS name (e.g. foo..com).
+ module.fail_json(
+ msg="zone name is not a valid DNS name: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'managedZoneDnsNameNotAvailable':
+ # Google Cloud DNS will refuse to create zones with certain domain
+ # names, such as TLDs, ccTLDs, or special domain names such as
+ # example.com.
+ module.fail_json(
+ msg="zone name is reserved or already in use: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'verifyManagedZoneDnsNameOwnership':
+ # This domain name needs to be verified before Google will create
+ # it. This occurs when a user attempts to create a zone which shares
+ # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+ module.fail_json(
+ msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def remove_zone(module, gcdns, zone):
+ """Removes an existing Google Cloud DNS zone."""
+
+ # If there's no zone, then we're obviously done.
+ if zone is None:
+ return False
+
+ # An empty zone will have two resource records:
+ # 1. An NS record with a list of authoritative name servers
+ # 2. An SOA record
+ # If any additional resource records are present, Google Cloud DNS will
+ # refuse to remove the zone.
+ if len(zone.list_records()) > 2:
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ try:
+ if not module.check_mode:
+ gcdns.delete_zone(zone)
+ return True
+
+ except ResourceNotFoundError:
+ # When we performed our check, the zone existed. It may have been
+ # deleted by something else. It's gone, so whatever.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'containerNotEmpty':
+ # When we performed our check, the zone existed and was empty. In
+ # the milliseconds between the check and the removal command,
+ # records were added to the zone.
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def _get_zone(gcdns, zone_name):
+ """Gets the zone object for a given domain name."""
+
+ # To create a zone, we need to supply a zone name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on zone
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _sanity_check(module):
+ """Run module sanity checks."""
+
+ zone_name = module.params['zone']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support the creation of TLDs.
+ if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+ module.fail_json(
+ msg='cannot create top-level domain: %s' % zone_name,
+ changed=False
+ )
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ zone=dict(required=True, aliases=['name'], type='str'),
+ description=dict(default='', type='str'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ zone_name = module.params['zone']
+ state = module.params['state']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ json_output = dict(
+ state=state,
+ zone=zone_name,
+ description=module.params['description']
+ )
+
+ # Build a connection object that we can use to connect with Google
+ # Cloud DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check if the zone we're attempting to create already exists.
+ zone = _get_zone(gcdns, zone_name)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if zone is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ zone=zone.domain,
+ description=zone.extra['description']
+ )
+ diff['before_header'] = zone_name
+
+ # Create or remove the zone.
+ if state == 'present':
+ diff['after'] = dict(
+ zone=zone_name,
+ description=module.params['description']
+ )
+ diff['after_header'] = zone_name
+
+ changed = create_zone(module, gcdns, zone)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_zone(module, gcdns, zone)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py
new file mode 100644
index 00000000..7e658786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/compute) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_instance) instead.
+options:
+ image:
+ type: str
+ description:
+ - image string to use for the instance (default will follow latest
+ stable debian image)
+ default: "debian-8"
+ image_family:
+ type: str
+ description:
+ - image family from which to select the image. The most recent
+ non-deprecated image in the family will be used.
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+ instance_names:
+ type: str
+ description:
+ - a comma-separated list of instance names to create or destroy
+ machine_type:
+ type: str
+ description:
+ - machine type to use for the instance, use 'n1-standard-1' by default
+ default: "n1-standard-1"
+ metadata:
+ type: str
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ name:
+ type: str
+ description:
+ - either a name of a single instance or when used with 'num_instances',
+ the base name of a cluster of nodes
+ aliases: ['base_name']
+ num_instances:
+ type: int
+ description:
+ - can be used with 'name', specifies
+ the number of nodes to provision using 'name'
+ as a base name
+ network:
+ type: str
+ description:
+ - name of the network, 'default' will be used if not specified
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - name of the subnetwork in which the instance should be created
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ type: bool
+ default: 'no'
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ zone:
+ type: str
+ description:
+ - the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
+ default: "us-central1-a"
+ ip_forward:
+ description:
+ - set to C(yes) if the instance can forward ip packets (useful for
+ gateways)
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
+ default: "ephemeral"
+ disk_auto_delete:
+ description:
+ - if set, the boot disk will be removed after instance destruction
+ type: bool
+ default: 'yes'
+ preemptible:
+ description:
+ - if set to C(yes), instances will be preemptible and time-limited.
+ (requires libcloud >= 0.20.0)
+ type: bool
+ disk_size:
+ type: int
+ description:
+ - The size of the boot disk created for this instance (in GB)
+ default: 10
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - Either I(instance_names) or I(name) is required.
+ - JSON credentials strongly preferred.
+author:
+ - Eric Johnson (@erjohnso) <erjohnso@google.com>
+ - Tom Melendez (@supertom) <supertom@google.com>
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 8 instance in the
+# us-central1-a Zone of the n1-standard-1 machine type.
+# Create multiple instances by specifying multiple names, separated by
+# commas in the instance_names field
+# (e.g. my-test-instance1,my-test-instance2)
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single instance of an image from the "my-base-image" image family
+# in the us-central1-a Zone of the n1-standard-1 machine type.
+# This image family is in the "my-other-project" GCP project.
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image_family: my-base-image
+ external_projects:
+ - my-other-project
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single Debian 8 instance in the us-central1-a Zone
+# Use existing disks, custom network/subnetwork, set service account permissions
+# add tags and metadata.
+ - community.general.gce:
+ instance_names: my-test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ state: present
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+ tags:
+ - http-server
+ - my-other-tag
+ disks:
+ - name: disk-2
+ mode: READ_WRITE
+ - name: disk-3
+ mode: READ_ONLY
+ disk_auto_delete: false
+ network: foobar-network
+ subnetwork: foobar-subnetwork-1
+ preemptible: true
+ ip_forward: true
+ service_account_permissions:
+ - storage-full
+ - taskqueue
+ - bigquery
+ - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
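+# The state option also accepts started/stopped. A minimal sketch (not part of
+# the original examples) of stopping a running instance:
+  - community.general.gce:
+      instance_names: my-test-instance1
+      zone: us-central1-a
+      state: stopped
+      service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+      credentials_file: "/path/to/your-key.json"
+      project_id: "your-project-name"
+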
+---
+# Example Playbook
+- name: Compute Engine Instance Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create multiple instances
+ # Basic provisioning example. Create multiple Debian 8 instances in the
+ # us-central1-a Zone of n1-standard-1 machine type.
+ community.general.gce:
+ instance_names: test1,test2,test3
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ metadata : '{ "startup-script" : "apt-get update" }'
+ register: gce
+
+ - name: Save host data
+ ansible.builtin.add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: gce_instances_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for SSH for instances
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 30
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Configure Hosts
+ hosts: gce_instances_ips
+ become: yes
+ become_method: sudo
+ roles:
+ - my-role-one
+ - my-role-two
+ tags:
+ - config
+
+ - name: Delete test-instances
+ # Basic termination of instance.
+ community.general.gce:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ instance_names: "{{ gce.instance_names }}"
+ zone: us-central1-a
+ state: absent
+ tags:
+ - delete
+'''
+
+import socket
+import logging
+
+try:
+ from ast import literal_eval
+
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+from ansible_collections.community.general.plugins.module_utils.gcp import get_valid_location
+from ansible.module_utils.six.moves import reduce
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except Exception:
+ netname = None
+ try:
+ subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+ except Exception:
+ subnetname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return ({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'subnetwork': subnetname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names, number, lc_zone):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+ number: number of instances to create
+ lc_zone: GCEZone object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ tags = module.params.get('tags')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ preemptible = module.params.get('preemptible')
+ disk_size = module.params.get('disk_size')
+ service_account_permissions = module.params.get('service_account_permissions')
+
+ if external_ip == "none":
+ instance_external_ip = None
+ elif external_ip != "ephemeral":
+ instance_external_ip = external_ip
+ try:
+ # check if instance_external_ip is an ip or a name
+ try:
+ socket.inet_aton(instance_external_ip)
+ instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
+ except socket.error:
+ instance_external_ip = gce.ex_get_address(instance_external_ip)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
+ else:
+ instance_external_ip = external_ip
+
+ new_instances = []
+ changed = False
+
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk, lc_zone))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
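+    # Defer the expensive image lookup until the image is actually needed
+    # (see the LazyDiskImage helper class below).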
+ lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+
+ # These variables all have default values but check just in case
+ if not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ gce_args = dict(
+ location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_can_ip_forward=ip_forward,
+ external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ if preemptible is not None:
+ gce_args['ex_preemptible'] = preemptible
+ if subnetwork is not None:
+ gce_args['ex_subnetwork'] = subnetwork
+
+ if isinstance(instance_names, str) and not number:
+ instance_names = [instance_names]
+
+ if isinstance(instance_names, str) and number:
+ instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
+ lc_image(), number, **gce_args)
+ for resp in instance_responses:
+ n = resp
+ if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
+ try:
+ n = gce.ex_get_node(n.name, lc_zone)
+ except ResourceNotFoundError:
+ pass
+ else:
+ # At least one node was created successfully, so report the run as changed
+ changed = True
+ new_instances.append(n)
+ else:
+ for instance in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.ex_get_volume("%s" % instance, lc_zone)
+ except ResourceNotFoundError:
+ pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
+ gce_args['ex_boot_disk'] = pd
+
+ inst = None
+ try:
+ inst = gce.ex_get_node(instance, lc_zone)
+ except ResourceNotFoundError:
+ inst = gce.create_node(
+ instance, lc_machine_type, lc_image(), **gce_args
+ )
+ changed = True
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (instance, e.value))
+ if inst:
+ new_instances.append(inst)
+
+ for inst in new_instances:
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i + 1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def change_instance_state(module, gce, instance_names, number, zone, state):
+ """Changes the state of a list of instances. For example,
+ change from started to stopped, or started to absent.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names to terminate
+ zone: GCEZone object where the instances reside prior to termination
+ state: 'state' parameter passed into module as argument
+
+ Returns a dictionary of instance names that were changed.
+
+ """
+ changed = False
+ nodes = []
+ state_instance_names = []
+
+ if isinstance(instance_names, str) and number:
+ node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
+ elif isinstance(instance_names, str) and not number:
+ node_names = [instance_names]
+ else:
+ node_names = instance_names
+
+ for name in node_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone)
+ except ResourceNotFoundError:
+ state_instance_names.append(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ else:
+ nodes.append(inst)
+ state_instance_names.append(name)
+
+ if state in ['absent', 'deleted'] and number:
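+        # ex_destroy_multiple_nodes returns one success flag per node; the run
+        # is reported as changed if at least one node was actually destroyed.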
+ changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
+ changed = reduce(lambda x, y: x or y, changed_nodes)
+ else:
+ for node in nodes:
+ if state in ['absent', 'deleted']:
+ gce.destroy_node(node)
+ changed = True
+ elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
+ gce.ex_start_node(node)
+ changed = True
+ elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
+ gce.ex_stop_node(node)
+ changed = True
+
+ return (changed, state_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-8'),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(aliases=['base_name']),
+ num_instances=dict(type='int'),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted',
+ 'started', 'stopped', 'terminated'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ disk_size=dict(type='int', default=10),
+ preemptible=dict(type='bool', default=None),
+ ),
+ mutually_exclusive=[('instance_names', 'name')]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ instance_names = module.params.get('instance_names')
+ name = module.params.get('name')
+ number = module.params.get('num_instances')
+ subnetwork = module.params.get('subnetwork')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+ preemptible = module.params.get('preemptible')
+ changed = False
+
+ inames = None
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames = name
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ lc_zone = get_valid_location(module, gce, zone)
+ if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
+ module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
+ changed=False)
+
+ if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
+ changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
+ json_output['state'] = state
+ (changed, state_instance_names) = change_instance_state(
+ module, gce, inames, number, lc_zone, state)
+
+ # based on what user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names or name and number:
+ json_output['instance_names'] = state_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames, number, lc_zone)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+class LazyDiskImage:
+ """
+ Object for lazy instantiation of disk image
+ gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
+ """
+
+ def __init__(self, module, gce, name, has_pd, family=None, projects=None):
+ self.image = None
+ self.was_called = False
+ self.gce = gce
+ self.name = name
+ self.has_pd = has_pd
+ self.module = module
+ self.family = family
+ self.projects = projects
+
+ def __call__(self):
+ if not self.was_called:
+ self.was_called = True
+ if not self.has_pd:
+ if self.family:
+ self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
+ else:
+ self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
+ if not self.image:
+ self.module.fail_json(msg='image or disks missing for create instance', changed=False)
+ return self.image
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py
new file mode 100644
index 00000000..b5fd4bf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-regional-ip
+ region: us-east1
+ state: present
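+
+# A minimal sketch (not part of the original examples): releasing a reserved
+# address by setting state to absent.
+- name: Release (destroy) an external IP address
+  community.general.gce_eip:
+    service_account_email: "{{ service_account_email }}"
+    credentials_file: "{{ credentials_file }}"
+    project_id: "{{ project_id }}"
+    name: my-global-ip
+    region: global
+    state: absent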
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+ description: The region the address belongs to.
+ returned: always
+ type: str
+ sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+ """
+ Get an Address from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Address.
+ :type name: ``str``
+
+ :return: A GCEAddress object or None.
+ :rtype: :class: `GCEAddress` or None
+ """
+ try:
+ return gce.ex_get_address(name=name, region=region)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_address(gce, params):
+ """
+ Create a new Address.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+
+ address = gce.ex_create_address(
+ name=params['name'], region=params['region'])
+
+ if address:
+ changed = True
+ return_data = address.address
+
+ return (changed, return_data)
+
+
+def delete_address(address):
+ """
+ Delete an Address.
+
+ :param address: The GCEAddress object to delete.
+ :type address: :class: `GCEAddress`
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+ if address.destroy():
+ changed = True
+ return_data = address.address
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ region=dict(required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.19+) required for this module.')
+
+ gce = gcp_connect(module, Provider.GCE, get_driver,
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['name'] = module.params.get('name')
+ params['region'] = module.params.get('region')
+
+ changed = False
+ json_output = {'state': params['state']}
+ address = get_address(gce, params['name'], region=params['region'])
+
+ if params['state'] == 'absent':
+ if not address:
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown address: %s" %
+ (params['name']))
+ else:
+ # Delete
+ (changed, json_output['address']) = delete_address(address)
+ else:
+ if not address:
+ # Create
+ (changed, json_output['address']) = create_address(gce,
+ params)
+ else:
+ changed = False
+ json_output['address'] = address.address
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py
new file mode 100644
index 00000000..c4705098
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+ - This module can create and delete GCE private images from a gzip-compressed
+ tarball containing raw disk data, or from existing detached
+ disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.general.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.general.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.general.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.general.gce_img:
+ name: test-image
+ state: absent
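+
+# A minimal sketch (not part of the original examples): assigning the image to
+# an image family. The family option requires Apache Libcloud 1.0.0+.
+- name: Create an image named test-image in the my-images family
+  community.general.gce_img:
+    name: test-image
+    source: test-disk
+    family: my-images
+    zone: us-central1-a
+    state: present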
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
+ volume = source.replace('gs://', GCS_URI)
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py
new file mode 100644
index 00000000..04ddacce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy Google Compute Engine instance templates
+description:
+ - Creates or destroys Google Compute Engine instance templates
+ on Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+ - The image to use to create the instance.
+ Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+ used. If C(None), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted disks[] structure. Case sensitive.
+ See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information.
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+ - Region that subnetwork resides in. (Required for subnetwork to successfully complete)
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
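+# A minimal sketch (not part of the original examples): placing the template's
+# network interface in a custom subnetwork. subnetwork_region is required for
+# the subnetwork lookup to succeed; the network and subnetwork names below are
+# placeholders.
+- name: Create instance template in a custom subnetwork
+  community.general.gce_instance_template:
+    name: foo-subnet
+    size: n1-standard-1
+    image_family: ubuntu-1604-lts
+    network: my-network
+    subnetwork: my-subnetwork
+    subnetwork_region: us-central1
+    state: present
+    project_id: "your-project-name"
+    credentials_file: "/path/to/your-key.json"
+    service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+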
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
+def get_info(inst):
+ """Retrieves instance template information
+ """
+ return({
+ 'name': inst.name,
+ 'extra': inst.extra,
+ })
+
+
+def create_instance_template(module, gce):
+ """Create an instance template
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ # get info from module
+ name = module.params.get('name')
+ size = module.params.get('size')
+ source = module.params.get('source')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ disk_type = module.params.get('disk_type')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ subnetwork_region = module.params.get('subnetwork_region')
+ can_ip_forward = module.params.get('can_ip_forward')
+ external_ip = module.params.get('external_ip')
+ service_account_permissions = module.params.get(
+ 'service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+ on_host_maintenance = module.params.get('on_host_maintenance')
+ automatic_restart = module.params.get('automatic_restart')
+ preemptible = module.params.get('preemptible')
+ tags = module.params.get('tags')
+ metadata = module.params.get('metadata')
+ description = module.params.get('description')
+ disks_gce_struct = module.params.get('disks_gce_struct')
+ changed = False
+
+ # args of ex_create_instancetemplate
+ gce_args = dict(
+ name="instance",
+ size="f1-micro",
+ source=None,
+ image=None,
+ disk_type='pd-standard',
+ disk_auto_delete=True,
+ network='default',
+ subnetwork=None,
+ can_ip_forward=None,
+ external_ip='ephemeral',
+ service_accounts=None,
+ on_host_maintenance=None,
+ automatic_restart=None,
+ preemptible=None,
+ tags=None,
+ metadata=None,
+ description=None,
+ disks_gce_struct=None,
+ nic_gce_struct=None
+ )
+
+ gce_args['name'] = name
+ gce_args['size'] = size
+
+ if source is not None:
+ gce_args['source'] = source
+
+ if image:
+ gce_args['image'] = image
+ else:
+ if image_family:
+ image = gce.ex_get_image_from_family(image_family)
+ gce_args['image'] = image
+ else:
+ gce_args['image'] = "debian-8"
+
+ gce_args['disk_type'] = disk_type
+ gce_args['disk_auto_delete'] = disk_auto_delete
+
+ gce_network = gce.ex_get_network(network)
+ gce_args['network'] = gce_network
+
+ if subnetwork is not None:
+ gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
+
+ if can_ip_forward is not None:
+ gce_args['can_ip_forward'] = can_ip_forward
+
+ if external_ip == "ephemeral":
+ instance_external_ip = external_ip
+ elif external_ip == "none":
+ instance_external_ip = None
+ else:
+ try:
+ instance_external_ip = gce.ex_get_address(external_ip)
+ except GoogleBaseError as err:
+ # external_ip may be the name of a reserved address rather than an IP
+ instance_external_ip = external_ip
+ gce_args['external_ip'] = instance_external_ip
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP:
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email is not None:
+ ex_sa_perms.append({'email': str(service_account_email)})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+ gce_args['service_accounts'] = ex_sa_perms
+
+ if on_host_maintenance is not None:
+ gce_args['on_host_maintenance'] = on_host_maintenance
+
+ if automatic_restart is not None:
+ gce_args['automatic_restart'] = automatic_restart
+
+ if preemptible is not None:
+ gce_args['preemptible'] = preemptible
+
+ if tags is not None:
+ gce_args['tags'] = tags
+
+ if disks_gce_struct is not None:
+ gce_args['disks_gce_struct'] = disks_gce_struct
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+ gce_args['metadata'] = metadata
+
+ if description is not None:
+ gce_args['description'] = description
+
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ except ResourceNotFoundError:
+ try:
+ instance = gce.ex_create_instancetemplate(**gce_args)
+ changed = True
+ except GoogleBaseError as err:
+ module.fail_json(
+                msg='Unexpected error attempting to create instance template {0}, error: {1}'
+                .format(
+                    name,
+ err.value
+ )
+ )
+
+ if instance:
+ json_data = get_info(instance)
+ else:
+ module.fail_json(msg="no instance template!")
+
+ return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+ """ Delete instance template.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ name = module.params.get('name')
+ current_state = "absent"
+ changed = False
+
+ # get instance template
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ current_state = "present"
+ except GoogleBaseError as e:
+        json_data = dict(msg='instance template does not exist: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ if current_state == "present":
+ rc = instance.destroy()
+ if rc:
+ changed = True
+ else:
+ module.fail_json(
+ msg='instance template destroy failed'
+ )
+
+ json_data = {}
+ return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+    ''' Dispatch to create or delete the instance template based on the state parameter.
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+    nothing
+    Exit:
+    exits the AnsibleModule object with JSON data.
+    '''
+ json_output = dict()
+ state = module.params.get("state")
+ if state == "present":
+ (changed, output, name) = create_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+ elif state == "absent":
+ (changed, output, name) = delete_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+
+ module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Determine whether the desired state differs from the current state (check mode).
+    module : AnsibleModule object
+    gce: authenticated GCE libcloud driver
+    Returns:
+    a tuple of (changed, output message)
+ '''
+ changed = False
+ current_state = "absent"
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        current_state = "absent"
+    except GoogleBaseError as e:
+        module.fail_json(msg='error while looking up instance template: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+ if current_state != state:
+ changed = True
+
+ if current_state == "absent":
+ if changed:
+ output = 'instance template {0} will be created'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+ if current_state == "present":
+ if changed:
+ output = 'instance template {0} will be destroyed'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+
+ return (changed, output)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ name=dict(required=True, aliases=['base_name']),
+ size=dict(default='f1-micro'),
+ source=dict(),
+ image=dict(),
+ image_family=dict(default='debian-8'),
+ disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+ disk_auto_delete=dict(type='bool', default=True),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ can_ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ automatic_restart=dict(type='bool', default=None),
+ preemptible=dict(type='bool', default=None),
+ tags=dict(type='list'),
+ metadata=dict(),
+ description=dict(),
+ disks=dict(type='list'),
+ nic_gce_struct=dict(type='list'),
+ project_id=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ subnetwork_region=dict(),
+ disks_gce_struct=dict(type='list')
+ ),
+ mutually_exclusive=[['source', 'image']],
+ required_one_of=[['image', 'image_family']],
+ supports_check_mode=True
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ try:
+ gce = gce_connect(module)
+ except GoogleBaseError as e:
+ module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
+
+ if module.check_mode:
+ (changed, output) = check_if_system_state_would_be_changed(module, gce)
+ module.exit_json(
+ changed=changed,
+ msg=output
+ )
+ else:
+ module_controller(module, gce)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py
new file mode 100644
index 00000000..dced7599
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for
+ the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+      - A dictionary of labels (key/value pairs) to add or remove for the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+  description: Labels (key/value pairs) that exist on the resource.
+  returned: Always.
+  type: dict
+  sample: { 'webserver-frontend': 'homepage', 'environment': 'test', 'experiment-name': 'kennedy' }
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
+def _fetch_resource(client, module):
+ params = module.params
+ if params['resource_url']:
+ if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
+ module.fail_json(
+ msg='Invalid self_link url: %s' % params['resource_url'])
+ else:
+ parts = params['resource_url'].split('/')[8:]
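+            # e.g. '.../compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+            # yields parts == ['us-central1-f', 'instances', 'example-instance'], while a global
+            # resource such as an image yields only ['images', 'my-custom-image']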
+ if len(parts) == 2:
+ resource_type, resource_name = parts
+ resource_location = 'global'
+ else:
+ resource_location, resource_type, resource_name = parts
+ else:
+ if not params['resource_type'] or not params['resource_location'] \
+ or not params['resource_name']:
+ module.fail_json(msg='Missing required resource params.')
+ resource_type = params['resource_type'].lower()
+ resource_name = params['resource_name'].lower()
+ resource_location = params['resource_location'].lower()
+
+ if resource_type not in KNOWN_RESOURCES:
+ module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if resource_type == 'instances':
+ resource = client.instances().get(project=params['project_id'],
+ zone=resource_location,
+ instance=resource_name).execute()
+ elif resource_type == 'disks':
+ resource = client.disks().get(project=params['project_id'],
+ zone=resource_location,
+ disk=resource_name).execute()
+ elif resource_type == 'snapshots':
+ resource = client.snapshots().get(project=params['project_id'],
+ snapshot=resource_name).execute()
+ elif resource_type == 'images':
+ resource = client.images().get(project=params['project_id'],
+ image=resource_name).execute()
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % resource_type)
+
+ return resource.get('labelFingerprint', ''), {
+ 'resource_name': resource.get('name'),
+ 'resource_url': resource.get('selfLink'),
+ 'resource_type': resource_type,
+ 'resource_location': resource_location,
+ 'labels': resource.get('labels', {})
+ }
+
+
+def _set_labels(client, new_labels, module, ri, fingerprint):
+ params = module.params
+ result = err = None
+ labels = {
+ 'labels': new_labels,
+ 'labelFingerprint': fingerprint
+ }
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if ri['resource_type'] == 'instances':
+ req = client.instances().setLabels(project=params['project_id'],
+ instance=ri['resource_name'],
+ zone=ri['resource_location'],
+ body=labels)
+ elif ri['resource_type'] == 'disks':
+ req = client.disks().setLabels(project=params['project_id'],
+ zone=ri['resource_location'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'snapshots':
+ req = client.snapshots().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'images':
+ req = client.images().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
+
+ # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
+ # method to poll for the async request/operation to complete before
+ # returning. However, during 'beta', we are in an odd state where
+ # API requests must be sent to the 'compute/beta' API, but the python
+ # client library only allows for *Operations.get() requests to be
+ # sent to 'compute/v1' API. The response operation is in the 'beta'
+ # API-scope, but the client library cannot find the operation (404).
+ # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ # return result, err
+ result = req.execute()
+ return True, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ labels=dict(required=False, type='dict', default={}),
+ resource_url=dict(required=False, type='str'),
+ resource_name=dict(required=False, type='str'),
+ resource_location=dict(required=False, type='str'),
+ resource_type=dict(required=False, type='str'),
+ project_id=dict()
+ ),
+ required_together=[
+ ['resource_name', 'resource_location', 'resource_type']
+ ],
+ mutually_exclusive=[
+ ['resource_url', 'resource_name'],
+ ['resource_url', 'resource_location'],
+ ['resource_url', 'resource_type']
+ ]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ client, cparams = get_google_api_client(module, 'compute',
+ user_agent_product=UA_PRODUCT,
+ user_agent_version=UA_VERSION,
+ api_version=GCE_API_VERSION)
+
+ # Get current resource info including labelFingerprint
+ fingerprint, resource_info = _fetch_resource(client, module)
+ new_labels = resource_info['labels'].copy()
+
+ update_needed = False
+ if module.params['state'] == 'absent':
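+        # Only remove a label when both the key and the value match; a key that exists
+        # with a different value is treated as an error below rather than silently skipped.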
+ for k, v in module.params['labels'].items():
+ if k in new_labels:
+ if new_labels[k] == v:
+ update_needed = True
+ new_labels.pop(k, None)
+ else:
+ module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
+ else:
+ for k, v in module.params['labels'].items():
+ if k not in new_labels:
+ update_needed = True
+ new_labels[k] = v
+
+ changed = False
+ json_output = {'state': module.params['state']}
+ if update_needed:
+ changed, err = _set_labels(client, new_labels, module, resource_info,
+ fingerprint)
+ json_output['changed'] = changed
+
+ # TODO(erjohnso): probably want to re-fetch the resource to return the
+ # new labelFingerprint, check that desired labels match updated labels.
+ # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
+ # GCPUtils feature to poll for the operation to be complete. For now,
+ # we'll just update the output with what we have from the original
+ # state of the resource.
+ json_output.update(resource_info)
+ json_output.update(module.params)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py
new file mode 100644
index 00000000..50e26a58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+      the Ansible module simplifies the configuration by following the
+      libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+      - the port (range) to forward, for example 80 or 8000-8888; defaults to all ports
+ members:
+ type: list
+ description:
+      - a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ httphealthcheck_name=dict(),
+ httphealthcheck_port=dict(default=80, type='int'),
+ httphealthcheck_path=dict(default='/'),
+ httphealthcheck_interval=dict(default=5, type='int'),
+ httphealthcheck_timeout=dict(default=5, type='int'),
+ httphealthcheck_unhealthy_count=dict(default=2, type='int'),
+ httphealthcheck_healthy_count=dict(default=2, type='int'),
+ httphealthcheck_host=dict(),
+ name=dict(),
+ protocol=dict(default='tcp'),
+ region=dict(),
+ external_ip=dict(),
+ port_range=dict(),
+ members=dict(type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
+
+ gce = gce_connect(module)
+
+ httphealthcheck_name = module.params.get('httphealthcheck_name')
+ httphealthcheck_port = module.params.get('httphealthcheck_port')
+ httphealthcheck_path = module.params.get('httphealthcheck_path')
+ httphealthcheck_interval = module.params.get('httphealthcheck_interval')
+ httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
+ httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
+ httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
+ httphealthcheck_host = module.params.get('httphealthcheck_host')
+ name = module.params.get('name')
+ protocol = module.params.get('protocol')
+ region = module.params.get('region')
+ external_ip = module.params.get('external_ip')
+ port_range = module.params.get('port_range')
+ members = module.params.get('members')
+ state = module.params.get('state')
+
+ try:
+ gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
+ gcelb.connection.user_agent_append("%s/%s" % (
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION))
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ changed = False
+ json_output = {'name': name, 'state': state}
+
+ if not name and not httphealthcheck_name:
+        module.fail_json(msg='Nothing to do, please specify a "name" or "httphealthcheck_name" parameter', changed=False)
+
+ if state in ['active', 'present']:
+ # first, create the httphealthcheck if requested
+ hc = None
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
+ host=httphealthcheck_host, path=httphealthcheck_path,
+ port=httphealthcheck_port,
+ interval=httphealthcheck_interval,
+ timeout=httphealthcheck_timeout,
+ unhealthy_threshold=httphealthcheck_unhealthy_count,
+ healthy_threshold=httphealthcheck_healthy_count)
+ changed = True
+ except ResourceExistsError:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if hc is not None:
+ json_output['httphealthcheck_host'] = hc.extra['host']
+ json_output['httphealthcheck_path'] = hc.path
+ json_output['httphealthcheck_port'] = hc.port
+ json_output['httphealthcheck_interval'] = hc.interval
+ json_output['httphealthcheck_timeout'] = hc.timeout
+ json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
+ json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold
+
+ # create the forwarding rule (and target pool under the hood)
+ lb = None
+ if name:
+ if not region:
+ module.fail_json(msg='Missing required region name',
+ changed=False)
+ nodes = []
+ output_nodes = []
+ json_output['name'] = name
+ # members is a python list of 'zone/inst' strings
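+        # e.g. members: ['us-central1-a/www-a', 'us-central1-b/www-b']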
+ if members:
+ for node in members:
+ try:
+ zone, node_name = node.split('/')
+ nodes.append(gce.ex_get_node(node_name, zone))
+ output_nodes.append(node)
+ except Exception:
+ # skip nodes that are badly formatted or don't exist
+ pass
+ try:
+ if hc is not None:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_healthchecks=[hc],
+ ex_address=external_ip)
+ else:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_address=external_ip)
+ changed = True
+ except ResourceExistsError:
+ lb = gcelb.get_balancer(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if lb is not None:
+ json_output['members'] = output_nodes
+ json_output['protocol'] = protocol
+ json_output['region'] = region
+ json_output['external_ip'] = lb.ip
+ json_output['port_range'] = lb.port
+ hc_names = []
+ if 'healthchecks' in lb.extra:
+ for hc in lb.extra['healthchecks']:
+ hc_names.append(hc.name)
+ json_output['httphealthchecks'] = hc_names
+
+ if state in ['absent', 'deleted']:
+ # first, delete the load balancer (forwarding rule and target pool)
+ # if specified.
+ if name:
+ json_output['name'] = name
+ try:
+ lb = gcelb.get_balancer(name)
+ gcelb.destroy_balancer(lb)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # destroy the health check if specified
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ gce.ex_destroy_healthcheck(hc)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py
new file mode 100644
index 00000000..42db08bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+    - Resizing and recreating VMs are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+ - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
+ and policy.max_instances (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ named_ports:
+ type: list
+ description:
+      - Define named ports that backend services can forward data to. Format is a list of
+ name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# Following playbook creates, rebuilds instances, resizes and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+  description: True if an Autoscaler update was attempted and succeeded,
+    False if the update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+  description: True if an Autoscaler delete was attempted and succeeded,
+    False if the delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+ :param field_list: list of dict representing the fields
+ [{'name': str, 'required': True/False', 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: Tuple of (True, '') if all fields are valid, (False, error message) otherwise.
+    :rtype: ``tuple`` of (``bool``, ``str``)
+ """
+ for d in field_list:
+ if not d['name'] in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+    :param params: Ansible dictionary containing named_ports configuration
+                   It is expected that the named_ports config will be found at the
+                   key 'named_ports'. That key should contain a list of
+                   {name : port} dictionaries.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+    :return: True if the Autoscaler was created, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect MIG.
+
+    :param autoscaler: Autoscaler object from libcloud.
+    :type autoscaler: :class: `GCEAutoscaler`
+
+    :return: True if the Autoscaler was deleted, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+    If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+    Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+    If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+    Sort the existing named ports and the new ones. If they differ, update.
+    This also implicitly allows for the removal of named ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+    :param named_ports: list of dictionaries in the format of {'name': ..., 'port': ...}
+ :type named_ports: ``list`` of ``dict``
+
+ :return: True if successful
+ :rtype: ``bool``
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to fetch MIG %s to create autoscaler \
+ in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+ # Note: multiple autoscalers can be associated to a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+ in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+ # If we're going to update a MIG, we need a size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s\
+ in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+ json_output['delete_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py
new file mode 100644
index 00000000..48971ae7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+      - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25));
+        this parameter is mandatory when creating or updating a firewall rule
+ ipv4_range:
+ type: str
+ description:
+      - the IPv4 address range in CIDR notation for the network.
+        This parameter is not required when you reference an existing network with the I(name) parameter,
+        but it is required when you create a new network.
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+      - network mode for Google Cloud.
+        C(legacy) indicates a network with an IP address range;
+        C(auto) automatically generates subnetworks in different regions;
+        C(custom) uses networks to group subnets of user-specified IP address ranges.
+        See U(https://cloud.google.com/compute/docs/networking#network_types) for details.
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.general.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.general.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.general.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.general.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+  description: Instance tags the firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+  description: Instances with these tags receive traffic allowed by the firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def format_allowed_section(allowed):
+ """Format each section of the allowed list"""
+ if allowed.count(":") == 0:
+ protocol = allowed
+ ports = []
+ elif allowed.count(":") == 1:
+ protocol, ports = allowed.split(":")
+ else:
+ return []
+ if ports.count(","):
+ ports = ports.split(",")
+ elif ports:
+ ports = [ports]
+ return_val = {"IPProtocol": protocol}
+ if ports:
+ return_val["ports"] = ports
+ return return_val
+
+
+def format_allowed(allowed):
+ """Format the 'allowed' value so that it is GCE compatible."""
+ return_value = []
+ if allowed.count(";") == 0:
+ return [format_allowed_section(allowed)]
+ else:
+ sections = allowed.split(";")
+ for section in sections:
+ return_value.append(format_allowed_section(section))
+ return return_value
+
+
+def sorted_allowed_list(allowed_list):
+ """Sort allowed_list (output of format_allowed) by protocol and port."""
+ # sort by protocol
+ allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol'])
+ # sort the ports list
+ return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', [])))
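+
+
+# Illustrative sketch (not called by the module): shows how a compound 'allowed'
+# spec is converted by format_allowed() and normalized by sorted_allowed_list()
+# so it can be compared against an existing firewall rule. The spec value below
+# is hypothetical.
+def _example_allowed_normalization():
+    spec = 'tcp:80,443;udp:1-25'
+    allowed_list = format_allowed(spec)
+    # allowed_list == [{'IPProtocol': 'tcp', 'ports': ['80', '443']},
+    #                  {'IPProtocol': 'udp', 'ports': ['1-25']}]
+    # Sorting by protocol and ports makes the comparison order-insensitive.
+    return sorted_allowed_list(allowed_list)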
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ allowed=dict(),
+ ipv4_range=dict(),
+ fwname=dict(),
+ name=dict(),
+ src_range=dict(default=[], type='list'),
+ src_tags=dict(default=[], type='list'),
+ target_tags=dict(default=[], type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
+ subnet_name=dict(),
+ subnet_region=dict(),
+ subnet_desc=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ allowed = module.params.get('allowed')
+ ipv4_range = module.params.get('ipv4_range')
+ fwname = module.params.get('fwname')
+ name = module.params.get('name')
+ src_range = module.params.get('src_range')
+ src_tags = module.params.get('src_tags')
+ target_tags = module.params.get('target_tags')
+ state = module.params.get('state')
+ mode = module.params.get('mode')
+ subnet_name = module.params.get('subnet_name')
+ subnet_region = module.params.get('subnet_region')
+ subnet_desc = module.params.get('subnet_desc')
+
+ changed = False
+ json_output = {'state': state}
+
+ if state in ['active', 'present']:
+ network = None
+ subnet = None
+ try:
+ network = gce.ex_get_network(name)
+ json_output['name'] = name
+ if mode == 'legacy':
+ json_output['ipv4_range'] = network.cidr
+ if network and mode == 'custom' and subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = subnet.cidr
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants to create a new network that doesn't yet exist
+ if name and not network:
+ if not ipv4_range and mode != 'auto':
+ module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
+ changed=False)
+ args = [ipv4_range if mode == 'legacy' else None]
+ kwargs = {}
+ if mode != 'legacy':
+ kwargs['mode'] = mode
+
+ try:
+ network = gce.ex_create_network(name, *args, **kwargs)
+ json_output['name'] = name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except TypeError:
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
+ if not hasattr(gce, 'ex_create_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ if not subnet_name or not ipv4_range or not subnet_region:
+ module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
+
+ try:
+ subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=changed)
+
+ if fwname:
+ # user creating a firewall rule
+ if not allowed and not src_range and not src_tags:
+ if changed and network:
+ module.fail_json(
+                    msg="Network created, but missing required firewall rule parameter(s)", changed=True)
+ module.fail_json(
+ msg="Missing required firewall rule parameter(s)",
+ changed=False)
+
+ allowed_list = format_allowed(allowed)
+
+ # Fetch existing rule and if it exists, compare attributes
+ # update if attributes changed. Create if doesn't exist.
+ try:
+ fw_changed = False
+ fw = gce.ex_get_firewall(fwname)
+
+ # If old and new attributes are different, we update the firewall rule.
+ # This implicitly lets us clear out attributes as well.
+ # allowed_list is required and must not be None for firewall rules.
+ if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
+ fw.allowed = allowed_list
+ fw_changed = True
+
+ # source_ranges might not be set in the project; cast it to an empty list
+ fw.source_ranges = fw.source_ranges or []
+
+ # If these attributes are lists, we sort them first, then compare.
+ # Otherwise, we update if they differ.
+ if fw.source_ranges != src_range:
+ if isinstance(src_range, list):
+ if sorted(fw.source_ranges) != sorted(src_range):
+ fw.source_ranges = src_range
+ fw_changed = True
+ else:
+ fw.source_ranges = src_range
+ fw_changed = True
+
+ # source_tags might not be set in the project; cast it to an empty list
+ fw.source_tags = fw.source_tags or []
+
+ if fw.source_tags != src_tags:
+ if isinstance(src_tags, list):
+ if sorted(fw.source_tags) != sorted(src_tags):
+ fw.source_tags = src_tags
+ fw_changed = True
+ else:
+ fw.source_tags = src_tags
+ fw_changed = True
+
+ # target_tags might not be set in the project; cast it to an empty list
+ fw.target_tags = fw.target_tags or []
+
+ if fw.target_tags != target_tags:
+ if isinstance(target_tags, list):
+ if sorted(fw.target_tags) != sorted(target_tags):
+ fw.target_tags = target_tags
+ fw_changed = True
+ else:
+ fw.target_tags = target_tags
+ fw_changed = True
+
+ if fw_changed is True:
+ try:
+ gce.ex_update_firewall(fw)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # Firewall rule not found so we try to create it.
+ except ResourceNotFoundError:
+ try:
+ gce.ex_create_firewall(fwname, allowed_list, network=name,
+ source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['fwname'] = fwname
+ json_output['allowed'] = allowed
+ json_output['src_range'] = src_range
+ json_output['src_tags'] = src_tags
+ json_output['target_tags'] = target_tags
+
+ if state in ['absent', 'deleted']:
+ if fwname:
+ json_output['fwname'] = fwname
+ fw = None
+ try:
+ fw = gce.ex_get_firewall(fwname)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if fw:
+ gce.ex_destroy_firewall(fw)
+ changed = True
+ elif subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ json_output['name'] = subnet_name
+ subnet = None
+ try:
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if subnet:
+ gce.ex_destroy_subnetwork(subnet)
+ changed = True
+ elif name:
+ json_output['name'] = name
+ network = None
+ try:
+ network = gce.ex_get_network(name)
+
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if network:
+ try:
+ gce.ex_destroy_network(network)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py
new file mode 100644
index 00000000..7e60285f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file).
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+      - If C(yes), deletes the volume when the instance is terminated
+ type: bool
+ image_family:
+ type: str
+ description:
+      - The image family to use to create the disk.
+        If I(image) has been used, I(image_family) is ignored.
+        Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ delete_on_termination=dict(type='bool'),
+ detach_only=dict(type='bool'),
+ instance_name=dict(),
+ mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
+ name=dict(required=True),
+ size_gb=dict(default=10),
+ disk_type=dict(default='pd-standard'),
+ image=dict(),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ snapshot=dict(),
+ state=dict(default='present'),
+ zone=dict(default='us-central1-b'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ detach_only = module.params.get('detach_only')
+ instance_name = module.params.get('instance_name')
+ mode = module.params.get('mode')
+ name = module.params.get('name')
+ size_gb = module.params.get('size_gb')
+ disk_type = module.params.get('disk_type')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+
+ if delete_on_termination and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when requesting delete on termination',
+ changed=False)
+
+ if detach_only and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when detaching a disk',
+ changed=False)
+
+ disk = inst = None
+ changed = is_attached = False
+
+ json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
+ if detach_only:
+ json_output['detach_only'] = True
+ json_output['detached_from_instance'] = instance_name
+
+ if instance_name:
+ # user wants to attach/detach from an existing instance
+ try:
+ inst = gce.ex_get_node(instance_name, zone)
+ # is the disk attached?
+ for d in inst.extra['disks']:
+ if d['deviceName'] == name:
+ is_attached = True
+ json_output['attached_mode'] = d['mode']
+ json_output['attached_to_instance'] = inst.name
+ except Exception:
+ pass
+
+ # find disk if it already exists
+ try:
+ disk = gce.ex_get_volume(name)
+ json_output['size_gb'] = int(disk.size)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants a disk to exist. If "instance_name" is supplied the user
+ # also wants it attached
+ if state in ['active', 'present']:
+
+ if not size_gb:
+ module.fail_json(msg="Must supply a size_gb", changed=False)
+ try:
+ size_gb = int(round(float(size_gb)))
+ if size_gb < 1:
+ raise Exception
+ except Exception:
+            module.fail_json(msg="Must supply a size_gb of at least 1 GB",
+                             changed=False)
+
+ if instance_name and inst is None:
+ module.fail_json(msg='Instance %s does not exist in zone %s' % (
+ instance_name, zone), changed=False)
+
+ if not disk:
+ if image is not None and snapshot is not None:
+ module.fail_json(
+ msg='Cannot give both image (%s) and snapshot (%s)' % (
+ image, snapshot), changed=False)
+ lc_image = None
+ lc_snapshot = None
+ if image_family is not None:
+ lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
+ elif image is not None:
+ lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
+ elif snapshot is not None:
+ lc_snapshot = gce.ex_get_snapshot(snapshot)
+ try:
+ disk = gce.create_volume(
+ size_gb, name, location=zone, image=lc_image,
+ snapshot=lc_snapshot, ex_disk_type=disk_type)
+ except ResourceExistsError:
+ pass
+ except QuotaExceededError:
+ module.fail_json(msg='Requested disk size exceeds quota',
+ changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['size_gb'] = size_gb
+ if image is not None:
+ json_output['image'] = image
+ if snapshot is not None:
+ json_output['snapshot'] = snapshot
+ changed = True
+ if inst and not is_attached:
+ try:
+ gce.attach_volume(inst, disk, device=name, ex_mode=mode,
+ ex_auto_delete=delete_on_termination)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['attached_to_instance'] = inst.name
+ json_output['attached_mode'] = mode
+ if delete_on_termination:
+ json_output['delete_on_termination'] = True
+ changed = True
+
+ # user wants to delete a disk (or perhaps just detach it).
+ if state in ['absent', 'deleted'] and disk:
+
+ if inst and is_attached:
+ try:
+ gce.detach_volume(disk, ex_node=inst)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+ if not detach_only:
+ try:
+ gce.destroy_volume(disk)
+ except ResourceInUseError as e:
+ module.fail_json(msg=str(e.value), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py
new file mode 100644
index 00000000..4fca1b05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+    - Manages snapshots for the storage volumes of a GCE compute instance.
+      If there are multiple volumes, each snapshot name is prefixed with the
+      name of its disk.
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
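+
+
+# Illustrative sketch (not called by the module): shows the naming convention
+# applied in main() when an instance has more than one disk - each snapshot
+# name is prefixed with the name of its disk. The disk names are hypothetical.
+def _example_snapshot_naming(snapshot_name='example-snapshot',
+                             disk_names=('disk0', 'disk1')):
+    if len(disk_names) > 1:
+        return [d + '-' + snapshot_name for d in disk_names]
+    return [snapshot_name]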
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py
new file mode 100644
index 00000000..1e36ed4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+    to/from GCE instances. Use C(instance_pattern) to update multiple instances in a specified zone.
+options:
+ instance_name:
+ type: str
+ description:
+      - The name of the GCE instance to add or remove tags from.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+      - The pattern of GCE instance names to match for adding/removing tags. The full Python regex syntax is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+      - The zone of the instance(s) to add or remove tags from.
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+  - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.general.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.general.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.general.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
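+
+
+# Illustrative sketch (not called by the module): demonstrates the set arithmetic
+# modify_tags() relies on to work out which tags actually change. The tag values
+# are hypothetical.
+def _example_tag_set_math():
+    existing = ['http-server', 'staging']
+    requested = ['staging', 'monitoring']
+    # state=present: only tags not already on the node count as changed,
+    # and the node ends up with the union of both lists.
+    added = _get_changed_items(requested, existing)    # ['monitoring']
+    combined = _union_items(existing, requested)       # all three tags
+    # state=absent: only tags currently on the node count as changed,
+    # and the node keeps whatever was not requested for removal.
+    removed = _intersect_items(existing, requested)    # ['staging']
+    kept = _get_changed_items(existing, requested)     # ['http-server']
+    return added, combined, removed, kept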
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+ # tags changed are any that in the new list that weren't in existing
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py
new file mode 100644
index 00000000..ee564ae0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_backend_service.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gcp_backend_service
+short_description: Create or Destroy a Backend Service.
+description:
+ - Create or Destroy a Backend Service. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service) for an overview.
+ Full install/configuration instructions for the Google Cloud modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.3.0"
+notes:
+ - Update is not currently supported.
+  - Only global backend services are currently supported; regional backend services are not.
+  - Internal load balancing is not currently supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_backend_service) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ backend_service_name:
+ type: str
+ description:
+ - Name of the Backend Service.
+ required: true
+ backends:
+ type: list
+ description:
+ - List of backends that make up the backend service. A backend is made up of
+ an instance group and optionally several other parameters. See
+ U(https://cloud.google.com/compute/docs/reference/latest/backendServices)
+ for details.
+ required: true
+ healthchecks:
+ type: list
+ description:
+ - List of healthchecks. Only one healthcheck is supported.
+ required: true
+ enable_cdn:
+ description:
+ - If true, enable Cloud CDN for this Backend Service.
+ type: bool
+ port_name:
+ type: str
+ description:
+ - Name of the port on the managed instance group (MIG) that backend
+ services can forward data to. Required for external load balancing.
+ protocol:
+ type: str
+ description:
+ - The protocol this Backend Service uses to communicate with backends.
+ Possible values are HTTP, HTTPS, TCP, and SSL. The default is TCP.
+ choices: [HTTP, HTTPS, TCP, SSL]
+ default: TCP
+ required: false
+ timeout:
+ type: int
+ description:
+ - How many seconds to wait for the backend before considering it a failed
+ request. Default is 30 seconds. Valid range is 1-86400.
+ required: false
+ service_account_email:
+ type: str
+ description:
+ - Service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email.
+ pem_file:
+ type: str
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - GCE project ID.
+ state:
+ type: str
+ description:
+ - Desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum Backend Service
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+
+- name: Create BES with extended backend parameters
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ max_utilization: 0.6
+ max_rate: 10
+ - instance_group: managed_instance_group_2
+ max_utilization: 0.5
+ max_rate: 4
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+ timeout: 60
+'''
+
+RETURN = '''
+backend_service_created:
+  description: Indicator that the Backend Service was created.
+ returned: When a Backend Service is created.
+ type: bool
+ sample: "True"
+backend_service_deleted:
+  description: Indicator that the Backend Service was deleted.
+ returned: When a Backend Service is deleted.
+ type: bool
+ sample: "True"
+backend_service_name:
+ description: Name of the Backend Service.
+ returned: Always.
+ type: str
+ sample: "my-backend-service"
+backends:
+ description: List of backends (comprised of instance_group) that
+ make up a Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ { 'instance_group': 'mig_one', 'zone': 'us-central1-b'} ]"
+enable_cdn:
+ description: If Cloud CDN is enabled. null if not set.
+ returned: When a backend service exists.
+ type: bool
+ sample: "True"
+healthchecks:
+ description: List of healthchecks applied to the Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ 'my-healthcheck' ]"
+protocol:
+ description: Protocol used to communicate with the Backends.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "HTTP"
+port_name:
+ description: Name of Backend Port.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "myhttpport"
+timeout:
+ description: In seconds, how long before a request sent to a backend is
+ considered failed.
+ returned: If specified.
+ type: int
+  sample: 30
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params
+
+
+def _validate_params(params):
+ """
+ Validate backend_service params.
+
+ This function calls _validate_backend_params to verify
+ the backend-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'timeout', 'type': int, 'min': 1, 'max': 86400},
+ ]
+ try:
+ check_params(params, fields)
+ _validate_backend_params(params['backends'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_backend_params(backends):
+ """
+ Validate configuration for backends.
+
+ :param backends: Ansible dictionary containing backends configuration (only).
+ :type backends: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'balancing_mode', 'type': str, 'values': ['UTILIZATION', 'RATE', 'CONNECTION']},
+ {'name': 'max_utilization', 'type': float},
+ {'name': 'max_connections', 'type': int},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+
+ if not backends:
+ raise ValueError('backends should be a list.')
+
+ for backend in backends:
+ try:
+ check_params(backend, fields)
+ except Exception:
+ raise
+
+ if 'max_rate' in backend and 'max_rate_per_instance' in backend:
+            raise ValueError('maxRate and maxRatePerInstance cannot both be set.')
+
+ return (True, '')
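+
+
+# Illustrative sketch (not called by the module): a backends list of the shape
+# _validate_backend_params() accepts. The instance group names and limits are
+# hypothetical, and this assumes check_params() (from module_utils.gcp) only
+# validates the listed fields, ignoring extra keys such as 'instance_group'.
+def _example_backend_validation():
+    backends = [
+        {'instance_group': 'managed_instance_group_1',
+         'balancing_mode': 'RATE', 'max_rate': 10},
+        {'instance_group': 'managed_instance_group_2',
+         'balancing_mode': 'UTILIZATION', 'max_utilization': 0.6},
+    ]
+    # Setting both 'max_rate' and 'max_rate_per_instance' on one backend would
+    # raise ValueError here.
+    return _validate_backend_params(backends)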
+
+
+def get_backend_service(gce, name):
+ """
+ Get a Backend Service from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Backend Service.
+ :type name: ``str``
+
+ :return: A GCEBackendService object or None.
+ :rtype: :class: `GCEBackendService` or None
+ """
+ try:
+ # Does the Backend Service already exist?
+ return gce.ex_get_backendservice(name=name)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def get_healthcheck(gce, name):
+ return gce.ex_get_healthcheck(name)
+
+
+def get_instancegroup(gce, name, zone=None):
+ return gce.ex_get_instancegroup(name=name, zone=zone)
+
+
+def create_backend_service(gce, params):
+ """
+ Create a new Backend Service.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats
+ :rtype: tuple in the format of (bool, bool)
+ """
+ from copy import deepcopy
+
+ changed = False
+ return_data = False
+ # only one healthcheck is currently supported
+ hc_name = params['healthchecks'][0]
+ hc = get_healthcheck(gce, hc_name)
+ backends = []
+ for backend in params['backends']:
+ ig = get_instancegroup(gce, backend['instance_group'],
+ backend.get('zone', None))
+ kwargs = deepcopy(backend)
+ kwargs['instance_group'] = ig
+ backends.append(gce.ex_create_backend(
+ **kwargs))
+
+ bes = gce.ex_create_backendservice(
+ name=params['backend_service_name'], healthchecks=[hc], backends=backends,
+ enable_cdn=params['enable_cdn'], port_name=params['port_name'],
+ timeout_sec=params['timeout'], protocol=params['protocol'])
+
+ if bes:
+ changed = True
+ return_data = True
+
+ return (changed, return_data)
+
+
+def delete_backend_service(bes):
+ """
+ Delete a Backend Service. The Instance Groups are NOT destroyed.
+ """
+ changed = False
+ return_data = False
+ if bes.destroy():
+ changed = True
+ return_data = True
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ backends=dict(type='list', required=True),
+ backend_service_name=dict(required=True),
+ healthchecks=dict(type='list', required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ enable_cdn=dict(type='bool'),
+ port_name=dict(type='str'),
+ protocol=dict(type='str', default='TCP',
+ choices=['HTTP', 'HTTPS', 'SSL', 'TCP']),
+ timeout=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['backend_service_name'] = module.params.get('backend_service_name')
+ params['backends'] = module.params.get('backends')
+ params['healthchecks'] = module.params.get('healthchecks')
+ params['enable_cdn'] = module.params.get('enable_cdn', None)
+ params['port_name'] = module.params.get('port_name', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['timeout'] = module.params.get('timeout', None)
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+ module.fail_json(msg=e.message, changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ bes = get_backend_service(gce, params['backend_service_name'])
+
+ if not bes:
+ if params['state'] == 'absent':
+ # Doesn't exist and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown backend service: %s" %
+ (params['backend_service_name']))
+ else:
+ # Create
+ (changed, json_output['backend_service_created']) = create_backend_service(gce,
+ params)
+ elif params['state'] == 'absent':
+ # Delete
+ (changed, json_output['backend_service_deleted']) = delete_backend_service(bes)
+ else:
+ # TODO(supertom): Add update support when it is available in libcloud.
+ changed = False
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py
new file mode 100644
index 00000000..56dbfa7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_forwarding_rule.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_forwarding_rule
+short_description: Create, Update or Destroy a Forwarding_Rule.
+description:
+ - Create, Update or Destroy a Forwarding_Rule. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Global Forwarding_Rule API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules)
+ More details on the Forwarding Rules API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/forwardingRules)
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_forwarding_rule) or M(google.cloud.gcp_compute_global_forwarding_rule) instead.
+notes:
+ - Currently only supports global forwarding rules.
+ As such, Load Balancing Scheme is always EXTERNAL.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ address:
+ type: str
+ description:
+ - IPv4 or named IP address. Must be of the same scope (regional, global).
+ Reserved addresses can (and probably should) be used for global
+ forwarding rules. You may reserve IPs from the console or
+ via the gce_eip module.
+ required: false
+ forwarding_rule_name:
+ type: str
+ description:
+ - Name of the Forwarding_Rule.
+ required: true
+ port_range:
+ type: str
+ description:
+ - For global forwarding rules, must be set to 80 or 8080 for TargetHttpProxy, and
+ 443 for TargetHttpsProxy or TargetSslProxy.
+ required: false
+ protocol:
+ type: str
+ description:
+ - For global forwarding rules, TCP, UDP, ESP, AH, SCTP or ICMP. Default is TCP.
+ required: false
+ choices: [TCP]
+ default: TCP
+ region:
+ type: str
+ description:
+ - The region for this forwarding rule. Currently, only 'global' is supported.
+ required: true
+ state:
+ type: str
+ description:
+      - The state of the Forwarding Rule, either C(present) or C(absent).
+ required: true
+ choices: ["present", "absent"]
+ target:
+ type: str
+ description:
+ - Target resource for forwarding rule. For global proxy, this is a Global
+ TargetProxy resource. Required for external load balancing (including Global load balancing)
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ load_balancing_scheme:
+ type: str
+ choices: [EXTERNAL]
+ default: EXTERNAL
+ description:
+ - Load balancing scheme. At the moment the only choice is EXTERNAL.
+'''
+
+EXAMPLES = '''
+- name: Create Minimum GLOBAL Forwarding_Rule
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ region: global
+ target: my-target-proxy
+ state: present
+
+- name: Create Forwarding_Rule w/reserved static address
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ address: my-reserved-static-address-name
+ region: global
+ target: my-target-proxy
+ state: present
+'''
+
+RETURN = '''
+forwarding_rule_name:
+ description: Name of the Forwarding_Rule
+ returned: Always
+ type: str
+ sample: my-target-proxy
+forwarding_rule:
+ description: GCP Forwarding_Rule dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-forwarding_rule", "target": "..." }
+region:
+ description: Region for Forwarding Rule.
+ returned: Always
+  type: str
+  sample: global
+state:
+ description: state of the Forwarding_Rule
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-forwarding_rule'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_global_forwarding_rule_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP API.
+ :rtype ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'forwarding_rule_name')
+ if 'target' in gcp_dict:
+ gcp_dict['target'] = '%s/global/targetHttpProxies/%s' % (url,
+ gcp_dict['target'])
+ if 'address' in gcp_dict:
+ gcp_dict['IPAddress'] = '%s/global/addresses/%s' % (url,
+ gcp_dict['address'])
+ del gcp_dict['address']
+ if 'protocol' in gcp_dict:
+ gcp_dict['IPProtocol'] = gcp_dict['protocol']
+ del gcp_dict['protocol']
+ return gcp_dict
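+
+
+# Illustrative sketch (not called by the module): the Ansible params that
+# _build_global_forwarding_rule_dict() reworks into a GCP API body. 'target' and
+# 'address' are expanded into full resource URLs under the project (the address
+# under the 'IPAddress' key), and 'protocol' is renamed to 'IPProtocol'. The
+# project and resource names below are hypothetical.
+def _example_forwarding_rule_body():
+    params = {
+        'forwarding_rule_name': 'my-forwarding-rule',
+        'target': 'my-target-proxy',
+        'address': 'my-reserved-static-address-name',
+        'protocol': 'TCP',
+        'port_range': '80',
+    }
+    return _build_global_forwarding_rule_dict(params, project_id='my-project')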
+
+
+def get_global_forwarding_rule(client, name, project_id=None):
+ """
+ Get a Global Forwarding Rule from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.globalForwardingRules().get(
+ project=project_id, forwardingRule=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_global_forwarding_rule(client, params, project_id):
+ """
+ Create a new Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+ try:
+ req = client.globalForwardingRules().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_global_forwarding_rule(client, name, project_id):
+ """
+ Delete a Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.globalForwardingRules().delete(
+ project=project_id, forwardingRule=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_global_forwarding_rule(client, forwarding_rule, params, name, project_id):
+ """
+ Update a Global Forwarding_Rule. Currently, only a target can be updated.
+
+ If the forwarding_rule has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param forwarding_rule: The existing Global Forwarding Rule dict fetched from GCP.
+ :type forwarding_rule: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+
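+ # Only the target can be changed here (via setTarget()); differences in any
+ # other fields are ignored by this module.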
+ GCPUtils.are_params_equal(forwarding_rule, gcp_dict)
+ if forwarding_rule['target'] == gcp_dict['target']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.globalForwardingRules().setTarget(project=project_id,
+ forwardingRule=name,
+ body={'target': gcp_dict['target']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ forwarding_rule_name=dict(required=True),
+ region=dict(required=True),
+ target=dict(required=False),
+ address=dict(type='str', required=False),
+ protocol=dict(required=False, default='TCP', choices=['TCP']),
+ port_range=dict(required=False),
+ load_balancing_scheme=dict(
+ required=False, default='EXTERNAL', choices=['EXTERNAL']),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['forwarding_rule_name'] = module.params.get('forwarding_rule_name')
+ params['region'] = module.params.get('region')
+ params['target'] = module.params.get('target', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['port_range'] = module.params.get('port_range')
+ if module.params.get('address', None):
+ params['address'] = module.params.get('address', None)
+
+ if params['region'] != 'global':
+ # This module currently doesn't support regional rules.
+ module.fail_json(
+ msg=("%s - Only global forwarding rules currently supported. "
+ "Be sure to specify 'global' for the region option.") %
+ (params['forwarding_rule_name']))
+
+ changed = False
+ json_output = {'state': params['state']}
+ forwarding_rule = None
+ if params['region'] == 'global':
+ forwarding_rule = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ if not forwarding_rule:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown forwarding_rule: %s" %
+ (params['forwarding_rule_name']))
+ else:
+ # Create
+ changed, json_output['forwarding_rule'] = create_global_forwarding_rule(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['forwarding_rule'] = delete_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['forwarding_rule'] = update_global_forwarding_rule(client,
+ forwarding_rule=forwarding_rule,
+ params=params,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py
new file mode 100644
index 00000000..19b28653
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_healthcheck.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_healthcheck
+short_description: Create, Update or Destroy a Healthcheck.
+description:
+ - Create, Update or Destroy a Healthcheck. Currently only HTTP and
+ HTTPS Healthchecks are supported. Healthchecks are used to monitor
+ individual instances, managed instance groups and/or backend
+ services. Healthchecks are reusable.
+ - Visit
+ U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
+ for an overview of Healthchecks on GCP.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
+ API details on HTTP Healthchecks.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
+ for more details on the HTTPS Healthcheck API.
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports HTTP and HTTPS Healthchecks currently.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: >
+ Use M(google.cloud.gcp_compute_health_check), M(google.cloud.gcp_compute_http_health_check) or
+ M(google.cloud.gcp_compute_https_health_check) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ check_interval:
+ type: int
+ description:
+ - How often (in seconds) to send a health check.
+ default: 5
+ healthcheck_name:
+ type: str
+ description:
+ - Name of the Healthcheck.
+ required: true
+ healthcheck_type:
+ type: str
+ description:
+ - Type of Healthcheck.
+ required: true
+ choices: ["HTTP", "HTTPS"]
+ host_header:
+ type: str
+ description:
+ - The value of the host header in the health check request. If left
+ empty, the public IP on behalf of which this health
+ check is performed will be used.
+ default: ""
+ port:
+ type: int
+ description:
+ - The TCP port number for the health check request. The default value is
+ 443 for HTTPS and 80 for HTTP.
+ request_path:
+ type: str
+ description:
+ - The request path of the HTTPS health check request.
+ required: false
+ default: "/"
+ state:
+ type: str
+ description: State of the Healthcheck.
+ choices: ["present", "absent"]
+ default: present
+ timeout:
+ type: int
+ description:
+ - How long (in seconds) to wait for a response before claiming
+ failure. It is invalid for timeout
+ to have a greater value than check_interval.
+ default: 5
+ unhealthy_threshold:
+ type: int
+ description:
+ - A so-far healthy instance will be marked unhealthy after this
+ many consecutive failures.
+ default: 2
+ healthy_threshold:
+ type: int
+ description:
+ - A so-far unhealthy instance will be marked healthy after this
+ many consecutive successes.
+ default: 2
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - Your GCP project ID
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+ state: present
+- name: Create HTTP HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+ host_header: my-host
+ request_path: /hc
+ check_interval: 10
+ timeout: 30
+ unhealthy_threshold: 2
+ healthy_threshold: 1
+ state: present
+- name: Create HTTPS HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: "{{ https_healthcheck }}"
+ healthcheck_type: HTTPS
+ host_header: my-host
+ request_path: /hc
+ check_interval: 5
+ timeout: 5
+ unhealthy_threshold: 2
+ healthy_threshold: 1
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Healthcheck
+ returned: Always.
+ type: str
+ sample: present
+healthcheck_name:
+ description: Name of the Healthcheck
+ returned: Always
+ type: str
+ sample: my-healthcheck
+healthcheck_type:
+ description: Type of the Healthcheck
+ returned: Always
+ type: str
+ sample: HTTP
+healthcheck:
+ description: GCP Healthcheck dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-healthcheck'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_healthcheck_params(params):
+ """
+ Validate healthcheck params.
+
+ Simple validation is already handled by AnsibleModule.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ if params['timeout'] > params['check_interval']:
+ raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
+ params['timeout'], params['check_interval']))
+
+ return (True, '')
+
+
+def _build_healthcheck_dict(params):
+ """
+ Reformat services in Ansible Params for GCP.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP
+ HealthCheck (HTTP/HTTPS) API.
+ :rtype ``dict``
+ """
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
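+ # Rename Ansible-style keys to the field names expected by the GCP API.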
+ if 'timeout' in gcp_dict:
+ gcp_dict['timeoutSec'] = gcp_dict['timeout']
+ del gcp_dict['timeout']
+
+ if 'checkInterval' in gcp_dict:
+ gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
+ del gcp_dict['checkInterval']
+
+ if 'hostHeader' in gcp_dict:
+ gcp_dict['host'] = gcp_dict['hostHeader']
+ del gcp_dict['hostHeader']
+
+ if 'healthcheckType' in gcp_dict:
+ del gcp_dict['healthcheckType']
+ return gcp_dict
+
+
+def _get_req_resource(client, resource_type):
+ if resource_type == 'HTTPS':
+ return (client.httpsHealthChecks(), 'httpsHealthCheck')
+ else:
+ return (client.httpHealthChecks(), 'httpHealthCheck')
+
+
+def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
+ """
+ Get a Healthcheck from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.get(**args)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_healthcheck(client, params, project_id, resource_type='HTTP'):
+ """
+ Create a new Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ try:
+ resource, _ = _get_req_resource(client, resource_type)
+ args = {'project': project_id, 'body': gcp_dict}
+ req = resource.insert(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
+ """
+ Delete a Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.delete(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_healthcheck(client, healthcheck, params, name, project_id,
+ resource_type='HTTP'):
+ """
+ Update a Healthcheck.
+
+ If the healthcheck has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param healthcheck: The existing Healthcheck dict fetched from GCP.
+ :type healthcheck: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name, 'body': gcp_dict}
+ req = resource.update(**args)
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ healthcheck_name=dict(required=True),
+ healthcheck_type=dict(required=True,
+ choices=['HTTP', 'HTTPS']),
+ request_path=dict(required=False, default='/'),
+ check_interval=dict(required=False, type='int', default=5),
+ healthy_threshold=dict(required=False, type='int', default=2),
+ unhealthy_threshold=dict(required=False, type='int', default=2),
+ host_header=dict(required=False, type='str', default=''),
+ timeout=dict(required=False, type='int', default=5),
+ port=dict(required=False, type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+
+ params['healthcheck_name'] = module.params.get('healthcheck_name')
+ params['healthcheck_type'] = module.params.get('healthcheck_type')
+ params['request_path'] = module.params.get('request_path')
+ params['check_interval'] = module.params.get('check_interval')
+ params['healthy_threshold'] = module.params.get('healthy_threshold')
+ params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
+ params['host_header'] = module.params.get('host_header')
+ params['timeout'] = module.params.get('timeout')
+ params['port'] = module.params.get('port', None)
+ params['state'] = module.params.get('state')
+
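+ # If no port was given, fall back to the documented defaults:
+ # 80 for HTTP healthchecks and 443 for HTTPS healthchecks.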
+ if not params['port']:
+ params['port'] = 80
+ if params['healthcheck_type'] == 'HTTPS':
+ params['port'] = 443
+ try:
+ _validate_healthcheck_params(params)
+ except Exception as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ healthcheck = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+
+ if not healthcheck:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown healthcheck: %s" %
+ (params['healthcheck_name']))
+ else:
+ # Create
+ changed, json_output['healthcheck'] = create_healthcheck(client,
+ params=params,
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['healthcheck'] = delete_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ else:
+ changed, json_output['healthcheck'] = update_healthcheck(client,
+ healthcheck=healthcheck,
+ params=params,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py
new file mode 100644
index 00000000..611cee04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_target_proxy.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_target_proxy
+short_description: Create, Update or Destroy a Target_Proxy.
+description:
+ - Create, Update or Destroy a Target_Proxy. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Target_Proxy API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_target_http_proxy) instead.
+notes:
+ - Currently only supports global HTTP proxy.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ target_proxy_name:
+ type: str
+ description:
+ - Name of the Target_Proxy.
+ required: true
+ target_proxy_type:
+ type: str
+ description:
+ - Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported.
+ required: true
+ choices: [HTTP]
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url Map. Required if type is HTTP or HTTPS proxy.
+ required: false
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: str
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file' instead.
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the target proxy should be in. C(present) or C(absent) are the only valid options.
+ required: true
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HTTP Target_Proxy
+ community.general.gcp_target_proxy:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ target_proxy_name: my-target_proxy
+ target_proxy_type: HTTP
+ url_map_name: my-url-map
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Target_Proxy
+ returned: Always.
+ type: str
+ sample: present
+updated_target_proxy:
+ description: True if the target_proxy has been updated. Will not appear on
+ initial target_proxy creation.
+ returned: if the target_proxy has been updated.
+ type: bool
+ sample: true
+target_proxy_name:
+ description: Name of the Target_Proxy
+ returned: Always
+ type: str
+ sample: my-target-proxy
+target_proxy_type:
+ description: Type of Target_Proxy. One of HTTP, HTTPS or SSL.
+ returned: Always
+ type: str
+ sample: HTTP
+target_proxy:
+ description: GCP Target_Proxy dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-target-proxy", "urlMap": "..." }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-target_proxy'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_target_proxy_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+ :rtype ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
+ if 'urlMap' in gcp_dict:
+ gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
+ gcp_dict['urlMap'])
+ return gcp_dict
+
+
+def get_target_http_proxy(client, name, project_id=None):
+ """
+ Get a Target HTTP Proxy from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ req = client.targetHttpProxies().get(project=project_id,
+ targetHttpProxy=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+
+
+def create_target_http_proxy(client, params, project_id):
+ """
+ Create a new Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+ try:
+ req = client.targetHttpProxies().insert(project=project_id,
+ body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_target_http_proxy(client, name, project_id):
+ """
+ Delete a Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.targetHttpProxies().delete(
+ project=project_id, targetHttpProxy=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_target_http_proxy(client, target_proxy, params, name, project_id):
+ """
+ Update an HTTP Target_Proxy. Currently only the Url Map can be updated.
+
+ If the target_proxy has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param target_proxy: The existing Target Proxy dict fetched from GCP.
+ :type target_proxy: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+
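+ # Only the urlMap reference can be changed here (via setUrlMap()); differences
+ # in any other fields are ignored by this module.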
+ GCPUtils.are_params_equal(target_proxy, gcp_dict)
+ if target_proxy['urlMap'] == gcp_dict['urlMap']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.targetHttpProxies().setUrlMap(project=project_id,
+ targetHttpProxy=name,
+ body={"urlMap": gcp_dict['urlMap']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ target_proxy_name=dict(required=True),
+ target_proxy_type=dict(required=True, choices=['HTTP']),
+ url_map_name=dict(required=False),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['target_proxy_name'] = module.params.get('target_proxy_name')
+ params['target_proxy_type'] = module.params.get('target_proxy_type')
+ params['url_map'] = module.params.get('url_map_name', None)
+
+ changed = False
+ json_output = {'state': params['state']}
+ target_proxy = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+
+ if not target_proxy:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown target_proxy: %s" %
+ (params['target_proxy_name']))
+ else:
+ # Create
+ changed, json_output['target_proxy'] = create_target_http_proxy(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['target_proxy'] = delete_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['target_proxy'] = update_target_http_proxy(client,
+ target_proxy=target_proxy,
+ params=params,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_target_proxy'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py
new file mode 100644
index 00000000..3fc2c96b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcp_url_map.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_url_map
+short_description: Create, Update or Destroy a Url_Map.
+description:
+ - Create, Update or Destroy a Url_Map. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/url-map) for an overview.
+ More details on the Url_Map API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/urlMaps#resource).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports global Backend Services.
+ - Url_Map tests are not currently supported.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_url_map) instead.
+options:
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url_Map.
+ required: true
+ default_service:
+ type: str
+ description:
+ - Default Backend Service if no host rules match.
+ required: true
+ host_rules:
+ type: list
+ description:
+ - The list of HostRules to use against the URL. Contains
+ a list of hosts and an associated path_matcher.
+ - The 'hosts' parameter is a list of host patterns to match. They
+ must be valid hostnames, except * will match any string of
+ ([a-z0-9-.]*). In that case, * must be the first character
+ and must be followed in the pattern by either '-' or '.'.
+ - The 'path_matcher' parameter is name of the PathMatcher to use
+ to match the path portion of the URL if the hostRule matches the URL's
+ host portion.
+ required: false
+ path_matchers:
+ type: list
+ description:
+ - The list of named PathMatchers to use against the URL. Contains
+ path_rules, which is a list of paths and an associated service. A
+ default_service can also be specified for each path_matcher.
+ - The 'name' parameter is the name by which a host_rule refers to this
+ path_matcher.
+ - The 'default_service' parameter is the name of the
+ BackendService resource. This will be used if none of the path_rules
+ defined by this path_matcher is matched by the URL's path portion.
+ - The 'path_rules' parameter is a list of dictionaries containing a
+ list of paths and a service to direct traffic to. Each path item must
+ start with / and the only place a * is allowed is at the end following
+ a /. The string fed to the path matcher does not include any text after
+ the first ? or #, and those chars are not allowed here.
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the URL map should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimal Url_Map
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url_map
+ default_service: my-backend-service
+ state: present
+- name: Create UrlMap with pathmatcher
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url-map-pm
+ default_service: default-backend-service
+ path_matchers:
+ - name: 'path-matcher-one'
+ description: 'path matcher one'
+ default_service: 'bes-pathmatcher-one-default'
+ path_rules:
+ - service: 'my-one-bes'
+ paths:
+ - '/data'
+ - '/aboutus'
+ host_rules:
+ - hosts:
+ - '*.'
+ path_matcher: 'path-matcher-one'
+ state: "present"
+'''
+
+RETURN = '''
+host_rules:
+ description: List of HostRules.
+ returned: If specified.
+ type: list
+ sample: [ { hosts: ["*."], "path_matcher": "my-pm" } ]
+path_matchers:
+ description: The list of named PathMatchers to use against the URL.
+ returned: If specified.
+ type: list
+ sample: [ { "name": "my-pm", "path_rules": [ { "paths": [ "/data" ] } ], "service": "my-service" } ]
+state:
+ description: state of the Url_Map
+ returned: Always.
+ type: str
+ sample: present
+updated_url_map:
+ description: True if the url_map has been updated. Will not appear on
+ initial url_map creation.
+ returned: if the url_map has been updated.
+ type: bool
+ sample: true
+url_map_name:
+ description: Name of the Url_Map
+ returned: Always
+ type: str
+ sample: my-url-map
+url_map:
+ description: GCP Url_Map dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-url-map", "hostRules": [...], "pathMatchers": [...] }
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+from ansible.module_utils.six import string_types
+
+
+USER_AGENT_PRODUCT = 'ansible-url_map'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_params(params):
+ """
+ Validate url_map params.
+
+ This function calls _validate_host_rules_params to verify
+ the host_rules-specific parameters.
+
+ This function calls _validate_path_matchers_params to verify
+ the path_matchers-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'host_rules', 'type': list},
+ {'name': 'path_matchers', 'type': list},
+ ]
+ try:
+ check_params(params, fields)
+ if 'path_matchers' in params and params['path_matchers'] is not None:
+ _validate_path_matcher_params(params['path_matchers'])
+ if 'host_rules' in params and params['host_rules'] is not None:
+ _validate_host_rules_params(params['host_rules'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_path_matcher_params(path_matchers):
+ """
+ Validate configuration for path_matchers.
+
+ :param path_matchers: Ansible dictionary containing path_matchers
+ configuration (only).
+ :type path_matchers: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'name', 'type': str, 'required': True},
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'path_rules', 'type': list, 'required': True},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+ pr_fields = [
+ {'name': 'service', 'type': str, 'required': True},
+ {'name': 'paths', 'type': list, 'required': True},
+ ]
+
+ if not path_matchers:
+ raise ValueError(('path_matchers should be a list. %s (%s) provided'
+ % (path_matchers, type(path_matchers))))
+
+ for pm in path_matchers:
+ try:
+ check_params(pm, fields)
+ for pr in pm['path_rules']:
+ check_params(pr, pr_fields)
+ for path in pr['paths']:
+ if not path.startswith('/'):
+ raise ValueError("path for %s must start with /" % (
+ pm['name']))
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_host_rules_params(host_rules):
+ """
+ Validate configuration for host_rules.
+
+ :param host_rules: Ansible dictionary containing host_rules
+ configuration (only).
+ :type host_rules ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'path_matcher', 'type': str, 'required': True},
+ ]
+
+ if not host_rules:
+ raise ValueError('host_rules should be a list.')
+
+ for hr in host_rules:
+ try:
+ check_params(hr, fields)
+ for host in hr['hosts']:
+ if not isinstance(host, string_types):
+ raise ValueError("host in hostrules must be a string")
+ elif '*' in host:
+ if host.index('*') != 0:
+ raise ValueError("wildcard must be first char in host, %s" % (
+ host))
+ else:
+ if host[1] not in ['.', '-', ]:
+ raise ValueError("wildcard be followed by a '.' or '-', %s" % (
+ host))
+
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _build_path_matchers(path_matcher_list, project_id):
+ """
+ Reformat services in path matchers list.
+
+ Specifically, builds out URLs.
+
+ :param path_matcher_list: List of path matcher dicts from the Ansible params.
+ :type path_matcher_list: ``list`` of ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: list suitable for submission to GCP
+ UrlMap API Path Matchers list.
+ :rtype ``list`` of ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
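+ # Expand backend service names to full resource URLs for the API.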
+ for pm in path_matcher_list:
+ if 'defaultService' in pm:
+ pm['defaultService'] = '%s/global/backendServices/%s' % (url,
+ pm['defaultService'])
+ if 'pathRules' in pm:
+ for rule in pm['pathRules']:
+ if 'service' in rule:
+ rule['service'] = '%s/global/backendServices/%s' % (url,
+ rule['service'])
+ return path_matcher_list
+
+
+def _build_url_map_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+ :rtype ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'url_map_name')
+ if 'defaultService' in gcp_dict:
+ gcp_dict['defaultService'] = '%s/global/backendServices/%s' % (url,
+ gcp_dict['defaultService'])
+ if 'pathMatchers' in gcp_dict:
+ gcp_dict['pathMatchers'] = _build_path_matchers(gcp_dict['pathMatchers'], project_id)
+
+ return gcp_dict
+
+
+def get_url_map(client, name, project_id=None):
+ """
+ Get a Url_Map from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.urlMaps().get(project=project_id, urlMap=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_url_map(client, params, project_id):
+ """
+ Create a new Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+ try:
+ req = client.urlMaps().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_url_map(client, name, project_id):
+ """
+ Delete a Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.urlMaps().delete(project=project_id, urlMap=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_url_map(client, url_map, params, name, project_id):
+ """
+ Update a Url_Map.
+
+ If the url_map has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param url_map: Name of the Url Map.
+ :type url_map: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+
+ ans = GCPUtils.are_params_equal(url_map, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
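+ # The current fingerprint must be sent back with the update; GCP uses it
+ # for optimistic locking on urlMaps().update().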
+ gcp_dict['fingerprint'] = url_map['fingerprint']
+ try:
+ req = client.urlMaps().update(project=project_id,
+ urlMap=name, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ url_map_name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ default_service=dict(required=True),
+ path_matchers=dict(type='list', required=False),
+ host_rules=dict(type='list', required=False),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), required_together=[
+ ['path_matchers', 'host_rules'], ])
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['url_map_name'] = module.params.get('url_map_name')
+ params['default_service'] = module.params.get('default_service')
+ if module.params.get('path_matchers'):
+ params['path_matchers'] = module.params.get('path_matchers')
+ if module.params.get('host_rules'):
+ params['host_rules'] = module.params.get('host_rules')
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ url_map = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+
+ if not url_map:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown url_map: %s" %
+ (params['url_map_name']))
+ else:
+ # Create
+ changed, json_output['url_map'] = create_url_map(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['url_map'] = delete_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['url_map'] = update_url_map(client,
+ url_map=url_map,
+ params=params,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_url_map'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py
new file mode 100644
index 00000000..de257503
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+ - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+ For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+ - Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the
+ provided subscription name. max_messages (int; default None; max number of messages to pull),
+ message_ack (bool; default False; acknowledge the message) and return_immediately
+ (bool; default True, don't wait for messages to appear). If the messages are acknowledged,
+ changed is set to True, otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+ - If subscription is specified, we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+ # Message will be pushed; there is no check to see if the message was pushed before.
+- name: Create a topic and publish a message to it
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+ # Setting state to absent would keep the messages from being sent.
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.general.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+ mykey2: myvalue2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ ack_deadline: "60"
+ push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+### Topic will not be deleted
+- name: Delete subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ - name: mysub
+ state: absent
+
+# only pull keyword is required.
+- name: Pull messages from subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
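+ # Publish all messages in a single batch; optional per-message attributes
+ # are passed through as keyword arguments.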
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+ batch.publish(bytes(msg), **attrs)
+ return True
+
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+ message_ack = pull_params.get('message_ack', False)  # documented default: do not acknowledge
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
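+ # The literal string "None" switches the subscription back to pull
+ # delivery (see the push_endpoint documentation above).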
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_facts.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+  - The C(list) state enables the user to list topics or subscriptions in the project. See examples for details.

+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+  - The C(list) state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
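Since the module only lists resources and always reports changed=False, the topics or subscriptions lists documented in RETURN above are typically registered and consumed by later tasks. A short sketch, using placeholder names:

- name: List subscriptions attached to a topic
  community.general.gcpubsub_info:
    view: subscriptions
    topic: my-topic
    state: list
  register: pubsub_info

- name: Show the subscription names returned by the module
  ansible.builtin.debug:
    var: pubsub_info.subscriptions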
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py
new file mode 100644
index 00000000..e88fc26b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/google/gcspanner.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcspanner
+short_description: Create and Delete Instances/Databases on Spanner
+description:
+ - Create and Delete Instances/Databases on Spanner.
+ See U(https://cloud.google.com/spanner/docs) for an overview.
+requirements:
+ - python >= 2.6
+ - google-auth >= 0.5.0
+ - google-cloud-spanner >= 0.23.0
+notes:
+ - Changing the configuration on an existing instance is not supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_spanner_database) and/or M(google.cloud.gcp_spanner_instance) instead.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ configuration:
+ type: str
+ description:
+ - Configuration the instance should use.
+ - Examples are us-central1, asia-east1 and europe-west1.
+ required: yes
+ instance_id:
+ type: str
+ description:
+ - GCP spanner instance name.
+ required: yes
+ database_name:
+ type: str
+ description:
+ - Name of database contained on the instance.
+ force_instance_delete:
+ description:
+ - To delete an instance, this argument must exist and be true (along with state being equal to absent).
+ type: bool
+ default: 'no'
+ instance_display_name:
+ type: str
+ description:
+ - Name of Instance to display.
+ - If not specified, instance_id will be used instead.
+ node_count:
+ type: int
+ description:
+ - Number of nodes in the instance.
+ default: 1
+ state:
+ type: str
+ description:
+ - State of the instance or database. Applies to the most granular resource.
+ - If a C(database_name) is specified we remove it.
+ - If only C(instance_id) is specified, that is what is removed.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: Create instance
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: present
+ node_count: 1
+
+- name: Create database
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ database_name: '{{ database_name }}'
+ state: present
+
+- name: Delete instance (and all databases)
+  community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: absent
+ force_instance_delete: yes
+'''
+
+RETURN = '''
+state:
+ description: The state of the instance or database. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+database_name:
+ description: Name of database.
+ returned: When database name is specified
+ type: str
+ sample: "mydatabase"
+
+instance_id:
+ description: Name of instance.
+ returned: Always
+ type: str
+ sample: "myinstance"
+
+previous_values:
+ description: List of dictionaries containing previous values prior to update.
+ returned: When an instance update has occurred and a field has been modified.
+ type: dict
+ sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
+
+updated:
+ description: Boolean field to denote an update has occurred.
+ returned: When an update has occurred.
+ type: bool
+ sample: True
+'''
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import spanner
+ from google.gax.errors import GaxError
+ HAS_GOOGLE_CLOUD_SPANNER = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_SPANNER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+from ansible.module_utils.six import string_types
+
+
+CLOUD_CLIENT = 'google-cloud-spanner'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
+
+
+def get_spanner_configuration_name(config_name, project_name):
+ config_name = 'projects/%s/instanceConfigs/regional-%s' % (project_name,
+ config_name)
+ return config_name
+
+
+def instance_update(instance):
+ """
+ Call update method on spanner client.
+
+ Note: A ValueError exception is thrown despite the client succeeding.
+ So, we validate the node_count and instance_display_name parameters and then
+ ignore the ValueError exception.
+
+ :param instance: a Spanner instance object
+ :type instance: class `google.cloud.spanner.Instance`
+
+ :returns True on success, raises ValueError on type error.
+ :rtype ``bool``
+ """
+ errmsg = ''
+ if not isinstance(instance.node_count, int):
+ errmsg = 'node_count must be an integer %s (%s)' % (
+ instance.node_count, type(instance.node_count))
+ if instance.display_name and not isinstance(instance.display_name,
+ string_types):
+        errmsg = 'instance_display_name must be a string %s (%s)' % (
+ instance.display_name, type(instance.display_name))
+ if errmsg:
+ raise ValueError(errmsg)
+
+ try:
+ instance.update()
+ except ValueError:
+ # The ValueError here is the one we 'expect'.
+ pass
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ database_name=dict(type='str'),
+ configuration=dict(type='str', required=True),
+ node_count=dict(type='int', default=1),
+ instance_display_name=dict(type='str'),
+ force_instance_delete=dict(type='bool', default=False),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_SPANNER:
+ module.fail_json(msg="Please install google-cloud-spanner.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" %
+ (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['instance_id'] = module.params.get('instance_id')
+ mod_params['database_name'] = module.params.get('database_name')
+ mod_params['configuration'] = module.params.get('configuration')
+ mod_params['node_count'] = module.params.get('node_count', None)
+ mod_params['instance_display_name'] = module.params.get('instance_display_name')
+ mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
+
+ creds, params = get_google_cloud_credentials(module)
+ spanner_client = spanner.Client(project=params['project_id'],
+ credentials=creds,
+ user_agent=CLOUD_CLIENT_USER_AGENT)
+ changed = False
+ json_output = {}
+
+ i = None
+ if mod_params['instance_id']:
+ config_name = get_spanner_configuration_name(
+ mod_params['configuration'], params['project_id'])
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name)
+ d = None
+ if mod_params['database_name']:
+ # TODO(supertom): support DDL
+ ddl_statements = ''
+ d = i.database(mod_params['database_name'], ddl_statements)
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If database is specified
+ # we remove it. If only instance is specified, that is what is removed.
+ if d is not None and d.exists():
+ d.drop()
+ changed = True
+ else:
+ if i.exists():
+ if mod_params['force_instance_delete']:
+ i.delete()
+ else:
+ module.fail_json(
+ msg=(("Cannot delete Spanner instance: "
+ "'force_instance_delete' argument not specified")))
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not i.exists():
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name,
+ display_name=mod_params['instance_display_name'],
+ node_count=mod_params['node_count'] or 1)
+ i.create()
+ changed = True
+ else:
+ # update instance
+ i.reload()
+ inst_prev_vals = {}
+ if i.display_name != mod_params['instance_display_name']:
+ inst_prev_vals['instance_display_name'] = i.display_name
+ i.display_name = mod_params['instance_display_name']
+ if mod_params['node_count']:
+ if i.node_count != mod_params['node_count']:
+ inst_prev_vals['node_count'] = i.node_count
+ i.node_count = mod_params['node_count']
+ if inst_prev_vals:
+ changed = instance_update(i)
+ json_output['updated'] = changed
+ json_output['previous_values'] = {'instance': inst_prev_vals}
+ if d:
+ if not d.exists():
+ d.create()
+ d.reload()
+ changed = True
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
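When the instance already exists, main() reloads it and, if instance_display_name or node_count differ, calls instance_update() and reports the old values under previous_values together with the updated flag. A hedged sketch of such an in-place update (the display name and node count are illustrative):

- name: Rename and scale an existing Spanner instance
  community.general.gcspanner:
    instance_id: '{{ instance_id }}'
    configuration: '{{ configuration }}'
    instance_display_name: my-renamed-instance
    node_count: 3
    state: present
  register: spanner_result

- name: Show the values that were replaced by the update
  ansible.builtin.debug:
    var: spanner_result.previous_values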
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
new file mode 100644
index 00000000..276b5b12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: "Add or delete app collaborators on Heroku"
+description:
+ - Manages collaborators for Heroku apps.
+  - If set to C(present) and the heroku user is already a collaborator, then do nothing.
+  - If set to C(present) and the heroku user is not a collaborator, then add the user to the app.
+  - If set to C(absent) and the heroku user is a collaborator, then delete the user from the app.
+author:
+ - Marcel Arns (@marns93)
+requirements:
+ - heroku3
+options:
+ api_key:
+ type: str
+ description:
+ - Heroku API key
+ apps:
+ type: list
+ description:
+ - List of Heroku App names
+ required: true
+ suppress_invitation:
+ description:
+ - Suppress email invitation when creating collaborator
+ type: bool
+ default: "no"
+ user:
+ type: str
+ description:
+ - User ID or e-mail
+ required: true
+ state:
+ type: str
+ description:
+ - Create or remove the heroku collaborator
+ choices: ["present", "absent"]
+ default: "present"
+notes:
+  - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
+ - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+'''
+
+EXAMPLES = '''
+- name: Create a heroku collaborator
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: max.mustermann@example.com
+ apps: heroku-example-app
+ state: present
+
+- name: An example of using the module in loop
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: '{{ item.user }}'
+ apps: '{{ item.apps | default(apps) }}'
+ suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
+ state: '{{ item.state | default("present") }}'
+ with_items:
+ - { user: 'a.b@example.com' }
+ - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
+ - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
+
+
+def add_or_delete_heroku_collaborator(module, client):
+ user = module.params['user']
+ state = module.params['state']
+ affected_apps = []
+ result_state = False
+
+ for app in module.params['apps']:
+ if app not in client.apps():
+ module.fail_json(msg='App {0} does not exist'.format(app))
+
+ heroku_app = client.apps()[app]
+
+ heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
+
+ if state == 'absent' and user in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.remove_collaborator(user)
+ affected_apps += [app]
+ result_state = True
+ elif state == 'present' and user not in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
+ affected_apps += [app]
+ result_state = True
+
+ return result_state, affected_apps
+
+
+def main():
+ argument_spec = HerokuHelper.heroku_argument_spec()
+ argument_spec.update(
+ user=dict(required=True, type='str'),
+ apps=dict(required=True, type='list'),
+ suppress_invitation=dict(default=False, type='bool'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HerokuHelper(module).get_heroku_client()
+
+ has_changed, msg = add_or_delete_heroku_collaborator(module, client)
+ module.exit_json(changed=has_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
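Because the module supports check mode and returns the affected app names in msg, a dry run can preview which apps would change, as the notes in DOCUMENTATION suggest. A small sketch (the api_key variable is a placeholder):

- name: Preview which apps would gain the collaborator
  community.general.heroku_collaborator:
    api_key: '{{ heroku_api_key }}'
    user: a.b@example.com
    apps:
      - heroku-example-app
    state: present
  check_mode: yes
  register: heroku_preview

- name: Apps that would be affected
  ansible.builtin.debug:
    var: heroku_preview.msg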
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
new file mode 100644
index 00000000..3d4ba84b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
@@ -0,0 +1,2135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+ - instance management.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ required: true
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ required: true
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ required: true
+ name:
+ description:
+            - Specifies the ECS name. The name consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.).
+ type: str
+ required: true
+ nics:
+ description:
+            - Specifies the NIC information of the ECS. Constraints: the
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ required: true
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ required: true
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ required: true
+ suboptions:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+                    - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ required: true
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+              Administrator. Password complexity requirements: 8 to
+ 26 characters. The password must contain at least three of the
+ following character types 'uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ required: false
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ required: true
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ required: false
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be
+ assigned.
+ type: str
+ required: false
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this
+ parameter is left blank, the default security group is bound to
+ the ECS by default.
+ type: list
+ elements: str
+ required: false
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ required: false
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ required: false
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ required: false
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with
+ base64. The maximum size of the content to be injected (before
+ encoding) is 32 KB. For Linux ECSs, this parameter does not take
+ effect when adminPass is used.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an ecs instance
+- name: Create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a eip
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ register: eip
+- name: Create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ register: disk
+- name: Create an instance
+ community.general.hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+'''
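The timeouts option described above takes per-operation durations as minute strings (default '30m'); the module converts them with int(value.rstrip('m')) * 60, so only minute-suffixed values are meaningful. A sketch extending the create example with longer timeouts (the values are illustrative):

- name: Create an instance with longer create/delete timeouts
  community.general.hwc_ecs_instance:
    timeouts:
      create: '60m'
      delete: '45m'
    availability_zone: "cn-north-1a"
    flavor_name: "s3.small.1"
    image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
    name: "ansible_ecs_instance_test"
    vpc_id: "{{ vpc.id }}"
    nics:
      - subnet_id: "{{ subnet.id }}"
        ip_address: "192.168.100.33"
    root_volume:
      volume_type: "SAS"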
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ returned: success
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the ECS name. Value requirements "Consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.)".
+ type: str
+ returned: success
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ returned: success
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID corresponding to the IP address.
+ type: str
+ returned: success
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ returned: success
+ contains:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+                - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ returned: success
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+              Administrator. Password complexity requirements: 8 to
+ 26 characters. The password must contain at least three of the
+ following character types "uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ returned: success
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ returned: success
+ contains:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ returned: success
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be assigned.
+ type: str
+ returned: success
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this parameter is left
+ blank, the default security group is bound to the ECS by default.
+ type: list
+ returned: success
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ returned: success
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ returned: success
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ returned: success
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with base64. The maximum
+ size of the content to be injected (before encoding) is 32 KB. For
+ Linux ECSs, this parameter does not take effect when adminPass is
+ used.
+ type: str
+ returned: success
+ config_drive:
+ description:
+ - Specifies the configuration driver.
+ type: str
+ returned: success
+ created:
+ description:
+ - Specifies the time when an ECS was created.
+ type: str
+ returned: success
+ disk_config_type:
+ description:
+            - Specifies the disk configuration type. MANUAL means the image
+              space is not expanded. AUTO means the image space of the system
+              disk will be expanded to the same size as the flavor.
+ type: str
+ returned: success
+ host_name:
+ description:
+ - Specifies the host name of the ECS.
+ type: str
+ returned: success
+ image_name:
+ description:
+ - Specifies the image name of the ECS.
+ type: str
+ returned: success
+ power_state:
+ description:
+ - Specifies the power status of the ECS.
+ type: int
+ returned: success
+ server_alias:
+ description:
+ - Specifies the ECS alias.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+ REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+ and DELETED.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ flavor_name=dict(type='str', required=True),
+ image_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nics=dict(
+ type='list', required=True, elements='dict',
+ options=dict(
+ ip_address=dict(type='str', required=True),
+ subnet_id=dict(type='str', required=True)
+ ),
+ ),
+ root_volume=dict(type='dict', required=True, options=dict(
+ volume_type=dict(type='str', required=True),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ )),
+ vpc_id=dict(type='str', required=True),
+ admin_pass=dict(type='str', no_log=True),
+ data_volumes=dict(type='list', elements='dict', options=dict(
+ volume_id=dict(type='str', required=True),
+ device=dict(type='str')
+ )),
+ description=dict(type='str'),
+ eip_id=dict(type='str'),
+ enable_auto_recovery=dict(type='bool'),
+ enterprise_project_id=dict(type='str'),
+ security_groups=dict(type='list', elements='str'),
+ server_metadata=dict(type='dict'),
+ server_tags=dict(type='dict'),
+ ssh_key_name=dict(type='str'),
+ user_data=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "ecs")
+
+ try:
+ _init(config)
+ is_exist = module.params['id']
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params['id']:
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "admin_pass": module.params.get("admin_pass"),
+ "availability_zone": module.params.get("availability_zone"),
+ "data_volumes": module.params.get("data_volumes"),
+ "description": module.params.get("description"),
+ "eip_id": module.params.get("eip_id"),
+ "enable_auto_recovery": module.params.get("enable_auto_recovery"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "flavor_name": module.params.get("flavor_name"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "nics": module.params.get("nics"),
+ "root_volume": module.params.get("root_volume"),
+ "security_groups": module.params.get("security_groups"),
+ "server_metadata": module.params.get("server_metadata"),
+ "server_tags": module.params.get("server_tags"),
+ "ssh_key_name": module.params.get("ssh_key_name"),
+ "user_data": module.params.get("user_data"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait(config, r, client, timeout)
+
+ sub_job_identity = {
+ "job_type": "createSingleServer",
+ }
+ for item in navigate_value(obj, ["entities", "sub_jobs"]):
+ for k, v in sub_job_identity.items():
+ if item[k] != v:
+ break
+ else:
+ obj = item
+ break
+ else:
+ raise Exception("Can't find the sub job")
+ module.params['id'] = navigate_value(obj, ["entities", "server_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ client = config.client(get_region(module), "ecs", "project")
+
+ params = build_delete_nics_parameters(expect_state)
+ params1 = build_delete_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_delete_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ params = build_set_auto_recovery_parameters(expect_state)
+ params1 = build_set_auto_recovery_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_set_auto_recovery_request(module, params, client)
+
+ params = build_attach_nics_parameters(expect_state)
+ params1 = build_attach_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_attach_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ multi_invoke_delete_volume(config, expect_state, client, timeout)
+
+ multi_invoke_attach_data_disk(config, expect_state, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_delete_parameters(opts)
+ if params:
+ r = send_delete_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ preprocess_read_response(r)
+ res["read"] = fill_read_resp_body(r)
+
+ r = send_read_auto_recovery_request(module, client)
+ res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
+
+ return res, None
+
+
+def preprocess_read_response(resp):
+ v = resp.get("os-extended-volumes:volumes_attached")
+ if v and isinstance(v, list):
+ for i in range(len(v)):
+ if v[i].get("bootIndex") == "0":
+ root_volume = v[i]
+
+ if (i + 1) != len(v):
+ v[i] = v[-1]
+
+ v.pop()
+
+ resp["root_volume"] = root_volume
+ break
+
+ v = resp.get("addresses")
+ if v:
+ rv = {}
+ eips = []
+ for val in v.values():
+ for item in val:
+ if item["OS-EXT-IPS:type"] == "floating":
+ eips.append(item)
+ else:
+ rv[item["OS-EXT-IPS:port_id"]] = item
+
+ for item in eips:
+ k = item["OS-EXT-IPS:port_id"]
+ if k in rv:
+ rv[k]["eip_address"] = item.get("addr", "")
+ else:
+ rv[k] = item
+ item["eip_address"] = item.get("addr", "")
+ item["addr"] = ""
+
+ resp["address"] = rv.values()
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ adjust_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "enterprise_project_id=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={offset}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "cloudservers/detail" + query_link
+
+ result = []
+ p = {'offset': 1}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ adjust_list_resp(identity_obj, item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['offset'] += 1
+
+ return result
+
+
+def build_delete_nics_parameters(opts):
+ params = dict()
+
+ v = expand_delete_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_delete_nics_nics(d, array_index):
+ cv = d["current_state"].get("nics")
+ if not cv:
+ return None
+
+ val = cv
+
+ ev = d.get("nics")
+ if ev:
+ m = [item.get("ip_address") for item in ev]
+ val = [item for item in cv if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("port_id")
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_delete_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics/delete")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_set_auto_recovery_parameters(opts):
+ params = dict()
+
+ v = expand_set_auto_recovery_support_auto_recovery(opts, None)
+ if v is not None:
+ params["support_auto_recovery"] = v
+
+ return params
+
+
+def expand_set_auto_recovery_support_auto_recovery(d, array_index):
+ v = navigate_value(d, ["enable_auto_recovery"], None)
+ return None if v is None else str(v).lower()
+
+
+def send_set_auto_recovery_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(set_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_pass"], None)
+ if not is_empty_value(v):
+ params["adminPass"] = v
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = expand_create_extendparam(opts, None)
+ if not is_empty_value(v):
+ params["extendparam"] = v
+
+ v = navigate_value(opts, ["flavor_name"], None)
+ if not is_empty_value(v):
+ params["flavorRef"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = navigate_value(opts, ["ssh_key_name"], None)
+ if not is_empty_value(v):
+ params["key_name"] = v
+
+ v = navigate_value(opts, ["server_metadata"], None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ v = expand_create_root_volume(opts, None)
+ if not is_empty_value(v):
+ params["root_volume"] = v
+
+ v = expand_create_security_groups(opts, None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ v = expand_create_server_tags(opts, None)
+ if not is_empty_value(v):
+ params["server_tags"] = v
+
+ v = navigate_value(opts, ["user_data"], None)
+ if not is_empty_value(v):
+ params["user_data"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpcid"] = v
+
+ if not params:
+ return params
+
+ params = {"server": params}
+
+ return params
+
+
+def expand_create_extendparam(d, array_index):
+ r = dict()
+
+ r["chargingMode"] = 0
+
+ v = navigate_value(d, ["enterprise_project_id"], array_index)
+ if not is_empty_value(v):
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(d, ["enable_auto_recovery"], array_index)
+ if not is_empty_value(v):
+ r["support_auto_recovery"] = v
+
+ return r
+
+
+def expand_create_nics(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ v = navigate_value(
+ d, ["nics"], new_ai)
+
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_ai["nics"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["nics", "ip_address"], new_ai)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["nics", "subnet_id"], new_ai)
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["eip_id"], array_index)
+ if not is_empty_value(v):
+ r["id"] = v
+
+ return r
+
+
+def expand_create_root_volume(d, array_index):
+ r = dict()
+
+ v = expand_create_root_volume_extendparam(d, array_index)
+ if not is_empty_value(v):
+ r["extendparam"] = v
+
+ v = navigate_value(d, ["root_volume", "size"], array_index)
+ if not is_empty_value(v):
+ r["size"] = v
+
+ v = navigate_value(d, ["root_volume", "volume_type"], array_index)
+ if not is_empty_value(v):
+ r["volumetype"] = v
+
+ return r
+
+
+def expand_create_root_volume_extendparam(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
+ if not is_empty_value(v):
+ r["snapshotId"] = v
+
+ return r
+
+
+def expand_create_security_groups(d, array_index):
+ v = d.get("security_groups")
+ if not v:
+ return None
+
+ return [{"id": i} for i in v]
+
+
+def expand_create_server_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [{"key": k, "value": v1} for k, v1 in v.items()]
+
+
+def send_create_request(module, params, client):
+ url = "cloudservers"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_nics_parameters(opts):
+ params = dict()
+
+ v = expand_attach_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_attach_nics_nics(d, array_index):
+ ev = d.get("nics")
+ if not ev:
+ return None
+
+ val = ev
+
+ cv = d["current_state"].get("nics")
+ if cv:
+ m = [item.get("ip_address") for item in cv]
+ val = [item for item in ev if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("ip_address")
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = item.get("subnet_id")
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_attach_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_volume_request(module, params, client, info):
+ path_parameters = {
+ "volume_id": ["volume_id"],
+ }
+ data = dict((key, navigate_value(info, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_volume), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_data_disk_parameters(opts, array_index):
+ params = dict()
+
+ v = expand_attach_data_disk_volume_attachment(opts, array_index)
+ if not is_empty_value(v):
+ params["volumeAttachment"] = v
+
+ return params
+
+
+def expand_attach_data_disk_volume_attachment(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["data_volumes", "device"], array_index)
+ if not is_empty_value(v):
+ r["device"] = v
+
+ v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
+ if not is_empty_value(v):
+ r["volumeId"] = v
+
+ return r
+
+
+def send_attach_data_disk_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/attachvolume")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_data_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_delete_parameters(opts):
+ params = dict()
+
+ params["delete_publicip"] = False
+
+ params["delete_volume"] = False
+
+ v = expand_delete_servers(opts, None)
+ if not is_empty_value(v):
+ params["servers"] = v
+
+ return params
+
+
+def expand_delete_servers(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = expand_delete_servers_id(d, new_ai)
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_delete_servers_id(d, array_index):
+ return d["ansible_module"].params.get("id")
+
+
+def send_delete_request(module, params, client):
+ url = "cloudservers/delete"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "jobs/{job_id}", result)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_ecs_instance): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def multi_invoke_delete_volume(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = None
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in expect]
+ opts1 = {
+ "data_volumes": [
+ i for i in current if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ r = send_delete_volume_request(module, None, client, loop_val[i])
+ async_wait(config, r, client, timeout)
+
+
+def multi_invoke_attach_data_disk(config, opts, client, timeout):
+ module = config.module
+
+ opts1 = opts
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in current]
+ opts1 = {
+ "data_volumes": [
+ i for i in expect if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
+ r = send_attach_data_disk_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def send_read_request(module, client):
+ url = build_path(module, "cloudservers/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["server"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ v = fill_read_resp_address(body.get("address"))
+ result["address"] = v
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_read_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_os_extended_volumes_volumes_attached(
+ body.get("os-extended-volumes:volumes_attached"))
+ result["os-extended-volumes:volumes_attached"] = v
+
+ v = fill_read_resp_root_volume(body.get("root_volume"))
+ result["root_volume"] = v
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_read_resp_address(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
+
+ val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
+
+ val["addr"] = item.get("addr")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["image_name"] = value.get("image_name")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_os_extended_volumes_volumes_attached(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["bootIndex"] = item.get("bootIndex")
+
+ val["device"] = item.get("device")
+
+ val["id"] = item.get("id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_root_volume(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["device"] = value.get("device")
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def send_read_auto_recovery_request(module, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def fill_read_auto_recovery_resp_body(body):
+ result = dict()
+
+ result["support_auto_recovery"] = body.get("support_auto_recovery")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "config_drive"], array_index)
+ r["config_drive"] = v
+
+ v = navigate_value(response, ["read", "created"], array_index)
+ r["created"] = v
+
+ v = flatten_data_volumes(response, array_index)
+ r["data_volumes"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
+ r["disk_config_type"] = v
+
+ v = flatten_enable_auto_recovery(response, array_index)
+ r["enable_auto_recovery"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "flavor", "id"], array_index)
+ r["flavor_name"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
+ r["host_name"] = v
+
+ v = navigate_value(response, ["read", "image", "id"], array_index)
+ r["image_id"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "image_name"], array_index)
+ r["image_name"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = flatten_nics(response, array_index)
+ r["nics"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-STS:power_state"], array_index)
+ r["power_state"] = v
+
+ v = flatten_root_volume(response, array_index)
+ r["root_volume"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+ r["server_alias"] = v
+
+ v = flatten_server_tags(response, array_index)
+ r["server_tags"] = v
+
+ v = navigate_value(response, ["read", "key_name"], array_index)
+ r["ssh_key_name"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+ r["user_data"] = v
+
+ v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def flatten_data_volumes(d, array_index):
+ v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+ val = dict()
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+ val["volume_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+ v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+ array_index)
+ return v == "true"
+
+
+def flatten_nics(d, array_index):
+ v = navigate_value(d, ["read", "address"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.address"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "address", "addr"], new_ai)
+ val["ip_address"] = v
+
+ v = navigate_value(
+ d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+ val["port_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+ result = dict()
+
+ v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+ result["device"] = v
+
+ v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+ result["volume_id"] = v
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return None
+
+
+def flatten_server_tags(d, array_index):
+ v = navigate_value(d, ["read", "tags"], array_index)
+ if not v:
+ return None
+
+ r = dict()
+ for item in v:
+        v1 = item.split("=", 1)
+        if len(v1) == 2:
+            r[v1[0]] = v1[1]
+ return r
+
+
+def adjust_options(opts, states):
+ adjust_data_volumes(opts, states)
+
+ adjust_nics(opts, states)
+
+
+def adjust_data_volumes(parent_input, parent_cur):
+ iv = parent_input.get("data_volumes")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("data_volumes")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["volume_id"] != icv["volume_id"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(data_volumes) failed, "
+ "the array number is not equal")
+
+ parent_cur["data_volumes"] = result
+
+
+def adjust_nics(parent_input, parent_cur):
+ iv = parent_input.get("nics")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("nics")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["ip_address"] != icv["ip_address"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(nics) failed, "
+ "the array number is not equal")
+
+ parent_cur["nics"] = result
+
+
+def set_unreadable_options(opts, states):
+ states["admin_pass"] = opts.get("admin_pass")
+
+ states["eip_id"] = opts.get("eip_id")
+
+ set_unread_nics(
+ opts.get("nics"), states.get("nics"))
+
+ set_unread_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ states["security_groups"] = opts.get("security_groups")
+
+ states["server_metadata"] = opts.get("server_metadata")
+
+
+def set_unread_nics(inputv, curv):
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ if not (curv and isinstance(curv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ cv["subnet_id"] = iv.get("subnet_id")
+
+
+def set_unread_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ curv["size"] = inputv.get("size")
+
+ curv["snapshot_id"] = inputv.get("snapshot_id")
+
+ curv["volume_type"] = inputv.get("volume_type")
+
+
+def set_readonly_options(opts, states):
+ opts["config_drive"] = states.get("config_drive")
+
+ opts["created"] = states.get("created")
+
+ opts["disk_config_type"] = states.get("disk_config_type")
+
+ opts["host_name"] = states.get("host_name")
+
+ opts["image_name"] = states.get("image_name")
+
+ set_readonly_nics(
+ opts.get("nics"), states.get("nics"))
+
+ opts["power_state"] = states.get("power_state")
+
+ set_readonly_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ opts["server_alias"] = states.get("server_alias")
+
+ opts["status"] = states.get("status")
+
+
+def set_readonly_nics(inputv, curv):
+ if not (curv and isinstance(curv, list)):
+ return
+
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ iv["port_id"] = cv.get("port_id")
+
+
+def set_readonly_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ inputv["device"] = curv.get("device")
+
+ inputv["volume_id"] = curv.get("volume_id")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["servers"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = None
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["OS-EXT-AZ:availability_zone"] = v
+
+ result["OS-EXT-SRV-ATTR:hostname"] = None
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = None
+
+ v = navigate_value(all_opts, ["user_data"], None)
+ result["OS-EXT-SRV-ATTR:user_data"] = v
+
+ result["OS-EXT-STS:power_state"] = None
+
+ result["config_drive"] = None
+
+ result["created"] = None
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ v = expand_list_flavor(all_opts, None)
+ result["flavor"] = v
+
+ result["id"] = None
+
+ v = expand_list_image(all_opts, None)
+ result["image"] = v
+
+ v = navigate_value(all_opts, ["ssh_key_name"], None)
+ result["key_name"] = v
+
+ v = expand_list_metadata(all_opts, None)
+ result["metadata"] = v
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["status"] = None
+
+ v = expand_list_tags(all_opts, None)
+ result["tags"] = v
+
+ return result
+
+
+def expand_list_flavor(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["flavor_name"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_image(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [k + "=" + v1 for k, v1 in v.items()]
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_list_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_list_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def adjust_list_resp(opts, resp):
+ adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+ iv = parent_input.get("tags")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("tags")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ result = []
+ for iiv in iv:
+ if iiv not in cv:
+ break
+
+ result.append(iiv)
+
+ j = cv.index(iiv)
+ cv[j] = cv[-1]
+ cv.pop()
+
+ if cv:
+ result.extend(cv)
+ parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
new file mode 100644
index 00000000..4aec1b94
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
@@ -0,0 +1,1210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+    - Block storage management.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+      - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+      - The timeouts for each operation.
+    type: dict
+    suboptions:
+      create:
+        description:
+          - The timeout for the create operation.
+        type: str
+        default: '30m'
+      update:
+        description:
+          - The timeout for the update operation.
+        type: str
+        default: '30m'
+      delete:
+        description:
+          - The timeout for the delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ required: true
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, the
+ disk will fail to create. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ required: true
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ required: false
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ required: false
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation command is supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ required: false
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ required: false
+ encryption_id:
+ description:
+      - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ required: false
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ required: false
+ size:
+ description:
+      - Specifies the disk size, in GB. The value ranges from 1 GB to 1024 GB
+        for system disks and from 10 GB to 32768 GB for data disks. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
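+
+# An additional illustrative sketch: creating a disk from an existing snapshot
+# and setting a custom create timeout in the 'Nm' format. The snapshot_id is a
+# placeholder, and volume_type is assumed to match the snapshot's source disk.
+- name: Create a disk from a snapshot
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_from_snapshot"
+    volume_type: "SATA"
+    size: 10
+    snapshot_id: "00000000-0000-0000-0000-000000000000"
+    timeouts:
+      create: "45m"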
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ returned: success
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, the
+ disk will fail to create. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ returned: success
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ returned: success
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ returned: success
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+ media. SCSI reservation command is supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ returned: success
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ returned: success
+ encryption_id:
+ description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ returned: success
+ size:
+ description:
+            - Specifies the disk size, in GB. The value ranges from 1 GB to
+              1024 GB for system disks and from 10 GB to 32768 GB for data
+              disks. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ returned: success
+ attachments:
+ description:
+ - Specifies the disk attachment information.
+ type: complex
+ returned: success
+ contains:
+ attached_at:
+ description:
+ - Specifies the time when the disk was attached. Time
+ format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ attachment_id:
+ description:
+ - Specifies the ID of the attachment information.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the device name.
+ type: str
+ returned: success
+ server_id:
+ description:
+ - Specifies the ID of the server to which the disk is
+ attached.
+ type: str
+ returned: success
+ backup_policy_id:
+ description:
+ - Specifies the backup policy ID.
+ type: str
+ returned: success
+ created_at:
+ description:
+ - Specifies the time when the disk was created. Time format is 'UTC
+ YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ is_bootable:
+ description:
+ - Specifies whether the disk is bootable.
+ type: bool
+ returned: success
+ is_readonly:
+ description:
+ - Specifies whether the disk is read-only or read/write. True
+ indicates that the disk is read-only. False indicates that the
+ disk is read/write.
+ type: bool
+ returned: success
+ source_volume_id:
+ description:
+ - Specifies the source disk ID. This parameter has a value if the
+ disk is created from a source disk.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the disk status.
+ type: str
+ returned: success
+ tags:
+ description:
+ - Specifies the disk tags.
+ type: dict
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ volume_type=dict(type='str', required=True),
+ backup_id=dict(type='str'),
+ description=dict(type='str'),
+ enable_full_clone=dict(type='bool'),
+ enable_scsi=dict(type='bool'),
+ enable_share=dict(type='bool'),
+ encryption_id=dict(type='str'),
+ enterprise_project_id=dict(type='str'),
+ image_id=dict(type='str'),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "evs")
+
+ try:
+ _init(config)
+ is_exist = module.params.get('id')
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params.get('id'):
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("find more than one resources(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "backup_id": module.params.get("backup_id"),
+ "description": module.params.get("description"),
+ "enable_full_clone": module.params.get("enable_full_clone"),
+ "enable_scsi": module.params.get("enable_scsi"),
+ "enable_share": module.params.get("enable_share"),
+ "encryption_id": module.params.get("encryption_id"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "size": module.params.get("size"),
+ "snapshot_id": module.params.get("snapshot_id"),
+ "volume_type": module.params.get("volume_type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ obj = async_wait(config, r, client1, timeout)
+ module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+
+ params = build_update_parameters(expect_state)
+ params1 = build_update_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_update_request(module, params, client)
+
+ params = build_extend_disk_parameters(expect_state)
+ params1 = build_extend_disk_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ client1 = config.client(get_region(module), "evsv2.1", "project")
+ r = send_extend_disk_request(module, params, client1)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client1, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ r = send_delete_request(module, None, client)
+
+ client = config.client(get_region(module), "volume", "project")
+ client.endpoint = client.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return res, None
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enable_share"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "multiattach=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["availability_zone"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "availability_zone=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={start}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ opts = user_input_parameters(module)
+ name = module.params.get("name")
+ query_link = _build_query_link(opts)
+ link = "os-vendor-volumes/detail" + query_link
+
+ result = []
+ p = {'start': 0}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ if name == item.get("name"):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['start'] += len(r)
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["backup_id"], None)
+ if not is_empty_value(v):
+ params["backup_id"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = expand_create_metadata(opts, None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["enable_share"], None)
+ if not is_empty_value(v):
+ params["multiattach"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["size"], None)
+ if not is_empty_value(v):
+ params["size"] = v
+
+ v = navigate_value(opts, ["snapshot_id"], None)
+ if not is_empty_value(v):
+ params["snapshot_id"] = v
+
+ v = navigate_value(opts, ["volume_type"], None)
+ if not is_empty_value(v):
+ params["volume_type"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def expand_create_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ if not is_empty_value(v):
+ r["__system__cmkid"] = v
+
+ v = expand_create_metadata_system_encrypted(d, array_index)
+ if not is_empty_value(v):
+ r["__system__encrypted"] = v
+
+ v = expand_create_metadata_full_clone(d, array_index)
+ if not is_empty_value(v):
+ r["full_clone"] = v
+
+ v = expand_create_metadata_hw_passthrough(d, array_index)
+ if not is_empty_value(v):
+ r["hw:passthrough"] = v
+
+ return r
+
+
+def expand_create_metadata_system_encrypted(d, array_index):
+ v = navigate_value(d, ["encryption_id"], array_index)
+ return "1" if v else ""
+
+
+def expand_create_metadata_full_clone(d, array_index):
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ return "0" if v else ""
+
+
+def expand_create_metadata_hw_passthrough(d, array_index):
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ if v is None:
+ return v
+ return "true" if v else "false"
+
+
+def send_create_request(module, params, client):
+ url = "cloudvolumes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if v is not None:
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_extend_disk_parameters(opts):
+ params = dict()
+
+ v = expand_extend_disk_os_extend(opts, None)
+ if not is_empty_value(v):
+ params["os-extend"] = v
+
+ return params
+
+
+def expand_extend_disk_os_extend(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["size"], array_index)
+ if not is_empty_value(v):
+ r["new_size"] = v
+
+ return r
+
+
+def send_extend_disk_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}/action")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(extend_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "job_id": ["job_id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "jobs/{job_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_evs_disk): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def send_read_request(module, client):
+ url = build_path(module, "os-vendor-volumes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volume"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_read_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_read_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = flatten_attachments(response, array_index)
+ r["attachments"] = v
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "metadata", "policy"], array_index)
+ r["backup_policy_id"] = v
+
+ v = navigate_value(response, ["read", "created_at"], array_index)
+ r["created_at"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = flatten_enable_full_clone(response, array_index)
+ r["enable_full_clone"] = v
+
+ v = flatten_enable_scsi(response, array_index)
+ r["enable_scsi"] = v
+
+ v = navigate_value(response, ["read", "multiattach"], array_index)
+ r["enable_share"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "__system__cmkid"], array_index)
+ r["encryption_id"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(
+ response, ["read", "volume_image_metadata", "id"], array_index)
+ r["image_id"] = v
+
+ v = flatten_is_bootable(response, array_index)
+ r["is_bootable"] = v
+
+ v = flatten_is_readonly(response, array_index)
+ r["is_readonly"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "size"], array_index)
+ r["size"] = v
+
+ v = navigate_value(response, ["read", "snapshot_id"], array_index)
+ r["snapshot_id"] = v
+
+ v = navigate_value(response, ["read", "source_volid"], array_index)
+ r["source_volume_id"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(response, ["read", "tags"], array_index)
+ r["tags"] = v
+
+ v = navigate_value(response, ["read", "volume_type"], array_index)
+ r["volume_type"] = v
+
+ return r
+
+
+def flatten_attachments(d, array_index):
+ v = navigate_value(d, ["read", "attachments"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.attachments"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
+ val["attached_at"] = v
+
+ v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
+ val["attachment_id"] = v
+
+ v = navigate_value(d, ["read", "attachments", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
+ val["server_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_full_clone(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "full_clone"],
+ array_index)
+ if v is None:
+ return v
+    return v == "0"
+
+
+def flatten_enable_scsi(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
+ array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def flatten_is_bootable(d, array_index):
+ v = navigate_value(d, ["read", "bootable"], array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def flatten_is_readonly(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "readonly"],
+ array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def set_unreadable_options(opts, states):
+ states["backup_id"] = opts.get("backup_id")
+
+
+def set_readonly_options(opts, states):
+ opts["attachments"] = states.get("attachments")
+
+ opts["backup_policy_id"] = states.get("backup_policy_id")
+
+ opts["created_at"] = states.get("created_at")
+
+ opts["is_bootable"] = states.get("is_bootable")
+
+ opts["is_readonly"] = states.get("is_readonly")
+
+ opts["source_volume_id"] = states.get("source_volume_id")
+
+ opts["status"] = states.get("status")
+
+ opts["tags"] = states.get("tags")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volumes"], None)
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ r["__system__cmkid"] = v
+
+ r["attached_mode"] = None
+
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ r["full_clone"] = v
+
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ r["hw:passthrough"] = v
+
+ r["policy"] = None
+
+ r["readonly"] = None
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_volume_image_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_list_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_list_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
new file mode 100644
index 00000000..f53369ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_network_vpc
+description:
+    - Represents a VPC resource.
+short_description: Creates a Huawei Cloud VPC
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+      - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+      - The timeouts for each operation.
+    type: dict
+    suboptions:
+      create:
+        description:
+          - The timeout for the create operation.
+        type: str
+        default: '15m'
+      update:
+        description:
+          - The timeout for the update operation.
+        type: str
+        default: '15m'
+      delete:
+        description:
+          - The timeout for the delete operation.
+ type: str
+ default: '15m'
+ name:
+ description:
+      - The name of the VPC.
+ type: str
+ required: true
+ cidr:
+ description:
+      - The range of available subnets in the VPC.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a vpc
+ community.general.hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
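+
+# An additional illustrative sketch: removing the same VPC. The authentication
+# variables are assumed to be defined as in the example above.
+- name: Delete the vpc
+  community.general.hwc_network_vpc:
+    identity_endpoint: "{{ identity_endpoint }}"
+    user: "{{ user }}"
+    password: "{{ password }}"
+    domain: "{{ domain }}"
+    project: "{{ project }}"
+    region: "{{ region }}"
+    name: "vpc_1"
+    cidr: "192.168.100.0/24"
+    state: absent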
+'''
+
+RETURN = '''
+    id:
+        description:
+            - The ID of the VPC.
+        type: str
+        returned: success
+    name:
+        description:
+            - The name of the VPC.
+        type: str
+        returned: success
+    cidr:
+        description:
+            - The range of available subnets in the VPC.
+        type: str
+        returned: success
+    status:
+        description:
+            - The status of the VPC.
+        type: str
+        returned: success
+    routes:
+        description:
+            - The route information.
+        type: complex
+        returned: success
+        contains:
+            destination:
+                description:
+                    - The destination network segment of a route.
+                type: str
+                returned: success
+            next_hop:
+                description:
+                    - The next hop of a route. If the route type is peering,
+                      it provides the VPC peering connection ID.
+                type: str
+                returned: success
+    enable_shared_snat:
+        description:
+            - Shows whether shared SNAT is enabled.
+        type: bool
+        returned: success
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcClientException404, HwcModule,
+ are_different_dicts, is_empty_value,
+ wait_to_finish, get_region,
+ build_path, navigate_value)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(
+ default='present', choices=['present', 'absent'], type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ delete=dict(default='15m', type='str'),
+ ), default=dict()),
+ name=dict(required=True, type='str'),
+ cidr=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+ config = Config(module, 'vpc')
+
+ state = module.params['state']
+
+ if (not module.params.get("id")) and module.params.get("name"):
+ module.params['id'] = get_id_by_name(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "vpc", "project")
+ fetch = fetch_resource(module, client, link)
+ if fetch:
+ fetch = fetch.get('vpc')
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {"cidr": current_state["cidr"]}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config, self_link(module))
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config, self_link(module))
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config, "vpcs")
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.post(link, resource_to_create(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_done = wait_for_operation(config, 'create', r)
+ v = ""
+ try:
+ v = navigate_value(wait_done, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
+ return fetch_resource(module, client, url)
+
+
+def update(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.put(link, resource_to_update(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_operation(config, 'update', r)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_delete(module, client, link)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_id_by_name(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ name = module.params.get("name")
+ link = "vpcs"
+ query_link = "?marker={marker}&limit=10"
+ link += query_link
+ not_format_keys = re.findall("={marker}", link)
+ none_values = re.findall("=None", link)
+
+ if not (not_format_keys or none_values):
+ r = None
+ try:
+ r = client.get(link)
+ except Exception:
+ pass
+ if r is None:
+ return None
+ r = r.get('vpcs', [])
+ ids = [
+ i.get('id') for i in r if i.get('name', '') == name
+ ]
+ if not ids:
+ return None
+ elif len(ids) == 1:
+ return ids[0]
+ else:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+ elif none_values:
+ module.fail_json(
+ msg="Can not find id by name because url includes None.")
+ else:
+ p = {'marker': ''}
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('vpcs', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == name:
+ ids.add(i.get('id'))
+ if len(ids) >= 2:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+
+ p['marker'] = r[-1].get('id')
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "vpcs/{id}")
+
+
+def resource_to_create(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def resource_to_update(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def _get_editable_properties(module):
+ return {
+ "cidr": module.params.get("cidr"),
+ }
+
+
+def response_to_hash(module, response):
+ """ Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'id': response.get(u'id'),
+ u'name': response.get(u'name'),
+ u'cidr': response.get(u'cidr'),
+ u'status': response.get(u'status'),
+ u'routes': VpcRoutesArray(
+ response.get(u'routes', []), module).from_response(),
+ u'enable_shared_snat': response.get(u'enable_shared_snat')
+ }
+
+
+def wait_for_operation(config, op_type, op_result):
+ module = config.module
+ op_id = ""
+ try:
+ op_id = navigate_value(op_result, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
+ timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+ states = {
+ 'create': {
+ 'allowed': ['CREATING', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ },
+ 'update': {
+ 'allowed': ['PENDING_UPDATE', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ }
+ }
+
+ return wait_for_completion(url, timeout, states[op_type]['allowed'],
+ states[op_type]['complete'], config)
+
+
+def wait_for_completion(op_uri, timeout, allowed_states,
+ complete_states, config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ def _refresh_status():
+ r = None
+ try:
+ r = fetch_resource(module, client, op_uri)
+ except Exception:
+ return None, ""
+
+ status = ""
+ try:
+ status = navigate_value(r, ['vpc', 'status'])
+ except Exception:
+ return None, ""
+
+ return r, status
+
+ try:
+ return wait_to_finish(complete_states, allowed_states,
+ _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def wait_for_delete(module, client, link):
+
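+ # poll the resource URL until a 404 confirms the VPC has been deleted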
+ def _refresh_status():
+ try:
+ client.get(link)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+ try:
+ return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+class VpcRoutesArray(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return {
+ u'destination': item.get('destination'),
+ u'nexthop': item.get('next_hop')
+ }
+
+ def _response_from_item(self, item):
+ return {
+ u'destination': item.get(u'destination'),
+ u'next_hop': item.get(u'nexthop')
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
new file mode 100644
index 00000000..f7fb4fae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_smn_topic
+description:
+ - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ type: str
+ required: false
+ name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+ to 256 characters. It can contain only upper- or lower-case letters,
+ digits, hyphens (-), and underscores C(_), and must start with a
+ letter or digit.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create an SMN topic
+ community.general.hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user_name: "{{ user_name }}"
+ password: "{{ password }}"
+ domain_name: "{{ domain_name }}"
+ project_name: "{{ project_name }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+'''
+
+RETURN = '''
+create_time:
+ description:
+ - Time when the topic was created.
+ returned: success
+ type: str
+display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ returned: success
+ type: str
+name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+ to 256 characters. It can contain only upper- or lower-case letters,
+ digits, hyphens (-), and underscores C(_), and must start with a
+ letter or digit.
+ returned: success
+ type: str
+push_policy:
+ description:
+ - Message pushing policy. 0 indicates that the message sending
+ fails and the message is cached in the queue. 1 indicates that
+ the failed message is discarded.
+ returned: success
+ type: int
+topic_urn:
+ description:
+ - Resource identifier of a topic, which is unique.
+ returned: success
+ type: str
+update_time:
+ description:
+ - Time when the topic was updated.
+ returned: success
+ type: str
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcModule, navigate_value,
+ are_different_dicts, is_empty_value,
+ build_path, get_region)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ display_name=dict(type='str'),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ config = Config(module, "smn")
+
+ state = module.params['state']
+
+ if not module.params.get("id"):
+ module.params['id'] = get_resource_id(config)
+
+ fetch = None
+ link = self_link(module)
+ # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "smn", "project")
+ fetch = fetch_resource(module, client, link)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_resource_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {'display_name': current_state['display_name']}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ r = None
+ try:
+ r = client.post(link, create_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return get_resource(config, r)
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.put(link, update_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_resource(config, result):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ v = ""
+ try:
+ v = navigate_value(result, ['topic_urn'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ d = {'topic_urn': v}
+ url = build_path(module, 'notifications/topics/{topic_urn}', d)
+
+ return fetch_resource(module, client, url)
+
+
+def get_resource_id(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ query_link = "?offset={offset}&limit=10"
+ link += query_link
+
+ p = {'offset': 0}
+ v = module.params.get('name')
+ ids = set()
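+ # walk the topic list page by page, collecting the topic_urn of every topic whose name matches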
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('topics', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == v:
+ ids.add(i.get('topic_urn'))
+ if len(ids) >= 2:
+ module.fail_json(msg="Multiple resources are found")
+
+ p['offset'] += 1
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "notifications/topics/{id}")
+
+
+def create_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ return params
+
+
+def update_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ return params
+
+
+def _get_resource_editable_properties(module):
+ return {
+ "display_name": module.params.get("display_name"),
+ }
+
+
+def response_to_hash(module, response):
+ """Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'create_time': response.get(u'create_time'),
+ u'display_name': response.get(u'display_name'),
+ u'name': response.get(u'name'),
+ u'push_policy': _push_policy_convert_from_response(
+ response.get('push_policy')),
+ u'topic_urn': response.get(u'topic_urn'),
+ u'update_time': response.get(u'update_time')
+ }
+
+
+def _push_policy_convert_from_response(value):
+ return {
+ 0: "the message sending fails and is cached in the queue",
+ 1: "the failed message is discarded",
+ }.get(int(value))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
new file mode 100644
index 00000000..b53395f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_eip
+description:
+ - Elastic IP management.
+short_description: Creates a resource of Vpc/EIP in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '5m'
+ update:
+ description:
+ - The timeout for the update operation.
+ type: str
+ default: '5m'
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ required: true
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ required: false
+ suboptions:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+ If this parameter is left blank or is an empty string,
+ the default value bandwidth is used. For IPv6
+ addresses, the default value is bandwidth
+ outside China and traffic in China.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+ size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ required: false
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ required: false
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ required: false
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ required: false
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# Create an EIP and bind it to a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ register: port
+- name: Create an eip and bind it to a port
+ community.general.hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+'''
+
+RETURN = '''
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ returned: success
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ returned: success
+ contains:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+ If this parameter is left blank or is an empty string,
+ the default value bandwidth is used. For IPv6
+ addresses, the default value is bandwidth
+ outside China and traffic in China.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+ size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ returned: success
+ id:
+ description:
+ - Specifies the ID of dedicated bandwidth.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ returned: success
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ returned: success
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ returned: success
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ returned: success
+ create_time:
+ description:
+ - Specifies the time (UTC time) when the EIP was assigned.
+ type: str
+ returned: success
+ ipv6_address:
+ description:
+ - Specifies the obtained IPv6 EIP.
+ type: str
+ returned: success
+ private_ip_address:
+ description:
+ - Specifies the private IP address bound with the EIP. This
+ parameter is returned only when a private IP address is bound
+ with the EIP.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='5m', type='str'),
+ update=dict(default='5m', type='str'),
+ ), default=dict()),
+ type=dict(type='str', required=True),
+ dedicated_bandwidth=dict(type='dict', options=dict(
+ charge_mode=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ size=dict(type='int', required=True)
+ )),
+ enterprise_project_id=dict(type='str'),
+ ip_version=dict(type='int'),
+ ipv4_address=dict(type='str'),
+ port_id=dict(type='str'),
+ shared_bandwidth_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "ip_version": module.params.get("ip_version"),
+ "ipv4_address": module.params.get("ipv4_address"),
+ "port_id": module.params.get("port_id"),
+ "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
+ "type": module.params.get("type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["publicip", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
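+ # clear port_id first so the EIP is unbound from its port before it is deleted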
+ if module.params["port_id"]:
+ module.params["port_id"] = ""
+ update(config)
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "publicips/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["ip_version"])
+ if v:
+ query_params.append("ip_version=" + str(v))
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "publicips" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_bandwidth(opts, None)
+ if not is_empty_value(v):
+ params["bandwidth"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ return params
+
+
+def expand_create_bandwidth(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ if not (v or sbwid):
+ raise Exception("must input shared_bandwidth_id or "
+ "dedicated_bandwidth")
+
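+ # a shared bandwidth is referenced by id (share_type WHOLE); a dedicated bandwidth is created inline (share_type PER)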
+ if sbwid:
+ return {
+ "id": sbwid,
+ "share_type": "WHOLE"}
+
+ return {
+ "charge_mode": v["charge_mode"],
+ "name": v["name"],
+ "share_type": "PER",
+ "size": v["size"]}
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["ipv4_address"], array_index)
+ if not is_empty_value(v):
+ r["ip_address"] = v
+
+ v = navigate_value(d, ["ip_version"], array_index)
+ if not is_empty_value(v):
+ r["ip_version"] = v
+
+ v = navigate_value(d, ["type"], array_index)
+ if not is_empty_value(v):
+ r["type"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "publicips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "publicip_id": ["publicip", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "publicips/{publicip_id}", data)
+
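+ # poll the new EIP until its status reaches ACTIVE or DOWN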
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_version"], None)
+ if not is_empty_value(v):
+ params["ip_version"] = v
+
+ v = navigate_value(opts, ["port_id"], None)
+ if v is not None:
+ params["port_id"] = v
+
+ if not params:
+ return params
+
+ params = {"publicip": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "publicips/{id}")
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "publicips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "create_time"], array_index)
+ r["create_time"] = v
+
+ v = r.get("dedicated_bandwidth")
+ v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
+ r["dedicated_bandwidth"] = v
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "ip_version"], array_index)
+ r["ip_version"] = v
+
+ v = navigate_value(response, ["read", "public_ip_address"], array_index)
+ r["ipv4_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "public_ipv6_address"],
+ array_index)
+ r["ipv6_address"] = v
+
+ v = navigate_value(response, ["read", "port_id"], array_index)
+ r["port_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "private_ip_address"],
+ array_index)
+ r["private_ip_address"] = v
+
+ v = r.get("shared_bandwidth_id")
+ v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
+ r["shared_bandwidth_id"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ return r
+
+
+def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+ if not (v and v == "PER"):
+ return current_value
+
+ result = current_value
+ if not result:
+ result = dict()
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+ if v is not None:
+ result["id"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_name"], array_index)
+ if v is not None:
+ result["name"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_size"], array_index)
+ if v is not None:
+ result["size"] = v
+
+ return result if result else current_value
+
+
+def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+
+ v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+
+ return v if (v1 and v1 == "WHOLE") else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_bandwidth_id(all_opts, None)
+ result["bandwidth_id"] = v
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
+ result["bandwidth_name"] = v
+
+ result["bandwidth_share_type"] = None
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
+ result["bandwidth_size"] = v
+
+ result["create_time"] = None
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_version"], None)
+ result["ip_version"] = v
+
+ v = navigate_value(all_opts, ["port_id"], None)
+ result["port_id"] = v
+
+ result["private_ip_address"] = None
+
+ v = navigate_value(all_opts, ["ipv4_address"], None)
+ result["public_ip_address"] = v
+
+ result["public_ipv6_address"] = None
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ return result
+
+
+def expand_list_bandwidth_id(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ return sbwid
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
new file mode 100644
index 00000000..a4d5921b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_peering_connect
+description:
+ - VPC peering management.
+short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '15m'
+ local_vpc_id:
+ description:
+ - Specifies the ID of local VPC.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ required: true
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ required: true
+ suboptions:
+ vpc_id:
+ description:
+ - Specifies the ID of peering VPC.
+ type: str
+ required: true
+ project_id:
+ description:
+ - Specifies the ID of the project to which the peering VPC
+ belongs.
+ type: str
+ required: false
+ description:
+ description:
+ - The description of the VPC peering connection.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# Create a peering connection
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connection
+ community.general.hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+'''
+
+RETURN = '''
+ local_vpc_id:
+ description:
+ - Specifies the ID of local VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ returned: success
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ returned: success
+ contains:
+ vpc_id:
+ description:
+ - Specifies the ID of peering VPC.
+ type: str
+ returned: success
+ project_id:
+ description:
+ - Specifies the ID of the project to which the peering VPC
+ belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - The description of the VPC peering connection.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ local_vpc_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ peering_vpc=dict(type='dict', required=True, options=dict(
+ vpc_id=dict(type='str', required=True),
+ project_id=dict(type='str')
+ )),
+ description=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "local_vpc_id": module.params.get("local_vpc_id"),
+ "name": module.params.get("name"),
+ "peering_vpc": module.params.get("peering_vpc"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["peering", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["local_vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/peerings" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_accept_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["accept_vpc_info"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_request_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["request_vpc_info"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def expand_create_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ if not is_empty_value(v):
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def expand_create_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = ""
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/peerings"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "peering_id": ["peering", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["peering", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
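+ # only ACTIVE counts as complete; PENDING_ACCEPTANCE is allowed as an intermediate state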
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["PENDING_ACCEPTANCE"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peering"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_read_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
+ array_index)
+ r["local_vpc_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = r.get("peering_vpc")
+ v = flatten_peering_vpc(response, array_index, v, exclude_output)
+ r["peering_vpc"] = v
+
+ return r
+
+
+def flatten_peering_vpc(d, array_index, current_value, exclude_output):
+ result = current_value
+ has_init_value = True
+ if not result:
+ result = dict()
+ has_init_value = False
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
+ array_index)
+ result["project_id"] = v
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
+ result["vpc_id"] = v
+
+ if has_init_value:
+ return result
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peerings"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_accept_vpc_info(all_opts, None)
+ result["accept_vpc_info"] = v
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = expand_list_request_vpc_info(all_opts, None)
+ result["request_vpc_info"] = v
+
+ result["status"] = None
+
+ return result
+
+
+def expand_list_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = None
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_list_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
new file mode 100644
index 00000000..cf0718f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
@@ -0,0 +1,1160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_port
+description:
+ - VPC port management.
+short_description: Creates a resource of Vpc/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '15m'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ required: true
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ required: false
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ required: false
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ required: false
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ required: false
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ required: false
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ required: false
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the ID of the security group.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ community.general.hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ returned: success
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ returned: success
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ returned: success
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ returned: success
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ returned: success
+ contains:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ returned: success
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the ID of the security group.
+ type: list
+ returned: success
+ mac_address:
+ description:
+ - Specifies the port MAC address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ subnet_id=dict(type='str', required=True),
+ admin_state_up=dict(type='bool'),
+ allowed_address_pairs=dict(
+ type='list', elements='dict',
+ options=dict(
+ ip_address=dict(type='str'),
+ mac_address=dict(type='str')
+ ),
+ ),
+ extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ value=dict(type='str')
+ )),
+ ip_address=dict(type='str'),
+ name=dict(type='str'),
+ security_groups=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "admin_state_up": module.params.get("admin_state_up"),
+ "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+ "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
+ "ip_address": module.params.get("ip_address"),
+ "name": module.params.get("name"),
+ "security_groups": module.params.get("security_groups"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
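+    # A timeouts value like "15m" is converted to seconds below (900); only
+    # minute-suffixed values are handled by this parsing.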
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["port", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "ports/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
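+    # NOTE: the module only defines a "create" timeout, so the same value is
+    # reused here while waiting for the delete to complete.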
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ array_index = {
+ "read.fixed_ips": 0,
+ }
+
+ return update_properties(module, res, array_index, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["subnet_id"])
+ if v:
+ query_params.append("network_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ v = navigate_value(opts, ["admin_state_up"])
+ if v:
+ query_params.append("admin_state_up=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
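+# For example, with subnet_id and name set, search_resource() below pages through
+# URLs like "ports?marker=&limit=10&network_id=<subnet_id>&name=<name>", moving the
+# marker to the ID of the last returned port after each page of up to 10 results.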
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "ports" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_state_up"], None)
+ if not is_empty_value(v):
+ params["admin_state_up"] = v
+
+ v = expand_create_allowed_address_pairs(opts, None)
+ if not is_empty_value(v):
+ params["allowed_address_pairs"] = v
+
+ v = expand_create_extra_dhcp_opts(opts, None)
+ if not is_empty_value(v):
+ params["extra_dhcp_opts"] = v
+
+ v = expand_create_fixed_ips(opts, None)
+ if not is_empty_value(v):
+ params["fixed_ips"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["network_id"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
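+# As a rough illustration, opts with only subnet_id and ip_address set yield a
+# request body shaped like
+# {"port": {"network_id": "<subnet_id>", "fixed_ips": [{"ip_address": "<ip>"}]}}.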
+
+def expand_create_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_create_request(module, params, client):
+ url = "ports"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "port_id": ["port", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "ports/{port_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["port", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ ["BUILD"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = expand_update_allowed_address_pairs(opts, None)
+ if v is not None:
+ params["allowed_address_pairs"] = v
+
+ v = expand_update_extra_dhcp_opts(opts, None)
+ if v is not None:
+ params["extra_dhcp_opts"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_update_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_update_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "ports/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["port"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_read_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "admin_state_up"], array_index)
+ r["admin_state_up"] = v
+
+ v = r.get("allowed_address_pairs")
+ v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
+ r["allowed_address_pairs"] = v
+
+ v = r.get("extra_dhcp_opts")
+ v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
+ r["extra_dhcp_opts"] = v
+
+ v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
+ array_index)
+ r["ip_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "mac_address"], array_index)
+ r["mac_address"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "security_groups"], array_index)
+ r["security_groups"] = v
+
+ v = navigate_value(response, ["read", "network_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def flatten_allowed_address_pairs(d, array_index,
+ current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "allowed_address_pairs"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.allowed_address_pairs"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
+ new_array_index)
+ val["ip_address"] = v
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
+ new_array_index)
+ val["mac_address"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "extra_dhcp_opts"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.extra_dhcp_opts"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
+ new_array_index)
+ val["name"] = v
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
+ new_array_index)
+ val["value"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["ports"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["admin_state_up"], None)
+ result["admin_state_up"] = v
+
+ v = expand_list_allowed_address_pairs(all_opts, None)
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = None
+
+ result["binding_vnic_type"] = None
+
+ result["device_id"] = None
+
+ result["device_owner"] = None
+
+ result["dns_name"] = None
+
+ v = expand_list_extra_dhcp_opts(all_opts, None)
+ result["extra_dhcp_opts"] = v
+
+ v = expand_list_fixed_ips(all_opts, None)
+ result["fixed_ips"] = v
+
+ result["id"] = None
+
+ result["mac_address"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["network_id"] = v
+
+ v = navigate_value(all_opts, ["security_groups"], None)
+ result["security_groups"] = v
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ return result
+
+
+def expand_list_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ transformed["mac_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ transformed["opt_value"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ transformed["ip_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_list_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
new file mode 100644
index 00000000..901755f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_private_ip
+description:
+ - vpc private ip management.
+short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+notes:
+ - If I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection.
+ - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP with these options exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned. Cannot be changed after creating the private ip.
+ type: str
+ required: true
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address. Cannot be changed after
+ creating the private ip.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a private ip
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a private ip
+ community.general.hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ subnet_id=dict(type='str', required=True),
+ ip_address=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s)of an"
+ " existing resource.(%s)" % (current, expect, module.params.get('id')))
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "ip_address": module.params.get("ip_address"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["privateips", "id"],
+ {"privateips": 0})
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_address"], None)
+ if not is_empty_value(v):
+ params["ip_address"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["subnet_id"] = v
+
+ if not params:
+ return params
+
+ params = {"privateips": [params]}
+
+ return params
+
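+# The request body wraps the parameters in a one-element list, e.g.
+# {"privateips": [{"subnet_id": "<subnet_id>", "ip_address": "<ip>"}]};
+# create() reads the new ID back from the first element of that list in the response.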
+
+def send_create_request(module, params, client):
+ url = "privateips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "privateips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "privateips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "ip_address"], array_index)
+ r["ip_address"] = v
+
+ v = navigate_value(response, ["read", "subnet_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_address"], None)
+ result["ip_address"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["subnet_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
new file mode 100644
index 00000000..31829dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_route
+description:
+ - vpc route management.
+short_description: Creates a resource of Vpc/Route in Huawei Cloud
+notes:
+ - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
+ - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ required: true
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ required: true
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ required: false
+ default: 'peering'
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ register: connect
+- name: Create a route
+ community.general.hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+'''
+
+RETURN = '''
+ id:
+ description:
+ - UUID of the route.
+ type: str
+ returned: success
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ returned: success
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ returned: success
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ destination=dict(type='str', required=True),
+ next_hop=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ type=dict(type='str', default='peering'),
+ id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = get_resource_by_id(config)
+ if module.params['state'] == 'present':
+ opts = user_input_parameters(module)
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing route.(%s)" % (resource, opts,
+ config.module.params.get(
+ 'id')))
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "destination": module.params.get("destination"),
+ "next_hop": module.params.get("next_hop"),
+ "type": module.params.get("type"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["route", "id"])
+
+ result = update_properties(module, {"read": fill_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+
+def get_resource_by_id(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_resp_body(r)
+
+ result = update_properties(module, res, None, exclude_output)
+ return result
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["type"])
+ if v:
+ query_params.append("type=" + str(v))
+
+ v = navigate_value(opts, ["destination"])
+ if v:
+ query_params.append("destination=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/routes" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["destination"], None)
+ if not is_empty_value(v):
+ params["destination"] = v
+
+ v = navigate_value(opts, ["next_hop"], None)
+ if not is_empty_value(v):
+ params["nexthop"] = v
+
+ v = navigate_value(opts, ["type"], None)
+ if not is_empty_value(v):
+ params["type"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"route": params}
+
+ return params
+
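+# Note that the module option next_hop is sent as "nexthop" in the request body,
+# which ends up shaped like
+# {"route": {"destination": "<cidr>", "nexthop": "<peering id>", "type": "peering", "vpc_id": "<vpc id>"}}.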
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/routes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["route"], None)
+
+
+def fill_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "destination"], array_index)
+ r["destination"] = v
+
+ v = navigate_value(response, ["read", "nexthop"], array_index)
+ r["next_hop"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["routes"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["destination"], None)
+ result["destination"] = v
+
+ v = navigate_value(all_opts, ["id"], None)
+ result["id"] = v
+
+ v = navigate_value(all_opts, ["next_hop"], None)
+ result["nexthop"] = v
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
new file mode 100644
index 00000000..60351815
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+ - vpc security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+ - If I(id) option is provided, it takes precedence over I(name),
+ I(enterprise_project_id) and I(vpc_id) for security group selection.
+ - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+ group selection. If more than one security group with these options exists,
+ execution is aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ required: false
+ default: 0
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ returned: success
+ rules:
+ description:
+ - Specifies the security group rule, which ensures that resources
+ in the security group can communicate with one another.
+ type: complex
+ returned: success
+ contains:
+ description:
+ description:
+ - Provides supplementary information about the security
+ group rule.
+ type: str
+ returned: success
+ direction:
+ description:
+ - Specifies the direction of access control. The value can
+ be egress or ingress.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4
+ or IPv6.
+ type: str
+ returned: success
+ id:
+ description:
+ - Specifies the security group rule ID.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to
+ 65535. If the protocol is not icmp, the value cannot be
+ smaller than the port_range_min value. An empty value
+ indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1
+ to 65535. The value cannot be greater than the
+ port_range_max value. An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp,
+ udp, or others. If the parameter is left blank, the
+ security group supports all protocols.
+ type: str
+ returned: success
+ remote_address_group_id:
+ description:
+ - Specifies the ID of remote IP address group.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control
+ direction is set to egress, the parameter specifies the
+ source IP address. If the access control direction is set
+ to ingress, the parameter specifies the destination IP
+ address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ name=dict(type='str', required=True),
+ enterprise_project_id=dict(type='str'),
+ vpc_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = read_resource(config)
+ if module.params['state'] == 'present':
+ check_resource_option(resource, module)
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def check_resource_option(resource, module):
+ opts = user_input_parameters(module)
+
+ resource = {
+ "enterprise_project_id": resource.get("enterprise_project_id"),
+ "name": resource.get("name"),
+ "vpc_id": resource.get("vpc_id"),
+ "id": resource.get("id"),
+ }
+
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (resource, opts,
+ module.params.get('id')))
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group", "id"])
+
+ result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-groups" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-groups"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-groups/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-groups/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ if not exclude_output:
+ v = r.get("rules")
+ v = flatten_rules(response, array_index, v, exclude_output)
+ r["rules"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def flatten_rules(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "security_group_rules"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.security_group_rules"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "description"],
+ new_array_index)
+ val["description"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "direction"],
+ new_array_index)
+ val["direction"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
+ new_array_index)
+ val["ethertype"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "id"],
+ new_array_index)
+ val["id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
+ new_array_index)
+ val["port_range_max"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
+ new_array_index)
+ val["port_range_min"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "protocol"],
+ new_array_index)
+ val["protocol"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
+ new_array_index)
+ val["remote_address_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
+ new_array_index)
+ val["remote_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
+ new_array_index)
+ val["remote_ip_prefix"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["security_group_rules"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
new file mode 100644
index 00000000..f92c8276
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+ - vpc security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+ - If I(id) option is provided, it takes precedence over
+ I(security_group_id) for security group rule selection.
+ - I(security_group_id) is used for security group rule selection. If more
+ than one security group rule with this option exists, execution is
+ aborted.
+ - No parameter supports updating. If any option is changed, the module
+ will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ required: true
+ security_group_id:
+ description:
+ - Specifies the ID of the security group to which the security group
+ rule belongs.
+ type: str
+ required: true
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ required: false
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ required: false
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ required: false
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ required: false
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ required: false
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ required: false
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: Create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ register: sg
+- name: Create a security group rule
+ community.general.hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 22
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+'''
+
+RETURN = '''
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ returned: success
+ security_group_id:
+ description:
+ - Specifies the ID of the security group to which the security group
+ rule belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ direction=dict(type='str', required=True),
+ security_group_id=dict(type='str', required=True),
+ description=dict(type='str'),
+ ethertype=dict(type='str'),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ protocol=dict(type='str'),
+ remote_group_id=dict(type='str'),
+ remote_ip_prefix=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (current, expect, module.params.get('id')))
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "direction": module.params.get("direction"),
+ "ethertype": module.params.get("ethertype"),
+ "port_range_max": module.params.get("port_range_max"),
+ "port_range_min": module.params.get("port_range_min"),
+ "protocol": module.params.get("protocol"),
+ "remote_group_id": module.params.get("remote_group_id"),
+ "remote_ip_prefix": module.params.get("remote_ip_prefix"),
+ "security_group_id": module.params.get("security_group_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["security_group_id"])
+ if v:
+ query_link += "&security_group_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-group-rules" + query_link
+
+ result = []
+ p = {'marker': ''}
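+ # Page through the list API with the marker/limit query string built by
+ # _build_query_link(); each page entry is normalized via
+ # fill_list_resp_body() and compared against the identity object, and the
+ # loop stops early once more than one matching rule has been collected.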
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["direction"], None)
+ if not is_empty_value(v):
+ params["direction"] = v
+
+ v = navigate_value(opts, ["ethertype"], None)
+ if not is_empty_value(v):
+ params["ethertype"] = v
+
+ v = navigate_value(opts, ["port_range_max"], None)
+ if not is_empty_value(v):
+ params["port_range_max"] = v
+
+ v = navigate_value(opts, ["port_range_min"], None)
+ if not is_empty_value(v):
+ params["port_range_min"] = v
+
+ v = navigate_value(opts, ["protocol"], None)
+ if not is_empty_value(v):
+ params["protocol"] = v
+
+ v = navigate_value(opts, ["remote_group_id"], None)
+ if not is_empty_value(v):
+ params["remote_group_id"] = v
+
+ v = navigate_value(opts, ["remote_ip_prefix"], None)
+ if not is_empty_value(v):
+ params["remote_ip_prefix"] = v
+
+ v = navigate_value(opts, ["security_group_id"], None)
+ if not is_empty_value(v):
+ params["security_group_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group_rule": params}
+
+ return params
+
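+# Illustrative only: for the rule created in the EXAMPLES playbook above,
+# build_create_parameters() would produce a request body along the lines of
+# (security_group_id shown as a placeholder):
+#   {"security_group_rule": {"direction": "ingress", "ethertype": "IPv4",
+#    "port_range_max": 22, "port_range_min": 22, "protocol": "tcp",
+#    "remote_ip_prefix": "0.0.0.0/0", "security_group_id": "<sg-id>"}}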
+
+def send_create_request(module, params, client):
+ url = "security-group-rules"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rule"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "direction"], array_index)
+ r["direction"] = v
+
+ v = navigate_value(response, ["read", "ethertype"], array_index)
+ r["ethertype"] = v
+
+ v = navigate_value(response, ["read", "port_range_max"], array_index)
+ r["port_range_max"] = v
+
+ v = navigate_value(response, ["read", "port_range_min"], array_index)
+ r["port_range_min"] = v
+
+ v = navigate_value(response, ["read", "protocol"], array_index)
+ r["protocol"] = v
+
+ v = navigate_value(response, ["read", "remote_group_id"], array_index)
+ r["remote_group_id"] = v
+
+ v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
+ r["remote_ip_prefix"] = v
+
+ v = navigate_value(response, ["read", "security_group_id"], array_index)
+ r["security_group_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rules"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["direction"], None)
+ result["direction"] = v
+
+ v = navigate_value(all_opts, ["ethertype"], None)
+ result["ethertype"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["port_range_max"], None)
+ result["port_range_max"] = v
+
+ v = navigate_value(all_opts, ["port_range_min"], None)
+ result["port_range_min"] = v
+
+ v = navigate_value(all_opts, ["protocol"], None)
+ result["protocol"] = v
+
+ result["remote_address_group_id"] = None
+
+ v = navigate_value(all_opts, ["remote_group_id"], None)
+ result["remote_group_id"] = v
+
+ v = navigate_value(all_opts, ["remote_ip_prefix"], None)
+ result["remote_ip_prefix"] = v
+
+ v = navigate_value(all_opts, ["security_group_id"], None)
+ result["security_group_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
new file mode 100644
index 00000000..ccf18050
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
@@ -0,0 +1,734 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_subnet
+description:
+ - subnet management.
+short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '15m'
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs. Cannot
+ be changed after creating the subnet.
+ type: str
+ required: true
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs. Cannot be changed
+ after creating the subnet.
+ type: str
+ required: false
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled), and the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ required: false
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. The address
+ at the head of the list is used first.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create subnet
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+'''
+
+RETURN = '''
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28.
+ type: str
+ returned: success
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs.
+ type: str
+ returned: success
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs.
+ type: str
+ returned: success
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+ be true (enabled) or false (disabled), and the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ returned: success
+ dns_address:
+ description:
+ - Specifies the DNS server addresses for the subnet. The address
+ at the head of the list is used first.
+ type: list
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ ), default=dict()),
+ cidr=dict(type='str', required=True),
+ gateway_ip=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ availability_zone=dict(type='str'),
+ dhcp_enable=dict(type='bool'),
+ dns_address=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get('id'):
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "cidr": module.params.get("cidr"),
+ "dhcp_enable": module.params.get("dhcp_enable"),
+ "dns_address": module.params.get("dns_address"),
+ "gateway_ip": module.params.get("gateway_ip"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
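+ # The create timeout is given in minutes with an 'm' suffix (default
+ # '15m'), so this converts it to seconds: '15m' -> 60 * 15 = 900.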
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["subnet", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "subnets/{id}")
+
+ def _refresh_status():
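+ # A 404 from the subnet URL means the deletion has finished; a
+ # successful read means the subnet still exists and the wait continues.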
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_link += "&vpc_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "subnets" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["cidr"], None)
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_create_dns_list(opts, None)
+ if not is_empty_value(v):
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["gateway_ip"], None)
+ if not is_empty_value(v):
+ params["gateway_ip"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_primary_dns(opts, None)
+ if not is_empty_value(v):
+ params["primary_dns"] = v
+
+ v = expand_create_secondary_dns(opts, None)
+ if not is_empty_value(v):
+ params["secondary_dns"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_create_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v if (v and len(v) > 2) else []
+
+
+def expand_create_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_create_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
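+# Illustrative only: with dns_address set to three sample servers, e.g.
+# ["100.125.1.250", "100.125.21.250", "8.8.8.8"], the helpers above yield
+# primary_dns "100.125.1.250", secondary_dns "100.125.21.250" and a dnsList
+# containing all three; with two or fewer addresses, dnsList stays empty and
+# only primary_dns/secondary_dns are sent.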
+
+def send_create_request(module, params, client):
+ url = "subnets"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_update_dns_list(opts, None)
+ if v is not None:
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_update_primary_dns(opts, None)
+ if v is not None:
+ params["primary_dns"] = v
+
+ v = expand_update_secondary_dns(opts, None)
+ if v is not None:
+ params["secondary_dns"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_update_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ if v:
+ if len(v) > 2:
+ return v
+ return None
+ return []
+
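+# On update, dnsList is only sent when more than two addresses are given;
+# with one or two addresses it is omitted (None) so only primary/secondary
+# DNS are updated, while an empty dns_address clears dnsList with [].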
+
+def expand_update_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_update_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "subnets/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnet"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "cidr"], array_index)
+ r["cidr"] = v
+
+ v = navigate_value(response, ["read", "dhcp_enable"], array_index)
+ r["dhcp_enable"] = v
+
+ v = navigate_value(response, ["read", "dnsList"], array_index)
+ r["dns_address"] = v
+
+ v = navigate_value(response, ["read", "gateway_ip"], array_index)
+ r["gateway_ip"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnets"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["availability_zone"] = v
+
+ v = navigate_value(all_opts, ["cidr"], None)
+ result["cidr"] = v
+
+ v = navigate_value(all_opts, ["dhcp_enable"], None)
+ result["dhcp_enable"] = v
+
+ v = navigate_value(all_opts, ["dns_address"], None)
+ result["dnsList"] = v
+
+ v = navigate_value(all_opts, ["gateway_ip"], None)
+ result["gateway_ip"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["neutron_network_id"] = None
+
+ result["neutron_subnet_id"] = None
+
+ result["primary_dns"] = None
+
+ result["secondary_dns"] = None
+
+ result["status"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py
new file mode 100644
index 00000000..f25d7d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_cdi_upload
+
+short_description: Upload local VM images to CDI Upload Proxy.
+
+
+author: KubeVirt Team (@kubevirt)
+
+
+description:
+ - Use Openshift Python SDK to create UploadTokenRequest objects.
+ - Transfer contents of local files to the CDI Upload Proxy.
+
+options:
+ pvc_name:
+ description:
+ - Use to specify the name of the target PersistentVolumeClaim.
+ required: true
+ pvc_namespace:
+ description:
+ - Use to specify the namespace of the target PersistentVolumeClaim.
+ required: true
+ upload_host:
+ description:
+ - URL containing the host and port on which the CDI Upload Proxy is available.
+ - "More info: U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/upload.md#expose-cdi-uploadproxy-service)"
+ required: true
+ upload_host_validate_certs:
+ description:
+ - Whether or not to verify the CDI Upload Proxy's SSL certificates against your system's CA trust store.
+ default: true
+ type: bool
+ aliases: [ upload_host_verify_ssl ]
+ path:
+ description:
+ - Path of the local image file to transfer.
+ required: true
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+ - requests >= 2.0.0
+'''
+
+EXAMPLES = '''
+- name: Upload local image to pvc-vm1
+ community.general.kubevirt_cdi_upload:
+ pvc_namespace: default
+ pvc_name: pvc-vm1
+ upload_host: https://localhost:8443
+ upload_host_validate_certs: false
+ path: /tmp/cirros-0.4.0-x86_64-disk.img
+'''
+
+RETURN = '''# '''
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+SERVICE_ARG_SPEC = {
+ 'pvc_name': {'required': True},
+ 'pvc_namespace': {'required': True},
+ 'upload_host': {'required': True},
+ 'upload_host_validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['upload_host_verify_ssl']
+ },
+ 'path': {'required': True},
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+}
+
+
+class KubeVirtCDIUpload(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtCDIUpload, self).__init__(*args, k8s_kind='UploadTokenRequest', **kwargs)
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(SERVICE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ """ Module execution """
+
+ API = 'v1alpha1'
+ KIND = 'UploadTokenRequest'
+
+ self.client = self.get_api_client()
+
+ api_version = 'upload.cdi.kubevirt.io/{0}'.format(API)
+ pvc_name = self.params.get('pvc_name')
+ pvc_namespace = self.params.get('pvc_namespace')
+ upload_host = self.params.get('upload_host')
+ upload_host_verify_ssl = self.params.get('upload_host_validate_certs')
+ path = self.params.get('path')
+
+ definition = defaultdict(defaultdict)
+
+ definition['kind'] = KIND
+ definition['apiVersion'] = api_version
+
+ def_meta = definition['metadata']
+ def_meta['name'] = pvc_name
+ def_meta['namespace'] = pvc_namespace
+
+ def_spec = definition['spec']
+ def_spec['pvcName'] = pvc_name
+
+ # Let's check the file's there before we do anything else
+ imgfile = open(path, 'rb')
+
+ resource = self.find_resource(KIND, api_version, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
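+ # perform_action() creates the UploadTokenRequest object through the
+ # cluster API; the token in its result is then used as a Bearer
+ # credential to POST the local image file straight to the CDI upload
+ # proxy below.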
+ headers = {'Authorization': "Bearer {0}".format(result['result']['status']['token'])}
+ url = "{0}/{1}/upload".format(upload_host, API)
+ ret = requests.post(url, data=imgfile, headers=headers, verify=upload_host_verify_ssl)
+
+ if ret.status_code != 200:
+ self.fail_request("Something went wrong while uploading data", method='POST', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ self.exit_json(changed=True)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = KubeVirtCDIUpload()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py
new file mode 100644
index 00000000..7e0776c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_preset.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_preset
+
+short_description: Manage KubeVirt virtual machine presets
+
+description:
+ - Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine presets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine preset.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine preset exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine preset."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: present
+ name: vmi-preset-small
+ namespace: vms
+ memory: 64M
+ selector:
+ matchLabels:
+ kubevirt.io/vmPreset: vmi-preset-small
+
+- name: Remove virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: absent
+ name: vmi-preset-small
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_preset:
+ description:
+ - The virtual machine preset managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC
+)
+
+
+KIND = 'VirtualMachineInstancePreset'
+VMP_ARG_SPEC = {
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMPreset(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VMP_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ # FIXME: Devices must be set, but we don't yet support any
+ # attributes there, remove when we do:
+ definition['spec']['domain']['devices'] = dict()
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+ # Execute the CRUD of the VM:
+ dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_preset': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMPreset()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py
new file mode 100644
index 00000000..5687c23d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_pvc.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_pvc
+
+short_description: Manage PVCs on Kubernetes
+
+
+author: KubeVirt Team (@kubevirt)
+
+description:
+ - Use Openshift Python SDK to manage PVCs on Kubernetes
+ - Supports the Containerized Data Importer out of the box
+
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the PVC object being created/updated. Here you can define Kubernetes
+ PVC Resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ state:
+ description:
+ - "Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+ created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+ C(present), an existing object will be patched, if its attributes differ from those specified using
+ module options and I(resource_definition)."
+ default: present
+ choices:
+ - present
+ - absent
+ force:
+ description:
+ - If set to C(True), and I(state) is C(present), an existing object will be replaced.
+ default: false
+ type: bool
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - "This defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources."
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - If more than one merge_type is given, the merge_types will be tried in order
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ name:
+ description:
+ - Use to specify a PVC object name.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Use to specify a PVC object namespace.
+ required: true
+ type: str
+ annotations:
+ description:
+ - Annotations attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ type: dict
+ labels:
+ description:
+ - Labels attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ selector:
+ description:
+ - A label query over volumes to consider for binding.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ access_modes:
+ description:
+ - Contains the desired access modes the volume should have.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes)"
+ type: list
+ size:
+ description:
+ - How much storage to allocate to the PVC.
+ type: str
+ aliases:
+ - storage
+ storage_class_name:
+ description:
+ - Name of the StorageClass required by the claim.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1)"
+ type: str
+ volume_mode:
+ description:
+ - "This defines what type of volume is required by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature of kubernetes and may change in the future."
+ type: str
+ volume_name:
+ description:
+ - This is the binding reference to the PersistentVolume backing this claim.
+ type: str
+ cdi_source:
+ description:
+ - "If data is to be copied onto the PVC using the Containerized Data Importer you can specify the source of
+ the data (along with any additional configuration) as well as its format."
+ - "Valid source types are: blank, http, s3, registry, pvc and upload. The last one requires using the
+ M(community.general.kubevirt_cdi_upload) module to actually perform an upload."
+ - "Source data format is specified using the optional I(content_type). Valid options are C(kubevirt)
+ (default; raw image) and C(archive) (tar.gz)."
+ - "This uses the DataVolume source syntax:
+ U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/datavolumes.md#https3registry-source)"
+ type: dict
+ wait:
+ description:
+ - "If set, this module will wait for the PVC to become bound and CDI (if enabled) to finish its operation
+ before returning."
+ - "Used only if I(state) set to C(present)."
+ - "Unless used in conjunction with I(cdi_source), this might result in a timeout, as clusters may be configured
+ to not bind PVCs until first usage."
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - Specifies how much time in seconds to wait for PVC creation to complete if I(wait) option is enabled.
+ - Default value is reasonably high due to an expectation that CDI might take a while to finish its operation.
+ type: int
+ default: 300
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create a PVC and import data from an external source
+ community.general.kubevirt_pvc:
+ name: pvc1
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ http:
+ url: https://www.source.example/path/of/data/vm.img
+ # If the URL points to a tar.gz containing the disk image, uncomment the line below:
+ #content_type: archive
+
+- name: Create a PVC as a clone from a different PVC
+ community.general.kubevirt_pvc:
+ name: pvc2
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ pvc:
+ namespace: source-ns
+ name: source-pvc
+
+- name: Create a PVC ready for data upload
+ community.general.kubevirt_pvc:
+ name: pvc3
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ upload: yes
+ # You need the kubevirt_cdi_upload module to actually upload something
+
+- name: Create a PVC with a blank raw image
+ community.general.kubevirt_pvc:
+ name: pvc4
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ blank: yes
+
+- name: Create a PVC and fill it with data from a container
+ community.general.kubevirt_pvc:
+ name: pvc5
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ registry:
+ url: "docker://kubevirt/fedora-cloud-registry-disk-demo"
+
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.module_utils.kubevirt import virtdict, KubeVirtRawModule
+
+
+PVC_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent'
+ ],
+ 'default': 'present'
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'labels': {'type': 'dict'},
+ 'annotations': {'type': 'dict'},
+ 'selector': {'type': 'dict'},
+ 'access_modes': {'type': 'list'},
+ 'size': {
+ 'type': 'str',
+ 'aliases': ['storage']
+ },
+ 'storage_class_name': {'type': 'str'},
+ 'volume_mode': {'type': 'str'},
+ 'volume_name': {'type': 'str'},
+ 'cdi_source': {'type': 'dict'},
+ 'wait': {
+ 'type': 'bool',
+ 'default': False
+ },
+ 'wait_timeout': {
+ 'type': 'int',
+ 'default': 300
+ }
+}
+
+
+class CreatePVCFailed(Exception):
+ pass
+
+
+class KubevirtPVC(KubernetesRawModule):
+ def __init__(self):
+ super(KubevirtPVC, self).__init__()
+
+ @property
+ def argspec(self):
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(PVC_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _parse_cdi_source(self, _cdi_src, metadata):
+ cdi_src = copy.deepcopy(_cdi_src)
+ annotations = metadata['annotations']
+ labels = metadata['labels']
+
+ valid_content_types = ('kubevirt', 'archive')
+ valid_sources = ('http', 's3', 'pvc', 'upload', 'blank', 'registry')
+
+ if 'content_type' in cdi_src:
+ content_type = cdi_src.pop('content_type')
+ if content_type not in valid_content_types:
+ raise ValueError("cdi_source.content_type must be one of {0}, not: '{1}'".format(
+ valid_content_types, content_type))
+ annotations['cdi.kubevirt.io/storage.contentType'] = content_type
+
+ if len(cdi_src) != 1:
+ raise ValueError("You must specify exactly one valid CDI source, not {0}: {1}".format(len(cdi_src), tuple(cdi_src.keys())))
+
+ src_type = tuple(cdi_src.keys())[0]
+ src_spec = cdi_src[src_type]
+
+ if src_type not in valid_sources:
+ raise ValueError("Got an invalid CDI source type: '{0}', must be one of {1}".format(src_type, valid_sources))
+
+ # True for all cases save one
+ labels['app'] = 'containerized-data-importer'
+
+ if src_type == 'upload':
+ annotations['cdi.kubevirt.io/storage.upload.target'] = ''
+ elif src_type == 'blank':
+ annotations['cdi.kubevirt.io/storage.import.source'] = 'none'
+ elif src_type == 'pvc':
+ if not isinstance(src_spec, dict) or sorted(src_spec.keys()) != ['name', 'namespace']:
+ raise ValueError("CDI Source 'pvc' requires specifying 'name' and 'namespace' (and nothing else)")
+ labels['app'] = 'host-assisted-cloning'
+ annotations['k8s.io/CloneRequest'] = '{0}/{1}'.format(src_spec['namespace'], src_spec['name'])
+ elif src_type in ('http', 's3', 'registry'):
+ if not isinstance(src_spec, dict) or 'url' not in src_spec:
+ raise ValueError("CDI Source '{0}' requires specifying 'url'".format(src_type))
+ unknown_params = set(src_spec.keys()).difference(set(('url', 'secretRef', 'certConfigMap')))
+ if unknown_params:
+ raise ValueError("CDI Source '{0}' does not know recognize params: {1}".format(src_type, tuple(unknown_params)))
+ annotations['cdi.kubevirt.io/storage.import.source'] = src_type
+ annotations['cdi.kubevirt.io/storage.import.endpoint'] = src_spec['url']
+ if 'secretRef' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.secretName'] = src_spec['secretRef']
+ if 'certConfigMap' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.certConfigMap'] = src_spec['certConfigMap']
+
+ def _wait_for_creation(self, resource, uid):
+ return_obj = None
+ desired_cdi_status = 'Succeeded'
+ use_cdi = True if self.params.get('cdi_source') else False
+ if use_cdi and 'upload' in self.params['cdi_source']:
+ desired_cdi_status = 'Running'
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ metadata = entity.metadata
+ if not hasattr(metadata, 'uid') or metadata.uid != uid:
+ continue
+ if entity.status.phase == 'Bound':
+ if use_cdi and hasattr(metadata, 'annotations'):
+ import_status = metadata.annotations.get('cdi.kubevirt.io/storage.pod.phase')
+ if import_status == desired_cdi_status:
+ return_obj = entity
+ break
+ elif import_status == 'Failed':
+ raise CreatePVCFailed("PVC creation incomplete; importing data failed")
+ else:
+ return_obj = entity
+ break
+ elif entity.status.phase == 'Failed':
+ raise CreatePVCFailed("PVC creation failed")
+
+ if not return_obj:
+ raise CreatePVCFailed("PVC creation timed out")
+
+ return self.fix_serialization(return_obj)
+
+ def execute_module(self):
+ KIND = 'PersistentVolumeClaim'
+ API = 'v1'
+
+ definition = virtdict()
+ definition['kind'] = KIND
+ definition['apiVersion'] = API
+
+ metadata = definition['metadata']
+ metadata['name'] = self.params.get('name')
+ metadata['namespace'] = self.params.get('namespace')
+ if self.params.get('annotations'):
+ metadata['annotations'] = self.params.get('annotations')
+ if self.params.get('labels'):
+ metadata['labels'] = self.params.get('labels')
+ if self.params.get('cdi_source'):
+ self._parse_cdi_source(self.params.get('cdi_source'), metadata)
+
+ spec = definition['spec']
+ if self.params.get('access_modes'):
+ spec['accessModes'] = self.params.get('access_modes')
+ if self.params.get('size'):
+ spec['resources']['requests']['storage'] = self.params.get('size')
+ if self.params.get('storage_class_name'):
+ spec['storageClassName'] = self.params.get('storage_class_name')
+ if self.params.get('selector'):
+ spec['selector'] = self.params.get('selector')
+ if self.params.get('volume_mode'):
+ spec['volumeMode'] = self.params.get('volume_mode')
+ if self.params.get('volume_name'):
+ spec['volumeName'] = self.params.get('volume_name')
+
+ # 'resource_definition:' has lower priority than module parameters
+ definition = dict(KubeVirtRawModule.merge_dicts(definition, self.resource_definitions[0]))
+
+ self.client = self.get_api_client()
+ resource = self.find_resource(KIND, API, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+ if self.params.get('wait') and self.params.get('state') == 'present':
+ result['result'] = self._wait_for_creation(resource, result['result']['metadata']['uid'])
+
+ self.exit_json(**result)
+
+
+def main():
+ module = KubevirtPVC()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py
new file mode 100644
index 00000000..d1fdc394
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_rs.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_rs
+
+short_description: Manage KubeVirt virtual machine replica sets
+
+description:
+ - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machine replica sets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine replica sets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine replica set.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine replica set exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine."
+ required: true
+ type: dict
+ replicas:
+ description:
+ - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
+ - Replicas defaults to 1 for a newly created replica set.
+ type: int
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: present
+ name: myvmir
+ namespace: vms
+ wait: true
+ replicas: 3
+ memory: 64M
+ labels:
+ myvmi: myvmi
+ selector:
+ matchLabels:
+ myvmi: myvmi
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Remove virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: absent
+ name: myvmir
+ namespace: vms
+ wait: true
+'''
+
+RETURN = '''
+kubevirt_rs:
+ description:
+ - The virtual machine replica set managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API; all options
+ are described at U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+)
+
+
+KIND = 'VirtualMachineInstanceReplicaSet'
+VMIR_ARG_SPEC = {
+ 'replicas': {'type': 'int'},
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMIRS(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
+ argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
+ return argument_spec
+
+ def wait_for_replicas(self, replicas):
+ """ Wait for ready_replicas to equal the requested number of replicas. """
+ resource = self.find_supported_resource(KIND)
+ return_obj = None
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ readyReplicas = status.get('readyReplicas', 0)
+ if readyReplicas == replicas:
+ return_obj = entity
+ break
+
+ if not return_obj:
+ self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas is None:
+ self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas != replicas:
+ self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
+ "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
+ return return_obj.to_dict()
+
+ def execute_module(self):
+ # Parse parameters specific to this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+ replicas = self.params.get('replicas')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ if replicas is not None:
+ definition['spec']['replicas'] = replicas
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+ # Execute the CRUD of the VM:
+ template = definition['spec']['template']
+ dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
+ # wait logic work correctly
+ if changed and result_crud['method'] == 'create' and replicas is None:
+ replicas = 1
+
+ # Wait for the new number of ready replicas after a CRUD update
+ # Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
+ # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
+ # achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
+ if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
+ result = self.wait_for_replicas(replicas)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_rs': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMIRS()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py
new file mode 100644
index 00000000..3054b1a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_template.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_template
+
+short_description: Manage KubeVirt templates
+
+description:
+ - Use the OpenShift Python SDK to manage the state of KubeVirt templates.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ name:
+ description:
+ - Name of the Template object.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the Template object exists.
+ required: true
+ type: str
+ objects:
+ description:
+ - List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
+ will be created exactly as defined here, with any parameter values substituted in prior to creation.
+ The definition of these objects can reference parameters defined earlier.
+ - The list can also contain objects of I(VirtualMachine) kind. When passing a I(VirtualMachine),
+ use the Ansible parameter structure rather than the Kubernetes API structure. For more information,
+ see the M(community.general.kubevirt_vm) module and the EXAMPLES section below.
+ type: list
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ display_name:
+ description:
+ - "A brief, user-friendly name, which can be employed by user interfaces."
+ type: str
+ description:
+ description:
+ - A description of the template.
+ - "Include enough detail that the user will understand what is being deployed
+ and any caveats they need to know before deploying. It should also provide links to additional information,
+ such as a README file."
+ type: str
+ long_description:
+ description:
+ - "Additional template description. This may be displayed by the service catalog, for example."
+ type: str
+ provider_display_name:
+ description:
+ - "The name of the person or organization providing the template."
+ type: str
+ documentation_url:
+ description:
+ - "A URL referencing further documentation for the template."
+ type: str
+ support_url:
+ description:
+ - "A URL where support can be obtained for the template."
+ type: str
+ editable:
+ description:
+ - "Extension for hinting at which elements should be considered editable.
+ List of jsonpath selectors. The jsonpath root is the objects: element of the template."
+ - This parameter can be used only when the KubeVirt addon is installed on your OpenShift cluster.
+ type: list
+ default_disk:
+ description:
+ - "The goal of default disk is to define what kind of disk is supported by the OS mainly in
+ terms of bus (ide, scsi, sata, virtio, ...)"
+ - The C(default_disk) parameter defines a configuration overlay that is applied on top of disks
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can be used only when the KubeVirt addon is installed on your OpenShift cluster.
+ type: dict
+ default_volume:
+ description:
+ - "The goal of default volume is to be able to configure mostly performance parameters like
+ caches if those are exposed by the underlying volume implementation."
+ - The C(default_volume) parameter defines a configuration overlay that is applied on top of volumes
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can be used only when the KubeVirt addon is installed on your OpenShift cluster.
+ type: dict
+ default_nic:
+ description:
+ - "The goal of default network is similar to I(default_disk) and should be used as a template
+ to ensure OS compatibility and performance."
+ - The C(default_nic) parameter define configuration overlay for nic that will be applied on top of nics
+ during virtual machine creation to define global compatibility and/or performance defaults defined here.
+ - This is parameter can be used only when kubevirt addon is installed on your openshift cluster.
+ type: dict
+ default_network:
+ description:
+ - "The goal of default network is similar to I(default_volume) and should be used as a template
+ that specifies performance and connection parameters (L2 bridge for example)"
+ - The C(default_network) parameter defines a configuration overlay that is applied on top of networks
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can be used only when the KubeVirt addon is installed on your OpenShift cluster.
+ type: dict
+ icon_class:
+ description:
+ - "An icon to be displayed with your template in the web console. Choose from our existing logo
+ icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
+ CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
+ You must specify an icon class that exists; a nonexistent class prevents falling back to the generic icon."
+ type: str
+ parameters:
+ description:
+ - "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
+ Then, that value is substituted wherever the parameter is referenced. References can be defined in any
+ field in the objects list field. This is useful for generating random passwords or allowing the user to
+ supply a host name or other user-specific value that is required to customize the template."
+ - "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
+ type: list
+ version:
+ description:
+ - Template structure version.
+ - This parameter can be used only when the KubeVirt addon is installed on your OpenShift cluster.
+ type: str
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.kubernetes.k8s_state_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create template 'mytemplate'
+ community.general.kubevirt_template:
+ state: present
+ name: myvmtemplate
+ namespace: templates
+ display_name: Generic cirros template
+ description: Basic cirros template
+ long_description: Verbose description of cirros template
+ provider_display_name: Just Be Cool, Inc.
+ documentation_url: http://theverycoolcompany.com
+ support_url: http://support.theverycoolcompany.com
+ icon_class: icon-linux
+ default_disk:
+ disk:
+ bus: virtio
+ default_nic:
+ model: virtio
+ default_network:
+ resource:
+ resourceName: bridge.network.kubevirt.io/cnvmgmt
+ default_volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ objects:
+ - name: ${NAME}
+ kind: VirtualMachine
+ memory: ${MEMORY_SIZE}
+ state: present
+ namespace: vms
+ parameters:
+ - name: NAME
+ description: VM name
+ generate: expression
+ from: 'vm-[A-Za-z0-9]{8}'
+ - name: MEMORY_SIZE
+ description: Memory size
+ value: 1Gi
+
+- name: Remove template 'myvmtemplate'
+ community.general.kubevirt_template:
+ state: absent
+ name: myvmtemplate
+ namespace: templates
+'''
+
+RETURN = '''
+kubevirt_template:
+ description:
+ - The template dictionary specification returned by the API.
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ API_GROUP,
+ MAX_SUPPORTED_API_VERSION
+)
+
+
+TEMPLATE_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'objects': {
+ 'type': 'list',
+ },
+ 'display_name': {
+ 'type': 'str',
+ },
+ 'description': {
+ 'type': 'str',
+ },
+ 'long_description': {
+ 'type': 'str',
+ },
+ 'provider_display_name': {
+ 'type': 'str',
+ },
+ 'documentation_url': {
+ 'type': 'str',
+ },
+ 'support_url': {
+ 'type': 'str',
+ },
+ 'icon_class': {
+ 'type': 'str',
+ },
+ 'version': {
+ 'type': 'str',
+ },
+ 'editable': {
+ 'type': 'list',
+ },
+ 'default_disk': {
+ 'type': 'dict',
+ },
+ 'default_volume': {
+ 'type': 'dict',
+ },
+ 'default_network': {
+ 'type': 'dict',
+ },
+ 'default_nic': {
+ 'type': 'dict',
+ },
+ 'parameters': {
+ 'type': 'list',
+ },
+}
+
+
+class KubeVirtVMTemplate(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(TEMPLATE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+
+ # Execute the CRUD of VM template:
+ kind = 'Template'
+ template_api_version = 'template.openshift.io/v1'
+
+ # Fill in template parameters:
+ definition['parameters'] = self.params.get('parameters')
+
+ # Fill in the default Label
+ labels = definition['metadata']['labels']
+ labels['template.cnv.io/type'] = 'vm'
+
+ # Fill in Openshift/Kubevirt template annotations:
+ annotations = definition['metadata']['annotations']
+ if self.params.get('display_name'):
+ annotations['openshift.io/display-name'] = self.params.get('display_name')
+ if self.params.get('description'):
+ annotations['description'] = self.params.get('description')
+ if self.params.get('long_description'):
+ annotations['openshift.io/long-description'] = self.params.get('long_description')
+ if self.params.get('provider_display_name'):
+ annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
+ if self.params.get('documentation_url'):
+ annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
+ if self.params.get('support_url'):
+ annotations['openshift.io/support-url'] = self.params.get('support_url')
+ if self.params.get('icon_class'):
+ annotations['iconClass'] = self.params.get('icon_class')
+ if self.params.get('version'):
+ annotations['template.cnv.io/version'] = self.params.get('version')
+
+ # TODO: Make it more Ansiblish, so users don't have to specify the API JSON path, but rather Ansible params:
+ if self.params.get('editable'):
+ annotations['template.cnv.io/editable'] = self.params.get('editable')
+
+ # Set defaults annotations:
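+ # Each default refers to an entry in the template objects by its 'name' key; e.g. (values are
+ # hypothetical) default_disk: {'name': 'default', 'disk': {'bus': 'virtio'}} produces the
+ # annotation defaults.template.cnv.io/disk: default.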
+ if self.params.get('default_disk'):
+ annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
+ if self.params.get('default_volume'):
+ annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
+ if self.params.get('default_nic'):
+ annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name')
+ if self.params.get('default_network'):
+ annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name')
+
+ # Process objects:
+ self.client = self.get_api_client()
+ definition['objects'] = []
+ objects = self.params.get('objects') or []
+ for obj in objects:
+ if obj['kind'] != 'VirtualMachine':
+ definition['objects'].append(obj)
+ else:
+ vm_definition = virtdict()
+
+ # Set VM defaults:
+ if self.params.get('default_disk'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')]
+ if self.params.get('default_volume'):
+ vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')]
+ if self.params.get('default_nic'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')]
+ if self.params.get('default_network'):
+ vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')]
+
+ # Set kubevirt API version:
+ vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION)
+
+ # Construct k8s vm API object:
+ vm_template = vm_definition['spec']['template']
+ dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj)
+
+ definition['objects'].append(vm_def)
+
+ # Create template:
+ resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates')
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': result['changed'],
+ 'kubevirt_template': result.pop('result'),
+ 'result': result,
+ })
+
+
+def main():
+ module = KubeVirtVMTemplate()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py
new file mode 100644
index 00000000..4466bee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/kubevirt/kubevirt_vm.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_vm
+
+short_description: Manage KubeVirt virtual machine
+
+description:
+ - Use the OpenShift Python SDK to manage the state of KubeVirt virtual machines.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
+ - "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
+ - "I(absent) - Remove a virtual machine."
+ - "I(running) - Create or update a virtual machine and run it."
+ - "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
+ default: "present"
+ choices:
+ - present
+ - absent
+ - running
+ - stopped
+ type: str
+ name:
+ description:
+ - Name of the virtual machine.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine exists.
+ required: true
+ type: str
+ ephemeral:
+ description:
+ - If I(true), an ephemeral virtual machine will be created. Once destroyed, it won't be accessible again.
+ - Works only with C(state) I(present) and I(absent).
+ type: bool
+ default: false
+ datavolumes:
+ description:
+ - "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
+ launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
+ it to a VM or VMI manifest. With a DataVolume, both the pvc creation and import is automated on behalf of the user."
+ type: list
+ template:
+ description:
+ - "Name of Template to be used in creation of a virtual machine."
+ type: str
+ template_parameters:
+ description:
+ - "New values of parameters from Template."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Start virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+
+- name: Create virtual machine 'myvm' and start it
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64Mi
+ cpu_cores: 1
+ bootloader: efi
+ smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
+ cpu_model: Conroe
+ headless: true
+ hugepage_size: 2Mi
+ tablets:
+ - bus: virtio
+ name: tablet1
+ cpu_limit: 3
+ cpu_shares: 2
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Create virtual machine 'myvm' with multus network interface
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: vms
+ memory: 512M
+ interfaces:
+ - name: default
+ bridge: {}
+ network:
+ pod: {}
+ - name: mynet
+ bridge: {}
+ network:
+ multus:
+ networkName: mynetconf
+
+- name: Combine inline definition with Ansible parameters
+ community.general.kubevirt_vm:
+ # Kubernetes specification:
+ definition:
+ metadata:
+ labels:
+ app: galaxy
+ service: web
+ origin: vmware
+
+ # Ansible parameters:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64M
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start ephemeral virtual machine 'myvm' and wait to be running
+ community.general.kubevirt_vm:
+ ephemeral: true
+ state: running
+ wait: true
+ wait_timeout: 180
+ name: myvm
+ namespace: vms
+ memory: 64M
+ labels:
+ kubevirt.io/vm: myvm
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start fedora vm with cloud init
+ community.general.kubevirt_vm:
+ state: running
+ wait: true
+ name: myvm
+ namespace: vms
+ memory: 1024M
+ cloud_init_nocloud:
+ userData: |-
+ #cloud-config
+ password: fedora
+ chpasswd: { expire: False }
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/fedora-cloud-container-disk-demo:latest
+ path: /disk/fedora.qcow2
+ disk:
+ bus: virtio
+ node_affinity:
+ soft:
+ - weight: 1
+ term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S2
+
+- name: Create virtual machine with datavolume and specify node affinity
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: default
+ memory: 1024Mi
+ datavolumes:
+ - name: mydv
+ source:
+ http:
+ url: https://url/disk.qcow2
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ storage: 5Gi
+ node_affinity:
+ hard:
+ - term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S1
+
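+# A minimal sketch of creating a VM from an existing Template; the template name
+# 'fedora-template' and the NAME/MEMORY parameters are hypothetical and must match
+# parameters actually defined in that Template:
+- name: Create virtual machine from a template, overriding its parameters
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+ template: fedora-template
+ template_parameters:
+ NAME: myvm
+ MEMORY: 1Gi
+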
+- name: Remove virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: absent
+ name: myvm
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_vm:
+ description:
+ - The virtual machine dictionary specification returned by the API.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+ VM_SPEC_DEF_ARG_SPEC
+)
+
+VM_ARG_SPEC = {
+ 'ephemeral': {'type': 'bool', 'default': False},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent', 'running', 'stopped'
+ ],
+ 'default': 'present'
+ },
+ 'datavolumes': {'type': 'list'},
+ 'template': {'type': 'str'},
+ 'template_parameters': {'type': 'dict'},
+}
+
+# Which params (can) modify 'spec:' contents of a VM:
+VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
+
+
+class KubeVirtVM(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VM_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _wait_for_vmi_running(self):
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ phase = status.get('phase', None)
+ if phase == 'Running':
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
+
+ def _wait_for_vm_state(self, new_state):
+ if new_state == 'running':
+ want_created = want_ready = True
+ else:
+ want_created = want_ready = False
+
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ created = status.get('created', False)
+ ready = status.get('ready', False)
+ if (created, ready) == (want_created, want_ready):
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
+ "Maybe try a higher wait_timeout value?".format(new_state))
+
+ def manage_vm_state(self, new_state, already_changed):
+ new_running = True if new_state == 'running' else False
+ changed = False
+ k8s_obj = {}
+
+ if not already_changed:
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not k8s_obj:
+ self.fail("VirtualMachine object disappeared during module operation, aborting.")
+ if k8s_obj.spec.get('running', False) == new_running:
+ return False, k8s_obj
+
+ newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
+ k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
+ self.name, self.namespace, merge_type='merge')
+ if err:
+ self.fail_json(**err)
+ else:
+ changed = True
+
+ if self.params.get('wait'):
+ k8s_obj = self._wait_for_vm_state(new_state)
+
+ return changed, k8s_obj
+
+ def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
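+ # The template's defaults.template.cnv.io/* annotations each name one entry (matched by its
+ # 'name' key) in the processed template; that entry becomes the module-level default, while
+ # the remaining entries are kept in the VM definition spec.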
+ def set_template_default(default_name, default_name_index, definition_spec):
+ default_value = proccess_template['metadata']['annotations'][default_name]
+ if default_value:
+ values = definition_spec[default_name_index]
+ default_values = [d for d in values if d.get('name') == default_value]
+ defaults[default_name_index] = default_values
+ if definition_spec[default_name_index] is None:
+ definition_spec[default_name_index] = []
+ definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
+
+ devices = processedtemplate['spec']['template']['spec']['domain']['devices']
+ spec = processedtemplate['spec']['template']['spec']
+
+ set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
+ set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
+ set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
+ set_template_default('defaults.template.cnv.io/network', 'networks', spec)
+
+ def construct_definition(self, kind, our_state, ephemeral):
+ definition = virtdict()
+ processedtemplate = {}
+
+ # Construct the API object definition:
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+ vm_template = self.params.get('template')
+ if vm_template:
+ # Find the template the VM should be created from:
+ template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
+ proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
+
+ # Set proper template values taken from module option 'template_parameters':
+ for k, v in self.params.get('template_parameters', {}).items():
+ for parameter in proccess_template.parameters:
+ if parameter.name == k:
+ parameter.value = v
+
+ # Process the template:
+ processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
+ processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
+
+ # Process defaults of the template:
+ self._process_template_defaults(proccess_template, processedtemplate, defaults)
+
+ if not ephemeral:
+ definition['spec']['running'] = our_state == 'running'
+ template = definition if ephemeral else definition['spec']['template']
+ template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
+ dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
+
+ return self.merge_dicts(definition, processedtemplate)
+
+ def execute_module(self):
+ # Parse parameters specific to this module:
+ ephemeral = self.params.get('ephemeral')
+ k8s_state = our_state = self.params.get('state')
+ kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
+ _used_params = [name for name in self.params if self.params[name] is not None]
+ # Is 'spec:' getting changed?
+ vm_spec_change = True if set(VM_SPEC_PARAMS).intersection(_used_params) else False
+ changed = False
+ crud_executed = False
+ method = ''
+
+ # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
+ if ephemeral:
+ # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
+ if our_state == 'running':
+ self.params['state'] = k8s_state = 'present'
+ elif our_state == 'stopped':
+ self.params['state'] = k8s_state = 'absent'
+ else:
+ if our_state != 'absent':
+ self.params['state'] = k8s_state = 'present'
+
+ # Start with fetching the current object to make sure it exists
+ # If it does, but we end up not performing any operations on it, at least we'll be able to return
+ # its current contents as part of the final json
+ self.client = self.get_api_client()
+ self._kind_resource = self.find_supported_resource(kind)
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
+ self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
+
+ # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
+ # Also check_mode always warrants a CRUD, as that'll produce a sane result
+ if vm_spec_change or k8s_state == 'absent' or self.check_mode:
+ definition = self.construct_definition(kind, our_state, ephemeral)
+ result = self.execute_crud(kind, definition)
+ changed = result['changed']
+ k8s_obj = result['result']
+ method = result['method']
+ crud_executed = True
+
+ if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
+ # Waiting for k8s_state==absent is handled inside execute_crud()
+ k8s_obj = self._wait_for_vmi_running()
+
+ if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
+ # State==present/absent doesn't involve any additional VMI state management and is fully
+ # handled inside execute_crud() (including wait logic)
+ patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
+ changed = changed or patched
+ if changed:
+ method = method or 'patch'
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_vm': self.fix_serialization(k8s_obj),
+ 'method': method
+ })
+
+
+def main():
+ module = KubeVirtVM()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
new file mode 100644
index 00000000..a35b25b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+ - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ default: present
+ type: str
+ api_key:
+ description:
+ - Linode API key
+ type: str
+ name:
+ description:
+ - Name to give the instance (alphanumeric, dashes, underscore).
+ - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+ required: true
+ type: str
+ displaygroup:
+ description:
+ - Add the instance to a Display Group in Linode Manager.
+ type: str
+ linode_id:
+ description:
+ - Unique ID of a Linode server. This value is read-only in the sense that
+ if you specify it on creation of a Linode it will not be used. The
+ Linode API generates these IDs and we can use those generated values here to
+ reference a Linode more specifically. This is useful for idempotence.
+ aliases: [ lid ]
+ type: int
+ additional_disks:
+ description:
+ - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB.
+ type: list
+ alert_bwin_enabled:
+ description:
+ - Set status of bandwidth in alerts.
+ type: bool
+ alert_bwin_threshold:
+ description:
+ - Set threshold in MB of bandwidth in alerts.
+ type: int
+ alert_bwout_enabled:
+ description:
+ - Set status of bandwidth out alerts.
+ type: bool
+ alert_bwout_threshold:
+ description:
+ - Set threshold in MB of bandwidth out alerts.
+ type: int
+ alert_bwquota_enabled:
+ description:
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ type: bool
+ alert_bwquota_threshold:
+ description:
+ - Set threshold for bandwidth quota alerts as a percentage of the monthly network transfer quota.
+ type: int
+ alert_cpu_enabled:
+ description:
+ - Set status of receiving CPU usage alerts.
+ type: bool
+ alert_cpu_threshold:
+ description:
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ type: int
+ alert_diskio_enabled:
+ description:
+ - Set status of receiving disk IO alerts.
+ type: bool
+ alert_diskio_threshold:
+ description:
+ - Set threshold for average IO ops/sec over 2 hour period.
+ type: int
+ backupweeklyday:
+ description:
+ - Integer value for what day of the week to store weekly backups.
+ type: int
+ plan:
+ description:
+ - plan to use for the instance (Linode plan)
+ type: int
+ payment_term:
+ description:
+ - payment term to use for the instance (payment term in months)
+ default: 1
+ choices: [ 1, 12, 24 ]
+ type: int
+ password:
+ description:
+ - root password to apply to a new server (auto generated if missing)
+ type: str
+ private_ip:
+ description:
+ - Add private IPv4 address when Linode is created.
+ - Default is C(false).
+ type: bool
+ ssh_pub_key:
+ description:
+ - SSH public key applied to root user
+ type: str
+ swap:
+ description:
+ - swap size in MB
+ default: 512
+ type: int
+ distribution:
+ description:
+ - distribution to use for the instance (Linode Distribution)
+ type: int
+ datacenter:
+ description:
+ - datacenter to create an instance in (Linode Datacenter)
+ type: int
+ kernel_id:
+ description:
+ - kernel to use for the instance (Linode Kernel)
+ type: int
+ wait:
+ description:
+ - wait for the instance to be in state C(running) before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+ type: int
+ watchdog:
+ description:
+ - Set status of Lassie watchdog.
+ type: bool
+ default: "True"
+requirements:
+ - python >= 2.6
+ - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+ - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API.
+ - C(LINODE_API_KEY) env variable can be used instead.
+ - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
+'''
+
+EXAMPLES = '''
+
+- name: Create a new Linode
+ community.general.linode:
+ name: linode-test1
+ plan: 1
+ datacenter: 7
+ distribution: 129
+ state: present
+ register: linode_creation
+
+- name: Create a server with a private IP Address
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Fully configure new server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: True
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: True
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: True
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: True
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: True
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
+ - {Label: 'disk1', Size: 2500, Type: 'raw'}
+ - {Label: 'newdisk', Size: 2000}
+ watchdog: True
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Ensure a running server (create if missing)
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Delete a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Stop a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
+ delegate_to: localhost
+
+- name: Reboot a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
+ delegate_to: localhost
+'''
+
+import os
+import time
+import traceback
+
+LINODE_IMP_ERR = None
+try:
+ from linode import api as linode_api
+ HAS_LINODE = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def randompass():
+ '''
+ Generate a long random password that complies with Linode requirements
+ '''
+ # Linode API currently requires the following:
+ # It must contain at least two of these four character classes:
+ # lower case letters - upper case letters - numbers - punctuation
+ # we play it safe :)
+ import random
+ import string
+ # as of python 2.4, this reseeds the PRNG from urandom
+ random.seed()
+ lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
+ upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
+ number = ''.join(random.choice(string.digits) for x in range(6))
+ punct = ''.join(random.choice(string.punctuation) for x in range(6))
+ p = lower + upper + number + punct
+ return ''.join(random.sample(p, len(p)))
+
+
+def getInstanceDetails(api, server):
+ '''
+ Return the details of an instance, populating IPs, etc.
+ '''
+ instance = {'id': server['LINODEID'],
+ 'name': server['LABEL'],
+ 'public': [],
+ 'private': []}
+
+ # Populate with ips
+ for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+ if ip['ISPUBLIC'] and 'ipv4' not in instance:
+ instance['ipv4'] = ip['IPADDRESS']
+ instance['fqdn'] = ip['RDNS_NAME']
+ if ip['ISPUBLIC']:
+ instance['public'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ else:
+ instance['private'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ return instance
+
+
+def linodeServers(module, api, state, name,
+ displaygroup, plan, additional_disks, distribution,
+ datacenter, kernel_id, linode_id, payment_term, password,
+ private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+ instances = []
+ changed = False
+ new_server = False
+ servers = []
+ disks = []
+ configs = []
+ jobs = []
+
+ # See if we can match an existing server's details with the provided linode_id
+ if linode_id:
+ # For the moment we only consider linode_id as criteria for match
+ # Later we can use more (size, name, etc.) and update existing
+ servers = api.linode_list(LinodeId=linode_id)
+ # Attempt to fetch details about disks and configs only if servers are
+ # found with linode_id
+ if servers:
+ disks = api.linode_disk_list(LinodeId=linode_id)
+ configs = api.linode_config_list(LinodeId=linode_id)
+
+ # Act on the state
+ if state in ('active', 'present', 'started'):
+ # TODO: validate all the plan / distribution / datacenter are valid
+
+ # Multi step process/validation:
+ # - need linode_id (entity)
+ # - need disk_id for linode_id - create disk from distrib
+ # - need config_id for linode_id - create config (need kernel)
+
+ # Any create step triggers a job that needs to be waited for.
+ if not servers:
+ for arg in (name, plan, distribution, datacenter):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state)) # @TODO use required_if instead
+ # Create linode entity
+ new_server = True
+
+ # Get size of all individually listed disks to subtract from Distribution disk
+ used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+ try:
+ res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+ PaymentTerm=payment_term)
+ linode_id = res['LinodeID']
+ # Update linode Label to match name
+ api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+ # Update Linode with Ansible configuration options
+ api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+ # Save server
+ servers = api.linode_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Add private IP to Linode
+ if private_ip:
+ try:
+ res = api.linode_ip_addprivate(LinodeID=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not disks:
+ for arg in (name, linode_id, distribution):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state))
+ # Create disks (1 from distrib, 1 for SWAP)
+ new_server = True
+ try:
+ if not password:
+ # Password is required on creation, if not provided generate one
+ password = randompass()
+ if not swap:
+ swap = 512
+ # Create data disk
+ size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+ if ssh_pub_key:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password, rootSSHKey=ssh_pub_key,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ else:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ jobs.append(res['JobID'])
+ # Create SWAP disk
+ res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+ Label='%s swap disk (lid: %s)' % (name, linode_id),
+ Size=swap)
+ # Create individually listed disks at specified size
+ if additional_disks:
+ for disk in additional_disks:
+ # If a disk Type is not passed in, default to ext4
+ if disk.get('Type') is None:
+ disk['Type'] = 'ext4'
+ res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+ jobs.append(res['JobID'])
+ except Exception as e:
+ # TODO: destroy linode ?
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not configs:
+ for arg in (name, linode_id, distribution):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state))
+
+ # Check architecture
+ for distrib in api.avail_distributions():
+ if distrib['DISTRIBUTIONID'] != distribution:
+ continue
+ arch = '32'
+ if distrib['IS64BIT']:
+ arch = '64'
+ break
+
+ # Get latest kernel matching arch if kernel_id is not specified
+ if not kernel_id:
+ for kernel in api.avail_kernels():
+ if not kernel['LABEL'].startswith('Latest %s' % arch):
+ continue
+ kernel_id = kernel['KERNELID']
+ break
+
+ # Get disk list
+ disks_id = []
+ for disk in api.linode_disk_list(LinodeId=linode_id):
+ if disk['TYPE'] == 'ext3':
+ disks_id.insert(0, str(disk['DISKID']))
+ continue
+ disks_id.append(str(disk['DISKID']))
+ # The Disklist parameter expects nine comma-separated slots (one per device node); pad with empty strings
+ while len(disks_id) < 9:
+ disks_id.append('')
+ disks_list = ','.join(disks_id)
+
+ # Create config
+ new_server = True
+ try:
+ api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+ Disklist=disks_list, Label='%s config' % name)
+ configs = api.linode_config_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Start / Ensure servers are running
+ for server in servers:
+ # Refresh server state
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # Ensure existing servers are up and running, boot if necessary
+ if server['STATUS'] != 1:
+ res = api.linode_boot(LinodeId=linode_id)
+ jobs.append(res['JobID'])
+ changed = True
+
+ # wait here until the instances are up
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ # refresh the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # status:
+ # -2: Boot failed
+ # 1: Running
+ if server['STATUS'] in (-2, 1):
+ break
+ time.sleep(5)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+ # Get a fresh copy of the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ if server['STATUS'] == -2:
+ module.fail_json(msg='%s (lid: %s) failed to boot' %
+ (server['LABEL'], server['LINODEID']))
+ # From now on we know the task is a success
+ # Build instance report
+ instance = getInstanceDetails(api, server)
+ # depending on wait flag select the status
+ if wait:
+ instance['status'] = 'Running'
+ else:
+ instance['status'] = 'Starting'
+
+ # Return the root password if this is a new box and no SSH key
+ # has been provided
+ if new_server and not ssh_pub_key:
+ instance['password'] = password
+ instances.append(instance)
+
+ elif state in ('stopped',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for stopped state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ if server['STATUS'] != 2:
+ try:
+ res = api.linode_shutdown(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Stopping'
+ changed = True
+ else:
+ instance['status'] = 'Stopped'
+ instances.append(instance)
+
+ elif state in ('restarted',):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for restarted state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ res = api.linode_reboot(LinodeId=server['LINODEID'])
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Restarting'
+ changed = True
+ instances.append(instance)
+
+ elif state in ('absent', 'deleted'):
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Deleting'
+ changed = True
+ instances.append(instance)
+
+ # Ease parsing if only 1 instance
+ if len(instances) == 1:
+ module.exit_json(changed=changed, instance=instances[0])
+
+ module.exit_json(changed=changed, instances=instances)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
+ api_key=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ alert_bwin_enabled=dict(type='bool'),
+ alert_bwin_threshold=dict(type='int'),
+ alert_bwout_enabled=dict(type='bool'),
+ alert_bwout_threshold=dict(type='int'),
+ alert_bwquota_enabled=dict(type='bool'),
+ alert_bwquota_threshold=dict(type='int'),
+ alert_cpu_enabled=dict(type='bool'),
+ alert_cpu_threshold=dict(type='int'),
+ alert_diskio_enabled=dict(type='bool'),
+ alert_diskio_threshold=dict(type='int'),
+ backupsenabled=dict(type='int'),
+ backupweeklyday=dict(type='int'),
+ backupwindow=dict(type='int'),
+ displaygroup=dict(type='str', default=''),
+ plan=dict(type='int'),
+ additional_disks=dict(type='list'),
+ distribution=dict(type='int'),
+ datacenter=dict(type='int'),
+ kernel_id=dict(type='int'),
+ linode_id=dict(type='int', aliases=['lid']),
+ payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
+ password=dict(type='str', no_log=True),
+ private_ip=dict(type='bool'),
+ ssh_pub_key=dict(type='str'),
+ swap=dict(type='int', default=512),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ watchdog=dict(type='bool', default=True),
+ ),
+ )
+
+ if not HAS_LINODE:
+ module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
+
+ state = module.params.get('state')
+ api_key = module.params.get('api_key')
+ name = module.params.get('name')
+ alert_bwin_enabled = module.params.get('alert_bwin_enabled')
+ alert_bwin_threshold = module.params.get('alert_bwin_threshold')
+ alert_bwout_enabled = module.params.get('alert_bwout_enabled')
+ alert_bwout_threshold = module.params.get('alert_bwout_threshold')
+ alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
+ alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
+ alert_cpu_enabled = module.params.get('alert_cpu_enabled')
+ alert_cpu_threshold = module.params.get('alert_cpu_threshold')
+ alert_diskio_enabled = module.params.get('alert_diskio_enabled')
+ alert_diskio_threshold = module.params.get('alert_diskio_threshold')
+ backupsenabled = module.params.get('backupsenabled')
+ backupweeklyday = module.params.get('backupweeklyday')
+ backupwindow = module.params.get('backupwindow')
+ displaygroup = module.params.get('displaygroup')
+ plan = module.params.get('plan')
+ additional_disks = module.params.get('additional_disks')
+ distribution = module.params.get('distribution')
+ datacenter = module.params.get('datacenter')
+ kernel_id = module.params.get('kernel_id')
+ linode_id = module.params.get('linode_id')
+ payment_term = module.params.get('payment_term')
+ password = module.params.get('password')
+ private_ip = module.params.get('private_ip')
+ ssh_pub_key = module.params.get('ssh_pub_key')
+ swap = module.params.get('swap')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ watchdog = int(module.params.get('watchdog'))
+
+ kwargs = dict()
+ check_items = dict(
+ alert_bwin_enabled=alert_bwin_enabled,
+ alert_bwin_threshold=alert_bwin_threshold,
+ alert_bwout_enabled=alert_bwout_enabled,
+ alert_bwout_threshold=alert_bwout_threshold,
+ alert_bwquota_enabled=alert_bwquota_enabled,
+ alert_bwquota_threshold=alert_bwquota_threshold,
+ alert_cpu_enabled=alert_cpu_enabled,
+ alert_cpu_threshold=alert_cpu_threshold,
+ alert_diskio_enabled=alert_diskio_enabled,
+ alert_diskio_threshold=alert_diskio_threshold,
+ backupweeklyday=backupweeklyday,
+ backupwindow=backupwindow,
+ )
+
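+    # Editorial note (not in the original source): only options the user has
+    # actually set are forwarded to the Linode API; values left at None are
+    # skipped so the API defaults stay in effect.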
+ for key, value in check_items.items():
+ if value is not None:
+ kwargs[key] = value
+
+ # Setup the api_key
+ if not api_key:
+ try:
+ api_key = os.environ['LINODE_API_KEY']
+ except KeyError as e:
+            module.fail_json(msg='Unable to load %s' % e)
+
+ # setup the auth
+ try:
+ api = linode_api.Api(api_key)
+ api.test_echo()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ linodeServers(module, api, state, name,
+ displaygroup, plan,
+ additional_disks, distribution, datacenter, kernel_id, linode_id,
+ payment_term, password, private_ip, ssh_pub_key, swap, wait,
+ wait_timeout, watchdog, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
new file mode 100644
index 00000000..17a697b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode_v4
+short_description: Manage instances on the Linode cloud.
+description: Manage instances on the Linode cloud.
+requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+author:
+ - Luke Murphy (@decentral1se)
+notes:
+ - No Linode resizing is currently implemented. This module will, in time,
+ replace the current Linode module which uses deprecated API bindings on the
+ Linode side.
+options:
+ region:
+ description:
+ - The region of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/regions/).
+ required: false
+ type: str
+ image:
+ description:
+ - The image of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/images/).
+ type: str
+ required: false
+ type:
+ description:
+ - The type of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/linode-types/).
+ type: str
+ required: false
+ label:
+ description:
+ - The instance label. This label is used as the main determiner for
+ idempotence for the module and is therefore mandatory.
+ type: str
+ required: true
+ group:
+ description:
+      - The group that the instance should be marked under. Please note that
+ group labelling is deprecated but still supported. The encouraged
+ method for marking instances is to use tags.
+ type: str
+ required: false
+ tags:
+ description:
+ - The tags that the instance should be marked under. See
+ U(https://www.linode.com/docs/api/tags/).
+ required: false
+ type: list
+ root_pass:
+ description:
+ - The password for the root user. If not specified, one will be
+ generated. This generated password will be available in the task
+ success JSON.
+ required: false
+ type: str
+ authorized_keys:
+ description:
+ - A list of SSH public key parts to deploy for the root user.
+ required: false
+ type: list
+ state:
+ description:
+ - The desired instance state.
+ type: str
+ choices:
+ - present
+ - absent
+ required: true
+ access_token:
+ description:
+ - The Linode API v4 access token. It may also be specified by exposing
+ the C(LINODE_ACCESS_TOKEN) environment variable. See
+ U(https://www.linode.com/docs/api#access-and-authentication).
+ required: true
+ type: str
+ stackscript_id:
+ description:
+ - The numeric ID of the StackScript to use when creating the instance.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: int
+ version_added: 1.3.0
+ stackscript_data:
+ description:
+ - An object containing arguments to any User Defined Fields present in
+ the StackScript used when creating the instance.
+ Only valid when a stackscript_id is provided.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: dict
+ version_added: 1.3.0
+'''
+
+EXAMPLES = """
+- name: Create a new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ root_pass: passw0rd
+ authorized_keys:
+ - "ssh-rsa ..."
+ stackscript_id: 1337
+ stackscript_data:
+ variable: value
+ state: present
+
+- name: Delete that new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ state: absent
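+
+# Illustrative sketch (not part of the original examples): the access token
+# may instead be supplied through the LINODE_ACCESS_TOKEN environment
+# variable, in which case it can be omitted from the task parameters.
+- name: Create a Linode using the LINODE_ACCESS_TOKEN environment variable.
+  community.general.linode_v4:
+    label: env-token-linode
+    type: g6-nanode-1
+    region: eu-west
+    image: linode/debian9
+    state: present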
+"""
+
+RETURN = """
+instance:
+ description: The instance description in JSON serialized form.
+ returned: Always.
+ type: dict
+ sample: {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+
+LINODE_IMP_ERR = None
+try:
+ from linode_api4 import Instance, LinodeClient
+ HAS_LINODE_DEPENDENCY = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE_DEPENDENCY = False
+
+
+def create_linode(module, client, **kwargs):
+ """Creates a Linode instance and handles return format."""
+ if kwargs['root_pass'] is None:
+ kwargs.pop('root_pass')
+
+ try:
+ response = client.linode.instance_create(**kwargs)
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+ try:
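+        # Editorial note (not in the original source): linode_api4 returns a
+        # (Instance, generated_root_pass) tuple when no root_pass was supplied,
+        # and a bare Instance otherwise; both shapes are normalised into a
+        # single dict result below.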
+ if isinstance(response, tuple):
+ instance, root_pass = response
+ instance_json = instance._raw_json
+ instance_json.update({'root_pass': root_pass})
+ return instance_json
+ else:
+ return response._raw_json
+ except TypeError:
+ module.fail_json(msg='Unable to parse Linode instance creation'
+ ' response. Please raise a bug against this'
+ ' module on https://github.com/ansible/ansible/issues'
+ )
+
+
+def maybe_instance_from_label(module, client):
+ """Try to retrieve an instance based on a label."""
+ try:
+ label = module.params['label']
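+        # Editorial note (not in the original source): linode_api4 supports
+        # attribute filters; this lookup is expected to match at most one
+        # instance, since the label is the module's idempotence key.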
+ result = client.linode.instances(Instance.label == label)
+ return result[0]
+ except IndexError:
+ return None
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+
+def initialise_module():
+ """Initialise the module parameter specification."""
+ return AnsibleModule(
+ argument_spec=dict(
+ label=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ access_token=dict(
+ type='str',
+ required=True,
+ no_log=True,
+ fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
+ ),
+ authorized_keys=dict(type='list', required=False),
+ group=dict(type='str', required=False),
+ image=dict(type='str', required=False),
+ region=dict(type='str', required=False),
+ root_pass=dict(type='str', required=False, no_log=True),
+ tags=dict(type='list', required=False),
+ type=dict(type='str', required=False),
+ stackscript_id=dict(type='int', required=False),
+ stackscript_data=dict(type='dict', required=False),
+ ),
+ supports_check_mode=False,
+ required_one_of=(
+ ['state', 'label'],
+ ),
+ required_together=(
+ ['region', 'image', 'type'],
+ )
+ )
+
+
+def build_client(module):
+ """Build a LinodeClient."""
+ return LinodeClient(
+ module.params['access_token'],
+ user_agent=get_user_agent('linode_v4_module')
+ )
+
+
+def main():
+ """Module entrypoint."""
+ module = initialise_module()
+
+ if not HAS_LINODE_DEPENDENCY:
+ module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
+
+ client = build_client(module)
+ instance = maybe_instance_from_label(module, client)
+
+ if module.params['state'] == 'present' and instance is not None:
+ module.exit_json(changed=False, instance=instance._raw_json)
+
+ elif module.params['state'] == 'present' and instance is None:
+ instance_json = create_linode(
+ module, client,
+ authorized_keys=module.params['authorized_keys'],
+ group=module.params['group'],
+ image=module.params['image'],
+ label=module.params['label'],
+ region=module.params['region'],
+ root_pass=module.params['root_pass'],
+ tags=module.params['tags'],
+ ltype=module.params['type'],
+ stackscript=module.params['stackscript_id'],
+ stackscript_data=module.params['stackscript_data'],
+ )
+ module.exit_json(changed=True, instance=instance_json)
+
+ elif module.params['state'] == 'absent' and instance is not None:
+ instance.delete()
+ module.exit_json(changed=True, instance=instance._raw_json)
+
+ elif module.params['state'] == 'absent' and instance is None:
+ module.exit_json(changed=False, instance={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
new file mode 100644
index 00000000..c1a3d1c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
@@ -0,0 +1,1760 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lxc_container
+short_description: Manage LXC Containers
+description:
+ - Management of LXC containers.
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+      - Name of the logical volume.
+      - If not specified, it defaults to the container name.
+ type: str
+ vg_name:
+ description:
+ - If backend store is lvm, specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+      - File system size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under PATH.
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: 'no'
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+      - Set the log level for a container where I(container_log) was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+      - Create a snapshot of the container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: 'no'
+ archive:
+ description:
+ - Create an archive of a container.
+ - This will create a tarball of the running container.
+ type: bool
+ default: 'no'
+ archive_path:
+ description:
+      - Path used to save the archived container.
+ - If the path does not exist the archive method will attempt to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+      - If you clone a container using I(clone_name), the newly cloned
+        container is created in a stopped state.
+ - The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace, the module will
+    simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+    executed, and then the container "stopped" again. Likewise, if the state
+    is "stopped" and the container does not exist, it will first be created,
+    "started", the command executed, and then "stopped". If you use a "|"
+    in the variable you can use common script formatting within the variable
+    itself. The "container_command" option will always execute as BASH.
+    When using "container_command", a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+ - If "archive" is **true** the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+ community.general.lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ community.general.lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ community.general.lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create filesystem container, configure it, and archive it, and start it.
+- name: Create filesystem container
+ community.general.lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ community.general.lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ ansible.builtin.debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ community.general.lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ community.general.lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ community.general.lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ community.general.lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container, and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ community.general.lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container"
+ ansible.builtin.debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ community.general.lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ community.general.lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN = r"""
+lxc_container:
+ description: container information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+      description: resulting path of the archived container
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: bool
+ sample: True
+"""
+
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+import time
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_text, to_bytes
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.tgz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
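+# Illustrative note (not in the original source): with the default 'gzip'
+# entry above, _create_tar() ends up running roughly
+#   tar --directory=<workdir> -czf <archive_path>/<container_name>.tar.tgz .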
+
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables-lxc-copy': {
+ 'backing_store': '--backingstorage',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--name',
+ 'clone_name': '--newname'
+ },
+ # lxc-clone is deprecated in favor of lxc-copy
+ 'variables-lxc-clone': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
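+# Illustrative note (not in the original source): _get_vars() maps module
+# parameters onto the option flags above and _add_variables() appends them,
+# so backing_store='lvm' with fs_size='5G' becomes '--bdev lvm --fssize 5G'
+# on the generated lxc-create command line.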
+
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+ 'clone': '_clone'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that users environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ unset HOSTNAME
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should be backward compatible with Python 2.4+ when executing
+ from within the container.
+
+ :param command: command to run, this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700', 8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params.get('state', None)
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params.get('lxc_path', None)
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type: ``str``
+ :returns: True or False if the container is found.
+        :rtype: ``bool``
+ """
+ if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = BOOLEANS_FALSE.union([None, ''])
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+        :param unsafe_shell: Enable or disable unsafe shell commands.
+        :type unsafe_shell: ``bool``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+        stop the container if it's running, write the new options, and then
+ restart the container upon completion.
+ """
+
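+        # Expected input shape (illustrative, taken from the module EXAMPLES):
+        # container_config is a list of 'key=value' strings, e.g.
+        #   ["lxc.aa_profile=unconfined", "lxc.cgroup.devices.allow=a *:* rmw"]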
+ _container_config = self.module.params.get('container_config')
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+ parsed_options = [i.split('=', 1) for i in _container_config]
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ _, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ # lxc-clone is deprecated in favor of lxc-copy
+ clone_vars = 'variables-lxc-copy'
+ clone_cmd = self.module.get_bin_path('lxc-copy')
+ if not clone_cmd:
+ clone_vars = 'variables-lxc-clone'
+ clone_cmd = self.module.get_bin_path('lxc-clone', True)
+
+ build_command = [
+ clone_cmd,
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone'][clone_vars]
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
+ build_command.append('--snapshot')
+ # Check for backing_store == overlayfs if so force the use of snapshot
+ # If overlay fs is used and snapshot is unset the clone command will
+ # fail with an unsupported type.
+ elif self.module.params.get('backing_store') == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing %s." % os.path.basename(clone_cmd)
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+        container. It would have been nice to simply use the lxc python library;
+        however, at the time this was written, the python library, in both py2
+        and py3, didn't support some of the more advanced container create
+        processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
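+        # Illustrative sketch (not in the original source): with the defaults
+        # and the template options from the EXAMPLES section, the assembled
+        # command is roughly
+        #   lxc-create --name <name> --quiet --template ubuntu -- --release trusty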
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name %s' % self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('container_log') in BOOLEANS_TRUE:
+            # Set the logging path to /var/log/lxc if the uid is root,
+            # else set it to the home folder of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile %s' % os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority %s' % self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params.get('template_options', None)
+ if template_options:
+ build_command.append('-- %s' % template_options)
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+        If the container is not found, the state returned is "absent".
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ return str('absent')
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
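+            # Editorial note: attach_wait() from the lxc python bindings runs
+            # create_script() inside the container's namespaces, handing it
+            # the raw command string to write out and execute there.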
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+        :param timeout: Time before the start operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
+ for _ in xrange(timeout):
+ if self._get_state() != 'running':
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ else:
+ return True
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+        This will store the archive information in self.archive_info.
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for _ in xrange(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+                    msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+ :param vg_name: Name of volume.
+ :type vg_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+ :param lv_name: Name of volume.
+ :type lv_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+                'Snapshot size [ %s ] is greater than the free space [ %s ]'
+                ' on volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
+ old_umask = os.umask(int('0077', 8))
+
+ archive_path = self.module.params.get('archive_path')
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params.get('archive_compression')
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+ # remove trailing / if present.
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(
+ os.path.expanduser(source_dir)
+ ),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self._run_command(
+ build_command=build_command,
+ unsafe_shell=True
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container rootfs
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+ # This loop is created to support overlayfs archives. This should
+ # squash all of the layers into a single archive.
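+        # Illustrative example (not in the original source): an overlayfs
+        # container typically reports lxc.rootfs as
+        #   overlayfs:/var/lib/lxc/<name>/rootfs:/var/lib/lxc/<name>/delta0
+        # so splitting on ':' yields each layer directory to be synced.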
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir
+ ]
+ rc, stdout, err = self._run_command(
+ build_command,
+ unsafe_shell=True
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+ # Test if the containers rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+ ' up old snapshot of containers before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
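+        # Editorial note: each state handler (_started, _stopped, _frozen, ...)
+        # retries itself at most once after creating a missing container, so a
+        # count above one means the container could not be brought into the
+        # requested state.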
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+                msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=LXC_BACKING_STORE.keys(),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=LXC_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='list',
+ elements='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ required=False
+ ),
+ clone_snapshot=dict(
+ type='bool',
+            default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=LXC_COMPRESSION_MAP.keys(),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=([
+ ('archive', True, ['archive_path'])
+ ]),
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ lv_name = module.params.get('lv_name')
+ if not lv_name:
+ module.params['lv_name'] = module.params.get('name')
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
new file mode 100644
index 00000000..119387f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
@@ -0,0 +1,710 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" values in the metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+ differ, then this module tries to apply the configuration.
+ - Keys starting with 'volatile.' are ignored for this comparison.
+ - Not all config values can be applied to an existing container;
+ you may need to delete and recreate the container.
+ type: dict
+ required: false
+ profiles:
+ description:
+ - Profiles to be used by the container.
+ type: list
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).'
+ - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+ - For cluster deployments. Will attempt to create a container on a target node.
+ If the container exists elsewhere in the cluster, it will not be replaced or moved.
+ The name should correspond to the name of the node you see in C(lxc cluster list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+ - This is also used as a timeout for waiting until IPv4 addresses
+ are set on all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: 30
+ type: int
+ wait_for_ipv4_addresses:
+ description:
+ - If this is true, the C(lxd_container) waits until IPv4 addresses
+ are set on all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: false
+ type: bool
+ force_stop:
+ description:
+ - If this is true, the C(lxd_container) forces the container to stop
+ when stopping or restarting it.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module, using the following command:
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any other requests.
+ required: false
+ type: str
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+ - There are two ways to run commands in containers: using the command
+ module, or using the Ansible lxd connection plugin bundled in Ansible >=
+ 2.1. The latter requires Python to be installed in the container, which can
+ be done with the command module.
+ - You can copy a file from the host to the container
+ with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the `lxd` connection plugin.
+ See the example below.
+ - You can copy a file in the created container to the localhost
+ with `command=lxc file pull container_name/dir/filename filename`.
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example of creating an Ubuntu container and installing python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd # if you get a 404, try setting protocol: simplestreams
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: Check python is installed in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: Install python in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ # Provides current (and older) Ubuntu images with listed fingerprints
+ server: https://cloud-images.ubuntu.com/releases
+ # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+ protocol: simplestreams
+ # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+ fingerprint: e9a8bdfab6dc
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ ansible.builtin.fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+
+# An example for LXD cluster deployments. This example will create two new containers on specific
+# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
+# members that the LXD cluster recognizes, not Ansible inventory names (see 'lxc cluster list').
+# LXD API calls can be made to any LXD member; in this example, we send API requests to
+# 'node01.example.com', which matches the Ansible inventory name.
+- hosts: node01.example.com
+ tasks:
+ - name: Create LXD container
+ community.general.lxd_container:
+ name: new-container-1
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node01
+
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-container-2
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node02
+'''
+
+RETURN = '''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: dict
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: str
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+import datetime
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxc_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+ self.target = self.module.params['target']
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_container_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ def _get_container_state_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}/state'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _container_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
+ body_json = {'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
+ return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+ def _create_container(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ if self.target:
+ self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
+ else:
+ self.client.do('POST', '/1.0/containers', config)
+ self.actions.append('create')
+
+ def _start_container(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_container(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_container(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_container(self):
+ self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def _freeze_container(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_container(self):
+ self._change_state('unfreeze')
+ self.actions.append('unfreeze')
+
+ def _container_ipv4_addresses(self, ignore_devices=None):
+ ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
+
+ resp_json = self._get_container_state_json()
+ network = resp_json['metadata']['network'] or {}
+ network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+ def _get_addresses(self):
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._container_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses):
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ elif self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_container_configs():
+ self._start_container()
+ self._apply_container_configs()
+ self._stop_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._stop_container()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._restart_container()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self.old_state != 'stopped':
+ self._stop_container()
+ self._delete_container()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ self._freeze_container()
+ else:
+ if self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._freeze_container()
+
+ def _needs_to_change_container_config(self, key):
+ if key not in self.config:
+ return False
+ if key == 'config':
+ old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
+ for k, v in self.config['config'].items():
+ if k not in old_configs:
+ return True
+ if old_configs[k] != v:
+ return True
+ return False
+ else:
+ old_configs = self.old_container_json['metadata'][key]
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_container_configs(self):
+ return (
+ self._needs_to_change_container_config('architecture') or
+ self._needs_to_change_container_config('config') or
+ self._needs_to_change_container_config('ephemeral') or
+ self._needs_to_change_container_config('devices') or
+ self._needs_to_change_container_config('profiles')
+ )
+
+ def _apply_container_configs(self):
+ old_metadata = self.old_container_json['metadata']
+ body_json = {
+ 'architecture': old_metadata['architecture'],
+ 'config': old_metadata['config'],
+ 'devices': old_metadata['devices'],
+ 'profiles': old_metadata['profiles']
+ }
+ if self._needs_to_change_container_config('architecture'):
+ body_json['architecture'] = self.config['architecture']
+ if self._needs_to_change_container_config('config'):
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ if self._needs_to_change_container_config('ephemeral'):
+ body_json['ephemeral'] = self.config['ephemeral']
+ if self._needs_to_change_container_config('devices'):
+ body_json['devices'] = self.config['devices']
+ if self._needs_to_change_container_config('profiles'):
+ body_json['profiles'] = self.config['profiles']
+ self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
+ self.actions.append('apply_container_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_container_json = self._get_container_json()
+ self.old_state = self._container_json_to_module_state(self.old_container_json)
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=LXD_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ target=dict(
+ type='str',
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
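The handlers above (_started, _stopped, _frozen, and so on) all follow the same idempotent pattern: look up the container's current state, then issue only the LXD REST calls needed to reach the requested state. A minimal standalone sketch of that idea follows; it is an illustration only, and api_put_state / ensure_started are hypothetical helpers, not part of the collection.

# Illustrative sketch only -- api_put_state() is a hypothetical stand-in for
# PUT /1.0/containers/<name>/state; it prints instead of calling LXD.
def api_put_state(name, action, timeout=30, force=False):
    print('PUT /1.0/containers/%s/state %s' % (name, {'action': action, 'timeout': timeout, 'force': force}))

def ensure_started(name, observed):
    """Move a container from its observed state to 'started' (mirrors _started above)."""
    if observed == 'absent':
        print('POST /1.0/containers (create %s)' % name)  # create first, then start
        api_put_state(name, 'start')
    elif observed == 'frozen':
        api_put_state(name, 'unfreeze')
    elif observed == 'stopped':
        api_put_state(name, 'start')
    # observed == 'started': nothing to do, so the task reports unchanged

ensure_started('mycontainer', observed='stopped')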
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
new file mode 100644
index 00000000..ccd74d42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+ - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" values in the metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+ differ, then this module tries to apply the configuration.
+ - Not all config values can be applied to an existing profile;
+ you may need to delete and recreate the profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ type: dict
+ new_name:
+ description:
+ - A new name for the profile.
+ - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ type: str
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module, using the following command:
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any other requests.
+ required: false
+ type: str
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via an https connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ community.general.lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of supported states
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ return self.client.do(
+ 'GET', '/1.0/profiles/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the specified state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/profiles', config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _apply_profile_configs(self):
+ config = self.old_profile_json.copy()
+ for k, v in self.config.items():
+ config[k] = v
+ self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
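The profile module decides whether a PUT is needed by comparing only the keys the user supplied against the metadata returned by GET /1.0/profiles/<name> (_needs_to_change_profile_config). Below is a standalone sketch of that drift check; needs_update is a hypothetical helper and the dictionaries are made-up sample data.

# Standalone illustration of the drift check; not part of the module.
def needs_update(requested, existing):
    """Return True if any key supplied by the user differs from the existing metadata."""
    return any(existing.get(key) != value for key, value in requested.items())

existing_profile = {'config': {'limits.memory': '4GB'}, 'description': 'db profile', 'devices': {}}

print(needs_update({'config': {'limits.memory': '8GB'}}, existing_profile))   # True -> a PUT would be issued
print(needs_update({'description': 'db profile'}, existing_profile))          # False -> unchanged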
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
new file mode 100644
index 00000000..6eefe133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure.
+notes:
+ - DNS reload requests are a best-effort service provided by Memset; these generally
+ happen every 15 minutes by default, however you can request an immediate reload if
+ later tasks rely on the records being created. An API key generated via the
+ Memset customer control panel is required with the following minimum scope -
+ I(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then I(job.status) is also required.
+description:
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - Boolean value; if set, the module will poll the reload job's status and return
+ when the job has completed (unless the 30-second timeout is reached first).
+ If the timeout is reached then the task will not be marked as failed, but
+ stderr will indicate that the polling failed.
+'''
+
+EXAMPLES = '''
+- name: Submit DNS reload and poll
+ community.general.memset_dns_reload:
+ api_key: 5eb86c9196ab03919abcf03857163741
+ poll: True
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Raw response from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ error:
+ description: Whether the job ended in error state.
+ returned: always
+ type: bool
+ sample: true
+ finished:
+ description: Whether the job completed before the result was returned.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description: Job ID.
+ returned: always
+ type: str
+ sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+ status:
+ description: Job status.
+ returned: always
+ type: str
+ sample: "DONE"
+ type:
+ description: Job type.
+ returned: always
+ type: str
+ sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+ '''
+ We poll the `job.status` endpoint every 5 seconds up to a
+ maximum of 6 times. This is a relatively arbitrary choice of
+ timeout; however, requests rarely take longer than 15 seconds
+ to complete.
+ '''
+ memset_api, stderr, msg = None, None, None
+ payload['id'] = job_id
+
+ api_method = 'job.status'
+ _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+ counter = 0
+ while not response.json()['finished'] and counter < 6:
+ sleep(5)
+ _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+ counter += 1
+ if response.json()['error']:
+ # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+ stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
+ else:
+ memset_api = response.json()
+ msg = None
+
+ return(memset_api, msg, stderr)
+
+
+def reload_dns(args=None):
+ '''
+ DNS reloads are a single API call and therefore there's not much
+ which can go wrong outside of auth errors.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ memset_api, msg, stderr = None, None, None
+
+ api_method = 'dns.reload'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['memset_api'] = response.json()
+ retvals['msg'] = msg
+ return(retvals)
+
+ # set changed to true if the reload request was accepted.
+ has_changed = True
+ memset_api = msg
+ # empty msg var as we don't want to return the API's json response twice.
+ msg = None
+
+ if args['poll']:
+ # hand off to the poll function.
+ job_id = response.json()['id']
+ memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
+
+ # assemble return variables.
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+ for val in ['msg', 'stderr', 'memset_api']:
+ if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ poll=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = reload_dns(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
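poll_reload_status is documented as polling job.status every 5 seconds, at most 6 times. A generic, self-contained sketch of that bounded-polling pattern is shown below; fetch_status and wait_for_job are illustrative stand-ins, not functions from the module.

# Bounded-polling sketch; fetch_status is a stand-in for the
# memset_api_call('job.status') request.
import time

def wait_for_job(fetch_status, attempts=6, interval=5):
    """Poll until the job reports finished, or give up after `attempts` tries."""
    status = fetch_status()
    for _dummy in range(attempts):
        if status.get('finished'):
            break
        time.sleep(interval)
        status = fetch_status()
    return status

# Example with a fake status source that reports finished on the third call.
calls = {'n': 0}
def fake_status():
    calls['n'] += 1
    return {'finished': calls['n'] >= 3, 'status': 'DONE' if calls['n'] >= 3 else 'RUNNING'}

print(wait_for_job(fake_status, interval=0))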
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The Memstore product name (i.e. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+ if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
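The get_facts()/reload_dns() helpers assemble their return dictionary by eval()-ing local variable names and skipping values that are None. A small standalone sketch of the same result without eval() follows; build_retvals is a hypothetical helper used only for illustration.

# Standalone sketch: assemble optional return values without eval();
# keys whose value is None are simply skipped.
def build_retvals(changed, failed, msg=None, memset_api=None, stderr=None):
    retvals = {'changed': changed, 'failed': failed}
    optional = {'msg': msg, 'memset_api': memset_api, 'stderr': stderr}
    retvals.update({key: value for key, value in optional.items() if value is not None})
    return retvals

print(build_retvals(False, False, memset_api={'bytes': 3860997965}))
# -> {'changed': False, 'failed': False, 'memset_api': {'bytes': 3860997965}}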
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The Memstore product name (i.e. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+ if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_facts.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The server product name (i.e. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+ description: Whether the server has a control panel (i.e. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+ description: The type of firewall the server has (i.e. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+ description: The server's monitoring level (i.e. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+ description: Current status of the server (i.e. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+ description: What this server is (i.e. dedicated)
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val in ['msg', 'memset_api']:
+ if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The server product name (i.e. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
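+
+# Illustrative sketch (not part of the original examples): register the module
+# output and print one field documented in RETURN. The variable name 'server'
+# is a placeholder.
+- name: Get details for testyaa1 and show its primary IP
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server
+  delegate_to: localhost
+
+- ansible.builtin.debug:
+    var: server.memset_api.primary_ip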
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+ description: Whether the server has a control panel (i.e. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+ description: The type of firewall the server has (i.e. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+ description: The server's monitoring level (i.e. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+      description: Whether or not to reboot the server if monitoring detects it as down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+ description: Current status of the server (i.e. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+ description: What this server is (i.e. dedicated)
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for key, value in (('msg', msg), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
new file mode 100644
index 00000000..9ef798bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone
+author: "Simon Weald (@glitchcrab)"
+short_description: Creates and deletes Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+description:
+ - Manage DNS zones in a Memset account.
+options:
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this
+ value has at most 250 characters.
+ type: str
+ aliases: [ nickname ]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+'''
+
+EXAMPLES = '''
+# Create the zone 'test'
+- name: Create zone
+ community.general.memset_zone:
+ name: test
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ ttl: 300
+ delegate_to: localhost
+
+# Force zone deletion
+- name: Force delete zone
+ community.general.memset_zone:
+ name: test
+ state: absent
+ api_key: 5eb86c9196ab03919abcf03857163741
+ force: true
+ delegate_to: localhost
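+
+# Illustrative sketch (not part of the original examples): register the result
+# to capture the zone id returned by the API when state == present. The
+# variable name 'zone' is a placeholder.
+- name: Create zone and record its id
+  community.general.memset_zone:
+    name: test
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+    ttl: 300
+  register: zone
+  delegate_to: localhost
+
+- ansible.builtin.debug:
+    var: zone.memset_api.id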
+'''
+
+RETURN = '''
+memset_api:
+ description: Zone info from the Memset API
+ returned: when state == present
+ type: complex
+ contains:
+ domains:
+ description: List of domains in this zone
+ returned: always
+ type: list
+ sample: []
+ id:
+ description: Zone id
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ nickname:
+ description: Zone name
+ returned: always
+ type: str
+ sample: "example.com"
+ records:
+ description: List of DNS records for domains in this zone
+ returned: always
+ type: list
+ sample: []
+ ttl:
+ description: Default TTL for domains in this zone
+ returned: always
+ type: int
+ sample: 300
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+    # the zone name must be at most 250 chars.
+    if len(args['name']) > 250:
+        stderr = 'Zone name must be at most 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr, stderr=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+
+ api_method = 'dns.zone_list'
+ has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, counter = check_zone(data=response, name=args['name'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone(args=None, zone_exists=None, payload=None):
+ '''
+ At this point we already know whether the zone exists, so we
+ just need to make the API reflect the desired state.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if not zone_exists:
+ payload['ttl'] = args['ttl']
+ payload['nickname'] = args['name']
+ api_method = 'dns.zone_create'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ else:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ break
+ if zone['ttl'] != args['ttl']:
+ # update the zone if the desired TTL is different.
+ payload['id'] = zone['id']
+ payload['ttl'] = args['ttl']
+ api_method = 'dns.zone_update'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ # populate return var with zone info.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if zone_exists:
+ payload = dict()
+ payload['id'] = zone_id
+ api_method = 'dns.zone_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ memset_api = response.json()
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def delete_zone(args=None, zone_exists=None, payload=None):
+ '''
+ Deletion requires extra sanity checking as the zone cannot be
+ deleted if it contains domains or records. Setting force=true
+ will override this behaviour.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if zone_exists:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ counter = 0
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ counter += 1
+ if counter == 1:
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ zone_id = zone['id']
+ domain_count = len(zone['domains'])
+ record_count = len(zone['records'])
+ if (domain_count > 0 or record_count > 0) and args['force'] is False:
+ # we need to fail out if force was not explicitly set.
+ stderr = 'Zone contains domains or records and force was not used.'
+ has_failed = True
+ has_changed = False
+ module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
+ api_method = 'dns.zone_delete'
+ payload['id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
+ memset_api = msg
+ msg = None
+ else:
+ # zone names are not unique, so we cannot safely delete the requested
+ # zone at this time.
+ has_failed = True
+ has_changed = False
+ msg = 'Unable to delete zone as multiple zones with the same name exist.'
+ else:
+ has_failed, has_changed = False, False
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = _msg
+
+ return(retvals)
+
+ zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if args['state'] == 'present':
+ has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ elif args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, aliases=['nickname'], type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
new file mode 100644
index 00000000..4aa0eada
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_domain
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete domains in Memset DNS zones.
+notes:
+ - Zone domains can be thought of as a collection of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should
+ be created using C(with_items).
+description:
+ - Manage DNS zone domains in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+'''
+
+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: Create zone domain
+ community.general.memset_zone_domain:
+ domain: test.com
+ zone: testzone
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ delegate_to: localhost
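+
+# Illustrative sketch (not part of the original examples): the module creates
+# one domain per task (see notes), so multiple domains can be looped over with
+# C(with_items). The domain names below are placeholders.
+- name: Create multiple zone domains
+  community.general.memset_zone_domain:
+    domain: "{{ item }}"
+    zone: testzone
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+  delegate_to: localhost
+  with_items:
+    - test1.com
+    - test2.com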
+'''
+
+RETURN = '''
+memset_api:
+ description: Domain info from the Memset API
+ returned: when changed or state == present
+ type: complex
+ contains:
+ domain:
+ description: Domain name
+ returned: always
+ type: str
+ sample: "example.com"
+ id:
+ description: Domain ID
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+ '''
+    # the zone domain name must be at most 250 chars.
+    if len(args['domain']) > 250:
+        stderr = 'Zone domain must be at most 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+ has_changed = False
+
+ api_method = 'dns.zone_domain_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+ '''
+ At this point we already know whether the containing zone exists,
+ so we just need to create the domain (or exit if it already exists).
+ '''
+ has_changed, has_failed = False, False
+ msg = None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ for zone_domain in response.json():
+ if zone_domain['domain'] == args['domain']:
+ # zone domain already exists, nothing to change.
+ has_changed = False
+ break
+ else:
+ # we need to create the domain
+ api_method = 'dns.zone_domain_create'
+ payload['domain'] = args['domain']
+ payload['zone_id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ return(has_failed, has_changed, msg)
+
+
+def delete_zone_domain(args=None, payload=None):
+ '''
+    Deletion is pretty simple; domains are always unique, so we
+    don't need to do any sanity checking to avoid deleting the
+ wrong thing.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ if domain_exists:
+ api_method = 'dns.zone_domain_delete'
+ payload['domain'] = args['domain']
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = response.json()
+ # unset msg as we don't want to return unnecessary info to the user.
+ msg = None
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete_domain(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ # the zone needs to be unique - this isn't a requirement of Memset's API but it
+ # makes sense in the context of this module.
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone'])
+
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ return(retvals)
+
+ if args['state'] == 'present':
+ has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
+
+ if args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ domain=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(required=True, type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete_domain(args)
+
+ # we would need to populate the return values with the API's response
+ # in several places so it's easier to do it at the end instead.
+ if not retvals['failed']:
+ if args['state'] == 'present' and not module.check_mode:
+ payload = dict()
+ payload['domain'] = args['domain']
+ api_method = 'dns.zone_domain_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ retvals['memset_api'] = response.json()
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
new file mode 100644
index 00000000..981d2ac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records
+ should be created using C(with_items).
+description:
+ - Manage DNS records in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ ip, data ]
+ priority:
+ description:
+      - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+ type: int
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+      - If set, the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+        and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ type: int
+ zone:
+ required: true
+ description:
+      - The name of the zone to add the record to.
+ type: str
+'''
+
+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: Create DNS record
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: A
+ record: www
+ address: 1.2.3.4
+ ttl: 300
+ relative: false
+ delegate_to: localhost
+
+# create an SPF record for domain.com
+- name: Create SPF record for domain.com
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+ delegate_to: localhost
+
+# create multiple DNS records
+- name: Create multiple DNS records
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ zone: "{{ item.zone }}"
+ type: "{{ item.type }}"
+ record: "{{ item.record }}"
+ address: "{{ item.address }}"
+ delegate_to: localhost
+ with_items:
+ - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+ - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
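+
+# Illustrative sketch (not part of the original examples): remove the A record
+# created in the first example. The address is required by the module even
+# when deleting a record.
+- name: Delete DNS record
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: absent
+    zone: domain.com
+    type: A
+    record: www
+    address: 1.2.3.4
+  delegate_to: localhost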
+'''
+
+RETURN = '''
+memset_api:
+ description: Record info from the Memset API.
+ returned: when state == present
+ type: complex
+ contains:
+ address:
+ description: Record content (may be an IP, string or blank depending on record type).
+ returned: always
+ type: str
+ sample: 1.1.1.1
+ id:
+ description: Record ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ priority:
+ description: Priority for C(MX) and C(SRV) records.
+ returned: always
+ type: int
+ sample: 10
+ record:
+ description: Name of record.
+ returned: always
+ type: str
+ sample: "www"
+ relative:
+ description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+ returned: always
+ type: bool
+ sample: False
+ ttl:
+ description: Record TTL.
+ returned: always
+ type: int
+ sample: 10
+ type:
+ description: Record type.
+ returned: always
+ type: str
+ sample: AAAA
+ zone_id:
+ description: Zone ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ failed_validation = False
+
+    # priority must be an integer in the range 0 to 999 (inclusive).
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+    # address value must be at most 250 chars.
+    if len(args['address']) > 250:
+        failed_validation = True
+        error = "Address must be at most 250 characters in length."
+    # record value must be at most 63 chars.
+    if args['record']:
+        if len(args['record']) > 63:
+            failed_validation = True
+            error = "Record must be at most 63 characters in length."
+ # relative isn't used for all record types
+ if args['relative']:
+ if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+ failed_validation = True
+ error = "Relative is only valid for CNAME, MX, NS and SRV record types."
+ # if any of the above failed then fail early
+ if failed_validation:
+ module.fail_json(failed=True, msg=error)
+
+
+def create_zone_record(args=None, zone_id=None, records=None, payload=None):
+ '''
+ Sanity checking has already occurred prior to this function being
+ called, so we can go ahead and either create or update the record.
+ As defaults are defined for all values in the argument_spec, this
+ may cause some changes to occur as the defaults are enforced (if
+ the user has only configured required variables).
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # assemble the new record.
+ new_record = dict()
+ new_record['zone_id'] = zone_id
+ for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
+ new_record[arg] = args[arg]
+
+ # if we have any matches, update them.
+ if records:
+ for zone_record in records:
+ # record exists, add ID to payload.
+ new_record['id'] = zone_record['id']
+ if zone_record == new_record:
+ # nothing to do; record is already correct so we populate
+ # the return var with the existing record's details.
+ memset_api = zone_record
+ return(has_changed, has_failed, memset_api, msg)
+ else:
+ # merge dicts ensuring we change any updated values
+ payload = zone_record.copy()
+ payload.update(new_record)
+ api_method = 'dns.zone_record_update'
+ if args['check_mode']:
+ has_changed = True
+ # return the new record to the user in the returned var.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+ else:
+ # no record found, so we need to create it
+ api_method = 'dns.zone_record_create'
+ payload = new_record
+ if args['check_mode']:
+ has_changed = True
+ # populate the return var with the new record's details.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def delete_zone_record(args=None, records=None, payload=None):
+ '''
+ Matching records can be cleanly deleted without affecting other
+ resource types, so this is pretty simple to achieve.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # if we have any matches, delete them.
+ if records:
+ for zone_record in records:
+ if args['check_mode']:
+ has_changed = True
+ return(has_changed, has_failed, memset_api, msg)
+ payload['id'] = zone_record['id']
+ api_method = 'dns.zone_record_delete'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = zone_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete functions.
+ Check mode is integrated into the create or delete functions.
+ '''
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+ retvals, payload = dict(), dict()
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = msg
+        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ return(retvals)
+
+ zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone {0} does not exist." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones." . format(args['zone'])
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ retvals['stderr'] = stderr
+ return(retvals)
+
+    # get a list of all records (as we can't limit records by zone).
+ api_method = 'dns.zone_record_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ # find any matching records
+ records = [record for record in response.json() if record['zone_id'] == zone_id
+ and record['record'] == args['record'] and record['type'] == args['type']]
+
+ if args['state'] == 'present':
+ has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+ if args['state'] == 'absent':
+ has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ zone=dict(required=True, type='str'),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+ address=dict(required=True, aliases=['ip', 'data'], type='str'),
+ record=dict(required=False, default='', type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(required=False, default=0, type='int'),
+ relative=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # perform some Memset API-specific validation
+ api_validation(args=args)
+
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
new file mode 100644
index 00000000..2efb90cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+      - Filter facts to return. If not set, both C(status) and C(result) facts are returned.
+ type: str
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ community.general.cloud_init_data_facts:
+ register: result
+
+- ansible.builtin.debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ community.general.cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
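+
+# Illustrative sketch (not part of the original examples): limit the gathered
+# facts to result.json only.
+- name: Gather only the result facts of cloud-init
+  community.general.cloud_init_data_facts:
+    filter: result
+  register: result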
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+ sample: '{
+ "status": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "errors": []
+        }
+    },
+    "result": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "init": {
+ "errors": [],
+ "finished": 1522066377.0185432,
+ "start": 1522066375.2648022
+ },
+ "init-local": {
+ "errors": [],
+ "finished": 1522066373.70919,
+ "start": 1522066373.4726632
+ },
+ "modules-config": {
+ "errors": [],
+ "finished": 1522066380.9097016,
+ "start": 1522066379.0011985
+ },
+ "modules-final": {
+ "errors": [],
+ "finished": 1522066383.56594,
+ "start": 1522066382.3449218
+ },
+ "stage": null
+ }
+    }
+    }'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+ for i in ['result', 'status']:
+ filter = module.params.get('filter')
+ if filter is None or filter == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = CLOUD_INIT_PATH + i + '.json'
+
+ if os.path.exists(json_file):
+ f = open(json_file, 'rb')
+ contents = to_text(f.read(), errors='surrogate_or_strict')
+ f.close()
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py
new file mode 100644
index 00000000..dd592d6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/helm.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# (c) 2016, Flavio Percoco <flavio@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+  why: For more details see https://github.com/ansible/ansible/issues/61546.
+ alternative: Use M(community.kubernetes.helm) instead.
+module: helm
+short_description: Manages Kubernetes packages with the Helm package manager
+author: "Flavio Percoco (@flaper87)"
+description:
+ - Install, upgrade, delete and list packages with the Helm package manager.
+requirements:
+ - "pyhelm"
+ - "grpcio"
+options:
+ host:
+ description:
+ - Tiller's server host.
+ type: str
+ default: "localhost"
+ port:
+ description:
+ - Tiller's server port.
+ type: int
+ default: 44134
+ namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ type: str
+ default: "default"
+ name:
+ description:
+ - Release name to manage.
+ type: str
+ state:
+ description:
+ - Whether to install C(present), remove C(absent), or purge C(purged) a package.
+ choices: ['absent', 'purged', 'present']
+ type: str
+ default: "present"
+ chart:
+ description:
+ - A map describing the chart to install. See examples for available options.
+ type: dict
+ default: {}
+ values:
+ description:
+ - A map of value options for the chart.
+ type: dict
+ default: {}
+ disable_hooks:
+ description:
+ - Whether to disable hooks during the uninstall process.
+ type: bool
+ default: 'no'
+'''
+
+RETURN = ''' # '''
+
+EXAMPLES = '''
+- name: Install helm chart
+ community.general.helm:
+ host: localhost
+ chart:
+ name: memcached
+ version: 0.4.0
+ source:
+ type: repo
+ location: https://kubernetes-charts.storage.googleapis.com
+ state: present
+ name: my-memcached
+ namespace: default
+
+- name: Uninstall helm chart
+ community.general.helm:
+ host: localhost
+ state: absent
+ name: my-memcached
+
+- name: Install helm chart from a git repo
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/user/helm-chart.git
+ state: present
+ name: my-example
+ namespace: default
+ values:
+ foo: "bar"
+
+- name: Install helm chart from a git repo specifying path
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/helm/charts.git
+ path: stable/memcached
+ state: present
+ name: my-memcached
+ namespace: default
+ values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
+'''
+
+import traceback
+HELM_IMPORT_ERR = None
+try:
+ import grpc
+ from pyhelm import tiller
+ from pyhelm import chartbuilder
+except ImportError:
+ HELM_IMPORT_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def install(module, tserver):
+ changed = False
+ params = module.params
+ name = params['name']
+ values = params['values']
+ chart = module.params['chart']
+ namespace = module.params['namespace']
+
+ chartb = chartbuilder.ChartBuilder(chart)
+ r_matches = (x for x in tserver.list_releases()
+ if x.name == name and x.namespace == namespace)
+ installed_release = next(r_matches, None)
+ if installed_release:
+ if installed_release.chart.metadata.version != chart['version']:
+ tserver.update_release(chartb.get_helm_chart(), False,
+ namespace, name=name, values=values)
+ changed = True
+ else:
+ tserver.install_release(chartb.get_helm_chart(), namespace,
+ dry_run=False, name=name,
+ values=values)
+ changed = True
+
+ return dict(changed=changed)
+
+
+def delete(module, tserver, purge=False):
+ changed = False
+ params = module.params
+
+ if not module.params['name']:
+ module.fail_json(msg='Missing required field name')
+
+ name = module.params['name']
+ disable_hooks = params['disable_hooks']
+
+ try:
+ tserver.uninstall_release(name, disable_hooks, purge)
+ changed = True
+ except grpc._channel._Rendezvous as exc:
+ if 'not found' not in str(exc):
+ raise exc
+
+ return dict(changed=changed)
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=44134),
+ name=dict(type='str', default=''),
+ chart=dict(type='dict'),
+ state=dict(
+ choices=['absent', 'purged', 'present'],
+ default='present'
+ ),
+ # Install options
+ values=dict(type='dict'),
+ namespace=dict(type='str', default='default'),
+
+ # Uninstall options
+ disable_hooks=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True)
+
+ if HELM_IMPORT_ERR:
+ module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)
+
+ host = module.params['host']
+ port = module.params['port']
+ state = module.params['state']
+ tserver = tiller.Tiller(host, port)
+
+    if state == 'present':
+        rst = install(module, tserver)
+    elif state == 'absent':
+        rst = delete(module, tserver)
+    elif state == 'purged':
+        rst = delete(module, tserver, True)
+
+ module.exit_json(**rst)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py
new file mode 100644
index 00000000..25e3081c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/ovirt.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author:
+- Vincent Van der Kussen (@vincentvdk)
+short_description: oVirt/RHEV platform management
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: This module is for deprecated version of ovirt.
+ alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead
+description:
+ - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ required: true
+ url:
+ description:
+ - The url of the oVirt instance.
+ type: str
+ required: true
+ instance_name:
+ description:
+ - The name of the instance to use.
+ type: str
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - Password of the user to authenticate with.
+ type: str
+ required: true
+ image:
+ description:
+ - The template to use for the instance.
+ type: str
+ resource_type:
+ description:
+ - Whether you want to deploy an image or create an instance from scratch.
+ type: str
+ choices: [ new, template ]
+ zone:
+ description:
+ - Deploy the image to this oVirt cluster.
+ type: str
+ instance_disksize:
+ description:
+ - Size of the instance's disk in GB.
+ type: str
+    aliases: [ vm_disksize ]
+ instance_cpus:
+ description:
+ - The instance's number of CPUs.
+ type: str
+ default: 1
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - The name of the network interface in oVirt/RHEV.
+ type: str
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - The logical network the machine should belong to.
+ type: str
+ default: rhevm
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - The instance's amount of memory in MB.
+ type: str
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - Define whether the instance is a server, desktop or high_performance.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ type: str
+ choices: [ desktop, server, high_performance ]
+ default: server
+ aliases: [ vmtype ]
+ disk_alloc:
+ description:
+ - Define whether disk is thin or preallocated.
+ type: str
+ choices: [ preallocated, thin ]
+ default: thin
+ disk_int:
+ description:
+ - Interface type of the disk.
+ type: str
+ choices: [ ide, virtio ]
+ default: virtio
+ instance_os:
+ description:
+ - Type of Operating System.
+ type: str
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - Define the instance's number of cores.
+ type: str
+ default: 1
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+ - The Storage Domain where you want to create the instance's disk on.
+ type: str
+ region:
+ description:
+ - The oVirt/RHEV datacenter where you want to deploy to.
+ type: str
+ instance_dns:
+ description:
+ - Define the instance's Primary DNS server.
+ type: str
+ aliases: [ dns ]
+ instance_domain:
+ description:
+ - Define the instance's Domain.
+ type: str
+ aliases: [ domain ]
+ instance_hostname:
+ description:
+ - Define the instance's Hostname.
+ type: str
+ aliases: [ hostname ]
+ instance_ip:
+ description:
+ - Define the instance's IP.
+ type: str
+ aliases: [ ip ]
+ instance_netmask:
+ description:
+ - Define the instance's Netmask.
+ type: str
+ aliases: [ netmask ]
+ instance_gateway:
+ description:
+ - Define the instance's Gateway.
+ type: str
+ aliases: [ gateway ]
+ instance_rootpw:
+ description:
+ - Define the instance's Root password.
+ type: str
+ aliases: [ rootpw ]
+ instance_key:
+ description:
+ - Define the instance's Authorized key.
+ type: str
+ aliases: [ key ]
+ state:
+ description:
+ - Create, terminate or remove instances.
+ type: str
+ choices: [ absent, present, restart, shutdown, started ]
+ default: present
+requirements:
+ - ovirt-engine-sdk-python
+'''
+
+EXAMPLES = '''
+- name: Basic example to provision from image
+ community.general.ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+ resource_type: template
+
+- name: Full example to create new instance from scratch
+ community.general.ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+ disk_int: virtio
+
+- name: Stopping an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+    state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an instance with cloud init information
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.removed import removed_module
+
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ value = api.test()
+ except Exception:
+ raise Exception("error connecting to the oVirt API")
+ return api
+
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
+ format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
+ format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except Exception:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except Exception:
+ raise Exception("Error adding nic")
+
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
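+ # When any guest-configuration argument is supplied, the VM is started with a
+ # one-shot cloud-init payload (hostname, static IP, DNS, root password, SSH key).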
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.Nics()
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+
+# restart instance
+def vm_restart(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+ while conn.vms.get(name=vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get VM object and return its name if the object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm is None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
+ user=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ instance_name=dict(type='str', required=True, aliases=['vmname']),
+ password=dict(type='str', required=True, no_log=True),
+ image=dict(type='str'),
+ resource_type=dict(type='str', choices=['new', 'template']),
+ zone=dict(type='str'),
+ instance_disksize=dict(type='str', aliases=['vm_disksize']),
+ instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
+ instance_nic=dict(type='str', aliases=['vmnic']),
+ instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
+ instance_mem=dict(type='str', aliases=['vmmem']),
+ instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
+ disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
+ disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
+ instance_os=dict(type='str', aliases=['vmos']),
+ instance_cores=dict(type='str', default=1, aliases=['vmcores']),
+ instance_hostname=dict(type='str', aliases=['hostname']),
+ instance_ip=dict(type='str', aliases=['ip']),
+ instance_netmask=dict(type='str', aliases=['netmask']),
+ instance_gateway=dict(type='str', aliases=['gateway']),
+ instance_domain=dict(type='str', aliases=['domain']),
+ instance_dns=dict(type='str', aliases=['dns']),
+ instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True),
+ instance_key=dict(type='str', aliases=['key'], no_log=True),
+ sdomain=dict(type='str'),
+ region=dict(type='str'),
+ ),
+ )
+
+ if not HAS_OVIRTSDK:
+ module.fail_json(msg='ovirtsdk required for this module')
+
+ state = module.params['state']
+ user = module.params['user']
+ url = module.params['url']
+ vmname = module.params['instance_name']
+ password = module.params['password']
+ image = module.params['image'] # name of the image to deploy
+ resource_type = module.params['resource_type'] # template or from scratch
+ zone = module.params['zone'] # oVirt cluster
+ vmdisk_size = module.params['instance_disksize'] # disksize
+ vmcpus = module.params['instance_cpus'] # number of cpu
+ vmnic = module.params['instance_nic'] # network interface
+ vmnetwork = module.params['instance_network'] # logical network
+ vmmem = module.params['instance_mem'] # mem size
+ vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
+ vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
+ vmos = module.params['instance_os'] # Operating System
+ vmtype = module.params['instance_type'] # server, desktop or high_performance
+ vmcores = module.params['instance_cores'] # number of cores
+ sdomain = module.params['sdomain'] # storage domain to store disk on
+ region = module.params['region'] # oVirt Datacenter
+ hostname = module.params['instance_hostname']
+ ip = module.params['instance_ip']
+ netmask = module.params['instance_netmask']
+ gateway = module.params['instance_gateway']
+ domain = module.params['instance_domain']
+ dns = module.params['instance_dns']
+ rootpw = module.params['instance_rootpw']
+ key = module.params['instance_key']
+ # initialize connection
+ try:
+ c = conn(url + "/api", user, password)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if get_vm(c, vmname) == "empty":
+ if resource_type == 'template':
+ try:
+ create_vm_template(c, vmname, image, zone)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
+ elif resource_type == 'new':
+ # FIXME: refactor, use keyword args.
+ try:
+ create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+ else:
+ module.exit_json(changed=False, msg="You did not specify a resource type")
+ else:
+ module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+ if state == 'started':
+ if vm_status(c, vmname) == 'up':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+ else:
+ # vm_start(c, vmname)
+ vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+ module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+ if state == 'shutdown':
+ if vm_status(c, vmname) == 'down':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+ else:
+ vm_stop(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+ if state == 'restart':
+ if vm_status(c, vmname) == 'up':
+ vm_restart(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+ else:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+ if state == 'absent':
+ if get_vm(c, vmname) == "empty":
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+ else:
+ vm_remove(c, vmname)
+ module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
new file mode 100644
index 00000000..140d56f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use the C(PROXMOX_PASSWORD) environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ vmid:
+ description:
+ - the instance id
+ - if not set, the next available VM ID will be fetched from ProxmoxAPI when creating an instance.
+ - if not set, the VM ID will be looked up via ProxmoxAPI based on the hostname for other states
+ type: str
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ type: bool
+ default: 'no'
+ node:
+ description:
+ - Proxmox VE node on which the new VM will be created
+ - required only for C(state=present)
+ - for other states it will be autodiscovered
+ type: str
+ pool:
+ description:
+ - Proxmox VE resource pool
+ type: str
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ type: str
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ type: str
+ ostemplate:
+ description:
+ - the template to create the VM from
+ - required only for C(state=present)
+ type: str
+ disk:
+ description:
+ - hard disk size in GB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpus:
+ description:
+ - numbers of allocated cpus for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ memory:
+ description:
+ - memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ swap:
+ description:
+ - swap memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ netif:
+ description:
+ - specifies network interfaces for the container as a hash/dictionary defining interfaces.
+ type: dict
+ mounts:
+ description:
+ - specifies additional mounts (separate disks) for the container as a hash/dictionary defining mount points
+ type: dict
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ type: str
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ cpuunits:
+ description:
+ - CPU weight for a VM
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ type: str
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ type: str
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+ - with C(state=present) the force option allows overwriting an existing container
+ - with states C(stopped), C(restarted) it allows force-stopping the instance
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the instance
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+ pubkey:
+ description:
+ - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2; it is ignored for earlier versions
+ type: str
+ unprivileged:
+ description:
+ - Indicate if the container should be unprivileged
+ type: bool
+ default: 'no'
+ description:
+ description:
+ - Specify the description for the container. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ version_added: '0.2.0'
+ hookscript:
+ description:
+ - Script that will be executed during various steps in the container's lifetime.
+ type: str
+ version_added: '0.2.0'
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when
+ a user expects different behavior from Proxmox by default, or when the filled-in
+ defaults themselves cause problems.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value (see the last example below).
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+notes:
+ - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = r'''
+- name: Create new container with minimal options
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with hookscript and description
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ hookscript: 'local:snippets/vm_hook.sh'
+ description: created with ansible
+
+- name: Create new container automatically selecting the next available vmid.
+ community.general.proxmox:
+ node: 'uk-mc02'
+ api_user: 'root@pam'
+ api_password: '1q2w3e'
+ api_host: 'node1'
+ password: '123456'
+ hostname: 'example.org'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options with force (it will overwrite the existing container)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: yes
+
+- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ cores: 2
+
+- name: Start container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+- name: >
+ Start container with mount. You should enter a 90-second timeout because servers
+ with additional disks take longer to boot
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+ timeout: 90
+
+- name: Stop container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+- name: Stop container with force
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ force: yes
+ state: stopped
+
+- name: Restart container (a stopped or mounted container cannot be restarted)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: restarted
+
+- name: Remove container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
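+
+# Illustrative addition (not part of the original upstream examples): explicitly
+# setting proxmox_default_behavior avoids the deprecation warning described above.
+- name: Create new container without applying the old option defaults
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ proxmox_default_behavior: no_defaults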
+'''
+
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VZ_TYPE = None
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, hostname):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]
+
+
+def get_instance(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def content_check(proxmox, node, ostemplate, template_store):
+ return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if VZ_TYPE == 'lxc':
+ kwargs['cpulimit'] = cpus
+ kwargs['rootfs'] = disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ if 'pubkey' in kwargs:
+ if proxmox_version(proxmox) >= LooseVersion('4.2'):
+ kwargs['ssh-public-keys'] = kwargs['pubkey']
+ del kwargs['pubkey']
+ else:
+ kwargs['cpus'] = cpus
+ kwargs['disk'] = disk
+
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
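+ # Poll the creation task once per second until it completes successfully; fail
+ # with the last task log line if the timeout (in seconds) expires first.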
+ while timeout:
+ if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ vmid=dict(required=False),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ pool=dict(),
+ password=dict(no_log=True),
+ hostname=dict(),
+ ostemplate=dict(),
+ disk=dict(type='str'),
+ cores=dict(type='int'),
+ cpus=dict(type='int'),
+ memory=dict(type='int'),
+ swap=dict(type='int'),
+ netif=dict(type='dict'),
+ mounts=dict(type='dict'),
+ ip_address=dict(),
+ onboot=dict(type='bool'),
+ storage=dict(default='local'),
+ cpuunits=dict(type='int'),
+ nameserver=dict(),
+ searchdomain=dict(),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ pubkey=dict(type='str', default=None),
+ unprivileged=dict(type='bool', default=False),
+ description=dict(type='str'),
+ hookscript=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ disk="3",
+ cores=1,
+ cpus=1,
+ memory=512,
+ swap=0,
+ onboot=False,
+ cpuunits=1000,
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global VZ_TYPE
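+ # PVE 4.0 replaced OpenVZ with LXC, so choose the container API path based on the cluster version.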
+ VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = get_nextvmid(module, proxmox)
+ elif not vmid and hostname:
+ hosts = get_vmid(proxmox, hostname)
+ if len(hosts) == 0:
+ module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
+ vmid = hosts[0]
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
+ elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' not exists in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+ module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ cores=module.params['cores'],
+ pool=module.params['pool'],
+ password=module.params['password'],
+ hostname=module.params['hostname'],
+ ostemplate=module.params['ostemplate'],
+ netif=module.params['netif'],
+ mounts=module.params['mounts'],
+ ip_address=module.params['ip_address'],
+ onboot=int(module.params['onboot']),
+ cpuunits=module.params['cpuunits'],
+ nameserver=module.params['nameserver'],
+ searchdomain=module.params['searchdomain'],
+ force=int(module.params['force']),
+ pubkey=module.params['pubkey'],
+ unprivileged=int(module.params['unprivileged']),
+ description=module.params['description'],
+ hookscript=module.params['hookscript'])
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
+ getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout)):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
new file mode 100644
index 00000000..fc7c37c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_domain_info
+short_description: Retrieve information about one or more Proxmox VE domains
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE domains.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing domains
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_domains
+
+- name: Retrieve information about the pve domain
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_domain_pve
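+
+# Illustrative addition (not part of the original upstream examples): the registered
+# variable exposes the realms under the proxmox_domains key documented in RETURN below.
+- name: Print the names of all realms
+ ansible.builtin.debug:
+ msg: "{{ proxmox_domains.proxmox_domains | map(attribute='realm') | list }}"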
+'''
+
+
+RETURN = '''
+proxmox_domains:
+ description: List of authentication domains.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the realm.
+ returned: on success
+ type: str
+ realm:
+ description: Realm name.
+ returned: on success
+ type: str
+ type:
+ description: Realm type.
+ returned: on success
+ type: str
+ digest:
+ description: Realm hash.
+ returned: on success, can be absent
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
+ def get_domain(self, realm):
+ try:
+ domain = self.proxmox_api.access.domains.get(realm)
+ except Exception:
+ self.module.fail_json(msg="Domain '%s' does not exist" % realm)
+ domain['realm'] = realm
+ return domain
+
+ def get_domains(self):
+ domains = self.proxmox_api.access.domains.get()
+ return domains
+
+
+def proxmox_domain_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ domain_info_args = proxmox_domain_info_argument_spec()
+ module_args.update(domain_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxDomainInfoAnsible(module)
+ domain = module.params['domain']
+
+ if domain:
+ domains = [proxmox.get_domain(realm=domain)]
+ else:
+ domains = proxmox.get_domains()
+ result['proxmox_domains'] = domains
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
new file mode 100644
index 00000000..063d28e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_group_info
+short_description: Retrieve information about one or more Proxmox VE groups
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE groups
+options:
+ group:
+ description:
+ - Restrict results to a specific group.
+ aliases: ['groupid', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing groups
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_groups
+
+- name: Retrieve information about the admin group
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ group: admin
+ register: proxmox_group_admin
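+
+# Illustrative addition (not part of the original upstream examples): the registered
+# variable exposes the groups under the proxmox_groups key documented in RETURN below.
+- name: Print the members of the admin group
+ ansible.builtin.debug:
+ var: proxmox_group_admin.proxmox_groups.0.users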
+'''
+
+
+RETURN = '''
+proxmox_groups:
+ description: List of groups.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the group.
+ returned: on success, can be absent
+ type: str
+ groupid:
+ description: Group name.
+ returned: on success
+ type: str
+ users:
+ description: List of users in the group.
+ returned: on success
+ type: list
+ elements: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
+ def get_group(self, groupid):
+ try:
+ group = self.proxmox_api.access.groups.get(groupid)
+ except Exception:
+ self.module.fail_json(msg="Group '%s' does not exist" % groupid)
+ group['groupid'] = groupid
+ return ProxmoxGroup(group)
+
+ def get_groups(self):
+ groups = self.proxmox_api.access.groups.get()
+ return [ProxmoxGroup(group) for group in groups]
+
+
+class ProxmoxGroup:
+ def __init__(self, group):
+ self.group = dict()
+ # Data representation is not the same depending on API calls
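+ # For example (illustrative values only): listing all groups may return
+ # {'groupid': 'admin', 'users': 'root@pam,alice@pve'}, while querying a single
+ # group may return {'members': ['root@pam', 'alice@pve']}; both shapes are
+ # normalized below to a 'users' list.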
+ for k, v in group.items():
+ if k == 'users' and isinstance(v, str):
+ self.group['users'] = v.split(',')
+ elif k == 'members':
+ self.group['users'] = group['members']
+ else:
+ self.group[k] = v
+
+
+def proxmox_group_info_argument_spec():
+ return dict(
+ group=dict(type='str', aliases=['groupid', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ group_info_args = proxmox_group_info_argument_spec()
+ module_args.update(group_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxGroupInfoAnsible(module)
+ group = module.params['group']
+
+ if group:
+ groups = [proxmox.get_group(groupid=group)]
+ else:
+ groups = proxmox.get_groups()
+ result['proxmox_groups'] = [group.group for group in groups]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
new file mode 100644
index 00000000..0161fefc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
@@ -0,0 +1,1449 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
+options:
+ acpi:
+ description:
+ - Specify if ACPI should be enabled/disabled.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ agent:
+ description:
+ - Specify if the QEMU Guest Agent should be enabled/disabled.
+ type: bool
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ type: str
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ autostart:
+ description:
+ - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ type: str
+ choices: ['seabios', 'ovmf']
+ boot:
+ description:
+ - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine to set order.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ type: str
+ cicustom:
+ description:
+ - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
+ type: str
+ version_added: 1.3.0
+ cipassword:
+ description:
+ - 'cloud-init: password of default user to create.'
+ type: str
+ version_added: 1.3.0
+ citype:
+ description:
+ - 'cloud-init: Specifies the cloud-init configuration format.'
+ - The default depends on the configured operating system type (C(ostype)).
+ - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ type: str
+ choices: ['nocloud', 'configdrive2']
+ version_added: 1.3.0
+ ciuser:
+ description:
+ - 'cloud-init: username of default user to create.'
+ type: str
+ version_added: 1.3.0
+ clone:
+ description:
+ - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is still required for initiating the clone.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has a total of '2' CPU time.
+ type: int
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable fair-scheduler configuration by setting this to 0
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ type: str
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as comment inside the configuration file.
+ type: str
+ digest:
+ description:
+ - Prevent changes if the current configuration file has a different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ type: str
+ force:
+ description:
+ - Allow to force stop VM.
+ - Can be used with states C(stopped) and C(restarted).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ format:
+ description:
+ - Target drive's backing file's data format.
+ - Used only with clone
+ - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
+ not specifying this option is equivalent to setting it to C(unspecified).
+ Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
+ type: str
+ choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ type: bool
+ full:
+ description:
+ - Create a full copy of all disks. This is always done when you clone a normal VM.
+ - For VM templates, we try to create a linked clone by default.
+ - Used only with clone
+ type: bool
+ default: 'yes'
+ hostpci:
+ description:
+ - Specify a hash/dictionary of host PCI devices to map into the guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ type: str
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ type: str
+ choices: ['any', '2', '1024']
+ ide:
+ description:
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ ipconfig:
+ description:
+ - 'cloud-init: Set the IP configuration.'
+ - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
+ - Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
+ - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
+ - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
+ - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
+ - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
+ - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
+ type: dict
+ version_added: 1.3.0
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ type: str
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ type: bool
+ lock:
+ description:
+ - Lock/unlock the VM.
+ type: str
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ type: str
+ memory:
+ description:
+ - Memory size in MB for instance.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ type: int
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ type: str
+ nameservers:
+ description:
+ - 'cloud-init: DNS server IP address(es).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ type: dict
+ newid:
+ description:
+ - VMID for the clone. Used only with clone.
+ - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ node:
+ description:
+ - Proxmox VE node, where the new VM will be created.
+ - Only required for C(state=present).
+ - For other states, it will be autodiscovered.
+ type: str
+ numa:
+ description:
+ - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ type: dict
+ numa_enabled:
+ description:
+ - Enables NUMA.
+ type: bool
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - The l26 is Linux 2.6/3.X Kernel.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
+ parallel:
+ description:
+ - A hash/dictionary of host parallel devices to map. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ type: dict
+ pool:
+ description:
+ - Add the new VM to the specified pool.
+ type: str
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ type: bool
+ reboot:
+ description:
+ - Allow reboot. If set to C(yes), the VM exits on reboot.
+ type: bool
+ revert:
+ description:
+ - Revert a pending change.
+ type: str
+ sata:
+ description:
+ - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsi:
+ description:
+ - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ type: str
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ searchdomains:
+ description:
+ - 'cloud-init: Sets DNS search domain(s).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ serial:
+ description:
+ - A hash/dictionary of serial devices to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - C(serial[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ shares:
+ description:
+ - Sets the amount of memory shares for auto-ballooning (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning, this means no limit.
+ type: int
+ skiplock:
+ description:
+ - Ignore locks.
+ - Only root is allowed to use this option.
+ type: bool
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ type: str
+ snapname:
+ description:
+ - The name of the snapshot. Used only with clone.
+ type: str
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ sshkeys:
+ description:
+ - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
+ type: str
+ version_added: 1.3.0
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid formats for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ type: str
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown is done with reverse ordering.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the instance.
+ - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
+ default: present
+ storage:
+ description:
+ - Target storage for full clone.
+ type: str
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ target:
+ description:
+ - Target node. Only allowed if the original VM is on shared storage.
+ - Used only with clone.
+ type: str
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ type: bool
+ template:
+ description:
+ - Enables/disables the template.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ update:
+ description:
+ - If C(yes), the VM will be updated with the new values.
+ - Because of API limitations and for security reasons, updating the following parameters is disabled:
+ - C(net, virtio, ide, sata, scsi). For example, updating C(net) would change the MAC address and updating C(virtio) would always create a new disk.
+ - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'no'
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ type: int
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16), you should use the C(std) or C(vmware) option.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ virtio:
+ description:
+ - A hash/dictionary of volumes used as VIRTIO hard disks. C(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ vmid:
+ description:
+ - Specifies the VM ID. You can use the I(name) parameter instead.
+ - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ type: str
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This caused problems when
+ users expected different behavior from Proxmox by default or did not want
+ those options set at all.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
+ I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
+ I(tablet), I(template), and I(vga) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+- name: Create new VM with minimal options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+
+- name: Create new VM with minimal options and given vmid
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ vmid: 100
+
+- name: Create new VM with two network interface options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ net1: 'e1000,bridge=vmbr2'
+
+- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ virtio:
+ virtio0: 'VMs_LVM:10'
+ virtio1: 'VMs:2,format=qcow2'
+ virtio2: 'VMs:5,format=raw'
+ cores: 4
+ vcpus: 2
+
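+# A minimal, illustrative sketch of the sata/scsi dict format documented in the options above;
+# the storage names are reused from the examples above and the sizes are arbitrary.
+- name: Create new VM with one SATA disk and one SCSI disk
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ scsihw: virtio-scsi-pci
+ sata:
+ sata0: 'VMs_LVM:10,format=raw'
+ scsi:
+ scsi0: 'VMs:20,format=qcow2'
+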
+- name: >
+ Clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ format: qcow2
+ timeout: 500
+
+- name: >
+ Create linked clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ full: no
+ format: unspecified
+ timeout: 500
+
+- name: Clone VM with source vmid and target newid and raw format
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: arbitrary_name
+ vmid: 108
+ newid: 152
+ name: zavala
+ node: sabrewulf
+ storage: LVM_STO
+ format: raw
+ timeout: 300
+
+- name: Create new VM and lock it for snapshot
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ lock: snapshot
+
+- name: Create new VM and set protection to disable the remove VM and remove disk operations
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ protection: yes
+
+- name: Create new VM using cloud-init with a username and password
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ ciuser: mylinuxuser
+ cipassword: supersecret
+ searchdomains: 'mydomain.internal'
+ nameservers: 1.1.1.1
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
+
+- name: Create new VM using Cloud-Init with an ssh key
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
+ searchdomains: 'mydomain.internal'
+ nameservers:
+ - '1.1.1.1'
+ - '8.8.8.8'
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24'
+
+- name: Start VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: started
+
+- name: Stop VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+
+- name: Stop VM with force
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+ force: yes
+
+- name: Restart VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: restarted
+
+- name: Remove VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: absent
+
+- name: Get VM current state
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: current
+
+- name: Update VM configuration
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ update: yes
+
+- name: Delete QEMU parameters
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ delete: 'args,template,cpulimit'
+
+- name: Revert a pending change
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ revert: 'template,cpulimit'
+'''
+
+RETURN = '''
+devices:
+ description: The list of devices created or used.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "ide0": "VMS_LVM:vm-115-disk-1",
+ "ide1": "VMs:115/vm-115-disk-3.raw",
+ "virtio0": "VMS_LVM:vm-115-disk-2",
+ "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+ "virtio2": "VMs:115/vm-115-disk-2.raw"
+ }'
+mac:
+ description: List of MAC addresses created and attached to net[n]. Useful when you want to use provisioning systems like Foreman via PXE.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "net0": "3E:6E:97:D2:31:9F",
+ "net1": "B6:A1:FC:EF:78:A4"
+ }'
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description:
+ - The current virtual machine status.
+ - Returned only when C(state=current).
+ returned: success
+ type: dict
+ sample: '{
+ "changed": false,
+ "msg": "VM kropta with vmid = 110 is running",
+ "status": "running"
+ }'
+'''
+
+import os
+import re
+import time
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, name):
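+ """Return the vmids of all cluster VMs whose name matches exactly (normally zero or one entries)."""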
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name]
+
+
+def get_vm(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def get_vminfo(module, proxmox, node, vmid, **kwargs):
+ global results
+ results = {}
+ mac = {}
+ devices = {}
+ try:
+ vm = proxmox.nodes(node).qemu(vmid).config.get()
+ except Exception as e:
+ module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ # Sanitize kwargs. Remove not defined args.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
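+ # For example, a (hypothetical) net={'net0': 'virtio,bridge=vmbr1'} dict becomes the flat key
+ # net0='virtio,bridge=vmbr1', matching the flat option names returned by the config API.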
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Split information by type
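+ # A net[n] config value looks like '<model>=<MAC>,bridge=...', so the regex below extracts the MAC
+ # between '=' and the first comma; disk values start with '<storage>:<volume>,', so everything up
+ # to the first comma is kept as the device identifier.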
+ for k, v in kwargs.items():
+ if re.match(r'net[0-9]', k) is not None:
+ interface = k
+ k = vm[k]
+ k = re.search('=(.*?),', k).group(1)
+ mac[interface] = k
+ if (re.match(r'virtio[0-9]', k) is not None or
+ re.match(r'ide[0-9]', k) is not None or
+ re.match(r'scsi[0-9]', k) is not None or
+ re.match(r'sata[0-9]', k) is not None):
+ device = k
+ k = vm[k]
+ k = re.search('(.*?),', k).group(1)
+ devices[device] = k
+
+ results['mac'] = mac
+ results['devices'] = devices
+ results['vmid'] = int(vmid)
+
+
+def settings(module, proxmox, vmid, node, name, **kwargs):
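+ """Apply a partial configuration change (used for delete= and revert=); the API returns no body on success, which is what the return value below reflects."""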
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove not defined args.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if proxmox_node.qemu(vmid).config.set(**kwargs) is None:
+ return True
+ else:
+ return False
+
+
+def wait_for_task(module, proxmox, node, taskid):
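+ """Poll the task roughly once per second and return True once it reports stopped/OK, or False if the module timeout (in seconds) elapses first."""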
+ timeout = module.params['timeout']
+
+ while timeout:
+ task = proxmox.nodes(node).tasks(taskid).status.get()
+ if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
+ # Wait an extra second as the API can be ahead of the hypervisor
+ time.sleep(1)
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ break
+ time.sleep(1)
+ return False
+
+
+def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
+ # Available only in PVE 4
+ only_v4 = ['force', 'protection', 'skiplock']
+ only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']
+
+ # Valid clone parameters
+ valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
+ clone_params = {}
+ # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
+ vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
+
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove not defined args and ensure True and False are converted to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+ kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
+
+ # The features work only on PVE 4+
+ if PVE_MAJOR_VERSION < 4:
+ for p in only_v4:
+ if p in kwargs:
+ del kwargs[p]
+
+ # The features work only on PVE 6
+ if PVE_MAJOR_VERSION < 6:
+ for p in only_v6:
+ if p in kwargs:
+ del kwargs[p]
+
+ # 'sshkeys' param expects an urlencoded string
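+ # e.g. 'ssh-ed25519 AAAA... user@host' is sent as 'ssh-ed25519%20AAAA...%20user%40host'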
+ if 'sshkeys' in kwargs:
+ urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
+ kwargs['sshkeys'] = str(urlencoded_ssh_keys)
+
+ # If update, don't update disk (virtio, ide, sata, scsi) and network interface
+ # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
+ if update:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'net' in kwargs:
+ del kwargs['net']
+ if 'force' in kwargs:
+ del kwargs['force']
+ if 'pool' in kwargs:
+ del kwargs['pool']
+
+ # Convert all dict in kwargs to elements. For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Rename numa_enabled to numa, according to the API documentation
+ if 'numa_enabled' in kwargs:
+ kwargs['numa'] = kwargs['numa_enabled']
+ del kwargs['numa_enabled']
+
+ # PVE api expects strings for the following params
+ if 'nameservers' in module.params:
+ nameservers = module.params.pop('nameservers')
+ if nameservers:
+ kwargs['nameserver'] = ' '.join(nameservers)
+ if 'searchdomains' in module.params:
+ searchdomains = module.params.pop('searchdomains')
+ if searchdomains:
+ kwargs['searchdomain'] = ' '.join(searchdomains)
+
+ # -args and skiplock require root@pam user
+ if module.params['api_user'] == "root@pam" and module.params['args'] is None:
+ if not update:
+ kwargs['args'] = vm_args
+ elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
+ kwargs['args'] = module.params['args']
+ elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
+ module.fail_json(msg='args parameter requires root@pam user.')
+
+ if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
+ module.fail_json(msg='skiplock parameter requires root@pam user.')
+
+ if update:
+ if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
+ return True
+ else:
+ return False
+ elif module.params['clone'] is not None:
+ for param in valid_clone_params:
+ if module.params[param] is not None:
+ clone_params[param] = module.params[param]
+ clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
+ taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
+ else:
+ taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
+
+ if not wait_for_task(module, proxmox, node, taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to be created. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def start_vm(module, proxmox, vm):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.start.post()
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def stop_vm(module, proxmox, vm, force):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ acpi=dict(type='bool'),
+ agent=dict(type='bool'),
+ args=dict(type='str'),
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ autostart=dict(type='bool'),
+ balloon=dict(type='int'),
+ bios=dict(choices=['seabios', 'ovmf']),
+ boot=dict(type='str'),
+ bootdisk=dict(type='str'),
+ cicustom=dict(type='str'),
+ cipassword=dict(type='str', no_log=True),
+ citype=dict(type='str', choices=['nocloud', 'configdrive2']),
+ ciuser=dict(type='str'),
+ clone=dict(type='str', default=None),
+ cores=dict(type='int'),
+ cpu=dict(type='str'),
+ cpulimit=dict(type='int'),
+ cpuunits=dict(type='int'),
+ delete=dict(type='str', default=None),
+ description=dict(type='str'),
+ digest=dict(type='str'),
+ force=dict(type='bool'),
+ format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
+ freeze=dict(type='bool'),
+ full=dict(type='bool', default=True),
+ hostpci=dict(type='dict'),
+ hotplug=dict(type='str'),
+ hugepages=dict(choices=['any', '2', '1024']),
+ ide=dict(type='dict'),
+ ipconfig=dict(type='dict'),
+ keyboard=dict(type='str'),
+ kvm=dict(type='bool'),
+ localtime=dict(type='bool'),
+ lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine=dict(type='str'),
+ memory=dict(type='int'),
+ migrate_downtime=dict(type='int'),
+ migrate_speed=dict(type='int'),
+ name=dict(type='str'),
+ nameservers=dict(type='list', elements='str'),
+ net=dict(type='dict'),
+ newid=dict(type='int', default=None),
+ node=dict(),
+ numa=dict(type='dict'),
+ numa_enabled=dict(type='bool'),
+ onboot=dict(type='bool'),
+ ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']),
+ parallel=dict(type='dict'),
+ pool=dict(type='str'),
+ protection=dict(type='bool'),
+ reboot=dict(type='bool'),
+ revert=dict(type='str'),
+ sata=dict(type='dict'),
+ scsi=dict(type='dict'),
+ scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial=dict(type='dict'),
+ searchdomains=dict(type='list', elements='str'),
+ shares=dict(type='int'),
+ skiplock=dict(type='bool'),
+ smbios=dict(type='str'),
+ snapname=dict(type='str'),
+ sockets=dict(type='int'),
+ sshkeys=dict(type='str'),
+ startdate=dict(type='str'),
+ startup=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ storage=dict(type='str'),
+ tablet=dict(type='bool'),
+ target=dict(type='str'),
+ tdf=dict(type='bool'),
+ template=dict(type='bool'),
+ timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ vcpus=dict(type='int'),
+ vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio=dict(type='dict'),
+ vmid=dict(type='int', default=None),
+ watchdog=dict(),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ ),
+ mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
+ required_one_of=[('name', 'vmid',)],
+ required_if=[('state', 'present', ['node'])]
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ clone = module.params['clone']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ delete = module.params['delete']
+ memory = module.params['memory']
+ name = module.params['name']
+ newid = module.params['newid']
+ node = module.params['node']
+ revert = module.params['revert']
+ sockets = module.params['sockets']
+ state = module.params['state']
+ update = bool(module.params['update'])
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
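+ # Reinstate the old option defaults (the behavior before proxmox_default_behavior was introduced in community.general 1.3.0).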
+ old_default_values = dict(
+ acpi=True,
+ autostart=False,
+ balloon=0,
+ boot='cnd',
+ cores=1,
+ cpu='kvm64',
+ cpuunits=1000,
+ force=False,
+ format='qcow2',
+ kvm=True,
+ memory=512,
+ ostype='l26',
+ sockets=1,
+ tablet=False,
+ template=False,
+ vga='std',
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ if module.params['format'] == 'unspecified':
+ module.params['format'] = None
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global PVE_MAJOR_VERSION
+ version = proxmox_version(proxmox)
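+ # version.version[0] is the numeric major release (for example 6 on a 6.x cluster);
+ # anything older than 4.0 is collapsed to 3, as the module treats those releases alike.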
+ PVE_MAJOR_VERSION = 3 if version < LooseVersion('4.0') else version.version[0]
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
+ if not vmid:
+ if state == 'present' and not update and not clone and not delete and not revert:
+ try:
+ vmid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ else:
+ clone_target = clone or name
+ try:
+ vmid = get_vmid(proxmox, clone_target)[0]
+ except Exception:
+ vmid = -1
+
+ if clone is not None:
+ # If newid is not defined then retrieve the next free id from ProxmoxAPI
+ if not newid:
+ try:
+ newid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+
+ # Ensure source VM name exists when cloning
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
+
+ # Ensure source VM id exists when cloning
+ if not get_vm(proxmox, vmid):
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ # Ensure the chosen VM name doesn't already exist when cloning
+ if get_vmid(proxmox, name):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+
+ # Ensure the chosen VM id doesn't already exist when cloning
+ if get_vm(proxmox, newid):
+ module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
+
+ if delete is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, delete=delete)
+ module.exit_json(changed=True, msg="Settings have been deleted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
+
+ if revert is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, revert=revert)
+ module.exit_json(changed=True, msg="Settings have been reverted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
+
+ if state == 'present':
+ try:
+ if get_vm(proxmox, vmid) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+ elif get_vmid(proxmox, name) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+ elif not node:
+ module.fail_json(msg='node is mandatory for creating/updating a VM')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ acpi=module.params['acpi'],
+ agent=module.params['agent'],
+ autostart=module.params['autostart'],
+ balloon=module.params['balloon'],
+ bios=module.params['bios'],
+ boot=module.params['boot'],
+ bootdisk=module.params['bootdisk'],
+ cicustom=module.params['cicustom'],
+ cipassword=module.params['cipassword'],
+ citype=module.params['citype'],
+ ciuser=module.params['ciuser'],
+ cpulimit=module.params['cpulimit'],
+ cpuunits=module.params['cpuunits'],
+ description=module.params['description'],
+ digest=module.params['digest'],
+ force=module.params['force'],
+ freeze=module.params['freeze'],
+ hostpci=module.params['hostpci'],
+ hotplug=module.params['hotplug'],
+ hugepages=module.params['hugepages'],
+ ide=module.params['ide'],
+ ipconfig=module.params['ipconfig'],
+ keyboard=module.params['keyboard'],
+ kvm=module.params['kvm'],
+ localtime=module.params['localtime'],
+ lock=module.params['lock'],
+ machine=module.params['machine'],
+ migrate_downtime=module.params['migrate_downtime'],
+ migrate_speed=module.params['migrate_speed'],
+ net=module.params['net'],
+ numa=module.params['numa'],
+ numa_enabled=module.params['numa_enabled'],
+ onboot=module.params['onboot'],
+ ostype=module.params['ostype'],
+ parallel=module.params['parallel'],
+ pool=module.params['pool'],
+ protection=module.params['protection'],
+ reboot=module.params['reboot'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ scsihw=module.params['scsihw'],
+ serial=module.params['serial'],
+ shares=module.params['shares'],
+ skiplock=module.params['skiplock'],
+ smbios1=module.params['smbios'],
+ snapname=module.params['snapname'],
+ sshkeys=module.params['sshkeys'],
+ startdate=module.params['startdate'],
+ startup=module.params['startup'],
+ tablet=module.params['tablet'],
+ target=module.params['target'],
+ tdf=module.params['tdf'],
+ template=module.params['template'],
+ vcpus=module.params['vcpus'],
+ vga=module.params['vga'],
+ virtio=module.params['virtio'],
+ watchdog=module.params['watchdog'])
+
+ if not clone:
+ get_vminfo(module, proxmox, node, vmid,
+ ide=module.params['ide'],
+ net=module.params['net'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ virtio=module.params['virtio'])
+ if update:
+ module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ if update:
+ module.fail_json(msg="Unable to update vm {0} with vmid {1}: ".format(name, vmid) + str(e))
+ elif clone is not None:
+ module.fail_json(msg="Unable to clone vm {0} from vmid {1}: ".format(name, vmid) + str(e))
+ else:
+ module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+
+ elif state == 'started':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False)
+
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+ taskid = proxmox_node.qemu.delete(vmid)
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+ module.fail_json(msg='Reached timeout while waiting for the VM to be removed. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ else:
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'current':
+ status = {}
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
new file mode 100644
index 00000000..541dc28e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+ - allows you to upload/delete templates in Proxmox VE cluster
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: 'no'
+ type: bool
+ node:
+ description:
+ - The Proxmox VE node on which the template operations are performed.
+ type: str
+ src:
+ description:
+ - path to the template file to upload
+ - required only for C(state=present)
+ type: path
+ template:
+ description:
+ - the template name
+ - Required for state C(absent) to delete a template.
+ - Required for state C(present) to download an appliance container template (pveam).
+ type: str
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ type: str
+ default: 'vztmpl'
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - can be used only with C(state=present); an existing template will be overwritten
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicates desired state of the template
+ type: str
+ choices: ['present', 'absent']
+ default: present
+notes:
+ - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = '''
+- name: Upload new openvz template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: >
+ Upload new openvz template with minimal options, using the environment
+ PROXMOX_PASSWORD variable (you should export it beforehand)
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: Upload new openvz template with all options and force overwrite
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: yes
+
+- name: Delete template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+
+- name: Download proxmox appliance container template
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
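+
+# A minimal, illustrative sketch of an ISO upload (hypothetical file name), based on the
+# content_type choices documented above.
+- name: Upload an ISO image
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: iso
+ src: ~/ubuntu-20.04-live-server-amd64.iso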
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_template(proxmox, node, storage, content_type, template):
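+ """Return a non-empty list when a volume with volid '<storage>:<content_type>/<template>' already exists."""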
+ return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+
+
+def task_status(module, proxmox, node, taskid, timeout):
+ """
+ Check the task status and wait until the task is completed or the timeout is reached.
+ """
+ while timeout:
+ task_status = proxmox.nodes(node).tasks(taskid).status.get()
+ if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the template to be uploaded/downloaded. Last line in task before timeout: %s'
+ % proxmox.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def upload_template(module, proxmox, node, storage, content_type, realpath, timeout):
+ taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def download_template(module, proxmox, node, storage, template, timeout):
+ taskid = proxmox.nodes(node).aplinfo.post(storage=storage, template=template)
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ proxmox.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not get_template(proxmox, node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the template to be deleted.')
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ src=dict(type='path'),
+ template=dict(),
+ content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+ storage=dict(default='local'),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ # Used to test the validity of the token if given
+ proxmox.version.get()
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
+
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if download_template(module, proxmox, node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+
+ template = os.path.basename(src)
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not src:
+ module.fail_json(msg='src param for uploading a template file is mandatory')
+ elif not (os.path.exists(src) and os.path.isfile(src)):
+ module.fail_json(msg='template file at path %s does not exist' % src)
+
+ if upload_template(module, proxmox, node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param is mandatory')
+ elif not get_template(proxmox, node, storage, content_type, template):
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
+
+ if delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
new file mode 100644
index 00000000..1de93e60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_user_info
+short_description: Retrieve information about one or more Proxmox VE users
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE users
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm']
+ type: str
+ user:
+ description:
+ - Restrict results to a specific user.
+ aliases: ['name']
+ type: str
+ userid:
+ description:
+ - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+EXAMPLES = '''
+- name: List existing users
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_users
+
+- name: List existing users in the pve authentication realm
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_users_pve
+
+- name: Retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ userid: admin@pve
+ register: proxmox_user_admin
+
+- name: Alternative way to retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ user: admin
+ domain: pve
+ register: proxmox_user_admin
+'''
+
+
+RETURN = '''
+proxmox_users:
+ description: List of users.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the user.
+ returned: on success
+ type: str
+ domain:
+ description: User's authentication realm, also the right part of the user ID.
+ returned: on success
+ type: str
+ email:
+ description: User's email address.
+ returned: on success
+ type: str
+ enabled:
+ description: User's account state.
+ returned: on success
+ type: bool
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ firstname:
+ description: User's first name.
+ returned: on success
+ type: str
+ groups:
+ description: List of groups which the user is a member of.
+ returned: on success
+ type: list
+ elements: str
+ keys:
+ description: User's two factor authentication keys.
+ returned: on success
+ type: str
+ lastname:
+ description: User's last name.
+ returned: on success
+ type: str
+ tokens:
+ description: List of API tokens associated to the user.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the token.
+ returned: on success
+ type: str
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ privsep:
+ description: Describe if the API token is further restricted with ACLs or is fully privileged.
+ returned: on success
+ type: bool
+ tokenid:
+ description: Token name.
+ returned: on success
+ type: str
+ user:
+ description: User's login name, also the left part of the user ID.
+ returned: on success
+ type: str
+ userid:
+ description: Proxmox user ID, represented as user@realm.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxUserInfoAnsible(ProxmoxAnsible):
+ def get_user(self, userid):
+ try:
+ user = self.proxmox_api.access.users.get(userid)
+ except Exception:
+ self.module.fail_json(msg="User '%s' does not exist" % userid)
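+ # Re-attach the userid so ProxmoxUser below can split it into its user and domain parts.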
+ user['userid'] = userid
+ return ProxmoxUser(user)
+
+ def get_users(self, domain=None):
+ users = self.proxmox_api.access.users.get(full=1)
+ users = [ProxmoxUser(user) for user in users]
+ if domain:
+ return [user for user in users if user.user['domain'] == domain]
+ return users
+
+
+class ProxmoxUser:
+ def __init__(self, user):
+ self.user = dict()
+ # Data representation is not the same depending on API calls
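+ # (for example, the list endpoint appears to return an 'enable' flag and 'tokens' as a list, while
+ # the single-user endpoint returns 'tokens' as a dict keyed by tokenid; both are normalized here)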
+ for k, v in user.items():
+ if k == 'enable':
+ self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
+ elif k == 'userid':
+ self.user['user'] = user['userid'].split('@')[0]
+ self.user['domain'] = user['userid'].split('@')[1]
+ self.user[k] = v
+ elif k in ['groups', 'tokens'] and (v == '' or v is None):
+ self.user[k] = []
+ elif k == 'groups' and type(v) == str:
+ self.user['groups'] = v.split(',')
+ elif k == 'tokens' and type(v) == list:
+ for token in v:
+ if 'privsep' in token:
+ token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
+ self.user['tokens'] = v
+ elif k == 'tokens' and type(v) == dict:
+ self.user['tokens'] = list()
+ for tokenid, tokenvalues in v.items():
+ t = tokenvalues
+ t['tokenid'] = tokenid
+ if 'privsep' in tokenvalues:
+ t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
+ self.user['tokens'].append(t)
+ else:
+ self.user[k] = v
+
+
+def proxmox_user_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm']),
+ user=dict(type='str', aliases=['name']),
+ userid=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ user_info_args = proxmox_user_info_argument_spec()
+ module_args.update(user_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxUserInfoAnsible(module)
+ domain = module.params['domain']
+ user = module.params['user']
+ if user and domain:
+ userid = user + '@' + domain
+ else:
+ userid = module.params['userid']
+
+ if userid:
+ users = [proxmox.get_user(userid=userid)]
+ else:
+ users = proxmox.get_users(domain=domain)
+ result['proxmox_users'] = [user.user for user in users]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
new file mode 100644
index 00000000..2aebc346
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhevm
+short_description: RHEV/oVirt automation
+description:
+ - This module only supports oVirt/RHEV version 3.
+ - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform.
+requirements:
+ - ovirtsdk
+author:
+- Timothy Vandenbrande (@TimothyVandenbrande)
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ default: admin@internal
+ password:
+ description:
+ - The password for user authentication.
+ type: str
+ required: true
+ server:
+ description:
+ - The name/IP of your RHEV-m/oVirt instance.
+ type: str
+ default: 127.0.0.1
+ port:
+ description:
+ - The port on which the API is reachable.
+ type: int
+ default: 443
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the VM.
+ type: str
+ cluster:
+ description:
+ - The RHEV/oVirt cluster in which you want your VM to start.
+ type: str
+ datacenter:
+ description:
+ - The RHEV/oVirt datacenter in which you want your VM to start.
+ type: str
+ default: Default
+ state:
+ description:
+ - This serves to create/remove/update or powermanage your VM.
+ type: str
+ choices: [ absent, cd, down, info, ping, present, restarted, up ]
+ default: present
+ image:
+ description:
+ - The template to use for the VM.
+ type: str
+ type:
+ description:
+ - To define if the VM is a server or desktop.
+ type: str
+ choices: [ desktop, host, server ]
+ default: server
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ type: str
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ type: int
+ default: 2
+ cpu_share:
+ description:
+ - This parameter is used to configure the CPU share.
+ type: int
+ default: 0
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ type: int
+ default: 1
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ type: str
+ default: rhel_6x64
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ type: int
+ default: 1
+ vm_ha:
+ description:
+ - To make your VM High Available.
+ type: bool
+ default: yes
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ type: list
+ elements: str
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ type: list
+ elements: str
+ aliases: [ interfaces, nics ]
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the bootorder.
+ type: list
+ elements: str
+ default: [ hd, network ]
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ type: bool
+ default: yes
+ cd_drive:
+ description:
+ - The CD you wish to have mounted on the VM when I(state = 'CD').
+ type: str
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up').
+ - When I(state = 'down').
+ - When I(state = 'restarted').
+ type: int
+'''
+
+RETURN = r'''
+vm:
+ description: Returns all of the VM's variables and execution.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = r'''
+- name: Basic get info from VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: info
+
+- name: Basic create example from image
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ cluster: centos
+ image: centos7_x64
+ state: present
+
+- name: Power management
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: uptime_server
+ image: centos7_x64
+ state: down
+
+- name: Multi disk, multi nic create example
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: server007
+ type: server
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: eth0
+ vlan: vlan2202
+ - name: eth1
+ vlan: vlan36
+ - name: eth2
+ vlan: vlan38
+ - name: eth3
+ vlan: vlan2202
+ disks:
+ - name: root
+ size: 10
+ domain: ssd-san
+ - name: swap
+ size: 10
+ domain: 15kiscsi-san
+ - name: opt
+ size: 10
+ domain: 15kiscsi-san
+ - name: var
+ size: 10
+ domain: 10kiscsi-san
+ - name: home
+ size: 10
+ domain: sata-san
+ boot_order:
+ - network
+ - hd
+ state: present
+
+- name: Add a CD to the disk cd_drive
+ community.general.rhevm:
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: server007
+ cd_drive: rhev-tools-setup.iso
+ state: cd
+
+- name: New host deployment + host network configuration
+ community.general.rhevm:
+ password: '{{ rhevm.admin.pass }}'
+ name: ovirt_node007
+ type: host
+ cluster: rhevm01
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: 172.31.224.200
+ netmask: 255.255.254.0
+ - name: p3p2
+ ip: 172.31.225.200
+ netmask: 255.255.254.0
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: rhevm
+ ip: 172.31.222.200
+ netmask: 255.255.255.0
+ management: yes
+ - name: bond0.36
+ network: vlan36
+ ip: 10.2.36.200
+ netmask: 255.255.254.0
+ gateway: 10.2.36.254
+ - name: bond0.2202
+ network: vlan2202
+ - name: bond0.38
+ network: vlan38
+ state: present
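+
+# A minimal sketch (not part of the original examples) showing removal of a VM;
+# the server, credentials and VM name are placeholders.
+- name: Remove a VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: absent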
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
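+ # Poll the new disk until its status reaches 'ok' (up to 100 attempts, 2 seconds apart).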
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
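+ # Poll the new NIC until it reports active (up to 100 attempts, 2 seconds apart).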
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface:
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface:
+ if 'ip' in iface:
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface:
+ iface['ip'] = ''
+ if 'netmask' not in iface:
+ iface['netmask'] = ''
+ if 'gateway' not in iface:
+ iface['gateway'] = ''
+
+ if 'network' in iface:
+ if 'bond' in iface:
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
+ try:
+ tmpiface = params.Bonding(
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ),
+ override_configuration=True,
+ bonding=tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
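+ # Poll the VM every 10 seconds until it reaches the requested state; when a numeric
+ # timeout (in seconds) is given it is decremented each cycle and expiry fails the task.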
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ return self.conn.createVMimage(name, cluster, template, disks)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
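+ # Determine whether any requested disk is explicitly marked bootable; if none is,
+ # the first disk is made bootable further down.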
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
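+ # Walk the existing NICs in order: a name mismatch triggers a full rebuild of all NICs,
+ # surplus NICs are removed, and any remaining requested ifaces are created afterwards.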
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ # pylint: disable=unreachable
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if memory_policy == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == memory_policy:
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == memory:
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
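+ # Apply the memory and guaranteed-memory (mempol) updates in an order that never
+ # leaves the guaranteed value above the configured memory.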
+ if mem_nok and mem_pol_nok:
+ if memory_policy > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == cpu:
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == cpu_share:
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost:
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
+ user=dict(type='str', default='admin@internal'),
+ password=dict(type='str', required=True, no_log=True),
+ server=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int', default=443),
+ insecure_api=dict(type='bool', default=False),
+ name=dict(type='str'),
+ image=dict(type='str'),
+ datacenter=dict(type='str', default="Default"),
+ type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
+ cluster=dict(type='str', default=''),
+ vmhost=dict(type='str'),
+ vmcpu=dict(type='int', default=2),
+ vmmem=dict(type='int', default=1),
+ disks=dict(type='list', elements='str'),
+ osver=dict(type='str', default="rhel_6x64"),
+ ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
+ timeout=dict(type='int'),
+ mempol=dict(type='int', default=1),
+ vm_ha=dict(type='bool', default=True),
+ cpu_share=dict(type='int', default=0),
+ boot_order=dict(type='list', elements='str', default=['hd', 'network']),
+ del_prot=dict(type='bool', default=True),
+ cd_drive=dict(type='str'),
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
new file mode 100644
index 00000000..912d4226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+options:
+ state:
+ description:
+ - Goal state of given stage/project.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ serverless_bin_path:
+ description:
+ - The path of a serverless framework binary relative to the 'service_path', e.g. C(node_modules/.bin/serverless).
+ type: path
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ type: path
+ required: true
+ stage:
+ description:
+ - The name of the serverless framework project stage to deploy to.
+ - If this is not provided, the serverless framework default of "dev" is used.
+ type: str
+ functions:
+ description:
+ - A list of specific functions to deploy.
+ - If this is not provided, all functions in the service will be deployed.
+ type: list
+ elements: str
+ default: []
+ region:
+ description:
+ - AWS region to deploy the service to.
+ - This parameter defaults to C(us-east-1).
+ type: str
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them.
+ - When this option is C(false), all the functions will be built, but no stack update will be run to send them out.
+ - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ type: bool
+ default: yes
+ force:
+ description:
+ - Whether or not to force full deployment, equivalent to serverless C(--force) option.
+ type: bool
+ default: no
+ verbose:
+ description:
+ - Shows all stack events during deployment, and displays any Stack Output.
+ type: bool
+ default: no
+notes:
+ - Currently, the C(serverless) command must be in the PATH of the node executing the task.
+ In the future this may be a flag.
+requirements:
+- serverless
+- yaml
+author:
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = r'''
+- name: Basic deploy of a service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+- name: Deploy specific functions
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ functions:
+ - my_func_one
+ - my_func_two
+
+- name: Deploy a project, then pull its resource list back into Ansible
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_info module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_info:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+
+- name: Deploy a project using a locally installed serverless binary
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ serverless_bin_path: node_modules/.bin/serverless
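+
+# A minimal sketch (not part of the original examples): tearing a deployed
+# service back down; the stage name is a placeholder.
+- name: Remove a deployed service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ stage: dev
+ state: absent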
+'''
+
+RETURN = r'''
+service_name:
+ type: str
+ description: The service name specified in the serverless.yml that was just deployed.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: str
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: str
+ description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+'''
+
+import os
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def read_serverless_config(module):
+ path = module.params.get('service_path')
+
+ try:
+ with open(os.path.join(path, 'serverless.yml')) as sls_config:
+ config = yaml.safe_load(sls_config.read())
+ return config
+ except IOError as e:
+ module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e)))
+
+ module.fail_json(msg="Failed to open serverless config at {0}".format(
+ os.path.join(path, 'serverless.yml')))
+
+
+def get_service_name(module, stage):
+ config = read_serverless_config(module)
+ if config.get('service') is None:
+ module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+ if stage:
+ return "{0}-{1}".format(config['service'], stage)
+
+ return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ service_path=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ functions=dict(type='list', elements='str'),
+ region=dict(type='str', default=''),
+ stage=dict(type='str', default=''),
+ deploy=dict(type='bool', default=True),
+ serverless_bin_path=dict(type='path'),
+ force=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ ),
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg='yaml is required for this module')
+
+ service_path = module.params.get('service_path')
+ state = module.params.get('state')
+ functions = module.params.get('functions')
+ region = module.params.get('region')
+ stage = module.params.get('stage')
+ deploy = module.params.get('deploy', True)
+ force = module.params.get('force', False)
+ verbose = module.params.get('verbose', False)
+ serverless_bin_path = module.params.get('serverless_bin_path')
+
+ if serverless_bin_path is not None:
+ command = serverless_bin_path + " "
+ else:
+ command = "serverless "
+
+ if state == 'present':
+ command += 'deploy '
+ elif state == 'absent':
+ command += 'remove '
+ else:
+ module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
+
+ if state == 'present':
+ if not deploy:
+ command += '--noDeploy '
+ elif force:
+ command += '--force '
+
+ if region:
+ command += '--region {0} '.format(region)
+ if stage:
+ command += '--stage {0} '.format(stage)
+ if verbose:
+ command += '--verbose '
+
+ rc, out, err = module.run_command(command, cwd=service_path)
+ if rc != 0:
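+ # Removing a service whose stack does not exist is treated as already absent (no change).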
+ if state == 'absent' and "-{0}' does not exist".format(stage) in out:
+ module.exit_json(changed=False, state='absent', command=command,
+ out=out, service_name=get_service_name(module, stage))
+
+ module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
+
+ # gather some facts about the deployment
+ module.exit_json(changed=True, state='present', out=out, command=command,
+ service_name=get_service_name(module, stage))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
new file mode 100644
index 00000000..680bab9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: terraform
+short_description: Manages a Terraform deployment (and plans)
+description:
+ - Provides support for deploying resources with Terraform and pulling
+ resource information back into Ansible.
+options:
+ state:
+ choices: ['planned', 'present', 'absent']
+ description:
+ - Goal state of given stage/project.
+ type: str
+ default: present
+ binary_path:
+ description:
+ - The path of a terraform binary to use, relative to the 'project_path'
+ unless you supply an absolute path.
+ type: path
+ project_path:
+ description:
+ - The path to the root of the Terraform directory with the
+ vars.tf/main.tf/etc to use.
+ type: path
+ required: true
+ workspace:
+ description:
+ - The terraform workspace to work with.
+ type: str
+ default: default
+ purge_workspace:
+ description:
+ - Only works with state = absent.
+ - If true, the workspace will be deleted after the "terraform destroy" action.
+ - The 'default' workspace will not be deleted.
+ default: false
+ type: bool
+ plan_file:
+ description:
+ - The path to an existing Terraform plan file to apply. If this is not
+ specified, Ansible will build a new TF plan and execute it.
+ Note that this option is required if 'state' has the 'planned' value.
+ type: path
+ state_file:
+ description:
+ - The path to an existing Terraform state file to use when building plan.
+ If this is not specified, the default `terraform.tfstate` will be used.
+ - This option is ignored when plan is specified.
+ type: path
+ variables_files:
+ description:
+ - The path to a variables file for Terraform to fill into the TF
+ configurations. This can accept a list of paths to multiple variables files.
+ - Up until Ansible 2.9, this option was usable as I(variables_file).
+ type: list
+ elements: path
+ aliases: [ 'variables_file' ]
+ variables:
+ description:
+ - A group of key-values to override template variables or those in
+ variables files.
+ type: dict
+ targets:
+ description:
+ - A list of specific resources to target in this plan/application. The
+ resources selected here will also auto-include any dependencies.
+ type: list
+ elements: str
+ lock:
+ description:
+ - Enable statefile locking, if you use a service that accepts locks (such
+ as S3+DynamoDB) to store your statefile.
+ type: bool
+ default: true
+ lock_timeout:
+ description:
+ - How long to maintain the lock on the statefile, if you use a service
+ that accepts locks (such as S3+DynamoDB).
+ type: int
+ force_init:
+ description:
+ - To avoid duplicating infra, if a state file can't be found this will
+ force a `terraform init`. Generally, this should be turned off unless
+ you intend to provision an entirely new Terraform deployment.
+ default: false
+ type: bool
+ backend_config:
+ description:
+ - A group of key-values to provide at init stage to the -backend-config parameter.
+ type: dict
+ backend_config_files:
+ description:
+ - The path to a configuration file to provide at init state to the -backend-config parameter.
+ This can accept a list of paths to multiple configuration files.
+ type: list
+ elements: path
+ version_added: '0.2.0'
+ init_reconfigure:
+ description:
+ - Forces backend reconfiguration during init.
+ default: false
+ type: bool
+ version_added: '1.3.0'
+notes:
+ - To just run a `terraform plan`, use check mode.
+requirements: [ "terraform" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+- name: Basic deploy of a service
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+
+- name: Define the backend configuration at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config:
+ region: "eu-west-1"
+ bucket: "some-bucket"
+ key: "random.tfstate"
+
+- name: Define the backend configuration with one or more files at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config_files:
+ - /path/to/backend_config_file_1
+ - /path/to/backend_config_file_2
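+
+# A minimal sketch (not part of the original examples): applying a plan file
+# that was generated earlier; the file name is a placeholder.
+- name: Apply an existing plan file
+ community.general.terraform:
+ project_path: 'project/'
+ state: present
+ plan_file: saved.tfplan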
+"""
+
+RETURN = """
+outputs:
+ type: complex
+ description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
+ returned: on success
+ sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
+ contains:
+ sensitive:
+ type: bool
+ returned: always
+ description: Whether Terraform has marked this value as sensitive
+ type:
+ type: str
+ returned: always
+ description: The type of the value (string, int, etc.)
+ value:
+ type: str
+ returned: always
+ description: The value of the output as interpolated by Terraform
+stdout:
+ type: str
+ description: Full `terraform` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: terraform apply ...
+"""
+
+import os
+import json
+import tempfile
+from ansible.module_utils.six.moves import shlex_quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+DESTROY_ARGS = ('destroy', '-no-color', '-force')
+APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+module = None
+
+
+def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
+ if project_path in [None, ''] or '/' not in project_path:
+ module.fail_json(msg="Path for Terraform project can not be None or ''.")
+ if not os.path.exists(bin_path):
+ module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
+ if not os.path.isdir(project_path):
+ module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+
+ rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)
+
+
+def _state_args(state_file):
+ if state_file and os.path.exists(state_file):
+ return ['-state', state_file]
+ if state_file and not os.path.exists(state_file):
+ module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
+ return []
+
+
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure):
+ command = [bin_path, 'init', '-input=false']
+ if backend_config:
+ for key, val in backend_config.items():
+ command.extend([
+ '-backend-config',
+ shlex_quote('{0}={1}'.format(key, val))
+ ])
+ if backend_config_files:
+ for f in backend_config_files:
+ command.extend(['-backend-config', f])
+ if init_reconfigure:
+ command.extend(['-reconfigure'])
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+
+
+def get_workspace_context(bin_path, project_path):
+ workspace_ctx = {"current": "default", "all": []}
+ command = [bin_path, 'workspace', 'list', '-no-color']
+ rc, out, err = module.run_command(command, cwd=project_path)
+ if rc != 0:
+ module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
+ for item in out.split('\n'):
+ stripped_item = item.strip()
+ if not stripped_item:
+ continue
+ elif stripped_item.startswith('* '):
+ workspace_ctx["current"] = stripped_item.replace('* ', '')
+ else:
+ workspace_ctx["all"].append(stripped_item)
+ return workspace_ctx
+
+
+def _workspace_cmd(bin_path, project_path, action, workspace):
+ command = [bin_path, 'workspace', action, workspace, '-no-color']
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ return rc, out, err
+
+
+def create_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'new', workspace)
+
+
+def select_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'select', workspace)
+
+
+def remove_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace)
+
+
+def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
+ if plan_path is None:
+ f, plan_path = tempfile.mkstemp(suffix='.tfplan')
+
+ plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
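+ # With -detailed-exitcode, `terraform plan` exits 0 when nothing changes, 1 on error,
+ # and 2 when changes are pending; the branches below map those codes.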
+
+ for t in (module.params.get('targets') or []):
+ plan_command.extend(['-target', t])
+
+ plan_command.extend(_state_args(state_file))
+
+ rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
+
+ if rc == 0:
+ # no changes
+ return plan_path, False, out, err, plan_command if state == 'planned' else command
+ elif rc == 1:
+ # failure to plan
+ module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
+ elif rc == 2:
+ # changes, but successful
+ return plan_path, True, out, err, plan_command if state == 'planned' else command
+
+ module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_path=dict(required=True, type='path'),
+ binary_path=dict(type='path'),
+ workspace=dict(required=False, type='str', default='default'),
+ purge_workspace=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'planned']),
+ variables=dict(type='dict'),
+ variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None),
+ plan_file=dict(type='path'),
+ state_file=dict(type='path'),
+ targets=dict(type='list', elements='str', default=[]),
+ lock=dict(type='bool', default=True),
+ lock_timeout=dict(type='int',),
+ force_init=dict(type='bool', default=False),
+ backend_config=dict(type='dict', default=None),
+ backend_config_files=dict(type='list', elements='path', default=None),
+ init_reconfigure=dict(required=False, type='bool', default=False),
+ ),
+ required_if=[('state', 'planned', ['plan_file'])],
+ supports_check_mode=True,
+ )
+
+ project_path = module.params.get('project_path')
+ bin_path = module.params.get('binary_path')
+ workspace = module.params.get('workspace')
+ purge_workspace = module.params.get('purge_workspace')
+ state = module.params.get('state')
+ variables = module.params.get('variables') or {}
+ variables_files = module.params.get('variables_files')
+ plan_file = module.params.get('plan_file')
+ state_file = module.params.get('state_file')
+ force_init = module.params.get('force_init')
+ backend_config = module.params.get('backend_config')
+ backend_config_files = module.params.get('backend_config_files')
+ init_reconfigure = module.params.get('init_reconfigure')
+
+ if bin_path is not None:
+ command = [bin_path]
+ else:
+ command = [module.get_bin_path('terraform', required=True)]
+
+ if force_init:
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)
+
+ workspace_ctx = get_workspace_context(command[0], project_path)
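+ # Switch to the requested workspace, creating it first if it does not exist yet.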
+ if workspace_ctx["current"] != workspace:
+ if workspace not in workspace_ctx["all"]:
+ create_workspace(command[0], project_path, workspace)
+ else:
+ select_workspace(command[0], project_path, workspace)
+
+ if state == 'present':
+ command.extend(APPLY_ARGS)
+ elif state == 'absent':
+ command.extend(DESTROY_ARGS)
+
+ variables_args = []
+ for k, v in variables.items():
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, v)
+ ])
+ if variables_files:
+ for f in variables_files:
+ variables_args.extend(['-var-file', f])
+
+ preflight_validation(command[0], project_path, variables_args)
+
+ if module.params.get('lock') is not None:
+ if module.params.get('lock'):
+ command.append('-lock=true')
+ else:
+ command.append('-lock=false')
+ if module.params.get('lock_timeout') is not None:
+ command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+
+ for t in (module.params.get('targets') or []):
+ command.extend(['-target', t])
+
+ # we aren't sure if this plan will result in changes, so assume yes
+ needs_application, changed = True, False
+
+ out, err = '', ''
+
+ if state == 'absent':
+ command.extend(variables_args)
+ elif state == 'present' and plan_file:
+ if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+ command.append(plan_file)
+ else:
+ module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+ else:
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, plan_file)
+ command.append(plan_file)
+
+ if needs_application and not module.check_mode and not state == 'planned':
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ # checks out to decide if changes were made during execution
+ if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
+ changed = True
+
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
+ if rc == 1:
+ module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+ outputs = {}
+ elif rc != 0:
+ module.fail_json(
+ msg="Failure when getting Terraform outputs. "
+ "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+ command=' '.join(outputs_command))
+ else:
+ outputs = json.loads(outputs_text)
+
+ # Restore the Terraform workspace found when running the module
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ if state == 'absent' and workspace != 'default' and purge_workspace is True:
+ remove_workspace(command[0], project_path, workspace)
+
+ module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
new file mode 100644
index 00000000..25923cb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+short_description: Get facts reported on xenserver
+description:
+ - Reads data out of XenAPI; can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp (@caphrim007)
+ - Robin Lee (@cheese)
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+ community.general.xenserver_facts:
+
+- name: Print running VMs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+# "item": "Control domain on host: 10.0.13.22",
+# "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+ result = distro.linux_distribution()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
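+    # xapi_local() connects through the host's local socket; empty credentials are accepted for this local session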
+ session.xenapi.login_with_password('', '')
+ return session
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ networks = change_keys(recs, key='name_label')
+ return networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
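+    # only the first seven interfaces (eth0..eth6) and their bond counterparts (bond0..bond6) are matched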
+ for pif in pifs.values():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+ Take a xapi dict, and make the keys the value of recs[ref][key].
+
+ Preserves the ref in rec['ref']
+
+ """
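+    # Illustrative example (hypothetical data), with key='name_label':
+    #   {'OpaqueRef:1': {'uuid': 'abc', 'name_label': 'net0'}}
+    # becomes
+    #   {'net0': {'uuid': 'abc', 'name_label': 'net0', 'ref': 'OpaqueRef:1'}}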
+ new_recs = {}
+
+ for ref, rec in recs.items():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ for param_name, param_value in rec.items():
+ # param_value may be of type xmlrpc.client.DateTime,
+            # which is not simply convertible to str.
+ # Use 'value' attr to get the str value,
+ # following an example in xmlrpc.client.DateTime document
+ if hasattr(param_value, "value"):
+ rec[param_name] = param_value.value
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
+
+
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+ recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+ vms = change_keys(recs, key='name_label')
+ return vms
+
+
+def get_srs(session):
+ recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='name_label')
+ return srs
+
+
+def main():
+ module = AnsibleModule({})
+
+ if not HAVE_XENAPI:
+ module.fail_json(changed=False, msg="python xen api required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure as e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+ module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
new file mode 100644
index 00000000..90694861
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy.
+description:
+ - Create, remove, reconfigure, update firewall policies.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a firewall policy state to create, remove, or update.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ firewall_policy:
+ description:
+ - The identifier (id or name) of the firewall policy used with update state.
+ type: str
+ rules:
+ description:
+ - A list of rules that will be set for the firewall policy.
+ Each rule must contain protocol parameter, in addition to three optional parameters
+ (port_from, port_to, and source)
+ type: list
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a firewall policy.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing firewall policy.
+        Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ description:
+ description:
+ - Firewall policy description. maxLength=256
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible-firewall-policy
+ description: Testing creation of firewall policies with ansible
+ rules:
+ -
+ protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible-firewall-policy
+
+- name: Update a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: update
+ firewall_policy: ansible-firewall-policy
+ name: ansible-firewall-policy-updated
+ description: Testing creation of firewall policies with ansible - updated
+
+- name: Add server to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ add_server_ips:
+ - server_identifier (id or name)
+ - server_identifier #2 (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ description: Adding rules to an existing firewall policy
+ add_rules:
+ -
+ protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+firewall_policy:
+ description: Information about the firewall policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_firewall_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
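+    # In check mode, exit immediately and report whether a change would have been made.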
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
+ """
+ Assigns servers to a firewall policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in server_ids:
+ server = get_server(oneandone_conn, _server_id, True)
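+            # attach using the server's first assigned IP (the module assumes at least one IP exists)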
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.attach_server_firewall_policy(
+ firewall_id=firewall_id,
+ server_ips=attach_servers)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
+ """
+ Unassigns a server/IP from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ firewall_server = oneandone_conn.get_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ if firewall_server:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
+ """
+ Adds new rules to a firewall policy.
+ """
+ try:
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ if module.check_mode:
+ firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
+ if (firewall_rules and firewall_policy_id):
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.add_firewall_policy_rule(
+ firewall_id=firewall_id,
+ firewall_policy_rules=firewall_rules
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
+ """
+ Removes a rule from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_firewall_policy_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_firewall_policy(module, oneandone_conn):
+ """
+ Updates a firewall policy based on input arguments.
+ Firewall rules and server ips can be added/removed to/from
+ firewall policy. Firewall policy name and description can be
+ updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ firewall_policy_id = module.params.get('firewall_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
+ if firewall_policy is None:
+ _check_mode(module, False)
+
+ if name or description:
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.modify_firewall(
+ firewall_id=firewall_policy['id'],
+ name=name,
+ description=description)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_server_ips))
+
+ firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+
+ _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ if add_rules:
+ firewall_policy = _add_firewall_rules(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_rules)
+ _check_mode(module, firewall_policy)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+
+ _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def create_firewall_policy(module, oneandone_conn):
+ """
+ Create a new firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ firewall_policy_obj = oneandone.client.FirewallPolicy(
+ name=name,
+ description=description
+ )
+
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.create_firewall_policy(
+ firewall_policy=firewall_policy_obj,
+ firewall_policy_rules=firewall_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.firewall_policy,
+ firewall_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
+ changed = True if firewall_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_firewall_policy(module, oneandone_conn):
+ """
+ Removes a firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ fp_id = module.params.get('name')
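+        # 'name' may hold either the firewall policy id or its name; get_firewall_policy resolves it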
+ firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
+ if module.check_mode:
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
+
+ changed = True if firewall_policy else False
+
+ return (changed, {
+ 'id': firewall_policy['id'],
+ 'name': firewall_policy['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ description=dict(type='str'),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a firewall policy.")
+ try:
+ (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'update':
+ if not module.params.get('firewall_policy'):
+ module.fail_json(
+ msg="'firewall_policy' parameter is required to update a firewall policy.")
+ try:
+ (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ for param in ('name', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new firewall policies." % param)
+ try:
+ (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, firewall_policy=firewall_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
new file mode 100644
index 00000000..62551560
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_load_balancer
+short_description: Configure 1&1 load balancer.
+description:
+ - Create, remove, update load balancers.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a load balancer state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ load_balancer:
+ description:
+ - The identifier (id or name) of the load balancer used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ health_check_test:
+ description:
+ - Type of the health check. At the moment, HTTP is not allowed.
+ type: str
+ choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ health_check_interval:
+ description:
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ type: str
+ health_check_path:
+ description:
+      - URL to call for checking. Required for HTTP health check. maxLength=1000
+ type: str
+ required: false
+ health_check_parse:
+ description:
+ - Regular expression to check. Required for HTTP health check. maxLength=64
+ type: str
+ required: false
+ persistence:
+ description:
+ - Persistence.
+ type: bool
+ persistence_time:
+ description:
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ type: str
+ method:
+ description:
+ - Balancing procedure.
+ type: str
+ choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ datacenter:
+ description:
+ - ID or country code of the datacenter where the load balancer will be created.
+ - If not specified, it defaults to I(US).
+ type: str
+ choices: [ "US", "ES", "DE", "GB" ]
+ required: false
+ rules:
+ description:
+ - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
+ port_balancer, and port_server parameters, in addition to source parameter, which is optional.
+ type: list
+ description:
+ description:
+ - Description of the load balancer. maxLength=256
+ type: str
+ required: false
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a load balancer.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing load balancer.
+        Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ type: list
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ description: Testing creation of load balancer with ansible
+ health_check_test: TCP
+ health_check_interval: 40
+ persistence: true
+ persistence_time: 1200
+ method: ROUND_ROBIN
+ datacenter: US
+ rules:
+ -
+ protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ wait: true
+ wait_timeout: 500
+ state: absent
+
+- name: Update a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer
+ name: ansible load balancer updated
+ description: Testing the update of a load balancer with ansible
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add server to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding server to a load balancer with ansible
+ add_server_ips:
+ - server identifier (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing server from a load balancer with ansible
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ add_rules:
+ -
+ protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+load_balancer:
+ description: Information about the load balancer that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_load_balancer,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
+METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
+ """
+ Assigns servers to a load balancer.
+ """
+ try:
+ attach_servers = []
+
+ for server_id in server_ids:
+ server = get_server(oneandone_conn, server_id, True)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.attach_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ips=attach_servers)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
+ """
+ Unassigns a server/IP from a load balancer.
+ """
+ try:
+ if module.check_mode:
+ lb_server = oneandone_conn.get_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ if lb_server:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
+ """
+ Adds new rules to a load_balancer.
+ """
+ try:
+ load_balancer_rules = []
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ if module.check_mode:
+ lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
+ if (load_balancer_rules and lb_id):
+ return True
+ return False
+
+ load_balancer = oneandone_conn.add_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
+ """
+ Removes a rule from a load_balancer.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id
+ )
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_load_balancer(module, oneandone_conn):
+ """
+ Updates a load_balancer based on input arguments.
+ Load balancer rules and server ips can be added/removed to/from
+ load balancer. Load balancer name, description, health_check_test,
+ health_check_interval, persistence, persistence_time, and method
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ load_balancer_id = module.params.get('load_balancer')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
+ if load_balancer is None:
+ _check_mode(module, False)
+
+ if (name or description or health_check_test or health_check_interval or health_check_path or
+ health_check_parse or persistence or persistence_time or method):
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.modify_load_balancer(
+ load_balancer_id=load_balancer['id'],
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_server_ips))
+
+ load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+
+ _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ if add_rules:
+ load_balancer = _add_load_balancer_rules(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_rules)
+ _check_mode(module, load_balancer)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+
+ _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ try:
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_load_balancer(module, oneandone_conn):
+ """
+ Create a new load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ datacenter = module.params.get('datacenter')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ load_balancer_rules = []
+
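+        # resolve the datacenter country code or id to its API id before building the load balancer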
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ _check_mode(module, True)
+ load_balancer_obj = oneandone.client.LoadBalancer(
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method,
+ datacenter_id=datacenter_id
+ )
+
+ load_balancer = oneandone_conn.create_load_balancer(
+ load_balancer=load_balancer_obj,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.load_balancer,
+ load_balancer['id'],
+ wait_timeout,
+ wait_interval)
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
+ changed = True if load_balancer else False
+
+ _check_mode(module, False)
+
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_load_balancer(module, oneandone_conn):
+ """
+ Removes a load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ lb_id = module.params.get('name')
+ load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
+ if module.check_mode:
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
+
+ changed = True if load_balancer else False
+
+ return (changed, {
+ 'id': load_balancer['id'],
+ 'name': load_balancer['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ load_balancer=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ health_check_test=dict(
+ choices=HEALTH_CHECK_TESTS),
+ health_check_interval=dict(type='str'),
+ health_check_path=dict(type='str'),
+ health_check_parse=dict(type='str'),
+ persistence=dict(type='bool'),
+ persistence_time=dict(type='str'),
+ method=dict(
+ choices=METHODS),
+ datacenter=dict(
+ choices=DATACENTERS),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a load balancer.")
+ try:
+ (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('load_balancer'):
+ module.fail_json(
+ msg="'load_balancer' parameter is required for updating a load balancer.")
+ try:
+ (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
+ 'persistence_time', 'method', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new load balancers." % param)
+ try:
+ (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, load_balancer=load_balancer)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
new file mode 100644
index 00000000..79fed9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_monitoring_policy
+short_description: Configure 1&1 monitoring policy.
+description:
+ - Create, remove, update monitoring policies
+ (and add/remove ports, processes, and servers).
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a monitoring policy's state to create, remove, update.
+ type: str
+ required: false
+ default: present
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ type: str
+ monitoring_policy:
+ description:
+ - The identifier (id or name) of the monitoring policy used with update state.
+ type: str
+ agent:
+ description:
+      - Set to true to use the agent.
+ type: str
+ email:
+ description:
+ - User's email. maxLength=128
+ type: str
+ description:
+ description:
+ - Monitoring policy description. maxLength=256
+ type: str
+ required: false
+ thresholds:
+ description:
+      - Monitoring policy thresholds. Each of the suboptions has warning and critical,
+        which both have alert and value suboptions. Warning is used to set limits for
+        warning alerts, critical is used to set critical alerts. alert enables the alert,
+        and value sets the threshold at which an alert is raised.
+ type: list
+ suboptions:
+ cpu:
+ description:
+ - Consumption limits of CPU.
+ required: true
+ ram:
+ description:
+ - Consumption limits of RAM.
+ required: true
+ disk:
+ description:
+ - Consumption limits of hard disk.
+ required: true
+ internal_ping:
+ description:
+ - Response limits of internal ping.
+ required: true
+ transfer:
+ description:
+ - Consumption limits for transfer.
+ required: true
+ ports:
+ description:
+      - Array of ports that will be monitored.
+ type: list
+ suboptions:
+ protocol:
+ description:
+ - Internet protocol.
+ choices: [ "TCP", "UDP" ]
+ required: true
+ port:
+ description:
+ - Port number. minimum=1, maximum=65535
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ required: true
+ email_notification:
+ description:
+ - Set true for sending e-mail notifications.
+ required: true
+ processes:
+ description:
+      - Array of processes that will be monitored.
+ type: list
+ suboptions:
+ process:
+ description:
+ - Name of the process. maxLength=50
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RUNNING", "NOT_RUNNING" ]
+ required: true
+ add_ports:
+ description:
+ - Ports to add to the monitoring policy.
+ type: list
+ required: false
+ add_processes:
+ description:
+ - Processes to add to the monitoring policy.
+ type: list
+ required: false
+ add_servers:
+ description:
+ - Servers to add to the monitoring policy.
+ type: list
+ required: false
+ remove_ports:
+ description:
+ - Ports to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_processes:
+ description:
+ - Processes to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_servers:
+ description:
+ - Servers to remove from the monitoring policy.
+ type: list
+ required: false
+ update_ports:
+ description:
+ - Ports to be updated on the monitoring policy.
+ type: list
+ required: false
+ update_processes:
+ description:
+ - Processes to be updated on the monitoring policy.
+ type: list
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible monitoring policy
+ description: Testing creation of a monitoring policy with ansible
+ email: your@emailaddress.com
+ agent: true
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ -
+ ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
+ ports:
+ -
+ protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
+ processes:
+ -
+ process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+
+- name: Destroy a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible monitoring policy
+
+- name: Update a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy
+ name: ansible monitoring policy updated
+ description: Testing creation of a monitoring policy with ansible updated
+ email: another@emailaddress.com
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
+ wait: true
+ state: update
+
+- name: Add a port to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_ports:
+ -
+ protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing ports of a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_ports:
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a port from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_ports:
+ - port_id
+ state: update
+
+- name: Add a process to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_processes:
+ -
+ process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing processes of a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_processes:
+ -
+ id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ -
+ id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a process from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_processes:
+ - process_id
+ wait: true
+ state: update
+
+- name: Add server to a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_servers:
+ - server id or name
+ wait: true
+ state: update
+
+- name: Remove server from a monitoring policy
+  community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_servers:
+ - server01
+ wait: true
+ state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+ description: Information about the monitoring policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_monitoring_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+ """
+ Adds new ports to a monitoring policy.
+ """
+ try:
+ monitoring_policy_ports = []
+
+ for _port in ports:
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=_port['protocol'],
+ port=_port['port'],
+ alert_if=_port['alert_if'],
+ email_notification=_port['email_notification']
+ )
+ monitoring_policy_ports.append(monitoring_policy_port)
+
+ if module.check_mode:
+ if monitoring_policy_ports:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_port(
+ monitoring_policy_id=monitoring_policy_id,
+ ports=monitoring_policy_ports)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+ """
+ Removes a port from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if monitoring_policy:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+ """
+ Modifies a monitoring policy port.
+ """
+ try:
+ if module.check_mode:
+ cm_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if cm_port:
+ return True
+ return False
+
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=port['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id,
+ port=monitoring_policy_port)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+ """
+ Adds new processes to a monitoring policy.
+ """
+ try:
+ monitoring_policy_processes = []
+
+ for _process in processes:
+ monitoring_policy_process = oneandone.client.Process(
+ process=_process['process'],
+ alert_if=_process['alert_if'],
+ email_notification=_process['email_notification']
+ )
+ monitoring_policy_processes.append(monitoring_policy_process)
+
+ if module.check_mode:
+ mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+ if (monitoring_policy_processes and mp_id):
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_process(
+ monitoring_policy_id=monitoring_policy_id,
+ processes=monitoring_policy_processes)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+ """
+ Removes a process from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id
+ )
+ if process:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
+ """
+ Modifies a monitoring policy process.
+ """
+ try:
+ if module.check_mode:
+ cm_process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ if cm_process:
+ return True
+ return False
+
+ monitoring_policy_process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=process['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id,
+ process=monitoring_policy_process)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
+ """
+ Attaches servers to a monitoring policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in servers:
+ server_id = get_server(oneandone_conn, _server_id)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server_id
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ servers=attach_servers)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
+ """
+ Detaches a server from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ mp_server = oneandone_conn.get_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ if mp_server:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_monitoring_policy(module, oneandone_conn):
+ """
+ Updates a monitoring_policy based on input arguments.
+ Monitoring policy ports, processes and servers can be added/removed to/from
+ a monitoring policy. Monitoring policy name, description, email,
+ thresholds for cpu, ram, disk, transfer and internal_ping
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ monitoring_policy_id = module.params.get('monitoring_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ thresholds = module.params.get('thresholds')
+ add_ports = module.params.get('add_ports')
+ update_ports = module.params.get('update_ports')
+ remove_ports = module.params.get('remove_ports')
+ add_processes = module.params.get('add_processes')
+ update_processes = module.params.get('update_processes')
+ remove_processes = module.params.get('remove_processes')
+ add_servers = module.params.get('add_servers')
+ remove_servers = module.params.get('remove_servers')
+
+ changed = False
+
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+ if monitoring_policy is None:
+ _check_mode(module, False)
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(
+ name=name,
+ description=description,
+ email=email
+ )
+
+ _thresholds = None
+
+ if thresholds:
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
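+            # Each 'thresholds' item is expected to be a single-key dict keyed by the
+            # entity name, e.g. {'cpu': {'warning': {'value': 80, 'alert': False},
+            #                            'critical': {'value': 95, 'alert': True}}}.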
+            for threshold in thresholds:
+                key = list(threshold)[0]
+                if key in threshold_entities:
+                    _threshold = oneandone.client.Threshold(
+                        entity=key,
+                        warning_value=threshold[key]['warning']['value'],
+                        warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                        critical_value=threshold[key]['critical']['value'],
+                        critical_alert=str(threshold[key]['critical']['alert']).lower())
+                    _thresholds.append(_threshold)
+
+ if name or description or email or thresholds:
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.modify_monitoring_policy(
+ monitoring_policy_id=monitoring_policy['id'],
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds)
+ changed = True
+
+ if add_ports:
+ if module.check_mode:
+ _check_mode(module, _add_ports(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_ports))
+
+ monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+ changed = True
+
+ if update_ports:
+ chk_changed = False
+ for update_port in update_ports:
+ if module.check_mode:
+ chk_changed |= _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+
+ _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+            _check_mode(module, chk_changed)
+            monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_ports:
+ chk_changed = False
+ for port_id in remove_ports:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+
+ _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_processes:
+ monitoring_policy = _add_processes(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_processes)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if update_processes:
+ chk_changed = False
+ for update_process in update_processes:
+ if module.check_mode:
+ chk_changed |= _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+
+ _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_processes:
+ chk_changed = False
+ for process_id in remove_processes:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+
+ _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_servers:
+ monitoring_policy = _attach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_servers)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if remove_servers:
+ chk_changed = False
+ for _server_id in remove_servers:
+ server_id = get_server(oneandone_conn, _server_id)
+
+ if module.check_mode:
+ chk_changed |= _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+
+ _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_monitoring_policy(module, oneandone_conn):
+ """
+ Creates a new monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ agent = module.params.get('agent')
+ thresholds = module.params.get('thresholds')
+ ports = module.params.get('ports')
+ processes = module.params.get('processes')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+        _monitoring_policy = oneandone.client.MonitoringPolicy(
+            name,
+            description,
+            email,
+            agent)
+
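+        # The agent flag is sent as a lowercase string ('true'/'false'),
+        # matching how the other boolean-ish fields are serialized below.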
+ _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+        for threshold in thresholds:
+            key = list(threshold)[0]
+            if key in threshold_entities:
+                _threshold = oneandone.client.Threshold(
+                    entity=key,
+                    warning_value=threshold[key]['warning']['value'],
+                    warning_alert=str(threshold[key]['warning']['alert']).lower(),
+                    critical_value=threshold[key]['critical']['value'],
+                    critical_alert=str(threshold[key]['critical']['alert']).lower())
+                _thresholds.append(_threshold)
+
+ _ports = []
+ for port in ports:
+ _port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=str(port['email_notification']).lower())
+ _ports.append(_port)
+
+ _processes = []
+ for process in processes:
+ _process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=str(process['email_notification']).lower())
+ _processes.append(_process)
+
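+        # In check mode, exit here and report that a new policy would be created.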
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.create_monitoring_policy(
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds,
+ ports=_ports,
+ processes=_processes
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.monitoring_policy,
+ monitoring_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if monitoring_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_monitoring_policy(module, oneandone_conn):
+ """
+ Removes a monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ mp_id = module.params.get('name')
+ monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
+ if module.check_mode:
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
+
+ changed = True if monitoring_policy else False
+
+ return (changed, {
+ 'id': monitoring_policy['id'],
+ 'name': monitoring_policy['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ agent=dict(type='str'),
+ email=dict(type='str'),
+ description=dict(type='str'),
+ thresholds=dict(type='list', default=[]),
+ ports=dict(type='list', default=[]),
+ processes=dict(type='list', default=[]),
+ add_ports=dict(type='list', default=[]),
+ update_ports=dict(type='list', default=[]),
+ remove_ports=dict(type='list', default=[]),
+ add_processes=dict(type='list', default=[]),
+ update_processes=dict(type='list', default=[]),
+ remove_processes=dict(type='list', default=[]),
+ add_servers=dict(type='list', default=[]),
+ remove_servers=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('monitoring_policy'):
+ module.fail_json(
+ msg="'monitoring_policy' parameter is required to update a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for a new monitoring policy." % param)
+ try:
+ (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
new file mode 100644
index 00000000..7eae6ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_private_network
+short_description: Configure 1&1 private networking.
+description:
+    - Create, remove, reconfigure, and update a private network.
+      This module has a dependency on 1and1 >= 1.0.
+options:
+ state:
+ description:
+ - Define a network's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ private_network:
+ description:
+ - The identifier (id or name) of the network used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Private network name used with present state. Used as identifier (id or name) when used with absent state.
+ type: str
+ description:
+ description:
+ - Set a description for the network.
+ type: str
+ datacenter:
+ description:
+ - The identifier of the datacenter where the private network will be created
+ type: str
+ choices: [US, ES, DE, GB]
+ network_address:
+ description:
+      - Set a private network space, for example 192.168.1.0.
+ type: str
+ subnet_mask:
+ description:
+      - Set the netmask for the private network, for example 255.255.255.0.
+ type: str
+ add_members:
+ description:
+ - List of server identifiers (name or id) to be added to the private network.
+ type: list
+ remove_members:
+ description:
+ - List of server identifiers (name or id) to be removed from the private network.
+ type: list
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ name: backup_network
+ description: Testing creation of a private network with ansible
+ network_address: 70.35.193.100
+ subnet_mask: 255.0.0.0
+ datacenter: US
+
+- name: Destroy a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: backup_network
+
+- name: Modify the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ network_address: 192.168.2.0
+ subnet_mask: 255.255.255.0
+
+- name: Add members to the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ add_members:
+ - server identifier (id or name)
+
+- name: Remove members from the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ remove_members:
+ - server identifier (id or name)
+'''
+
+RETURN = '''
+private_network:
+ description: Information about the private network.
+ type: dict
+ sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_private_network,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
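+    # In check mode, exit immediately and report whether a change would have been
+    # made; outside check mode this is a no-op and execution continues.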
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_servers(module, oneandone_conn, name, members):
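+    # Attach the given servers (oneandone.client.AttachServer objects) to the
+    # private network identified by 'name' (a name or id).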
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id and members:
+ return True
+ return False
+
+ network = oneandone_conn.attach_private_network_servers(
+ private_network_id=private_network_id,
+ server_ids=members)
+
+ return network
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id:
+ network_member = oneandone_conn.get_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+ if network_member:
+ return True
+ return False
+
+ network = oneandone_conn.remove_private_network_server(
+            private_network_id=private_network_id,
+ server_id=member_id)
+
+ return network
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+ """
+ Create new private network
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any network was added.
+ """
+ name = module.params.get('name')
+ description = module.params.get('description')
+ network_address = module.params.get('network_address')
+ subnet_mask = module.params.get('subnet_mask')
+ datacenter = module.params.get('datacenter')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+    datacenter_id = None
+    if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ network = oneandone_conn.create_private_network(
+ private_network=oneandone.client.PrivateNetwork(
+ name=name,
+ description=description,
+ network_address=network_address,
+ subnet_mask=subnet_mask,
+ datacenter_id=datacenter_id
+ ))
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.private_network,
+ network['id'],
+ wait_timeout,
+ wait_interval)
+ network = get_private_network(oneandone_conn,
+ network['id'],
+ True)
+
+ changed = True if network else False
+
+ _check_mode(module, False)
+
+ return (changed, network)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+ """
+ Modifies a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ _private_network_id = module.params.get('private_network')
+ _name = module.params.get('name')
+ _description = module.params.get('description')
+ _network_address = module.params.get('network_address')
+ _subnet_mask = module.params.get('subnet_mask')
+ _add_members = module.params.get('add_members')
+ _remove_members = module.params.get('remove_members')
+
+ changed = False
+
+ private_network = get_private_network(oneandone_conn,
+ _private_network_id,
+ True)
+ if private_network is None:
+ _check_mode(module, False)
+
+ if _name or _description or _network_address or _subnet_mask:
+ _check_mode(module, True)
+ private_network = oneandone_conn.modify_private_network(
+ private_network_id=private_network['id'],
+ name=_name,
+ description=_description,
+ network_address=_network_address,
+ subnet_mask=_subnet_mask)
+ changed = True
+
+ if _add_members:
+ instances = []
+
+ for member in _add_members:
+ instance_id = get_server(oneandone_conn, member)
+ instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+ instances.extend([instance_obj])
+ private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+ _check_mode(module, private_network)
+ changed = True
+
+ if _remove_members:
+ chk_changed = False
+ for member in _remove_members:
+ instance = get_server(oneandone_conn, member, True)
+
+ if module.check_mode:
+ chk_changed |= _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ _check_mode(module, instance and chk_changed)
+
+ _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ private_network = get_private_network(oneandone_conn,
+ private_network['id'],
+ True)
+ changed = True
+
+ return (changed, private_network)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+ """
+ Removes a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+ """
+ try:
+ pn_id = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ private_network_id = get_private_network(oneandone_conn, pn_id)
+ if module.check_mode:
+ if private_network_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ private_network = oneandone_conn.delete_private_network(private_network_id)
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.private_network,
+ private_network['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if private_network else False
+
+ return (changed, {
+ 'id': private_network['id'],
+ 'name': private_network['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ private_network=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ network_address=dict(type='str'),
+ subnet_mask=dict(type='str'),
+ add_members=dict(type='list', default=[]),
+ remove_members=dict(type='list', default=[]),
+ datacenter=dict(
+ choices=DATACENTERS),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a network.")
+ try:
+ (changed, private_network) = remove_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('private_network'):
+ module.fail_json(
+ msg="'private_network' parameter is required for updating a network.")
+ try:
+ (changed, private_network) = update_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for new networks.")
+ try:
+ (changed, private_network) = create_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, private_network=private_network)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
new file mode 100644
index 00000000..edefbc93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs.
+description:
+ - Create, update, and remove public IPs.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a public ip state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ reverse_dns:
+ description:
+      - Reverse DNS name (maximum length 256 characters).
+ type: str
+ required: false
+ datacenter:
+ description:
+ - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ type: str
+ choices: [US, ES, DE, GB]
+ default: US
+ required: false
+ type:
+ description:
+ - Type of IP. Currently, only IPV4 is available.
+ type: str
+ choices: ["IPV4", "IPV6"]
+ default: 'IPV4'
+ required: false
+ public_ip_id:
+ description:
+ - The ID of the public IP used with update and delete states.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ reverse_dns: example.com
+ datacenter: US
+ type: IPV4
+
+- name: Update a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ reverse_dns: secondexample.com
+ state: update
+
+- name: Delete a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ state: absent
+'''
+
+RETURN = '''
+public_ip:
+ description: Information about the public ip that was processed
+ type: dict
+ sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_public_ip,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def create_public_ip(module, oneandone_conn):
+ """
+ Create new public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was added.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ datacenter = module.params.get('datacenter')
+ ip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.create_public_ip(
+ reverse_dns=reverse_dns,
+ ip_type=ip_type,
+ datacenter_id=datacenter_id)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_public_ip(module, oneandone_conn):
+ """
+ Update a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was changed.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ public_ip_id = module.params.get('public_ip_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.modify_public_ip(
+ ip_id=public_ip['id'],
+ reverse_dns=reverse_dns)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_public_ip(module, oneandone_conn):
+ """
+ Delete a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was deleted.
+ """
+ public_ip_id = module.params.get('public_ip_id')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ deleted_public_ip = oneandone_conn.delete_public_ip(
+ ip_id=public_ip['id'])
+
+ changed = True if deleted_public_ip else False
+
+ return (changed, {
+ 'id': public_ip['id']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ public_ip_id=dict(type='str'),
+ reverse_dns=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ type=dict(
+ choices=TYPES,
+ default='IPV4'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to delete a public ip.")
+ try:
+ (changed, public_ip) = delete_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to update a public ip.")
+ try:
+ (changed, public_ip) = update_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ try:
+ (changed, public_ip) = create_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, public_ip=public_ip)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
new file mode 100644
index 00000000..1e6caab5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
@@ -0,0 +1,705 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_server
+short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
+description:
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
+    When a server is created, the module can optionally wait for it to be 'running' before returning.
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start or stop it.
+ type: str
+ default: present
+ choices: [ "present", "absent", "running", "stopped" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the
+ ONEANDONE_AUTH_TOKEN environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: [ "US", "ES", "DE", "GB" ]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server.
+ It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server.
+ It is required only for 'present' state, and it is mutually exclusive with
+ vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ type: str
+ vcore:
+ description:
+ - The total number of processors.
+ It must be provided with cores_per_processor, ram, and hdds parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor.
+ It must be provided with vcore, ram, and hdds parameters.
+ type: int
+ ram:
+ description:
+ - The amount of RAM memory.
+        It must be provided with vcore, cores_per_processor, and hdds parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested "size" and "is_main" properties.
+ It must be provided with vcore, cores_per_processor, and ram parameters.
+ type: list
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+      - Server identifier (ID or hostname). It is required for all states except 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: [ "cloud", "baremetal", "k8s_node" ]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning.
+ Also used for delete operation (set to 'false' if you don't want to wait
+ for each individual server to be deleted before moving on with
+        other tasks).
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the wait_for methods
+ type: int
+ default: 5
+ auto_increment:
+ description:
+ - When creating multiple servers at once, whether to differentiate
+ hostnames by appending a count after them or substituting the count
+ where there is a %02d or %03d in the hostname string.
+ type: bool
+ default: 'yes'
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+
+'''
+
+EXAMPLES = '''
+- name: Create three servers and enumerate their names
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ fixed_instance_size: XL
+ datacenter: US
+ appliance: C5A349786169F140BCBC335675014C08
+ auto_increment: true
+ count: 3
+
+- name: Create three servers, passing in an ssh_key
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ vcore: 2
+ cores_per_processor: 4
+ ram: 8.0
+ hdds:
+ - size: 50
+ is_main: false
+ datacenter: ES
+ appliance: C5A349786169F140BCBC335675014C08
+ count: 3
+ wait: yes
+ wait_timeout: 600
+ wait_interval: 10
+ ssh_key: SSH_PUBLIC_KEY
+
+- name: Removing server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: absent
+ server: 'node01'
+
+- name: Starting server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: running
+ server: 'node01'
+
+- name: Stopping server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: stopped
+ server: 'node01'
+'''
+
+RETURN = '''
+servers:
+ description: Information about each server that was processed
+ type: list
+ sample: '[{"hostname": "my-server", "id": "server-id"}]'
+ returned: always
+'''
+
+import os
+import time
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_fixed_instance_size,
+ get_appliance,
+ get_private_network,
+ get_monitoring_policy,
+ get_firewall_policy,
+ get_load_balancer,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+ONEANDONE_SERVER_STATES = (
+ 'DEPLOYING',
+ 'POWERED_OFF',
+ 'POWERED_ON',
+ 'POWERING_ON',
+ 'POWERING_OFF',
+)
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _create_server(module, oneandone_conn, hostname, description,
+ fixed_instance_size_id, vcore, cores_per_processor, ram,
+ hdds, datacenter_id, appliance_id, ssh_key,
+ private_network_id, firewall_policy_id, load_balancer_id,
+ monitoring_policy_id, server_type, wait, wait_timeout,
+ wait_interval):
+
+ try:
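+        # Idempotent create: if a server with this hostname already exists,
+        # skip creation (check mode reports no change for it).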
+ existing_server = get_server(oneandone_conn, hostname)
+
+ if existing_server:
+ if module.check_mode:
+ return False
+ return None
+
+ if module.check_mode:
+ return True
+
+ server = oneandone_conn.create_server(
+ oneandone.client.Server(
+ name=hostname,
+ description=description,
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ appliance_id=appliance_id,
+ datacenter_id=datacenter_id,
+ rsa_key=ssh_key,
+ private_network_id=private_network_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ monitoring_policy_id=monitoring_policy_id,
+ server_type=server_type,), hdds)
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+
+ return server
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _insert_network_data(server):
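+    # Promote the server's public IPv4/IPv6 addresses from the 'ips' list to
+    # top-level 'public_ipv4'/'public_ipv6' keys for easier consumption.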
+ for addr_data in server['ips']:
+ if addr_data['type'] == 'IPV6':
+ server['public_ipv6'] = addr_data['ip']
+ elif addr_data['type'] == 'IPV4':
+ server['public_ipv4'] = addr_data['ip']
+ return server
+
+
+def create_server(module, oneandone_conn):
+ """
+ Create new server
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any server was added, and a 'servers' attribute with the list of the
+ created servers' hostname, id and ip addresses.
+ """
+ hostname = module.params.get('hostname')
+ description = module.params.get('description')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ fixed_instance_size = module.params.get('fixed_instance_size')
+ vcore = module.params.get('vcore')
+ cores_per_processor = module.params.get('cores_per_processor')
+ ram = module.params.get('ram')
+ hdds = module.params.get('hdds')
+ datacenter = module.params.get('datacenter')
+ appliance = module.params.get('appliance')
+ ssh_key = module.params.get('ssh_key')
+ private_network = module.params.get('private_network')
+ monitoring_policy = module.params.get('monitoring_policy')
+ firewall_policy = module.params.get('firewall_policy')
+ load_balancer = module.params.get('load_balancer')
+ server_type = module.params.get('server_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ fixed_instance_size_id = None
+ if fixed_instance_size:
+ fixed_instance_size_id = get_fixed_instance_size(
+ oneandone_conn,
+ fixed_instance_size)
+ if fixed_instance_size_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='fixed_instance_size %s not found.' % fixed_instance_size)
+
+ appliance_id = get_appliance(oneandone_conn, appliance)
+ if appliance_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='appliance %s not found.' % appliance)
+
+ private_network_id = None
+ if private_network:
+ private_network_id = get_private_network(
+ oneandone_conn,
+ private_network)
+ if private_network_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='private network %s not found.' % private_network)
+
+ monitoring_policy_id = None
+ if monitoring_policy:
+ monitoring_policy_id = get_monitoring_policy(
+ oneandone_conn,
+ monitoring_policy)
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='monitoring policy %s not found.' % monitoring_policy)
+
+ firewall_policy_id = None
+ if firewall_policy:
+ firewall_policy_id = get_firewall_policy(
+ oneandone_conn,
+ firewall_policy)
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='firewall policy %s not found.' % firewall_policy)
+
+ load_balancer_id = None
+ if load_balancer:
+ load_balancer_id = get_load_balancer(
+ oneandone_conn,
+ load_balancer)
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='load balancer %s not found.' % load_balancer)
+
+ if auto_increment:
+ hostnames = _auto_increment_hostname(count, hostname)
+ descriptions = _auto_increment_description(count, description)
+ else:
+ hostnames = [hostname] * count
+ descriptions = [description] * count
+
+ hdd_objs = []
+ if hdds:
+ for hdd in hdds:
+ hdd_objs.append(oneandone.client.Hdd(
+ size=hdd['size'],
+ is_main=hdd['is_main']
+ ))
+
+ servers = []
+ for index, name in enumerate(hostnames):
+ server = _create_server(
+ module=module,
+ oneandone_conn=oneandone_conn,
+ hostname=name,
+ description=descriptions[index],
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ hdds=hdd_objs,
+ datacenter_id=datacenter_id,
+ appliance_id=appliance_id,
+ ssh_key=ssh_key,
+ private_network_id=private_network_id,
+ monitoring_policy_id=monitoring_policy_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ server_type=server_type,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ wait_interval=wait_interval)
+ if server:
+ servers.append(server)
+
+ changed = False
+
+ if servers:
+ for server in servers:
+ if server:
+ _check_mode(module, True)
+ _check_mode(module, False)
+ servers = [_insert_network_data(_server) for _server in servers]
+ changed = True
+
+ _check_mode(module, False)
+
+ return (changed, servers)
+
+
+def remove_server(module, oneandone_conn):
+ """
+ Removes a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ the server was removed, and a 'removed_server' attribute with
+ the removed server's hostname and id.
+ """
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+ removed_server = None
+
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ _check_mode(module, True)
+ try:
+ oneandone_conn.delete_server(server_id=server['id'])
+ if wait:
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ changed = True
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to terminate the server: %s" % str(ex))
+
+ removed_server = {
+ 'id': server['id'],
+ 'hostname': server['name']
+ }
+ _check_mode(module, False)
+
+ return (changed, removed_server)
+
+
+def startstop_server(module, oneandone_conn):
+ """
+ Starts or Stops a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary with a 'changed' attribute indicating whether
+ anything has changed for the server as a result of this function
+ being run, and a 'server' attribute with basic information for
+ the server.
+ """
+ state = module.params.get('state')
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+
+ # Resolve server
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ # Attempt to change the server state, only if it's not already there
+ # or on its way.
+ try:
+ if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_OFF',
+ method='SOFTWARE')
+ elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_ON',
+ method='SOFTWARE')
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to set server %s to state %s: %s" % (
+ server_id, state, str(ex)))
+
+ _check_mode(module, False)
+
+ # Make sure the server has reached the desired state
+ if wait:
+ operation_completed = False
+ wait_timeout = time.time() + wait_timeout
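+        # Poll the server every wait_interval seconds until it reports the
+        # requested power state or the deadline passes.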
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+ server_state = server['status']['state']
+ if state == 'stopped' and server_state == 'POWERED_OFF':
+ operation_completed = True
+ break
+ if state == 'running' and server_state == 'POWERED_ON':
+ operation_completed = True
+ break
+ if not operation_completed:
+ module.fail_json(
+ msg="Timeout waiting for server %s to get to state %s" % (
+ server_id, state))
+
+ changed = True
+ server = _insert_network_data(server)
+
+ _check_mode(module, False)
+
+ return (changed, server)
+
+
+def _auto_increment_hostname(count, hostname):
+ """
+ Allow a custom incremental count in the hostname when defined with the
+    string formatting (%) operator. Otherwise, increment using name-1,
+    name-2, name-3, and so forth.
+ """
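+    # For example, _auto_increment_hostname(3, 'node%02d') yields
+    # ['node01', 'node02', 'node03'], while a plain 'node' becomes
+    # ['node-1', 'node-2', 'node-3'] via the '-%01d' suffix added below.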
+ if '%' not in hostname:
+ hostname = "%s-%%01d" % hostname
+
+ return [
+ hostname % i
+ for i in xrange(1, count + 1)
+ ]
+
+
+def _auto_increment_description(count, description):
+ """
+ Allow the incremental count in the description when defined with the
+ string formatting (%) operator. Otherwise, repeat the same description.
+ """
+    if description and '%' in description:
+ return [
+ description % i
+ for i in xrange(1, count + 1)
+ ]
+ else:
+ return [description] * count
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
+ no_log=True),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ hostname=dict(type='str'),
+ description=dict(type='str'),
+ appliance=dict(type='str'),
+ fixed_instance_size=dict(type='str'),
+ vcore=dict(type='int'),
+ cores_per_processor=dict(type='int'),
+ ram=dict(type='float'),
+ hdds=dict(type='list'),
+ count=dict(type='int', default=1),
+ ssh_key=dict(type='raw'),
+ auto_increment=dict(type='bool', default=True),
+ server=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ private_network=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ load_balancer=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
+ ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
+ required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for deleting a server.")
+ try:
+ (changed, servers) = remove_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for starting/stopping a server.")
+ try:
+ (changed, servers) = startstop_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('hostname',
+ 'appliance',
+ 'datacenter'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new server." % param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py
new file mode 100644
index 00000000..f1e74aa6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_facts.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_server_info) instead.
+short_description: Gather facts about Online servers.
+description:
+ - Gather facts about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server facts
+ community.general.online_server_facts:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+'''
+
+RETURN = r'''
+---
+online_server_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_facts": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineServerFacts, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
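+        # Resolve every server path returned by the listing endpoint into its
+        # full detail document.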
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_facts = OnlineServerFacts(module).all_detailed_servers()
+ module.exit_json(
+ ansible_facts={'online_server_facts': servers_facts}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
new file mode 100644
index 00000000..f0e73aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_info
+short_description: Gather information about Online servers.
+description:
+ - Gather information about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server information
+ community.general.online_server_info:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_server_info }}"
+'''
+
+RETURN = r'''
+---
+online_server_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_info": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineServerInfo, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_info = OnlineServerInfo(module).all_detailed_servers()
+ module.exit_json(
+ online_server_info=servers_info
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py
new file mode 100644
index 00000000..7b78924e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_facts.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_user_info) instead.
+short_description: Gather facts about Online user.
+description:
+ - Gather facts about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user facts
+ community.general.online_user_facts:
+'''
+
+RETURN = r'''
+---
+online_user_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_facts": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineUserFacts, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
new file mode 100644
index 00000000..093a2c68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_info
+short_description: Gather information about Online user.
+description:
+ - Gather information about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user info
+ community.general.online_user_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_user_info }}"
+'''
+
+RETURN = r'''
+---
+online_user_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_info": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineUserInfo, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ online_user_info=OnlineUserInfo(module).get_resources()
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
new file mode 100644
index 00000000..efe1ce22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula Hosts"
+
+options:
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If C(absent) the host will be deleted from the cluster.
+ - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
+ - If C(enabled) the host is fully operational.
+ - C(disabled), e.g. to perform maintenance operations.
+ - C(offline), host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+      - The name of the information manager driver; these values are taken from oned.conf with the tag name IM_MAD (name).
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+      - The name of the virtual machine manager (VMM) driver; these values are taken from oned.conf with the tag name VM_MAD (name).
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
+
+extends_documentation_fragment:
+- community.general.opennebula
+
+
+author:
+ - Rafael del Valle (@rvalle)
+'''
+
+EXAMPLES = '''
+- name: Create a new host in OpenNebula
+ community.general.one_host:
+ name: host1
+ cluster_id: 1
+ api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+ community.general.one_host:
+ name: host2
+ cluster_name: default
+ template:
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
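+
+# Illustrative maintenance round-trip, using only the states documented above
+- name: Disable host2 for maintenance
+  community.general.one_host:
+    name: host2
+    state: disabled
+
+- name: Re-enable host2 after maintenance
+  community.general.one_host:
+    name: host2
+    state: enabled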
+'''
+
+# TODO: pending setting guidelines on returned values
+RETURN = '''
+'''
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+ from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+ pass # handled at module utils
+
+
+# Pseudo definitions...
+
+HOST_ABSENT = -99 # the host is absent (special case defined by this module)
+
+
+class HostModule(OpenNebulaModule):
+
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
+ im_mad_name=dict(type='str', default="kvm"),
+ vmm_mad_name=dict(type='str', default="kvm"),
+ cluster_id=dict(type='int', default=0),
+ cluster_name=dict(type='str'),
+ labels=dict(type='list'),
+ template=dict(type='dict', aliases=['attributes']),
+ )
+
+ mutually_exclusive = [
+ ['cluster_id', 'cluster_name']
+ ]
+
+ OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
+
+ def allocate_host(self):
+ """
+ Creates a host entry in OpenNebula
+ Returns: True on success, fails otherwise.
+
+ """
+ if not self.one.host.allocate(self.get_parameter('name'),
+ self.get_parameter('vmm_mad_name'),
+ self.get_parameter('im_mad_name'),
+ self.get_parameter('cluster_id')):
+ self.fail(msg="could not allocate host")
+ else:
+ self.result['changed'] = True
+ return True
+
+ def wait_for_host_state(self, host, target_states):
+ """
+ Utility method that waits for a host state.
+ Args:
+            host: host object as returned by get_host_by_name()
+            target_states: list of HOST_STATES values the host should reach
+
+ """
+ return self.wait_for_state('host',
+ lambda: self.one.host.info(host.ID).STATE,
+ lambda s: HOST_STATES(s).name, target_states,
+ invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
+
+ def run(self, one, module, result):
+
+ # Get the list of hosts
+ host_name = self.get_parameter("name")
+ host = self.get_host_by_name(host_name)
+
+ # manage host state
+ desired_state = self.get_parameter('state')
+ if bool(host):
+ current_state = host.STATE
+ current_state_name = HOST_STATES(host.STATE).name
+ else:
+ current_state = HOST_ABSENT
+ current_state_name = "ABSENT"
+
+ # apply properties
+ if desired_state == 'present':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
+ self.fail(msg="invalid host state %s" % current_state_name)
+
+ elif desired_state == 'enabled':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.ENABLED):
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not enable host")
+ elif current_state in [HOST_STATES.MONITORED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
+
+ elif desired_state == 'disabled':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be put in disabled state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.DISABLED):
+ self.wait_for_host_state(host, [HOST_STATES.DISABLED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not disable host")
+ elif current_state in [HOST_STATES.DISABLED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
+
+ elif desired_state == 'offline':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be placed in offline state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
+ if one.host.status(host.ID, HOST_STATUS.OFFLINE):
+ self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not set host offline")
+ elif current_state in [HOST_STATES.OFFLINE]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
+
+ elif desired_state == 'absent':
+ if current_state != HOST_ABSENT:
+ if one.host.delete(host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="could not delete host from cluster")
+
+ # if we reach this point we can assume that the host was taken to the desired state
+
+ if desired_state != "absent":
+ # manipulate or modify the template
+ desired_template_changes = self.get_parameter('template')
+
+ if desired_template_changes is None:
+ desired_template_changes = dict()
+
+ # complete the template with specific ansible parameters
+ if self.is_parameter('labels'):
+ desired_template_changes['LABELS'] = self.get_parameter('labels')
+
+ if self.requires_template_update(host.TEMPLATE, desired_template_changes):
+ # setup the root element so that pyone will generate XML instead of attribute vector
+ desired_template_changes = {"TEMPLATE": desired_template_changes}
+ if one.host.update(host.ID, desired_template_changes, 1): # merge the template
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host template")
+
+ # the cluster
+ if host.CLUSTER_ID != self.get_parameter('cluster_id'):
+ if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host cluster")
+
+ # return
+ self.exit()
+
+
+def main():
+ HostModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
new file mode 100644
index 00000000..867bab62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image
+short_description: Manages OpenNebula images
+description:
+ - Manages OpenNebula images
+requirements:
+ - python-oca
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ id:
+ description:
+ - A C(id) of the image you would like to manage.
+ type: int
+ name:
+ description:
+ - A C(name) of the image you would like to manage.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the image
+ - C(absent) - delete the image
+ - C(cloned) - clone the image
+ - C(renamed) - rename the image to the C(new_name)
+ choices: ["present", "absent", "cloned", "renamed"]
+ default: present
+ type: str
+ enabled:
+ description:
+ - Whether the image should be enabled or disabled.
+ type: bool
+ new_name:
+ description:
+ - A name that will be assigned to the existing or new image.
+      - In the case of cloning, by default C(new_name) will take the name of the original image with the prefix 'Copy of'.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the IMAGE by id
+ community.general.one_image:
+ id: 45
+ register: result
+
+- name: Print the IMAGE properties
+ ansible.builtin.debug:
+    msg: "{{ result }}"
+
+- name: Rename existing IMAGE
+ community.general.one_image:
+ id: 34
+ state: renamed
+ new_name: bar-image
+
+- name: Disable the IMAGE by id
+ community.general.one_image:
+ id: 37
+ enabled: no
+
+- name: Enable the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ enabled: yes
+
+- name: Clone the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ state: cloned
+ new_name: bar-image-clone
+ register: result
+
+- name: Delete the IMAGE by id
+ community.general.one_image:
+ id: '{{ result.id }}'
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: image id
+ type: int
+ returned: success
+ sample: 153
+name:
+ description: image name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: image's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: image's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: image's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: image's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of image instance
+ type: str
+ returned: success
+ sample: READY
+used:
+ description: is image in use
+ type: bool
+ returned: success
+ sample: true
+running_vms:
+ description: count of running vms that use this image
+ type: int
+ returned: success
+ sample: 7
+'''
+
+try:
+ import oca
+ HAS_OCA = True
+except ImportError:
+ HAS_OCA = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_image(module, client, predicate):
+ pool = oca.ImagePool(client)
+ # Filter -2 means fetch all images user can Use
+ pool.info(filter=-2)
+
+ for image in pool:
+ if predicate(image):
+ return image
+
+ return None
+
+
+def get_image_by_name(module, client, image_name):
+ return get_image(module, client, lambda image: (image.name == image_name))
+
+
+def get_image_by_id(module, client, image_id):
+ return get_image(module, client, lambda image: (image.id == image_id))
+
+
+def get_image_instance(module, client, requested_id, requested_name):
+ if requested_id:
+ return get_image_by_id(module, client, requested_id)
+ else:
+ return get_image_by_name(module, client, requested_name)
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ image.info()
+
+ info = {
+ 'id': image.id,
+ 'name': image.name,
+ 'state': IMAGE_STATES[image.state],
+ 'running_vms': image.running_vms,
+ 'used': bool(image.running_vms),
+ 'user_name': image.uname,
+ 'user_id': image.uid,
+ 'group_name': image.gname,
+ 'group_id': image.gid,
+ }
+
+ return info
+
+
+def wait_for_state(module, image, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ image.info()
+ state = image.state
+
+ if state_predicate(state):
+ return image
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_ready(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
+
+
+def wait_for_delete(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
+
+
+def enable_image(module, client, image, enable):
+ image.info()
+ changed = False
+
+ state = image.state
+
+ if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+ if enable:
+ module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+ else:
+ module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
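+    # A change is only required when the requested state differs from the current one:
+    # enabling an image that is not READY, or disabling an image that is not DISABLED.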
+ if ((enable and state != IMAGE_STATES.index('READY')) or
+ (not enable and state != IMAGE_STATES.index('DISABLED'))):
+ changed = True
+
+ if changed and not module.check_mode:
+ client.call('image.enable', image.id, enable)
+
+ result = get_image_info(image)
+ result['changed'] = changed
+
+ return result
+
+
+def clone_image(module, client, image, new_name):
+ if new_name is None:
+ new_name = "Copy of " + image.name
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ result = get_image_info(tmp_image)
+ result['changed'] = False
+ return result
+
+ if image.state == IMAGE_STATES.index('DISABLED'):
+ module.fail_json(msg="Cannot clone DISABLED image")
+
+ if not module.check_mode:
+ new_id = client.call('image.clone', image.id, new_name)
+ image = get_image_by_id(module, client, new_id)
+ wait_for_ready(module, image)
+
+ result = get_image_info(image)
+ result['changed'] = True
+
+ return result
+
+
+def rename_image(module, client, image, new_name):
+ if new_name is None:
+ module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
+
+ if new_name == image.name:
+ result = get_image_info(image)
+ result['changed'] = False
+ return result
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
+
+ if not module.check_mode:
+ client.call('image.rename', image.id, new_name)
+
+ result = get_image_info(image)
+ result['changed'] = True
+ return result
+
+
+def delete_image(module, client, image):
+
+ if not image:
+ return {'changed': False}
+
+ if image.running_vms > 0:
+ module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
+
+ if not module.check_mode:
+ client.call('image.delete', image.id)
+ wait_for_delete(module, image)
+
+ return {'changed': True}
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "id": {"required": False, "type": "int"},
+ "name": {"required": False, "type": "str"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'cloned', 'renamed'],
+ "type": "str"
+ },
+ "enabled": {"required": False, "type": "bool"},
+ "new_name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['id', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_OCA:
+ module.fail_json(msg='This module requires python-oca to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ state = params.get('state')
+ enabled = params.get('enabled')
+ new_name = params.get('new_name')
+ client = oca.Client(auth.username + ':' + auth.password, auth.url)
+
+ result = {}
+
+ if not id and state == 'renamed':
+ module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
+
+ image = get_image_instance(module, client, id, name)
+ if not image and state != 'absent':
+ if id:
+ module.fail_json(msg="There is no image with id=" + str(id))
+ else:
+ module.fail_json(msg="There is no image with name=" + name)
+
+ if state == 'absent':
+ result = delete_image(module, client, image)
+ else:
+ result = get_image_info(image)
+ changed = False
+ result['changed'] = False
+
+ if enabled is not None:
+ result = enable_image(module, client, image, enabled)
+ if state == "cloned":
+ result = clone_image(module, client, image, new_name)
+ elif state == "renamed":
+ result = rename_image(module, client, image, new_name)
+
+ changed = changed or result['changed']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_facts.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+      - A list of image ids whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as regex pattern
+ - which restricts the list of images (whose facts will be returned) whose names match specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+    msg: "{{ result }}"
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
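+    # A leading '~' turns the name into a regex pattern; '~*' additionally makes the
+    # match case-insensitive (see the name option documentation above).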
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+      - A list of image ids whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as regex pattern
+ - which restricts the list of images (whose facts will be returned) whose names match specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+    msg: "{{ result }}"
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
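+    # A leading '~' turns the name into a regex pattern; '~*' additionally makes the
+    # match case-insensitive (see the name option documentation above).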
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
new file mode 100644
index 00000000..68f8398f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
@@ -0,0 +1,768 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+ - Manage OpenNebula services
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula OneFlow API server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the ONEFLOW_URL environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ type: str
+ template_name:
+ description:
+ - Name of service template to use to create a new instance of a service
+ type: str
+ template_id:
+ description:
+ - ID of a service template to use to create a new instance of a service
+ type: int
+ service_id:
+ description:
+ - ID of a service instance that you would like to manage
+ type: int
+ service_name:
+ description:
+ - Name of a service instance that you would like to manage
+ type: str
+ unique:
+ description:
+ - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when
+ - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below.
+ type: bool
+ default: no
+ state:
+ description:
+ - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name).
+ - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name).
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ mode:
+ description:
+ - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the service
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the service
+ type: int
+ wait:
+ description:
+ - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ custom_attrs:
+ description:
+ - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ default: {}
+ type: dict
+ role:
+ description:
+ - Name of the role whose cardinality should be changed
+ type: str
+ cardinality:
+ description:
+ - Number of VMs for the specified role
+ type: int
+ force:
+ description:
+ - Force the new cardinality even if it is outside the limits
+ type: bool
+ default: no
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+ community.general.one_service:
+ template_id: 90
+ register: result
+
+- name: Print service properties
+ ansible.builtin.debug:
+    msg: "{{ result }}"
+
+- name: Instantiate a new service with specified service_name, service group and mode
+ community.general.one_service:
+ template_name: 'app1_template'
+ service_name: 'app1'
+ group_id: 1
+ mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+ community.general.one_service:
+ template_id: 90
+ custom_attrs:
+ public_network_id: 21
+ private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+ community.general.one_service:
+ template_id: 53
+ service_name: 'foo'
+ unique: yes
+
+- name: Delete a service by ID
+ community.general.one_service:
+ service_id: 153
+ state: absent
+
+- name: Get service info
+ community.general.one_service:
+ service_id: 153
+ register: service_info
+
+- name: Change service owner, group and mode
+ community.general.one_service:
+ service_name: 'app2'
+ owner_id: 34
+ group_id: 113
+ mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+ community.general.one_service:
+ template_id: 43
+ service_name: 'foo1'
+
+- name: Wait service to become RUNNING
+ community.general.one_service:
+ service_id: 112
+ wait: yes
+
+- name: Change role cardinality
+ community.general.one_service:
+ service_id: 153
+ role: bar
+ cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 7
+ wait: yes
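+
+# Illustrative follow-up (relies only on the 'roles' list shown in RETURN below):
+- name: Print the cardinality of every role
+  ansible.builtin.debug:
+    msg: "{{ service_info.roles | map(attribute='cardinality') | list }}"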
+'''
+
+RETURN = '''
+service_id:
+ description: service id
+ type: int
+ returned: success
+ sample: 153
+service_name:
+ description: service name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: service's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: service's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: service's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: service's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of service instance
+ type: str
+ returned: success
+ sample: RUNNING
+mode:
+ description: service's mode
+ type: int
+ returned: success
+ sample: 660
+roles:
+ description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
+ type: list
+ returned: success
+ sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
+ {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
+'''
+
+import os
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+ "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+ try:
+ all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+ all_templates_dict = get_all_templates(module, auth)
+
+ found = 0
+ found_template = None
+ template_name = ''
+
+ if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+ for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(template):
+ found = found + 1
+ found_template = template
+ template_name = template["NAME"]
+
+ if found <= 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg="There is no template with unique name: " + template_name)
+ else:
+ return found_template
+
+
+def get_all_services(module, auth):
+ try:
+ response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+ all_services_dict = get_all_services(module, auth)
+
+ found = 0
+ found_service = None
+ service_name = ''
+
+ if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+ for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(service):
+ found = found + 1
+ found_service = service
+ service_name = service["NAME"]
+
+ # fail if there are more services with same name
+ if found > 1:
+ module.fail_json(msg="There are multiple services with a name: '" +
+ service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+ elif found <= 0:
+ return None
+ else:
+ return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+ return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+ return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+ result = {
+ "service_id": int(service["ID"]),
+ "service_name": service["NAME"],
+ "group_id": int(service["GID"]),
+ "group_name": service["GNAME"],
+ "owner_id": int(service["UID"]),
+ "owner_name": service["UNAME"],
+ "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+ }
+
+ roles_status = service["TEMPLATE"]["BODY"]["roles"]
+ roles = []
+ for role in roles_status:
+ nodes_ids = []
+ if "nodes" in role:
+ for node in role["nodes"]:
+ nodes_ids.append(node["deploy_id"])
+ roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+ result["roles"] = roles
+ result["mode"] = int(parse_service_permissions(service))
+
+ return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+ # make sure that the values in custom_attrs dict are strings
+ custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
+
+ data = {
+ "action": {
+ "perform": "instantiate",
+ "params": {
+ "merge_template": {
+ "custom_attrs_values": custom_attrs_with_str,
+ "name": service_name
+ }
+ }
+ }
+ }
+
+ try:
+ response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+ data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ service_result = module.from_json(response.read())["DOCUMENT"]
+
+ return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
+
+ status_result = module.from_json(status_result.read())
+ service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
+
+ if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
+ return status_result["DOCUMENT"]
+ elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
+ log_message = ''
+ for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
+ if log_info["severity"] == "E":
+ log_message = log_message + log_info["message"]
+ break
+
+ module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired")
+
+
+def change_service_permissions(module, auth, service_id, permissions):
+
+ data = {
+ "action": {
+ "perform": "chmod",
+ "params": {"octet": permissions}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_owner(module, auth, service_id, owner_id):
+ data = {
+ "action": {
+ "perform": "chown",
+ "params": {"owner_id": owner_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_group(module, auth, service_id, group_id):
+
+ data = {
+ "action": {
+ "perform": "chgrp",
+ "params": {"group_id": group_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_role_cardinality(module, auth, service_id, role, cardinality, force):
+
+ data = {
+ "cardinality": cardinality,
+ "force": force
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if status_result.getcode() != 204:
+ module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
+
+
+def check_change_service_owner(module, service, owner_id):
+ old_owner_id = int(service["UID"])
+
+ return old_owner_id != owner_id
+
+
+def check_change_service_group(module, service, group_id):
+ old_group_id = int(service["GID"])
+
+ return old_group_id != group_id
+
+
+def parse_service_permissions(service):
+ perm_dict = service["PERMISSIONS"]
+ '''
+ This is the structure of the 'PERMISSIONS' dictionary:
+
+ "PERMISSIONS": {
+ "OWNER_U": "1",
+ "OWNER_M": "1",
+ "OWNER_A": "0",
+ "GROUP_U": "0",
+ "GROUP_M": "0",
+ "GROUP_A": "0",
+ "OTHER_U": "0",
+ "OTHER_M": "0",
+ "OTHER_A": "0"
+ }
+ '''
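+ # Each U/M/A flag is weighted 4/2/1, so e.g. (illustrative values) OWNER_U=1, OWNER_M=1, OWNER_A=0
+ # gives owner digit 6; with GROUP_U=1 and all OTHER_* flags 0 the returned string would be "640".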
+
+ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+ group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+ other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
+
+def check_change_service_permissions(module, service, permissions):
+ old_permissions = parse_service_permissions(service)
+
+ return old_permissions != permissions
+
+
+def check_change_role_cardinality(module, service, role_name, cardinality):
+ roles_list = service["TEMPLATE"]["BODY"]["roles"]
+
+ for role in roles_list:
+ if role["name"] == role_name:
+ return int(role["cardinality"]) != cardinality
+
+ module.fail_json(msg="There is no role with name: " + role_name)
+
+
+def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
+ if not service_name:
+ service_name = ''
+ changed = False
+ service = None
+
+ if unique:
+ service = get_service_by_name(module, auth, service_name)
+
+ if not service:
+ if not module.check_mode:
+ service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
+ changed = True
+
+ # if check_mode=true and there would be changes, the service does not exist yet and we cannot fetch it
+ if module.check_mode and changed:
+ return {"changed": True}
+
+ result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
+ wait_timeout=wait_timeout, permissions=permissions, service=service)
+
+ if result["changed"]:
+ changed = True
+
+ result["changed"] = changed
+
+ return result
+
+
+def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
+ role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
+
+ changed = False
+
+ if not service:
+ service = get_service_by_id(module, auth, service_id)
+ else:
+ service_id = service["ID"]
+
+ if not service:
+ module.fail_json(msg="There is no service with id: " + str(service_id))
+
+ if owner_id:
+ if check_change_service_owner(module, service, owner_id):
+ if not module.check_mode:
+ change_service_owner(module, auth, service_id, owner_id)
+ changed = True
+ if group_id:
+ if check_change_service_group(module, service, group_id):
+ if not module.check_mode:
+ change_service_group(module, auth, service_id, group_id)
+ changed = True
+ if permissions:
+ if check_change_service_permissions(module, service, permissions):
+ if not module.check_mode:
+ change_service_permissions(module, auth, service_id, permissions)
+ changed = True
+
+ if role:
+ if check_change_role_cardinality(module, service, role, cardinality):
+ if not module.check_mode:
+ change_role_cardinality(module, auth, service_id, role, cardinality, force)
+ changed = True
+
+ if wait and not module.check_mode:
+ service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
+
+ # if something has changed, fetch service info again
+ if changed:
+ service = get_service_by_id(module, auth, service_id)
+
+ service_info = get_service_info(module, auth, service)
+ service_info["changed"] = changed
+
+ return service_info
+
+
+def delete_service(module, auth, service_id):
+ service = get_service_by_id(module, auth, service_id)
+ if not service:
+ return {"changed": False}
+
+ service_info = get_service_info(module, auth, service)
+
+ service_info["changed"] = True
+
+ if module.check_mode:
+ return service_info
+
+ try:
+ result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
+
+ return service_info
+
+
+def get_template_by_name(module, auth, template_name):
+ return get_template(module, auth, lambda template: (template["NAME"] == template_name))
+
+
+def get_template_by_id(module, auth, template_id):
+ return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
+
+
+def get_template_id(module, auth, requested_id, requested_name):
+ template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
+
+ if template:
+ return template["ID"]
+
+ return None
+
+
+def get_service_id_by_name(module, auth, service_name):
+ service = get_service_by_name(module, auth, service_name)
+
+ if service:
+ return service["ID"]
+
+ return None
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONEFLOW_URL')
+
+ if not username:
+ username = os.environ.get('ONEFLOW_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONEFLOW_PASSWORD')
+
+ if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'user', 'password'))
+
+ return auth_params(url=url, user=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "service_name": {"required": False, "type": "str"},
+ "service_id": {"required": False, "type": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "unique": {"default": False, "type": "bool"},
+ "wait": {"default": False, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "custom_attrs": {"default": {}, "type": "dict"},
+ "role": {"required": False, "type": "str"},
+ "cardinality": {"required": False, "type": "int"},
+ "force": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'service_id'],
+ ['service_id', 'service_name'],
+ ['template_id', 'template_name', 'role'],
+ ['template_id', 'template_name', 'cardinality'],
+ ['service_id', 'custom_attrs']
+ ],
+ required_together=[['role', 'cardinality']],
+ supports_check_mode=True)
+
+ auth = get_connection_info(module)
+ params = module.params
+ service_name = params.get('service_name')
+ service_id = params.get('service_id')
+
+ requested_template_id = params.get('template_id')
+ requested_template_name = params.get('template_name')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ unique = params.get('unique')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ custom_attrs = params.get('custom_attrs')
+ role = params.get('role')
+ cardinality = params.get('cardinality')
+ force = params.get('force')
+
+ template_id = None
+
+ if requested_template_id or requested_template_name:
+ template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+ if not template_id:
+ if requested_template_id:
+ module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ if unique and not service_name:
+ module.fail_json(msg="You cannot use unique without passing service_name!")
+
+ if template_id and state == 'absent':
+ module.fail_json(msg="State absent is not valid for template")
+
+ if template_id and state == 'present': # Instantiate a service
+ result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+ group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+ else:
+ if not (service_id or service_name):
+ module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
+ if custom_attrs:
+ module.fail_json(msg="You can only set custom_attrs when instantiate service!")
+
+ if not service_id:
+ service_id = get_service_id_by_name(module, auth, service_name)
+ # The task should fail when we want to manage a non-existent service identified by its name
+ if not service_id and state == 'present':
+ module.fail_json(msg="There is no service with name: " + service_name)
+
+ if state == 'absent':
+ result = delete_service(module, auth, service_id)
+ else:
+ result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
new file mode 100644
index 00000000..286514bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
@@ -0,0 +1,1599 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+ - Manages OpenNebula instances
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ - If both I(api_username) and I(api_password) are not set, then it will try to
+ - authenticate with the ONE auth file. Default path is "~/.one/one_auth".
+ - Set environment variable C(ONE_AUTH) to override this path.
+ type: str
+ template_name:
+ description:
+ - Name of VM template to use to create a new instance
+ type: str
+ template_id:
+ description:
+ - ID of a VM template to use to create a new instance
+ type: int
+ vm_start_on_hold:
+ description:
+ - Set to true to put the VM on hold while creating it
+ default: False
+ type: bool
+ instance_ids:
+ description:
+ - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ aliases: ['ids']
+ type: list
+ state:
+ description:
+ - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+ - C(running) - run instances
+ - C(poweredoff) - power-off instances
+ - C(rebooted) - reboot instances
+ - C(absent) - terminate instances
+ choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+ default: present
+ type: str
+ hard:
+ description:
+ - Reboot, power-off or terminate instances C(hard)
+ default: no
+ type: bool
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning. Keep
+ - in mind that waiting for the instance to be in the running state does not
+ - mean that you will be able to SSH to that machine, only that the boot
+ - process has started on that instance; see the 'wait_for' example for
+ - details.
+ default: yes
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ attributes:
+ description:
+ - A dictionary of key/value attributes to add to new instances, or for
+ - setting C(state) of instances with these attributes.
+ - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+ - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+ - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
+ - indexes to the names of VMs.
+ - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+ - When used with C(count_attributes) and C(exact_count) the module will
+ - match the base name without the index part.
+ default: {}
+ type: dict
+ labels:
+ description:
+ - A list of labels to associate with new instances, or for setting
+ - C(state) of instances with these labels.
+ default: []
+ type: list
+ count_attributes:
+ description:
+ - A dictionary of key/value attributes that can only be used with
+ - C(exact_count) to determine how many nodes based on specific
+ - attribute criteria should be deployed. This can be expressed in
+ - multiple ways and is shown in the EXAMPLES section.
+ type: dict
+ count_labels:
+ description:
+ - A list of labels that can only be used with C(exact_count) to determine
+ - how many nodes based on specific label criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES
+ - section.
+ type: list
+ count:
+ description:
+ - Number of instances to launch
+ default: 1
+ type: int
+ exact_count:
+ description:
+ - Indicates how many instances that match C(count_attributes) and
+ - C(count_labels) parameters should be deployed. Instances are either
+ - created or terminated based on this value.
+ - NOTE':' Instances with the lowest IDs will be terminated first.
+ type: int
+ mode:
+ description:
+ - Set permission mode of the instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the instance
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the instance
+ type: int
+ memory:
+ description:
+ - The size of the memory for new instances (in MB, GB, ...)
+ type: str
+ disk_size:
+ description:
+ - The size of the disk created for new instances (in MB, GB, TB,...).
+ - NOTE':' If the template has multiple disks, the order of the sizes is
+ - matched against the order of the disks in C(template_id)/C(template_name).
+ type: list
+ cpu:
+ description:
+ - Percentage of CPU divided by 100 required for the new instance. Half a
+ - processor is written 0.5.
+ type: float
+ vcpu:
+ description:
+ - Number of CPUs (cores) the new VM will have.
+ type: int
+ networks:
+ description:
+ - A list of dictionaries with network parameters. See examples for more details.
+ default: []
+ type: list
+ disk_saveas:
+ description:
+ - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify C(name) of the new image.
+ - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+ - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
+ - and the VM has to be in the C(poweredoff) state.
+ - Also this operation will fail if an image with specified C(name) already exists.
+ type: dict
+ persistent:
+ description:
+ - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+ default: no
+ type: bool
+ version_added: '0.2.0'
+ datastore_id:
+ description:
+ - ID of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: int
+ datastore_name:
+ description:
+ - Name of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+ community.general.one_vm:
+ template_id: 90
+ register: result
+
+- name: Print VM properties
+ ansible.builtin.debug:
+ msg: "{{ result }}"
+
+- name: Deploy a new VM on hold
+ community.general.one_vm:
+ template_name: 'app1_template'
+ vm_start_on_hold: 'True'
+
+- name: Deploy a new VM and set its name to 'foo'
+ community.general.one_vm:
+ template_name: 'app1_template'
+ attributes:
+ name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+ community.general.one_vm:
+ template_id: 90
+ group_id: 16
+ mode: 660
+
+- name: Deploy a new VM as persistent
+ community.general.one_vm:
+ template_id: 90
+ persistent: yes
+
+- name: Change VM's permissions to 640
+ community.general.one_vm:
+ instance_ids: 5
+ mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+ community.general.one_vm:
+ template_id: 15
+ disk_size: 35.2 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 2
+ networks:
+ - NETWORK_ID: 27
+ - NETWORK: "default-network"
+ NETWORK_UNAME: "app-user"
+ SECURITY_GROUPS: "120,124"
+ - NETWORK_ID: 27
+ SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+ community.general.one_vm:
+ template_id: 42
+ disk_size:
+ - 35.2 GB
+ - 50 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 1
+ networks:
+ - NETWORK_ID: 27
+
+- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: foo
+ bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ foo1: app1
+ foo2: app2
+ exact_count: 2
+ count_attributes:
+ foo1: app1
+ foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: app
+ bar: bar2
+ exact_count: 4
+ count_attributes:
+ bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-##
+ foo: bar
+ labels:
+ - app1
+ - app2
+ count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-###
+ app: app1
+ count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+ community.general.one_vm:
+ attributes:
+ name: fooapp-#
+ state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete oldest instances, so only the 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 1
+ count_attributes:
+ name: fooapp-#
+
+ - name: Deploy a new instance with a network
+ community.general.one_vm:
+ template_id: 53
+ networks:
+ - NETWORK_ID: 27
+ register: vm
+
+- name: Wait for SSH to come up
+ ansible.builtin.wait_for_connection:
+ delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+ community.general.one_vm:
+ instance_ids:
+ - 153
+ - 160
+ state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+ community.general.one_vm:
+ labels:
+ - foo
+ - app1
+ state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+ community.general.one_vm:
+ attributes:
+ name: foo
+ app: bar
+ register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ - foo2
+ count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ exact_count: 1
+ count_labels:
+ - foo1
+
+- name: Terminate all instances that have attribute foo
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 0
+ count_attributes:
+ foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ state: poweredoff
+ disk_saveas:
+ name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ disk_saveas:
+ name: bar-image
+ disk_id: 1
+'''
+
+RETURN = '''
+instances_ids:
+ description: a list of instance IDs whose state has changed or which were fetched with the C(instance_ids) option.
+ type: list
+ returned: success
+ sample: [ 1234, 1235 ]
+instances:
+ description: a list of instance info whose state has changed or which were fetched with the C(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: str
+ sample: 20480 MB
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+tagged_instances:
+ description:
+ - A list of instance info based on specific attributes and/or
+ - labels that are specified with the C(count_attributes) and C(count_labels)
+ - options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's user id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's user name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: list
+ sample: [
+ "20480 MB",
+ "10240 MB"
+ ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_template(module, client, predicate):
+
+ pool = client.templatepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all templates user can Use
+ found = 0
+ found_template = None
+ template_name = ''
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ found = found + 1
+ found_template = template
+ template_name = template.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There are multiple templates with the name: ' + template_name)
+ return found_template
+
+
+def get_template_by_name(module, client, template_name):
+ return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+ return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+ template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+ if template:
+ return template.ID
+ else:
+ return None
+
+
+def get_datastore(module, client, predicate):
+ pool = client.datastorepool.info()
+ found = 0
+ found_datastore = None
+ datastore_name = ''
+
+ for datastore in pool.DATASTORE:
+ if predicate(datastore):
+ found = found + 1
+ found_datastore = datastore
+ datastore_name = datastore.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There are multiple datastores with the name: ' + datastore_name)
+ return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+ return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+ return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
+ if datastore:
+ return datastore.ID
+ else:
+ return None
+
+
+def get_vm_by_id(client, vm_id):
+ try:
+ vm = client.vm.info(int(vm_id))
+ except BaseException:
+ return None
+ return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+ vms = []
+
+ for vm_id in ids:
+ vm = get_vm_by_id(client, vm_id)
+ if vm is None and state != 'absent':
+ module.fail_json(msg='There is no VM with id=' + str(vm_id))
+ vms.append(vm)
+
+ return vms
+
+
+def get_vm_info(client, vm):
+
+ vm = client.vm.info(vm.ID)
+
+ networks_info = []
+
+ disk_size = []
+ if 'DISK' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['DISK'], list):
+ for disk in vm.TEMPLATE['DISK']:
+ disk_size.append(disk['SIZE'] + ' MB')
+ else:
+ disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+ if 'NIC' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['NIC'], list):
+ for nic in vm.TEMPLATE['NIC']:
+ networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
+ else:
+ networks_info.append(
+ {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
+ 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
+ import time
+
+ current_time = time.localtime()
+ vm_start_time = time.localtime(vm.STIME)
+
+ vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+ vm_uptime /= (60 * 60)
+
+ permissions_str = parse_vm_permissions(client, vm)
+
+ # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+ vm_lcm_state = None
+ if vm.STATE == VM_STATES.index('ACTIVE'):
+ vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+ vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ info = {
+ 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+ 'vm_id': vm.ID,
+ 'vm_name': vm.NAME,
+ 'state': VM_STATES[vm.STATE],
+ 'lcm_state': vm_lcm_state,
+ 'owner_name': vm.UNAME,
+ 'owner_id': vm.UID,
+ 'networks': networks_info,
+ 'disk_size': disk_size,
+ 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+ 'vcpu': vm.TEMPLATE['VCPU'],
+ 'cpu': vm.TEMPLATE['CPU'],
+ 'group_name': vm.GNAME,
+ 'group_id': vm.GID,
+ 'uptime_h': int(vm_uptime),
+ 'attributes': vm_attributes,
+ 'mode': permissions_str,
+ 'labels': vm_labels
+ }
+
+ return info
+
+
+def parse_vm_permissions(client, vm):
+ vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+ owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+ group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+ other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ old_permissions = parse_vm_permissions(client, vm)
+ changed = changed or old_permissions != permissions
+
+ if not module.check_mode and old_permissions != permissions:
+ permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
+ mode_bits = [int(d) for d in permissions_str]
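+ # e.g. permissions '600' -> permissions_str '110000000' -> mode_bits [1, 1, 0, 0, 0, 0, 0, 0, 0],
+ # which are passed to chmod as the owner/group/other USE, MANAGE and ADMIN flags.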
+ try:
+ client.vm.chmod(
+ vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ if owner_id is None:
+ owner_id = vm.UID
+ if group_id is None:
+ group_id = vm.GID
+
+ changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+ if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+ try:
+ client.vm.chown(vm.ID, owner_id, group_id)
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def get_size_in_MB(module, size_str):
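+ # Parses a human-readable size string using binary prefixes (1 GB = 1024 MB), so e.g. (illustrative)
+ # get_size_in_MB(module, '4 GB') returns 4096.0 and get_size_in_MB(module, '512 MB') returns 512.0.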
+
+ SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ s = size_str
+ init = size_str
+ num = ""
+ while s and (s[0:1].isdigit() or s[0:1] == '.'):
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ symbol = s.strip()
+
+ if symbol not in SYMBOLS:
+ module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+ prefix = {'B': 1}
+
+ for i, s in enumerate(SYMBOLS[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+
+ size_in_bytes = int(num * prefix[symbol])
+ size_in_MB = size_in_bytes / (1024 * 1024)
+
+ return size_in_MB
+
+
+def create_disk_str(module, client, template_id, disk_size_list):
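+ # Builds the DISK section in OpenNebula template syntax, copying the template's disk attributes and
+ # overriding SIZE; e.g. (hypothetical attributes) a disk with IMAGE_ID "7" resized to '35 GB' becomes
+ # 'DISK = [IMAGE_ID="7", SIZE=35840]'.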
+
+ if not disk_size_list:
+ return ''
+
+ template = client.template.info(template_id)
+ if isinstance(template.TEMPLATE['DISK'], list):
+ # check if the number of disks is correct
+ if len(template.TEMPLATE['DISK']) != len(disk_size_list):
+ module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
+ result = ''
+ index = 0
+ for DISKS in template.TEMPLATE['DISK']:
+ disk = {}
+ diskresult = ''
+ # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in DISKS.items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
+ result += diskresult
+ index += 1
+ else:
+ if len(disk_size_list) > 1:
+ module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
+ disk = {}
+ # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in template.TEMPLATE['DISK'].items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
+
+ return result
+
+
+def create_attributes_str(attributes_dict, labels_list):
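+ # Renders labels and attributes in OpenNebula template syntax; e.g. (illustrative) labels ['app1', 'app2']
+ # and attributes {'name': 'foo'} produce the lines LABELS="app1,app2" and NAME="foo" (keys are upper-cased).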
+
+ attributes_str = ''
+
+ if labels_list:
+ attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
+ if attributes_dict:
+ attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
+
+ return attributes_str
+
+
+def create_nics_str(network_attrs_list):
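+ # Renders one NIC line per network dict; e.g. (illustrative) [{'NETWORK_ID': 27}] produces
+ # NIC = [NETWORK_ID="27"] (key order within a NIC follows dict iteration order).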
+ nics_str = ''
+
+ for network in network_attrs_list:
+ # Packing key-value dict in string with format key="value", key="value"
+ network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
+ nics_str = nics_str + 'NIC = [' + network_str + ']\n'
+
+ return nics_str
+
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ disk_str = create_disk_str(module, client, template_id, disk_size)
+ vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
+ try:
+ vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ vm = get_vm_by_id(client, vm_id)
+
+ return get_vm_info(client, vm)
+
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
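+ # Returns the lowest zero-padded index not yet used; e.g. (illustrative) with
+ # vm_filled_indexes_list=['000', '001'] and num_sign_cnt=3 the result is '002'.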
+ counter = 0
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ while cnt_str in vm_filled_indexes_list:
+ counter = counter + 1
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ return cnt_str
+
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
+ vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+ attrs_dict = {}
+ labels_list = []
+
+ for key, value in vm_USER_TEMPLATE.items():
+ if key != 'LABELS':
+ attrs_dict[key] = value
+ else:
+ if value is not None:
+ labels_list = value.split(',')
+
+ return labels_list, attrs_dict
+
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
+ pool = client.vmpool.info(-2, -1, -1, -1).VM
+ vm_list = []
+ name = ''
+ if attributes_dict:
+ name = attributes_dict.pop('NAME', '')
+
+ if name != '':
+ base_name = name[:len(name) - name.count('#')]
+ # Check whether the name has an indexed format
+ with_hash = name.endswith('#')
+
+ for vm in pool:
+ if vm.NAME.startswith(base_name):
+ if with_hash and vm.NAME[len(base_name):].isdigit():
+ # If the name has an indexed format and only digits follow base_name, it is a match
+ vm_list.append(vm)
+ elif not with_hash and vm.NAME == name:
+ # If the name is not indexed it has to match exactly
+ vm_list.append(vm)
+ pool = vm_list
+
+ import copy
+
+ vm_list = copy.copy(pool)
+
+ for vm in pool:
+ remove_list = []
+ vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ if attributes_dict and len(attributes_dict) > 0:
+ for key, val in attributes_dict.items():
+ if key in vm_attributes_dict:
+ if val and vm_attributes_dict[key] != val:
+ remove_list.append(vm)
+ break
+ else:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ remove_list = []
+ if labels_list and len(labels_list) > 0:
+ for label in labels_list:
+ if label not in vm_labels_list:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ return vm_list
+
+
+def create_count_of_vms(
+ module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+ new_vms_list = []
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ if module.check_mode:
+ return True, [], []
+
+ # Create list of used indexes
+ vm_filled_indexes_list = None
+ num_sign_cnt = vm_name.count('#')
+ if vm_name != '' and num_sign_cnt > 0:
+ vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
+ base_name = vm_name[:len(vm_name) - num_sign_cnt]
+ vm_name = base_name
+ # Make list which contains used indexes in format ['000', '001',...]
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
+
+ while count > 0:
+ new_vm_name = vm_name
+ # Create indexed name
+ if vm_filled_indexes_list is not None:
+ next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
+ vm_filled_indexes_list.append(next_index)
+ new_vm_name += next_index
+ # Update NAME value in the attributes in case there is index
+ attributes_dict['NAME'] = new_vm_name
+ new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
+ new_vm_id = new_vm_dict.get('vm_id')
+ new_vm = get_vm_by_id(client, new_vm_id)
+ new_vms_list.append(new_vm)
+ count -= 1
+
+ if vm_start_on_hold:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_hold(module, client, vm, wait_timeout)
+ else:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_running(module, client, vm, wait_timeout)
+
+ return True, new_vms_list, []
+
+
+def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
+ labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
+
+ vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
+
+ vm_count_diff = exact_count - len(vm_list)
+ changed = vm_count_diff != 0
+
+ new_vms_list = []
+ instances_list = []
+ tagged_instances_list = vm_list
+
+ if module.check_mode:
+ return changed, instances_list, tagged_instances_list
+
+ if vm_count_diff > 0:
+ # Add more VMs
+ changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+ labels_list, disk_size, network_attrs_list, wait, wait_timeout,
+ vm_start_on_hold, vm_persistent)
+
+ tagged_instances_list += instances_list
+ elif vm_count_diff < 0:
+ # Delete surplus VMs
+ old_vms_list = []
+
+ while vm_count_diff < 0:
+ old_vm = vm_list.pop(0)
+ old_vms_list.append(old_vm)
+ terminate_vm(module, client, old_vm, hard)
+ vm_count_diff += 1
+
+ if wait:
+ for vm in old_vms_list:
+ wait_for_done(module, client, vm, wait_timeout)
+
+ instances_list = old_vms_list
+ # store only the remaining instances
+ old_vms_set = set(old_vms_list)
+ tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
+
+ return changed, instances_list, tagged_instances_list
+
+
+VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
+LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
+ 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
+ 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
+ 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
+ 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
+
+
+def wait_for_state(module, client, vm, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ vm = client.vm.info(vm.ID)
+ state = vm.STATE
+ lcm_state = vm.LCM_STATE
+
+ if state_predicate(state, lcm_state):
+ return vm
+ elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
+ VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
+ module.fail_json(msg='Action was unsuccessful. VM state: ' + VM_STATES[state])
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_running(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state,
+ lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
+
+
+def wait_for_done(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
+
+
+def wait_for_hold(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
+
+
+def wait_for_poweroff(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
+
+
+def terminate_vm(module, client, vm, hard=False):
+ changed = False
+
+ if not vm:
+ return changed
+
+ changed = True
+
+ if not module.check_mode:
+ if hard:
+ client.vm.action('terminate-hard', vm.ID)
+ else:
+ client.vm.action('terminate', vm.ID)
+
+ return changed
+
+
+def terminate_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = terminate_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def poweroff_vm(module, client, vm, hard):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ if not hard:
+ client.vm.action('poweroff', vm.ID)
+ else:
+ client.vm.action('poweroff-hard', vm.ID)
+
+ return changed
+
+
+def poweroff_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = poweroff_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def reboot_vms(module, client, vms, wait_timeout, hard):
+
+ if not module.check_mode:
+ # First, power off all instances
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ poweroff_vm(module, client, vm, hard)
+
+ # Wait for all instances to be powered off
+ for vm in vms:
+ wait_for_poweroff(module, client, vm, wait_timeout)
+
+ for vm in vms:
+ resume_vm(module, client, vm)
+
+ return True
+
+
+def resume_vm(module, client, vm):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+ module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+ "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
+ if lcm_state not in [LCM_STATES.index('RUNNING')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('resume', vm.ID)
+
+ return changed
+
+
+def resume_vms(module, client, vms):
+ changed = False
+
+ for vm in vms:
+ changed = resume_vm(module, client, vm) or changed
+
+ return changed
+
+
+def check_name_attribute(module, attributes):
+ if attributes.get("NAME"):
+ import re
+ if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+ module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+ "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+ "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+ for key in attributes.keys():
+ if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+ module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+ # Check the format of the name attribute
+ check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+ if not disk_saveas.get('name'):
+ module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+ image_name = disk_saveas.get('name')
+ disk_id = disk_saveas.get('disk_id', 0)
+
+ if not module.check_mode:
+ if vm.STATE != VM_STATES.index('POWEROFF'):
+ module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
+ try:
+ client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not username:
+ if not password:
+ authfile = os.environ.get('ONE_AUTH')
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
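+ # By OpenNebula convention the one_auth file contains a single line of the form "username:password".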
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username = authstring.split(":")[0]
+ password = authstring.split(":")[1]
+ except (OSError, IOError):
+ module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+ except Exception:
+ module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
+ if not url:
+ module.fail_json(msg="Opennebula API url (api_url) is not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "vm_start_on_hold": {"default": False, "type": "bool"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "wait": {"default": True, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "hard": {"default": False, "type": "bool"},
+ "memory": {"required": False, "type": "str"},
+ "cpu": {"required": False, "type": "float"},
+ "vcpu": {"required": False, "type": "int"},
+ "disk_size": {"required": False, "type": "list"},
+ "datastore_name": {"required": False, "type": "str"},
+ "datastore_id": {"required": False, "type": "int"},
+ "networks": {"default": [], "type": "list"},
+ "count": {"default": 1, "type": "int"},
+ "exact_count": {"required": False, "type": "int"},
+ "attributes": {"default": {}, "type": "dict"},
+ "count_attributes": {"required": False, "type": "dict"},
+ "labels": {"default": [], "type": "list"},
+ "count_labels": {"required": False, "type": "list"},
+ "disk_saveas": {"type": "dict"},
+ "persistent": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'instance_ids'],
+ ['template_id', 'template_name', 'disk_saveas'],
+ ['instance_ids', 'count_attributes', 'count'],
+ ['instance_ids', 'count_labels', 'count'],
+ ['instance_ids', 'exact_count'],
+ ['instance_ids', 'attributes'],
+ ['instance_ids', 'labels'],
+ ['disk_saveas', 'attributes'],
+ ['disk_saveas', 'labels'],
+ ['exact_count', 'count'],
+ ['count', 'hard'],
+ ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
+ ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
+ ['instance_ids', 'networks'],
+ ['persistent', 'disk_size']
+ ],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ instance_ids = params.get('instance_ids')
+ requested_template_name = params.get('template_name')
+ requested_template_id = params.get('template_id')
+ put_vm_on_hold = params.get('vm_start_on_hold')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ hard = params.get('hard')
+ memory = params.get('memory')
+ cpu = params.get('cpu')
+ vcpu = params.get('vcpu')
+ disk_size = params.get('disk_size')
+ requested_datastore_id = params.get('datastore_id')
+ requested_datastore_name = params.get('datastore_name')
+ networks = params.get('networks')
+ count = params.get('count')
+ exact_count = params.get('exact_count')
+ attributes = params.get('attributes')
+ count_attributes = params.get('count_attributes')
+ labels = params.get('labels')
+ count_labels = params.get('count_labels')
+ disk_saveas = params.get('disk_saveas')
+ persistent = params.get('persistent')
+
+ if not (auth.username and auth.password):
+ module.warn("Credentials missing")
+ else:
+ one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if attributes:
+ attributes = dict((key.upper(), value) for key, value in attributes.items())
+ check_attributes(module, attributes)
+
+ if count_attributes:
+ count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+ if not attributes:
+ import copy
+ module.warn('When you pass `count_attributes` without the `attributes` option when deploying, the `attributes` option will implicitly take the same values.')
+ attributes = copy.copy(count_attributes)
+ check_attributes(module, count_attributes)
+
+ if count_labels and not labels:
+ module.warn('When you pass `count_labels` without the `labels` option when deploying, the `labels` option will implicitly take the same values.')
+ labels = count_labels
+
+ # Fetch template
+ template_id = None
+ if requested_template_id is not None or requested_template_name:
+ template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+ if template_id is None:
+ if requested_template_id is not None:
+ module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ # Fetch datastore
+ datastore_id = None
+ if requested_datastore_id or requested_datastore_name:
+ datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+ if datastore_id is None:
+ if requested_datastore_id:
+ module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+ elif requested_datastore_name:
+ module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+ else:
+ attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+ if exact_count and template_id is None:
+ module.fail_json(msg='Option `exact_count` needs template_id or template_name')
+
+ if exact_count is not None and not (count_attributes or count_labels):
+ module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+ if (count_attributes or count_labels) and exact_count is None:
+ module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+ if template_id is not None and state != 'present':
+ module.fail_json(msg="Only state 'present' is valid for the template")
+
+ if memory:
+ attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+ if cpu:
+ attributes['CPU'] = str(cpu)
+ if vcpu:
+ attributes['VCPU'] = str(vcpu)
+
+ if exact_count is not None and state != 'present':
+ module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+ if exact_count is not None and exact_count < 0:
+ module.fail_json(msg='`exact_count` cannot be less than 0')
+ if count <= 0:
+ module.fail_json(msg='`count` has to be greater than 0')
+
+ if permissions is not None:
+ import re
+ if re.match("^[0-7]{3}$", permissions) is None:
+ module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
+ if exact_count is not None:
+ # Deploy an exact count of VMs
+ changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+ count_attributes, labels, count_labels, disk_size,
+ networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
+ vms = tagged_instances_list
+ elif template_id is not None and state == 'present':
+ # Deploy count VMs
+ changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+ attributes, labels, disk_size, networks, wait, wait_timeout,
+ put_vm_on_hold, persistent)
+ # instances_list - new instances
+ # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+ vms = instances_list
+ else:
+ # Fetch data of instances, or change their state
+ if not (instance_ids or attributes or labels):
+ module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
+
+ if memory or cpu or vcpu or disk_size or networks:
+ module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
+
+ if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+ module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+ vms = []
+ tagged = False
+ changed = False
+
+ if instance_ids:
+ vms = get_vms_by_ids(module, one_client, state, instance_ids)
+ else:
+ tagged = True
+ vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+ if len(vms) == 0 and state != 'absent' and state != 'present':
+ module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+ if len(vms) == 0 and state == 'present' and not tagged:
+ module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+ if tagged and state == 'absent':
+ module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+ if state == 'absent':
+ changed = terminate_vms(module, one_client, vms, hard)
+ elif state == 'rebooted':
+ changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+ elif state == 'poweredoff':
+ changed = poweroff_vms(module, one_client, vms, hard)
+ elif state == 'running':
+ changed = resume_vms(module, one_client, vms)
+
+ instances_list = vms
+ tagged_instances_list = []
+
+ if permissions is not None:
+ changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+ if owner_id is not None or group_id is not None:
+ changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+ if wait and not module.check_mode and state != 'present':
+ wait_for = {
+ 'absent': wait_for_done,
+ 'rebooted': wait_for_running,
+ 'poweredoff': wait_for_poweroff,
+ 'running': wait_for_running
+ }
+ for vm in vms:
+ if vm is not None:
+ wait_for[state](module, one_client, vm, wait_timeout)
+
+ if disk_saveas is not None:
+ if len(vms) == 0:
+ module.fail_json(msg="There is no VM whose disk will be saved.")
+ disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+ changed = True
+
+ # instances - a list of info for instances whose state was changed or which were fetched with the C(instance_ids) option
+ instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+ instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+ # tagged_instances - a list of info for instances matching the attributes and/or labels specified with C(count_attributes) and C(count_labels)
+ tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+ result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
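+
+
+# Illustrative sketch only (not part of the module above): `exact_count`
+# reconciles the number of VMs matching `count_attributes`/`count_labels`
+# with the requested total. The helper and the sample figures below are
+# hypothetical and shown purely to make that idea explicit.
+def _plan_exact_count(tagged_vms, exact_count):
+    """Return how many VMs to deploy (positive) or terminate (negative)."""
+    return exact_count - len(tagged_vms)
+
+# For example, with 3 VMs already tagged APP=web and exact_count=5 the module
+# deploys 2 more; with exact_count=1 it terminates 2 of the tagged VMs.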
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
new file mode 100644
index 00000000..06dc4af0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCN) in OCI
+description:
+ - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
+ The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
+ U(https://github.com/oracle/oci-ansible-modules/releases).
+options:
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
+ This option is mutually exclusive with I(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it's changeable.
+ type: str
+ aliases: [ 'name' ]
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
+ form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
+ bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
+ to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
+ with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
+ with I(state=present). This option is mutually exclusive with I(compartment_id).
+ type: str
+ aliases: [ 'id' ]
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+- community.general.oracle
+- community.general.oracle_creatable_resource
+- community.general.oracle_wait_options
+- community.general.oracle_tags
+
+'''
+
+EXAMPLES = """
+- name: Create a VCN
+ community.general.oci_vcn:
+ cidr_block: '10.0.0.0/16'
+ compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+ display_name: my_vcn
+ dns_label: ansiblevcn
+
+- name: Updates the specified VCN's display name
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ display_name: ansible_vcn
+
+- name: Delete the specified VCN
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ state: absent
+"""
+
+RETURN = """
+vcn:
+ description: Information about the VCN
+ returned: On successful create and update operation
+ type: dict
+ sample: {
+ "cidr_block": "10.0.0.0/16",
+ compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+ from oci.core.virtual_network_client import VirtualNetworkClient
+ from oci.core.models import CreateVcnDetails
+ from oci.core.models import UpdateVcnDetails
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+ result = oci_utils.delete_and_wait(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ delete_fn=virtual_network_client.delete_vcn,
+ kwargs_delete={"vcn_id": module.params["vcn_id"]},
+ module=module,
+ )
+ return result
+
+
+def update_vcn(virtual_network_client, module):
+ result = oci_utils.check_and_update_resource(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ update_fn=virtual_network_client.update_vcn,
+ primitive_params_update=["vcn_id"],
+ kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+ module=module,
+ update_attributes=UpdateVcnDetails().attribute_map.keys(),
+ )
+ return result
+
+
+def create_vcn(virtual_network_client, module):
+ create_vcn_details = CreateVcnDetails()
+ for attribute in create_vcn_details.attribute_map.keys():
+ if attribute in module.params:
+ setattr(create_vcn_details, attribute, module.params[attribute])
+
+ result = oci_utils.create_and_wait(
+ resource_type="vcn",
+ create_fn=virtual_network_client.create_vcn,
+ kwargs_create={"create_vcn_details": create_vcn_details},
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ get_param="vcn_id",
+ module=module,
+ )
+ return result
+
+
+def main():
+ module_args = oci_utils.get_taggable_arg_spec(
+ supports_create=True, supports_wait=True
+ )
+ module_args.update(
+ dict(
+ cidr_block=dict(type="str", required=False),
+ compartment_id=dict(type="str", required=False),
+ display_name=dict(type="str", required=False, aliases=["name"]),
+ dns_label=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["absent", "present"],
+ ),
+ vcn_id=dict(type="str", required=False, aliases=["id"]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ mutually_exclusive=[["compartment_id", "vcn_id"]],
+ )
+
+ if not HAS_OCI_PY_SDK:
+ module.fail_json(msg=missing_required_lib("oci"))
+
+ virtual_network_client = oci_utils.create_service_client(
+ module, VirtualNetworkClient
+ )
+
+ exclude_attributes = {"display_name": True, "dns_label": True}
+ state = module.params["state"]
+ vcn_id = module.params["vcn_id"]
+
+ if state == "absent":
+ if vcn_id is not None:
+ result = delete_vcn(virtual_network_client, module)
+ else:
+ module.fail_json(
+ msg="Specify vcn_id with state as 'absent' to delete a VCN."
+ )
+
+ else:
+ if vcn_id is not None:
+ result = update_vcn(virtual_network_client, module)
+ else:
+ result = oci_utils.check_and_create_resource(
+ resource_type="vcn",
+ create_fn=create_vcn,
+ kwargs_create={
+ "virtual_network_client": virtual_network_client,
+ "module": module,
+ },
+ list_fn=virtual_network_client.list_vcns,
+ kwargs_list={"compartment_id": module.params["compartment_id"]},
+ module=module,
+ model=CreateVcnDetails(),
+ exclude_attributes=exclude_attributes,
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
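+
+
+# Illustrative sketch only (not part of the module above): create_vcn() copies
+# module parameters onto the SDK model by walking its attribute_map. The class
+# and dict below are hypothetical stand-ins for CreateVcnDetails and
+# module.params, shown to make the reflection pattern explicit.
+class _FakeVcnDetails(object):
+    attribute_map = {'cidr_block': 'cidrBlock', 'display_name': 'displayName'}
+
+    def __init__(self):
+        self.cidr_block = None
+        self.display_name = None
+
+
+def _populate(details, params):
+    # Copy only the parameters the SDK model actually declares.
+    for attribute in details.attribute_map.keys():
+        if attribute in params:
+            setattr(details, attribute, params[attribute])
+    return details
+
+# _populate(_FakeVcnDetails(), {'cidr_block': '10.0.0.0/16', 'state': 'present'})
+# sets only cidr_block; unrelated keys such as 'state' are ignored.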
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
new file mode 100644
index 00000000..7ed3a5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+ - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+ an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described at U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh >= 0.4.8
+options:
+ name:
+ required: true
+ description:
+ - The IP address to manage (can be a single IP like 1.1.1.1
+ or a block like 1.1.1.1/28)
+ type: str
+ service:
+ required: true
+ description:
+ - The name of the OVH service this IP address should be routed to
+ type: str
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+ - If true, the module will wait for the IP address to be moved.
+ If false, exit without waiting. The taskId will be returned
+ in the module output.
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not 0, the module will wait for this task id to be
+ completed. Use wait_task_completion if you want to wait for
+ completion of a previously executed task with
+ wait_completion=false. You can execute this module repeatedly on
+ a list of failover IPs using wait_completion=false (see examples)
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+ type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_completion: false
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+ register: moved
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_task_completion: "{{moved.taskId}}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while client.get('/ip/{0}/task'.format(quote_plus(name)),
+ function='genericMoveFloatingIp',
+ status='todo'):
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
+ currentTimeout = timeout
+ while True:
+ task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+ if task['status'] == 'done':
+ return True
+ time.sleep(5) # Delay for 5 sec because completion can take a while; do not hammer the API
+ currentTimeout -= 5
+ if currentTimeout < 0:
+ return False
+ return True
+
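+
+# Illustrative sketch only (not part of the module above): both wait helpers
+# follow the same poll-until-done-or-timeout pattern; the callable below is a
+# hypothetical stand-in for the OVH API check they perform.
+def _poll_until(is_done, timeout, interval=1):
+    remaining = timeout
+    while not is_done():
+        time.sleep(interval)
+        remaining -= interval
+        if remaining < 0:
+            return False
+    return True
+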
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service=dict(required=True),
+ endpoint=dict(required=True),
+ wait_completion=dict(default=True, type='bool'),
+ wait_task_completion=dict(default=0, type='int'),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ service = module.params.get('service')
+ timeout = module.params.get('timeout')
+ wait_completion = module.params.get('wait_completion')
+ wait_task_completion = module.params.get('wait_task_completion')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the IP failover address exists
+ try:
+ ips = client.get('/ip', ip=name, type='failover')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of ips, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in ips and '{0}/32'.format(name) not in ips:
+ module.fail_json(msg='IP {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the properties '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if ipproperties['routedTo']['serviceName'] != service:
+ if not module.check_mode:
+ if wait_task_completion == 0:
+ # Move the IP and get the created taskId
+ task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+ taskId = task['taskId']
+ result['moved'] = True
+ else:
+ # Just wait for the given taskId to be completed
+ taskId = wait_task_completion
+ result['moved'] = False
+ result['taskId'] = taskId
+ if wait_completion or wait_task_completion != 0:
+ if not waitForTaskDone(client, name, taskId, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of move ip to service'.format(timeout))
+ result['waited'] = True
+ else:
+ result['waited'] = False
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 00000000..965a499c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described at U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+ - The internal name of the LoadBalancing IP (ip-X.X.X.X)
+ type: str
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ type: str
+ probe:
+ default: 'none'
+ choices: ['none', 'http', 'icmp' , 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend
+ type: int
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed.
+ type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1'
+ ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+- name: Remove a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
+ ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module '
+ 'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if (backendProperties['weight'] != weight):
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if (backendProperties['probe'] != probe):
+ # Change probe
+ backendProperties['probe'] = probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Creates backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+ main()
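+
+
+# Illustrative sketch only (not part of the module above): main() only updates
+# the backend properties that differ from the desired state. The helper and
+# sample dicts below are hypothetical, shown to make that idempotency check
+# explicit.
+def _changed_fields(actual, desired):
+    return dict((key, value) for key, value in desired.items() if actual.get(key) != value)
+
+# _changed_fields({'weight': 8, 'probe': 'none'}, {'weight': 10, 'probe': 'none'})
+# returns {'weight': 10}; an empty dict means the backend already matches.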
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
new file mode 100644
index 00000000..75c70a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+ - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it).
+requirements: [ "ovh" ]
+options:
+ project_id:
+ required: true
+ type: str
+ description:
+ - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+ instance_id:
+ required: true
+ type: str
+ description:
+ - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+ endpoint:
+ type: str
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ application_key:
+ type: str
+ description:
+ - The applicationKey to use
+ application_secret:
+ type: str
+ description:
+ - The application secret to use
+ consumer_key:
+ type: str
+ description:
+ - The consumer key to use
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+ community.general.ovh_monthly_billing:
+ project_id: 0c727a20aa144485b70c44dee9123b46
+ instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get the OpenStack cloud ID and instance ID; OVH uses them in its API
+- name: Get openstack cloud ID and instance ID
+ os_server_info:
+ cloud: myProjectName
+ region_name: myRegionName
+ server: myServerName
+ register: openstack_servers
+
+- name: Use IDs
+ community.general.ovh_monthly_billing:
+ project_id: "{{ openstack_servers.0.tenant_id }}"
+ instance_id: "{{ openstack_servers.0.id }}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import os
+import sys
+import traceback
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+ OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True),
+ instance_id=dict(required=True),
+ endpoint=dict(required=False),
+ application_key=dict(required=False, no_log=True),
+ application_secret=dict(required=False, no_log=True),
+ consumer_key=dict(required=False, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get parameters
+ project_id = module.params.get('project_id')
+ instance_id = module.params.get('instance_id')
+ endpoint = module.params.get('endpoint')
+ application_key = module.params.get('application_key')
+ application_secret = module.params.get('application_secret')
+ consumer_key = module.params.get('consumer_key')
+ project = ""
+ instance = ""
+ ovh_billing_status = ""
+
+ if not HAS_OVH:
+ module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+ # Connect to OVH API
+ client = ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+ # Check that the instance exists
+ try:
+ project = client.get('/cloud/project/{0}'.format(project_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+ # Check that the instance exists
+ try:
+ instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+ # Is monthlyBilling already enabled or pending?
+ if instance['monthlyBilling'] is not None:
+ if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+ module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Dry Run!")
+
+ try:
+ ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+ module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+ except APIError as apiError:
+ module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+ # We should never reach here
+ module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+ main()
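+
+
+# Illustrative sketch only (not part of the module above): because monthly
+# billing cannot be disabled, the module only calls the API when the current
+# status is neither active nor pending. The helper below is a hypothetical
+# restatement of that check.
+def _needs_activation(monthly_billing):
+    if monthly_billing is None:
+        return True
+    return monthly_billing.get('status') not in ('ok', 'activationPending')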
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py
new file mode 100644
index 00000000..e560e13e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_facts
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all affinity labels whose names start with label
+ ovirt_affinity_label_info:
+ name: label*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+ Gather information about all affinity labels which are assigned to VMs
+ whose names start with postgres
+ ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+ Gather information about all affinity labels which are assigned to hosts
+ whose names start with west
+ ovirt_affinity_label_info:
+ host: west*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+ Gather information about all affinity labels which are assigned to hosts
+ whose names start with west or VMs whose names start with postgres
+ ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+ description: "List of dictionaries describing the affinity labels. Affinity labels attributes are mapped to dictionary keys,
+ all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
+ all_labels = affinity_labels_service.list()
+ if module.params['name']:
+ labels.extend([
+ l for l in all_labels
+ if fnmatch.fnmatch(l.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=l,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for l in labels
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
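+
+
+# Illustrative sketch only (not part of the module above): the name, host and
+# vm filters are plain fnmatch glob patterns applied to object names. The
+# sample names below are hypothetical.
+def _matching_names(names, pattern):
+    return [n for n in names if fnmatch.fnmatch(n, pattern)]
+
+# _matching_names(['label_db', 'label_web', 'prod_cache'], 'label*')
+# returns ['label_db', 'label_web'].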
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py
new file mode 100644
index 00000000..4085a702
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_api_facts.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_facts
+short_description: Retrieve information about the oVirt/RHV API
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_api_info) instead.
+description:
+ - "Retrieve information about the oVirt/RHV API."
+notes:
+ - "This module returns a variable C(ovirt_api),
+ which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information oVirt API
+ ovirt_api_info:
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+ Api attributes are mapped to dictionary keys,
+ all API attributes can be found at following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py
new file mode 100644
index 00000000..e4916a26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_cluster_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_facts
+short_description: Retrieve information about one or more oVirt/RHV clusters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all clusters whose names start with production
+ ovirt_cluster_info:
+ pattern:
+ name: 'production*'
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+ all clusters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py
new file mode 100644
index 00000000..0de72729
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_facts
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all data centers whose names start with production
+ ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+ all datacenters attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py
new file mode 100644
index 00000000..6e0c9f69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_disk_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_facts
+short_description: Retrieve information about one or more oVirt/RHV disks
+author: "Katerina Koukiou (@KKoukiou)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search Disk X from storage Y use following pattern:
+ name=X and storage.name=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all Disks whose names start with centos
+ ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+ description: "List of dictionaries describing the Disks. Disk attributes are mapped to dictionary keys,
+ all Disks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py
new file mode 100644
index 00000000..50a20654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_event_facts.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_facts
+short_description: Retrieve information about one or more oVirt/RHV events
+author: "Chris Keller (@nasx)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_event_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+options:
+ case_sensitive:
+ description:
+ - "Indicates if the search performed using the search parameter should be performed taking case
+ into account. The default value is true, which means that case is taken into account. If you
+ want to search ignoring case set it to false."
+ required: false
+ default: true
+ type: bool
+
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+ - "For example to search for events of severity alert use the following pattern: severity=alert"
+ required: false
+ type: str
+
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+
+ wait:
+ description:
+ - "If True wait for the response."
+ required: false
+ default: true
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt_event_info:
+ search: "severity=alert"
+ register: result
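+
+# An additional illustrative example (the index value is arbitrary); only events
+# with an index greater than the given one are returned.
+- name: Return events newer than a known event index
+  ovirt_event_info:
+    from_: 1023
+  register: result
+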
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+ returned: On success."
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_event_facts', 'community.general.ovirt_event_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait']
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py
new file mode 100644
index 00000000..f9ac8b97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_facts
+short_description: Retrieve information about one or more oVirt/RHV external providers
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_external_provider_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ type: str
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all image external providers named glance
+ ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def _external_provider_service(provider_type, system_service):
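+    # Map the module's 'type' parameter to the matching top-level provider service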
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=['os_image', 'os_network', 'os_volume', 'foreman'],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_external_provider_facts', 'community.general.ovirt_external_provider_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list()
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py
new file mode 100644
index 00000000..40b037f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_group_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_facts
+short_description: Retrieve information about one or more oVirt/RHV groups
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_group_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search group X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all groups which names start with admin
+ ovirt_group_info:
+ pattern: name=admin*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_group_facts', 'community.general.ovirt_group_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py
new file mode 100644
index 00000000..ea585e90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_facts.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_facts
+short_description: Retrieve information about one or more oVirt/RHV hosts
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all hosts which names start with host and belong to data center west
+ ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+
+- name: Gather information about all hosts with cluster version 4.2
+ ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+ # Filtering by cluster version returns only those which have same cluster version as input
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
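+        # Build a "major.minor" string from the cluster's version for comparison with the requested one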
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_facts', 'community.general.ovirt_host_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content']
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py
new file mode 100644
index 00000000..62af3e4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_facts
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+author: "Daniel Erez (@derez)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_storage_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about HostStorages with specified target and address
+ ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
+def _login(host_service, iscsi):
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
+def _get_storage_type(params):
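+    # Infer the storage type from whichever of the 'iscsi'/'fcp' options was supplied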
+ for sd_type in ['iscsi', 'fcp']:
+ if params.get(sd_type) is not None:
+ return sd_type
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_storage_facts', 'community.general.ovirt_host_storage_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ storage_type = _get_storage_type(module.params)
+ host_service = hosts_service.host_service(host_id)
+
+ if storage_type == 'iscsi':
+ # Login
+ iscsi = module.params.get('iscsi')
+ _login(host_service, iscsi)
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list()
+
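+        # Keep only the storages matching the requested type (and, for iSCSI, the requested target)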
+        if storage_type == 'iscsi':
+            filtered_host_storages = [host_storage for host_storage in host_storages
+                                      if host_storage.type == otypes.StorageType.ISCSI]
+            if 'target' in iscsi:
+                filtered_host_storages = [host_storage for host_storage in filtered_host_storages
+                                          if iscsi.get('target') == host_storage.logical_units[0].target]
+        elif storage_type == 'fcp':
+            filtered_host_storages = [host_storage for host_storage in host_storages
+                                      if host_storage.type == otypes.StorageType.FCP]
+        else:
+            # Neither 'iscsi' nor 'fcp' was supplied; fall back to the unfiltered list
+            filtered_host_storages = host_storages
+
+ result = dict(
+ ovirt_host_storages=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+            ) for c in filtered_host_storages
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py
new file mode 100644
index 00000000..781dd858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_network_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_facts
+short_description: Retrieve information about one or more oVirt/RHV networks
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_network_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all networks which names start with vlan1
+ ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all networks attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_network_facts', 'community.general.ovirt_network_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py
new file mode 100644
index 00000000..2cc1194f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_nic_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_nic_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ required: true
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all NICs which names start with eth for VM named centos7
+ ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NICs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_nic_facts', 'community.general.ovirt_nic_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ nics_service = vms_service.service(vm.id).nics_service()
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list()
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py
new file mode 100644
index 00000000..52ba3624
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_permission_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_facts
+short_description: Retrieve information about one or more oVirt/RHV permissions
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all permissions of user with username john
+ ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+ all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def _permissions_service(connection, module):
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
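+        # Users are looked up through the engine search backend using 'usrname=<user_name>@<authz_name>'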
+ entity = next(
+ iter(
+ service.list(
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
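+        # Flatten each permission: every linked struct attribute (user, group, role, the
+        # target object, ...) is exposed as '<attribute>' (its name) plus '<attribute>_id'.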
+ for p in permissions_service.list():
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py
new file mode 100644
index 00000000..b2424305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_quota_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_facts
+short_description: Retrieve information about one or more oVirt/RHV quotas
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_quota_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about quota named C<myquota> in Default datacenter
+ ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+ description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+ all quotas attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_quota_facts', 'community.general.ovirt_quota_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py
new file mode 100644
index 00000000..eeaeb610
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_facts
+short_description: Retrieve information about one or more oVirt scheduling policies
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_scheduling_policy_info) instead.
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ name:
+ description:
+ - "Name of the scheduling policy, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all scheduling policies with name InClusterUpgrade
+ ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+ description: "List of dictionaries describing the scheduling policies.
+                  Scheduling policy attributes are mapped to dictionary keys,
+                  all scheduling policy attributes can be found at the following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_scheduling_policy_facts', 'community.general.ovirt_scheduling_policy_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
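+        # A name glob takes precedence over an explicit id; with neither, all policies are listed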
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list()
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py
new file mode 100644
index 00000000..73746883
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_snapshot_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM with snapshot."
+ required: true
+ description:
+ description:
+ - "Description of the snapshot, can be used as glob expression."
+ snapshot_id:
+ description:
+ - "Id of the snapshot we want to retrieve information about."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all snapshots which description start with update for VM named centos7
+ ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+ description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
+ all snapshot attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_snapshot_facts', 'community.general.ovirt_snapshot_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
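+        # A description glob takes precedence over an explicit snapshot_id; with neither, all snapshots are listed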
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list()
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list()
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
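+ # Log out only when the connection was established with username/password;
+ # a pre-acquired token passed in 'auth' is left valid for reuse.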
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py
new file mode 100644
index 00000000..b9d814c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_facts
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_domain_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: >
+ Gather information about all storage domains whose names
+ start with data and belong to data center west
+ ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+ description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_domain_facts', 'community.general.ovirt_storage_domain_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
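+ # The 'pattern' string is passed verbatim to the engine's search backend
+ # (e.g. name=data* and datacenter=west), as described in the option docs.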
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py
new file mode 100644
index 00000000..1c583278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered templates which contain one or more
+ disks which reside on a storage domain or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates which relate to a storage domain and are unregistered
+ ovirt_storage_template_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+ description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+ all Templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_template_facts', 'community.general.ovirt_storage_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # List either the unregistered templates on the storage domain or the registered ones:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(unregistered=True)
+ else:
+ templates = templates_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
new file mode 100644
index 00000000..d0247948
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+ disks which reside on a storage domain or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs which relate to a storage domain and are unregistered
+ ovirt_storage_vm_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_vm_facts', 'community.general.ovirt_storage_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # List either the unregistered VMs on the storage domain or the registered ones:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True)
+ else:
+ vms = vms_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
new file mode 100644
index 00000000..c6e9b744
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_tag_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_facts
+short_description: Retrieve information about one or more oVirt/RHV tags
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_tag_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which tags should be listed."
+ host:
+ description:
+ - "Name of the host, which tags should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all tags whose names start with tag
+ ovirt_tag_info:
+ name: tag*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags which are assigned to VM postgres
+ ovirt_tag_info:
+ vm: postgres
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags which are assigned to host west
+ ovirt_tag_info:
+ host: west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+ description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
+ all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_tag_facts', 'community.general.ovirt_tag_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list()
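+ # Matches for 'name' (glob), 'host' and 'vm' are accumulated into a single list;
+ # if none of the three filters is supplied, every tag in the system is returned.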
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend([
+ tag for tag in hosts_service.host_service(host.id).tags_service().list()
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ tags.extend([
+ tag for tag in vms_service.vm_service(vm.id).tags_service().list()
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py
new file mode 100644
index 00000000..7595c64a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_template_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates whose names start with centos and belong to data center west
+ ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+ description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+ all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_template_facts', 'community.general.ovirt_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py
new file mode 100644
index 00000000..ce7ab8d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_user_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_facts
+short_description: Retrieve information about one or more oVirt/RHV users
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_user_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search user X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all users whose first names start with john
+ ovirt_user_info:
+ pattern: name=john*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+ description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
+ all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_user_facts', 'community.general.ovirt_user_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py
new file mode 100644
index 00000000..a5182755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vm_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search VM X from cluster Y use following pattern:
+ name=X and cluster=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the virtual machines should be
+ included in the response."
+ type: bool
+ default: false
+ case_sensitive:
+ description:
+ - "If I(true) performed search will take case into account."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ next_run:
+ description:
+ - "Indicates if the returned result describes the virtual machine as it is currently running or if describes
+ the virtual machine with the modifications that have already been performed but that will only come into
+ effect when the virtual machine is restarted. By default the value is set by engine."
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs whose names start with centos and belong to cluster west
+ ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+- name: Gather info about next run configuration of virtual machine named myvm
+ ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+'''
+
+RETURN = '''
+ovirt_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vm_facts', 'community.general.ovirt_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ )
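+ # With 'next_run' set, fetch each matched VM again to get its pending (next-run)
+ # configuration instead of the currently active one.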
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py
new file mode 100644
index 00000000..24842be5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_facts
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vmpool_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all vm pools whose names start with centos
+ ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vmpool_facts', 'community.general.ovirt_vmpool_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
new file mode 100644
index 00000000..c76530f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
+# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
+# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_device
+
+short_description: Manage a bare metal server in the Packet Host.
+
+description:
+ - Manage a bare metal server in the Packet Host (a "device" in API terms).
+ - When the machine is created, it can optionally wait for a public IP address or for the active state.
+ - This module has a dependency on packet >= 1.0.
+ - API is documented at U(https://www.packet.net/developers/api/devices).
+
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+ - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+
+ count:
+ description:
+ - The number of devices to create. Count number can be included in hostname via the %d string formatter.
+ default: 1
+
+ count_offset:
+ description:
+ - From which number to start the count.
+ default: 1
+
+ device_ids:
+ description:
+ - List of device IDs on which to operate.
+
+ tags:
+ description:
+ - List of device tags.
+ - Currently implemented only for device creation.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+
+ facility:
+ description:
+ - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
+
+ features:
+ description:
+ - Dict with "features" for device creation. See Packet API docs for details.
+
+ hostnames:
+ description:
+ - A hostname of a device, or a list of hostnames.
+ - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
+ - If only one hostname is given, it might be expanded to a list if I(count)>1.
+ aliases: [name]
+
+ locked:
+ description:
+ - Whether to lock a created device.
+ default: false
+ aliases: [lock]
+ type: bool
+
+ operating_system:
+ description:
+ - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
+
+ plan:
+ description:
+ - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+
+ state:
+ description:
+ - Desired state of the device.
+ - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+ - If set to C(active), the module call will block until all the specified devices are in state C(active) according to the Packet API, or until I(wait_timeout) expires.
+ choices: [present, absent, active, inactive, rebooted]
+ default: present
+
+ user_data:
+ description:
+ - Userdata blob made available to the machine.
+
+ wait_for_public_IPv:
+ description:
+ - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
+ - If set to 4, wait until a public IPv4 address is assigned to the instance.
+ - If set to 6, wait until a public IPv6 address is assigned to the instance.
+ choices: [4,6]
+
+ wait_timeout:
+ description:
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
+ - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice.
+ default: 900
+ ipxe_script_url:
+ description:
+ - URL of custom iPXE script for provisioning.
+ - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
+ always_pxe:
+ description:
+ - Persist PXE as the first boot option.
+ - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
+ default: false
+ type: bool
+
+
+requirements:
+ - "packet-python >= 1.35"
+
+notes:
+ - Doesn't support check mode.
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: Create 1 device
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ tags: ci-xyz
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+# Create the same device and wait until it is in state "active", (when it's
+# ready for other API operations). Fail if the device is not "active" in
+# 10 minutes.
+
+- name: Create device and wait up to 10 minutes for active state
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+ wait_timeout: 600
+
+- name: Create 3 ubuntu devices called server-01, server-02 and server-03
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: server-%02d
+ count: 3
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+ hosts: localhost
+ tasks:
+ - name: Create 3 devices and register their facts
+ community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_stable
+ plan: baremetal_0
+ facility: ewr1
+ locked: true
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - {{ lookup('file', 'my_packet_sshkey') }}
+ coreos:
+ etcd:
+ discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+ addr: $private_ipv4:4001
+ peer-addr: $private_ipv4:7001
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: Wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: Remove 3 devices by uuid
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ state: absent
+ device_ids:
+ - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+ - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+ - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
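+
+# An additional illustrative task (a sketch, not part of the original examples);
+# the project id is reused from above and the hostname is made up.
+- name: Reboot a single device by hostname
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ state: rebooted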
+'''
+
+RETURN = '''
+changed:
+ description: True if a device was altered in any way (created, modified or removed)
+ type: bool
+ sample: True
+ returned: success
+
+devices:
+ description: Information about each device that was processed
+ type: list
+ sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+ "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
+ "tags": [], "locked": false, "state": "provisioning",
+ "public_ipv6": ""2604:1380:2:5200::3"}]'
+ returned: success
+''' # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+ 'queued',
+ 'provisioning',
+ 'failed',
+ 'powering_on',
+ 'active',
+ 'powering_off',
+ 'inactive',
+ 'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+ """
+ Standard representation for a device as returned by various tasks::
+
+ {
+ 'id': 'device_id'
+ 'hostname': 'device_hostname',
+ 'tags': [],
+ 'locked': false,
+ 'state': 'provisioning',
+ 'ip_addresses': [
+ {
+ "address": "147.75.194.227",
+ "address_family": 4,
+ "public": true
+ },
+ {
+ "address": "2604:1380:2:5200::3",
+ "address_family": 6,
+ "public": true
+ },
+ {
+ "address": "10.100.11.129",
+ "address_family": 4,
+ "public": false
+ }
+ ],
+ "private_ipv4": "10.100.11.129",
+ "public_ipv4": "147.75.194.227",
+ "public_ipv6": "2604:1380:2:5200::3",
+ }
+
+ """
+ device_data = {}
+ device_data['id'] = device.id
+ device_data['hostname'] = device.hostname
+ device_data['tags'] = device.tags
+ device_data['locked'] = device.locked
+ device_data['state'] = device.state
+ device_data['ip_addresses'] = [
+ {
+ 'address': addr_data['address'],
+ 'address_family': addr_data['address_family'],
+ 'public': addr_data['public'],
+ }
+ for addr_data in device.ip_addresses
+ ]
+ # Also include each IP as a key for easier lookup in roles.
+ # Key names:
+ # - public_ipv4
+ # - public_ipv6
+ # - private_ipv4
+ # - private_ipv6 (if there is one)
+ for ipdata in device_data['ip_addresses']:
+ if ipdata['public']:
+ if ipdata['address_family'] == 6:
+ device_data['public_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['public_ipv4'] = ipdata['address']
+ elif not ipdata['public']:
+ if ipdata['address_family'] == 6:
+ # Packet doesn't hand out private IPv6 addresses yet, but maybe one
+ # day it will
+ device_data['private_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['private_ipv4'] = ipdata['address']
+ return device_data
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def listify_string_name_or_id(s):
+ if ',' in s:
+ return s.split(',')
+ else:
+ return [s]
+
+
+def get_hostname_list(module):
+ # hostnames is a list-typed param, so it should already arrive as a list
+ # (and it does, in Ansible 2.2.1), but to be defensive we keep the code
+ # that converts an eventual string to a list
+ hostnames = module.params.get('hostnames')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ if isinstance(hostnames, str):
+ hostnames = listify_string_name_or_id(hostnames)
+ if not isinstance(hostnames, list):
+ raise Exception("name %s is not convertible to list" % hostnames)
+
+ # at this point, hostnames is a list
+ hostnames = [h.strip() for h in hostnames]
+
+ if (len(hostnames) > 1) and (count > 1):
+ _msg = ("If you set count>1, you should only specify one hostname "
+ "with the %d formatter, not a list of hostnames.")
+ raise Exception(_msg)
+
+ if (len(hostnames) == 1) and (count > 0):
+ hostname_spec = hostnames[0]
+ count_range = range(count_offset, count_offset + count)
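+ # A literal %d (or %02d, etc.) in the single hostname is expanded with the counter;
+ # otherwise, for count > 1, a two-digit numeric suffix is appended automatically.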
+ if re.search(r"%\d{0,2}d", hostname_spec):
+ hostnames = [hostname_spec % i for i in count_range]
+ elif count > 1:
+ hostname_spec = '%s%%02d' % hostname_spec
+ hostnames = [hostname_spec % i for i in count_range]
+
+ for hn in hostnames:
+ if not is_valid_hostname(hn):
+ raise Exception("Hostname '%s' does not seem to be valid" % hn)
+
+ if len(hostnames) > MAX_DEVICES:
+ raise Exception("You specified too many hostnames, max is %d" %
+ MAX_DEVICES)
+ return hostnames
+
+
+def get_device_id_list(module):
+ device_ids = module.params.get('device_ids')
+
+ if isinstance(device_ids, str):
+ device_ids = listify_string_name_or_id(device_ids)
+
+ device_ids = [di.strip() for di in device_ids]
+
+ for di in device_ids:
+ if not is_valid_uuid(di):
+ raise Exception("Device ID '%s' does not seem to be valid" % di)
+
+ if len(device_ids) > MAX_DEVICES:
+ raise Exception("You specified too many devices, max is %d" %
+ MAX_DEVICES)
+ return device_ids
+
+
+def create_single_device(module, packet_conn, hostname):
+
+ for param in ('hostnames', 'operating_system', 'plan'):
+ if not module.params.get(param):
+ raise Exception("%s parameter is required for new device."
+ % param)
+ project_id = module.params.get('project_id')
+ plan = module.params.get('plan')
+ tags = module.params.get('tags')
+ user_data = module.params.get('user_data')
+ facility = module.params.get('facility')
+ operating_system = module.params.get('operating_system')
+ locked = module.params.get('locked')
+ ipxe_script_url = module.params.get('ipxe_script_url')
+ always_pxe = module.params.get('always_pxe')
+ if operating_system != 'custom_ipxe':
+ for param in ('ipxe_script_url', 'always_pxe'):
+ if module.params.get(param):
+ raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
+
+ device = packet_conn.create_device(
+ project_id=project_id,
+ hostname=hostname,
+ tags=tags,
+ plan=plan,
+ facility=facility,
+ operating_system=operating_system,
+ userdata=user_data,
+ locked=locked,
+ ipxe_script_url=ipxe_script_url,
+ always_pxe=always_pxe)
+ return device
+
+
+def refresh_device_list(module, packet_conn, devices):
+ device_ids = [d.id for d in devices]
+ new_device_list = get_existing_devices(module, packet_conn)
+ return [d for d in new_device_list if d.id in device_ids]
+
+
+def wait_for_devices_active(module, packet_conn, watched_devices):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ refreshed = watched_devices
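+ # Poll the API every 5 seconds until every watched device reports 'active',
+ # or fail once the timeout elapses.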
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, watched_devices)
+ if all(d.state == 'active' for d in refreshed):
+ return refreshed
+ time.sleep(5)
+ raise Exception("Waiting for state \"active\" timed out for devices: %s"
+ % [d.hostname for d in refreshed if d.state != "active"])
+
+
+def wait_for_public_IPv(module, packet_conn, created_devices):
+
+ def has_public_ip(addr_list, ip_v):
+ return any([a['public'] and a['address_family'] == ip_v and
+ a['address'] for a in addr_list])
+
+ def all_have_public_ip(ds, ip_v):
+ return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
+
+ address_family = module.params.get('wait_for_public_IPv')
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, created_devices)
+ if all_have_public_ip(refreshed, address_family):
+ return refreshed
+ time.sleep(5)
+
+ raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
+ % (address_family, [d.hostname for d in created_devices]))
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ return packet_conn.list_devices(
+ project_id, params={
+ 'per_page': MAX_DEVICES})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_ids'):
+ device_id_list = get_device_id_list(module)
+ return {'ids': device_id_list, 'hostnames': []}
+ elif module.params.get('hostnames'):
+ hostname_list = get_hostname_list(module)
+ return {'hostnames': hostname_list, 'ids': []}
+
+
+def act_on_devices(module, packet_conn, target_state):
+ specified_identifiers = get_specified_device_identifiers(module)
+ existing_devices = get_existing_devices(module, packet_conn)
+ changed = False
+ create_hostnames = []
+ if target_state in ['present', 'active', 'rebooted']:
+ # states where we might create non-existing specified devices
+ existing_devices_names = [ed.hostname for ed in existing_devices]
+ create_hostnames = [hn for hn in specified_identifiers['hostnames']
+ if hn not in existing_devices_names]
+
+ process_devices = [d for d in existing_devices
+ if (d.id in specified_identifiers['ids']) or
+ (d.hostname in specified_identifiers['hostnames'])]
+
+ if target_state != 'present':
+ _absent_state_map = {}
+ for s in PACKET_DEVICE_STATES:
+ _absent_state_map[s] = packet.Device.delete
+
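+        # state_map: target state -> {current device state: packet API call}.
+        # A value of None means no call is needed for that transition (e.g. a
+        # provisioning device becomes active on its own; we just wait for it).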
+ state_map = {
+ 'absent': _absent_state_map,
+ 'active': {'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ 'inactive': {'active': packet.Device.power_off},
+ 'rebooted': {'active': packet.Device.reboot,
+ 'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ }
+
+    # Do the non-creation actions first; it might be faster
+ for d in process_devices:
+ if d.state == target_state:
+ continue
+ if d.state in state_map[target_state]:
+ api_operation = state_map[target_state].get(d.state)
+ if api_operation is not None:
+ api_operation(d)
+ changed = True
+ else:
+ _msg = (
+ "I don't know how to process existing device %s from state %s "
+ "to state %s" %
+ (d.hostname, d.state, target_state))
+ raise Exception(_msg)
+
+    # Finally, create the missing devices
+ created_devices = []
+ if create_hostnames:
+ created_devices = [create_single_device(module, packet_conn, n)
+ for n in create_hostnames]
+ if module.params.get('wait_for_public_IPv'):
+ created_devices = wait_for_public_IPv(
+ module, packet_conn, created_devices)
+ changed = True
+
+ processed_devices = created_devices + process_devices
+ if target_state == 'active':
+ processed_devices = wait_for_devices_active(
+ module, packet_conn, processed_devices)
+
+ return {
+ 'changed': changed,
+ 'devices': [serialize_device(d) for d in processed_devices]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ count=dict(type='int', default=1),
+ count_offset=dict(type='int', default=1),
+ device_ids=dict(type='list'),
+ facility=dict(),
+ features=dict(type='dict'),
+ hostnames=dict(type='list', aliases=['name']),
+ tags=dict(type='list', elements='str'),
+ locked=dict(type='bool', default=False, aliases=['lock']),
+ operating_system=dict(),
+ plan=dict(),
+ project_id=dict(required=True),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ user_data=dict(default=None),
+ wait_for_public_IPv=dict(type='int', choices=[4, 6]),
+ wait_timeout=dict(type='int', default=900),
+ ipxe_script_url=dict(default=''),
+ always_pxe=dict(type='bool', default=False),
+ ),
+ required_one_of=[('device_ids', 'hostnames',)],
+ mutually_exclusive=[
+ ('hostnames', 'device_ids'),
+ ('count', 'device_ids'),
+ ('count_offset', 'device_ids'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_devices(module, packet_conn, state))
+ except Exception as e:
+ module.fail_json(msg='failed to set device state %s, error: %s' %
+ (state, to_native(e)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
new file mode 100644
index 00000000..fbc12698
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_ip_subnet
+
+short_description: Assign IP subnet to a bare metal server.
+
+description:
+ - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
+    - IPv4 subnets must come from an already reserved block.
+    - IPv6 subnets must come from a publicly routable /56 block allocated to your project.
+ - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ hostname:
+ description:
+ - A hostname of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ device_id:
+ description:
+ - UUID of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ project_id:
+ description:
+ - UUID of a project of the device to/from which to assign/remove a subnet.
+ type: str
+
+ device_count:
+ description:
+ - The number of devices to retrieve from the project. The max allowed value is 1000.
+ - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
+ default: 100
+ type: int
+
+ cidr:
+ description:
+ - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
+ aliases: [name]
+ type: str
+ required: true
+
+ state:
+ description:
+ - Desired state of the IP subnet on the specified device.
+    - With state == C(present), you must specify either hostname or device_id. The subnet with the given CIDR will then be assigned to the specified device.
+    - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from the specified device.
+ - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+requirements:
+ - "packet-python >= 1.35"
+ - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostname: myserver
+ cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: Unassign IP address from any device in your project
+ hosts: localhost
+ tasks:
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ cidr: "147.75.201.78/32"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: True if an IP address assignment was altered in any way (created or removed).
+ type: bool
+ sample: True
+ returned: success
+
+device_id:
+ type: str
+ description: UUID of the device associated with the specified IP address.
+ returned: success
+
+subnet:
+ description: Dict with data about the handled IP subnet.
+ type: dict
+ sample:
+ address: 147.75.90.241
+ address_family: 4
+ assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+ cidr: 31
+ created_at: '2017-08-07T15:15:30Z'
+ enabled: True
+ gateway: 147.75.90.240
+ href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+ id: 1eda960-0a16-4c0f-b196-f3dc4928529f
+ manageable: True
+ management: True
+ netmask: 255.255.255.254
+ network: 147.75.90.240
+ public: True
+ returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
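+# NAME_RE matches a single DNS label (alphanumeric, hyphens only in the middle);
+# HOSTNAME_RE matches dot-separated labels, e.g. "web-01.example.com".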
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
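+    # True only for canonical (lowercase, hyphenated) version-4 UUID strings.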
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ if not is_valid_uuid(project_id):
+ raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+ per_page = module.params.get('device_count')
+ return packet_conn.list_devices(
+ project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_id'):
+ _d_id = module.params.get('device_id')
+ if not is_valid_uuid(_d_id):
+ raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+ return {'device_id': _d_id, 'hostname': None}
+ elif module.params.get('hostname'):
+ _hn = module.params.get('hostname')
+ if not is_valid_hostname(_hn):
+ raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+ return {'hostname': _hn, 'device_id': None}
+ else:
+ return {'hostname': None, 'device_id': None}
+
+
+def parse_subnet_cidr(cidr):
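+    # Split an "address/prefix_len" string, e.g. "147.75.201.78/32" into
+    # ("147.75.201.78", 32).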
+ if "/" not in cidr:
+ raise Exception("CIDR expression in wrong format, must be address/prefix_len")
+ addr, prefixlen = cidr.split("/")
+ try:
+ prefixlen = int(prefixlen)
+ except ValueError:
+        raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr))
+ return addr, prefixlen
+
+
+def act_on_assignment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ specified_cidr = module.params.get("cidr")
+ address, prefixlen = parse_subnet_cidr(specified_cidr)
+
+ specified_identifier = get_specified_device_identifiers(module)
+
+ if module.check_mode:
+ return return_dict
+
+ if (specified_identifier['hostname'] is None) and (
+ specified_identifier['device_id'] is None):
+ if target_state == 'absent':
+ # The special case to release the IP from any assignment
+ for d in get_existing_devices(module, packet_conn):
+ for ia in d.ip_addresses:
+ if address == ia['address'] and prefixlen == ia['cidr']:
+ packet_conn.call_api(ia['href'], "DELETE")
+ return_dict['changed'] = True
+ return_dict['subnet'] = ia
+ return_dict['device_id'] = d.id
+ return return_dict
+    raise Exception("To assign an address, you must specify either the "
+                    "target device ID or the target's unique hostname.")
+
+ if specified_identifier['device_id'] is not None:
+ device = packet_conn.get_device(specified_identifier['device_id'])
+ else:
+ all_devices = get_existing_devices(module, packet_conn)
+ hn = specified_identifier['hostname']
+ matching_devices = [d for d in all_devices if d.hostname == hn]
+        if len(matching_devices) > 1:
+            raise Exception("There is more than one device matching the given hostname {0}".format(hn))
+        if len(matching_devices) == 0:
+            raise Exception("There is no device matching the given hostname {0}".format(hn))
+ device = matching_devices[0]
+
+ return_dict['device_id'] = device.id
+ assignment_dicts = [i for i in device.ip_addresses
+ if i['address'] == address and i['cidr'] == prefixlen]
+ if len(assignment_dicts) > 1:
+ raise Exception("IP address {0} is assigned more than once for device {1}".format(
+ specified_cidr, device.hostname))
+
+ if target_state == "absent":
+ if len(assignment_dicts) == 1:
+ packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
+ return_dict['subnet'] = assignment_dicts[0]
+ return_dict['changed'] = True
+ elif target_state == "present":
+ if len(assignment_dicts) == 0:
+ new_assignment = packet_conn.call_api(
+ "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
+ return_dict['changed'] = True
+ return_dict['subnet'] = new_assignment
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ device_id=dict(type='str'),
+ hostname=dict(type='str'),
+ project_id=dict(type='str'),
+ device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
+ cidr=dict(type='str', required=True, aliases=['name']),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[('hostname', 'device_id')],
+ required_one_of=[['hostname', 'device_id', 'project_id']],
+ required_by=dict(
+ hostname=('project_id',),
+ ),
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_assignment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
new file mode 100644
index 00000000..38d7ca76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2019, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_project
+
+short_description: Create/delete a project in Packet host.
+
+description:
+ - Create/delete a project in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#projects).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ payment_method:
+ description:
+      - Payment method is the name of one of the payment methods available to your user.
+ - When blank, the API assumes the default payment method.
+ type: str
+
+ auth_token:
+ description:
+      - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Name for/of the project.
+ type: str
+
+ org_id:
+ description:
+ - UUID of the organization to create a project for.
+ - When blank, the API assumes the default organization.
+ type: str
+
+ id:
+ description:
+ - UUID of the project which you want to remove.
+ type: str
+
+ custom_data:
+ description:
+ - Custom data about the project to create.
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.40"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create new project
+  hosts: localhost
+  tasks:
+    - community.general.packet_project:
+        name: "new project"
+
+- name: Create new project within non-default organization
+  hosts: localhost
+  tasks:
+    - community.general.packet_project:
+        name: "my org project"
+        org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
+
+- name: Remove project by id
+  hosts: localhost
+  tasks:
+    - community.general.packet_project:
+        state: absent
+        id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+
+- name: Create new project with non-default billing method
+  hosts: localhost
+  tasks:
+    - community.general.packet_project:
+        name: "newer project"
+        payment_method: "the other visa"
+'''
+
+RETURN = '''
+changed:
+ description: True if a project was created or removed.
+ type: bool
+ sample: True
+ returned: success
+
+name:
+ description: Name of addressed project.
+ type: str
+ returned: success
+
+id:
+ description: UUID of addressed project.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+ result_dict = {'changed': False}
+ given_id = module.params.get('id')
+ given_name = module.params.get('name')
+ if given_id:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_id == p.id]
+ else:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_name == p.name]
+
+ if target_state == 'present':
+ if len(matching_projects) == 0:
+ org_id = module.params.get('org_id')
+ custom_data = module.params.get('custom_data')
+ payment_method = module.params.get('payment_method')
+
+ if not org_id:
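+                # Without org_id, POST to the top-level "projects" endpoint;
+                # the API then files the project under the default organization.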
+ params = {
+ "name": given_name,
+ "payment_method_id": payment_method,
+ "customdata": custom_data
+ }
+ new_project_data = packet_conn.call_api("projects", "POST", params)
+ new_project = packet.Project(new_project_data, packet_conn)
+ else:
+ new_project = packet_conn.create_organization_project(
+ org_id=org_id,
+ name=given_name,
+ payment_method_id=payment_method,
+ customdata=custom_data
+ )
+
+ result_dict['changed'] = True
+ matching_projects.append(new_project)
+
+ result_dict['name'] = matching_projects[0].name
+ result_dict['id'] = matching_projects[0].id
+ else:
+ if len(matching_projects) > 1:
+            _msg = ("More than one project matched for module call with state = absent: "
+ "{0}".format(to_native(matching_projects)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_projects) == 1:
+ p = matching_projects[0]
+ result_dict['name'] = p.name
+ result_dict['id'] = p.id
+ result_dict['changed'] = True
+ try:
+ p.delete()
+ except Exception as e:
+ _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+ p.name, p.id, to_native(e)))
+ module.fail_json(msg=_msg)
+ return result_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ org_id=dict(type='str'),
+ payment_method=dict(type='str'),
+ custom_data=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id",)],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ]
+ )
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_project(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
new file mode 100644
index 00000000..73233d89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host.
+description:
+ - Create/delete an SSH key in Packet host.
+ - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ label:
+ description:
+      - Label for the key. If you keep it empty, it will be read from the key string.
+ id:
+ description:
+ - UUID of the key which you want to remove.
+ fingerprint:
+ description:
+ - Fingerprint of the key which you want to remove.
+ key:
+ description:
+ - Public Key string ({type} {base64 encoded key} {description}).
+ key_file:
+ description:
+ - File with the public key.
+
+requirements:
+ - "python >= 2.6"
+ - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create sshkey from string
+  hosts: localhost
+  tasks:
+    - community.general.packet_sshkey:
+        key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: Create sshkey from file
+  hosts: localhost
+  tasks:
+    - community.general.packet_sshkey:
+        label: key from file
+        key_file: ~/ff.pub
+
+- name: Remove sshkey by id
+  hosts: localhost
+  tasks:
+    - community.general.packet_sshkey:
+        state: absent
+        id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+  description: True if an sshkey was created or removed.
+ type: bool
+ sample: True
+ returned: always
+sshkeys:
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample: [
+ {
+ "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+ "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+ "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+ "label": "mynewkey33"
+ }
+ ]
+ returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+ sshkey_data = {}
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
+ for name in copy_keys:
+ sshkey_data[name] = getattr(sshkey, name)
+ return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def load_key_string(key_str):
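+    # Parse a public key string of the form "<type> <base64 key> [<comment>]";
+    # the optional third field is used as the key label.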
+ ret_dict = {}
+ key_str = key_str.strip()
+ ret_dict['key'] = key_str
+ cut_key = key_str.split()
+ if len(cut_key) in [2, 3]:
+ if len(cut_key) == 3:
+ ret_dict['label'] = cut_key[2]
+ else:
+ raise Exception("Public key %s is in wrong format" % key_str)
+ return ret_dict
+
+
+def get_sshkey_selector(module):
+ key_id = module.params.get('id')
+ if key_id:
+ if not is_valid_uuid(key_id):
+ raise Exception("sshkey ID %s is not valid UUID" % key_id)
+ selecting_fields = ['label', 'fingerprint', 'id', 'key']
+ select_dict = {}
+ for f in selecting_fields:
+ if module.params.get(f) is not None:
+ select_dict[f] = module.params.get(f)
+
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as _file:
+ loaded_key = load_key_string(_file.read())
+ select_dict['key'] = loaded_key['key']
+ if module.params.get('label') is None:
+ if loaded_key.get('label'):
+ select_dict['label'] = loaded_key['label']
+
+ def selector(k):
+ if 'key' in select_dict:
+ # if key string is specified, compare only the key strings
+ return k.key == select_dict['key']
+ else:
+ # if key string not specified, all the fields must match
+ return all([select_dict[f] == getattr(k, f) for f in select_dict])
+ return selector
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+ selector = get_sshkey_selector(module)
+ existing_sshkeys = packet_conn.list_ssh_keys()
+    matching_sshkeys = list(filter(selector, existing_sshkeys))
+ changed = False
+ if target_state == 'present':
+ if matching_sshkeys == []:
+            # there is no key matching the fields from the module call
+            # => create the key from the given label and key string
+ newkey = {}
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as f:
+ newkey = load_key_string(f.read())
+ if module.params.get('key'):
+ newkey = load_key_string(module.params.get('key'))
+ if module.params.get('label'):
+ newkey['label'] = module.params.get('label')
+ for param in ('label', 'key'):
+ if param not in newkey:
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
+ raise Exception(_msg)
+ matching_sshkeys = []
+ new_key_response = packet_conn.create_ssh_key(
+ newkey['label'], newkey['key'])
+ changed = True
+
+ matching_sshkeys.append(new_key_response)
+ else:
+ # state is 'absent' => delete matching keys
+ for k in matching_sshkeys:
+ try:
+ k.delete()
+ changed = True
+ except Exception as e:
+                _msg = ("while trying to remove sshkey %s, id %s, "
+                        "got error: %s" %
+                        (k.label, k.id, e))
+ raise Exception(_msg)
+
+ return {
+ 'changed': changed,
+ 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ label=dict(type='str', aliases=['name'], default=None),
+ id=dict(type='str', default=None),
+ fingerprint=dict(type='str', default=None),
+ key=dict(type='str', default=None, no_log=True),
+ key_file=dict(type='path', default=None),
+ ),
+ mutually_exclusive=[
+ ('label', 'id'),
+ ('label', 'fingerprint'),
+ ('id', 'fingerprint'),
+ ('key', 'fingerprint'),
+ ('key', 'id'),
+ ('key_file', 'key'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ try:
+ module.exit_json(**act_on_sshkeys(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(msg='failed to set sshkey state: %s' % str(e))
+ else:
+ module.fail_json(msg='%s is not a valid state for this module' % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
new file mode 100644
index 00000000..2966139a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_volume
+
+short_description: Create/delete a volume in Packet host.
+
+description:
+ - Create/delete a volume in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#volumes).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Desired state of the volume.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+      - Selector for the API-generated name of the volume.
+ type: str
+
+ description:
+ description:
+ - User-defined description attribute for Packet volume.
+      - "It is used as an idempotent identifier - if a volume with the given
+         description exists, a new one is not created."
+ type: str
+
+ id:
+ description:
+ - UUID of a volume.
+ type: str
+
+ plan:
+ description:
+ - storage_1 for standard tier, storage_2 for premium (performance) tier.
+ - Tiers are described at U(https://www.packet.com/cloud/storage/).
+ choices: ['storage_1', 'storage_2']
+ default: 'storage_1'
+ type: str
+
+ facility:
+ description:
+ - Location of the volume.
+      - Volumes can only be attached to devices in the same location.
+ type: str
+
+ size:
+ description:
+ - Size of the volume in gigabytes.
+ type: int
+
+ locked:
+ description:
+ - Create new volume locked.
+ type: bool
+ default: False
+
+ billing_cycle:
+ description:
+ - Billing cycle for new volume.
+ choices: ['hourly', 'monthly']
+ default: 'hourly'
+ type: str
+
+ snapshot_policy:
+ description:
+ - Snapshot policy for new volume.
+ type: dict
+
+ suboptions:
+ snapshot_count:
+ description:
+ - How many snapshots to keep, a positive integer.
+ required: True
+ type: int
+
+ snapshot_frequency:
+ description:
+ - Frequency of snapshots.
+ required: True
+ choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+ vars:
+ volname: testvol123
+ project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: 'ewr1'
+ plan: 'storage_1'
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+ register: result_create
+
+ - name: Delete volume
+ community.general.packet_volume:
+ id: "{{ result_create.id }}"
+ project_id: "{{ project_id }}"
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: UUID of specified volume
+ type: str
+ returned: success
+ sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+name:
+ description: The API-generated name of the volume resource.
+ type: str
+ returned: if volume is attached/detached to/from some device
+ sample: "volume-a91dc506"
+description:
+ description: The user-defined description of the volume resource.
+ type: str
+ returned: success
+ sample: "Just another volume"
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+VOLUME_PLANS = ["storage_1", "storage_2"]
+VOLUME_STATES = ["present", "absent"]
+BILLING = ["hourly", "monthly"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(module):
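+    # A volume may be addressed by UUID (id), API-generated name, or
+    # user-supplied description; build a predicate for whichever was given.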
+ if module.params.get('id'):
+ i = module.params.get('id')
+ if not is_valid_uuid(i):
+ raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
+ return lambda v: v['id'] == i
+ elif module.params.get('name'):
+ n = module.params.get('name')
+ return lambda v: v['name'] == n
+ elif module.params.get('description'):
+ d = module.params.get('description')
+ return lambda v: v['description'] == d
+
+
+def get_or_fail(params, key):
+ item = params.get(key)
+ if item is None:
+ raise Exception("{0} must be specified for new volume".format(key))
+ return item
+
+
+def act_on_volume(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ s = get_volume_selector(module)
+ project_id = module.params.get("project_id")
+ api_method = "projects/{0}/storage".format(project_id)
+ all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
+ matching_volumes = [v for v in all_volumes if s(v)]
+
+ if target_state == "present":
+ if len(matching_volumes) == 0:
+ params = {
+ "description": get_or_fail(module.params, "description"),
+ "size": get_or_fail(module.params, "size"),
+ "plan": get_or_fail(module.params, "plan"),
+ "facility": get_or_fail(module.params, "facility"),
+ "locked": get_or_fail(module.params, "locked"),
+ "billing_cycle": get_or_fail(module.params, "billing_cycle"),
+ "snapshot_policies": module.params.get("snapshot_policy"),
+ }
+
+ new_volume_data = packet_conn.call_api(api_method, "POST", params)
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = new_volume_data[k]
+
+ else:
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = matching_volumes[0][k]
+
+ else:
+ if len(matching_volumes) > 1:
+ _msg = ("More than one volume matches in module call for absent state: {0}".format(
+ to_native(matching_volumes)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_volumes) == 1:
+ volume = matching_volumes[0]
+ packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = volume[k]
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str', default=None),
+ description=dict(type="str", default=None),
+ name=dict(type='str', default=None),
+ state=dict(choices=VOLUME_STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ project_id=dict(required=True),
+ plan=dict(choices=VOLUME_PLANS, default="storage_1"),
+ facility=dict(type="str"),
+ size=dict(type="int"),
+ locked=dict(type="bool", default=False),
+ snapshot_policy=dict(type='dict', default=None),
+ billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id", "description")],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ('id', 'description'),
+ ('name', 'description'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in VOLUME_STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_volume(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume state {0}: {1}".format(
+ state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
new file mode 100644
index 00000000..a1a38bb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_volume_attachment
+
+short_description: Attach/detach a volume to/from a device in the Packet host.
+
+description:
+    - Attach/detach a volume to/from a device in the Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/volumes/).
+ - "This module creates the attachment route in the Packet API. In order to discover
+ the block devices on the server, you have to run the Attach Scripts,
+ as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the attachment.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ project_id:
+ description:
+ - UUID of the project to which the device and volume belong.
+ type: str
+ required: true
+
+ volume:
+ description:
+ - Selector for the volume.
+ - It can be a UUID, an API-generated volume name, or user-defined description string.
+ - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
+ type: str
+ required: true
+
+ device:
+ description:
+ - Selector for the device.
+ - It can be a UUID of the device, or a hostname.
+ - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+
+ vars:
+ volname: testvol
+ devname: testdev
+ project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: ewr1
+ plan: storage_1
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+
+ - name: Create a device
+ packet_device:
+ project_id: "{{ project_id }}"
+ hostnames: "{{ devname }}"
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: ewr1
+ state: present
+
+ - name: Attach testvol to testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+
+ - name: Detach testvol from testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+ state: absent
+'''
+
+RETURN = '''
+volume_id:
+ description: UUID of volume addressed by the module call.
+ type: str
+ returned: success
+
+device_id:
+ description: UUID of device addressed by the module call.
+ type: str
+ returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['hostname'] == spec
+
+
+def do_attach(packet_conn, vol_id, dev_id):
+ api_method = "storage/{0}/attachments".format(vol_id)
+ packet_conn.call_api(
+ api_method,
+ params={"device_id": dev_id},
+ type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
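+    # With dev_id=None, remove all of the volume's attachments; otherwise
+    # remove only the attachment to the given device.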
+ def dev_match(a):
+ return (dev_id is None) or (a['device']['id'] == dev_id)
+ for a in vol['attachments']:
+ if dev_match(a):
+ packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(l, resource_type, spec):
+ if len(l) > 1:
+ _msg = ("more than one {0} matches specification {1}: {2}".format(
+ resource_type, spec, l))
+ raise Exception(_msg)
+ if len(l) == 0:
+ _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+ raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+ if len(volume_dict['attachments']) == 0:
+ return []
+ else:
+ return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
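+    # Resolve the volume (and, if given, the device) selectors first, then
+    # attach or detach based on the volume's current attachments.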
+ return_dict = {'changed': False}
+ volspec = module.params.get("volume")
+ devspec = module.params.get("device")
+ if devspec is None and target_state == 'present':
+ raise Exception("If you want to attach a volume, you must specify a device.")
+ project_id = module.params.get("project_id")
+ volumes_api_method = "projects/{0}/storage".format(project_id)
+ volumes = packet_conn.call_api(volumes_api_method,
+ params={'include': 'facility,attachments.device'})['volumes']
+ v_match = get_volume_selector(volspec)
+ matching_volumes = [v for v in volumes if v_match(v)]
+ validate_selected(matching_volumes, "volume", volspec)
+ volume = matching_volumes[0]
+ return_dict['volume_id'] = volume['id']
+
+ device = None
+ if devspec is not None:
+ devices_api_method = "projects/{0}/devices".format(project_id)
+ devices = packet_conn.call_api(devices_api_method)['devices']
+ d_match = get_device_selector(devspec)
+ matching_devices = [d for d in devices if d_match(d)]
+ validate_selected(matching_devices, "device", devspec)
+ device = matching_devices[0]
+ return_dict['device_id'] = device['id']
+
+ attached_device_ids = get_attached_dev_ids(volume)
+
+ if target_state == "present":
+ if len(attached_device_ids) == 0:
+ do_attach(packet_conn, volume['id'], device['id'])
+ return_dict['changed'] = True
+ elif device['id'] not in attached_device_ids:
+ # Don't reattach volume which is attached to a different device.
+ # Rather fail than force remove a device on state == 'present'.
+ raise Exception("volume {0} is already attached to device {1}".format(
+ volume, attached_device_ids))
+ else:
+ if device is None:
+ if len(attached_device_ids) > 0:
+ do_detach(packet_conn, volume)
+ return_dict['changed'] = True
+ elif device['id'] in attached_device_ids:
+ do_detach(packet_conn, volume, device['id'])
+ return_dict['changed'] = True
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ volume=dict(type="str", required=True),
+ project_id=dict(type="str", required=True),
+ device=dict(type="str"),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(
+ **act_on_volume_attachment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
new file mode 100644
index 00000000..90798672
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+ - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait
+ for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The name of the virtual machine.
+ type: str
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ type: str
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ default: 2
+ type: int
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ default: 2048
+ type: int
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ type: str
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ type: int
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ type: str
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ count:
+ description:
+ - The number of virtual machines to create.
+ type: int
+ default: 1
+ location:
+ description:
+      - The datacenter location. Use only if you want to create the datacenter; otherwise this value is ignored.
+ type: str
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ type: bool
+ default: 'no'
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ type: int
+ default: 1
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - create or terminate instances
+ - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ type: str
+ default: 'present'
+ disk_type:
+ description:
+ - the type of disk to be allocated.
+ type: str
+ choices: [SSD, HDD]
+ default: HDD
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details; see the subscription_user and subscription_password options.
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+ community.general.profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+- name: Remove virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+- name: Start virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+- name: Stop virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request failed to complete ' + msg + ' "' + str(
+                    promise['requestId']) + '".')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ assign_public_ip = module.params.get('assign_public_ip')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
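+        # Reuse an existing public LAN in the datacenter if there is one;
+        # otherwise create a new LAN named "public".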
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+ for lan in lans['items']:
+ if lan['properties']['public']:
+ public_found = True
+ lan = lan['id']
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+        module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
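+        # Treat the name as a printf-style pattern (a plain name gets "%d"
+        # appended) and generate <count> sequentially numbered hostnames.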
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+            if to_native(e).startswith('not all'):
+                name = '%s%%d' % name
+            else:
+                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
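+ # Record the first IP of the NIC attached to the requested LAN as
+ # 'public_ip' in the create response.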
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+ True if the virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
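+ # A requested 'running' state is compared against vmState directly,
+ # while 'stopped' corresponds to a vmState of 'SHUTOFF'.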
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+ return changed
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required ' +
+ 'for removing machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
new file mode 100644
index 00000000..e3ba1d49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+ on profitbricks >= 1.0.0
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ type: str
+ description:
+ description:
+ - The description of the virtual datacenter.
+ type: str
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ type: str
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate datacenters.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a datacenter
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+
+- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+ state: absent
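+
+# A sketch of a create with explicit credentials; every value below is a
+# placeholder and should be replaced with your own subscription details.
+- name: Create a datacenter with explicit credentials
+ community.general.profitbricks_datacenter:
+ name: Tardis Two
+ location: de/fkb
+ subscription_user: you@example.com
+ subscription_password: secretpassword
+ wait_timeout: 500
+ state: present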
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
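+ # Poll the request status every 5 seconds until it reports DONE or
+ # FAILED, or until the deadline computed above passes.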
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
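+ # The datacenter may be referenced either by UUID or by name; names are
+ # resolved by scanning the full datacenter list below.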
+ if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600, type='int'),
+ state=dict(default='present'), # @TODO add choices
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
new file mode 100644
index 00000000..49941241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC.
+description:
+ - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ type: str
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is required when removing a NIC, but not when creating one.
+ - If not specified, it defaults to a value based on UUID4.
+ type: str
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: true
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: true
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+- name: Remove a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _make_default_name():
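+ # Default NIC names are the first ten characters of a hyphen-stripped UUID4.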
+ return str(uuid.uuid4()).replace('-', '')[:10]
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(required=True),
+ server=dict(required=True),
+ name=dict(),
+ lan=dict(),
+ subscription_user=dict(required=True),
+ subscription_password=dict(required=True, no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ ),
+ required_if=(
+ ('state', 'absent', ['name']),
+ ('state', 'present', ['lan']),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
new file mode 100644
index 00000000..a63cbcdd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+ - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ type: str
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ type: str
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ required: false
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ required: false
+ disk_type:
+ description:
+ - The disk type of the volume.
+ type: str
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ type: str
+ required: false
+ default: UNKNOWN
+ count:
+ description:
+ - The number of volumes you wish to create.
+ type: int
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created volumes.
+ default: yes
+ type: bool
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ required: false
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the volume to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate volumes.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create multiple volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+- name: Remove Volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
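+
+# A sketch that also attaches the created volume to a server: 'server' is
+# accepted by this module's argument spec even though it is not listed in the
+# documented options above; all values are placeholders.
+- name: Create a volume and attach it to a server
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ server: node002
+ name: node002-data
+ size: 50
+ disk_type: SSD
+ wait_timeout: 500
+ state: present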
+'''
+
+import re
+import time
+import traceback
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+ if to_native(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
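+ # With auto_increment disabled, every requested volume reuses the
+ # provided name unchanged.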
+ names = [name] * count
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+ This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+ if uuid_match.match(n):
+ _delete_volume(module, profitbricks, datacenter, n)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for removing volumes.')
+
+ try:
+ (changed) = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for a new volume')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new volume')
+
+ try:
+ (volume_dict_array) = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
new file mode 100644
index 00000000..72f03e67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+ - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ server:
+ description:
+ - The name of the server to which you wish to attach the volume, or from which to detach it.
+ type: str
+ volume:
+ description:
+ - The volume name or ID.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+- name: Detach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
new file mode 100644
index 00000000..8d9374a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+#
+# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
+# Frameworks
+# Copyright (C) 2016 PubNub Inc.
+# http://www.pubnub.com/
+# http://www.pubnub.com/terms
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pubnub_blocks
+short_description: PubNub blocks management module.
+description:
+ - "This module allows Ansible to interface with the PubNub BLOCKS
+ infrastructure by providing the following operations: create / remove,
+ start / stop and rename for blocks and create / modify / remove for event
+ handlers"
+author:
+ - PubNub <support@pubnub.com> (@pubnub)
+ - Sergey Mamontov <sergey@pubnub.com> (@parfeon)
+requirements:
+ - "python >= 2.7"
+ - "pubnub_blocks_client >= 1.0"
+options:
+ email:
+ description:
+ - Email from account for which new session should be started.
+ - "Not required if C(cache) contains result of previous module call (in
+ same play)."
+ required: false
+ type: str
+ password:
+ description:
+ - Password which match to account to which specified C(email) belong.
+ - "Not required if C(cache) contains result of previous module call (in
+ same play)."
+ required: false
+ type: str
+ cache:
+ description: >
+ If a single play uses the blocks management module several times, it is
+ preferable to enable 'caching' by having the previous module call share the
+ gathered artifacts and passing them to this parameter.
+ required: false
+ type: dict
+ default: {}
+ account:
+ description:
+ - "Name of PubNub account for from which C(application) will be used to
+ manage blocks."
+ - "User's account will be used if value not set or empty."
+ type: str
+ required: false
+ application:
+ description:
+ - "Name of target PubNub application for which blocks configuration on
+ specific C(keyset) will be done."
+ type: str
+ required: true
+ keyset:
+ description:
+ - Name of the application's key set to which the managed blocks are bound.
+ type: str
+ required: true
+ state:
+ description:
+ - "Intended block state after event handlers creation / update process
+ will be completed."
+ required: false
+ default: 'present'
+ choices: ['started', 'stopped', 'present', 'absent']
+ type: str
+ name:
+ description:
+ - Name of managed block which will be later visible on admin.pubnub.com.
+ required: true
+ type: str
+ description:
+ description:
+ - Short block description which will later be visible on
+ admin.pubnub.com. Used only if the block doesn't exist; it won't change
+ the description of an existing block.
+ required: false
+ type: str
+ event_handlers:
+ description:
+ - "List of event handlers which should be updated for specified block
+ C(name)."
+ - "Each entry for new event handler should contain: C(name), C(src),
+ C(channels), C(event). C(name) used as event handler name which can be
+ used later to make changes to it."
+ - C(src) is full path to file with event handler code.
+ - "C(channels) is name of channel from which event handler is waiting
+ for events."
+ - "C(event) is type of event which is able to trigger event handler:
+ I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ - "Each entry for existing handlers should contain C(name) (so target
+ handler can be identified). Rest parameters (C(src), C(channels) and
+ C(event)) can be added if changes required for them."
+ - "It is possible to rename event handler by adding C(changes) key to
+ event handler payload and pass dictionary, which will contain single key
+ C(name), where new name should be passed."
+ - "To remove particular event handler it is possible to set C(state) for
+ it to C(absent) and it will be removed."
+ required: false
+ default: []
+ type: list
+ changes:
+ description:
+ - "List of fields which should be changed by block itself (doesn't
+ affect any event handlers)."
+ - "Possible options for change is: C(name)."
+ required: false
+ default: {}
+ type: dict
+ validate_certs:
+ description:
+ - "This key allow to try skip certificates check when performing REST API
+ calls. Sometimes host may have issues with certificates on it and this
+ will cause problems to call PubNub REST API."
+ - If check should be ignored C(False) should be passed to this parameter.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+# Event handler create example.
+- name: Create single event handler
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ src: '{{ path_to_handler_source }}'
+ name: '{{ handler_name }}'
+ event: 'js-before-publish'
+ channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_1_source }}'
+ name: '{{ event_handler_1_name }}'
+ channels: '{{ event_handler_1_channel }}'
+ event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_2_source }}'
+ name: '{{ event_handler_2_name }}'
+ channels: '{{ event_handler_2_channel }}'
+ event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: started
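+
+# A sketch of a block rename via the 'changes' option described above; the
+# Jinja variables are placeholders.
+- name: Rename '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ changes:
+ name: '{{ new_block_name }}'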
+'''
+
+RETURN = '''
+module_cache:
+ description: "Cached account information. In case if with single play module
+ used few times it is better to pass cached data to next module calls to speed
+ up process."
+ type: dict
+ returned: always
+'''
+import copy
+import os
+
+try:
+ # Import PubNub BLOCKS client.
+ from pubnub_blocks_client import User, Account, Owner, Application, Keyset
+ from pubnub_blocks_client import Block, EventHandler
+ from pubnub_blocks_client import exceptions
+ HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+ HAS_PUBNUB_BLOCKS_CLIENT = False
+ User = None
+ Account = None
+ Owner = None
+ Application = None
+ Keyset = None
+ Block = None
+ EventHandler = None
+ exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+def pubnub_user(module):
+ """Create and configure user model if it possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+
+ :rtype: User
+ :return: Reference on initialized and ready to use user or 'None' in
+ case if not all required information has been passed to block.
+ """
+ user = None
+ params = module.params
+
+ if params.get('cache') and params['cache'].get('module_cache'):
+ cache = params['cache']['module_cache']
+ user = User()
+ user.restore(cache=copy.deepcopy(cache['pnm_user']))
+ elif params.get('email') and params.get('password'):
+ user = User(email=params.get('email'), password=params.get('password'))
+ else:
+ err_msg = 'It looks like no account credentials have been passed or ' \
+ 'the \'cache\' field doesn\'t contain the result of a previous ' \
+ 'module call.'
+ module.fail_json(msg='Missing account credentials.',
+ description=err_msg, changed=False)
+
+ return user
+
+
+def pubnub_account(module, user):
+ """Create and configure account if it is possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type user: User
+ :param user: Reference on authorized user for which one of accounts
+ should be used during manipulations with block.
+
+ :rtype: Account
+ :return: Reference on initialized and ready to use account or 'None' in
+ case if not all required information has been passed to block.
+ """
+ params = module.params
+ if params.get('account'):
+ account_name = params.get('account')
+ account = user.account(name=params.get('account'))
+ if account is None:
+ err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+ 'authorized user. Please make sure the correct ' \
+ 'name has been passed during module configuration.'
+ module.fail_json(msg='Missing account.',
+ description=err_frmt.format(account_name),
+ changed=False)
+ else:
+ account = user.accounts()[0]
+
+ return account
+
+
+def pubnub_application(module, account):
+ """Retrieve reference on target application from account model.
+
+ NOTE: If account authorization fails or there is no application with
+ the specified name, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model from which reference
+ on application should be fetched.
+
+ :rtype: Application
+ :return: Reference on initialized and ready to use application model.
+ """
+ application = None
+ params = module.params
+ try:
+ application = account.application(params['application'])
+ except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=dict(account))
+
+ if application is None:
+ err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
+ 'correct application name has been passed. If the application ' \
+ 'doesn\'t exist you can create it on admin.pubnub.com.'
+ email = account.owner.email
+ module.fail_json(msg=err_fmt.format(params['application'], email),
+ changed=account.changed, module_cache=dict(account))
+
+ return application
+
+
+def pubnub_keyset(module, account, application):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: If there is no keyset with the specified name, the module will
+ exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be
+ used in case of error to export cached data.
+ :type application: Application
+ :param application: Reference on PubNub application model from which
+ reference on keyset should be fetched.
+
+ :rtype: Keyset
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ params = module.params
+ keyset = application.keyset(params['keyset'])
+ if keyset is None:
+ err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \
+ 'sure the correct keyset name has been passed. If the keyset ' \
+ 'doesn\'t exist you can create it on admin.pubnub.com.'
+ module.fail_json(msg=err_fmt.format(params['keyset'],
+ application.name),
+ changed=account.changed, module_cache=dict(account))
+
+ return keyset
+
+
+def pubnub_block(module, account, keyset):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: If there is no block with the specified name and the module is
+ configured to start/stop it, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be used in
+ case of error to export cached data.
+ :type keyset: Keyset
+ :param keyset: Reference on keyset model from which reference on block
+ should be fetched.
+
+ :rtype: Block
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ block = None
+ params = module.params
+ try:
+ block = keyset.block(params['name'])
+ except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed, module_cache=dict(account))
+
+ # Report an error because the block doesn't exist but a start/stop
+ # was requested.
+ if block is None and params['state'] in ['started', 'stopped']:
+ block_name = params.get('name')
+ module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
+ changed=account.changed, module_cache=dict(account))
+
+ if block is None and params['state'] == 'present':
+ block = Block(name=params.get('name'),
+ description=params.get('description'))
+ keyset.add_block(block)
+
+ if block:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+ if params.get('description'):
+ block.description = params.get('description')
+
+ return block
+
+
+def pubnub_event_handler(block, data):
+ """Retrieve reference on target event handler from block model.
+
+ :type block: Block
+ :param block: Reference on block model from which reference on event
+ handlers should be fetched.
+ :type data: dict
+ :param data: Reference on dictionary which contains information about
+ event handler and whether it should be created or not.
+
+ :rtype: EventHandler
+ :return: Reference on initialized and ready to use event handler model.
+ 'None' will be returned if there is no handler with the
+ specified name and no request to create it.
+ """
+ event_handler = block.event_handler(data['name'])
+
+ # Prepare payload for event handler update.
+ changed_name = (data.pop('changes').get('name')
+ if 'changes' in data else None)
+ name = data.get('name') or changed_name
+ channels = data.get('channels')
+ event = data.get('event')
+ code = _content_of_file_at_path(data.get('src'))
+ state = data.get('state') or 'present'
+
+ # Create event handler if required.
+ if event_handler is None and state == 'present':
+ event_handler = EventHandler(name=name, channels=channels, event=event,
+ code=code)
+ block.add_event_handler(event_handler)
+
+ # Update event handler if required.
+ if event_handler is not None and state == 'present':
+ if name is not None:
+ event_handler.name = name
+ if channels is not None:
+ event_handler.channels = channels
+ if event is not None:
+ event_handler.event = event
+ if code is not None:
+ event_handler.code = code
+
+ return event_handler
+
+
+def _failure_title_from_exception(exception):
+ """Compose human-readable title for module error.
+
+ Title will be based on the status code if one has been provided.
+ :type exception: exceptions.GeneralPubNubError
+ :param exception: Reference on exception for which title should be
+ composed.
+
+ :rtype: str
+ :return: Reference on error title which should be shown on module
+ failure.
+ """
+ title = 'General REST API access error.'
+ if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+ title = 'Authorization error: missing credentials.'
+ elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+ title = 'Authorization error: wrong credentials.'
+ elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+ title = 'API access error: insufficient access rights.'
+ elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+ title = 'API access error: time token expired.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+ title = 'Block create did fail: block with same name already exists.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+ title = 'Unable to fetch list of blocks for keyset.'
+ elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+ title = 'Block creation did fail.'
+ elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+ title = 'Block update did fail.'
+ elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+ title = 'Block removal did fail.'
+ elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+ title = 'Block start/stop did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+ title = 'Event handler creation did fail: missing fields.'
+ elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+ title = 'Event handler creation did fail: handler with same name already exists.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+ title = 'Event handler creation did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+ title = 'Event handler update did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+ title = 'Event handler removal did fail.'
+
+ return title
+
+
+def _content_of_file_at_path(path):
+ """Read file content.
+
+ Try to read the content of the file at the specified path.
+ :type path: str
+ :param path: Full path to the location of the file which should be read.
+ :rtype: str
+ :return: File content or 'None'
+ """
+ content = None
+ if path and os.path.exists(path):
+ with open(path, mode="rt") as opened_file:
+ b_content = opened_file.read()
+ try:
+ content = to_text(b_content, errors='surrogate_or_strict')
+ except UnicodeError:
+ pass
+
+ return content
+
+
+def main():
+ fields = dict(
+ email=dict(default='', required=False, type='str'),
+ password=dict(default='', required=False, type='str', no_log=True),
+ account=dict(default='', required=False, type='str'),
+ application=dict(required=True, type='str'),
+ keyset=dict(required=True, type='str'),
+ state=dict(default='present', type='str',
+ choices=['started', 'stopped', 'present', 'absent']),
+ name=dict(required=True, type='str'), description=dict(type='str'),
+ event_handlers=dict(default=list(), type='list'),
+ changes=dict(default=dict(), type='dict'),
+ cache=dict(default=dict(), type='dict'),
+ validate_certs=dict(default=True, type='bool'))
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+ if not HAS_PUBNUB_BLOCKS_CLIENT:
+ module.fail_json(msg='pubnub_blocks_client required for this module.')
+
+ params = module.params
+
+ # Authorize user.
+ user = pubnub_user(module)
+ # Initialize PubNub account instance.
+ account = pubnub_account(module, user=user)
+ # Try to fetch the application with which the module should work.
+ application = pubnub_application(module, account=account)
+ # Try to fetch the keyset with which the module should work.
+ keyset = pubnub_keyset(module, account=account, application=application)
+ # Try to fetch the block with which the module should work.
+ block = pubnub_block(module, account=account, keyset=keyset)
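+ # A block that was just created locally (and not yet pushed to PubNub) is
+ # assumed to carry uid == -1; start/stop requests are skipped for it below.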
+ is_new_block = block is not None and block.uid == -1
+
+ # Check whether block should be removed or not.
+ if block is not None and params['state'] == 'absent':
+ keyset.remove_block(block)
+ block = None
+
+ if block is not None:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+
+ # Process requested changes to event handlers.
+ for event_handler_data in params.get('event_handlers') or list():
+ state = event_handler_data.get('state') or 'present'
+ event_handler = pubnub_event_handler(data=event_handler_data,
+ block=block)
+ if state == 'absent' and event_handler:
+ block.delete_event_handler(event_handler)
+
+ # Update block operation state if required.
+ if block and not is_new_block:
+ if params['state'] == 'started':
+ block.start()
+ elif params['state'] == 'stopped':
+ block.stop()
+
+ # Save current account state.
+ if not module.check_mode:
+ try:
+ account.save()
+ except (exceptions.APIAccessError, exceptions.KeysetError,
+ exceptions.BlockError, exceptions.EventHandlerError,
+ exceptions.GeneralPubNubError) as exc:
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=module_cache)
+
+ # Report module execution results.
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ changed_will_change = account.changed or account.will_change
+ module.exit_json(changed=changed_will_change, module_cache=module_cache)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
new file mode 100644
index 00000000..9f7df5c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
@@ -0,0 +1,897 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax
+short_description: create / delete an instance in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud instance and optionally
+ waits for it to be 'running'.
+options:
+ auto_increment:
+ description:
+ - Whether or not to add an incrementing number to the name of the
+ created servers. Only applicable when used with the I(group) attribute
+ or meta key.
+ type: bool
+ default: 'yes'
+ boot_from_volume:
+ description:
+ - Whether or not to boot the instance from a Cloud Block Storage volume.
+ If C(yes) and I(image) is specified a new volume will be created at
+ boot time. I(boot_volume_size) is required with I(image) to create a
+ new volume at boot time.
+ type: bool
+ default: 'no'
+ boot_volume:
+ type: str
+ description:
+ - Cloud Block Storage ID or Name to use as the boot volume of the
+ instance
+ boot_volume_size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes. This is only required with
+ I(image) and I(boot_from_volume).
+ default: 100
+ boot_volume_terminate:
+ description:
+ - Whether the I(boot_volume) or newly created volume from I(image) will
+ be terminated when the server is terminated
+ type: bool
+ default: 'no'
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ count:
+ type: int
+ description:
+ - number of instances to launch
+ default: 1
+ count_offset:
+ type: int
+ description:
+ - number count to start at
+ default: 1
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified it will assume the value C(auto).
+ choices:
+ - auto
+ - manual
+ exact_count:
+ description:
+ - Explicitly ensure an exact count of instances, used with
+ state=active/present. If specified as C(yes) and I(count) is less than
+ the servers matched, servers will be deleted to match the count. If
+ the number of matched servers is fewer than specified in I(count)
+ additional servers will be added.
+ type: bool
+ default: 'no'
+ extra_client_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating the cloudservers
+ client. This is considered an advanced option, use it wisely and
+ with caution.
+ extra_create_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating a new server.
+ This is considered an advanced option, use it wisely and with caution.
+ files:
+ type: dict
+ description:
+ - Files to insert into the instance. remotefilename:localcontent
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ group:
+ type: str
+ description:
+ - host group to assign to server, is also used for idempotent operations
+ to ensure a specific number of instances
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
+ With I(boot_from_volume), a Cloud Block Storage volume will be created
+ with this image
+ instance_ids:
+ type: list
+ description:
+ - list of instance ids, currently only used when state='absent' to
+ remove instances
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ aliases:
+ - keypair
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the instance
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the server's config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Jesse Keating (@omgjlk)"
+ - "Matt Martz (@sivel)"
+notes:
+ - I(exact_count) can be "destructive" if the number of running servers in
+ the I(group) is larger than that specified in I(count). In such a case, the
+ I(state) is effectively set to C(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have C(action)
+ set to C(delete), and the oldest servers in the group will be deleted.
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Server
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: rax-test1
+ flavor: 5
+ image: b11d9567-e412-4255-96b9-bd63ab23bcfe
+ key_name: my_rackspace_key
+ files:
+ /root/test.txt: /home/localuser/test.txt
+ wait: yes
+ state: present
+ networks:
+ - private
+ - public
+ register: rax
+
+- name: Build an exact count of cloud servers with incremented names
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 10
+ count_offset: 10
+ exact_count: yes
+ group: test
+ wait: yes
+ register: rax
+'''
+
+import json
+import os
+import re
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
+ rax_find_image, rax_find_network, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.six import string_types
+
+
+def rax_find_server_image(module, server, image, boot_volume):
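+ # Helper for the idempotency matching below: resolve the image backing an
+ # existing server (handling boot-from-volume servers) and return None when
+ # it cannot be determined or does not match the requested image.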
+ if not image and boot_volume:
+ vol = rax_find_bootable_volume(module, pyrax, server,
+ exit=False)
+ if not vol:
+ return None
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if vol_image_id:
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if server_image:
+ server.image = dict(id=server_image)
+
+ # Match image IDs taking care of boot from volume
+ if image and not server.image:
+ vol = rax_find_bootable_volume(module, pyrax, server)
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if not vol_image_id:
+ return None
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if image != server_image:
+ return None
+
+ server.image = dict(id=server_image)
+ elif image and server.image['id'] != image:
+ return None
+
+ return server.image
+
+
+def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
+ files=None, wait=True, wait_timeout=300, disk_config=None,
+ group=None, nics=None, extra_create_args=None, user_data=None,
+ config_drive=False, existing=None, block_device_mapping_v2=None):
+ names = [] if names is None else names
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ nics = [] if nics is None else nics
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+ existing = [] if existing is None else existing
+ block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
+
+ cs = pyrax.cloudservers
+ changed = False
+
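+ # As documented above, providing user_data implies config_drive.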
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(os.path.expanduser(user_data)):
+ try:
+ user_data = os.path.expanduser(user_data)
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ # Handle the file contents
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ fileobj = open(lpath, 'r')
+ files[rpath] = fileobj.read()
+ fileobj.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+ try:
+ servers = []
+ bdmv2 = block_device_mapping_v2
+ for name in names:
+ servers.append(cs.servers.create(name=name, image=image,
+ flavor=flavor, meta=meta,
+ key_name=key_name,
+ files=files, nics=nics,
+ disk_config=disk_config,
+ config_drive=config_drive,
+ userdata=user_data,
+ block_device_mapping_v2=bdmv2,
+ **extra_create_args))
+ except Exception as e:
+ # 'message' is not available on Python 3 exceptions, so fall back safely.
+ if getattr(e, 'message', None):
+ msg = str(e.message)
+ else:
+ msg = repr(e)
+ module.fail_json(msg=msg)
+ else:
+ changed = True
+
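+ # Poll the build status every 5 seconds until every server reaches a final
+ # status or the timeout expires; wait_timeout == 0 means wait indefinitely.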
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+
+ # filter() returns a lazy iterator on Python 3, so materialize it
+ # before testing for emptiness.
+ if not list(filter(lambda s: s.status not in FINAL_STATUSES,
+ servers)):
+ break
+ time.sleep(5)
+
+ success = []
+ error = []
+ timeout = []
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+ instance = rax_to_dict(server, 'server')
+ if server.status == 'ACTIVE' or not wait:
+ success.append(instance)
+ elif server.status == 'ERROR':
+ error.append(instance)
+ elif wait:
+ timeout.append(instance)
+
+ untouched = [rax_to_dict(s, 'server') for s in existing]
+ instances = success + untouched
+
+ results = {
+ 'changed': changed,
+ 'action': 'create',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to build'
+ elif error:
+ results['msg'] = 'Failed to build all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+ instance_ids = [] if instance_ids is None else instance_ids
+ kept = [] if kept is None else kept
+
+ cs = pyrax.cloudservers
+
+ changed = False
+ instances = {}
+ servers = []
+
+ for instance_id in instance_ids:
+ servers.append(cs.servers.get(instance_id))
+
+ for server in servers:
+ try:
+ server.delete()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ changed = True
+
+ instance = rax_to_dict(server, 'server')
+ instances[instance['id']] = instance
+
+ # If requested, wait for server deletion
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ instance_id = server.id
+ try:
+ server.get()
+ except Exception:
+ instances[instance_id]['status'] = 'DELETED'
+ instances[instance_id]['rax_status'] = 'DELETED'
+
+ if not list(filter(lambda s: s['status'] not in ('', 'DELETED',
+ 'ERROR'),
+ instances.values())):
+ break
+
+ time.sleep(5)
+
+ # Materialize the filters so the lists can be tested and iterated again,
+ # and use a proper tuple for the single 'ERROR' status.
+ timeout = list(filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
+ instances.values()))
+ error = list(filter(lambda s: s['status'] in ('ERROR',),
+ instances.values()))
+ success = list(filter(lambda s: s['status'] in ('', 'DELETED'),
+ instances.values()))
+
+ instances = [rax_to_dict(s, 'server') for s in kept]
+
+ results = {
+ 'changed': changed,
+ 'action': 'delete',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to delete'
+ elif error:
+ results['msg'] = 'Failed to delete all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+ meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+ disk_config=None, count=1, group=None, instance_ids=None,
+ exact_count=False, networks=None, count_offset=0,
+ auto_increment=False, extra_create_args=None, user_data=None,
+ config_drive=False, boot_from_volume=False,
+ boot_volume=None, boot_volume_size=None,
+ boot_volume_terminate=False):
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ instance_ids = [] if instance_ids is None else instance_ids
+ networks = [] if networks is None else networks
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+
+ cs = pyrax.cloudservers
+ cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
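+ # Validate arguments that are needed whenever servers may be created or
+ # matched by name rather than by explicit instance_ids.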
+ if state == 'present' or (state == 'absent' and instance_ids is None):
+ if not boot_from_volume and not boot_volume and not image:
+ module.fail_json(msg='image is required for the "rax" module')
+
+ for arg, value in dict(name=name, flavor=flavor).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax" module' %
+ arg)
+
+ if boot_from_volume and not image and not boot_volume:
+ module.fail_json(msg='image or boot_volume is required for the '
+ '"rax" module with boot_from_volume')
+
+ if boot_from_volume and image and not boot_volume_size:
+ module.fail_json(msg='boot_volume_size is required for the "rax" '
+ 'module with boot_from_volume and image')
+
+ if boot_from_volume and image and boot_volume:
+ image = None
+
+ servers = []
+
+ # Add the group meta key
+ if group and 'group' not in meta:
+ meta['group'] = group
+ elif 'group' in meta and group is None:
+ group = meta['group']
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ # When using state=absent with group, the absent block won't match the
+ # names properly. Use the exact_count functionality to decrease the count
+ # to the desired level
+ was_absent = False
+ if group is not None and state == 'absent':
+ exact_count = True
+ state = 'present'
+ was_absent = True
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ # act on the state
+ if state == 'present':
+ # Idempotently ensure a specific count of servers
+ if exact_count is not False:
+ # See if we can find servers that match our options
+ if group is None:
+ module.fail_json(msg='"group" must be provided when using '
+ '"exact_count"')
+
+ if auto_increment:
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if e.message.startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=e.message)
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset, count_offset + count)
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ else: # Not auto incrementing
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ # available_numbers not needed here, we inspect auto_increment
+ # again later
+
+ # If state was absent but the count was changed,
+ # assume we only wanted to remove that number of instances
+ if was_absent:
+ diff = len(servers) - count
+ if diff < 0:
+ count = 0
+ else:
+ count = diff
+
+ if len(servers) > count:
+ # We have more servers than we need, set state='absent'
+ # and delete the extras, this should delete the oldest
+ state = 'absent'
+ kept = servers[:count]
+ del servers[:count]
+ instance_ids = []
+ for server in servers:
+ instance_ids.append(server.id)
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout, kept=kept)
+ elif len(servers) < count:
+ # we have fewer servers than we need
+ if auto_increment:
+ # auto incrementing server numbers
+ names = []
+ name_slice = count - len(servers)
+ numbers_to_use = available_numbers[:name_slice]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # We are not auto incrementing server numbers,
+ # create a list of 'name' that matches how many we need
+ names = [name] * (count - len(servers))
+ else:
+ # we have the right number of servers, just return info
+ # about all of the matched servers
+ instances = []
+ instance_ids = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+ instance_ids.append(server.id)
+ module.exit_json(changed=False, action=None,
+ instances=instances,
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+ else: # not called with exact_count=True
+ if group is not None:
+ if auto_increment:
+ # we are auto incrementing server numbers, but not with
+ # exact_count
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if e.message.startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=e.message)
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset,
+ count_offset + count + len(numbers))
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # Not auto incrementing
+ names = [name] * count
+ else:
+ # No group was specified, and not using exact_count
+ # Perform more simplistic matching
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ servers = []
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if server.metadata != meta:
+ continue
+ servers.append(server)
+
+ if len(servers) >= count:
+ # We have more servers than were requested, don't do
+ # anything. Not running with exact_count=True, so we assume
+ # more is OK
+ instances = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+
+ instance_ids = [i['id'] for i in instances]
+ module.exit_json(changed=False, action=None,
+ instances=instances, success=[], error=[],
+ timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ # We need more servers to reach our target; create names for
+ # them. We aren't performing auto_increment here.
+ names = [name] * (count - len(servers))
+
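+ # Build a block device mapping (v2) when booting from volume: either create
+ # a new volume from the image at boot time or attach an existing boot volume.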
+ block_device_mapping_v2 = []
+ if boot_from_volume:
+ mapping = {
+ 'boot_index': '0',
+ 'delete_on_termination': boot_volume_terminate,
+ 'destination_type': 'volume',
+ }
+ if image:
+ mapping.update({
+ 'uuid': image,
+ 'source_type': 'image',
+ 'volume_size': boot_volume_size,
+ })
+ image = None
+ elif boot_volume:
+ volume = rax_find_volume(module, pyrax, boot_volume)
+ mapping.update({
+ 'uuid': pyrax.utils.get_id(volume),
+ 'source_type': 'volume',
+ })
+ block_device_mapping_v2.append(mapping)
+
+ create(module, names=names, flavor=flavor, image=image,
+ meta=meta, key_name=key_name, files=files, wait=wait,
+ wait_timeout=wait_timeout, disk_config=disk_config, group=group,
+ nics=nics, extra_create_args=extra_create_args,
+ user_data=user_data, config_drive=config_drive,
+ existing=servers,
+ block_device_mapping_v2=block_device_mapping_v2)
+
+ elif state == 'absent':
+ if instance_ids is None:
+ # We weren't given an explicit list of server IDs to delete
+ # Let's match instead
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if meta != server.metadata:
+ continue
+
+ servers.append(server)
+
+ # Build a list of server IDs to delete
+ instance_ids = []
+ for server in servers:
+ if len(instance_ids) < count:
+ instance_ids.append(server.id)
+ else:
+ break
+
+ if not instance_ids:
+ # No server IDs were matched for deletion, or no IDs were
+ # explicitly provided, just exit and don't do anything
+ module.exit_json(changed=False, action=None, instances=[],
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': [],
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ auto_increment=dict(default=True, type='bool'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(type='str'),
+ boot_volume_size=dict(type='int', default=100),
+ boot_volume_terminate=dict(type='bool', default=False),
+ config_drive=dict(default=False, type='bool'),
+ count=dict(default=1, type='int'),
+ count_offset=dict(default=1, type='int'),
+ disk_config=dict(choices=['auto', 'manual']),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
+ files=dict(type='dict', default={}),
+ flavor=dict(),
+ group=dict(),
+ image=dict(),
+ instance_ids=dict(type='list'),
+ key_name=dict(aliases=['keypair']),
+ meta=dict(type='dict', default={}),
+ name=dict(),
+ networks=dict(type='list', default=['public', 'private']),
+ service=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ service = module.params.get('service')
+
+ if service is not None:
+ module.fail_json(msg='The "service" attribute has been deprecated, '
+ 'please remove "service: cloudservers" from your '
+ 'playbook pertaining to the "rax" module')
+
+ auto_increment = module.params.get('auto_increment')
+ boot_from_volume = module.params.get('boot_from_volume')
+ boot_volume = module.params.get('boot_volume')
+ boot_volume_size = module.params.get('boot_volume_size')
+ boot_volume_terminate = module.params.get('boot_volume_terminate')
+ config_drive = module.params.get('config_drive')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ disk_config = module.params.get('disk_config')
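+ # Normalize disk_config to upper case (AUTO/MANUAL), the form the API expects.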
+ if disk_config:
+ disk_config = disk_config.upper()
+ exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ group = module.params.get('group')
+ image = module.params.get('image')
+ instance_ids = module.params.get('instance_ids')
+ key_name = module.params.get('key_name')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloudservers(module, state=state, name=name, flavor=flavor,
+ image=image, meta=meta, key_name=key_name, files=files,
+ wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
+ count=count, group=group, instance_ids=instance_ids,
+ exact_count=exact_count, networks=networks,
+ count_offset=count_offset, auto_increment=auto_increment,
+ extra_create_args=extra_create_args, user_data=user_data,
+ config_drive=config_drive, boot_from_volume=boot_from_volume,
+ boot_volume=boot_volume, boot_volume_size=boot_volume_size,
+ boot_volume_terminate=boot_volume_terminate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
new file mode 100644
index 00000000..a681feff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+options:
+ description:
+ type: str
+ description:
+ - Description to give the volume being created
+ image:
+ type: str
+ description:
+ - image to use for bootable volumes. Can be an C(id), C(human_id) or
+ C(name). This option requires C(pyrax>=1.9.3)
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the volume
+ name:
+ type: str
+ description:
+ - Name to give the volume being created
+ required: true
+ size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes
+ default: 100
+ snapshot_id:
+ type: str
+ description:
+ - The id of the snapshot to create the volume from
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ volume_type:
+ type: str
+ description:
+ - Type of the volume being created
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ wait:
+ description:
+ - wait for the volume to be in state 'available' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
+'''
+
+from distutils.version import LooseVersion
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image):
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if image:
+ # pyrax<1.9.3 did not have support for specifying an image when
+ # creating a volume which is required for bootable volumes
+ if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
+ module.fail_json(msg='Creating a bootable volume requires '
+ 'pyrax>=1.9.3')
+ image = rax_find_image(module, pyrax, image)
+
+ volume = rax_find_volume(module, pyrax, name)
+
+ if state == 'present':
+ if not volume:
+ kwargs = dict()
+ if image:
+ kwargs['image'] = image
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id, **kwargs)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ if wait:
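+ # Poll every 5 seconds, so the number of attempts is wait_timeout / 5.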
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ instance = rax_to_dict(volume)
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ instance = rax_to_dict(volume)
+ try:
+ volume.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(type='str'),
+ image=dict(type='str'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ image = module.params.get('image')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
new file mode 100644
index 00000000..71d01620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+options:
+ device:
+ type: str
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde.
+ - Before 2.4 this was a required field. Now it can be left as null to automatically assign the device name.
+ volume:
+ type: str
+ description:
+ - Name or id of the volume to attach/detach
+ required: true
+ server:
+ type: str
+ description:
+ - Name or id of the server to attach/detach
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: yes
+ state: present
+ register: my_volume
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
+ rax_argument_spec,
+ rax_find_server,
+ rax_find_volume,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ volume = rax_find_volume(module, pyrax, volume)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ server = rax_find_server(module, pyrax, server)
+
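+ # If the volume is already attached to the requested server this is a
+ # no-op; attachment to a different server is treated as an error.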
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+
+ for key, value in vars(volume).items():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ volume.get()
+ result['volume'] = rax_to_dict(volume)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+ changed = True
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ result = dict(changed=changed, volume=rax_to_dict(volume))
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=False),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
new file mode 100644
index 00000000..5b9996cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb
+short_description: create/delete or resize a Rackspace Cloud Databases instance
+description:
+ - creates / deletes or resizes a Rackspace Cloud Databases instance
+ and optionally waits for it to be 'running'. The name option needs to be
+ unique since it's used to identify the instance.
+options:
+ name:
+ type: str
+ description:
+ - Name of the databases server instance
+ required: yes
+ flavor:
+ type: int
+ description:
+ - flavor to use for the instance 1 to 6 (i.e. 512MB to 16GB)
+ default: 1
+ volume:
+ type: int
+ description:
+ - Volume size of the database 1-150GB
+ default: 2
+ cdb_type:
+ type: str
+ description:
+ - type of instance (i.e. MySQL, MariaDB, Percona)
+ default: MySQL
+ aliases: ['type']
+ cdb_version:
+ type: str
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ - "The available choices are: C(5.1), C(5.6) and C(10)."
+ default: 5.6
+ aliases: ['version']
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Databases
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 1
+ volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
+ wait: yes
+ state: present
+ register: rax_db_server
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_instance(name):
+
+ cdb = pyrax.cloud_databases
+ instances = cdb.list()
+ if instances:
+ for instance in instances:
+ if instance.name == name:
+ return instance
+ return False
+
+
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ for arg, value in dict(name=name, flavor=flavor,
+ volume=volume, type=cdb_type, version=cdb_version
+ ).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb"'
+ ' module' % arg)
+
+ if not (volume >= 1 and volume <= 150):
+ module.fail_json(msg='volume is required to be between 1 and 150')
+
+ cdb = pyrax.cloud_databases
+
+ flavors = []
+ for item in cdb.list_flavors():
+ flavors.append(item.id)
+
+ if not (flavor in flavors):
+ module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
+
+ changed = False
+
+ instance = find_instance(name)
+
+ if not instance:
+ action = 'create'
+ try:
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ else:
+ action = None
+
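+ # Existing instance: resize the volume (it can only grow) and/or the
+ # flavor to match the requested values.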
+ if instance.volume.size != volume:
+ action = 'resize'
+ if instance.volume.size > volume:
+ module.fail_json(changed=False, action=action,
+ msg='The new volume size must be larger than '
+ 'the current volume size',
+ cdb=rax_to_dict(instance))
+ instance.resize_volume(volume)
+ changed = True
+
+ if int(instance.flavor.id) != flavor:
+ action = 'resize'
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+ instance.resize(flavor)
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'ACTIVE':
+ module.fail_json(changed=changed, action=action,
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be created' % name)
+
+ module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
+
+
+def delete_instance(module, name, wait, wait_timeout):
+
+ if not name:
+ module.fail_json(msg='name is required for the "rax_cdb" module')
+
+ changed = False
+
+ instance = find_instance(name)
+ if not instance:
+ module.exit_json(changed=False, action='delete')
+
+ try:
+ instance.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'SHUTDOWN':
+ module.fail_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be deleted' % name)
+
+ module.exit_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance))
+
+
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ # act on the state
+ if state == 'present':
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
+ elif state == 'absent':
+ delete_instance(module, name, wait, wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ flavor=dict(type='int', default=1),
+ volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL', aliases=['type']),
+ cdb_version=dict(type='str', default='5.6', aliases=['version']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ flavor = module.params.get('flavor')
+ volume = module.params.get('volume')
+ cdb_type = module.params.get('cdb_type')
+ cdb_version = module.params.get('cdb_version')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
new file mode 100644
index 00000000..6d3435e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_cdb_database
+short_description: 'create / delete a database in the Cloud Databases'
+description:
+ - create / delete a database in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ name:
+ type: str
+ description:
+ - Name to give to the database
+ required: yes
+ character_set:
+ type: str
+ description:
+ - Set of symbols and encodings
+ default: 'utf8'
+ collate:
+ type: str
+ description:
+ - Set of rules for comparing characters in a character set
+ default: 'utf8_general_ci'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a database in Cloud Databases
+ tasks:
+ - name: Database build request
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db1
+ state: present
+ register: rax_db_database
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_database(instance, name):
+ try:
+ database = instance.get_database(name)
+ except Exception:
+ return False
+
+ return database
+
+
+def save_database(module, cdb_id, name, character_set, collate):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if not database:
+ try:
+ database = instance.create_database(name=name,
+ character_set=character_set,
+ collate=collate)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='create',
+ database=rax_to_dict(database))
+
+
+def delete_database(module, cdb_id, name):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if database:
+ try:
+ database.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete',
+ database=rax_to_dict(database))
+
+
+def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
+
+ # act on the state
+ if state == 'present':
+ save_database(module, cdb_id, name, character_set, collate)
+ elif state == 'absent':
+ delete_database(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ character_set=dict(type='str', default='utf8'),
+ collate=dict(type='str', default='utf8_general_ci'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('name')
+ character_set = module.params.get('character_set')
+ collate = module.params.get('collate')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_database(module, state, cdb_id, name, character_set, collate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
new file mode 100644
index 00000000..34be49d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb_user
+short_description: create / delete a user in Rackspace Cloud Databases
+description:
+ - create / delete a user in a Rackspace Cloud Databases instance.
+options:
+ cdb_id:
+ type: str
+ description:
+      - The Cloud Databases server UUID
+ required: yes
+ db_username:
+ type: str
+ description:
+ - Name of the database user
+ required: yes
+ db_password:
+ type: str
+ description:
+ - Database user password
+ required: yes
+ databases:
+ type: list
+ description:
+      - Names of the databases that the user can access
+ default: []
+ host:
+ type: str
+ description:
+ - Specifies the host from which a user is allowed to connect to
+ the database. Possible values are a string containing an IPv4 address
+ or "%" to allow connecting from any host
+ default: '%'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a user in Cloud Databases
+  gather_facts: False
+  hosts: local
+  connection: local
+  tasks:
+ - name: User build request
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user1
+ db_password: user1
+ databases: ['db1']
+ state: present
+ register: rax_db_user
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_user(instance, name):
+ try:
+ user = instance.get_user(name)
+ except Exception:
+ return False
+
+ return user
+
+
+def save_user(module, cdb_id, name, password, databases, host):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user" '
+ 'module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if not user:
+ action = 'create'
+ try:
+ user = instance.create_user(name=name,
+ password=password,
+ database_names=databases,
+ host=host)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+ else:
+ action = 'update'
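+        # Reconcile an existing user: push the password/host values, then grant
+        # or revoke database access so it matches the requested list.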
+
+ if user.host != host:
+ changed = True
+
+ user.update(password=password, host=host)
+
+ former_dbs = set([item.name for item in user.list_user_access()])
+ databases = set(databases)
+
+ if databases != former_dbs:
+ try:
+ revoke_dbs = [db for db in former_dbs if db not in databases]
+ user.revoke_user_access(db_names=revoke_dbs)
+
+ new_dbs = [db for db in databases if db not in former_dbs]
+ user.grant_user_access(db_names=new_dbs)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
+
+
+def delete_user(module, cdb_id, name):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user"'
+ ' module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if user:
+ try:
+ user.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete')
+
+
+def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
+
+ # act on the state
+ if state == 'present':
+ save_user(module, cdb_id, name, password, databases, host)
+ elif state == 'absent':
+ delete_user(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ db_username=dict(type='str', required=True),
+ db_password=dict(type='str', required=True, no_log=True),
+ databases=dict(type='list', default=[]),
+ host=dict(type='str', default='%'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('db_username')
+ password = module.params.get('db_password')
+ databases = module.params.get('databases')
+ host = to_text(module.params.get('host'), errors='surrogate_or_strict')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_user(module, state, cdb_id, name, password, databases, host)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
new file mode 100644
index 00000000..5ff1e314
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb
+short_description: create / delete a load balancer in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud load balancer.
+options:
+ algorithm:
+ type: str
+ description:
+ - algorithm for the balancer being created
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
+ default: LEAST_CONNECTIONS
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the load balancer
+ required: yes
+ port:
+ type: int
+ description:
+ - Port for the balancer being created
+ default: 80
+ protocol:
+ type: str
+ description:
+ - Protocol for the balancer being created
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
+ default: HTTP
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ timeout:
+ type: int
+ description:
+ - timeout for communication between the balancer and the node
+ default: 30
+ type:
+ type: str
+ description:
+ - type of interface for the balancer being created
+ choices:
+ - PUBLIC
+ - SERVICENET
+ default: PUBLIC
+ vip_id:
+ type: str
+ description:
+ - Virtual IP ID to use when creating the load balancer for purposes of
+ sharing an IP with another load balancer of another protocol
+ wait:
+ description:
+ - wait for the balancer to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Load Balancer
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Balancer create request
+ local_action:
+ module: rax_clb
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 8080
+ protocol: HTTP
+ type: SERVICENET
+ timeout: 30
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_lb
+'''
+
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
+ CLB_PROTOCOLS,
+ rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id):
+ if int(timeout) < 30:
+ module.fail_json(msg='"timeout" must be greater than or equal to 30')
+
+ changed = False
+ balancers = []
+
+ clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
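+    # The CLB list API is paginated; keep requesting with the last balancer's ID
+    # as a marker until no further balancers come back, then match on name or ID.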
+ balancer_list = clb.list()
+ while balancer_list:
+ retrieved = clb.list(marker=balancer_list.pop().id)
+ balancer_list.extend(retrieved)
+ if len(retrieved) < 2:
+ break
+
+ for balancer in balancer_list:
+ if name != balancer.name and name != balancer.id:
+ continue
+
+ balancers.append(balancer)
+
+ if len(balancers) > 1:
+ module.fail_json(msg='Multiple Load Balancers were matched by name, '
+ 'try using the Load Balancer ID instead')
+
+ if state == 'present':
+ if isinstance(meta, dict):
+ metadata = [dict(key=k, value=v) for k, v in meta.items()]
+
+ if not balancers:
+ try:
+ virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
+ balancer = clb.create(name, metadata=metadata, port=port,
+ algorithm=algorithm, protocol=protocol,
+ timeout=timeout, virtual_ips=virtual_ips)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ balancer = balancers[0]
+ setattr(balancer, 'metadata',
+ [dict(key=k, value=v) for k, v in
+ balancer.get_metadata().items()])
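+            # Compare the requested settings with the existing balancer and only
+            # push an update when at least one attribute differs.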
+ atts = {
+ 'name': name,
+ 'algorithm': algorithm,
+ 'port': port,
+ 'protocol': protocol,
+ 'timeout': timeout
+ }
+ for att, value in atts.items():
+ current = getattr(balancer, att)
+ if current != value:
+ changed = True
+
+ if changed:
+ balancer.update(**atts)
+
+ if balancer.metadata != metadata:
+ balancer.set_metadata(meta)
+ changed = True
+
+ virtual_ips = [clb.VirtualIP(type=vip_type)]
+ current_vip_types = set([v.type for v in balancer.virtual_ips])
+ vip_types = set([v.type for v in virtual_ips])
+ if current_vip_types != vip_types:
+ module.fail_json(msg='Load balancer Virtual IP type cannot '
+ 'be changed')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ instance = rax_to_dict(balancer, 'clb')
+
+ result = dict(changed=changed, balancer=instance)
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if balancers:
+ balancer = balancers[0]
+ try:
+ balancer.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ instance = rax_to_dict(balancer, 'clb')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(balancer, 'status', ('DELETED'),
+ interval=5, attempts=attempts)
+ else:
+ instance = {}
+
+ module.exit_json(changed=changed, balancer=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ algorithm=dict(choices=CLB_ALGORITHMS,
+ default='LEAST_CONNECTIONS'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ port=dict(type='int', default=80),
+ protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
+ state=dict(default='present', choices=['present', 'absent']),
+ timeout=dict(type='int', default=30),
+ type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
+ vip_id=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ algorithm = module.params.get('algorithm')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ state = module.params.get('state')
+ timeout = int(module.params.get('timeout'))
+ vip_id = module.params.get('vip_id')
+ vip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
new file mode 100644
index 00000000..c066ab66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb_nodes
+short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
+description:
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
+options:
+ address:
+ type: str
+ required: false
+ description:
+ - IP address or domain name of the node
+ condition:
+ type: str
+ required: false
+ choices:
+ - enabled
+ - disabled
+ - draining
+ description:
+ - Condition for the node, which determines its role within the load
+ balancer
+ load_balancer_id:
+ type: int
+ required: true
+ description:
+ - Load balancer id
+ node_id:
+ type: int
+ required: false
+ description:
+ - Node id
+ port:
+ type: int
+ required: false
+ description:
+ - Port number of the load balanced service on the node
+ state:
+ type: str
+ required: false
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Indicate desired state of the node
+ type:
+ type: str
+ required: false
+ choices:
+ - primary
+ - secondary
+ description:
+ - Type of node
+ wait:
+ required: false
+ default: "no"
+ type: bool
+ description:
+ - Wait for the load balancer to become active before returning
+ wait_timeout:
+ type: int
+ required: false
+ default: 30
+ description:
+ - How long to wait before giving up and returning an error
+ weight:
+ type: int
+ required: false
+ description:
+ - Weight of node
+ virtualenv:
+ type: path
+ description:
+ - Virtualenv to execute this module in
+author: "Lukasz Kawczynski (@neuroid)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Add a new node to the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ address: 10.2.2.3
+ port: 80
+ condition: enabled
+ type: primary
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Drain connections from a node
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ condition: draining
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Remove a node from the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ state: absent
+ wait: yes
+ credentials: /path/to/credentials
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
+
+
+def _activate_virtualenv(path):
+ activate_this = os.path.join(path, 'bin', 'activate_this.py')
+ with open(activate_this) as f:
+ code = compile(f.read(), activate_this, 'exec')
+ exec(code)
+
+
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
+ return node
+
+ return None
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ condition=dict(choices=['enabled', 'disabled', 'draining']),
+ load_balancer_id=dict(required=True, type='int'),
+ node_id=dict(type='int'),
+ port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ type=dict(choices=['primary', 'secondary']),
+ virtualenv=dict(type='path'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=30, type='int'),
+ weight=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params['address']
+ condition = (module.params['condition'] and
+ module.params['condition'].upper())
+ load_balancer_id = module.params['load_balancer_id']
+ node_id = module.params['node_id']
+ port = module.params['port']
+ state = module.params['state']
+ typ = module.params['type'] and module.params['type'].upper()
+ virtualenv = module.params['virtualenv']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout'] or 1
+ weight = module.params['weight']
+
+ if virtualenv:
+ try:
+ _activate_virtualenv(virtualenv)
+ except IOError as e:
+ module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
+ virtualenv, e))
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.cloud_loadbalancers:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ node = _get_node(lb, node_id, address, port)
+
+ result = rax_clb_node_to_dict(node)
+
+ if state == 'absent':
+ if not node: # Removing a non-existent node
+ module.exit_json(changed=False, state=state)
+ try:
+ lb.delete_node(node)
+ result = {}
+ except pyrax.exc.NotFound:
+ module.exit_json(changed=False, state=state)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # present
+ if not node:
+ if node_id: # Updating a non-existent node
+ msg = 'Node %d not found' % node_id
+ if lb.nodes:
+ msg += (' (available nodes: %s)' %
+ ', '.join([str(x.id) for x in lb.nodes]))
+ module.fail_json(msg=msg)
+ else: # Creating a new node
+ try:
+ node = pyrax.cloudloadbalancers.Node(
+ address=address, port=port, condition=condition,
+ weight=weight, type=typ)
+ resp, body = lb.add_nodes([node])
+ result.update(body['nodes'][0])
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # Updating an existing node
+ mutable = {
+ 'condition': condition,
+ 'type': typ,
+ 'weight': weight,
+ }
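+            # Drop attributes that were not supplied or already match the node,
+            # leaving only the fields that actually need to change.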
+
+            for name, value in list(mutable.items()):
+ if value is None or value == getattr(node, name):
+ mutable.pop(name)
+
+ if not mutable:
+ module.exit_json(changed=False, state=state, node=result)
+
+ try:
+ # The diff has to be set explicitly to update node's weight and
+ # type; this should probably be fixed in pyrax
+ lb.update_node(node, diff=mutable)
+ result.update(mutable)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if wait:
+ pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
+ attempts=wait_timeout)
+ if lb.status != 'ACTIVE':
+ module.fail_json(
+ msg='Load balancer not active after %ds (current status: %s)' %
+ (wait_timeout, lb.status.lower()))
+
+ kwargs = {'node': result} if result else {}
+ module.exit_json(changed=True, state=state, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
new file mode 100644
index 00000000..114128e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+options:
+ loadbalancer:
+ type: str
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+      existing credentials.
+ default: true
+ type: bool
+ private_key:
+ type: str
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ type: str
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ type: str
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+      format, concatenated into a single string.
+ secure_port:
+ type: int
+ description:
+ - The port to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ type: bool
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+      termination is also applied or removed.
+ type: bool
+ wait:
+ description:
+    - Wait for the balancer to be in state "running" before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ type: int
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ community.general.rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ community.general.rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout // 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
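+    # Diff the requested SSL attributes against the current termination config;
+    # a missing config or any differing value triggers a (re)apply.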
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.items():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output, so you don't
+ # accidentally echo it with `ansible-playbook -v` or `debug`, and the
+ # certificate, which is just long. Convert other attributes to snake_case
+ # and include https_redirect at the top-level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(no_log=True),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
new file mode 100644
index 00000000..e9b7e2be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns
+short_description: Manage domains on Rackspace Cloud DNS
+description:
+ - Manage domains on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the domain. Maximum length of 160 characters
+ email:
+ type: str
+ description:
+ - Email address of the domain administrator
+ name:
+ type: str
+ description:
+ - Domain name to create
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of domain in seconds
+ default: 3600
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create domain
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Domain create request
+ local_action:
+ module: rax_dns
+ credentials: ~/.raxpub
+ name: example.org
+ email: admin@example.org
+ register: rax_dns
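+
+# A hedged illustration, not part of the original examples: the notes above
+# recommend C(serial: 1) to avoid exceeding the CloudDNS API request limit.
+- name: Create domain one host at a time
+  hosts: all
+  serial: 1
+  gather_facts: False
+  tasks:
+    - name: Domain create request
+      local_action:
+        module: rax_dns
+        credentials: ~/.raxpub
+        name: example.org
+        email: admin@example.org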
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns(module, comment, email, name, state, ttl):
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not email:
+ module.fail_json(msg='An "email" attribute is required for '
+ 'creating a domain')
+
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ domain = dns.create(name=name, emailAddress=email, ttl=ttl,
+ comment=comment)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(domain, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(domain, 'ttl', None):
+ update['ttl'] = ttl
+ if email != getattr(domain, 'emailAddress', None):
+ update['emailAddress'] = email
+
+ if update:
+ try:
+ domain.update(**update)
+ changed = True
+ domain.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NotFound:
+ domain = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if domain:
+ try:
+ domain.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, domain=rax_to_dict(domain))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ email=dict(),
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ email = module.params.get('email')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+
+ setup_rax_module(module, pyrax, False)
+
+ rax_dns(module, comment, email, name, state, ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
new file mode 100644
index 00000000..0b60120a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns_record
+short_description: Manage DNS records on Rackspace Cloud DNS
+description:
+ - Manage DNS records on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+      - Brief description of the record. Maximum length of 160 characters
+ data:
+ type: str
+ description:
+ - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
+ SRV/TXT
+ required: True
+ domain:
+ type: str
+ description:
+ - Domain name to create the record in. This is an invalid option when
+ type=PTR
+ loadbalancer:
+ type: str
+ description:
+ - Load Balancer ID to create a PTR record for. Only used with type=PTR
+ name:
+ type: str
+ description:
+ - FQDN record name to create
+ required: True
+ overwrite:
+ description:
+ - Add new records if data doesn't match, instead of updating existing
+ record with matching name. If there are already multiple records with
+ matching name and overwrite=true, this module will fail.
+ default: true
+ type: bool
+ priority:
+ type: int
+ description:
+ - Required for MX and SRV records, but forbidden for other record types.
+ If specified, must be an integer from 0 to 65535.
+ server:
+ type: str
+ description:
+ - Server ID to create a PTR record for. Only used with type=PTR
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of record in seconds
+ default: 3600
+ type:
+ type: str
+ description:
+ - DNS record type
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
+ - PTR
+ required: true
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+ - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
+ supplied
+ - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
+ - C(PTR) record support was added in version 1.7
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create DNS Records
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Create A record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ domain: example.org
+ name: www.example.org
+ data: "{{ rax_accessipv4 }}"
+ type: A
+ register: a_record
+
+ - name: Create PTR record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ server: "{{ rax_id }}"
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ register: ptr_record
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_find_server,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
+ name=None, server=None, state='present', ttl=7200):
+ changed = False
+ results = []
+
+ dns = pyrax.cloud_dns
+
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if loadbalancer:
+ item = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ elif server:
+ item = rax_find_server(module, pyrax, server)
+
+ if state == 'present':
+ current = dns.list_ptr_records(item)
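+        # Look for an existing PTR record with the same data; update its name/TTL
+        # in place if they differ, otherwise report it unchanged.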
+ for record in current:
+ if record.data == data:
+ if record.ttl != ttl or record.name != name:
+ try:
+ dns.update_ptr_record(item, record, name, data, ttl)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ record.ttl = ttl
+ record.name = name
+ results.append(rax_to_dict(record))
+ break
+ else:
+ results.append(rax_to_dict(record))
+ break
+
+ if not results:
+ record = dict(name=name, type='PTR', data=data, ttl=ttl,
+ comment=comment)
+ try:
+ results = dns.add_ptr_records(item, [record])
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+ elif state == 'absent':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ results.append(rax_to_dict(record))
+ break
+
+ if results:
+ try:
+ dns.delete_ptr_records(item, data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+
+def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
+ overwrite=True, priority=None, record_type='A',
+ state='present', ttl=7200):
+ """Function for manipulating record types other than PTR"""
+
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not priority and record_type in ['MX', 'SRV']:
+ module.fail_json(msg='A "priority" attribute is required for '
+ 'creating a MX or SRV record')
+
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
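+        # overwrite=true matches an existing record by name only so it can be
+        # updated in place; otherwise the record is matched by both name and
+        # data, and a new record is created when nothing matches.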
+ try:
+ if overwrite:
+ record = domain.find_record(record_type, name=name)
+ else:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='overwrite=true and there are multiple matching records')
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ try:
+ record_data = {
+ 'type': record_type,
+ 'name': name,
+ 'data': data,
+ 'ttl': ttl
+ }
+ if comment:
+ record_data.update(dict(comment=comment))
+ if priority and record_type.upper() in ['MX', 'SRV']:
+ record_data.update(dict(priority=priority))
+
+ record = domain.add_records([record_data])[0]
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(record, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(record, 'ttl', None):
+ update['ttl'] = ttl
+ if priority != getattr(record, 'priority', None):
+ update['priority'] = priority
+ if data != getattr(record, 'data', None):
+ update['data'] = data
+
+ if update:
+ try:
+ record.update(**update)
+ changed = True
+ record.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ record = {}
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if record:
+ try:
+ record.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, record=rax_to_dict(record))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ data=dict(required=True),
+ domain=dict(),
+ loadbalancer=dict(),
+ name=dict(required=True),
+ overwrite=dict(type='bool', default=True),
+ priority=dict(type='int'),
+ server=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
+ 'SRV', 'TXT', 'PTR'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ required_one_of=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ data = module.params.get('data')
+ domain = module.params.get('domain')
+ loadbalancer = module.params.get('loadbalancer')
+ name = module.params.get('name')
+ overwrite = module.params.get('overwrite')
+ priority = module.params.get('priority')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+ record_type = module.params.get('type')
+
+ setup_rax_module(module, pyrax, False)
+
+ if record_type.upper() == 'PTR':
+ if not server and not loadbalancer:
+ module.fail_json(msg='one of the following is required: '
+ 'server,loadbalancer')
+ rax_dns_record_ptr(module, data=data, comment=comment,
+ loadbalancer=loadbalancer, name=name, server=server,
+ state=state, ttl=ttl)
+ else:
+ rax_dns_record(module, comment=comment, data=data, domain=domain,
+ name=name, overwrite=overwrite, priority=priority,
+ record_type=record_type, state=state, ttl=ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
new file mode 100644
index 00000000..386ca7cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_facts
+short_description: Gather facts for Rackspace Cloud Servers
+description:
+ - Gather facts for Rackspace Cloud Servers.
+options:
+ address:
+ type: str
+ description:
+      - Server IP address to retrieve facts for; it will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to retrieve facts for
+ name:
+ type: str
+ description:
+ - Server name to retrieve facts for
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Gather info about servers
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Get facts about servers
+ local_action:
+ module: rax_facts
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ - name: Map some facts
+ ansible.builtin.set_fact:
+ ansible_ssh_host: "{{ rax_accessipv4 }}"
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_facts(module, address, name, server_id):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ ansible_facts = {}
+
+ search_opts = {}
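+    # main() guarantees exactly one of name/address/id was supplied; build the
+    # candidate server list from whichever one is present.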
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+ except Exception as e:
+ pass
+
+ servers[:] = [server for server in servers if server.status != "DELETED"]
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif len(servers) == 1:
+ ansible_facts = rax_to_dict(servers[0], 'server')
+
+ module.exit_json(changed=changed, ansible_facts=ansible_facts)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+
+ setup_rax_module(module, pyrax)
+
+ rax_facts(module, address, name, server_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
new file mode 100644
index 00000000..7080cc2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files
+short_description: Manipulate Rackspace Cloud Files Containers
+description:
+ - Manipulate Rackspace Cloud Files Containers
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing containers.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: "no"
+ container:
+ type: str
+ description:
+ - The container to use for container or metadata operations.
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on a container
+ private:
+ description:
+ - Used to set a container as private, removing it from the CDN. B(Warning!)
+ Private containers, if previously made public, can have live objects
+ available until the TTL on cached objects expires
+ type: bool
+ default: false
+ public:
+ description:
+ - Used to set a container as public, available via the Cloud Files CDN
+ type: bool
+ default: false
+ region:
+ type: str
+ description:
+ - Region to create an instance in
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent', 'list']
+ default: present
+ ttl:
+ type: int
+ description:
+ - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
+ Setting a TTL is only appropriate for containers that are public
+ type:
+ type: str
+ description:
+ - Type of object to do work on, i.e. metadata object or a container object
+ choices:
+ - container
+ - meta
+ default: container
+ web_error:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
+ web_index:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Containers"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "List all containers"
+ community.general.rax_files:
+ state: list
+
+ - name: "Create container called 'mycontainer'"
+ community.general.rax_files:
+ container: mycontainer
+
+ - name: "Create container 'mycontainer2' with metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ meta:
+ key: value
+ file_for: someuser@example.com
+
+ - name: "Set a container's web index page"
+ community.general.rax_files:
+ container: mycontainer
+ web_index: index.html
+
+ - name: "Set a container's web error page"
+ community.general.rax_files:
+ container: mycontainer
+ web_error: error.html
+
+ - name: "Make container public"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+
+ - name: "Make container public with a 24 hour TTL"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+ ttl: 86400
+
+ - name: "Make container private"
+ community.general.rax_files:
+ container: mycontainer
+ private: yes
+
+- name: "Test Cloud Files Containers Metadata Storage"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "Get mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+
+ - name: "Set mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+ meta:
+ uploaded_by: someuser@example.com
+
+ - name: "Remove mycontainer2 metadata"
+ community.general.rax_files:
+ container: "mycontainer2"
+ type: meta
+ state: absent
+ meta:
+ key: ""
+ file_for: ""
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError as e:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=True)
+META_PREFIX = 'x-container-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _fetch_meta(module, container):
+ EXIT_DICT['meta'] = dict()
+ try:
+ for k, v in container.get_metadata().items():
+ split_key = k.split(META_PREFIX)[-1]
+ EXIT_DICT['meta'][split_key] = v
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+
+def meta(cf, module, container_, state, meta_, clear_meta):
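+    # Set, selectively remove, or clear all metadata keys on an existing
+    # container, then report the metadata that remains.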
+ c = _get_container(module, cf, container_)
+
+ if meta_ and state == 'present':
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif meta_ and state == 'absent':
+ remove_results = []
+ for k, v in meta_.items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+ elif state == 'absent':
+ remove_results = []
+ for k, v in c.get_metadata().items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+
+ _fetch_meta(module, c)
+ _locals = locals().keys()
+
+ EXIT_DICT['container'] = c.name
+ if 'meta_set' in _locals or 'remove_results' in _locals:
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
+ if public and private:
+ module.fail_json(msg='container cannot be simultaneously '
+ 'set to public and private')
+
+ if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
+        module.fail_json(msg='container attributes cannot be set or removed '
+                             'when state is absent')
+
+ if state == 'list':
+ # We don't care if attributes are specified, let's list containers
+ EXIT_DICT['containers'] = cf.list_containers()
+ module.exit_json(**EXIT_DICT)
+
+ try:
+ c = cf.get_container(container_)
+ except pyrax.exc.NoSuchContainer as e:
+ # Make the container if state=present, otherwise bomb out
+ if state == 'present':
+ try:
+ c = cf.create_container(container_)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['created'] = True
+ else:
+ module.fail_json(msg=e.message)
+ else:
+ # Successfully grabbed a container object
+ # Delete if state is absent
+ if state == 'absent':
+ try:
+ cont_deleted = c.delete()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['deleted'] = True
+
+ if meta_:
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ finally:
+ _fetch_meta(module, c)
+
+ if ttl:
+ try:
+ c.cdn_ttl = ttl
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['ttl'] = c.cdn_ttl
+
+ if public:
+ try:
+ cont_public = c.make_public()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
+ ssl_url=c.cdn_ssl_uri,
+ streaming_url=c.cdn_streaming_uri,
+ ios_uri=c.cdn_ios_uri)
+
+ if private:
+ try:
+ cont_private = c.make_private()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_private'] = True
+
+ if web_index:
+ try:
+ cont_web_index = c.set_web_index_page(web_index)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_index'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ if web_error:
+ try:
+ cont_err_index = c.set_web_error_page(web_error)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_error'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['objs_in_container'] = c.object_count
+ EXIT_DICT['total_bytes'] = c.total_bytes
+
+ _locals = locals().keys()
+ if ('cont_deleted' in _locals
+ or 'meta_set' in _locals
+ or 'cont_public' in _locals
+ or 'cont_private' in _locals
+ or 'cont_web_index' in _locals
+ or 'cont_err_index' in _locals):
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ type=dict(choices=['container', 'meta'], default='container'),
+ ttl=dict(type='int'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
+ web_index=dict(),
+ web_error=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container_ = module.params.get('container')
+ state = module.params.get('state')
+ meta_ = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ typ = module.params.get('type')
+ ttl = module.params.get('ttl')
+ public = module.params.get('public')
+ private = module.params.get('private')
+ web_index = module.params.get('web_index')
+ web_error = module.params.get('web_error')
+
+ if state in ['present', 'absent'] and not container_:
+ module.fail_json(msg='please specify a container name')
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
new file mode 100644
index 00000000..dc445554
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files_objects
+short_description: Upload, download, and delete objects in Rackspace Cloud Files
+description:
+ - Upload, download, and delete objects in Rackspace Cloud Files
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing objects.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: 'no'
+ container:
+ type: str
+ description:
+ - The container to use for file object operations.
+ required: true
+ dest:
+ type: str
+ description:
+      - The destination of a "get" operation; for example, a local directory, "/home/user/myfolder".
+        Used to specify the destination of an operation on a remote object; for example, a file name,
+        "file1", or a comma-separated list of remote objects, "file1,file2,file17".
+ expires:
+ type: int
+ description:
+ - Used to set an expiration on a file or folder uploaded to Cloud Files.
+ Requires an integer, specifying expiration in seconds
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on an uploaded file or folder
+ method:
+ type: str
+ description:
+      - The method of operation to be performed. For example, put to upload files
+        to Cloud Files, get to download files from Cloud Files, or delete to delete
+        remote objects in Cloud Files.
+ choices:
+ - get
+ - put
+ - delete
+ default: get
+ src:
+ type: str
+ description:
+      - Source from which to upload files. Used to specify a remote object as a source for
+        an operation; for example, a file name, "file1", or a comma-separated list of remote objects,
+        "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations.
+ structure:
+ description:
+ - Used to specify whether to maintain nested directory structure when downloading objects
+ from Cloud Files. Setting to false downloads the contents of a container to a single,
+ flat directory
+ type: bool
+ default: 'yes'
+ type:
+ type: str
+ description:
+ - Type of object to do work on
+ - Metadata object or a file object
+ choices:
+ - file
+ - meta
+ default: file
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Objects"
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: "Get objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
+
+ - name: "Get single object from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
+
+ - name: "Get several objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
+
+ - name: "Delete one object in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
+
+ - name: "Delete several objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
+
+ - name: "Delete all objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+
+ - name: "Upload all files to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
+
+ - name: "Upload one file to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
+
+ - name: "Upload one file to test container with metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ src: ~/Downloads/testcont/file2
+ method: put
+ meta:
+ testkey: testdata
+ who_uploaded_this: someuser@example.com
+
+ - name: "Upload one file to test container with TTL of 60 seconds"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
+
+ - name: "Attempt to get remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
+ ignore_errors: yes
+
+ - name: "Attempt to delete remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
+ ignore_errors: yes
+
+- name: "Test Cloud Files Objects Metadata"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get metadata on one object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
+
+ - name: "Get metadata on several objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
+
+ - name: "Set metadata on an object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: put
+ meta:
+ key1: value1
+ key2: value2
+ clear_meta: true
+
+ - name: "Verify metadata is set"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
+
+ - name: "Delete metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: delete
+ meta:
+ key1: ''
+ key2: ''
+
+ - name: "Get metadata on all objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=False)
+META_PREFIX = 'x-object-meta-'
+
+
+def _get_container(module, cf, container):
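+    """ Fetch a container by name, failing the module if it does not exist. """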
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _upload_folder(cf, folder, container, ttl=None, headers=None):
+ """ Uploads a folder to Cloud Files.
+ """
+ total_bytes = 0
+ for root, dirs, files in os.walk(folder):
+ for fname in files:
+ full_path = os.path.join(root, fname)
+ obj_name = os.path.relpath(full_path, folder)
+ obj_size = os.path.getsize(full_path)
+ cf.upload_file(container, full_path,
+ obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
+ total_bytes += obj_size
+ return total_bytes
+
+
+def upload(module, cf, container, src, dest, meta, expires):
+ """ Uploads a single object or a folder to Cloud Files Optionally sets an
+    """ Uploads a single object or a folder to Cloud Files. Optionally sets
+    metadata, a TTL value (expires), or Content-Disposition and Content-Encoding
+ """
+ if not src:
+ module.fail_json(msg='src must be specified when uploading')
+
+ c = _get_container(module, cf, container)
+ src = os.path.abspath(os.path.expanduser(src))
+ is_dir = os.path.isdir(src)
+
+    if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
+ module.fail_json(msg='src must be a file or a directory')
+ if dest and is_dir:
+ module.fail_json(msg='dest cannot be set when whole '
+ 'directories are uploaded')
+
+ cont_obj = None
+ total_bytes = 0
+ if dest and not is_dir:
+ try:
+ cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif is_dir:
+ try:
+ total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ try:
+ cont_obj = c.upload_file(src, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+ EXIT_DICT['success'] = True
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
+ if cont_obj or total_bytes > 0:
+ EXIT_DICT['changed'] = True
+ if meta:
+ EXIT_DICT['meta'] = dict(updated=True)
+
+ if cont_obj:
+ EXIT_DICT['bytes'] = cont_obj.total_bytes
+ EXIT_DICT['etag'] = cont_obj.etag
+ else:
+ EXIT_DICT['bytes'] = total_bytes
+
+ module.exit_json(**EXIT_DICT)
+
+
+def download(module, cf, container, src, dest, structure):
+ """ Download objects from Cloud Files to a local path specified by "dest".
+ Optionally disable maintaining a directory structure by by passing a
+    Optionally disable maintaining a directory structure by passing a
+ """
+ # Looking for an explicit destination
+ if not dest:
+ module.fail_json(msg='dest is a required argument when '
+ 'downloading from Cloud Files')
+
+ # Attempt to fetch the container by name
+ c = _get_container(module, cf, container)
+
+ # Accept a single object name or a comma-separated list of objs
+ # If not specified, get the entire container
+ if src:
+        # Build a list (not a lazy map) so the object names can be counted below
+        objs = [obj.strip() for obj in src.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ dest = os.path.abspath(os.path.expanduser(dest))
+ is_dir = os.path.isdir(dest)
+
+ if not is_dir:
+ module.fail_json(msg='dest must be a directory')
+
+ results = []
+ for obj in objs:
+ try:
+ c.download_object(obj, dest, structure=structure)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(obj)
+
+ len_results = len(results)
+ len_objs = len(objs)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['requested_downloaded'] = results
+ if results:
+ EXIT_DICT['changed'] = True
+ if len_results == len_objs:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
+ else:
+ EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
+ "downloaded" % (len_results, len_objs)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete(module, cf, container, src, dest):
+    """ Delete specific objects by providing a single file name or a
+ comma-separated list to src OR dest (but not both). Omitting file name(s)
+ assumes the entire container is to be deleted.
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ c = _get_container(module, cf, container)
+
+ if objs:
+        # Build a list (not a lazy map) so the object names can be counted below
+        objs = [obj.strip() for obj in objs.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ num_objs = len(objs)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.delete_object(obj)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ num_deleted = results.count(True)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['deleted'] = num_deleted
+ EXIT_DICT['requested_deleted'] = objs
+
+ if num_deleted:
+ EXIT_DICT['changed'] = True
+
+ if num_objs == num_deleted:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
+ else:
+ EXIT_DICT['msg'] = ("Error: only %s of %s objects "
+ "deleted" % (num_deleted, num_objs))
+ module.exit_json(**EXIT_DICT)
+
+
+def get_meta(module, cf, container, src, dest):
+ """ Get metadata for a single file, comma-separated list, or entire
+ container
+ """
+ c = _get_container(module, cf, container)
+
+ objs = None
+ if src and dest:
+        module.fail_json(msg="Error: ambiguous instructions; objects to fetch "
+                             "metadata for have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ if objs:
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+ else:
+ objs = c.get_object_names()
+
+ results = dict()
+ for obj in objs:
+ try:
+ meta = c.get_object(obj).get_metadata()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results[obj] = dict()
+ for k, v in meta.items():
+ meta_key = k.split(META_PREFIX)[-1]
+ results[obj][meta_key] = v
+
+ EXIT_DICT['container'] = c.name
+ if results:
+ EXIT_DICT['meta_results'] = results
+ EXIT_DICT['success'] = True
+ module.exit_json(**EXIT_DICT)
+
+
+def put_meta(module, cf, container, src, dest, meta, clear_meta):
+ """ Set metadata on a container, single file, or comma-separated list.
+ Passing a true value to clear_meta clears the metadata stored in Cloud
+ Files before setting the new metadata to the value of "meta".
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to set meta"
+ " have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+        EXIT_DICT['num_changed'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete_meta(module, cf, container, src, dest, meta):
+ """ Removes metadata keys and values specified in meta, if any. Deletes on
+ all objects specified by src or dest (but not both), if any; otherwise it
+ deletes keys on all objects in the container
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
+ "deleted have been specified on both src and dest"
+ " args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = [] # Num of metadata keys removed, not objects affected
+ for obj in objs:
+ if meta:
+ for k, v in meta.items():
+ try:
+ result = c.get_object(obj).remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+ else:
+ try:
+ o = c.get_object(obj)
+ except pyrax.exc.NoSuchObject as e:
+ module.fail_json(msg=e.message)
+
+ for k, v in o.get_metadata().items():
+ try:
+ result = o.remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['num_deleted'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
+ structure, expires):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "file":
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
+
+ elif method == 'get':
+ download(module, cf, container, src, dest, structure)
+
+ elif method == 'delete':
+ delete(module, cf, container, src, dest)
+
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
+
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(required=True),
+ src=dict(),
+ dest=dict(),
+ method=dict(default='get', choices=['put', 'get', 'delete']),
+ type=dict(default='file', choices=['file', 'meta']),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
+ expires=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container = module.params.get('container')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ method = module.params.get('method')
+ typ = module.params.get('type')
+ meta = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ structure = module.params.get('structure')
+ expires = module.params.get('expires')
+
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
new file mode 100644
index 00000000..330c510d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present']
+ default: present
+ required: false
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def cloud_identity(module, state, identity):
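+    """ Report the authenticated identity's details, failing the module if the
+    credentials could not be verified. """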
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ instance.update(rax_to_dict(identity))
+ instance['services'] = instance.get('services', {}).keys()
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.identity:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
new file mode 100644
index 00000000..0314883f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_keypair
+short_description: Create a keypair for use with Rackspace Cloud Servers
+description:
+ - Create a keypair for use with Rackspace Cloud Servers
+options:
+ name:
+ type: str
+ description:
+ - Name of keypair
+ required: true
+ public_key:
+ type: str
+ description:
+ - Public Key string to upload. Can be a file path or string
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+notes:
+ - Keypairs cannot be manipulated, only created and deleted. To "update" a
+ keypair you must first delete and then recreate.
+ - The ability to specify a file path for the public key was added in 1.7
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ region: DFW
+ register: keypair
+ - name: Create local public key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.public_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
+ - name: Create local private key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.private_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
+
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
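+
+# A hypothetical variation (not part of the original examples): since public_key
+# also accepts a file path, the task below passes a path directly instead of a
+# file lookup. The path shown is illustrative only.
+- name: Create a keypair from a public key file
+  hosts: localhost
+  gather_facts: False
+  tasks:
+    - name: Keypair request
+      local_action:
+        module: rax_keypair
+        credentials: ~/.raxpub
+        name: my_keypair
+        public_key: /home/user/.ssh/id_rsa.pub
+        region: DFW
+      register: keypair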
+'''
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_keypair(module, name, public_key, state):
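+    """ Ensure a Cloud Servers keypair with this name exists (created from an
+    optional public key string or file path) or is deleted. """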
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ keypair = {}
+
+ if state == 'present':
+ if public_key and os.path.isfile(public_key):
+ try:
+ f = open(public_key)
+ public_key = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % public_key)
+
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except cs.exceptions.NotFound:
+ try:
+ keypair = cs.keypairs.create(name, public_key)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except Exception:
+ pass
+
+ if keypair:
+ try:
+ keypair.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ public_key=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ public_key = module.params.get('public_key')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ rax_keypair(module, name, public_key, state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
new file mode 100644
index 00000000..b7d172d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_meta
+short_description: Manipulate metadata for Rackspace Cloud Servers
+description:
+ - Manipulate metadata for Rackspace Cloud Servers
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to modify metadata for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to modify metadata for
+ name:
+ type: str
+ description:
+ - Server name to modify metadata for
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Set metadata for a server
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Set metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ meta:
+ group: primary_group
+ groups:
+ - group_two
+ - group_three
+ app: my_app
+
+ - name: Clear metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+'''
+
+import json
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+from ansible.module_utils.six import string_types
+
+
+def rax_meta(module, address, name, server_id, meta):
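+    """ Locate a single server by name, IP address, or ID and replace its
+    metadata with the supplied hash. """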
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+ except Exception as e:
+ pass
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif not servers:
+ module.fail_json(msg='Failed to find a server matching provided '
+ 'search parameters')
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ server = servers[0]
+ if server.metadata == meta:
+ changed = False
+ else:
+ changed = True
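+        # Drop keys that are no longer requested, then apply the new metadata set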
+ removed = set(server.metadata.keys()).difference(meta.keys())
+ cs.servers.delete_meta(server, list(removed))
+ cs.servers.set_meta(server, meta)
+ server.get()
+
+ module.exit_json(changed=changed, meta=server.metadata)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ meta=dict(type='dict', default=dict()),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+ meta = module.params.get('meta')
+
+ setup_rax_module(module, pyrax)
+
+ rax_meta(module, address, name, server_id, meta)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
new file mode 100644
index 00000000..8de26609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ type: str
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ type: str
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ type: str
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ type: str
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ type: str
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ community.general.rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
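+    """ Create, update, recreate, or delete a monitoring alarm so that it matches
+    the requested attributes. """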
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
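+            # A different check or notification plan cannot be set on an existing
+            # alarm, so the alarm has to be deleted and recreated in that case.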
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+ should_update = (disabled and disabled != alarm.disabled) or \
+ (metadata and metadata != alarm.metadata) or \
+ (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
new file mode 100644
index 00000000..e04dfc74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ entity_id:
+ type: str
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ type: str
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ type: str
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ - |
+ Choices for this option are:
+ - C(remote.dns)
+ - C(remote.ftp-banner)
+ - C(remote.http)
+ - C(remote.imap-banner)
+ - C(remote.mssql-banner)
+ - C(remote.mysql-banner)
+ - C(remote.ping)
+ - C(remote.pop3-banner)
+ - C(remote.postgresql-banner)
+ - C(remote.smtp-banner)
+ - C(remote.smtp)
+ - C(remote.ssh)
+ - C(remote.tcp)
+ - C(remote.telnet-banner)
+ - C(agent.filesystem)
+ - C(agent.memory)
+ - C(agent.load_average)
+ - C(agent.cpu)
+ - C(agent.disk)
+ - C(agent.network)
+ - C(agent.plugin)
+ required: true
+ monitoring_zones_poll:
+ type: str
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ type: str
+ description:
+ - One of `target_hostname` and `target_alias` is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4, IPv6, or FQDN.
+ target_alias:
+ type: str
+ description:
+ - One of `target_alias` and `target_hostname` is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ `ip_addresses` hash to resolve an IP address to target.
+ details:
+ type: dict
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ type: int
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ type: int
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ community.general.rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ meta:
+ hurf: durf
+ register: the_check
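+
+  # A hypothetical second task (not in the original example): agent.* checks can
+  # only target an entity with a non-null agent_id and take no monitoring zones
+  # or target host; the period and timeout values below are illustrative only.
+  - name: Associate an agent-based memory check with the same entity.
+    community.general.rax_mon_check:
+      credentials: ~/.rax_pub
+      state: present
+      entity_id: "{{ the_entity['entity']['id'] }}"
+      label: the_memory_check
+      check_type: agent.memory
+      period: 60
+      timeout: 30
+    register: the_memory_check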
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
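+    """ Create, update, recreate, or delete a monitoring check on the given
+    entity so that it matches the requested attributes. """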
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.items():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
new file mode 100644
index 00000000..69f49cd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ label:
+ type: str
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ type: str
+ description:
+ - Ensure that an entity with this C(name) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ agent_id:
+ type: str
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ type: dict
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+      added to this entity. Must be a dictionary with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ community.general.rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+ meta:
+ hurf: durf
+ register: the_entity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
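+    """ Ensure a monitoring entity with this label exists with the requested
+    attributes, or delete every entity carrying the label. """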
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+            should_update = (agent_id and agent_id != entity.agent_id) or \
+                (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
new file mode 100644
index 00000000..416d03ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ type: str
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ type: dict
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+    community.general.rax_mon_notification:
+      credentials: ~/.rax_pub
+      label: omg
+      notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
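+    """ Ensure a notification with this label exists with the requested type and
+    details, or delete every notification carrying the label. """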
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
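+            # The notification type cannot be changed in place; a different
+            # notification_type forces the existing notification to be recreated.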
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+                notification.update(details=details)
+ changed = True
+
+ if should_delete:
+ notification.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
new file mode 100644
index 00000000..a4b8920d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ community.general.rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
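+        # There is no in-place update here: any difference in the requested
+        # notification lists causes the existing plan to be deleted and
+        # recreated with the new state lists.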
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
new file mode 100644
index 00000000..27a793b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_network
+short_description: create / delete an isolated network in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud isolated network.
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ label:
+ type: str
+ description:
+ - Label (name) to give the network
+ required: yes
+ cidr:
+ type: str
+ description:
+      - CIDR of the network being created
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Jesse Keating (@omgjlk)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build an Isolated Network
+  gather_facts: False
+  hosts: local
+  connection: local
+  tasks:
+ - name: Network create request
+ local_action:
+ module: rax_network
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
+'''
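+# A hypothetical teardown of the same network (sketch; it mirrors the create
+# request above with state switched to absent, where cidr is not required):
+#
+#   - name: Network delete request
+#     local_action:
+#       module: rax_network
+#       credentials: ~/.raxpub
+#       label: my-net
+#       state: absent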
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_network(module, state, label, cidr):
+ changed = False
+ network = None
+ networks = []
+
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not cidr:
+ module.fail_json(msg='missing required arguments: cidr')
+
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ except pyrax.exceptions.NetworkNotFound:
+ try:
+ network = pyrax.cloud_networks.create(label, cidr=cidr)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ network.delete()
+ changed = True
+ except pyrax.exceptions.NetworkNotFound:
+ pass
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if network:
+ instance = dict(id=network.id,
+ label=network.label,
+ cidr=network.cidr)
+ networks.append(instance)
+
+ module.exit_json(changed=changed, networks=networks)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present',
+ choices=['present', 'absent']),
+ label=dict(required=True),
+ cidr=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ cidr = module.params.get('cidr')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_network(module, state, label, cidr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
new file mode 100644
index 00000000..dca006da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_queue
+short_description: create / delete a queue in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud queue.
+options:
+ name:
+ type: str
+ description:
+ - Name to give the queue
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Queue
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Queue create request
+ local_action:
+ module: rax_queue
+ credentials: ~/.raxpub
+ name: my-queue
+ region: DFW
+ state: present
+ register: my_queue
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_queue(module, state, name):
+    for arg_name, arg in (('state', state), ('name', name)):
+        if not arg:
+            module.fail_json(msg='%s is required for rax_queue' % arg_name)
+
+ changed = False
+ queues = []
+ instance = {}
+
+ cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ for queue in cq.list():
+ if name != queue.name:
+ continue
+
+ queues.append(queue)
+
+ if len(queues) > 1:
+ module.fail_json(msg='Multiple Queues were matched by name')
+
+ if state == 'present':
+ if not queues:
+ try:
+ queue = cq.create(name)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ queue = queues[0]
+
+ instance = dict(name=queue.name)
+ result = dict(changed=changed, queue=instance)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if queues:
+ queue = queues[0]
+ try:
+ queue.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, queue=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_queue(module, state, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
new file mode 100644
index 00000000..7b2b6ace
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+options:
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+      - If not specified, it will fall back to C(auto).
+ choices:
+ - auto
+ - manual
+ files:
+ type: dict
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ loadbalancers:
+ type: list
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ type: int
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ min_entities:
+ type: int
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ type: str
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+      - Data to be uploaded to the server's config drive. This option implies
+        I(config_drive). Can be a file path or a string.
+ wait:
+ description:
+ - wait for the scaling group to finish provisioning the minimum amount of
+ servers
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+ register: asg
+'''
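+# A hypothetical variation (sketch; file paths and values are illustrative
+# only): the same group with injected files and config-drive user data. The
+# files hash maps remote path to local path, and user_data implies
+# config_drive:
+#
+#   - community.general.rax_scaling_group:
+#       credentials: ~/.raxpub
+#       region: ORD
+#       flavor: performance1-1
+#       image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+#       min_entities: 5
+#       max_entities: 10
+#       name: ASG Test
+#       server_name: asgtest
+#       user_data: /opt/bootstrap/cloud-init.yml
+#       files:
+#         /etc/motd: files/motd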
+
+import base64
+import json
+import os
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six import string_types
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
+ image=None, key_name=None, loadbalancers=None, meta=None,
+ min_entities=0, max_entities=0, name=None, networks=None,
+ server_name=None, state='present', user_data=None,
+ config_drive=False, wait=True, wait_timeout=300):
+ files = {} if files is None else files
+ loadbalancers = [] if loadbalancers is None else loadbalancers
+ meta = {} if meta is None else meta
+ networks = [] if networks is None else networks
+
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(user_data):
+ try:
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ for nic in nics:
+ # pyrax is currently returning net-id, but we need uuid
+ # this check makes this forward compatible for a time when
+ # pyrax uses uuid instead
+ if nic.get('net-id'):
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = []
+ if files:
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ f = open(lpath, 'r')
+ personality.append({
+ 'path': rpath,
+ 'contents': f.read()
+ })
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ try:
+ lb_id = int(lb.get('id'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer ID is not an integer: '
+ '%s' % lb.get('id'))
+ try:
+ port = int(lb.get('port'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer port is not an '
+ 'integer: %s' % lb.get('port'))
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+ metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name, config_drive=config_drive,
+ user_data=user_data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['server_name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ disk_config = disk_config or 'AUTO'
+ if ((disk_config or lc.get('disk_config')) and
+ disk_config != lc.get('disk_config', 'AUTO')):
+ lc_args['disk_config'] = disk_config
+
+ if (meta or lc.get('meta')) and meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+ test_personality = []
+ for p in personality:
+ test_personality.append({
+ 'path': p['path'],
+ 'contents': base64.b64encode(p['contents'])
+ })
+ if ((test_personality or lc.get('personality')) and
+ test_personality != lc.get('personality')):
+ lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+ # Work around for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if config_drive != lc.get('config_drive', False):
+ lc_args['config_drive'] = config_drive
+
+ if (user_data and
+ base64.b64encode(user_data) != lc.get('user_data')):
+ lc_args['user_data'] = user_data
+
+ if lc_args:
+ # Work around for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
+ sg.get()
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ state = sg.get_state()
+ if state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+ except pyrax.exceptions.NotFound as e:
+ sg = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ config_drive=dict(default=False, type='bool'),
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+ files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ config_drive = module.params.get('config_drive')
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+ module.fail_json(msg='min_entities and max_entities must be an '
+ 'integer between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+ files=files, flavor=flavor, image=image, meta=meta,
+ key_name=key_name, loadbalancers=loadbalancers,
+ min_entities=min_entities, max_entities=max_entities,
+ name=name, networks=networks, server_name=server_name,
+ state=state, config_drive=config_drive, user_data=user_data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
new file mode 100644
index 00000000..384825f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+options:
+ at:
+ type: str
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ type: int
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+ in the scaling group. If this is a percentage, you must set
+ I(is_percent) to C(true) also.
+ cron:
+ type: str
+ description:
+      - The time when the policy will be executed, as a cron entry. For
+        example, setting this parameter to C(1 0 * * *) runs the policy every
+        day at 00:01.
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ desired_capacity:
+ type: int
+ description:
+      - The desired server capacity of the scaling group; that is, how
+ many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ type: bool
+ name:
+ type: str
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ type: str
+ description:
+      - The type of policy that will be executed.
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ type: str
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asps_at
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asp_cron
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+ register: asp_webhook
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if state == 'present':
+        policies = list(filter(lambda p: name == p.name, sg.list_policies()))
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
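+        # Schedule policies carry their trigger in the "args" dict, either an
+        # "at" timestamp or a "cron" expression; webhook policies pass none.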
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ else:
+ policy = policies[0]
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+ else:
+ try:
+            policies = list(filter(lambda p: name == p.name, sg.list_policies()))
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+        module.fail_json(msg='policy_type=schedule is required for a '
+                             'time-based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
new file mode 100644
index 00000000..8df9a5e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
@@ -0,0 +1,671 @@
+#!/usr/bin/python
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ public_ip:
+ type: str
+ description:
+ - Manage public IP on a Scaleway server
+      - Could be a Scaleway IP address UUID
+      - C(dynamic) means that the IP is destroyed at the same time the host is destroyed
+      - C(absent) means no public IP at all
+ default: absent
+
+ enable_ipv6:
+ description:
+ - Enable public IPv6 connectivity on the instance
+ default: false
+ type: bool
+
+ image:
+ type: str
+ description:
+ - Image identifier used to start the instance with
+ required: true
+
+ name:
+ type: str
+ description:
+ - Name of the instance
+
+ organization:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+ - running
+ - restarted
+ - stopped
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the instance (5 max)
+ required: false
+ default: []
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ commercial_type:
+ type: str
+ description:
+ - Commercial name of the compute node
+ required: true
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the server to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the server
+ required: false
+ default: 3
+
+ security_group:
+ type: str
+ description:
+ - Security group unique identifier
+      - If no value is provided, the default security group or the current security group will be used
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ tags:
+ - test
+ - www
+
+- name: Create a server attached to a security group
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
+ tags:
+ - test
+ - www
+
+- name: Destroy it right after
+ community.general.scaleway_compute:
+ name: foobar
+ state: absent
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+'''
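+# A hypothetical variation (sketch; the UUID is illustrative only): attach an
+# already reserved IP address by passing its UUID through public_ip instead of
+# the default C(absent) behaviour:
+#
+#   - community.general.scaleway_compute:
+#       name: foobar
+#       state: running
+#       image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+#       organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+#       region: ams1
+#       commercial_type: VC1S
+#       public_ip: 11111111-2222-3333-4444-555555555555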
+
+RETURN = '''
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+SCALEWAY_SERVER_STATES = (
+ 'stopped',
+ 'stopping',
+ 'starting',
+ 'running',
+ 'locked'
+)
+
+SCALEWAY_TRANSITIONS_STATES = (
+ "stopping",
+ "starting",
+ "pending"
+)
+
+
+def check_image_id(compute_api, image_id):
+ response = compute_api.get(path="images/%s" % image_id)
+
+ if not response.ok:
+ msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
+ compute_api.module.fail_json(msg=msg)
+
+
+def fetch_state(compute_api, server):
+ compute_api.module.debug("fetch_state of server: %s" % server["id"])
+ response = compute_api.get(path="servers/%s" % server["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
+ return response.json["server"]["state"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(compute_api, server, wait=None):
+ if wait is None:
+ wait = compute_api.module.params["wait"]
+ if not wait:
+ return
+
+ wait_timeout = compute_api.module.params["wait_timeout"]
+ wait_sleep_time = compute_api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ compute_api.module.debug("We are going to wait for the server to finish its transition")
+ if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
+ compute_api.module.debug("It seems that the server is not in transition anymore.")
+ compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ compute_api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def public_ip_payload(compute_api, public_ip):
+ # We don't want a public ip
+ if public_ip in ("absent",):
+ return {"dynamic_ip_required": False}
+
+ # IP is only attached to the instance and is released as soon as the instance terminates
+ if public_ip in ("dynamic", "allocated"):
+ return {"dynamic_ip_required": True}
+
+ # We check that the IP we want to attach exists, if so its ID is returned
+ response = compute_api.get("ips")
+ if not response.ok:
+ msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ ip_list = []
+ try:
+ ip_list = response.json["ips"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
+
+    lookup = [ip["id"] for ip in ip_list]
+    if public_ip in lookup:
+        return {"public_ip": public_ip}
+
+    compute_api.module.fail_json(msg="%s is not a valid IP address UUID on this account" % public_ip)
+
+
+def create_server(compute_api, server):
+ compute_api.module.debug("Starting a create_server")
+ target_server = None
+ data = {"enable_ipv6": server["enable_ipv6"],
+ "tags": server["tags"],
+ "commercial_type": server["commercial_type"],
+ "image": server["image"],
+ "dynamic_ip_required": server["dynamic_ip_required"],
+ "name": server["name"],
+ "organization": server["organization"]
+ }
+
+ if server["security_group"]:
+ data["security_group"] = server["security_group"]
+
+ response = compute_api.post(path="servers", data=data)
+
+ if not response.ok:
+ msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def restart_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="reboot")
+
+
+def stop_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweroff")
+
+
+def start_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweron")
+
+
+def perform_action(compute_api, server, action):
+ response = compute_api.post(path="servers/%s/action" % server["id"],
+ data={"action": action})
+ if not response.ok:
+ msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def remove_server(compute_api, server):
+ compute_api.module.debug("Starting remove server strategy")
+ response = compute_api.delete(path="servers/%s" % server["id"])
+ if not response.ok:
+ msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def present_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting present strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ return changed, target_server
+
+
+def absent_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting absent strategy")
+ changed = False
+ target_server = None
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ return changed, {"status": "Server already absent."}
+ else:
+ target_server = query_results[0]
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be made absent." % target_server["id"]}
+
+ # A server MUST be stopped to be deleted.
+ while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+ response = stop_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+
+ response = remove_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ return changed, {"status": "Server %s deleted" % target_server["id"]}
+
+
+def running_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting running strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being run."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("running", "starting"):
+ compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ response = start_server(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def stop_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting stop strategy")
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ changed = False
+
+ if not query_results:
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being stopped."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ changed = True
+ else:
+ target_server = query_results[0]
+
+ compute_api.module.debug("stop_strategy: Servers are found.")
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("stopped",):
+ compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be stopped." % target_server["id"]}
+
+ response = stop_server(compute_api=compute_api, server=target_server)
+ compute_api.module.debug(response.json)
+ compute_api.module.debug(response.ok)
+
+ if not response.ok:
+ msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def restart_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting restart strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being rebooted."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api,
+ target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
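+# Dispatch table mapping the requested module state to the strategy function
+# that enforces it.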
+state_strategy = {
+ "present": present_strategy,
+ "restarted": restart_strategy,
+ "stopped": stop_strategy,
+ "running": running_strategy,
+ "absent": absent_strategy
+}
+
+
+def find(compute_api, wished_server, per_page=1):
+ compute_api.module.debug("Getting inside find")
+ # Only the name attribute is accepted in the Compute query API
+ response = compute_api.get("servers", params={"name": wished_server["name"],
+ "per_page": per_page})
+
+ if not response.ok:
+ msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ search_results = response.json["servers"]
+
+ return search_results
+
+
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+ "ipv6",
+ "tags",
+ "name",
+ "dynamic_ip_required",
+ "security_group",
+)
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+ compute_api.module.debug("Checking if server attributes should be changed")
+ compute_api.module.debug("Current Server: %s" % target_server)
+ compute_api.module.debug("Wished Server: %s" % wished_server)
+ debug_dict = dict((x, (target_server[x], wished_server[x]))
+ for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+ if x in target_server and x in wished_server)
+ compute_api.module.debug("Debug dict %s" % debug_dict)
+ try:
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+                # When working with a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys(
+ ) and target_server[key]["id"] != wished_server[key]:
+ return True
+                # For any other structure, simply compare the two values directly
+ elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+ return True
+ return False
+ except AttributeError:
+ compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+ compute_api.module.debug("Starting patching server attributes")
+ patch_payload = dict()
+
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+            # When working with a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+ # Setting all key to current value except ID
+ key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
+ # Setting ID to the user specified ID
+ key_dict["id"] = wished_server[key]
+ patch_payload[key] = key_dict
+ elif not isinstance(target_server[key], dict):
+ patch_payload[key] = wished_server[key]
+
+ response = compute_api.patch(path="servers/%s" % target_server["id"],
+ data=patch_payload)
+ if not response.ok:
+ msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def core(module):
+ region = module.params["region"]
+ wished_server = {
+ "state": module.params["state"],
+ "image": module.params["image"],
+ "name": module.params["name"],
+ "commercial_type": module.params["commercial_type"],
+ "enable_ipv6": module.params["enable_ipv6"],
+ "tags": module.params["tags"],
+ "organization": module.params["organization"],
+ "security_group": module.params["security_group"]
+ }
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ compute_api = Scaleway(module=module)
+
+ check_image_id(compute_api, wished_server["image"])
+
+ # IP parameters of the wished server depends on the configuration
+ ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+ wished_server.update(ip_payload)
+
+ changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+ module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ image=dict(required=True),
+ name=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ commercial_type=dict(required=True),
+ enable_ipv6=dict(default=False, type="bool"),
+ public_ip=dict(default="absent"),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ security_group=dict(),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
new file mode 100644
index 00000000..57803245
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+#
+# Scaleway database backups management module
+#
+# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_database_backup
+short_description: Scaleway database backups management module
+version_added: 1.2.0
+author: Guillaume Rodriguez (@guillaume_ro_fr)
+description:
+    - This module manages database backups on a Scaleway account U(https://developer.scaleway.com).
+extends_documentation_fragment:
+ - community.general.scaleway
+options:
+ state:
+ description:
+ - Indicate desired state of the database backup.
+ - C(present) creates a backup.
+ - C(absent) deletes the backup.
+ - C(exported) creates a download link for the backup.
+ - C(restored) restores the backup to a new database.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - exported
+ - restored
+
+ region:
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ type: str
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ id:
+ description:
+ - UUID used to identify the database backup.
+ - Required for C(absent), C(exported) and C(restored) states.
+ type: str
+
+ name:
+ description:
+ - Name used to identify the database backup.
+ - Required for C(present) state.
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ database_name:
+ description:
+ - Name used to identify the database.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ instance_id:
+ description:
+ - UUID of the instance associated to the database backup.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ expires_at:
+ description:
+ - Expiration datetime of the database backup (ISO 8601 format).
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Time to wait for the backup to reach the expected state.
+ type: int
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ description:
+ - Time to wait before every attempt to check the state of the backup.
+ type: int
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+ - name: Create a backup
+ community.general.scaleway_database_backup:
+ name: 'my_backup'
+ state: present
+ region: 'fr-par'
+ database_name: 'my-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Export a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: exported
+ region: 'fr-par'
+
+ - name: Restore a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: restored
+ region: 'fr-par'
+ database_name: 'my-new-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Remove a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: absent
+ region: 'fr-par'
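+
+ # Illustrative sketch only (not part of the original changeset): the expiry
+ # timestamp below is a placeholder ISO 8601 value.
+ - name: Create a backup with an expiration date
+ community.general.scaleway_database_backup:
+ name: 'my_backup'
+ state: present
+ region: 'fr-par'
+ database_name: 'my-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+ expires_at: '2021-12-31T23:59:59Z'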
+'''
+
+RETURN = '''
+metadata:
+ description: Backup metadata.
+ returned: when C(state=present), C(state=exported) or C(state=restored)
+ type: dict
+ sample: {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
+ }
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ scaleway_argument_spec,
+ SCALEWAY_REGIONS,
+)
+
+stable_states = (
+ 'ready',
+ 'deleting',
+)
+
+
+def wait_to_complete_state_transition(module, account_api, backup=None):
+ wait_timeout = module.params['wait_timeout']
+ wait_sleep_time = module.params['wait_sleep_time']
+
+ if backup is None or backup['status'] in stable_states:
+ return backup
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
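+ # Poll the backup until it reaches a stable state or the deadline passes;
+ # the "else" branch of the while loop only runs if the deadline expires.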
+ while datetime.datetime.utcnow() < end:
+ module.debug('We are going to wait for the backup to finish its transition')
+
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if not response.ok:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
+ break
+ response_json = response.json
+
+ if response_json['status'] in stable_states:
+ module.debug('It seems that the backup is not in transition anymore.')
+ module.debug('Backup in state: %s' % response_json['status'])
+ return response_json
+ time.sleep(wait_sleep_time)
+ else:
+ module.fail_json(msg='Backup takes too long to finish its transition')
+
+
+def present_strategy(module, account_api, backup):
+ name = module.params['name']
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+ expiration_date = module.params['expires_at']
+
+ if backup is not None:
+ if (backup['name'] == name or name is None) and (
+ backup['expires_at'] == expiration_date or expiration_date is None):
+ wait_to_complete_state_transition(module, account_api, backup)
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {}
+ if name is not None:
+ payload['name'] = name
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
+ payload)
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def absent_strategy(module, account_api, backup):
+ if backup is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def exported_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ if backup['download_url'] is not None:
+ module.exit_json(changed=False, metadata=backup)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+ response = account_api.post(
+ '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def restored_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+
+ payload = {'database_name': database_name, 'instance_id': instance_id}
+ response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
+ payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+state_strategy = {
+ 'present': present_strategy,
+ 'absent': absent_strategy,
+ 'exported': exported_strategy,
+ 'restored': restored_strategy,
+}
+
+
+def core(module):
+ state = module.params['state']
+ backup_id = module.params['id']
+
+ account_api = Scaleway(module)
+
+ if backup_id is None:
+ backup_by_id = None
+ else:
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
+ status_code = response.status_code
+ backup_json = response.json
+ backup_by_id = None
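+ # A 404 simply means no backup exists with this UUID; any other failed response is fatal.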
+ if status_code == 404:
+ backup_by_id = None
+ elif response.ok:
+ backup_by_id = backup_json
+ else:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
+
+ state_strategy[state](module, account_api, backup_by_id)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ id=dict(),
+ name=dict(type='str'),
+ database_name=dict(required=False),
+ instance_id=dict(required=False),
+ expires_at=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ wait_sleep_time=dict(type='int', default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['database_name', 'instance_id'],
+ ],
+ required_if=[
+ ['state', 'present', ['name', 'database_name', 'instance_id']],
+ ['state', 'absent', ['id']],
+ ['state', 'exported', ['id']],
+ ['state', 'restored', ['id', 'database_name', 'instance_id']],
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py
new file mode 100644
index 00000000..31bbfa76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_image_info) instead.
+short_description: Gather facts about the Scaleway images available.
+description:
+ - Gather facts about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images facts
+ community.general.scaleway_image_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_image_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_facts": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageFacts, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
new file mode 100644
index 00000000..3fad216e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_info
+short_description: Gather information about the Scaleway images available.
+description:
+ - Gather information about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images information
+ community.general.scaleway_image_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_image_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_image_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_info": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageInfo, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_image_info=ScalewayImageInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
new file mode 100644
index 00000000..26da122e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+#
+# Scaleway IP management module
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@sieben)
+description:
+ - This module manages IPs on a Scaleway account
+ U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the IP.
+ default: present
+ choices:
+ - present
+ - absent
+
+ organization:
+ type: str
+ description:
+ - Scaleway organization identifier
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ id:
+ type: str
+ description:
+ - ID of the Scaleway IP (UUID).
+
+ server:
+ type: str
+ description:
+ - ID of the server you want to attach the IP to.
+ - To detach the IP, do not specify this option.
+
+ reverse:
+ type: str
+ description:
+ - Reverse DNS record to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+ community.general.scaleway_ip:
+ organization: '{{ scw_org }}'
+ state: present
+ region: par1
+ register: ip_creation_task
+
+- name: Make sure IP deleted
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: absent
+ region: par1
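+
+# Illustrative sketch only (not part of the original changeset): the server
+# UUID below is a placeholder; passing the id of an existing server attaches
+# the IP to it, while omitting C(server) detaches it.
+- name: Attach an IP to a server
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ organization: '{{ scw_org }}'
+ server: '3f1568ca-b1a2-4e98-b6f7-31a0588157f1'
+ state: present
+ region: par1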
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ips": [
+ {
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
+ }
+ ]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def ip_attributes_should_be_changed(api, target_ip, wished_ip):
+ patch_payload = {}
+
+ if target_ip["reverse"] != wished_ip["reverse"]:
+ patch_payload["reverse"] = wished_ip["reverse"]
+
+ # IP is assigned to a server
+ if target_ip["server"] is None and wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+
+ # IP is detached from a server (target_ip["server"] may be None, hence the guard below)
+ try:
+ if target_ip["server"]["id"] and wished_ip["server"] is None:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ # IP is migrated between 2 different servers
+ try:
+ if target_ip["server"]["id"] != wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ return patch_payload
+
+
+def payload_from_wished_ip(wished_ip):
+ return dict(
+ (k, v)
+ for k, v in wished_ip.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, wished_ip):
+ changed = False
+
+ response = api.get('ips')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ ips_list = response.json["ips"]
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+
+ if wished_ip["id"] not in ip_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "An IP would be created."}
+
+ # Create IP
+ creation_response = api.post('/ips',
+ data=payload_from_wished_ip(wished_ip))
+
+ if not creation_response.ok:
+ msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+ return changed, creation_response.json["ip"]
+
+ target_ip = ip_lookup[wished_ip["id"]]
+ patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
+
+ if not patch_payload:
+ return changed, target_ip
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP attributes would be changed."}
+
+ ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
+ data=patch_payload)
+
+ if not ip_patch_response.ok:
+ api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
+ ip_patch_response.status_code, ip_patch_response.json['message']))
+
+ return changed, ip_patch_response.json["ip"]
+
+
+def absent_strategy(api, wished_ip):
+ response = api.get('ips')
+ changed = False
+
+ status_code = response.status_code
+ ips_json = response.json
+ ips_list = ips_json["ips"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+ if wished_ip["id"] not in ip_lookup.keys():
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP would be destroyed"}
+
+ response = api.delete('/ips/' + wished_ip["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+ wished_ip = {
+ "organization": module.params['organization'],
+ "reverse": module.params["reverse"],
+ "id": module.params["id"],
+ "server": module.params["server"]
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
+ else:
+ changed, summary = present_strategy(api=api, wished_ip=wished_ip)
+ module.exit_json(changed=changed, scaleway_ip=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ organization=dict(required=True),
+ server=dict(),
+ reverse=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ id=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py
new file mode 100644
index 00000000..4227f360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_ip_info) instead.
+short_description: Gather facts about the Scaleway IPs available.
+description:
+ - Gather facts about the Scaleway IPs available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips facts
+ community.general.scaleway_ip_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_ip_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_facts": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpFacts, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
new file mode 100644
index 00000000..145fb203
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_info
+short_description: Gather information about the Scaleway IPs available.
+description:
+ - Gather information about the Scaleway IPs available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips information
+ community.general.scaleway_ip_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_ip_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_ip_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_info": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpInfo, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_ip_info=ScalewayIpInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
new file mode 100644
index 00000000..a9358188
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#
+# Scaleway Load-balancer management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_lb
+short_description: Scaleway load-balancer management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages load-balancers on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ name:
+ type: str
+ description:
+ - Name of the load-balancer
+ required: true
+
+ description:
+ type: str
+ description:
+ - Description of the load-balancer
+ required: true
+
+ organization_id:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - nl-ams
+ - fr-par
+ - pl-waw
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the load-balancer
+
+ wait:
+ description:
+ - Wait for the load-balancer to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the load-balancer to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the load-balancer
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ description: Load-balancer used by Ansible
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ tags:
+ - hello
+
+- name: Delete a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ description: Load-balancer used by Ansible
+ state: absent
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
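+
+# Illustrative sketch only (not part of the original changeset): the same create
+# task, but blocking until the load-balancer reports a stable state by using the
+# module's wait options; the timeout value is a placeholder.
+- name: Create a load-balancer and wait for it to be ready
+ community.general.scaleway_lb:
+ name: foobar
+ description: Load-balancer used by Ansible
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ wait: true
+ wait_timeout: 600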
+'''
+
+RETURN = '''
+scaleway_lb:
+ description: Information about the load-balancer.
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "backend_count": 0,
+ "frontend_count": 0,
+ "description": "Description of my load-balancer",
+ "id": "00000000-0000-0000-0000-000000000000",
+ "instances": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.1",
+ "region": "fr-par",
+ "status": "ready"
+ },
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.2",
+ "region": "fr-par",
+ "status": "ready"
+ }
+ ],
+ "ip": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "192.168.0.1",
+ "lb_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "reverse": ""
+ }
+ ],
+ "name": "lb_ansible_test",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "status": "ready",
+ "tags": [
+ "first_tag",
+ "second_tag"
+ ]
+ }
+'''
+
+import datetime
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "name",
+ "description"
+)
+
+
+def payload_from_wished_lb(wished_lb):
+ return {
+ "organization_id": wished_lb["organization_id"],
+ "name": wished_lb["name"],
+ "tags": wished_lb["tags"],
+ "description": wished_lb["description"]
+ }
+
+
+def fetch_state(api, lb):
+ api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
+ response = api.get(path=api.api_path + "/%s" % lb["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ api.module.fail_json(msg=msg)
+
+ try:
+ api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(api, lb, force_wait=False):
+ wait = api.module.params["wait"]
+ if not (wait or force_wait):
+ return
+ wait_timeout = api.module.params["wait_timeout"]
+ wait_sleep_time = api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ api.module.debug("We are going to wait for the load-balancer to finish its transition")
+ state = fetch_state(api, lb)
+ if state in STABLE_STATES:
+ api.module.debug("It seems that the load-balancer is not in transition anymore.")
+ api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def lb_attributes_should_be_changed(target_lb, wished_lb):
+ diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
+
+ if diff:
+ return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
+ else:
+ return diff
+
+
+def present_strategy(api, wished_lb):
+ changed = False
+
+ response = api.get(path=api.api_path)
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ lbs_list = response.json["lbs"]
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+
+ if wished_lb["name"] not in lb_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A load-balancer would be created."}
+
+ # Create Load-balancer
+ api.warn(payload_from_wished_lb(wished_lb))
+ creation_response = api.post(path=api.api_path,
+ data=payload_from_wished_lb(wished_lb))
+
+ if not creation_response.ok:
+ msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(api=api, lb=creation_response.json)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
+ wished_lb=wished_lb)
+
+ if not patch_payload:
+ return changed, target_lb
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer attributes would be changed."}
+
+ lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
+ data=patch_payload)
+
+ if not lb_patch_response.ok:
+ api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
+ lb_patch_response.status_code, lb_patch_response.json['message']))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, lb_patch_response.json
+
+
+def absent_strategy(api, wished_lb):
+ response = api.get(path=api.api_path)
+ changed = False
+
+ status_code = response.status_code
+ lbs_json = response.json
+ lbs_list = lbs_json["lbs"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+ if wished_lb["name"] not in lb_lookup.keys():
+ return changed, {}
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer would be destroyed"}
+
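+ # Wait for the load-balancer to reach a stable state before deleting it, even
+ # when the user did not request waiting (force_wait overrides the module option).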
+ wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_load_balancer = {
+ "state": module.params["state"],
+ "name": module.params["name"],
+ "description": module.params["description"],
+ "tags": module.params["tags"],
+ "organization_id": module.params["organization_id"]
+ }
+ module.params['api_url'] = SCALEWAY_ENDPOINT
+ api = Scaleway(module=module)
+ api.api_path = "lb/v1/regions/%s/lbs" % region
+
+ changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
+ wished_lb=wished_load_balancer)
+ module.exit_json(changed=changed, scaleway_lb=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ description=dict(required=True),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization_id=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py
new file mode 100644
index 00000000..ee571cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_organization_info) instead.
+short_description: Gather facts about the Scaleway organizations available.
+description:
+ - Gather facts about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations facts
+ community.general.scaleway_organization_facts:
+'''
+
+RETURN = r'''
+---
+scaleway_organization_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_facts": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationFacts, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
new file mode 100644
index 00000000..f530dcb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_info
+short_description: Gather information about the Scaleway organizations available.
+description:
+ - Gather information about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations information
+ community.general.scaleway_organization_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_organization_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_organization_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_info": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationInfo, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
new file mode 100644
index 00000000..9303e06e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group
+short_description: Scaleway Security Group management module
+author: Antoine Barbare (@abarbare)
+description:
+ - This module manages Security Groups on a Scaleway account
+ U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ description:
+ - Indicate desired state of the Security Group.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+ organization:
+ description:
+ - Organization identifier.
+ type: str
+ required: true
+
+ region:
+ description:
+ - Scaleway region to use (for example C(par1)).
+ type: str
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ description:
+ - Name of the Security Group.
+ type: str
+ required: true
+
+ description:
+ description:
+ - Description of the Security Group.
+ type: str
+
+ stateful:
+ description:
+ - Create a stateful security group which allows established connections in and out.
+ type: bool
+ required: true
+
+ inbound_default_policy:
+ description:
+ - Default policy for incoming traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ outbound_default_policy:
+ description:
+ - Default policy for outgoing traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ organization_default:
+ description:
+ - Create security group to be the default one.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group
+ community.general.scaleway_security_group:
+ state: present
+ region: par1
+ name: security_group
+ description: "my security group description"
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation_task
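+
+# Illustrative sketch only (not part of the original changeset): removing the
+# same group; C(stateful) is still required by the module's argument spec.
+- name: Delete a Security Group
+ community.general.scaleway_security_group:
+ state: absent
+ region: par1
+ name: security_group
+ stateful: false
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"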
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+ return dict(
+ (k, v)
+ for k, v in security_group.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, security_group):
+ ret = {'changed': False}
+
+ response = api.get('security_groups')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+
+ if security_group['name'] not in security_group_lookup.keys():
+ ret['changed'] = True
+ if api.module.check_mode:
+ # Help user when check mode is enabled by defining id key
+ ret['scaleway_security_group'] = {'id': str(uuid4())}
+ return ret
+
+ # Create Security Group
+ response = api.post('/security_groups',
+ data=payload_from_security_group(security_group))
+
+ if not response.ok:
+ msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+ ret['scaleway_security_group'] = response.json['security_group']
+
+ else:
+ ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+ return ret
+
+
+def absent_strategy(api, security_group):
+ response = api.get('security_groups')
+ ret = {'changed': False}
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+ if security_group['name'] not in security_group_lookup.keys():
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ security_group = {
+ 'organization': module.params['organization'],
+ 'name': module.params['name'],
+ 'description': module.params['description'],
+ 'stateful': module.params['stateful'],
+ 'inbound_default_policy': module.params['inbound_default_policy'],
+ 'outbound_default_policy': module.params['outbound_default_policy'],
+ 'organization_default': module.params['organization_default'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ api = Scaleway(module=module)
+ if module.params['state'] == 'present':
+ summary = present_strategy(api=api, security_group=security_group)
+ else:
+ summary = absent_strategy(api=api, security_group=security_group)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ organization=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ stateful=dict(type='bool', required=True),
+ inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ organization_default=dict(type='bool'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py
new file mode 100644
index 00000000..a43bfedb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_facts.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_security_group_info) instead.
+short_description: Gather facts about the Scaleway security groups available.
+description:
+ - Gather facts about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups facts
+ community.general.scaleway_security_group_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_facts": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupFacts, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
new file mode 100644
index 00000000..d3488f0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_info
+short_description: Gather information about the Scaleway security groups available.
+description:
+ - Gather information about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups information
+ community.general.scaleway_security_group_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_security_group_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_info": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupInfo, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
new file mode 100644
index 00000000..054a4d47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group Rule management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group_rule
+short_description: Scaleway Security Group Rule management module
+author: Antoine Barbare (@abarbare)
+description:
+    - This module manages Security Group Rules on a Scaleway account
+      U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the Security Group Rule.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ protocol:
+ type: str
+ description:
+ - Network protocol to use
+ choices:
+ - TCP
+ - UDP
+ - ICMP
+ required: true
+
+ port:
+ description:
+      - Port related to the rule; use a null value to apply the rule to all ports.
+ required: true
+ type: int
+
+ ip_range:
+ type: str
+ description:
+      - IPv4 range in CIDR notation to apply to the rule.
+ default: 0.0.0.0/0
+
+ direction:
+ type: str
+ description:
+ - Rule direction
+ choices:
+ - inbound
+ - outbound
+ required: true
+
+ action:
+ type: str
+ description:
+ - Rule action
+ choices:
+ - accept
+ - drop
+ required: true
+
+ security_group:
+ type: str
+ description:
+ - Security Group unique identifier
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a Security Group Rule
+ community.general.scaleway_security_group_rule:
+ state: present
+ region: par1
+ protocol: TCP
+ port: 80
+ ip_range: 0.0.0.0/0
+ direction: inbound
+ action: accept
+ security_group: b57210ee-1281-4820-a6db-329f78596ecb
+ register: security_group_rule_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_sgr_from_api(security_group_rules, security_group_rule):
+    """ Check if the specs of a security_group_rule are present in security_group_rules
+ Return None if no rules match the specs
+ Return the rule if found
+ """
+ for sgr in security_group_rules:
+ if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
+ sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
+ sgr['protocol'] == security_group_rule['protocol']):
+ return sgr
+
+ return None
+
+
+def present_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ # Create Security Group Rule
+ response = api.post('/security_groups/%s/rules' % security_group_id,
+ data=payload_from_object(security_group_rule))
+
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error during security group rule creation: "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+ ret['scaleway_security_group_rule'] = response.json['rule']
+
+ else:
+ ret['scaleway_security_group_rule'] = existing_rule
+
+ return ret
+
+
+def absent_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete(
+ '/security_groups/%s/rules/%s' %
+ (security_group_id, existing_rule['id']))
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error deleting security group rule "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ api = Scaleway(module=module)
+
+ security_group_rule = {
+ 'protocol': module.params['protocol'],
+ 'dest_port_from': module.params['port'],
+ 'ip_range': module.params['ip_range'],
+ 'direction': module.params['direction'],
+ 'action': module.params['action'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ if module.params['state'] == 'present':
+ summary = present_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ else:
+ summary = absent_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
+ port=dict(type='int', required=True),
+ ip_range=dict(type='str', default='0.0.0.0/0'),
+ direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
+ action=dict(type='str', required=True, choices=['accept', 'drop']),
+ security_group=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
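present_strategy() looks up the requested rule with get_sgr_from_api() before creating anything and returns early under check mode, so the module can be exercised safely before applying. A short sketch that reuses the security group ID from the example above; the task names and port are illustrative:

- name: Preview the HTTPS rule without touching the API state
  community.general.scaleway_security_group_rule:
    state: present
    region: par1
    protocol: TCP
    port: 443
    ip_range: 0.0.0.0/0
    direction: inbound
    action: accept
    security_group: b57210ee-1281-4820-a6db-329f78596ecb
  check_mode: true

- name: Apply the same rule; a second run reports no change once the rule exists
  community.general.scaleway_security_group_rule:
    state: present
    region: par1
    protocol: TCP
    port: 443
    ip_range: 0.0.0.0/0
    direction: inbound
    action: accept
    security_group: b57210ee-1281-4820-a6db-329f78596ecb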
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py
new file mode 100644
index 00000000..d3e73669
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_server_info) instead.
+short_description: Gather facts about the Scaleway servers available.
+description:
+ - Gather facts about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers facts
+ community.general.scaleway_server_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_server_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_facts": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerFacts, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
new file mode 100644
index 00000000..43b0badc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_info
+short_description: Gather information about the Scaleway servers available.
+description:
+ - Gather information about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers information
+ community.general.scaleway_server_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_server_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_server_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_info": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerInfo, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_server_info=ScalewayServerInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
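The info module returns the documented server list under result.scaleway_server_info, so standard Jinja2 filters can slice it. A small sketch that assumes only the documented fields (name, state, public_ip, id); everything else is illustrative:

- name: List servers in par1
  community.general.scaleway_server_info:
    region: par1
  register: servers

- name: Show the public address of every running server
  ansible.builtin.debug:
    msg: "{{ item.name }} -> {{ item.public_ip.address }}"
  loop: "{{ servers.scaleway_server_info | selectattr('state', 'equalto', 'running') | list }}"
  loop_control:
    label: "{{ item.id }}"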
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py
new file mode 100644
index 00000000..25f99e72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_snapshot_info) instead.
+short_description: Gather facts about the Scaleway snapshots available.
+description:
+    - Gather facts about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots facts
+ community.general.scaleway_snapshot_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_facts": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotFacts, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
new file mode 100644
index 00000000..f31b74b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_info
+short_description: Gather information about the Scaleway snapshots available.
+description:
+    - Gather information about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots information
+ community.general.scaleway_snapshot_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_snapshot_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_info": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotInfo, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
new file mode 100644
index 00000000..08555b23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+#
+# Scaleway SSH keys management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_sshkey
+short_description: Scaleway SSH keys management module
+author: Remy Leone (@sieben)
+description:
+    - This module manages SSH keys on a Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the SSH key.
+ default: present
+ choices:
+ - present
+ - absent
+ ssh_pub_key:
+ type: str
+ description:
+ - The public SSH key as a string to add.
+ required: true
+ api_url:
+ type: str
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+'''
+
+EXAMPLES = '''
+- name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+- name: "Delete SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "absent"
+
+- name: "Add SSH key with explicit token"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+ oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ssh_public_keys": [
+ {"key": "ssh-rsa AAAA...."}
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+ ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+ ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+ return ssh_key_lookup
+
+
+def extract_user_id(raw_organization_dict):
+ return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+ ssh_list = {"ssh_public_keys": [{"key": key}
+ for key in ssh_lookup]}
+ return ssh_list
+
+
+def core(module):
+ ssh_pub_key = module.params['ssh_pub_key']
+ state = module.params["state"]
+ account_api = Scaleway(module)
+ response = account_api.get('organizations')
+
+ status_code = response.status_code
+ organization_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ user_id = extract_user_id(organization_json)
+ present_sshkeys = []
+ try:
+ present_sshkeys = extract_present_sshkeys(organization_json)
+    except (KeyError, IndexError):
+        module.fail_json(msg="Error while extracting present SSH keys from API")
+
+ if state in ('present',):
+ if ssh_pub_key in present_sshkeys:
+ module.exit_json(changed=False)
+
+ # If key not found create it!
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.append(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if ssh_pub_key not in present_sshkeys:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.remove(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ ssh_pub_key=dict(required=True),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
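Because the module takes the key material as a plain string, it combines naturally with a controller-side file lookup. A sketch using only the documented options; the key path and the scw_token variable are illustrative:

- name: Upload the control node's public key
  community.general.scaleway_sshkey:
    ssh_pub_key: "{{ lookup('ansible.builtin.file', '~/.ssh/id_rsa.pub') }}"
    state: present
    oauth_token: "{{ scw_token }}"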
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
new file mode 100644
index 00000000..4a38e76d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# Scaleway user data management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages user_data on compute instances on Scaleway."
+    - "It can be used, for instance, to configure cloud-init."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ server_id:
+ type: str
+ description:
+ - Scaleway Compute instance ID of the server
+ required: true
+
+ user_data:
+ type: dict
+ description:
+      - User defined data. Typically used with C(cloud-init).
+      - Pass your cloud-init script here as a string.
+ required: false
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+ community.general.scaleway_user_data:
+ server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+ region: ams1
+ user_data:
+ cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
+ if not response.ok:
+ msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def delete_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+ response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+ if not response.ok:
+        msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def get_user_data(compute_api, server_id, key):
+    compute_api.module.debug("Starting fetching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.get(path=path)
+ if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ server_id = module.params["server_id"]
+ user_data = module.params["user_data"]
+ changed = False
+
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+ compute_api = Scaleway(module=module)
+
+ user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+ if not user_data_list.ok:
+        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+ compute_api.module.fail_json(msg=msg)
+
+ present_user_data_keys = user_data_list.json["user_data"]
+ present_user_data = dict(
+ (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+ for key in present_user_data_keys
+ )
+
+ if present_user_data == user_data:
+ module.exit_json(changed=changed, msg=user_data_list.json)
+
+ # First we remove keys that are not defined in the wished user_data
+ for key in present_user_data:
+ if key not in user_data:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+ # Then we patch keys that are different
+ for key, value in user_data.items():
+ if key not in present_user_data or user_data[key] != present_user_data[key]:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
+
+ module.exit_json(changed=changed, msg=user_data)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ user_data=dict(type="dict"),
+ server_id=dict(required=True),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
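core() converges the whole user_data mapping: keys missing from the requested dict are deleted and differing keys are patched, so the dict passed in is authoritative. A sketch reusing the server ID from the example above; the extra env key and its value are illustrative:

- name: Converge the complete user_data mapping (unlisted keys are removed)
  community.general.scaleway_user_data:
    server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
    region: ams1
    user_data:
      cloud-init: |
        #cloud-config
        package_update: true
        final_message: "provisioned by ansible"
      env: production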
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
new file mode 100644
index 00000000..e879d3c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+#
+# Scaleway volumes management module
+#
+# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_volume
+short_description: Scaleway volumes management module
+author: Henryk Konsek (@hekonsek)
+description:
+    - This module manages volumes on a Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the volume.
+ default: present
+ choices:
+ - present
+ - absent
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+ name:
+ type: str
+ description:
+ - Name used to identify the volume.
+ required: true
+ organization:
+ type: str
+ description:
+      - Scaleway organization ID to which the volume belongs.
+ size:
+ type: int
+ description:
+ - Size of the volume in bytes.
+ volume_type:
+ type: str
+ description:
+      - Type of the volume (for example C(l_ssd)).
+'''
+
+EXAMPLES = '''
+- name: Create 10GB volume
+ community.general.scaleway_volume:
+ name: my-volume
+ state: present
+ region: par1
+ organization: "{{ scw_org }}"
+    size: 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- name: Make sure volume deleted
+ community.general.scaleway_volume:
+ name: my-volume
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "volume": {
+ "export_uri": null,
+ "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+ "name": "volume-0-3",
+ "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+ "server": null,
+ "size": 10000000000,
+ "volume_type": "l_ssd"
+ }
+}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+ state = module.params['state']
+ name = module.params['name']
+ organization = module.params['organization']
+ size = module.params['size']
+ volume_type = module.params['volume_type']
+
+ account_api = Scaleway(module)
+ response = account_api.get('volumes')
+ status_code = response.status_code
+ volumes_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ volumeByName = None
+ for volume in volumes_json['volumes']:
+ if volume['organization'] == organization and volume['name'] == name:
+ volumeByName = volume
+
+ if state in ('present',):
+ if volumeByName is not None:
+ module.exit_json(changed=False)
+
+ payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
+
+ response = account_api.post('/volumes', payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if volumeByName is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/volumes/' + volumeByName['id'])
+ if response.status_code == 204:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ size=dict(type='int'),
+ organization=dict(),
+ volume_type=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
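On creation the module exits with the API response under data, matching the RETURN sample above, so the new volume ID can be captured for later tasks; an unchanged re-run exits without a data key, hence the guard. A sketch; the fact name is illustrative:

- name: Create a 10 GB volume
  community.general.scaleway_volume:
    name: my-volume
    state: present
    region: par1
    organization: "{{ scw_org }}"
    size: 10000000000
    volume_type: l_ssd
  register: volume_result

- name: Remember the volume ID for later tasks
  ansible.builtin.set_fact:
    my_volume_id: "{{ volume_result.data.volume.id }}"
  when: volume_result is changed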
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py
new file mode 100644
index 00000000..e894f965
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_volume_info) instead.
+short_description: Gather facts about the Scaleway volumes available.
+description:
+ - Gather facts about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes facts
+ community.general.scaleway_volume_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_volume_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_facts": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeFacts, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
new file mode 100644
index 00000000..ff6093e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_info
+short_description: Gather information about the Scaleway volumes available.
+description:
+ - Gather information about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes information
+ community.general.scaleway_volume_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_volume_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_volume_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_info": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeInfo, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
new file mode 100644
index 00000000..18a67d01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+ - Manage SmartOS virtual machine images through imgadm(1M)
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ force:
+ required: false
+ type: bool
+ description:
+ - Force a given operation (where supported by imgadm(1M)).
+ pool:
+ required: false
+ default: zones
+ description:
+ - zpool to import to or delete images from.
+ type: str
+ source:
+ required: false
+ description:
+ - URI for the image source.
+ type: str
+ state:
+ required: true
+ choices: [ present, absent, deleted, imported, updated, vacuumed ]
+ description:
+      - State the object operated on should be in. C(imported) is an alias
+        for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+ and C(uuid) to C(*), it will remove all unused images.
+ type: str
+
+ type:
+ required: false
+ choices: [ imgapi, docker, dsapi ]
+ default: imgapi
+ description:
+ - Type for image sources.
+ type: str
+
+ uuid:
+ required: false
+ description:
+ - Image UUID. Can either be a full UUID or C(*) for all images.
+ type: str
+
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: imported
+
+- name: Delete an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: deleted
+
+- name: Update all images
+ community.general.imgadm:
+ uuid: '*'
+ state: updated
+
+- name: Update a single image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: updated
+
+- name: Add a source
+ community.general.imgadm:
+ source: 'https://datasets.project-fifo.net'
+ state: present
+
+- name: Add a Docker source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ type: docker
+ state: present
+
+- name: Remove a source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ state: absent
+'''
+
+RETURN = '''
+source:
+ description: Source that is managed.
+ returned: When not managing an image.
+ type: str
+ sample: https://datasets.project-fifo.net
+uuid:
+ description: UUID for an image operated on.
+ returned: When not managing an image source.
+ type: str
+ sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
+# -E option to return any errors in JSON, the generated JSON does not play well
+# with the JSON parsers of Python. The returned message contains '\n' as part of
+# the stacktrace, which breaks the parsers.
+
+
+class Imgadm(object):
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.cmd = module.get_bin_path('imgadm', required=True)
+ self.changed = False
+ self.uuid = module.params['uuid']
+
+ # Since there are a number of (natural) aliases, prevent having to look
+        # them up every time we operate on `state`.
+ if self.params['state'] in ['present', 'imported', 'updated']:
+ self.present = True
+ else:
+ self.present = False
+
+ # Perform basic UUID validation upfront.
+ if self.uuid and self.uuid != '*':
+ if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+ module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+ # Helper method to massage stderr
+ def errmsg(self, stderr):
+ match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+ if match:
+ return match.groups()[0]
+ else:
+ return 'Unexpected failure'
+
+ def update_images(self):
+ if self.uuid == '*':
+ cmd = '{0} update'.format(self.cmd)
+ else:
+ cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+ # There is no feedback from imgadm(1M) to determine if anything
+ # was actually changed. So treat this as an 'always-changes' operation.
+ # Note that 'imgadm -v' produces unparseable JSON...
+ self.changed = True
+
+ def manage_sources(self):
+ force = self.params['force']
+ source = self.params['source']
+ imgtype = self.params['type']
+
+ cmd = '{0} sources'.format(self.cmd)
+
+ if force:
+ cmd += ' -f'
+
+ if self.present:
+ cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
+
+ # Check the various responses.
+ # Note that trying to add a source with the wrong type is handled
+ # above as it results in a non-zero status.
+
+ regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Added "%s" image source "%s"' % (imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = True
+ else:
+ # Type is ignored by imgadm(1M) here
+ cmd += ' -d %s' % source
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
+
+ regex = 'Do not have image source "%s", no change' % source
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Deleted ".*" image source "%s"' % source
+ if re.match(regex, stdout):
+ self.changed = True
+
+ def manage_images(self):
+ pool = self.params['pool']
+ state = self.params['state']
+
+ if state == 'vacuumed':
+ # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
+ cmd = '{0} vacuum -f'.format(self.cmd)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
+ else:
+ if stdout == '':
+ self.changed = False
+ else:
+ self.changed = True
+ if self.present:
+ cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
+
+ regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = '.*ActiveImageNotFound.*'
+ if re.match(regex, stderr):
+ self.changed = False
+
+ regex = 'Imported image {0}.*'.format(self.uuid)
+ if re.match(regex, stdout.splitlines()[-1]):
+ self.changed = True
+ else:
+ cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ regex = '.*ImageNotInstalled.*'
+ if re.match(regex, stderr):
+ # Even if the 'rc' was non-zero (3), we handled the situation
+ # in order to determine if there was a change.
+ self.changed = False
+
+ regex = 'Deleted image {0}'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool'),
+ pool=dict(default='zones'),
+ source=dict(),
+ state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
+ type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
+ uuid=dict()
+ ),
+ # This module relies largely on imgadm(1M) to enforce idempotency, which does not
+ # provide a "noop" (or equivalent) mode to do a dry-run.
+ supports_check_mode=False,
+ )
+
+ imgadm = Imgadm(module)
+
+ uuid = module.params['uuid']
+ source = module.params['source']
+ state = module.params['state']
+
+ result = {'state': state}
+
+ # Either manage sources or images.
+ if source:
+ result['source'] = source
+ imgadm.manage_sources()
+ else:
+ result['uuid'] = uuid
+
+ if state == 'updated':
+ imgadm.update_images()
+ else:
+            # Make sure we operate on a single image for the following actions.
+ if (uuid == '*') and (state != 'vacuumed'):
+ module.fail_json(msg='Can only specify uuid as "*" when updating image(s)')
+ imgadm.manage_images()
+
+ result['changed'] = imgadm.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
new file mode 100644
index 00000000..7db7c5ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bruce Smith <Bruce.Smith.IT@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nictagadm
+short_description: Manage nic tags on SmartOS systems
+description:
+ - Create or delete nic tags on SmartOS systems.
+author:
+- Bruce Smith (@SmithX10)
+options:
+ name:
+ description:
+ - Name of the nic tag.
+ required: true
+ type: str
+ mac:
+ description:
+ - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
+ - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ type: str
+ etherstub:
+ description:
+ - Specifies that the nic tag will be attached to a created I(etherstub).
+ - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ type: bool
+ default: no
+ mtu:
+ description:
+ - Specifies the size of the I(mtu) of the desired nic tag.
+ - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ type: int
+ force:
+ description:
+    - When I(state) is C(absent), setting this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ type: bool
+ default: no
+ state:
+ description:
+ - Create or delete a SmartOS nic tag.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
+ community.general.nictagadm:
+ name: storage0
+ mac: 00:1b:21:a3:f5:4d
+ mtu: 9000
+ state: present
+
+- name: Remove 'storage0' nic tag
+ community.general.nictagadm:
+ name: storage0
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: nic tag name
+ returned: always
+ type: str
+ sample: storage0
+mac:
+ description: MAC Address that the nic tag was attached to.
+ returned: always
+ type: str
+ sample: 00:1b:21:a3:f5:4d
+etherstub:
+  description: Specifies whether the nic tag was attached to a created etherstub.
+ returned: always
+ type: bool
+ sample: False
+mtu:
+  description: Specifies which MTU size was passed during the nictagadm add command; mtu and etherstub are mutually exclusive.
+ returned: always
+ type: int
+ sample: 1500
+force:
+  description: Shows whether C(-f) was used during the deletion of a nic tag.
+ returned: always
+ type: bool
+ sample: False
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+
+
+class NicTag(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.mac = module.params['mac']
+ self.etherstub = module.params['etherstub']
+ self.mtu = module.params['mtu']
+ self.force = module.params['force']
+ self.state = module.params['state']
+
+ self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
+
+ def is_valid_mac(self):
+ return is_mac(self.mac.lower())
+
+ def nictag_exists(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('exists')
+ cmd.append(self.name)
+
+ (rc, dummy, dummy) = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def add_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('add')
+
+ if self.etherstub:
+ cmd.append('-l')
+
+ if self.mtu:
+ cmd.append('-p')
+ cmd.append('mtu=' + str(self.mtu))
+
+ if self.mac:
+ cmd.append('-p')
+ cmd.append('mac=' + str(self.mac))
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('delete')
+
+ if self.force:
+ cmd.append('-f')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ mac=dict(type='str'),
+ etherstub=dict(type='bool', default=False),
+ mtu=dict(type='int'),
+ force=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ mutually_exclusive=[
+ ['etherstub', 'mac'],
+ ['etherstub', 'mtu'],
+ ],
+ required_if=[
+ ['etherstub', False, ['name', 'mac']],
+ ['state', 'absent', ['name', 'force']],
+ ],
+ supports_check_mode=True
+ )
+
+ nictag = NicTag(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ changed=False,
+ etherstub=nictag.etherstub,
+ force=nictag.force,
+ name=nictag.name,
+ mac=nictag.mac,
+ mtu=nictag.mtu,
+ state=nictag.state,
+ )
+
+    if nictag.mac and not nictag.is_valid_mac():
+ module.fail_json(msg='Invalid MAC Address Value',
+ name=nictag.name,
+ mac=nictag.mac,
+ etherstub=nictag.etherstub)
+
+ if nictag.state == 'absent':
+ if nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.delete_nictag()
+ if rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+ elif nictag.state == 'present':
+ if not nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.add_nictag()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+
+ if rc is not None:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+ msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
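+# Editor's note: illustrative sketch only, not part of the upstream module and
+# never used. return_all_installed_images() above keys its result by image UUID
+# and merges the 'clones', 'source' and 'zpool' attributes into each manifest,
+# so a single entry has roughly the following shape (all values are hypothetical):
+_EXAMPLE_IMAGE_ENTRY = {
+    '00000000-0000-0000-0000-000000000000': {
+        'name': 'example-image',   # from the image manifest
+        'version': '1.0.0',        # from the image manifest
+        'clones': 2,               # merged attribute
+        'source': 'https://images.example.com',  # merged attribute
+        'zpool': 'zones',          # merged attribute
+    },
+}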
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+ msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
new file mode 100644
index 00000000..553e6efc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
@@ -0,0 +1,796 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vmadm
+short_description: Manage SmartOS virtual machines and zones.
+description:
+ - Manage SmartOS virtual machines through vmadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ archive_on_delete:
+ required: false
+ description:
+ - When enabled, the zone dataset will be mounted on C(/zones/archive)
+ upon removal.
+ type: bool
+ autoboot:
+ required: false
+ description:
+ - Whether or not a VM is booted when the system is rebooted.
+ type: bool
+ brand:
+ choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
+ default: joyent
+ description:
+ - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ type: str
+ boot:
+ required: false
+ description:
+ - Set the boot order for KVM VMs.
+ type: str
+ cpu_cap:
+ required: false
+ description:
+ - Sets a limit on the amount of CPU time that can be used by a VM.
+ Use C(0) for no cap.
+ type: int
+ cpu_shares:
+ required: false
+ description:
+ - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
+ a VM. This limit is relative to all other VMs on the system.
+ type: int
+ cpu_type:
+ required: false
+ choices: [ qemu64, host ]
+ default: qemu64
+ description:
+ - Control the type of virtual CPU exposed to KVM VMs.
+ type: str
+ customer_metadata:
+ required: false
+ description:
+    - Metadata to be set and associated with this VM; this contains customer
+ modifiable keys.
+ type: dict
+ delegate_dataset:
+ required: false
+ description:
+ - Whether to delegate a ZFS dataset to an OS VM.
+ type: bool
+ disk_driver:
+ required: false
+ description:
+ - Default value for a virtual disk model for KVM guests.
+ type: str
+ disks:
+ required: false
+ description:
+ - A list of disks to add, valid properties are documented in vmadm(1M).
+ type: list
+ dns_domain:
+ required: false
+ description:
+ - Domain value for C(/etc/hosts).
+ type: str
+ docker:
+ required: false
+ description:
+ - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ type: bool
+ filesystems:
+ required: false
+ description:
+ - Mount additional filesystems into an OS VM.
+ type: list
+ firewall_enabled:
+ required: false
+ description:
+ - Enables the firewall, allowing fwadm(1M) rules to be applied.
+ type: bool
+ force:
+ required: false
+ description:
+ - Force a particular action (i.e. stop or delete a VM).
+ type: bool
+ fs_allowed:
+ required: false
+ description:
+ - Comma separated list of filesystem types this zone is allowed to mount.
+ type: str
+ hostname:
+ required: false
+ description:
+ - Zone/VM hostname.
+ type: str
+ image_uuid:
+ required: false
+ description:
+ - Image UUID.
+ type: str
+ indestructible_delegated:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to delegated datasets.
+ type: bool
+ indestructible_zoneroot:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to zoneroot.
+ type: bool
+ internal_metadata:
+ required: false
+ description:
+    - Metadata to be set and associated with this VM; this contains operator
+ generated keys.
+ type: dict
+ internal_metadata_namespace:
+ required: false
+ description:
+ - List of namespaces to be set as I(internal_metadata-only); these namespaces
+ will come from I(internal_metadata) rather than I(customer_metadata).
+ type: str
+ kernel_version:
+ required: false
+ description:
+ - Kernel version to emulate for LX VMs.
+ type: str
+ limit_priv:
+ required: false
+ description:
+ - Set (comma separated) list of privileges the zone is allowed to use.
+ type: str
+ maintain_resolvers:
+ required: false
+ description:
+ - Resolvers in C(/etc/resolv.conf) will be updated when updating
+ the I(resolvers) property.
+ type: bool
+ max_locked_memory:
+ required: false
+ description:
+ - Total amount of memory (in MiBs) on the host that can be locked by this VM.
+ type: int
+ max_lwps:
+ required: false
+ description:
+ - Maximum number of lightweight processes this VM is allowed to have running.
+ type: int
+ max_physical_memory:
+ required: false
+ description:
+ - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
+ type: int
+ max_swap:
+ required: false
+ description:
+ - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
+ type: int
+ mdata_exec_timeout:
+ required: false
+ description:
+ - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
+ that runs user-scripts in the zone.
+ type: int
+ name:
+ required: false
+ aliases: [ alias ]
+ description:
+ - Name of the VM. vmadm(1M) uses this as an optional name.
+ type: str
+ nic_driver:
+ required: false
+ description:
+ - Default value for a virtual NIC model for KVM guests.
+ type: str
+ nics:
+ required: false
+ description:
+ - A list of nics to add, valid properties are documented in vmadm(1M).
+ type: list
+ nowait:
+ required: false
+ description:
+ - Consider the provisioning complete when the VM first starts, rather than
+ when the VM has rebooted.
+ type: bool
+ qemu_opts:
+ required: false
+ description:
+ - Additional qemu arguments for KVM guests. This overwrites the default arguments
+ provided by vmadm(1M) and should only be used for debugging.
+ type: str
+ qemu_extra_opts:
+ required: false
+ description:
+ - Additional qemu cmdline arguments for KVM guests.
+ type: str
+ quota:
+ required: false
+ description:
+ - Quota on zone filesystems (in MiBs).
+ type: int
+ ram:
+ required: false
+ description:
+ - Amount of virtual RAM for a KVM guest (in MiBs).
+ type: int
+ resolvers:
+ required: false
+ description:
+ - List of resolvers to be put into C(/etc/resolv.conf).
+ type: list
+ routes:
+ required: false
+ description:
+    - Dictionary that maps destinations to gateways; these will be set as static
+ routes in the VM.
+ type: dict
+ spice_opts:
+ required: false
+ description:
+    - Additional options for SPICE-enabled KVM VMs.
+ type: str
+ spice_password:
+ required: false
+ description:
+ - Password required to connect to SPICE. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ state:
+ choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+ default: running
+ description:
+ - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+ operate on a VM that is currently provisioned. C(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. C(absent) will
+      shut down the zone before removing it.
+ C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ it down.
+ type: str
+ tmpfs:
+ required: false
+ description:
+ - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+ type: int
+ uuid:
+ required: false
+ description:
+ - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ type: str
+ vcpus:
+ required: false
+ description:
+ - Number of virtual CPUs for a KVM guest.
+ type: int
+ vga:
+ required: false
+ description:
+ - Specify VGA emulation used by KVM VMs.
+ type: str
+ virtio_txburst:
+ required: false
+ description:
+ - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+ type: int
+ virtio_txtimer:
+ required: false
+ description:
+ - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+ type: int
+ vnc_password:
+ required: false
+ description:
+ - Password required to connect to VNC. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ vnc_port:
+ required: false
+ description:
+    - TCP port for the VNC server to listen on. Set C(0) for a random port,
+ or C(-1) to disable.
+ type: int
+ zfs_data_compression:
+ required: false
+ description:
+    - Specifies the compression algorithm used for this VM's data dataset. This option
+ only has effect on delegated datasets.
+ type: str
+ zfs_data_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+ type: int
+ zfs_filesystem_limit:
+ required: false
+ description:
+ - Maximum number of filesystems the VM can have.
+ type: int
+ zfs_io_priority:
+ required: false
+ description:
+ - IO throttle priority value relative to other VMs.
+ type: int
+ zfs_root_compression:
+ required: false
+ description:
+    - Specifies the compression algorithm used for this VM's root dataset. This option
+ only has effect on the zoneroot dataset.
+ type: str
+ zfs_root_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+ type: int
+ zfs_snapshot_limit:
+ required: false
+ description:
+ - Number of snapshots the VM can have.
+ type: int
+ zpool:
+ required: false
+ description:
+ - ZFS pool the VM's zone dataset will be created in.
+ type: str
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+ community.general.vmadm:
+ brand: joyent
+ state: present
+ alias: fw_zone
+ image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+ firewall_enabled: yes
+ indestructible_zoneroot: yes
+ nics:
+ - nic_tag: admin
+ ip: dhcp
+ primary: true
+ internal_metadata:
+ root_pw: 'secret'
+ quota: 1
+
+- name: Delete a zone
+ community.general.vmadm:
+ alias: test_zone
+ state: deleted
+
+- name: Stop all zones
+ community.general.vmadm:
+ uuid: '*'
+ state: stopped
+'''
+
+RETURN = '''
+uuid:
+ description: UUID of the managed VM.
+ returned: always
+ type: str
+ sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+ description: Alias of the managed VM.
+ returned: When addressing a VM by alias.
+ type: str
+ sample: 'dns-zone'
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'running'
+'''
+
+import json
+import os
+import re
+import tempfile
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+# While vmadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python.
+# The returned message contains '\n' as part of the stacktrace,
+# which breaks the parsers.
+
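+# Editor's note: illustrative sketch only, not part of the upstream module and
+# never called. The failure mode described above is that '-E' error output can
+# embed a raw stack trace, including literal newlines, inside a JSON string,
+# which json.loads() rejects. A hypothetical reproduction (the error text is
+# made up for illustration):
+def _demo_unparseable_vmadm_error():
+    broken = '{"message": "UncaughtError\n    at Object.<anonymous>"}'
+    try:
+        json.loads(broken)
+    except ValueError as exc:
+        # json raises a ValueError subclass for the embedded control character.
+        return 'unparseable: {0}'.format(exc)
+    return 'parsed unexpectedly'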
+
+def get_vm_prop(module, uuid, prop):
+ # Lookup a property for the given VM.
+ # Returns the property, or None if not found.
+ cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and prop in stdout_json[0]:
+ return stdout_json[0][prop]
+ else:
+ return None
+
+
+def get_vm_uuid(module, alias):
+ # Lookup the uuid that goes with the given alias.
+    # Returns the uuid or None if not found.
+ cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+
+ # If no VM was found matching the given alias, we get back an empty array.
+    # That is not an error condition as we might be explicitly checking its
+ # absence.
+ if stdout.strip() == '[]':
+ return None
+ else:
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
+ return stdout_json[0]['uuid']
+
+
+def get_all_vm_uuids(module):
+ # Retrieve the UUIDs for all VMs.
+ cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg='Failed to get VMs list', exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ return [v['uuid'] for v in stdout_json]
+ except Exception as e:
+ module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
+ exception=traceback.format_exc())
+
+
+def new_vm(module, uuid, vm_state):
+ payload_file = create_payload(module, uuid)
+
+ (rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
+
+ if rc != 0:
+ changed = False
+ module.fail_json(msg='Could not create VM', exception=stderr)
+ else:
+ changed = True
+ # 'vmadm create' returns all output to stderr...
+ match = re.match('Successfully created VM (.*)', stderr)
+ if match:
+ vm_uuid = match.groups()[0]
+ if not is_valid_uuid(vm_uuid):
+ module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+ else:
+ module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
+
+ # Now that the VM is created, ensure it is in the desired state (if not 'running')
+ if vm_state != 'running':
+ ret = set_vm_state(module, vm_uuid, vm_state)
+ if not ret:
+ module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+ try:
+ os.unlink(payload_file)
+ except Exception as e:
+ # Since the payload may contain sensitive information, fail hard
+ # if we cannot remove the file so the operator knows about it.
+ module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+ # Create a new VM using the provided payload.
+ cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
+
+ return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+ p = module.params
+
+ # Check if the VM is already in the desired state.
+ state = get_vm_prop(module, vm_uuid, 'state')
+ if state and (state == vm_state):
+ return None
+
+ # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+ cmds = {
+ 'stopped': ['stop', True],
+ 'running': ['start', False],
+ 'deleted': ['delete', True],
+ 'rebooted': ['reboot', False]
+ }
+
+ if p['force'] and cmds[vm_state][1]:
+ force = '-F'
+ else:
+ force = ''
+
+    cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ match = re.match('^Successfully.*', stderr)
+ if match:
+ return True
+ else:
+ return False
+
+
+def create_payload(module, uuid):
+ # Create the JSON payload (vmdef) and return the filename.
+
+ # Filter out the few options that are not valid VM properties.
+ module_options = ['debug', 'force', 'state']
+ # @TODO make this a simple {} comprehension as soon as py2 is ditched
+ # @TODO {k: v for k, v in p.items() if k not in module_options}
+ vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
+
+ try:
+ vmdef_json = json.dumps(vmdef)
+ except Exception as e:
+ module.fail_json(
+ msg='Could not create valid JSON payload', exception=traceback.format_exc())
+
+ # Create the temporary file that contains our payload, and set tight
+    # permissions, as it may contain sensitive information.
+ try:
+ # XXX: When there's a way to get the current ansible temporary directory
+ # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+ # the payload (thus removing the `save_payload` option).
+ fname = tempfile.mkstemp()[1]
+ os.chmod(fname, 0o400)
+ with open(fname, 'w') as fh:
+ fh.write(vmdef_json)
+ except Exception as e:
+ module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+ return fname
+
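+# Editor's note: illustrative sketch only, not part of the upstream module and
+# never used. For the first task in the EXAMPLES block above, create_payload()
+# would serialize a vmdef dictionary roughly like the one below: module-only
+# options ('debug', 'force', 'state') are filtered out, unset parameters are
+# dropped, and options with truthy defaults (such as brand and cpu_type) are
+# kept. The values are taken from that example task.
+_EXAMPLE_VMDEF = {
+    'brand': 'joyent',
+    'cpu_type': 'qemu64',  # included because of its truthy default
+    'name': 'fw_zone',     # module.params key behind the 'alias' option
+    'image_uuid': '95f265b8-96b2-11e6-9597-972f3af4b6d5',
+    'firewall_enabled': True,
+    'indestructible_zoneroot': True,
+    'nics': [{'nic_tag': 'admin', 'ip': 'dhcp', 'primary': True}],
+    'internal_metadata': {'root_pw': 'secret'},
+    'quota': 1,
+}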
+
+def vm_state_transition(module, uuid, vm_state):
+ ret = set_vm_state(module, uuid, vm_state)
+
+ # Whether the VM changed state.
+ if ret is None:
+ return False
+ elif ret:
+ return True
+ else:
+ module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+ if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
+ return True
+ else:
+ return False
+
+
+def validate_uuids(module):
+ # Perform basic UUID validation.
+ failed = []
+
+ for u in [['uuid', module.params['uuid']],
+ ['image_uuid', module.params['image_uuid']]]:
+ if u[1] and u[1] != '*':
+ if not is_valid_uuid(u[1]):
+ failed.append(u[0])
+
+ if len(failed) > 0:
+ module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+
+
+def manage_all_vms(module, vm_state):
+ # Handle operations for all VMs, which can by definition only
+ # be state transitions.
+ state = module.params['state']
+
+ if state == 'created':
+ module.fail_json(msg='State "created" is only valid for tasks with a single VM')
+
+ # If any of the VMs has a change, the task as a whole has a change.
+ any_changed = False
+
+ # First get all VM uuids and for each check their state, and adjust it if needed.
+ for uuid in get_all_vm_uuids(module):
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+ if not current_vm_state and vm_state == 'deleted':
+ any_changed = False
+ else:
+ if module.check_mode:
+                if (not current_vm_state) or (current_vm_state != vm_state):
+ any_changed = True
+ else:
+ any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
+
+ return any_changed
+
+
+def main():
+ # In order to reduce the clutter and boilerplate for trivial options,
+ # abstract the vmadm properties and build the dict of arguments later.
+ # Dict of all options that are simple to define based on their type.
+ # They're not required and have a default of None.
+ properties = {
+ 'str': [
+ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
+ 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
+ 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
+ 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
+ 'zfs_root_compression', 'zpool'
+ ],
+ 'bool': [
+ 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
+ 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
+ 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
+ ],
+ 'int': [
+ 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
+ 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
+ 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
+ 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
+ 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
+ 'zfs_snapshot_limit'
+ ],
+ 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
+ 'list': ['disks', 'nics', 'resolvers', 'filesystems']
+ }
+
+ # Start with the options that are not as trivial as those above.
+ options = dict(
+ state=dict(
+ default='running',
+ type='str',
+ choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
+ ),
+ name=dict(
+ default=None, type='str',
+ aliases=['alias']
+ ),
+ brand=dict(
+ default='joyent',
+ type='str',
+ choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
+ ),
+ cpu_type=dict(
+ default='qemu64',
+ type='str',
+ choices=['host', 'qemu64']
+ ),
+ # Regular strings, however these require additional options.
+ spice_password=dict(type='str', no_log=True),
+ vnc_password=dict(type='str', no_log=True),
+ )
+
+ # Add our 'simple' options to options dict.
+ for type in properties:
+ for p in properties[type]:
+ option = dict(default=None, type=type)
+ options[p] = option
+
+ module = AnsibleModule(
+ argument_spec=options,
+ supports_check_mode=True,
+ required_one_of=[['name', 'uuid']]
+ )
+
+ module.vmadm = module.get_bin_path('vmadm', required=True)
+
+ p = module.params
+ uuid = p['uuid']
+ state = p['state']
+
+ # Translate the state parameter into something we can use later on.
+ if state in ['present', 'running']:
+ vm_state = 'running'
+ elif state in ['stopped', 'created']:
+ vm_state = 'stopped'
+ elif state in ['absent', 'deleted']:
+ vm_state = 'deleted'
+ elif state in ['restarted', 'rebooted']:
+ vm_state = 'rebooted'
+
+ result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+ # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+ # it up.
+ if not uuid:
+ uuid = get_vm_uuid(module, p['name'])
+ # Bit of a chicken and egg problem here for VMs with state == deleted.
+    # If they're going to be removed in this play, we have to look up the
+    # uuid. If they're already deleted there's nothing to look up.
+    # So if state == deleted and get_vm_uuid() returned None, the VM is already
+ # deleted and there's nothing else to do.
+ if uuid is None and vm_state == 'deleted':
+ result['name'] = p['name']
+ module.exit_json(**result)
+
+ validate_uuids(module)
+
+ if p['name']:
+ result['name'] = p['name']
+ result['uuid'] = uuid
+
+ if uuid == '*':
+ result['changed'] = manage_all_vms(module, vm_state)
+ module.exit_json(**result)
+
+ # The general flow is as follows:
+    # - First the current state of the VM is obtained by its UUID.
+ # - If the state was not found and the desired state is 'deleted', return.
+ # - If the state was not found, it means the VM has to be created.
+ # Subsequently the VM will be set to the desired state (i.e. stopped)
+    # - Otherwise, it means the VM exists already and we operate on its
+ # state (i.e. reboot it.)
+ #
+ # In the future it should be possible to query the VM for a particular
+ # property as a valid state (i.e. queried) so the result can be
+ # registered.
+ # Also, VMs should be able to get their properties updated.
+ # Managing VM snapshots should be part of a standalone module.
+
+ # First obtain the VM state to determine what needs to be done with it.
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+
+ # First handle the case where the VM should be deleted and is not present.
+ if not current_vm_state and vm_state == 'deleted':
+ result['changed'] = False
+ elif module.check_mode:
+ # Shortcut for check mode, if there is no VM yet, it will need to be created.
+ # Or, if the VM is not in the desired state yet, it needs to transition.
+        if (not current_vm_state) or (current_vm_state != vm_state):
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+ # No VM was found that matched the given ID (alias or uuid), so we create it.
+ elif not current_vm_state:
+ result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+ else:
+ # VM was found, operate on its state directly.
+ result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
new file mode 100644
index 00000000..22556d91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances.
+ - When created, optionally waits for it to be 'running'.
+options:
+ instance_id:
+ description:
+      - Instance ID of the virtual instance on which to perform the action.
+ type: str
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance.
+ type: str
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance.
+ type: str
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed.
+ type: str
+ choices:
+ - ams01
+ - ams03
+ - che01
+ - dal01
+ - dal05
+ - dal06
+ - dal09
+ - dal10
+ - dal12
+ - dal13
+ - fra02
+ - fra04
+ - fra05
+ - hkg02
+ - hou02
+ - lon02
+ - lon04
+ - lon06
+ - mel01
+ - mex01
+ - mil01
+ - mon01
+ - osl01
+ - par01
+ - sao01
+ - sea01
+ - seo01
+ - sjc01
+ - sjc03
+ - sjc04
+ - sng01
+ - syd01
+ - syd04
+ - tok02
+ - tor01
+ - wdc01
+ - wdc04
+ - wdc06
+ - wdc07
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance.
+ type: str
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed.
+ type: bool
+ default: 'yes'
+ private:
+ description:
+ - Flag to determine if the instance should be private only.
+ type: bool
+ default: 'no'
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool
+ default: 'no'
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance.
+ type: bool
+ default: 'yes'
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance.
+ type: int
+ choices: [1, 2, 4, 8, 16, 32, 56]
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance.
+ type: int
+ choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+ flavor:
+ description:
+ - Specify which SoftLayer flavor template to use instead of cpus and memory.
+ version_added: '0.2.0'
+ type: str
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance.
+ default: [ 25 ]
+ type: list
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance.
+ type: str
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance.
+ type: str
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance.
+ choices: [10, 100, 1000]
+ type: int
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC.
+ type: str
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC.
+ type: str
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance.
+ type: list
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance.
+ type: str
+ state:
+ description:
+ - Create, or cancel a virtual instance.
+ - Specify C(present) for create, C(absent) to cancel.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ wait:
+ description:
+ - Flag used to wait for active status before returning.
+ type: bool
+ default: 'yes'
+ wait_time:
+ description:
+ - Time in seconds before wait returns.
+ default: 600
+ type: int
+requirements:
+ - python >= 2.6
+ - softlayer >= 4.1.1
+author:
+- Matt Colton (@mcltn)
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instance request
+ community.general.sl_vm:
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: no
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instances request
+ community.general.sl_vm:
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - hostname: instance-2
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: True
+ - hostname: instance-3
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: yes
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Cancel by tag
+ community.general.sl_vm:
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import json
+import time
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+# TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
+ 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
+ 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ datacenter=module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+ # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+ if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ module.params['image_id'] = ''
+ elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since it will use the template
+ else:
+ return False, None
+
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
+
+ instance = vsManager.create_instance(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ cpus=module.params.get('cpus'),
+ memory=module.params.get('memory'),
+ flavor=module.params.get('flavor'),
+ hourly=module.params.get('hourly'),
+ datacenter=module.params.get('datacenter'),
+ os_code=module.params.get('os_code'),
+ image_id=module.params.get('image_id'),
+ local_disk=module.params.get('local_disk'),
+ disks=module.params.get('disks'),
+ ssh_keys=module.params.get('ssh_keys'),
+ nic_speed=module.params.get('nic_speed'),
+ private=module.params.get('private'),
+ public_vlan=module.params.get('public_vlan'),
+ private_vlan=module.params.get('private_vlan'),
+ dedicated=module.params.get('dedicated'),
+ post_uri=module.params.get('post_uri'),
+ tags=tags,
+ )
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module, id):
+ instance = None
+ completed = False
+ wait_timeout = time.time() + module.params.get('wait_time')
+ while not completed and wait_timeout > time.time():
+ try:
+ completed = vsManager.wait_for_ready(id, 10, 2)
+ if completed:
+ instance = vsManager.get_instance(id)
+ except Exception:
+ completed = False
+
+ return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, string_types):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except Exception:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+            vsManager.cancel_instance(module.params.get('instance_id'))
+ except Exception:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str'),
+ hostname=dict(type='str'),
+ domain=dict(type='str'),
+ datacenter=dict(type='str', choices=DATACENTERS),
+ tags=dict(type='str'),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ flavor=dict(type='str'),
+ disks=dict(type='list', default=[25]),
+ os_code=dict(type='str'),
+ image_id=dict(type='str'),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(type='str'),
+ private_vlan=dict(type='str'),
+ ssh_keys=dict(type='list', default=[]),
+ post_uri=dict(type='str'),
+ state=dict(type='str', default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_time=dict(type='int', default=600),
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') is True and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
new file mode 100644
index 00000000..8f05da7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
@@ -0,0 +1,1543 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+  - Can create, update, or delete Spotinst AWS Elastigroups.
+    The launch configuration is part of the elastigroup configuration,
+    so no additional modules are necessary for handling the launch configuration.
+    You will have to have a credentials file in this location - C(<home>/.spotinst/credentials).
+    The credentials file must contain a row that looks like this - C(token = <YOUR TOKEN>).
+    Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-).
+requirements:
+ - python >= 2.7
+ - spotinst_sdk >= 1.0.38
+options:
+
+ credentials_path:
+ description:
+      - (Path) Optional parameter that allows setting a non-default credentials path.
+ default: ~/.spotinst/credentials
+ type: path
+
+ account_id:
+ description:
+      - (String) Optional parameter that allows setting an account-id inside the module configuration.
+        By default this is retrieved from the credentials path.
+ type: str
+
+ availability_vs_cost:
+ description:
+ - (String) The strategy orientation.
+ - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ name (String),
+ subnet_id (String),
+ placement_group_name (String),
+ required: true
+ type: list
+
+ block_device_mappings:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+ You can specify virtual devices and EBS volumes.;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ device_name (List of Strings),
+ virtual_name (String),
+ no_device (String),
+ ebs (Object, expects the following keys-
+ delete_on_termination(Boolean),
+ encrypted(Boolean),
+ iops (Integer),
+ snapshot_id(Integer),
+ volume_type(String),
+ volume_size(Integer))
+ type: list
+
+ chef:
+ description:
+ - (Object) The Chef integration configuration.;
+ Expects the following keys - chef_server (String),
+ organization (String),
+ user (String),
+ pem_key (String),
+ chef_version (String)
+ type: dict
+
+ draining_timeout:
+ description:
+ - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
+ Note - additional charges will be applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ volume_ids (List of Strings),
+ device_name (String)
+ type: list
+
+ ecs:
+ description:
+ - (Object) The ECS integration configuration.;
+ Expects the following key -
+ cluster_name (String)
+ type: dict
+
+ elastic_ips:
+ description:
+ - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
+ type: list
+
+ fallback_to_od:
+ description:
+ - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to C(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+      - (Integer) Minimal amount of time an instance should be unhealthy before it is considered unhealthy and replaced.
+ type: int
+
+ health_check_type:
+ description:
+ - (String) The service to use for the health check.
+ - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ type: str
+
+ iam_role_name:
+ description:
+ - (String) The instance profile iamRole name
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ iam_role_arn:
+ description:
+ - (String) The instance profile iamRole arn
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ id:
+ description:
+ - (String) The group id if it already exists and you want to update, or delete it.
+ This will not work unless the uniqueness_by field is set to id.
+ When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+ type: str
+
+ image_id:
+ description:
+ - (String) The image Id used to launch the instance.;
+ In case of conflict between Instance type and image type, an error will be returned
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - (String) Specify a Key Pair to attach to the instances
+ type: str
+
+ kubernetes:
+ description:
+ - (Object) The Kubernetes integration configuration.
+ Expects the following keys -
+ api_server (String),
+ token (String)
+ type: dict
+
+ lifetime_period:
+ description:
+ - (Integer) lifetime period
+ type: int
+
+ load_balancers:
+ description:
+ - (List of Strings) List of classic ELB names
+ type: list
+
+ max_size:
+ description:
+ - (Integer) The upper limit number of instances that you can scale up to
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - (Object) The Mesosphere integration configuration.
+ Expects the following key -
+ api_server (String)
+ type: dict
+
+ min_size:
+ description:
+ - (Integer) The lower limit number of instances that you can scale down to
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - (String) Describes whether instance Enhanced Monitoring is enabled
+ type: str
+
+ name:
+ description:
+ - (String) Unique name for elastigroup to be created, updated or deleted
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ description (String),
+ device_index (Integer),
+ secondary_private_ip_address_count (Integer),
+ associate_public_ip_address (Boolean),
+ delete_on_termination (Boolean),
+ groups (List of Strings),
+ network_interface_id (String),
+ private_ip_address (String),
+ subnet_id (String),
+ associate_ipv6_address (Boolean),
+ private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+ type: list
+
+ on_demand_count:
+ description:
+ - (Integer) Required if risk is not set
+ - Number of on demand instances to launch. All other instances will be spot instances.;
+ Either set this parameter or the risk parameter
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - (String) On-demand instance type that will be provisioned
+ type: str
+
+ opsworks:
+ description:
+ - (Object) The elastigroup OpsWorks integration configuration.
+ Expects the following key -
+ layer_id (String)
+ type: dict
+
+ persistence:
+ description:
+ - (Object) The stateful elastigroup configuration.
+ Accepts the following keys -
+ should_persist_root_device (Boolean),
+ should_persist_block_devices (Boolean),
+ should_persist_private_ip (Boolean)
+ type: dict
+
+ product:
+ description:
+ - (String) Operating system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - (Object) The Rancher integration configuration.
+ Expects the following keys -
+ version (String),
+ access_key (String),
+ secret_key (String),
+ master_host (String)
+ type: dict
+
+ right_scale:
+ description:
+ - (Object) The Rightscale integration configuration.
+ Expects the following keys -
+ account_id (String),
+ refresh_token (String)
+ type: dict
+
+ risk:
+ description:
+ - (Integer) Required if on_demand_count is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - (Object) Roll configuration.
+ If you would like the group to roll after updating, please use this feature.
+ Accepts the following keys -
+ batch_size_percentage (Integer, Required),
+ grace_period (Integer, Required),
+ health_check_type (String, Optional)
+ type: dict
+
+ scheduled_tasks:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ adjustment (Integer),
+ scale_target_capacity (Integer),
+ scale_min_capacity (Integer),
+ scale_max_capacity (Integer),
+ adjustment_percentage (Integer),
+ batch_size_percentage (Integer),
+ cron_expression (String),
+ frequency (String),
+ grace_period (Integer),
+ task_type (String, required),
+ is_enabled (Boolean)
+ type: list
+
+ security_group_ids:
+ description:
+ - (List of Strings) One or more security group IDs.
+ In case of update it will override the existing security groups with the new given array.
+ required: true
+ type: list
+
+ shutdown_script:
+ description:
+ - (String) The Base64-encoded shutdown script that executes prior to instance termination.
+ Encode before setting.
+ type: str
+
+ signals:
+ description:
+ - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
+ keys allowed are -
+ name (String, required),
+ timeout (Integer)
+ type: list
+
+ spin_up_time:
+ description:
+ - (Integer) spin up time, in seconds, for the instance
+ type: int
+
+ spot_instance_types:
+ description:
+ - (List of Strings) Spot instance type that will be provisioned.
+ required: true
+ type: list
+
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - (String) create or delete the elastigroup
+ default: present
+ type: str
+
+ tags:
+ description:
+ - (List of tagKey:tagValue pairs) A list of tags to configure in the elastigroup. Please specify a list of keys and values (key colon value).
+ type: list
+
+ target:
+ description:
+ - (Integer) The number of instances to launch
+ required: true
+ type: int
+
+ target_group_arns:
+ description:
+ - (List of Strings) List of target group arns instances should be registered to
+ type: list
+
+ tenancy:
+ description:
+ - (String) dedicated vs shared tenancy.
+ - "The available choices are: C(default), C(dedicated)."
+ type: str
+
+ terminate_at_end_of_billing_hour:
+ description:
+ - (Boolean) terminate at the end of billing hour
+ type: bool
+
+ unit:
+ description:
+ - (String) The capacity unit to launch instances by.
+ - "The available choices are: C(instance), C(weight)."
+ type: str
+
+ up_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ min_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ down_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ max_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ target_tracking_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ source (String, required),
+ metric_name (String, required),
+ statistic (String, required),
+ unit (String, required),
+ cooldown (String, required),
+ target (String, required)
+ type: list
+
+ uniqueness_by:
+ choices:
+ - id
+ - name
+ description:
+ - (String) If your group names are not unique, you may use this feature to update or delete a specific group.
+ Whenever this property is set to C(id), you must also set the C(id) parameter in order to update or delete a group, otherwise a new group will be created.
+ default: name
+ type: str
+
+ user_data:
+ description:
+ - (String) Base64-encoded MIME user data. Encode before setting the value.
+ type: str
+
+ utilize_reserved_instances:
+ description:
+ - (Boolean) In case of any available Reserved Instances,
+ Elastigroup will utilize your reservations before purchasing Spot instances.
+ type: bool
+
+ wait_for_instances:
+ description:
+ - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin up.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - (Integer) How long the module should wait for instances before failing the action.
+ Only works if wait_for_instances is True.
+ type: int
+
+'''
+EXAMPLES = '''
+# Basic configuration YAML example
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/sda1'
+ ebs:
+ volume_size: 100
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
+# In organizations with more than one account, it is required to specify an account_id
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ ebs:
+ volume_size: 60
+ volume_type: gp2
+ - device_name: '/dev/xvdb'
+ ebs:
+ volume_size: 120
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example we have set up block device mapping with ephemeral devices
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ virtual_name: ephemeral0
+ - device_name: '/dev/xvdb/'
+ virtual_name: ephemeral1
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example we create a basic group configuration with a network interface defined.
+# Each network interface must have a device index
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ network_interfaces:
+ - associate_public_ip_address: true
+ device_index: 0
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+
+# In this example we create a basic group configuration with a target tracking scaling policy defined
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ account_id: act-92d45673
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-79da021e
+ image_id: ami-f173cc91
+ fallback_to_od: true
+ tags:
+ - Creator: ValueOfCreatorTag
+ - Environment: ValueOfEnvironmentTag
+ key_pair: spotinst-labs-oregon
+ max_size: 10
+ min_size: 0
+ target: 2
+ unit: instance
+ monitoring: True
+ name: ansible-group-1
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-46cdc13d
+ spot_instance_types:
+ - c3.large
+ target_tracking_policies:
+ - policy_name: target-tracking-1
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ unit: percent
+ target: 50
+ cooldown: 120
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result
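+
+# In this example we create a basic group configuration with a simple CPU-based up-scaling policy.
+# This is an illustrative sketch only: the metric, threshold, operator and action_type values below
+# are placeholders/assumptions - consult the Spotinst API documentation for the values supported in
+# your account before using them.
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ up_scaling_policies:
+ - policy_name: cpu-scale-up
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ evaluation_periods: 5
+ period: 300
+ threshold: 80
+ cooldown: 300
+ unit: percent
+ operator: gte
+ action_type: adjustment
+ adjustment: 1
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result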
+'''
+
+RETURN = '''
+---
+instances:
+ description: List of active elastigroup instances and their details.
+ returned: success
+ type: dict
+ sample: [
+ {
+ "spotInstanceRequestId": "sir-regs25zp",
+ "instanceId": "i-09640ad8678234c",
+ "instanceType": "m4.large",
+ "product": "Linux/UNIX",
+ "availabilityZone": "us-west-2b",
+ "privateIp": "180.0.2.244",
+ "createdAt": "2017-07-17T12:46:18.000Z",
+ "status": "fulfilled"
+ }
+ ]
+group_id:
+ description: Created / Updated group's ID.
+ returned: success
+ type: str
+ sample: "sig-12345"
+
+'''
+
+HAS_SPOTINST_SDK = False
+__metaclass__ = type
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import spotinst_sdk as spotinst
+ from spotinst_sdk import SpotinstClientException
+
+ HAS_SPOTINST_SDK = True
+
+except ImportError:
+ pass
+
+eni_fields = ('description',
+ 'device_index',
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address',
+ 'delete_on_termination',
+ 'groups',
+ 'network_interface_id',
+ 'private_ip_address',
+ 'subnet_id',
+ 'associate_ipv6_address')
+
+private_ip_fields = ('private_ip_address',
+ 'primary')
+
+capacity_fields = (dict(ansible_field_name='min_size',
+ spotinst_field_name='minimum'),
+ dict(ansible_field_name='max_size',
+ spotinst_field_name='maximum'),
+ 'target',
+ 'unit')
+
+lspec_fields = ('user_data',
+ 'key_pair',
+ 'tenancy',
+ 'shutdown_script',
+ 'monitoring',
+ 'ebs_optimized',
+ 'image_id',
+ 'health_check_type',
+ 'health_check_grace_period',
+ 'health_check_unhealthy_duration_before_replacement',
+ 'security_group_ids')
+
+iam_fields = (dict(ansible_field_name='iam_role_name',
+ spotinst_field_name='name'),
+ dict(ansible_field_name='iam_role_arn',
+ spotinst_field_name='arn'))
+
+scheduled_task_fields = ('adjustment',
+ 'adjustment_percentage',
+ 'batch_size_percentage',
+ 'cron_expression',
+ 'frequency',
+ 'grace_period',
+ 'task_type',
+ 'is_enabled',
+ 'scale_target_capacity',
+ 'scale_min_capacity',
+ 'scale_max_capacity')
+
+scaling_policy_fields = ('policy_name',
+ 'namespace',
+ 'metric_name',
+ 'dimensions',
+ 'statistic',
+ 'evaluation_periods',
+ 'period',
+ 'threshold',
+ 'cooldown',
+ 'unit',
+ 'operator')
+
+tracking_policy_fields = ('policy_name',
+ 'namespace',
+ 'source',
+ 'metric_name',
+ 'statistic',
+ 'unit',
+ 'cooldown',
+ 'target',
+ 'threshold')
+
+action_fields = (dict(ansible_field_name='action_type',
+ spotinst_field_name='type'),
+ 'adjustment',
+ 'min_target_capacity',
+ 'max_target_capacity',
+ 'target',
+ 'minimum',
+ 'maximum')
+
+signal_fields = ('name',
+ 'timeout')
+
+multai_lb_fields = ('balancer_id',
+ 'project_id',
+ 'target_set_id',
+ 'az_awareness',
+ 'auto_weight')
+
+persistence_fields = ('should_persist_root_device',
+ 'should_persist_block_devices',
+ 'should_persist_private_ip')
+
+strategy_fields = ('risk',
+ 'utilize_reserved_instances',
+ 'fallback_to_od',
+ 'on_demand_count',
+ 'availability_vs_cost',
+ 'draining_timeout',
+ 'spin_up_time',
+ 'lifetime_period')
+
+ebs_fields = ('delete_on_termination',
+ 'encrypted',
+ 'iops',
+ 'snapshot_id',
+ 'volume_type',
+ 'volume_size')
+
+bdm_fields = ('device_name',
+ 'virtual_name',
+ 'no_device')
+
+kubernetes_fields = ('api_server',
+ 'token')
+
+right_scale_fields = ('account_id',
+ 'refresh_token')
+
+rancher_fields = ('access_key',
+ 'secret_key',
+ 'master_host',
+ 'version')
+
+chef_fields = ('chef_server',
+ 'organization',
+ 'user',
+ 'pem_key',
+ 'chef_version')
+
+az_fields = ('name',
+ 'subnet_id',
+ 'placement_group_name')
+
+opsworks_fields = ('layer_id',)
+
+scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
+
+mesosphere_fields = ('api_server',)
+
+ecs_fields = ('cluster_name',)
+
+multai_fields = ('multai_token',)
+
+
+def handle_elastigroup(client, module):
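+ # Decide whether to create, update or delete the group: when uniqueness_by is 'id' the
+ # provided id is used directly, otherwise the account's existing groups are searched by name.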
+ has_changed = False
+ group_id = None
+ message = 'None'
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ uniqueness_by = module.params.get('uniqueness_by')
+ external_group_id = module.params.get('id')
+
+ if uniqueness_by == 'id':
+ if external_group_id is None:
+ should_create = True
+ else:
+ should_create = False
+ group_id = external_group_id
+ else:
+ groups = client.get_elastigroups()
+ should_create, group_id = find_group_with_same_name(groups, name)
+
+ if should_create is True:
+ if state == 'present':
+ eg = expand_elastigroup(module, is_update=False)
+ module.debug(str(" [INFO] " + message + "\n"))
+ group = client.create_elastigroup(group=eg)
+ group_id = group['id']
+ message = 'Created group successfully.'
+ has_changed = True
+
+ elif state == 'absent':
+ message = 'Cannot delete non-existent group.'
+ has_changed = False
+ else:
+ eg = expand_elastigroup(module, is_update=True)
+
+ if state == 'present':
+ group = client.update_elastigroup(group_update=eg, group_id=group_id)
+ message = 'Updated group successfully.'
+
+ try:
+ roll_config = module.params.get('roll_config')
+ if roll_config:
+ eg_roll = spotinst.aws_elastigroup.Roll(
+ batch_size_percentage=roll_config.get('batch_size_percentage'),
+ grace_period=roll_config.get('grace_period'),
+ health_check_type=roll_config.get('health_check_type')
+ )
+ roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
+ message = 'Updated and started rolling the group successfully.'
+
+ except SpotinstClientException as exc:
+ message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
+ has_changed = True
+
+ elif state == 'absent':
+ try:
+ client.delete_elastigroup(group_id=group_id)
+ except SpotinstClientException as exc:
+ if "GROUP_DOESNT_EXIST" in exc.message:
+ pass
+ else:
+ module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+ message = 'Deleted group successfully.'
+ has_changed = True
+
+ return group_id, message, has_changed
+
+
+def retrieve_group_instances(client, module, group_id):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+
+ health_check_type = module.params.get('health_check_type')
+
+ if wait_timeout is None:
+ wait_timeout = 300
+
+ wait_timeout = time.time() + wait_timeout
+ target = module.params.get('target')
+ state = module.params.get('state')
+ instances = list()
+
+ if state == 'present' and group_id is not None and wait_for_instances is True:
+
+ is_amount_fulfilled = False
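+ # Poll every 10 seconds until the requested number of healthy (or active) instances
+ # reaches the group target, or until wait_timeout elapses.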
+ while is_amount_fulfilled is False and wait_timeout > time.time():
+ instances = list()
+ amount_of_fulfilled_instances = 0
+
+ if health_check_type is not None:
+ healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+ for healthy_instance in healthy_instances:
+ if healthy_instance.get('healthStatus') == 'HEALTHY':
+ amount_of_fulfilled_instances += 1
+ instances.append(healthy_instance)
+
+ else:
+ active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+ for active_instance in active_instances:
+ if active_instance.get('private_ip') is not None:
+ amount_of_fulfilled_instances += 1
+ instances.append(active_instance)
+
+ if amount_of_fulfilled_instances >= target:
+ is_amount_fulfilled = True
+
+ time.sleep(10)
+
+ return instances
+
+
+def find_group_with_same_name(groups, name):
+ for group in groups:
+ if group['name'] == name:
+ return False, group.get('id')
+
+ return True, None
+
+
+def expand_elastigroup(module, is_update):
+ do_not_update = module.params['do_not_update']
+ name = module.params.get('name')
+
+ eg = spotinst.aws_elastigroup.Elastigroup()
+ description = module.params.get('description')
+
+ if name is not None:
+ eg.name = name
+ if description is not None:
+ eg.description = description
+
+ # Capacity
+ expand_capacity(eg, module, is_update, do_not_update)
+ # Strategy
+ expand_strategy(eg, module)
+ # Scaling
+ expand_scaling(eg, module)
+ # Third party integrations
+ expand_integrations(eg, module)
+ # Compute
+ expand_compute(eg, module, is_update, do_not_update)
+ # Multai
+ expand_multai(eg, module)
+ # Scheduling
+ expand_scheduled_tasks(eg, module)
+
+ return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+ elastic_ips = module.params['elastic_ips']
+ on_demand_instance_type = module.params.get('on_demand_instance_type')
+ spot_instance_types = module.params['spot_instance_types']
+ ebs_volume_pool = module.params['ebs_volume_pool']
+ availability_zones_list = module.params['availability_zones']
+ product = module.params.get('product')
+
+ eg_compute = spotinst.aws_elastigroup.Compute()
+
+ if product is not None:
+ # Only put product on group creation
+ if is_update is not True:
+ eg_compute.product = product
+
+ if elastic_ips is not None:
+ eg_compute.elastic_ips = elastic_ips
+
+ if on_demand_instance_type is not None or spot_instance_types is not None:
+ eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+ # Map each parameter onto its matching SDK field
+ if on_demand_instance_type is not None:
+ eg_instance_types.ondemand = on_demand_instance_type
+ if spot_instance_types is not None:
+ eg_instance_types.spot = spot_instance_types
+
+ if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+ eg_compute.instance_types = eg_instance_types
+
+ expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+ eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+ expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+ eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+ if ebs_volumes_list is not None:
+ eg_volumes = []
+
+ for volume in ebs_volumes_list:
+ eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+ if volume.get('device_name') is not None:
+ eg_volume.device_name = volume.get('device_name')
+ if volume.get('volume_ids') is not None:
+ eg_volume.volume_ids = volume.get('volume_ids')
+
+ if eg_volume.device_name is not None:
+ eg_volumes.append(eg_volume)
+
+ if len(eg_volumes) > 0:
+ eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+ eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+ if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+ eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+ tags = module.params['tags']
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ block_device_mappings = module.params['block_device_mappings']
+ network_interfaces = module.params['network_interfaces']
+
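+ # Fields listed in do_not_update are removed from the update payload so their existing values are preserved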
+ if is_update is True:
+ if 'image_id' in do_not_update:
+ delattr(eg_launch_spec, 'image_id')
+
+ expand_tags(eg_launch_spec, tags)
+
+ expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+ expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+ expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+ eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+ rancher = module.params.get('rancher')
+ mesosphere = module.params.get('mesosphere')
+ ecs = module.params.get('ecs')
+ kubernetes = module.params.get('kubernetes')
+ right_scale = module.params.get('right_scale')
+ opsworks = module.params.get('opsworks')
+ chef = module.params.get('chef')
+
+ integration_exists = False
+
+ eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+ if mesosphere is not None:
+ eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+ integration_exists = True
+
+ if ecs is not None:
+ eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+ integration_exists = True
+
+ if kubernetes is not None:
+ eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+ integration_exists = True
+
+ if right_scale is not None:
+ eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+ integration_exists = True
+
+ if opsworks is not None:
+ eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+ integration_exists = True
+
+ if rancher is not None:
+ eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+ integration_exists = True
+
+ if chef is not None:
+ eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+ integration_exists = True
+
+ if integration_exists:
+ eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+ eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+ if is_update is True:
+ delattr(eg_capacity, 'unit')
+
+ if 'target' in do_not_update:
+ delattr(eg_capacity, 'target')
+
+ eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+ persistence = module.params.get('persistence')
+ signals = module.params.get('signals')
+
+ eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+ terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+ if terminate_at_end_of_billing_hour is not None:
+ eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+ module.params, 'ScalingStrategy')
+
+ if persistence is not None:
+ eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+ if signals is not None:
+ eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+ if len(eg_signals) > 0:
+ eg_strategy.signals = eg_signals
+
+ eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+ multai_load_balancers = module.params.get('multai_load_balancers')
+
+ eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+ if multai_load_balancers is not None:
+ eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+ if len(eg_multai_load_balancers) > 0:
+ eg_multai.balancers = eg_multai_load_balancers
+ eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+ scheduled_tasks = module.params.get('scheduled_tasks')
+
+ if scheduled_tasks is not None:
+ eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+ eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+ if len(eg_tasks) > 0:
+ eg_scheduling.tasks = eg_tasks
+ eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
+ if load_balancers is not None or target_group_arns is not None:
+ eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+ eg_total_lbs = []
+
+ if load_balancers is not None:
+ for elb_name in load_balancers:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if elb_name is not None:
+ eg_elb.name = elb_name
+ eg_elb.type = 'CLASSIC'
+ eg_total_lbs.append(eg_elb)
+
+ if target_group_arns is not None:
+ for target_arn in target_group_arns:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if target_arn is not None:
+ eg_elb.arn = target_arn
+ eg_elb.type = 'TARGET_GROUP'
+ eg_total_lbs.append(eg_elb)
+
+ if len(eg_total_lbs) > 0:
+ eg_load_balancers_config.load_balancers = eg_total_lbs
+ eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
+ if tags is not None:
+ eg_tags = []
+
+ for tag in tags:
+ eg_tag = spotinst.aws_elastigroup.Tag()
+ if tag.keys():
+ # list() is needed so this also works on Python 3, where keys()/values() return views
+ eg_tag.tag_key = list(tag.keys())[0]
+ if tag.values():
+ eg_tag.tag_value = list(tag.values())[0]
+
+ eg_tags.append(eg_tag)
+
+ if len(eg_tags) > 0:
+ eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+ if bdms is not None:
+ eg_bdms = []
+
+ for bdm in bdms:
+ eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+ if bdm.get('ebs') is not None:
+ eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+ eg_bdms.append(eg_bdm)
+
+ if len(eg_bdms) > 0:
+ eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+ if enis is not None:
+ eg_enis = []
+
+ for eni in enis:
+ eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+ eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+ if eg_pias is not None:
+ eg_eni.private_ip_addresses = eg_pias
+
+ eg_enis.append(eg_eni)
+
+ if len(eg_enis) > 0:
+ eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+ up_scaling_policies = module.params['up_scaling_policies']
+ down_scaling_policies = module.params['down_scaling_policies']
+ target_tracking_policies = module.params['target_tracking_policies']
+
+ eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+ if up_scaling_policies is not None:
+ eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+ if len(eg_up_scaling_policies) > 0:
+ eg_scaling.up = eg_up_scaling_policies
+
+ if down_scaling_policies is not None:
+ eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+ if len(eg_down_scaling_policies) > 0:
+ eg_scaling.down = eg_down_scaling_policies
+
+ if target_tracking_policies is not None:
+ eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+ if len(eg_target_tracking_policies) > 0:
+ eg_scaling.target = eg_target_tracking_policies
+
+ if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+ eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+ if items is not None:
+ new_objects_list = []
+ for item in items:
+ new_obj = expand_fields(fields, item, class_name)
+ new_objects_list.append(new_obj)
+
+ return new_objects_list
+
+
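+# expand_fields() is the generic mapper used throughout this module: it copies the listed
+# fields from an Ansible parameter dict onto a new Spotinst SDK object. As an illustrative
+# sketch, expand_fields(('name', 'timeout'), {'name': 'my-signal', 'timeout': 60}, 'Signal')
+# would return a Signal object with name='my-signal' and timeout=60; entries given as dicts
+# translate an Ansible field name to a differently named Spotinst field.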
+def expand_fields(fields, item, class_name):
+ class_ = getattr(spotinst.aws_elastigroup, class_name)
+ new_obj = class_()
+
+ # Handle primitive fields
+ if item is not None:
+ for field in fields:
+ if isinstance(field, dict):
+ ansible_field_name = field['ansible_field_name']
+ spotinst_field_name = field['spotinst_field_name']
+ else:
+ ansible_field_name = field
+ spotinst_field_name = field
+ if item.get(ansible_field_name) is not None:
+ setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+ return new_obj
+
+
+def expand_scaling_policies(scaling_policies):
+ eg_scaling_policies = []
+
+ for policy in scaling_policies:
+ eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
+ eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+ eg_scaling_policies.append(eg_policy)
+
+ return eg_scaling_policies
+
+
+def expand_target_tracking_policies(tracking_policies):
+ eg_tracking_policies = []
+
+ for policy in tracking_policies:
+ eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+ eg_tracking_policies.append(eg_policy)
+
+ return eg_tracking_policies
+
+
+def main():
+ fields = dict(
+ account_id=dict(type='str'),
+ availability_vs_cost=dict(type='str', required=True),
+ availability_zones=dict(type='list', required=True),
+ block_device_mappings=dict(type='list'),
+ chef=dict(type='dict'),
+ credentials_path=dict(type='path', default="~/.spotinst/credentials"),
+ do_not_update=dict(default=[], type='list'),
+ down_scaling_policies=dict(type='list'),
+ draining_timeout=dict(type='int'),
+ ebs_optimized=dict(type='bool'),
+ ebs_volume_pool=dict(type='list'),
+ ecs=dict(type='dict'),
+ elastic_beanstalk=dict(type='dict'),
+ elastic_ips=dict(type='list'),
+ fallback_to_od=dict(type='bool'),
+ id=dict(type='str'),
+ health_check_grace_period=dict(type='int'),
+ health_check_type=dict(type='str'),
+ health_check_unhealthy_duration_before_replacement=dict(type='int'),
+ iam_role_arn=dict(type='str'),
+ iam_role_name=dict(type='str'),
+ image_id=dict(type='str', required=True),
+ key_pair=dict(type='str'),
+ kubernetes=dict(type='dict'),
+ lifetime_period=dict(type='int'),
+ load_balancers=dict(type='list'),
+ max_size=dict(type='int', required=True),
+ mesosphere=dict(type='dict'),
+ min_size=dict(type='int', required=True),
+ monitoring=dict(type='str'),
+ multai_load_balancers=dict(type='list'),
+ multai_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ network_interfaces=dict(type='list'),
+ on_demand_count=dict(type='int'),
+ on_demand_instance_type=dict(type='str'),
+ opsworks=dict(type='dict'),
+ persistence=dict(type='dict'),
+ product=dict(type='str', required=True),
+ rancher=dict(type='dict'),
+ right_scale=dict(type='dict'),
+ risk=dict(type='int'),
+ roll_config=dict(type='dict'),
+ scheduled_tasks=dict(type='list'),
+ security_group_ids=dict(type='list', required=True),
+ shutdown_script=dict(type='str'),
+ signals=dict(type='list'),
+ spin_up_time=dict(type='int'),
+ spot_instance_types=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list'),
+ target=dict(type='int', required=True),
+ target_group_arns=dict(type='list'),
+ tenancy=dict(type='str'),
+ terminate_at_end_of_billing_hour=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ unit=dict(type='str'),
+ user_data=dict(type='str'),
+ utilize_reserved_instances=dict(type='bool'),
+ uniqueness_by=dict(default='name', choices=['name', 'id']),
+ up_scaling_policies=dict(type='list'),
+ target_tracking_policies=dict(type='list'),
+ wait_for_instances=dict(type='bool', default=False),
+ wait_timeout=dict(type='int')
+ )
+
+ module = AnsibleModule(argument_spec=fields)
+
+ if not HAS_SPOTINST_SDK:
+ module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
+
+ # Retrieve creds file variables
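+ # The credentials file is parsed as plain 'key = value' lines, for example
+ # 'token = <your API token>' and 'account = <your account id>'.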
+ creds_file_loaded_vars = dict()
+
+ credentials_path = module.params.get('credentials_path')
+
+ try:
+ with open(credentials_path, "r") as creds:
+ for line in creds:
+ eq_index = line.find('=')
+ var_name = line[:eq_index].strip()
+ string_value = line[eq_index + 1:].strip()
+ creds_file_loaded_vars[var_name] = string_value
+ except IOError:
+ pass
+ # End of creds file retrieval
+
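+ # Resolve the API token and account id: module parameter first, then environment
+ # variable, then the credentials file loaded above.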
+ token = module.params.get('token')
+ if not token:
+ token = os.environ.get('SPOTINST_TOKEN')
+ if not token:
+ token = creds_file_loaded_vars.get("token")
+
+ account = module.params.get('account_id')
+ if not account:
+ account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+ if not account:
+ account = creds_file_loaded_vars.get("account")
+
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+ if account is not None:
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+ group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+ instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+ module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
new file mode 100644
index 00000000..db89bd46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage dns entries on a univention corporate server
+description:
+ - "This module allows to manage dns records on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+ - Univention
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+ data:
+ required: false
+ default: []
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS record on a UCS
+ community.general.udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ a:
+ - 192.0.2.1
+ - 2001:0db8::42
+'''
+
+
+RETURN = '''#'''
+
+HAVE_UNIVENTION = False
+try:
+ from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+ )
+ HAVE_UNIVENTION = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ type='str'),
+ name=dict(required=True,
+ type='str'),
+ data=dict(default=[],
+ type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['data'])
+ ])
+ )
+
+ if not HAVE_UNIVENTION:
+ module.fail_json(msg="This module requires univention python bindings")
+
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
+ dn = 'relativeDomainName={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+ changed = obj.diff() != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
new file mode 100644
index 00000000..2428650e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage dns zones on a univention corporate server
+description:
+ - "This module allows to manage dns zones on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns zone is present or not.
+ type:
+ required: true
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ - "The available choices are: C(forward_zone), C(reverse_zone)."
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+ - List of interface IP addresses, on which the server should
+ respond for this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+ - List of MX servers. (Must be declared as A or AAAA records).
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS zone on a UCS
+ community.general.udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
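+# For illustration: convert_time(7200) returns ('2', 'hours') and convert_time(90) returns
+# ('1', 'minutes') - the value is truncated to the largest unit that fits.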
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{0}'.format(time // unit[0]), unit[1])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list'),
+ interfaces=dict(default=[],
+ type='list'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{0}'.format(base_dn())
+ dn = 'zoneName={0},{1}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{0}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{0}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
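+ # The SOA timer values are stored as (value, unit) tuples, produced by convert_time() above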
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
new file mode 100644
index 00000000..d2cf2aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix groups on a univention corporate server
+description:
+ - "This module allows to manage user groups on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ type: str
+ description:
+ required: false
+ description:
+ - Group description.
+ type: str
+ position:
+ required: false
+ description:
+ - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ type: str
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ type: str
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+ type: str
+ default: "cn=groups"
+'''
+
+
+EXAMPLES = '''
+- name: Create a POSIX group
+ community.general.udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+
+# or
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(default=None,
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
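+ # Build the LDAP container DN: an explicit position wins, otherwise it is assembled
+ # from the subpath, the optional OU and the LDAP base DN.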
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={0},{1}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+ changed = grp.diff() != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing group {0} in {1} failed".format(name, container)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing group {0} failed".format(name)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
new file mode 100644
index 00000000..3e8fb207
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage samba shares on a univention corporate server
+description:
+ - "This module allows to manage samba shares on a univention corporate
+ server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the share.
+ type: str
+ host:
+ required: false
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ type: str
+ path:
+ required: false
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ type: path
+ sambaName:
+ required: false
+ description:
+ - Windows name. Required if C(state=present).
+ type: str
+ aliases: [ samba_name ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ type: str
+ owner:
+ default: '0'
+ description:
+ - Directory owner of the share's root directory.
+ type: str
+ group:
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ type: str
+ directorymode:
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ type: str
+ root_squash:
+ default: true
+ description:
+ - Modify user ID for root user (root squashing).
+ type: bool
+ subtree_checking:
+ default: true
+ description:
+ - Subtree checking.
+ type: bool
+ sync:
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ type: str
+ writeable:
+ default: true
+ description:
+ - NFS write access.
+ type: bool
+ sambaBlockSize:
+ description:
+ - Blocking size.
+ type: str
+ aliases: [ samba_block_size ]
+ sambaBlockingLocks:
+ default: true
+ description:
+ - Blocking locks.
+ type: bool
+ aliases: [ samba_blocking_locks ]
+ sambaBrowseable:
+ description:
+ - Show in Windows network environment.
+ type: bool
+ default: True
+ aliases: [ samba_browsable ]
+ sambaCreateMode:
+ default: '0744'
+ description:
+ - File mode.
+ type: str
+ aliases: [ samba_create_mode ]
+ sambaCscPolicy:
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ type: str
+ aliases: [ samba_csc_policy ]
+ sambaCustomSettings:
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ type: list
+ aliases: [ samba_custom_settings ]
+ sambaDirectoryMode:
+ default: '0755'
+ description:
+ - Directory mode.
+ type: str
+ aliases: [ samba_directory_mode ]
+ sambaDirectorySecurityMode:
+ default: '0777'
+ description:
+ - Directory security mode.
+ type: str
+ aliases: [ samba_directory_security_mode ]
+ sambaDosFilemode:
+ default: false
+ description:
+ - Users with write access may modify permissions.
+ type: bool
+ aliases: [ samba_dos_filemode ]
+ sambaFakeOplocks:
+ default: false
+ description:
+ - Fake oplocks.
+ type: bool
+ aliases: [ samba_fake_oplocks ]
+ sambaForceCreateMode:
+ default: false
+ description:
+ - Force file mode.
+ type: bool
+ aliases: [ samba_force_create_mode ]
+ sambaForceDirectoryMode:
+ default: false
+ description:
+ - Force directory mode.
+ type: bool
+ aliases: [ samba_force_directory_mode ]
+ sambaForceDirectorySecurityMode:
+ default: false
+ description:
+ - Force directory security mode.
+ type: bool
+ aliases: [ samba_force_directory_security_mode ]
+ sambaForceGroup:
+ description:
+ - Force group.
+ type: str
+ aliases: [ samba_force_group ]
+ sambaForceSecurityMode:
+ default: false
+ description:
+ - Force security mode.
+ type: bool
+ aliases: [ samba_force_security_mode ]
+ sambaForceUser:
+ description:
+ - Force user.
+ type: str
+ aliases: [ samba_force_user ]
+ sambaHideFiles:
+ description:
+ - Hide files.
+ type: str
+ aliases: [ samba_hide_files ]
+ sambaHideUnreadable:
+ default: false
+ description:
+ - Hide unreadable files/directories.
+ type: bool
+ aliases: [ samba_hide_unreadable ]
+ sambaHostsAllow:
+ default: []
+ description:
+ - Allowed host/network.
+ type: list
+ aliases: [ samba_hosts_allow ]
+ sambaHostsDeny:
+ default: []
+ description:
+ - Denied host/network.
+ type: list
+ aliases: [ samba_hosts_deny ]
+ sambaInheritAcls:
+ default: true
+ description:
+ - Inherit ACLs.
+ type: bool
+ aliases: [ samba_inherit_acls ]
+ sambaInheritOwner:
+ default: false
+ description:
+ - Create files/directories with the owner of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_owner ]
+ sambaInheritPermissions:
+ default: false
+ description:
+ - Create files/directories with permissions of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_permissions ]
+ sambaInvalidUsers:
+ description:
+ - Invalid users or groups.
+ type: str
+ aliases: [ samba_invalid_users ]
+ sambaLevel2Oplocks:
+ default: true
+ description:
+ - Level 2 oplocks.
+ type: bool
+ aliases: [ samba_level_2_oplocks ]
+ sambaLocking:
+ default: true
+ description:
+ - Locking.
+ type: bool
+ aliases: [ samba_locking ]
+ sambaMSDFSRoot:
+ default: false
+ description:
+ - MSDFS root.
+ type: bool
+ aliases: [ samba_msdfs_root ]
+ sambaNtAclSupport:
+ default: true
+ description:
+ - NT ACL support.
+ type: bool
+ aliases: [ samba_nt_acl_support ]
+ sambaOplocks:
+ default: true
+ description:
+ - Oplocks.
+ type: bool
+ aliases: [ samba_oplocks ]
+ sambaPostexec:
+ description:
+ - Postexec script.
+ type: str
+ aliases: [ samba_postexec ]
+ sambaPreexec:
+ description:
+ - Preexec script.
+ type: str
+ aliases: [ samba_preexec ]
+ sambaPublic:
+ default: false
+ description:
+ - Allow anonymous read-only access with a guest user.
+ type: bool
+ aliases: [ samba_public ]
+ sambaSecurityMode:
+ default: '0777'
+ description:
+ - Security mode.
+ type: str
+ aliases: [ samba_security_mode ]
+ sambaStrictLocking:
+ default: 'Auto'
+ description:
+ - Strict locking.
+ type: str
+ aliases: [ samba_strict_locking ]
+ sambaVFSObjects:
+ description:
+ - VFS objects.
+ type: str
+ aliases: [ samba_vfs_objects ]
+ sambaValidUsers:
+ description:
+ - Valid users or groups.
+ type: str
+ aliases: [ samba_valid_users ]
+ sambaWriteList:
+ description:
+ - Restrict write access to these users/groups.
+ type: str
+ aliases: [ samba_write_list ]
+ sambaWriteable:
+ default: true
+ description:
+ - Samba write access.
+ type: bool
+ aliases: [ samba_writeable ]
+ nfs_hosts:
+ default: []
+ description:
+ - Only allow access for this host, IP address or network.
+ type: list
+ nfsCustomSettings:
+ default: []
+ description:
+ - Option name in exports file.
+ type: list
+ aliases: [ nfs_custom_settings ]
+'''
+
+
+EXAMPLES = '''
+- name: Create a share named home on the server ucs.example.com with the path /home
+ community.general.udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
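+
+# A hypothetical additional sketch (not part of the original module docs): the
+# share name, path, OU, network and Samba values below are illustrative assumptions.
+- name: Create a share named projects, restricted to one NFS network and not browseable via Samba
+  community.general.udm_share:
+    name: projects
+    ou: school
+    path: /srv/projects
+    host: ucs.example.com
+    sambaName: Projects
+    sambaBrowseable: false
+    nfs_hosts:
+      - 192.0.2.0/24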
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path',
+ default=None),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str',
+ default=None),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
+ dn = 'cn={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
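+        # UDM represents boolean share flags as the strings '1'/'0', so convert
+        # Python booleans before copying each parameter onto the share object.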
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as err:
+ module.fail_json(
+ msg='Removing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
new file mode 100644
index 00000000..efbd95f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: udm_user
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix users on a Univention Corporate Server
+description:
+ - "This module allows managing posix users on a Univention Corporate
+ Server (UCS).
+ It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ type: str
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ type: str
+ firstname:
+ description:
+ - First name. Required if C(state=present).
+ type: str
+ lastname:
+ description:
+ - Last name. Required if C(state=present).
+ type: str
+ password:
+ description:
+ - Password. Required if C(state=present).
+ type: str
+ birthday:
+ description:
+ - Birthday
+ type: str
+ city:
+ description:
+ - City of the user's business address.
+ type: str
+ country:
+ description:
+ - Country of the user's business address.
+ type: str
+ department_number:
+ description:
+ - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ type: str
+ description:
+ description:
+ - Description (not gecos)
+ type: str
+ display_name:
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ type: str
+ email:
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ type: list
+ employee_number:
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ type: str
+ employee_type:
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ type: str
+ gecos:
+ description:
+ - GECOS
+ type: str
+ groups:
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ type: list
+ home_share:
+ description:
+ - "Home NFS share. Must be an LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ type: str
+ home_share_path:
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ type: str
+ home_telephone_number:
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ type: list
+ homedrive:
+ description:
+ - Windows home drive, e.g. C("H:").
+ type: str
+ mail_alternative_address:
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ type: list
+ mail_home_server:
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ type: str
+ mail_primary_address:
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ type: str
+ mobile_telephone_number:
+ default: []
+ description:
+ - List of mobile telephone numbers.
+ aliases: [ mobileTelephoneNumber ]
+ type: list
+ organisation:
+ description:
+ - Organisation
+ aliases: [ organization ]
+ type: str
+ overridePWHistory:
+ type: bool
+ default: 'no'
+ description:
+ - Override password history
+ aliases: [ override_pw_history ]
+ overridePWLength:
+ type: bool
+ default: 'no'
+ description:
+ - Override password check
+ aliases: [ override_pw_length ]
+ pager_telephonenumber:
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ type: list
+ phone:
+ description:
+ - List of telephone numbers.
+ type: list
+ postcode:
+ description:
+ - Postal code of the user's business address.
+ type: str
+ primary_group:
+ description:
+ - Primary group. This must be the group LDAP DN.
+ - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ aliases: [ primaryGroup ]
+ type: str
+ profilepath:
+ description:
+ - Windows profile directory
+ type: str
+ pwd_change_next_login:
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ type: str
+ room_number:
+ description:
+ - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ type: str
+ samba_privileges:
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ type: list
+ samba_user_workstations:
+ description:
+ - Allow the authentication only on this Microsoft Windows host.
+ aliases: [ sambaUserWorkstations ]
+ type: list
+ sambahome:
+ description:
+ - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ type: str
+ scriptpath:
+ description:
+ - Windows logon script.
+ type: str
+ secretary:
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ type: list
+ serviceprovider:
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ type: list
+ shell:
+ default: '/bin/bash'
+ description:
+ - Login shell
+ type: str
+ street:
+ description:
+ - Street of the user's business address.
+ type: str
+ title:
+ description:
+ - Title, e.g. C(Prof.).
+ type: str
+ unixhome:
+ description:
+ - Unix home directory
+ - If not specified, it defaults to C(/home/$USERNAME).
+ type: str
+ userexpiry:
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ - If not specified, it defaults to the current day plus one year.
+ type: str
+ position:
+ default: ''
+ description:
+ - "Define the whole position of the user's object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ type: str
+ update_password:
+ default: always
+ choices: [ always, on_create ]
+ description:
+ - "C(always) will update passwords if they differ.
+ C(on_create) will only set the password for newly created users."
+ type: str
+ ou:
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ type: str
+ subpath:
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create a user on a UCS
+ community.general.udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+
+# or define the position
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
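+
+# A hypothetical sketch (not part of the original docs): the group names, OU
+# and e-mail address below are illustrative assumptions.
+- name: Create a user and add it to two existing POSIX groups
+  community.general.udm_user:
+    name: foo
+    password: secure_password
+    firstname: Foo
+    lastname: Bar
+    ou: school
+    groups:
+      - student
+      - exam
+    mail_primary_address: foo@school.example.com
+    update_password: on_create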
+'''
+
+
+RETURN = '''# '''
+
+import crypt
+from datetime import date, timedelta
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec=dict(
+ birthday=dict(type='str'),
+ city=dict(type='str'),
+ country=dict(type='str'),
+ department_number=dict(type='str',
+ aliases=['departmentNumber']),
+ description=dict(type='str'),
+ display_name=dict(type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list'),
+ employee_number=dict(type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(type='str',
+ aliases=['employeeType']),
+ firstname=dict(type='str'),
+ gecos=dict(type='str'),
+ groups=dict(default=[],
+ type='list'),
+ home_share=dict(type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(type='str'),
+ lastname=dict(type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(type='str',
+ aliases=['organization']),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password=dict(type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list'),
+ postcode=dict(type='str'),
+ primary_group=dict(type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(type='str'),
+ pwd_change_next_login=dict(type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(type='str'),
+ scriptpath=dict(type='str'),
+ secretary=dict(default=[],
+ type='list'),
+ serviceprovider=dict(default=[''],
+ type='list'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(type='str'),
+ title=dict(type='str'),
+ unixhome=dict(type='str'),
+ userexpiry=dict(type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={0},{1}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{0} {1}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{0}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ if 'userexpiry' in obj and obj.get('userexpiry') is None:
+ obj['userexpiry'] = expiry
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ if module.params['update_password'] == 'always':
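+                # The stored value is assumed to look like '{crypt}<hash>';
+                # strip the scheme prefix and re-crypt the supplied password
+                # with the stored hash as salt to detect whether it changed.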
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing user {0} in {1} failed".format(
+ username,
+ container
+ )
+ )
+ try:
+ groups = module.params['groups']
+ if groups:
+ filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
+ ')(cn='.join(groups)
+ )
+ group_dns = list(ldap_search(filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Adding groups to user {0} failed".format(username)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing user {0} failed".format(username)
+ )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
new file mode 100644
index 00000000..9a69ce54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an C(autostart.cgi) script
+ type: bool
+ default: 'no'
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ default: ''
+
+ port_open:
+ description:
+ - Whether the port should be opened
+ type: bool
+ default: 'no'
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ community.general.webfaction_app:
+ name: "my_wsgi_app1"
+ state: present
+ type: mod_wsgi35-python27
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
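+
+  # A hypothetical sketch (not part of the original docs) showing removal of
+  # the same app; the variable names are the ones used above.
+  - name: Remove the test app
+    community.general.webfaction_app:
+      name: "my_wsgi_app1"
+      state: absent
+      type: mod_wsgi35-python27
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"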
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(required=False, type='bool', default=False),
+ extra_info=dict(required=False, default=""),
+ port_open=dict(required=False, type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed=False,
+ result=existing_app,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
new file mode 100644
index 00000000..19bc6ea2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+ - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ community.general.webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
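+  # A hypothetical sketch (not part of the original docs) of the deletion
+  # described above; the values mirror the creation example.
+  - name: Delete the database and its default user
+    community.general.webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      type: mysql
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"
+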
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True, choices=['mysql', 'postgresql']),
+ password=dict(required=False, default=None, no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+ # If this isn't a dry run...
+ if not module.check_mode:
+
+ if not (existing_db or existing_user):
+ module.exit_json(changed=False,)
+
+ if existing_db:
+ # Delete the db if it exists
+ result.update(
+ webfaction.delete_db(session_id, db_name, db_type)
+ )
+
+ if existing_user:
+ # Delete the default db user if it exists
+ result.update(
+ webfaction.delete_db_user(session_id, db_name, db_type)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
new file mode 100644
index 00000000..a348ef51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+ - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you don't specify subdomains, the domain will be deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
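+  # A hypothetical sketch (not part of the original docs) of the behaviour
+  # described in the notes: when subdomains are given, only those are deleted.
+  - name: Delete only the blog subdomain, keeping the domain itself
+    community.general.webfaction_domain:
+      name: mydomain.com
+      state: absent
+      subdomains:
+        - blog
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+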
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ subdomains=dict(required=False, default=[], type='list'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ # print positional_args
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
new file mode 100644
index 00000000..144fad29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+ - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+ required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a mailbox
+ community.general.webfaction_mailbox:
+      mailbox_name: "mybox"
+      mailbox_password: "myboxpw"
+      state: present
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
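+
+  # A hypothetical sketch (not part of the original docs): removing the same
+  # mailbox with state: absent.
+  - name: Delete the mailbox
+    community.general.webfaction_mailbox:
+      mailbox_name: "mybox"
+      mailbox_password: "myboxpw"
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"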
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ mailbox_name=dict(required=True),
+ mailbox_password=dict(required=True, no_log=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+ site_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False,)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+ elif site_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
new file mode 100644
index 00000000..8ae98280
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+ - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ type: bool
+ default: 'no'
+
+ site_apps:
+ description:
+ - A mapping of URLs to apps
+ default: []
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create website
+ community.general.webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
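+
+  # A hypothetical sketch (not part of the original docs): removing the same
+  # site; host is still needed so the module can resolve the site's IP address.
+  - name: Delete the website
+    community.general.webfaction_site:
+      name: testsite1
+      state: absent
+      host: myhost.webfaction.com
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"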
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(required=False, type='list', default=[]),
+ site_apps=dict(required=False, type='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed=False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
+ for a in module.params['site_apps']:
+ positional_args.append((a[0], a[1]))
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
new file mode 100644
index 00000000..a9a5fb4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
@@ -0,0 +1,1933 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest
+short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ modify various virtual machine components like network and disk, rename a virtual machine and
+ remove a virtual machine with associated components.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
+ XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
+ detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
+ agent only supports None and Static types of network configuration, where None means a DHCP configured interface, C(networks.type) and C(networks.type6)
+ values C(none) and C(dhcp) have the same effect. More info here:
+ U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
+ C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
+ WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+ to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+ Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
+ parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+ useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+ U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+ - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+ - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ type: str
+ default: present
+ choices: [ present, absent, poweredon ]
+ name:
+ description:
+ - Name of the VM to work with.
+ - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+ - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ required: yes
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - VM description.
+ type: str
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str
+ template:
+ description:
+ - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
+ - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+ - In case of multiple templates/VMs/snapshots with the same name, use C(template_uuid) to uniquely specify the source template.
+ - If VM already exists, this setting will be ignored.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ template_src ]
+ template_uuid:
+ description:
+ - UUID of a template, an existing VM or a snapshot that should be used to create VM.
+ - It is required if template name is not unique.
+ type: str
+ is_template:
+ description:
+ - Convert VM to template.
+ type: bool
+ default: no
+ folder:
+ description:
+ - Destination folder for VM.
+ - This parameter is case sensitive.
+ - 'Example:'
+ - ' folder: /folder1/folder2'
+ type: str
+ hardware:
+ description:
+ - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
+ - 'Valid parameters are:'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ type: dict
+ disks:
+ description:
+ - A list of disks to add to VM.
+ - All parameters are case sensitive.
+ - Removing or detaching existing disks of VM is not supported.
+ - 'Required parameters per entry:'
+ - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
+ - 'Optional parameters per entry:'
+ - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
+ - ' - C(name_desc) (string): Disk description.'
+ - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
+ - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
+ type: list
+ elements: dict
+ aliases: [ disk ]
+ cdrom:
+ description:
+ - A CD-ROM configuration for the VM.
+ - All parameters are case sensitive.
+ - 'Valid parameters are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
+ - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
+ Required if C(type) is set to C(iso).'
+ type: dict
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - All parameters are case sensitive.
+ - 'Required parameters per entry:'
+ - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
+ - 'Optional parameters per entry (used for VM hardware):'
+ - ' - C(mac) (string): Customize MAC address of the interface.'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
+ - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
+ - ' - C(gateway) (string): Static IPv4 gateway.'
+ - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
+ - ' - C(gateway6) (string): Static IPv6 gateway.'
+ type: list
+ elements: dict
+ aliases: [ network ]
+ home_server:
+ description:
+ - Name of a XenServer host that will be a Home Server for the VM.
+ - This parameter is case sensitive.
+ type: str
+ custom_params:
+ description:
+ - Define a list of custom VM params to set on VM.
+    - Useful for advanced users familiar with managing VM params through the xe CLI.
+ - A custom value object takes two fields C(key) and C(value) (see example below).
+ type: list
+ elements: dict
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+ linked_clone:
+ description:
+    - Whether to create a Linked Clone from the template, existing VM or snapshot. If set to C(no), a full copy will be created.
+    - This is equivalent to the C(Use storage-level fast disk clone) option in XenCenter.
+ type: bool
+ default: no
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+    - This parameter is useful for removing a VM in running state or reconfiguring VM params that require the VM to be shut down.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM from a template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: CentOS 7
+ disks:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ memory_mb: 512
+ cdrom:
+ type: iso
+ iso_name: guest-tools.iso
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Create a VM template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_6
+ is_template: yes
+ disk:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ delegate_to: localhost
+ register: deploy
+
+- name: Rename a VM (requires the VM's UUID)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a VM by UUID
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ delegate_to: localhost
+
+- name: Modify custom params (boot order)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_8
+ state: present
+ custom_params:
+ - key: HVM_boot_params
+ value: { "order": "ndc" }
+ delegate_to: localhost
+
+- name: Customize network parameters
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_10
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100/24
+ gateway: 192.168.1.1
+ - type: dhcp
+ delegate_to: localhost
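+
+# A minimal illustrative task, reusing the VM and credential variables from the
+# examples above: it shows how wait_for_ip_address can be bounded with
+# state_change_timeout instead of waiting indefinitely.
+- name: Power on a VM and wait up to 300 seconds for an IP address
+  community.general.xenserver_guest:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_2
+    state: poweredon
+    wait_for_ip_address: yes
+    state_change_timeout: 300
+  delegate_to: localhost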
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+changes:
+ description: Detected or made changes to VM
+ returned: always
+ type: list
+ sample: [
+ {
+ "hardware": [
+ "num_cpus"
+ ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ],
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
+ is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
+ is_valid_ip6_addr, is_valid_ip6_prefix)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def exists(self):
+ """Returns True if VM exists, else False."""
+        return self.vm_ref is not None
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+ def deploy(self):
+ """Deploys new VM from template."""
+ # Safety check.
+ if self.exists():
+ self.module.fail_json(msg="Called deploy on existing VM!")
+
+ try:
+ templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
+ msg_prefix="VM deploy: ")
+
+ # Is this an existing running VM?
+ if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
+ self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
+
+ # Find a SR we can use for VM.copy(). We use SR of the first disk
+ # if specified or default SR if not specified.
+ disk_params_list = self.module.params['disks']
+
+ sr_ref = None
+
+ if disk_params_list:
+ disk_params = disk_params_list[0]
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM deploy disks[0]: ")
+
+ if not sr_ref:
+ if self.default_sr_ref != "OpaqueRef:NULL":
+ sr_ref = self.default_sr_ref
+ else:
+ self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
+
+ # VM name could be an empty string which is bad.
+ if self.module.params['name'] is not None and not self.module.params['name']:
+ self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Now we can instantiate VM. We use VM.clone for linked_clone and
+ # VM.copy for non linked_clone.
+ if self.module.params['linked_clone']:
+ self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
+ else:
+ self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
+
+ # Description is copied over from template so we reset it.
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
+
+ # If template is one of built-in XenServer templates, we have to
+ # do some additional steps.
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2
+ # onward so we use an alternative way.
+ templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
+
+ if "default_template" in templ_other_config and templ_other_config['default_template']:
+                # other_config of built-in XenServer templates has a key called
+                # 'disks' with the following content:
+                #   disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
+                # This value of other_config is copied to the cloned or copied VM
+                # and it prevents provisioning of the VM because sr is not
+                # specified and XAPI returns an error. To get around this, we
+                # remove the 'disks' key and add disks to the VM later ourselves.
+ vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
+
+ if "disks" in vm_other_config:
+ del vm_other_config['disks']
+
+ self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
+
+ # At this point we have VM ready for provisioning.
+ self.xapi_session.xenapi.VM.provision(self.vm_ref)
+
+ # After provisioning we can prepare vm_params for reconfigure().
+ self.gather_params()
+
+ # VM is almost ready. We just need to reconfigure it...
+ self.reconfigure()
+
+ # Power on VM if needed.
+ if self.module.params['state'] == "poweredon":
+ self.set_power_state("poweredon")
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def reconfigure(self):
+ """Reconfigures an existing VM.
+
+ Returns:
+ list: parameters that were reconfigured.
+ """
+ # Safety check.
+ if not self.exists():
+            self.module.fail_json(msg="Called reconfigure on non-existing VM!")
+
+ config_changes = self.get_changes()
+
+ vm_power_state_save = self.vm_params['power_state'].lower()
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return config_changes
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+ self.set_power_state("shutdownguest")
+
+ try:
+ for change in config_changes:
+ if isinstance(change, six.string_types):
+ if change == "name":
+ self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+ elif change == "name_desc":
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+ elif change == "folder":
+ self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+
+ if self.module.params['folder']:
+ self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+ elif change == "home_server":
+ if self.module.params['home_server']:
+ host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+ else:
+ host_ref = "OpaqueRef:NULL"
+
+ self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
+ elif isinstance(change, dict):
+ if change.get('hardware'):
+ for hardware_change in change['hardware']:
+ if hardware_change == "num_cpus":
+ num_cpus = int(self.module.params['hardware']['num_cpus'])
+
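+                                # XAPI expects VCPUs_at_startup <= VCPUs_max at all
+                                # times, so the order of the two calls below depends
+                                # on whether the CPU count is lowered or raised.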
+ if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ else:
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ elif hardware_change == "num_cpu_cores_per_socket":
+ self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
+ num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+
+ if num_cpu_cores_per_socket > 1:
+ self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+ elif hardware_change == "memory_mb":
+ memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
+ vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
+
+ self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
+ elif change.get('disks_changed'):
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+ position = 0
+
+ for disk_change_list in change['disks_changed']:
+ for disk_change in disk_change_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+
+ if disk_change == "name":
+ self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+ elif disk_change == "name_desc":
+ self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+ elif disk_change == "size":
+ self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
+ "VM reconfigure disks[%s]: " % position)))
+
+ position += 1
+ elif change.get('disks_new'):
+ for position, disk_userdevice in change['disks_new']:
+ disk_params = self.module.params['disks'][position]
+
+ disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+ disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
+
+ if disk_params.get('sr_uuid'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
+ elif disk_params.get('sr'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+ else:
+ sr_ref = self.default_sr_ref
+
+ disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+
+ new_disk_vdi = {
+ "name_label": disk_name,
+ "name_description": disk_name_desc,
+ "SR": sr_ref,
+ "virtual_size": disk_size,
+ "type": "user",
+ "sharable": False,
+ "read_only": False,
+ "other_config": {},
+ }
+
+ new_disk_vbd = {
+ "VM": self.vm_ref,
+ "VDI": None,
+ "userdevice": disk_userdevice,
+ "bootable": False,
+ "mode": "RW",
+ "type": "Disk",
+ "empty": False,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+ vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
+
+ elif change.get('cdrom'):
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If there is no CD present, we have to create one.
+ if not vm_cdrom_params_list:
+                            # We will try to place the cdrom at userdevice position
+                            # 3 (which is the default) if it is not already occupied,
+                            # otherwise we will place it at the first allowed position.
+ cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if "3" in cdrom_userdevices_allowed:
+ cdrom_userdevice = "3"
+ else:
+ cdrom_userdevice = cdrom_userdevices_allowed[0]
+
+ cdrom_vbd = {
+ "VM": self.vm_ref,
+ "VDI": "OpaqueRef:NULL",
+ "userdevice": cdrom_userdevice,
+ "bootable": False,
+ "mode": "RO",
+ "type": "CD",
+ "empty": True,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
+ else:
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
+
+ cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
+
+ for cdrom_change in change['cdrom']:
+ if cdrom_change == "type":
+ cdrom_type = self.module.params['cdrom']['type']
+
+ if cdrom_type == "none" and not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+ elif cdrom_type == "host":
+ # Unimplemented!
+ pass
+
+ elif cdrom_change == "iso_name":
+ if not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+
+ cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
+ self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
+ elif change.get('networks_changed'):
+ position = 0
+
+ for network_change_list in change['networks_changed']:
+ if network_change_list:
+ vm_vif_params = self.vm_params['VIFs'][position]
+ network_params = self.module.params['networks'][position]
+
+ vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
+ network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
+
+ vif_recreated = False
+
+ if "name" in network_change_list or "mac" in network_change_list:
+ # To change network or MAC, we destroy old
+ # VIF and then create a new one with changed
+ # parameters. That's how XenCenter does it.
+
+ # Copy all old parameters to new VIF record.
+ vif = {
+ "device": vm_vif_params['device'],
+ "network": network_ref,
+ "VM": vm_vif_params['VM'],
+ "MAC": vm_vif_params['MAC'],
+ "MTU": vm_vif_params['MTU'],
+ "other_config": vm_vif_params['other_config'],
+ "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
+ "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
+ "locking_mode": vm_vif_params['locking_mode'],
+ "ipv4_allowed": vm_vif_params['ipv4_allowed'],
+ "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+ }
+
+ if "name" in network_change_list:
+ network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+ vif['network'] = network_ref_new
+ vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+
+ if "mac" in network_change_list:
+ vif['MAC'] = network_params['mac'].lower()
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.unplug(vif_ref)
+
+ self.xapi_session.xenapi.VIF.destroy(vif_ref)
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ vif_ref = vif_ref_new
+ vif_recreated = True
+
+ if self.vm_params['customization_agent'] == "native":
+ vif_reconfigure_needed = False
+
+ if "type" in network_change_list:
+ network_type = network_params['type'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type = vm_vif_params['ipv4_configuration_mode']
+
+ if "ip" in network_change_list:
+ network_ip = network_params['ip']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses']:
+ network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+ else:
+ network_ip = ""
+
+ if "prefix" in network_change_list:
+ network_prefix = "/%s" % network_params['prefix']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ else:
+ network_prefix = ""
+
+ if "gateway" in network_change_list:
+ network_gateway = network_params['gateway']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway = vm_vif_params['ipv4_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
+ "%s%s" % (network_ip, network_prefix), network_gateway)
+
+ vif_reconfigure_needed = False
+
+ if "type6" in network_change_list:
+ network_type6 = network_params['type6'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type6 = vm_vif_params['ipv6_configuration_mode']
+
+ if "ip6" in network_change_list:
+ network_ip6 = network_params['ip6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses']:
+ network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+ else:
+ network_ip6 = ""
+
+ if "prefix6" in network_change_list:
+ network_prefix6 = "/%s" % network_params['prefix6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+ else:
+ network_prefix6 = ""
+
+ if "gateway6" in network_change_list:
+ network_gateway6 = network_params['gateway6']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway6 = vm_vif_params['ipv6_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
+ "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vif_device = vm_vif_params['device']
+
+                                    # A user could have manually changed the
+                                    # network or MAC, e.g. through XenCenter, and
+                                    # then also made those changes in the playbook.
+                                    # In that case, the module will not detect any
+                                    # changes and the info in xenstore_data will
+                                    # become stale. For that reason we always
+                                    # update name and mac in xenstore_data.
+
+ # Since we handle name and mac differently,
+ # we have to remove them from
+ # network_change_list.
+ network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+
+ for network_change in network_change_list_tmp + ['name', 'mac']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change))
+
+ if network_params.get('name'):
+ network_name = network_params['name']
+ else:
+ network_name = vm_vif_params['network']['name_label']
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+
+ if network_params.get('mac'):
+ network_mac = network_params['mac'].lower()
+ else:
+ network_mac = vm_vif_params['MAC'].lower()
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+
+ for network_change in network_change_list_tmp:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change),
+ network_params[network_change])
+
+ position += 1
+ elif change.get('networks_new'):
+ for position, vif_device in change['networks_new']:
+ network_params = self.module.params['networks'][position]
+
+ network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+
+ network_name = network_params['name']
+ network_mac = network_params['mac'] if network_params.get('mac') else ""
+ network_type = network_params.get('type')
+ network_ip = network_params['ip'] if network_params.get('ip') else ""
+ network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
+ network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
+ network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
+ network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
+ network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+
+ vif = {
+ "device": vif_device,
+ "network": network_ref,
+ "VM": self.vm_ref,
+ "MAC": network_mac,
+ "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
+ "%s/%s" % (network_ip, network_prefix), network_gateway)
+
+ if network_type6 and network_type6 == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
+ "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+ elif self.vm_params['customization_agent'] == "custom":
+ # We first have to remove any existing data
+ # from xenstore_data because there could be
+ # some old leftover data from some interface
+ # that once occupied same device location as
+ # our new interface.
+ for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+
+ # We get MAC from VIF itself instead of
+ # networks.mac because it could be
+ # autogenerated.
+ vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+
+ if network_type:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+
+ if network_type == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip" % vif_device, network_ip)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+
+ if network_type6:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+
+ if network_type6 == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+
+ elif change.get('custom_params'):
+ for position in change['custom_params']:
+ custom_param_key = self.module.params['custom_params'][position]['key']
+ custom_param_value = self.module.params['custom_params'][position]['value']
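+                            # e.g. a custom param key of HVM_boot_params translates
+                            # to the XAPI call VM.set_HVM_boot_params (see the boot
+                            # order example in EXAMPLES).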
+ self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+
+ if self.module.params['is_template']:
+ self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
+ elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+ self.set_power_state("poweredon")
+
+ # Gather new params after reconfiguration.
+ self.gather_params()
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return config_changes
+
+ def destroy(self):
+        """Removes an existing VM with associated disks."""
+ # Safety check.
+ if not self.exists():
+            self.module.fail_json(msg="Called destroy on non-existing VM!")
+
+ if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Make sure that VM is poweredoff before we can destroy it.
+ self.set_power_state("poweredoff")
+
+ try:
+ # Destroy VM!
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref)
+
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # Destroy all VDIs associated with VM!
+ for vm_disk_params in vm_disk_params_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+
+ self.xapi_session.xenapi.VDI.destroy(vdi_ref)
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_changes(self):
+ """Finds VM parameters that differ from specified ones.
+
+        This method builds a list of VM parameters that differ from
+        those specified in module parameters.
+
+ Returns:
+ list: VM parameters that differ from those specified in
+ module parameters.
+ """
+ # Safety check.
+ if not self.exists():
+            self.module.fail_json(msg="Called get_changes on non-existing VM!")
+
+ need_poweredoff = False
+
+ if self.module.params['is_template']:
+ need_poweredoff = True
+
+ try:
+ # This VM could be a template or a snapshot. In that case we fail
+ # because we can't reconfigure them or it would just be too
+ # dangerous.
+ if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+
+ if self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+
+ # Let's build a list of parameters that changed.
+ config_changes = []
+
+ # Name could only differ if we found an existing VM by uuid.
+ if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
+ if self.module.params['name']:
+ config_changes.append('name')
+ else:
+ self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
+
+ if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
+ config_changes.append('name_desc')
+
+ # Folder parameter is found in other_config.
+ vm_other_config = self.vm_params['other_config']
+ vm_folder = vm_other_config.get('folder', '')
+
+ if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
+ config_changes.append('folder')
+
+ if self.module.params['home_server'] is not None:
+ if (self.module.params['home_server'] and
+ (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
+
+                    # Check existence only. Ignore return value.
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
+ msg_prefix="VM check home_server: ")
+
+ config_changes.append('home_server')
+ elif not self.module.params['home_server'] and self.vm_params['affinity']:
+ config_changes.append('home_server')
+
+ config_changes_hardware = []
+
+ if self.module.params['hardware']:
+ num_cpus = self.module.params['hardware'].get('num_cpus')
+
+ if num_cpus is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpus = int(num_cpus)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
+
+ if num_cpus < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
+
+ # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
+ # say the former is the way to go but this needs
+ # confirmation and testing.
+ if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+ config_changes_hardware.append('num_cpus')
+                        # For now, we don't support hotplugging so the VM has to
+                        # be in powered off state to reconfigure.
+ need_poweredoff = True
+
+ num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+ if num_cpu_cores_per_socket is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+ if num_cpu_cores_per_socket < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+ if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
+ vm_platform = self.vm_params['platform']
+ vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+ if num_cpu_cores_per_socket != vm_cores_per_socket:
+ config_changes_hardware.append('num_cpu_cores_per_socket')
+                        # For now, we don't support hotplugging so the VM has to
+                        # be in powered off state to reconfigure.
+ need_poweredoff = True
+
+ memory_mb = self.module.params['hardware'].get('memory_mb')
+
+ if memory_mb is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ memory_mb = int(memory_mb)
+                    except ValueError:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+ if memory_mb < 1:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+ # There are multiple memory parameters:
+ # - memory_dynamic_max
+ # - memory_dynamic_min
+ # - memory_static_max
+ # - memory_static_min
+ # - memory_target
+ #
+ # memory_target seems like a good candidate but it returns 0 for
+ # halted VMs so we can't use it.
+ #
+ # I decided to use memory_dynamic_max and memory_static_max
+ # and use whichever is larger. This strategy needs validation
+ # and testing.
+ #
+ # XenServer stores memory size in bytes so we need to divide
+ # it by 1024*1024 = 1048576.
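+                    # For example, with memory_dynamic_max = 1073741824 (1 GB) and
+                    # memory_static_max = 2147483648 (2 GB), a module value of
+                    # memory_mb: 2048 is considered unchanged.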
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+ config_changes_hardware.append('memory_mb')
+                        # For now, we don't support hotplugging so the VM has to
+                        # be in powered off state to reconfigure.
+ need_poweredoff = True
+
+ if config_changes_hardware:
+ config_changes.append({"hardware": config_changes_hardware})
+
+ config_changes_disks = []
+ config_new_disks = []
+
+ # Find allowed userdevices.
+ vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if self.module.params['disks']:
+                # Get the list of all disks. Filter out any CDs found.
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+                # The number of disks defined in module params has to be the same
+                # as or higher than the number of existing disks attached to the VM.
+                # We don't support removal or detachment of disks.
+ if len(self.module.params['disks']) < len(vm_disk_params_list):
+                    self.module.fail_json(msg="VM check disks: provided disks configuration has fewer disks than the target VM (%d < %d)!" %
+ (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+                # Find the highest userdevice occupied by a disk.
+ if not vm_disk_params_list:
+ vm_disk_userdevice_highest = "-1"
+ else:
+ vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+ for position in range(len(self.module.params['disks'])):
+ if position < len(vm_disk_params_list):
+ vm_disk_params = vm_disk_params_list[position]
+ else:
+ vm_disk_params = None
+
+ disk_params = self.module.params['disks'][position]
+
+ disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+
+ disk_name = disk_params.get('name')
+
+ if disk_name is not None and not disk_name:
+ self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+ # If this is an existing disk.
+ if vm_disk_params and vm_disk_params['VDI']:
+ disk_changes = []
+
+ if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+ disk_changes.append('name')
+
+ disk_name_desc = disk_params.get('name_desc')
+
+ if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+ disk_changes.append('name_desc')
+
+ if disk_size:
+ if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+ disk_changes.append('size')
+ need_poweredoff = True
+ elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+ self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+ "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+ config_changes_disks.append(disk_changes)
+ # If this is a new disk.
+ else:
+ if not disk_size:
+ self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+                            # Check existence only. Ignore return value.
+ get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM check disks[%s]: " % position)
+ elif self.default_sr_ref == 'OpaqueRef:NULL':
+ self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+ if not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+ disk_userdevice = None
+
+ # We need to place a new disk right above the highest
+ # placed existing disk to maintain relative disk
+ # positions pairable with disk specifications in
+ # module params. That place must not be occupied by
+ # some other device like CD-ROM.
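+                    # For example, with existing disks at userdevices 0 and 1 and a
+                    # CD-ROM at 3, the first allowed userdevice above 1 (typically 2)
+                    # is picked for the new disk.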
+ for userdevice in vbd_userdevices_allowed:
+ if int(userdevice) > int(vm_disk_userdevice_highest):
+ disk_userdevice = userdevice
+ vbd_userdevices_allowed.remove(userdevice)
+ vm_disk_userdevice_highest = userdevice
+ break
+
+ # If no place was found.
+ if disk_userdevice is None:
+ # Highest occupied place could be a CD-ROM device
+ # so we have to include all devices regardless of
+ # type when calculating out-of-bound position.
+ disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+ self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+ # For new disks we only track their position.
+ config_new_disks.append((position, disk_userdevice))
+
+ # We should append config_changes_disks to config_changes only
+ # if there is at least one changed disk, else skip.
+ for disk_change in config_changes_disks:
+ if disk_change:
+ config_changes.append({"disks_changed": config_changes_disks})
+ break
+
+ if config_new_disks:
+ config_changes.append({"disks_new": config_new_disks})
+
+ config_changes_cdrom = []
+
+ if self.module.params['cdrom']:
+                # Get the list of all CD-ROMs. Filter out any regular disks
+                # found. If we find no existing CD-ROM, we will create one
+                # later, otherwise we take the first one found.
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If no existing CD-ROM is found, we will need to add one.
+ # We need to check if there is any userdevice allowed.
+ if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+ cdrom_type = self.module.params['cdrom'].get('type')
+ cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+                # If cdrom.iso_name is specified but cdrom.type is not,
+                # then set cdrom.type to 'iso', unless cdrom.iso_name is
+                # an empty string, in which case set cdrom.type to 'none'.
+ if not cdrom_type:
+ if cdrom_iso_name:
+ cdrom_type = "iso"
+ elif cdrom_iso_name is not None:
+ cdrom_type = "none"
+
+ self.module.params['cdrom']['type'] = cdrom_type
+
+ # If type changed.
+ if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+ config_changes_cdrom.append('type')
+
+ if cdrom_type == "iso":
+ # Check if ISO exists.
+                    # Check existence only. Ignore return value.
+ get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+ msg_prefix="VM check cdrom.iso_name: ")
+
+ # Is ISO image changed?
+ if (cdrom_iso_name and
+ (not vm_cdrom_params_list or
+ not vm_cdrom_params_list[0]['VDI'] or
+ cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+ config_changes_cdrom.append('iso_name')
+
+ if config_changes_cdrom:
+ config_changes.append({"cdrom": config_changes_cdrom})
+
+ config_changes_networks = []
+ config_new_networks = []
+
+ # Find allowed devices.
+ vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+ if self.module.params['networks']:
+                # The number of VIFs defined in module params has to be the same
+                # as or higher than the number of existing VIFs attached to the VM.
+                # We don't support removal of VIFs.
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+                    self.module.fail_json(msg="VM check networks: provided networks configuration has fewer interfaces than the target VM (%d < %d)!" %
+ (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+ # Find the highest occupied device.
+ if not self.vm_params['VIFs']:
+ vif_device_highest = "-1"
+ else:
+ vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+ for position in range(len(self.module.params['networks'])):
+ if position < len(self.vm_params['VIFs']):
+ vm_vif_params = self.vm_params['VIFs'][position]
+ else:
+ vm_vif_params = None
+
+ network_params = self.module.params['networks'][position]
+
+ network_name = network_params.get('name')
+
+ if network_name is not None and not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+ if network_name:
+                        # Check existence only. Ignore return value.
+ get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+ msg_prefix="VM check networks[%s]: " % position)
+
+ network_mac = network_params.get('mac')
+
+ if network_mac is not None:
+ network_mac = network_mac.lower()
+
+ if not is_mac(network_mac):
+ self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+ # IPv4 reconfiguration.
+ network_type = network_params.get('type')
+ network_ip = network_params.get('ip')
+ network_netmask = network_params.get('netmask')
+ network_prefix = None
+
+ # If networks.ip is specified and networks.type is not,
+ # then set networks.type to 'static'.
+ if not network_type and network_ip:
+ network_type = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+ network_type = "none"
+
+ if network_type and network_type == "static":
+ if network_ip is not None:
+ network_ip_split = network_ip.split('/')
+ network_ip = network_ip_split[0]
+
+ if network_ip and not is_valid_ip_addr(network_ip):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+ if len(network_ip_split) > 1:
+ network_prefix = network_ip_split[1]
+
+ if not is_valid_ip_prefix(network_prefix):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
+ if network_netmask is not None:
+ if not is_valid_ip_netmask(network_netmask):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+ network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+ elif network_prefix is not None:
+ network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
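+                        # e.g. a netmask of 255.255.255.0 maps to prefix 24 and a
+                        # prefix of 24 maps back to 255.255.255.0.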
+
+ # If any parameter is overridden at this point, update it.
+ if network_type:
+ network_params['type'] = network_type
+
+ if network_ip:
+ network_params['ip'] = network_ip
+
+ if network_netmask:
+ network_params['netmask'] = network_netmask
+
+ if network_prefix:
+ network_params['prefix'] = network_prefix
+
+ network_gateway = network_params.get('gateway')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway and not is_valid_ip_addr(network_gateway):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+ # IPv6 reconfiguration.
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params.get('ip6')
+ network_prefix6 = None
+
+ # If networks.ip6 is specified and networks.type6 is not,
+ # then set networks.type6 to 'static'.
+ if not network_type6 and network_ip6:
+ network_type6 = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+ network_type6 = "none"
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 is not None:
+ network_ip6_split = network_ip6.split('/')
+ network_ip6 = network_ip6_split[0]
+
+ if network_ip6 and not is_valid_ip6_addr(network_ip6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+ if len(network_ip6_split) > 1:
+ network_prefix6 = network_ip6_split[1]
+
+ if not is_valid_ip6_prefix(network_prefix6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+ # If any parameter is overridden at this point, update it.
+ if network_type6:
+ network_params['type6'] = network_type6
+
+ if network_ip6:
+ network_params['ip6'] = network_ip6
+
+ if network_prefix6:
+ network_params['prefix6'] = network_prefix6
+
+ network_gateway6 = network_params.get('gateway6')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+ # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']:
+ network_changes = []
+
+ if network_name and network_name != vm_vif_params['network']['name_label']:
+ network_changes.append('name')
+
+ if network_mac and network_mac != vm_vif_params['MAC'].lower():
+ network_changes.append('mac')
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
+ network_changes.append('type')
+
+ if network_type and network_type == "static":
+ if network_ip and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
+ network_changes.append('ip')
+
+ if network_prefix and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+
+ if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
+ network_changes.append('gateway')
+
+ if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
+ network_changes.append('type6')
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
+ network_changes.append('ip6')
+
+ if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
+ network_changes.append('prefix6')
+
+ if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
+ network_changes.append('gateway6')
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = self.vm_params['xenstore_data']
+
+ if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
+ network_changes.append('type')
+ need_poweredoff = True
+
+ if network_type and network_type == "static":
+ if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
+ network_changes.append('ip')
+ need_poweredoff = True
+
+ if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+ need_poweredoff = True
+
+ if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
+ network_changes.append('type6')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
+ network_changes.append('ip6')
+ need_poweredoff = True
+
+ if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
+ network_changes.append('prefix6')
+ need_poweredoff = True
+
+ if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway6')
+ need_poweredoff = True
+
+ config_changes_networks.append(network_changes)
+ # If this is a new VIF.
+ else:
+ if not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+ if network_type and network_type == "static" and network_ip and not network_netmask:
+ self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+ if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+ self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+ # Restart is needed if we are adding new network
+ # interface with IP/gateway parameters specified
+ # and custom agent is used.
+ if self.vm_params['customization_agent'] == "custom":
+ for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ if network_params.get(parameter):
+ need_poweredoff = True
+ break
+
+ if not vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+ # We need to place a new network interface right above the
+ # highest placed existing interface to maintain relative
+ # positions pairable with network interface specifications
+ # in module params.
+ vif_device = str(int(vif_device_highest) + 1)
+
+ if vif_device not in vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+ vif_devices_allowed.remove(vif_device)
+ vif_device_highest = vif_device
+
+ # For new VIFs we only track their position.
+ config_new_networks.append((position, vif_device))
+
+ # We should append config_changes_networks to config_changes only
+ # if there is at least one changed network, else skip.
+ for network_change in config_changes_networks:
+ if network_change:
+ config_changes.append({"networks_changed": config_changes_networks})
+ break
+
+ if config_new_networks:
+ config_changes.append({"networks_new": config_new_networks})
+
+ config_changes_custom_params = []
+
+ if self.module.params['custom_params']:
+ for position in range(len(self.module.params['custom_params'])):
+ custom_param = self.module.params['custom_params'][position]
+
+ custom_param_key = custom_param['key']
+ custom_param_value = custom_param['value']
+
+ if custom_param_key not in self.vm_params:
+ self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+ if custom_param_value != self.vm_params[custom_param_key]:
+ # We only need to track custom param position.
+ config_changes_custom_params.append(position)
+
+ if config_changes_custom_params:
+ config_changes.append({"custom_params": config_changes_custom_params})
+
+ if need_poweredoff:
+ config_changes.append('need_poweredoff')
+
+ return config_changes
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+ """Parses disk size parameters and returns disk size in bytes.
+
+ This method tries to parse disk size module parameters. It fails
+ with an error message if size cannot be parsed.
+
+ Args:
+            disk_params (dict): A dictionary with disk parameters.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ int: disk size in bytes if disk size is successfully parsed or
+ None if no disk size parameters were found.
+ """
+        # There should be only a single size spec but we make a list of all
+        # size specs just in case. Priority is given to 'size' but if it is not
+        # found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and use the
+        # first one found.
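+        # For example, under these rules "size: 1.5 GB" normalizes to
+        # int(1.5 * 1024 ** 3) bytes, while "size_mb: 512" normalizes to
+        # 512 * 1024 ** 2 bytes.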
+ disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+ if disk_size_spec:
+ try:
+ # size
+ if "size" in disk_size_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+ disk_size_m = size_regex.match(disk_params['size'])
+
+ if disk_size_m:
+ size = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
+ else:
+ raise ValueError
+ # size_tb, size_gb, size_mb, size_kb, size_b
+ else:
+ size = disk_params[disk_size_spec[0]]
+ unit = disk_size_spec[0].split('_')[-1]
+
+ if not unit:
+ unit = "b"
+ else:
+ unit = unit.lower()
+
+ if re.match(r'\d+\.\d+', size):
+ # We found float value in string, let's typecast it.
+ if unit == "b":
+ # If we found float but unit is bytes, we get the integer part only.
+ size = int(float(size))
+ else:
+ size = float(size)
+ else:
+ # We found int value in string, let's typecast it.
+ size = int(size)
+
+ if not size or size < 0:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
+
+ disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+ if unit in disk_units:
+ return int(size * (1024 ** disk_units[unit]))
+ else:
+ self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+ (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+ else:
+ return None
+
+ @staticmethod
+ def get_cdrom_type(vm_cdrom_params):
+ """Returns VM CD-ROM type."""
+ # TODO: implement support for detecting type host. No server to test
+ # this on at the moment.
+ if vm_cdrom_params['empty']:
+ return "none"
+ else:
+ return "iso"
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'poweredon']),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ uuid=dict(type='str'),
+ template=dict(type='str', aliases=['template_src']),
+ template_uuid=dict(type='str'),
+ is_template=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ hardware=dict(
+ type='dict',
+ options=dict(
+ num_cpus=dict(type='int'),
+ num_cpu_cores_per_socket=dict(type='int'),
+ memory_mb=dict(type='int'),
+ ),
+ ),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_tb=dict(type='str'),
+ size_gb=dict(type='str'),
+ size_mb=dict(type='str'),
+ size_kb=dict(type='str'),
+ size_b=dict(type='str'),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ sr=dict(type='str'),
+ sr_uuid=dict(type='str'),
+ ),
+ aliases=['disk'],
+ mutually_exclusive=[
+ ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+ ['sr', 'sr_uuid'],
+ ],
+ ),
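+        # Illustrative 'disks' module argument built from the options above
+        # (values are hypothetical): disks=[{'size_gb': '40', 'sr': 'Local storage'}].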
+ cdrom=dict(
+ type='dict',
+ options=dict(
+ type=dict(type='str', choices=['none', 'iso']),
+ iso_name=dict(type='str'),
+ ),
+ required_if=[
+ ['type', 'iso', ['iso_name']],
+ ],
+ ),
+ networks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', aliases=['name_label']),
+ mac=dict(type='str'),
+ type=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip6=dict(type='str'),
+ gateway6=dict(type='str'),
+ ),
+ aliases=['network'],
+ required_if=[
+ ['type', 'static', ['ip']],
+ ['type6', 'static', ['ip6']],
+ ],
+ ),
+ home_server=dict(type='str'),
+ custom_params=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ key=dict(type='str', required=True),
+ value=dict(type='raw', required=True),
+ ),
+ ),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ linked_clone=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ mutually_exclusive=[
+ ['template', 'template_uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ vm = XenServerVM(module)
+
+ # Find existing VM
+ if vm.exists():
+ if module.params['state'] == "absent":
+ vm.destroy()
+ result['changed'] = True
+ elif module.params['state'] == "present":
+ config_changes = vm.reconfigure()
+
+ if config_changes:
+ result['changed'] = True
+
+ # Make new disk and network changes more user friendly
+ # and informative.
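+                # For example (illustrative values): an entry such as
+                # {'networks_new': [(0, '1')]} returned by reconfigure() is expanded
+                # below into a list of dicts like {'position': 0, 'vif_device': '1'}
+                # merged with the non-None keys of module.params['networks'][0].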
+ for change in config_changes:
+ if isinstance(change, dict):
+ if change.get('disks_new'):
+ disks_new = []
+
+ for position, userdevice in change['disks_new']:
+ disk_new_params = {"position": position, "vbd_userdevice": userdevice}
+ disk_params = module.params['disks'][position]
+
+ for k in disk_params.keys():
+ if disk_params[k] is not None:
+ disk_new_params[k] = disk_params[k]
+
+ disks_new.append(disk_new_params)
+
+ if disks_new:
+ change['disks_new'] = disks_new
+
+ elif change.get('networks_new'):
+ networks_new = []
+
+ for position, device in change['networks_new']:
+ network_new_params = {"position": position, "vif_device": device}
+ network_params = module.params['networks'][position]
+
+ for k in network_params.keys():
+ if network_params[k] is not None:
+ network_new_params[k] = network_params[k]
+
+ networks_new.append(network_new_params)
+
+ if networks_new:
+ change['networks_new'] = networks_new
+
+ result['changes'] = config_changes
+
+ elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
+ result['changed'] = vm.set_power_state(module.params['state'])
+ elif module.params['state'] != "absent":
+ vm.deploy()
+ result['changed'] = True
+
+ if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts from. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts from. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
new file mode 100644
index 00000000..4a195ff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_powerstate
+short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+  This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+    - If C(state) is set to a value other than C(present), then the VM is transitioned into the required state and facts are returned.
+    - If C(state) is set to C(present), then the VM is only checked for existence and facts are returned.
+ type: str
+ default: present
+ choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
+ name:
+ description:
+ - Name of the VM to manage.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Power on VM
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: powered-on
+ delegate_to: localhost
+ register: facts
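+
+# A minimal additional sketch (not part of the original examples); it assumes the
+# same connection variables as above:
+- name: Gracefully shut down the guest OS, waiting at most 120 seconds
+  community.general.xenserver_guest_powerstate:
+    hostname: "{{ xenserver_hostname }}"
+    username: "{{ xenserver_username }}"
+    password: "{{ xenserver_password }}"
+    name: testvm_11
+    state: shutdown-guest
+    state_change_timeout: 120
+  delegate_to: localhost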
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Set VM power state.
+ if module.params['state'] != "present":
+ result['changed'] = vm.set_power_state(module.params['state'])
+
+ if module.params['wait_for_ip_address']:
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
new file mode 100644
index 00000000..2efb90cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init.
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+author: René Moser (@resmo)
+options:
+ filter:
+ description:
+ - Filter facts
+ type: str
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ community.general.cloud_init_data_facts:
+ register: result
+
+- ansible.builtin.debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ community.general.cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+  sample: '{
+    "status": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "errors": []
+        }
+    },
+    "result": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "init": {
+                "errors": [],
+                "finished": 1522066377.0185432,
+                "start": 1522066375.2648022
+            },
+            "init-local": {
+                "errors": [],
+                "finished": 1522066373.70919,
+                "start": 1522066373.4726632
+            },
+            "modules-config": {
+                "errors": [],
+                "finished": 1522066380.9097016,
+                "start": 1522066379.0011985
+            },
+            "modules-final": {
+                "errors": [],
+                "finished": 1522066383.56594,
+                "start": 1522066382.3449218
+            },
+            "stage": null
+        }
+    }
+  }'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data/"
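+# With the path above, gather_cloud_init_data_facts() below reads
+# /var/lib/cloud/data/result.json and /var/lib/cloud/data/status.json
+# when they exist.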
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+ for i in ['result', 'status']:
+ filter = module.params.get('filter')
+ if filter is None or filter == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = CLOUD_INIT_PATH + i + '.json'
+
+ if os.path.exists(json_file):
+ f = open(json_file, 'rb')
+ contents = to_text(f.read(), errors='surrogate_or_strict')
+ f.close()
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
new file mode 100644
index 00000000..fc62aa70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
@@ -0,0 +1,878 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudflare_dns
+author:
+- Michael Gruener (@mgruener)
+requirements:
+ - python >= 2.6
+short_description: Manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ api_token:
+ description:
+ - API token.
+ - Required for api token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ version_added: '0.2.0'
+ account_api_key:
+ description:
+ - Account API key.
+ - Required for api keys authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ aliases: [ account_api_token ]
+ account_email:
+ description:
+ - Account email. Required for api keys authentication.
+ type: str
+ required: false
+ algorithm:
+ description:
+ - Algorithm number.
+ - Required for C(type=DS) and C(type=SSHFP) when C(state=present).
+ type: int
+ cert_usage:
+ description:
+ - Certificate usage number.
+ - Required for C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ hash_type:
+ description:
+ - Hash type number.
+ - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 1, 2 ]
+ key_tag:
+ description:
+ - DNSSEC key tag.
+ - Needed for C(type=DS) when C(state=present).
+ type: int
+ port:
+ description:
+ - Service port.
+ - Required for C(type=SRV) and C(type=TLSA).
+ type: int
+ priority:
+ description:
+ - Record priority.
+    - Required for C(type=MX) and C(type=SRV).
+ default: 1
+ type: int
+ proto:
+ description:
+ - Service protocol. Required for C(type=SRV) and C(type=TLSA).
+ - Common values are TCP and UDP.
+ - Before Ansible 2.6 only TCP and UDP were available.
+ type: str
+ proxied:
+ description:
+ - Proxy through Cloudflare network or just use DNS.
+ type: bool
+ default: no
+ record:
+ description:
+ - Record to add.
+ - Required if C(state=present).
+    - Default is C(@) (i.e. the zone name).
+ type: str
+ default: '@'
+ aliases: [ name ]
+ selector:
+ description:
+ - Selector number.
+ - Required for C(type=TLSA) when C(state=present).
+ choices: [ 0, 1 ]
+ type: int
+ service:
+ description:
+ - Record service.
+    - Required for C(type=SRV).
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with C(state=present).
+ - This will delete all other records with the same record name and type.
+ type: bool
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls.
+ type: int
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ type: int
+ default: 1
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present).
+ - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7.
+ type: str
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ value:
+ description:
+ - The record value.
+ - Required for C(state=present).
+ type: str
+ aliases: [ content ]
+ weight:
+ description:
+ - Service weight.
+ - Required for C(type=SRV).
+ type: int
+ default: 1
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com").
+ - The Zone must already exist.
+ type: str
+ required: true
+ aliases: [ domain ]
+'''
+
+EXAMPLES = r'''
+- name: Create a test.example.net A record to point to 127.0.0.1
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ register: record
+
+- name: Create a record using api token
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ api_token: dummyapitoken
+
+- name: Create a example.net CNAME record to example.com
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ ttl: 600
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: absent
+
+- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ proxied: yes
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+# This deletes all other TXT records named "test.example.net"
+- name: Create TXT record "test.example.net" with value "unique value"
+ community.general.cloudflare_dns:
+ domain: example.net
+ record: test
+ type: TXT
+ value: unique value
+ solo: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Create an SRV record _foo._tcp.example.net
+ community.general.cloudflare_dns:
+ domain: example.net
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.example.net
+
+- name: Create a SSHFP record login.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: login
+ type: SSHFP
+ algorithm: 4
+ hash_type: 2
+ value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
+
+- name: Create a TLSA record _25._tcp.mail.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: mail
+ port: 25
+ proto: tcp
+ type: TLSA
+ cert_usage: 3
+ selector: 1
+ hash_type: 1
+ value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+
+- name: Create a DS record for subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: DS
+ key_tag: 5464
+ algorithm: 8
+ hash_type: 2
+ value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP or TLSA
+ type: dict
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: False
+ meta:
+ description: No documentation available.
+ returned: success
+ type: dict
+ sample: { auto_added: false }
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+def lowercase_string(param):
+ if not isinstance(param, str):
+ return param
+ return param.lower()
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.api_token = module.params['api_token']
+ self.account_api_key = module.params['account_api_key']
+ self.account_email = module.params['account_email']
+ self.algorithm = module.params['algorithm']
+ self.cert_usage = module.params['cert_usage']
+ self.hash_type = module.params['hash_type']
+ self.key_tag = module.params['key_tag']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = lowercase_string(module.params['proto'])
+ self.proxied = module.params['proxied']
+ self.selector = module.params['selector']
+ self.record = lowercase_string(module.params['record'])
+ self.service = lowercase_string(module.params['service'])
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = lowercase_string(module.params['zone'])
+
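+        # Normalize user input below: '@' expands to the zone name, CNAME/NS/MX/SRV
+        # values are lowercased and stripped of a trailing dot, SRV/TLSA service,
+        # proto and port labels get their leading underscores, and bare record
+        # names are fully qualified with the zone.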
+ if self.record == '@':
+ self.record = self.zone
+
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.').lower()
+
+ if (self.type == 'AAAA') and (self.value is not None):
+ self.value = self.value.lower()
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if (self.type == 'TLSA'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.port is not None):
+ self.port = '_' + str(self.port)
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
+
+ if (self.type == 'DS'):
+ if self.record == self.zone:
+ self.module.fail_json(msg="DS records only apply to subdomains.")
+
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
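+        # Authentication: a Bearer token header is sent when api_token is set;
+        # otherwise the account-wide X-Auth-Email / X-Auth-Key headers are used.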
+ if self.api_token:
+ headers = {
+ 'Authorization': 'Bearer ' + self.api_token,
+ 'Content-Type': 'application/json',
+ }
+ else:
+ headers = {
+ 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_key,
+ 'Content-Type': 'application/json',
+ }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
+
+ error_msg = ''
+ if info['status'] == 401:
+ # Unauthorized
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 403:
+ # Forbidden
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+
+ result = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ # Without a valid/parsed JSON response no more error processing can be done
+ if result is None:
+ self.module.fail_json(msg=error_msg)
+
+ if 'success' not in result:
+ error_msg += "; Unexpected error details: {0}".format(result.get('error'))
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
+
+ data = result['result']
+
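+        # Cloudflare list endpoints are paginated; when result_info reports more
+        # than one page, the remaining pages are fetched below and their 'result'
+        # lists are appended to data before returning.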
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+ parameters = ['page={0}'.format(next_page)]
+ # strip "page" parameter from call parameters (if there are any)
+ if '?' in api_call:
+ raw_api_call, query = api_call.split('?', 1)
+ parameters += [param for param in query.split('&') if not param.startswith('page')]
+ else:
+ raw_api_call = api_call
+ while next_page <= pagination['total_pages']:
+ raw_api_call += '?' + '&'.join(parameters)
+ result, status = self._cf_simple_api_call(raw_api_call, method, payload)
+ data += result['result']
+ next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self, zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self, name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+        # Necessary because passing None as the value is a deliberate override of
+        # the user-set module value; the empty-string default falls back to self.value.
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urlencode(query)
+
+ records, status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self, **kwargs):
+ params = {}
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
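+        # For types that Cloudflare stores as structured data, the lookup content is
+        # rebuilt as the tab-separated string this module uses for content matching,
+        # e.g. (illustrative) SRV: "<weight>\t<port>\t<target>" and
+        # SSHFP: "<algorithm>\t<hash_type>\t<fingerprint>".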
+ if params['type'] == 'SRV':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ elif params['type'] == 'DS':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'SSHFP':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'TLSA':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+
+ for rr in records:
+ if params['solo']:
+ if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ else:
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ return self.changed
+
+ def ensure_dns_record(self, **kwargs):
+ params = {}
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ if params['type'] == 'DS':
+ for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
+ ds_data = {
+ "key_tag": params['key_tag'],
+ "algorithm": params['algorithm'],
+ "digest_type": params['hash_type'],
+ "digest": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': ds_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'SSHFP':
+ for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
+ sshfp_data = {
+ "fingerprint": params['value'],
+ "type": params['hash_type'],
+ "algorithm": params['algorithm'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': sshfp_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'TLSA':
+ for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ tlsa_data = {
+ "usage": params['cert_usage'],
+ "selector": params['selector'],
+ "matching_type": params['hash_type'],
+ "certificate": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": search_record,
+ 'data': tlsa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+        # In theory this should be impossible, as Cloudflare does not allow
+        # the creation of duplicate records, but let's cover it anyway.
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
+ if (cur_record['data'] != new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
+ self.changed = True
+ return result, self.changed
+ else:
+ return records, self.changed
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
+ self.changed = True
+ return result, self.changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_token=dict(type='str', required=False, no_log=True),
+ account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str', required=False),
+ algorithm=dict(type='int'),
+ cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ hash_type=dict(type='int', choices=[1, 2]),
+ key_tag=dict(type='int'),
+ port=dict(type='int'),
+ priority=dict(type='int', default=1),
+ proto=dict(type='str'),
+ proxied=dict(type='bool', default=False),
+ record=dict(type='str', default='@', aliases=['name']),
+ selector=dict(type='int', choices=[0, 1]),
+ service=dict(type='str'),
+ solo=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ timeout=dict(type='int', default=30),
+ ttl=dict(type='int', default=1),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ value=dict(type='str', aliases=['content']),
+ weight=dict(type='int', default=1),
+ zone=dict(type='str', required=True, aliases=['domain']),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['record', 'type', 'value']),
+ ('state', 'absent', ['record']),
+ ('type', 'SRV', ['proto', 'service']),
+ ('type', 'TLSA', ['proto', 'port']),
+ ],
+ )
+
+ if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
+ module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
+ if module.params['type'] == 'SRV':
+ if not ((module.params['weight'] is not None and module.params['port'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['weight'] is None and module.params['port'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'SSHFP':
+ if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'TLSA':
+ if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'DS':
+ if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+    # perform add, delete or update of one or more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
+
+ module.exit_json(changed=changed, result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
new file mode 100644
index 00000000..dd8a5f50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: "Add, modify & delete services within a consul cluster."
+description:
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
+ consul's discovery mechanism. It may optionally supply a check definition,
+    a periodic service test to notify the consul cluster of the service's health.
+ - "Checks may also be registered per node e.g. disk usage, or cpu usage and
+ notify the health of the entire node to the cluster.
+ Service level checks do not require a check name or id as these are derived
+ by Consul from the Service name and id respectively by appending 'service:'
+ Node level checks require a I(check_name) and optionally a I(check_id)."
+ - Currently, there is no complete way to retrieve the script, interval or ttl
+ metadata for a registered check. Without this metadata it is not possible to
+    tell if the data supplied with ansible represents a change to a check. As a
+    result this module does not attempt to determine changes and will always
+    report that a change occurred. An API method is planned to supply this
+    metadata, at which point change management will be added.
+ - "See U(http://consul.io) for more details."
+requirements:
+ - python-consul
+ - requests
+author: "Steve Gargan (@sgargan)"
+options:
+ state:
+ description:
+ - register or deregister the consul service, defaults to present
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+      - Unique name for the service on a node, must be unique per node;
+        required if registering a service. May be omitted if registering
+        a node level check.
+ service_id:
+ type: str
+ description:
+ - the ID for the service, must be unique per node. If I(state=absent),
+ defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+      - host of the consul agent, defaults to localhost
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - the protocol scheme on which the consul agent is running
+ default: http
+ validate_certs:
+ description:
+ - whether to verify the TLS certificate of the consul agent
+ type: bool
+ default: 'yes'
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - the port on which the service is listening. Can optionally be supplied for
+ registration of a service, i.e. if I(service_name) or I(service_id) is set
+ service_address:
+ type: str
+ description:
+ - the address to advertise that the service will be listening on.
+ This value will be passed as the I(address) parameter to Consul's
+ U(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ tags:
+ type: list
+ description:
+ - tags that will be attached to the service registration.
+ script:
+ type: str
+ description:
+ - the script/command that will be run periodically to check the health
+ of the service. Scripts require I(interval) and vice versa.
+ interval:
+ type: str
+ description:
+      - the interval at which the service check will be run. This is a number
+        with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g.
+        C(15s) or C(1m). If no suffix is supplied, C(s) will be used by default,
+        e.g. C(1) will be C(1s). Required if the I(script) parameter is specified.
+ check_id:
+ type: str
+ description:
+ - an ID for the service check. If I(state=absent), defaults to
+ I(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - a name for the service check. Required if standalone, ignored if
+ part of service definition.
+ ttl:
+ type: str
+ description:
+      - checks can be registered with a ttl instead of a I(script) and I(interval).
+        This means that the service will check in with the agent before the
+        ttl expires. If it doesn't, the check will be considered failed.
+        Required if registering a check and the I(script) and I(interval) are missing.
+        Similar to the interval, this is a number with a C(s) or C(m) suffix to
+        signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no suffix
+        is supplied, C(s) will be used by default, e.g. C(1) will be C(1s).
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that consul
+ will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+ The format is C(host:port), for example C(localhost:80).
+ I(interval) must also be provided with this option.
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - checks can be registered with an HTTP endpoint. This means that consul
+ will check that the http endpoint returns a successful HTTP status.
+ I(interval) must also be provided with this option.
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ token:
+ type: str
+ description:
+ - the token key identifying an ACL rule set. May be required to register services.
+'''
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+
+- name: Register nginx service with curl check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ script: curl http://localhost
+ interval: 60s
+
+- name: Register nginx with a TCP check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ tcp: localhost:80
+
+- name: Register nginx with an http check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+- name: Remove nginx service
+ community.general.consul:
+ service_name: nginx
+ state: absent
+
+- name: Register celery worker service
+ community.general.consul:
+ service_name: celery-worker
+ tags:
+ - prod
+ - worker
+
+- name: Create a node level check to test disk usage
+ community.general.consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: /opt/disk_usage.py
+ interval: 5m
+
+- name: Register an http check against a service that's already registered
+ community.general.consul:
+ check_name: nginx-check2
+ check_id: nginx-check2
+ service_id: nginx
+ interval: 60s
+ http: http://localhost:80/morestatus
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+
+ class PatchedConsulAgentService(consul.Consul.Agent.Service):
+ def deregister(self, service_id, token=None):
+ params = {}
+ if token:
+ params['token'] = token
+ return self.agent.http.put(consul.base.CB.bool(),
+ '/v1/agent/service/deregister/%s' % service_id,
+ params=params)
+
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+ state = module.params.get('state')
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params.get('service_id') or module.params.get('service_name')
+ check_id = module.params.get('check_id') or module.params.get('check_name')
+ if not (service_id or check_id):
+ module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. currently there is no way to
+        retrieve the full metadata of an existing check through the consul api.
+ Without this we can't compare to the supplied check and so we must assume
+ a change. '''
+ if not check.name and not check.service_id:
+        module.fail_json(msg='a check name is required for a node level check (one not attached to a service)')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ tcp=check.tcp,
+ http=check.http,
+ timeout=check.timeout,
+ service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+ ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id_or_name(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or not existing == service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id_or_name(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id_or_name(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+ consulClient = consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+ consulClient.agent.service = PatchedConsulAgentService(consulClient)
+ return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+    ''' iterate the registered services and find one with the given id or name '''
+ for name, service in consul_api.agent.services().items():
+ if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+ if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1:
+ module.fail_json(
+ msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')
+
+ if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'):
+
+ return ConsulCheck(
+ module.params.get('check_id'),
+ module.params.get('check_name'),
+ module.params.get('check_node'),
+ module.params.get('check_host'),
+ module.params.get('script'),
+ module.params.get('interval'),
+ module.params.get('ttl'),
+ module.params.get('notes'),
+ module.params.get('tcp'),
+ module.params.get('http'),
+ module.params.get('timeout'),
+ module.params.get('service_id'),
+ )
+
+
+def parse_service(module):
+ if module.params.get('service_name'):
+ return ConsulService(
+ module.params.get('service_id'),
+ module.params.get('service_name'),
+ module.params.get('service_address'),
+ module.params.get('service_port'),
+ module.params.get('tags'),
+ )
+ elif not module.params.get('service_name'):
+ module.fail_json(msg="service_name is required to configure a service.")
+
+
+class ConsulService():
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self.checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ optional = {}
+
+ if self.port:
+ optional['port'] = self.port
+
+ if len(self.checks) > 0:
+ optional['check'] = self.checks[0].check
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ tags=self.tags,
+ **optional)
+
+ def add_check(self, check):
+ self.checks.append(check)
+
+ def checks(self):
+ return self.checks
+
+ def has_checks(self):
+ return len(self.checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.id == other.id and
+ self.name == other.name and
+ self.port == other.port and
+ self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self.checks) > 0:
+ data['check'] = self.checks[0].to_dict()
+ return data
+
+
+class ConsulCheck(object):
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.service_id = service_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.tcp = tcp
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+ if tcp:
+ if interval is None:
+ raise Exception('tcp check must specify interval')
+
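+            # "host:port" is split on the last colon (the host group is greedy), so
+            # bracketed IPv6 literals such as "[::1]:8500" keep their address part;
+            # the surrounding brackets are stripped before registering the check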
+ regex = r"(?P<host>.*)(?::)(?P<port>(?:[0-9]+))$"
+ match = re.match(regex, tcp)
+
+ if match is None:
+ raise Exception('tcp check must be in host:port format')
+
+ self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
+ def validate_duration(self, name, duration):
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
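+            # values without a recognised unit get a seconds suffix appended,
+            # e.g. '10' becomes '10s', while '1m' or '500ms' pass through unchanged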
+ if not any((duration.endswith(suffix) for suffix in duration_units)):
+ duration = "{0}s".format(duration)
+ return duration
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+ notes=self.notes,
+ check=self.check)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.check_id == other.check_id and
+ self.service_id == other.service_id and
+ self.name == other.name and
+ self.script == other.script and
+ self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+ self._add(data, 'name', attr='check_name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'tcp')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ self._add(data, 'service_id')
+ return data
+
+ def _add(self, data, key, attr=None):
+ try:
+ if attr is None:
+ attr = key
+ data[key] = getattr(self, attr)
+ except Exception:
+ pass
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ check_id=dict(required=False),
+ check_name=dict(required=False),
+ check_node=dict(required=False),
+ check_host=dict(required=False),
+ notes=dict(required=False),
+ script=dict(required=False),
+ service_id=dict(required=False),
+ service_name=dict(required=False),
+ service_address=dict(required=False, type='str', default=None),
+ service_port=dict(required=False, type='int', default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(required=False, type='str'),
+ ttl=dict(required=False, type='str'),
+ tcp=dict(required=False, type='str'),
+ http=dict(required=False, type='str'),
+ timeout=dict(required=False, type='str'),
+ tags=dict(required=False, type='list'),
+ token=dict(required=False, no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ test_dependencies(module)
+
+ try:
+ register_with_consul(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
new file mode 100644
index 00000000..06feeea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_acl
+short_description: Manipulate Consul ACL keys and rules
+description:
+ - Allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ mgmt_token:
+ description:
+ - a management token is required to manipulate the acl lists
+ required: true
+ state:
+ description:
+ - whether the ACL pair should be present or absent
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ token_type:
+ description:
+ - the type of token that should be created
+ choices: ['client', 'management']
+ default: client
+ name:
+ description:
+      - the name that should be associated with the acl key; this is opaque
+        to Consul
+ required: false
+ token:
+ description:
+ - the token key identifying an ACL rule set. If generated by consul
+ this will be a UUID
+ required: false
+ rules:
+ type: list
+ description:
+ - rules that should be associated with a given token
+ required: false
+ host:
+ description:
+      - host of the consul agent, defaults to localhost
+ required: false
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ validate_certs:
+ type: bool
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+requirements:
+ - python-consul
+ - pyhcl
+ - requests
+'''
+
+EXAMPLES = """
+- name: Create an ACL with rules
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ rules:
+ - key: "foo"
+ policy: read
+ - key: "private/foo"
+ policy: deny
+
+- name: Create an ACL with a specific token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: my-token
+ rules:
+ - key: "foo"
+ policy: read
+
+- name: Update the rules associated to an ACL token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: some_client_token
+ rules:
+ - event: "bbq"
+ policy: write
+ - key: "foo"
+ policy: read
+ - key: "private"
+ policy: deny
+ - keyring: write
+ - node: "hgs4"
+ policy: write
+ - operator: read
+ - query: ""
+ policy: write
+ - service: "consul"
+ policy: write
+ - session: "standup"
+ policy: write
+
+- name: Remove a token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
+ state: absent
+"""
+
+RETURN = """
+token:
+ description: the token associated to the ACL (the ACL's ID)
+ returned: success
+ type: str
+ sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
+rules:
+ description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
+ Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
+ returned: I(status) == "present"
+ type: str
+ sample: {
+ "key": {
+ "foo": {
+ "policy": "write"
+ },
+ "bar": {
+ "policy": "deny"
+ }
+ }
+ }
+operation:
+ description: the operation performed on the ACL
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+try:
+ import consul
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+try:
+ from requests.exceptions import ConnectionError
+ has_requests = True
+except ImportError:
+ has_requests = False
+
+from collections import defaultdict
+from ansible.module_utils.basic import to_text, AnsibleModule
+
+
+RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
+
+MANAGEMENT_PARAMETER_NAME = "mgmt_token"
+HOST_PARAMETER_NAME = "host"
+SCHEME_PARAMETER_NAME = "scheme"
+VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
+NAME_PARAMETER_NAME = "name"
+PORT_PARAMETER_NAME = "port"
+RULES_PARAMETER_NAME = "rules"
+STATE_PARAMETER_NAME = "state"
+TOKEN_PARAMETER_NAME = "token"
+TOKEN_TYPE_PARAMETER_NAME = "token_type"
+
+PRESENT_STATE_VALUE = "present"
+ABSENT_STATE_VALUE = "absent"
+
+CLIENT_TOKEN_TYPE_VALUE = "client"
+MANAGEMENT_TOKEN_TYPE_VALUE = "management"
+
+REMOVE_OPERATION = "remove"
+UPDATE_OPERATION = "update"
+CREATE_OPERATION = "create"
+
+_POLICY_JSON_PROPERTY = "policy"
+_RULES_JSON_PROPERTY = "Rules"
+_TOKEN_JSON_PROPERTY = "ID"
+_TOKEN_TYPE_JSON_PROPERTY = "Type"
+_NAME_JSON_PROPERTY = "Name"
+_POLICY_YML_PROPERTY = "policy"
+_POLICY_HCL_PROPERTY = "policy"
+
+_ARGUMENT_SPEC = {
+ MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
+ HOST_PARAMETER_NAME: dict(default='localhost'),
+ SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
+ VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
+ NAME_PARAMETER_NAME: dict(required=False),
+ PORT_PARAMETER_NAME: dict(default=8500, type='int'),
+ RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
+ STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
+ TOKEN_PARAMETER_NAME: dict(required=False),
+ TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
+ default=CLIENT_TOKEN_TYPE_VALUE)
+}
+
+
+def set_acl(consul_client, configuration):
+ """
+ Sets an ACL based on the given configuration.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of setting the ACL
+ """
+ acls_as_json = decode_acls_as_json(consul_client.acl.list())
+ existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
+ existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
+ if None in existing_acls_mapped_by_token:
+ raise AssertionError("expecting ACL list to be associated to a token: %s" %
+ existing_acls_mapped_by_token[None])
+
+ if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
+ # No token but name given so can get token from name
+ configuration.token = existing_acls_mapped_by_name[configuration.name].token
+
+ if configuration.token and configuration.token in existing_acls_mapped_by_token:
+ return update_acl(consul_client, configuration)
+ else:
+ if configuration.token in existing_acls_mapped_by_token:
+ raise AssertionError()
+ if configuration.name in existing_acls_mapped_by_name:
+ raise AssertionError()
+ return create_acl(consul_client, configuration)
+
+
+def update_acl(consul_client, configuration):
+ """
+ Updates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the update
+ """
+ existing_acl = load_acl_with_token(consul_client, configuration.token)
+ changed = existing_acl.rules != configuration.rules
+
+ if changed:
+ name = configuration.name if configuration.name is not None else existing_acl.name
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
+ updated_token = consul_client.acl.update(
+ configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
+ if updated_token != configuration.token:
+ raise AssertionError()
+
+ return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
+
+
+def create_acl(consul_client, configuration):
+ """
+ Creates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the creation
+ """
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
+ token = consul_client.acl.create(
+ name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
+ rules = configuration.rules
+ return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
+
+
+def remove_acl(consul, configuration):
+ """
+ Removes an ACL.
+ :param consul: the consul client
+ :param configuration: the run configuration
+ :return: the output of the removal
+ """
+ token = configuration.token
+ changed = consul.acl.info(token) is not None
+ if changed:
+ consul.acl.destroy(token)
+ return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
+
+
+def load_acl_with_token(consul, token):
+ """
+ Loads the ACL with the given token (token == rule ID).
+ :param consul: the consul client
+ :param token: the ACL "token"/ID (not name)
+ :return: the ACL associated to the given token
+ :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
+ """
+ acl_as_json = consul.acl.info(token)
+ if acl_as_json is None:
+ raise ConsulACLNotFoundException(token)
+ return decode_acl_as_json(acl_as_json)
+
+
+def encode_rules_as_hcl_string(rules):
+ """
+ Converts the given rules into the equivalent HCL (string) representation.
+ :param rules: the rules
+    :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
+ note for justification)
+ """
+ if len(rules) == 0:
+        # Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rules` property will be an
+        # empty string if there are no rules...
+ return None
+ rules_as_hcl = ""
+ for rule in rules:
+ rules_as_hcl += encode_rule_as_hcl_string(rule)
+ return rules_as_hcl
+
+
+def encode_rule_as_hcl_string(rule):
+ """
+ Converts the given rule into the equivalent HCL (string) representation.
+ :param rule: the rule
+ :return: the equivalent HCL (string) representation of the rule
+ """
+ if rule.pattern is not None:
+ return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
+ else:
+ return '%s = "%s"\n' % (rule.scope, rule.policy)
+
+
+def decode_rules_as_hcl_string(rules_as_hcl):
+ """
+ Converts the given HCL (string) representation of rules into a list of rule domain models.
+ :param rules_as_hcl: the HCL (string) representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules_as_hcl = to_text(rules_as_hcl)
+ rules_as_json = hcl.loads(rules_as_hcl)
+ return decode_rules_as_json(rules_as_json)
+
+
+def decode_rules_as_json(rules_as_json):
+ """
+ Converts the given JSON representation of rules into a list of rule domain models.
+ :param rules_as_json: the JSON representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ for scope in rules_as_json:
+ if not isinstance(rules_as_json[scope], dict):
+ rules.add(Rule(scope, rules_as_json[scope]))
+ else:
+ for pattern, policy in rules_as_json[scope].items():
+ rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
+ return rules
+
+
+def encode_rules_as_json(rules):
+ """
+ Converts the given rules into the equivalent JSON representation according to the documentation:
+ https://www.consul.io/docs/guides/acl.html#rule-specification.
+ :param rules: the rules
+ :return: JSON representation of the given rules
+ """
+ rules_as_json = defaultdict(dict)
+ for rule in rules:
+ if rule.pattern is not None:
+ if rule.pattern in rules_as_json[rule.scope]:
+ raise AssertionError()
+ rules_as_json[rule.scope][rule.pattern] = {
+ _POLICY_JSON_PROPERTY: rule.policy
+ }
+ else:
+ if rule.scope in rules_as_json:
+ raise AssertionError()
+ rules_as_json[rule.scope] = rule.policy
+ return rules_as_json
+
+
+def decode_rules_as_yml(rules_as_yml):
+ """
+ Converts the given YAML representation of rules into a list of rule domain models.
+ :param rules_as_yml: the YAML representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ if rules_as_yml:
+ for rule_as_yml in rules_as_yml:
+ rule_added = False
+ for scope in RULE_SCOPES:
+ if scope in rule_as_yml:
+ if rule_as_yml[scope] is None:
+ raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
+ policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
+ else rule_as_yml[scope]
+ pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
+ rules.add(Rule(scope, policy, pattern))
+ rule_added = True
+ break
+ if not rule_added:
+ raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
+ return rules
+
+
+def decode_acl_as_json(acl_as_json):
+ """
+ Converts the given JSON representation of an ACL into the equivalent domain model.
+ :param acl_as_json: the JSON representation of an ACL
+ :return: the equivalent domain model to the given ACL
+ """
+ rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
+ rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
+ else RuleCollection()
+ return ACL(
+ rules=rules,
+ token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
+ token=acl_as_json[_TOKEN_JSON_PROPERTY],
+ name=acl_as_json[_NAME_JSON_PROPERTY]
+ )
+
+
+def decode_acls_as_json(acls_as_json):
+ """
+ Converts the given JSON representation of ACLs into a list of ACL domain models.
+ :param acls_as_json: the JSON representation of a collection of ACLs
+ :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
+ """
+ return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
+
+
+class ConsulACLNotFoundException(Exception):
+ """
+    Exception raised if an ACL with the given token is not found.
+ """
+
+
+class Configuration:
+ """
+ Configuration for this module.
+ """
+
+ def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
+ rules=None, state=None, token=None, token_type=None):
+ self.management_token = management_token # type: str
+ self.host = host # type: str
+ self.scheme = scheme # type: str
+ self.validate_certs = validate_certs # type: bool
+ self.name = name # type: str
+ self.port = port # type: int
+ self.rules = rules # type: RuleCollection
+ self.state = state # type: str
+ self.token = token # type: str
+ self.token_type = token_type # type: str
+
+
+class Output:
+ """
+ Output of an action of this module.
+ """
+
+ def __init__(self, changed=None, token=None, rules=None, operation=None):
+ self.changed = changed # type: bool
+ self.token = token # type: str
+ self.rules = rules # type: RuleCollection
+ self.operation = operation # type: str
+
+
+class ACL:
+ """
+ Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
+ """
+
+ def __init__(self, rules, token_type, token, name):
+ self.rules = rules
+ self.token_type = token_type
+ self.token = token
+ self.name = name
+
+ def __eq__(self, other):
+ return other \
+ and isinstance(other, self.__class__) \
+ and self.rules == other.rules \
+ and self.token_type == other.token_type \
+ and self.token == other.token \
+ and self.name == other.name
+
+ def __hash__(self):
+ return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
+
+
+class Rule:
+ """
+ ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
+ """
+
+ def __init__(self, scope, policy, pattern=None):
+ self.scope = scope
+ self.policy = policy
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.scope == other.scope \
+ and self.policy == other.policy \
+ and self.pattern == other.pattern
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
+
+ def __str__(self):
+ return encode_rule_as_hcl_string(self)
+
+
+class RuleCollection:
+ """
+ Collection of ACL rules, which are part of a Consul ACL.
+ """
+
+ def __init__(self):
+ self._rules = {}
+ for scope in RULE_SCOPES:
+ self._rules[scope] = {}
+
+ def __iter__(self):
+ all_rules = []
+ for scope, pattern_keyed_rules in self._rules.items():
+ for pattern, rule in pattern_keyed_rules.items():
+ all_rules.append(rule)
+ return iter(all_rules)
+
+ def __len__(self):
+ count = 0
+ for scope in RULE_SCOPES:
+ count += len(self._rules[scope])
+ return count
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and set(self) == set(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return encode_rules_as_hcl_string(self)
+
+ def add(self, rule):
+ """
+ Adds the given rule to this collection.
+ :param rule: model of a rule
+ :raises ValueError: raised if there already exists a rule for a given scope and pattern
+ """
+ if rule.pattern in self._rules[rule.scope]:
+ patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
+ raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info))
+ self._rules[rule.scope][rule.pattern] = rule
+
+
+def get_consul_client(configuration):
+ """
+ Gets a Consul client for the given configuration.
+
+ Does not check if the Consul client can connect.
+ :param configuration: the run configuration
+ :return: Consul client
+ """
+ token = configuration.management_token
+ if token is None:
+ token = configuration.token
+ if token is None:
+ raise AssertionError("Expecting the management token to always be set")
+ return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
+ verify=configuration.validate_certs, token=token)
+
+
+def check_dependencies():
+ """
+ Checks that the required dependencies have been imported.
+ :exception ImportError: if it is detected that any of the required dependencies have not been imported
+ """
+ if not python_consul_installed:
+ raise ImportError("python-consul required for this module. "
+ "See: https://python-consul.readthedocs.io/en/latest/#installation")
+
+ if not pyhcl_installed:
+ raise ImportError("pyhcl required for this module. "
+ "See: https://pypi.org/project/pyhcl/")
+
+ if not has_requests:
+ raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
+
+
+def main():
+ """
+ Main method.
+ """
+ module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
+
+ try:
+ check_dependencies()
+ except ImportError as e:
+ module.fail_json(msg=str(e))
+
+ configuration = Configuration(
+ management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
+ host=module.params.get(HOST_PARAMETER_NAME),
+ scheme=module.params.get(SCHEME_PARAMETER_NAME),
+ validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
+ name=module.params.get(NAME_PARAMETER_NAME),
+ port=module.params.get(PORT_PARAMETER_NAME),
+ rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
+ state=module.params.get(STATE_PARAMETER_NAME),
+ token=module.params.get(TOKEN_PARAMETER_NAME),
+ token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
+ )
+ consul_client = get_consul_client(configuration)
+
+ try:
+ if configuration.state == PRESENT_STATE_VALUE:
+ output = set_acl(consul_client, configuration)
+ else:
+ output = remove_acl(consul_client, configuration)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ configuration.host, configuration.port, str(e)))
+ raise
+
+ return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
+ if output.rules is not None:
+ return_values["rules"] = encode_rules_as_json(output.rules)
+ module.exit_json(**return_values)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
new file mode 100644
index 00000000..ee5c3970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2018 Genome Research Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster
+description:
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as C(value).
+  - If the C(key) represents a prefix, note that when a value is removed the existing
+    value, if any, is returned as part of the results.
+ - See http://www.consul.io/docs/agent/http.html#kv for more details.
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ state:
+ description:
+ - The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key
+ contents will be set to the value supplied and `changed` will be set to `true` only if the value was
+ different to the current contents. If the state is 'present' and `value` is not set, the existing value
+ associated to the key will be returned. The state 'absent' will remove the key/value pair,
+ again 'changed' will be set to true only if the key actually existed
+ prior to the removal. An attempt can be made to obtain or free the
+ lock associated with a key/value pair with the states 'acquire' or
+        'release' respectively. A valid session must be supplied to make the
+        attempt; changed will be true if the attempt is successful, false
+        otherwise.
+ choices: [ absent, acquire, present, release ]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: yes
+ value:
+ description:
+      - The value to be associated with the given key; required if C(state)
+        is C(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to C(yes).
+ type: bool
+ retrieve:
+ description:
+ - If the I(state) is C(present) and I(value) is set, perform a
+ read after setting the value and return this value.
+ default: True
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock
+ associated with a key/value pair.
+ type: str
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to
+ the key value pair
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ Consul will only put the key if it does not already exist. If the
+ C(cas) value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the consul agent.
+ type: bool
+ default: 'yes'
+'''
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
+# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+ community.general.consul_kv:
+ key: somekey
+ register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+ community.general.consul_kv:
+ key: somekey
+ value: somevalue
+
+- name: Remove a key from the store
+ community.general.consul_kv:
+ key: somekey
+ state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+ community.general.consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: top_secret
+
+- name: Register a key/value pair with an associated session
+ community.general.consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
+'''
+
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
+
+
+def _has_value_changed(consul_client, key, target_value):
+ """
+ Uses the given Consul client to determine if the value associated to the given key is different to the given target
+ value.
+ :param consul_client: Consul connected client
+ :param key: key in Consul
+ :param target_value: value to be associated to the key
+ :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+ value has changed (i.e. the stored value is not the target value)
+ """
+ index, existing = consul_client.kv.get(key)
+ if not existing:
+ return index, True
+ try:
+ changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+ return index, changed
+ except UnicodeError:
+ # Existing value was not decodable but all values we set are valid utf-8
+ return index, True
+
+
+def execute(module):
+ state = module.params.get('state')
+
+ if state == 'acquire' or state == 'release':
+ lock(module, state)
+ elif state == 'present':
+ if module.params.get('value') is NOT_SET:
+ get_value(module)
+ else:
+ set_value(module)
+ elif state == 'absent':
+ remove_value(module)
+ else:
+ module.exit_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+        module.fail_json(
+ msg='%s of lock for %s requested but no session supplied' %
+ (state, key))
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
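+    # acquiring or releasing a lock reuses the ordinary KV put call with the
+    # session passed as acquire= or release=; put() returns True only when the
+    # lock operation succeeded, which is what is reported as 'changed'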
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def get_value(module):
+ consul_api = get_consul_api(module)
+ key = module.params.get('key')
+
+ index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+ module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if value is NOT_SET:
+ raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+ stored = None
+ if module.params.get('retrieve'):
+ index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+ ''' remove the value associated with the given key. if the recurse parameter
+ is set then any key prefixed with the given key will be removed. '''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+ changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module, token=None):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cas=dict(type='str'),
+ flags=dict(type='str'),
+ key=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ port=dict(type='int', default=8500),
+ recurse=dict(type='bool'),
+ retrieve=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
+ token=dict(type='str', no_log=True),
+ value=dict(type='str', default=NOT_SET),
+ session=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
new file mode 100644
index 00000000..f28d3a5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+ to implement distributed locks. In depth documentation for working with
+ sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+author:
+- Steve Gargan (@sgargan)
+options:
+ id:
+ description:
+ - ID of the session, required when I(state) is either C(info) or
+ C(remove).
+ type: str
+ state:
+ description:
+ - Whether the session should be present i.e. created if it doesn't
+ exist, or absent, removed if present. If created, the I(id) for the
+ session is returned in the output. If C(absent), I(id) is
+ required to remove the session. Info for a single session, all the
+ sessions for a node or all available sessions can be retrieved by
+ specifying C(info), C(node) or C(list) for the I(state); for C(node)
+ or C(info), the node I(name) or session I(id) is required as parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+ acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+      - The name of the node with which the session will be associated.
+        By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+ - Checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+ associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ type: list
+ host:
+ description:
+ - The host of the consul agent; defaults to localhost.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: True
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+ community.general.consul_session:
+ name: session1
+
+- name: Register a session with an existing check
+ community.general.consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: Register a session with lock_delay
+ community.general.consul_session:
+ name: session_with_delay
+ delay: 20
+
+- name: Retrieve info about session by id
+ community.general.consul_session:
+ id: session_id
+ state: info
+
+- name: Retrieve active sessions
+ community.general.consul_session:
+ state: list
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list'),
+ delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+ ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
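A minimal sketch of the session lifecycle the module drives through python-consul, assuming a local agent on the default port; the session name is illustrative:

    import consul

    c = consul.Consul(host='localhost', port=8500)

    # Create a session (mirrors state=present); lock_delay is in seconds.
    session_id = c.session.create(name='example-session', behavior='release', lock_delay=15)

    # Inspect it (state=info) and list all sessions (state=list).
    index, info = c.session.info(session_id)
    index, sessions = c.session.list()

    # Destroy it (state=absent).
    c.session.destroy(session_id)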
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
new file mode 100644
index 00000000..78838429
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+#
+# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: "Set or delete key value pairs from an etcd3 cluster"
+requirements:
+ - etcd3
+description:
+ - Sets or deletes values in an etcd3 cluster using its v3 API.
+ - Needs the python etcd3 library to work.
+options:
+ key:
+ description:
+ - the key where the information is stored in the cluster
+ required: true
+ value:
+ description:
+ - the information stored
+ required: true
+ host:
+ description:
+ - the IP address of the cluster
+ default: 'localhost'
+ port:
+ description:
+ - the port number used to connect to the cluster
+ default: 2379
+ state:
+ description:
+ - the state of the value for the key.
+ - can be present or absent
+ required: true
+ choices: [ present, absent ]
+ user:
+ description:
+ - The etcd user to authenticate with.
+ password:
+ description:
+ - The password to use for authentication.
+ - Required if I(user) is defined.
+ ca_cert:
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if I(client_cert) and I(client_key) are defined.
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if I(client_key) is defined.
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if I(client_cert) is defined.
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+author:
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+'''
+
+EXAMPLES = """
+- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ host: "localhost"
+ port: 2379
+ state: "present"
+
+- name: Authenticate using user/password combination with a timeout of 10 seconds
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ user: "someone"
+ password: "password123"
+ timeout: 10
+
+- name: Authenticate using TLS certificates
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ ca_cert: "/etc/ssl/certs/CA_CERT.pem"
+ client_cert: "/etc/ssl/certs/cert.crt"
+ client_key: "/etc/ssl/private/key.pem"
+"""
+
+RETURN = '''
+key:
+ description: The key that was queried
+ returned: always
+ type: str
+old_value:
+ description: The previous value in the cluster
+ returned: always
+ type: str
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ ETCD_IMP_ERR = traceback.format_exc()
+ HAS_ETCD = False
+
+
+def run_module():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=2379),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ user=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ ca_cert=dict(type='path'),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ timeout=dict(type='int'),
+ )
+
+ # seed the result dict in the object
+ # we primarily care about changed and state
+ # change is if this module effectively modified the target
+ # state will include any data that you want your module to pass back
+ # for consumption, for example, in a subsequent task
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[['client_cert', 'client_key'], ['user', 'password']],
+ )
+
+ # It is possible to set `ca_cert` to verify the server identity without
+ # setting `client_cert` or `client_key` to authenticate the client
+ # so required_together is enough
+ # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
+ # of either `client_cert` or `client_key` is enough
+ if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
+ module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
+
+ result['key'] = module.params.get('key')
+ module.params['cert_cert'] = module.params.pop('client_cert')
+ module.params['cert_key'] = module.params.pop('client_key')
+
+ if not HAS_ETCD:
+ module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
+
+ allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
+ 'timeout', 'user', 'password']
+ # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
+ # the minimum supported version
+ # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
+ client_params = dict()
+ for key, value in module.params.items():
+ if key in allowed_keys:
+ client_params[key] = value
+ try:
+ etcd = etcd3.client(**client_params)
+ except Exception as exp:
+ module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+ try:
+ cluster_value = etcd.get(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+
+ # Make the cluster_value[0] a string for string comparisons
+ result['old_value'] = to_native(cluster_value[0])
+
+ if module.params['state'] == 'absent':
+ if cluster_value[0] is not None:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.delete(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ elif module.params['state'] == 'present':
+ if result['old_value'] != module.params['value']:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.put(module.params['key'], module.params['value'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="State not recognized")
+
+ # failures have already been reported above via AnsibleModule.fail_json();
+ # a successful run exits here with AnsibleModule.exit_json(), passing the
+ # key/value results collected in `result`
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
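A minimal sketch of the etcd3 client calls used above, assuming a server on localhost:2379 and an illustrative key; the TLS and authentication keyword arguments are omitted:

    import etcd3

    etcd = etcd3.client(host='localhost', port=2379)

    etcd.put('foo', 'baz3')             # state=present
    value, metadata = etcd.get('foo')   # value is bytes (b'baz3'), or None if the key is absent
    etcd.delete('foo')                  # state=absent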
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
new file mode 100644
index 00000000..6c285797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+ - Launch a Nomad job.
+ - Stop a Nomad job.
+ - Force start a Nomad job.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+ - Name of the job to delete, stop or start when no job source is provided.
+ - Either this or I(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+ - Force the job to start.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or I(name) must be specified.
+ type: str
+ content_format:
+ description:
+ - Type of content of Nomad job.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Create job
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
+- name: Stop job
+ community.general.nomad_job:
+ host: localhost
+ state: absent
+ name: api
+
+- name: Force job to start
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ name: api
+ timeout: 120
+ force_start: true
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ content_format=dict(choices=['hcl', 'json'], default='hcl'),
+ content=dict(type='str', default=None),
+ force_start=dict(type='bool', default=False),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ["name", "content"]
+ ],
+ required_one_of=[
+ ['name', 'content']
+ ]
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ if module.params.get('state') == "present":
+
+ if module.params.get('name') and not module.params.get('force_start'):
+ module.fail_json(msg='To start a job by name, force_start is needed')
+
+ changed = False
+ if module.params.get('content'):
+
+ if module.params.get('content_format') == 'json':
+
+ job_json = module.params.get('content')
+ try:
+ job_json = json.loads(job_json)
+ except ValueError as e:
+ module.fail_json(msg=to_native(e))
+ job = dict()
+ job['job'] = job_json
+ try:
+ job_id = job_json.get('ID')
+ if job_id is None:
+ module.fail_json(msg="Cannot retrieve job with ID None")
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('content_format') == 'hcl':
+
+ try:
+ job_hcl = module.params.get('content')
+ job_json = nomad_client.jobs.parse(job_hcl)
+ job = dict()
+ job['job'] = job_json
+ except nomad.api.exceptions.BadRequestNomadException as err:
+ msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
+ module.fail_json(msg=to_native(msg))
+ try:
+ job_id = job_json.get('ID')
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('force_start'):
+
+ try:
+ job = dict()
+ if module.params.get('name'):
+ job_name = module.params.get('name')
+ else:
+ job_name = job_json['Name']
+ job_json = nomad_client.job.get_job(job_name)
+ if job_json['Status'] == 'running':
+ result = job_json
+ else:
+ job_json['Status'] = 'running'
+ job_json['Stop'] = False
+ job['job'] = job_json
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = nomad_client.validate.validate_job(job)
+ if not result.status_code == 200:
+ module.fail_json(msg=to_native(result.text))
+ result = json.loads(result.text)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
+ try:
+ if not module.params.get('name') is None:
+ job_name = module.params.get('name')
+ else:
+ if module.params.get('content_format') == 'hcl':
+ job_json = nomad_client.jobs.parse(module.params.get('content'))
+ job_name = job_json['Name']
+ if module.params.get('content_format') == 'json':
+ job_json = json.loads(module.params.get('content'))
+ job_name = job_json['Name']
+ job = nomad_client.job.get_job(job_name)
+ if job['Status'] == 'dead':
+ changed = False
+ result = job
+ else:
+ if not module.check_mode:
+ result = nomad_client.job.deregister_job(job_name)
+ else:
+ result = job
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
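A minimal sketch of the python-nomad calls behind state=present with HCL content; the agent address and the job file name are assumptions:

    import nomad

    n = nomad.Nomad(host='localhost', timeout=5)

    with open('job.hcl') as f:
        job_hcl = f.read()

    job_spec = {'job': n.jobs.parse(job_hcl)}   # HCL -> job JSON, as the module does
    plan = n.job.plan_job(job_spec['job']['ID'], job_spec, diff=True)
    if plan['Diff'].get('Type') != 'None':      # only register when the plan reports a change
        n.jobs.register_job(job_spec)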
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
new file mode 100644
index 00000000..9e935328
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job_info
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Get Nomad Jobs info
+description:
+ - Get info for one Nomad job.
+ - List Nomad jobs.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+ - Name of the job to get info for.
+ - If not specified, lists all jobs.
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Get info for job awx
+ community.general.nomad_job_info:
+ host: localhost
+ name: awx
+ register: result
+
+- name: List Nomad jobs
+ community.general.nomad_job_info:
+ host: localhost
+ register: result
+
+'''
+
+RETURN = '''
+result:
+ description: List of dictionaries containing job info.
+ returned: success
+ type: list
+ sample: [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
+ "Affinities": null,
+ "Constraints": null,
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
+ },
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
+ ]
+
+'''
+
+
+import os
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ changed = False
+ nomad_jobs = list()
+ try:
+ job_list = nomad_client.jobs.get_jobs()
+ for job in job_list:
+ nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
+ result = nomad_jobs
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('name'):
+ filter = list()
+ try:
+ for job in result:
+ if job.get('ID') == module.params.get('name'):
+ filter.append(job)
+ result = filter
+ if not filter:
+ module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
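The lookup itself is a short loop over the python-nomad API; the agent address and the job name used for filtering are assumptions:

    import nomad

    n = nomad.Nomad(host='localhost', timeout=5)

    # List the job stubs, then fetch the full definition of each one (as the module does).
    jobs = [n.job.get_job(stub.get('ID')) for stub in n.jobs.get_jobs()]

    # Optional name filter, equivalent to passing name= to the module.
    wanted = [job for job in jobs if job.get('ID') == 'example']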
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
new file mode 100644
index 00000000..4ec6010f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+- Mathieu Bultel (@matbu)
+description:
+ - This module can manage a pacemaker cluster and nodes from Ansible using
+ the pacemaker cli.
+options:
+ state:
+ description:
+ - Indicate desired state of the cluster
+ choices: [ cleanup, offline, online, restart ]
+ type: str
+ node:
+ description:
+ - Specify which node of the cluster you want to manage. None == the
+ cluster status itself, 'all' == check the status of all nodes.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds after which the module should consider the action to have failed.
+ default: 300
+ type: int
+ force:
+ description:
+ - Force the change of the cluster state
+ type: bool
+ default: 'yes'
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Set cluster state to online
+ community.general.pacemaker_cluster:
+ state: online
+'''
+
+RETURN = '''
+changed:
+ description: True if the cluster state has changed
+ type: bool
+ returned: always
+out:
+ description: The output of the current state of the cluster. It returns a
+ list of the node states.
+ type: str
+ sample: '[[" overcloud-controller-0", " Online"]]'
+ returned: always
+rc:
+ description: Exit code of the module.
+ type: int
+ returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+ cmd = "pcs cluster status"
+ rc, out, err = module.run_command(cmd)
+ if _PCS_CLUSTER_DOWN in out:
+ return 'offline'
+ else:
+ return 'online'
+
+
+def get_node_status(module, node='all'):
+ if node == 'all':
+ cmd = "pcs cluster pcsd-status"
+ else:
+ cmd = "pcs cluster pcsd-status %s" % node
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ status = []
+ for o in out.splitlines():
+ status.append(o.split(':'))
+ return status
+
+
+def clean_cluster(module, timeout):
+ cmd = "pcs resource cleanup"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+ # map states
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+
+ nodes_state = get_node_status(module, node)
+ for node in nodes_state:
+ if node[1].strip().lower() != state:
+ cmd = "%s %s" % (cmd, node[0].strip())
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ nodes_state = get_node_status(module)
+ for node in nodes_state:
+ if node[1].strip().lower() == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+ node=dict(type='str'),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ changed = False
+ state = module.params['state']
+ node = module.params['node']
+ force = module.params['force']
+ timeout = module.params['timeout']
+
+ if state in ['online', 'offline']:
+ # Get cluster status
+ if node is None:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Fail to bring the cluster %s" % state)
+ else:
+ cluster_state = get_node_status(module, node)
+ # Check cluster state
+ for node_state in cluster_state:
+ if node_state[1].strip().lower() == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ # Set cluster status if needed
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_node_status(module, node)
+ module.exit_json(changed=True, out=cluster_state)
+
+ if state in ['restart']:
+ set_cluster(module, 'offline', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'offline':
+ set_cluster(module, 'online', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'online':
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
+
+ if state in ['cleanup']:
+ clean_cluster(module, timeout)
+ cluster_state = get_cluster_status(module)
+ module.exit_json(changed=True,
+ out=cluster_state)
+
+
+if __name__ == '__main__':
+ main()
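The state handling above boils down to shelling out to pcs and polling until the requested state is reported; a standalone sketch of that pattern, with the timeout, poll interval and matched error string as assumptions:

    import subprocess
    import time

    def cluster_is_online():
        # 'pcs cluster status' prints an error when the cluster is not running on this node.
        proc = subprocess.run(['pcs', 'cluster', 'status'], capture_output=True, text=True)
        return 'cluster is not currently running' not in proc.stdout + proc.stderr

    subprocess.run(['pcs', 'cluster', 'start'], check=True)

    deadline = time.time() + 300        # illustrative timeout in seconds
    while time.time() < deadline:
        if cluster_is_online():
            break
        time.sleep(5)
    else:
        raise RuntimeError('cluster did not come online before the timeout')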
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py
new file mode 100644
index 00000000..156a6376
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/clustering/znode.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: znode
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper
+description:
+ - Create, delete, retrieve, and update znodes using ZooKeeper.
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ value:
+ description:
+ - The value assigned to the znode.
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ choices: [ get, wait, list ]
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - The amount of time to wait for a node to appear.
+ default: 300
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ type: bool
+ default: 'no'
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+'''
+
+EXAMPLES = """
+- name: Creating or updating a znode with a given value
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+
+- name: Getting the value and stat structure for a znode
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: get
+
+- name: Listing a particular znode's children
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /zookeeper
+ op: list
+
+- name: Waiting 20 seconds for a znode to appear at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: wait
+ timeout: 20
+
+- name: Deleting a znode at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ state: absent
+
+- name: Creating or updating a znode with a given value on a remote Zookeeper
+ community.general.znode:
+ hosts: 'my-zookeeper-node:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+ delegate_to: 127.0.0.1
+"""
+
+import time
+import traceback
+
+KAZOO_IMP_ERR = None
+try:
+ from kazoo.client import KazooClient
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_IMP_ERR = traceback.format_exc()
+ KAZOO_INSTALLED = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(required=False, default=300, type='int'),
+ recursive=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if isinstance(attr, (int, str)):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+ (current_value, zstat) = self.zk.get(path)
+ if value != current_value:
+ self.zk.set(path, to_bytes(value))
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, to_bytes(value), makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
+ lim = time.time() + timeout
+
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+
+if __name__ == '__main__':
+ main()
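A minimal kazoo sketch of the create-or-update path the module implements for state=present; the ensemble address, path and value are illustrative:

    from kazoo.client import KazooClient

    zk = KazooClient(hosts='localhost:2181')
    zk.start()

    path, value = '/mypath', b'myvalue'
    if zk.exists(path):
        current, stat = zk.get(path)
        if current != value:
            zk.set(path, value)                 # update only when the value differs
    else:
        zk.create(path, value, makepath=True)   # create intermediate nodes as needed

    zk.stop()
    zk.close()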
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py
new file mode 100644
index 00000000..2e5f080d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_sync.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_sync
+short_description: Sync Cobbler
+description:
+- Sync Cobbler to commit changes.
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+author:
+- Dag Wieers (@dagwieers)
+todo:
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+ community.general.cobbler_sync:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ run_once: yes
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=True,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Python 2.7.8 and older do not verify HTTPS certificates by default
+ pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+ if not module.check_mode:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
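Stripped of the SSL and error handling, the sync is two XML-RPC calls; a sketch assuming a Cobbler server reachable over plain HTTP with illustrative credentials:

    import xmlrpc.client

    conn = xmlrpc.client.ServerProxy('http://cobbler01:80/cobbler_api')
    token = conn.login('cobbler', 'MySuperSecureP4sswOrd')
    conn.sync(token)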
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py
new file mode 100644
index 00000000..ecabcc8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cobbler_system.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+- Add, modify or remove systems in Cobbler.
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The system name to manage.
+ type: str
+ properties:
+ description:
+ - A dictionary with system properties.
+ type: dict
+ interfaces:
+ description:
+ - A dictionary of dictionaries containing interface options, keyed by interface name.
+ type: dict
+ sync:
+ description:
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
+ type: bool
+ default: no
+ state:
+ description:
+ - Whether the system should be present, absent or a query is made.
+ choices: [ absent, present, query ]
+ default: present
+ type: str
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ properties:
+ profile: CentOS6-x86_64
+ name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers_search: foo.com, bar.com
+ interfaces:
+ eth0:
+ macaddress: 00:01:02:03:04:05
+ ipaddress: 1.2.3.4
+ delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+ community.general.cobbler_system:
+ host: bdsol-aci-cobbler-01
+ username: cobbler
+ password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com
+ properties:
+ netboot_enabled: yes
+ state: present
+ delegate_to: localhost
+
+- name: Query all systems in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ state: query
+ register: cobbler_systems
+ delegate_to: localhost
+
+- name: Query a specific system in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: '{{ inventory_hostname }}'
+ state: query
+ register: cobbler_properties
+ delegate_to: localhost
+
+- name: Ensure the system does not exist in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+systems:
+ description: List of systems
+ returned: C(state=query) and C(name) is not provided
+ type: list
+system:
+ description: (Resulting) information about the system we are working with
+ returned: when C(name) is provided
+ type: dict
+'''
+
+import copy
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+IFPROPS_MAPPING = dict(
+ bondingopts='bonding_opts',
+ bridgeopts='bridge_opts',
+ connected_mode='connected_mode',
+ cnames='cnames',
+ dhcptag='dhcp_tag',
+ dnsname='dns_name',
+ ifgateway='if_gateway',
+ interfacetype='interface_type',
+ interfacemaster='interface_master',
+ ipaddress='ip_address',
+ ipv6address='ipv6_address',
+ ipv6defaultgateway='ipv6_default_gateway',
+ ipv6mtu='ipv6_mtu',
+ ipv6prefix='ipv6_prefix',
+ ipv6secondaries='ipv6_secondaries',
+ ipv6staticroutes='ipv6_static_routes',
+ macaddress='mac_address',
+ management='management',
+ mtu='mtu',
+ netmask='netmask',
+ static='static',
+ staticroutes='static_routes',
+ virtbridge='virt_bridge',
+)
+
+
+def getsystem(conn, name, token):
+ system = dict()
+ if name:
+ # system = conn.get_system(name, token)
+ systems = conn.find_system(dict(name=name), token)
+ if systems:
+ system = systems[0]
+ return system
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ name=dict(type='str'),
+ interfaces=dict(type='dict'),
+ properties=dict(type='dict'),
+ sync=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ state = module.params['state']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=False,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Python 2.7.8 and older do not verify HTTPS certificates by default
+ pass
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
+
+ system = getsystem(conn, name, token)
+ # result['system'] = system
+
+ if state == 'query':
+ if name:
+ result['system'] = system
+ else:
+ # Turn it into a dictionary of dictionaries
+ # all_systems = conn.get_systems()
+ # result['systems'] = { system['name']: system for system in all_systems }
+
+ # Return a list of dictionaries
+ result['systems'] = conn.get_systems()
+
+ elif state == 'present':
+
+ if system:
+ # Update existing entry
+ system_id = conn.get_system_handle(name, token)
+
+ for key, value in iteritems(module.params['properties']):
+ if key not in system:
+ module.warn("Property '{0}' is not a valid system property.".format(key))
+ if system[key] != value:
+ try:
+ conn.modify_system(system_id, key, value, token)
+ result['changed'] = True
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ else:
+ # Create a new entry
+ system_id = conn.new_system(token)
+ conn.modify_system(system_id, 'name', name, token)
+ result['changed'] = True
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ try:
+ conn.modify_system(system_id, key, value, token)
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ # Add interface properties
+ interface_properties = dict()
+ if module.params['interfaces']:
+ for device, values in iteritems(module.params['interfaces']):
+ for key, value in iteritems(values):
+ if key == 'name':
+ continue
+ if key not in IFPROPS_MAPPING:
+ module.warn("Property '{0}' is not a valid system property.".format(key))
+ if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
+ result['changed'] = True
+ interface_properties['{0}-{1}'.format(key, device)] = value
+
+ if result['changed'] is True:
+ conn.modify_system(system_id, "modify_interface", interface_properties, token)
+
+ # Only save when the entry was changed
+ if not module.check_mode and result['changed']:
+ conn.save_system(system_id, token)
+
+ elif state == 'absent':
+
+ if system:
+ if not module.check_mode:
+ conn.remove_system(name, token)
+ result['changed'] = True
+
+ if not module.check_mode and module.params['sync'] and result['changed']:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
+
+ if state in ('absent', 'present'):
+ result['system'] = getsystem(conn, name, token)
+
+ if module._diff:
+ result['diff'] = dict(before=system, after=result['system'])
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
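Interface properties travel through the same XML-RPC API, flattened into '<property>-<device>' keys and applied in one modify_system call on the 'modify_interface' field, as the module does; the URL, credentials and values here are illustrative:

    import xmlrpc.client

    conn = xmlrpc.client.ServerProxy('http://cobbler01:80/cobbler_api')
    token = conn.login('cobbler', 'MySuperSecureP4sswOrd')

    system_id = conn.new_system(token)
    conn.modify_system(system_id, 'name', 'myhost', token)
    conn.modify_system(system_id, 'profile', 'CentOS6-x86_64', token)

    # One flattened dict covers all interfaces: '<property>-<device>' -> value.
    conn.modify_system(system_id, 'modify_interface', {
        'macaddress-eth0': '00:01:02:03:04:05',
        'ipaddress-eth0': '1.2.3.4',
    }, token)

    conn.save_system(system_id, token)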
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py
new file mode 100644
index 00000000..3bc09c2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/composer.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+description:
+ - >
+ Composer is a tool for dependency management in PHP. It allows you to
+ declare the dependent libraries your project needs and it will install
+ them in your project for you.
+options:
+ command:
+ type: str
+ description:
+ - Composer command like "install", "update" and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ executable:
+ type: path
+ description:
+ - Path to PHP Executable on the remote host, if PHP is not in PATH.
+ aliases: [ php_path ]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see --working-dir). This is required when
+ the command is not run globally.
+ - Will be ignored if C(global_command=true).
+ aliases: [ working-dir ]
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ aliases: [ global-command ]
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source).
+ default: false
+ type: bool
+ aliases: [ prefer-source ]
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist).
+ default: false
+ type: bool
+ aliases: [ prefer-dist ]
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev).
+ default: true
+ type: bool
+ aliases: [ no-dev ]
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ default: false
+ type: bool
+ aliases: [ no-scripts ]
+ no_plugins:
+ description:
+      - Disables all plugins (see --no-plugins).
+ default: false
+ type: bool
+ aliases: [ no-plugins ]
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ aliases: [ optimize-autoloader ]
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+      - Implicitly enables I(optimize_autoloader).
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ aliases: [ classmap-authoritative ]
+ apcu_autoloader:
+ description:
+      - Uses APCu to cache found/not-found classes.
+ default: false
+ type: bool
+ aliases: [ apcu-autoloader ]
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ default: false
+ type: bool
+ aliases: [ ignore-platform-reqs ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
+'''
+
+EXAMPLES = '''
+- name: Download and install all libs and dependencies outlined in the /path/to/project/composer.lock
+ community.general.composer:
+ command: install
+ working_dir: /path/to/project
+
+- name: Install a new package
+ community.general.composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+- name: Clone and install a project with all dependencies
+ community.general.composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: yes
+
+- name: Install a package globally
+ community.general.composer:
+ command: require
+ global_command: yes
+ arguments: my/package
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+
+def has_changed(string):
+ for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
+ if no_change in string:
+ return False
+
+ return True
+
+
+def get_available_options(module, command='install'):
+ # get all available options from a composer command using composer help to json
+ rc, out, err = composer_command(module, "help %s --format=json" % command)
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = module.from_json(out)
+ return command_help_json['definition']['options']
+
+
+def composer_command(module, command, arguments="", options=None, global_command=False):
+ if options is None:
+ options = []
+
+ if module.params['executable'] is None:
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ else:
+ php_path = module.params['executable']
+
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(default="install", type="str"),
+ arguments=dict(default="", type="str"),
+ executable=dict(type="path", aliases=["php_path"]),
+ working_dir=dict(type="path", aliases=["working-dir"]),
+ global_command=dict(default=False, type="bool", aliases=["global-command"]),
+ prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
+ prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
+ no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
+ no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
+ no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
+ apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]),
+ optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
+ classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]),
+ ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
+ ),
+ required_if=[('global_command', False, ['working_dir'])],
+ supports_check_mode=True
+ )
+
+ # Get composer command with fallback to default
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ global_command = module.params['global_command']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'classmap_authoritative': 'classmap-authoritative',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.items():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if module.check_mode:
+ if 'dry-run' in available_options:
+ options.append('--dry-run')
+ else:
+ module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+ rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+        # Composer version > 1.0.0-alpha9 now uses stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+ main()
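
main() above first asks composer which options the requested command understands
(composer help <command> --format=json) and only appends flags that are both enabled as
module parameters and advertised by composer; in check mode it falls back to --dry-run when
available. A rough, self-contained sketch of that filtering step, using an invented
available_options set and only a subset of the real parameter-to-flag mapping:

    def build_options(params, available_options):
        # Default flags are added only when the command supports them.
        options = ['--%s' % o for o in ('no-ansi', 'no-interaction', 'no-progress')
                   if o in available_options]
        # Hypothetical subset of the module's boolean parameter mapping.
        option_params = {
            'prefer_source': 'prefer-source',
            'no_dev': 'no-dev',
            'optimize_autoloader': 'optimize-autoloader',
        }
        for param, flag in option_params.items():
            if params.get(param) and flag in available_options:
                options.append('--%s' % flag)
        return options

    print(build_options({'no_dev': True, 'optimize_autoloader': True},
                        {'no-ansi', 'no-interaction', 'no-dev', 'optimize-autoloader'}))
    # ['--no-ansi', '--no-interaction', '--no-dev', '--optimize-autoloader']
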
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py
new file mode 100644
index 00000000..dd8a5f50
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: "Add, modify & delete services within a consul cluster."
+description:
+ - Registers services and checks for an agent with a consul cluster.
+ A service is some process running on the agent node that should be advertised by
+ consul's discovery mechanism. It may optionally supply a check definition,
+ a periodic service test to notify the consul cluster of service's health.
+ - "Checks may also be registered per node e.g. disk usage, or cpu usage and
+ notify the health of the entire node to the cluster.
+ Service level checks do not require a check name or id as these are derived
+ by Consul from the Service name and id respectively by appending 'service:'
+   by Consul from the Service name and id respectively by appending 'service:'.
+ - Currently, there is no complete way to retrieve the script, interval or ttl
+ metadata for a registered check. Without this metadata it is not possible to
+ tell if the data supplied with ansible represents a change to a check. As a
+   result this module does not attempt to determine changes and will always report that
+   a change occurred. An API method is planned to supply this metadata so at that
+ stage change management will be added.
+ - "See U(http://consul.io) for more details."
+requirements:
+ - python-consul
+ - requests
+author: "Steve Gargan (@sgargan)"
+options:
+ state:
+ description:
+ - register or deregister the consul service, defaults to present
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+ - Unique name for the service on a node, must be unique per node,
+ required if registering a service. May be omitted if registering
+ a node level check
+ service_id:
+ type: str
+ description:
+ - the ID for the service, must be unique per node. If I(state=absent),
+ defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+ - host of the consul agent defaults to localhost
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - the protocol scheme on which the consul agent is running
+ default: http
+ validate_certs:
+ description:
+ - whether to verify the TLS certificate of the consul agent
+ type: bool
+ default: 'yes'
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - the port on which the service is listening. Can optionally be supplied for
+ registration of a service, i.e. if I(service_name) or I(service_id) is set
+ service_address:
+ type: str
+ description:
+ - the address to advertise that the service will be listening on.
+ This value will be passed as the I(address) parameter to Consul's
+ U(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ tags:
+ type: list
+ description:
+ - tags that will be attached to the service registration.
+ script:
+ type: str
+ description:
+ - the script/command that will be run periodically to check the health
+ of the service. Scripts require I(interval) and vice versa.
+ interval:
+ type: str
+ description:
+ - the interval at which the service check will be run. This is a number
+ with a s or m suffix to signify the units of seconds or minutes e.g
+            with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g.
+            C(15s) or C(1m). If no suffix is supplied, C(s) will be used by default, e.g.
+            C(1) will be C(1s). Required if the I(script) parameter is specified.
+ check_id:
+ type: str
+ description:
+ - an ID for the service check. If I(state=absent), defaults to
+ I(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - a name for the service check. Required if standalone, ignored if
+ part of service definition.
+ ttl:
+ type: str
+ description:
+          - checks can be registered with a ttl instead of a I(script) and I(interval).
+            This means that the service will check in with the agent before the
+            ttl expires. If it doesn't, the check will be considered failed.
+            Required if registering a check and the I(script) and I(interval) are missing.
+            Similar to the interval, this is a number with a C(s) or C(m) suffix to
+            signify the units of seconds or minutes, e.g. C(15s) or C(1m). If no suffix
+            is supplied, C(s) will be used by default, e.g. C(1) will be C(1s).
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that consul
+ will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+ The format is C(host:port), for example C(localhost:80).
+ I(interval) must also be provided with this option.
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - checks can be registered with an HTTP endpoint. This means that consul
+ will check that the http endpoint returns a successful HTTP status.
+ I(interval) must also be provided with this option.
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ token:
+ type: str
+ description:
+ - the token key identifying an ACL rule set. May be required to register services.
+'''
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+
+- name: Register nginx service with curl check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ script: curl http://localhost
+ interval: 60s
+
+- name: register nginx with a tcp check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ tcp: localhost:80
+
+- name: Register nginx with an http check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+- name: Remove nginx service
+ community.general.consul:
+ service_name: nginx
+ state: absent
+
+- name: Register celery worker service
+ community.general.consul:
+ service_name: celery-worker
+ tags:
+ - prod
+ - worker
+
+- name: Create a node level check to test disk usage
+ community.general.consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: /opt/disk_usage.py
+ interval: 5m
+
+- name: Register an http check against a service that's already registered
+ community.general.consul:
+ check_name: nginx-check2
+ check_id: nginx-check2
+ service_id: nginx
+ interval: 60s
+ http: http://localhost:80/morestatus
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+
+ class PatchedConsulAgentService(consul.Consul.Agent.Service):
+ def deregister(self, service_id, token=None):
+ params = {}
+ if token:
+ params['token'] = token
+ return self.agent.http.put(consul.base.CB.bool(),
+ '/v1/agent/service/deregister/%s' % service_id,
+ params=params)
+
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+ state = module.params.get('state')
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params.get('service_id') or module.params.get('service_name')
+ check_id = module.params.get('check_id') or module.params.get('check_name')
+ if not (service_id or check_id):
+ module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. currently there is no way to
+ retrieve the full metadata of an existing check through the consul api.
+ Without this we can't compare to the supplied check and so we must assume
+ a change. '''
+ if not check.name and not check.service_id:
+ module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ tcp=check.tcp,
+ http=check.http,
+ timeout=check.timeout,
+ service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+ ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id_or_name(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or not existing == service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id_or_name(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id_or_name(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module, token=None):
+ consulClient = consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+ consulClient.agent.service = PatchedConsulAgentService(consulClient)
+ return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+    ''' iterate the registered services and find one with the given id or name '''
+ for name, service in consul_api.agent.services().items():
+ if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+ if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1:
+ module.fail_json(
+ msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense')
+
+ if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'):
+
+ return ConsulCheck(
+ module.params.get('check_id'),
+ module.params.get('check_name'),
+ module.params.get('check_node'),
+ module.params.get('check_host'),
+ module.params.get('script'),
+ module.params.get('interval'),
+ module.params.get('ttl'),
+ module.params.get('notes'),
+ module.params.get('tcp'),
+ module.params.get('http'),
+ module.params.get('timeout'),
+ module.params.get('service_id'),
+ )
+
+
+def parse_service(module):
+ if module.params.get('service_name'):
+ return ConsulService(
+ module.params.get('service_id'),
+ module.params.get('service_name'),
+ module.params.get('service_address'),
+ module.params.get('service_port'),
+ module.params.get('tags'),
+ )
+ elif not module.params.get('service_name'):
+ module.fail_json(msg="service_name is required to configure a service.")
+
+
+class ConsulService():
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self.checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ optional = {}
+
+ if self.port:
+ optional['port'] = self.port
+
+ if len(self.checks) > 0:
+ optional['check'] = self.checks[0].check
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ tags=self.tags,
+ **optional)
+
+ def add_check(self, check):
+ self.checks.append(check)
+
+ def checks(self):
+ return self.checks
+
+ def has_checks(self):
+ return len(self.checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.id == other.id and
+ self.name == other.name and
+ self.port == other.port and
+ self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self.checks) > 0:
+ data['check'] = self.checks[0].to_dict()
+ return data
+
+
+class ConsulCheck(object):
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.service_id = service_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.tcp = tcp
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+ if tcp:
+ if interval is None:
+ raise Exception('tcp check must specify interval')
+
+ regex = r"(?P<host>.*)(?::)(?P<port>(?:[0-9]+))$"
+ match = re.match(regex, tcp)
+
+ if match is None:
+ raise Exception('tcp check must be in host:port format')
+
+ self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
+
+ def validate_duration(self, name, duration):
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+ if not any((duration.endswith(suffix) for suffix in duration_units)):
+ duration = "{0}s".format(duration)
+ return duration
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+ notes=self.notes,
+ check=self.check)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.check_id == other.check_id and
+ self.service_id == other.service_id and
+ self.name == other.name and
+ self.script == other.script and
+ self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+        self._add(data, 'name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'tcp')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ self._add(data, 'service_id')
+ return data
+
+ def _add(self, data, key, attr=None):
+ try:
+ if attr is None:
+ attr = key
+ data[key] = getattr(self, attr)
+ except Exception:
+ pass
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(required=False, default='http'),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ check_id=dict(required=False),
+ check_name=dict(required=False),
+ check_node=dict(required=False),
+ check_host=dict(required=False),
+ notes=dict(required=False),
+ script=dict(required=False),
+ service_id=dict(required=False),
+ service_name=dict(required=False),
+ service_address=dict(required=False, type='str', default=None),
+ service_port=dict(required=False, type='int', default=None),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(required=False, type='str'),
+ ttl=dict(required=False, type='str'),
+ tcp=dict(required=False, type='str'),
+ http=dict(required=False, type='str'),
+ timeout=dict(required=False, type='str'),
+ tags=dict(required=False, type='list'),
+ token=dict(required=False, no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ test_dependencies(module)
+
+ try:
+ register_with_consul(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), str(e)))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
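
ConsulCheck.validate_duration() above normalises interval, ttl and timeout values by
appending an 's' suffix whenever none of the recognised units (ns, us, ms, s, m, h) is
present, so bare numbers are treated as seconds. A self-contained sketch of the same rule
(not imported from the module):

    def normalise_duration(duration):
        # Append 's' when the value carries no recognised unit suffix.
        units = ('ns', 'us', 'ms', 's', 'm', 'h')
        if duration and not any(duration.endswith(u) for u in units):
            return '{0}s'.format(duration)
        return duration

    assert normalise_duration('15') == '15s'
    assert normalise_duration('1m') == '1m'
    assert normalise_duration(None) is None
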
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py
new file mode 100644
index 00000000..06feeea1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_acl.py
@@ -0,0 +1,657 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_acl
+short_description: Manipulate Consul ACL keys and rules
+description:
+ - Allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ mgmt_token:
+ description:
+ - a management token is required to manipulate the acl lists
+ required: true
+ state:
+ description:
+ - whether the ACL pair should be present or absent
+ required: false
+ choices: ['present', 'absent']
+ default: present
+ token_type:
+ description:
+ - the type of token that should be created
+ choices: ['client', 'management']
+ default: client
+ name:
+ description:
+ - the name that should be associated with the acl key, this is opaque
+ to Consul
+ required: false
+ token:
+ description:
+ - the token key identifying an ACL rule set. If generated by consul
+ this will be a UUID
+ required: false
+ rules:
+ type: list
+ description:
+ - rules that should be associated with a given token
+ required: false
+ host:
+ description:
+ - host of the consul agent defaults to localhost
+ required: false
+ default: localhost
+ port:
+ type: int
+ description:
+ - the port on which the consul agent is running
+ required: false
+ default: 8500
+ scheme:
+ description:
+ - the protocol scheme on which the consul agent is running
+ required: false
+ default: http
+ validate_certs:
+ type: bool
+ description:
+ - whether to verify the tls certificate of the consul agent
+ required: false
+ default: True
+requirements:
+ - python-consul
+ - pyhcl
+ - requests
+'''
+
+EXAMPLES = """
+- name: Create an ACL with rules
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ rules:
+ - key: "foo"
+ policy: read
+ - key: "private/foo"
+ policy: deny
+
+- name: Create an ACL with a specific token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: my-token
+ rules:
+ - key: "foo"
+ policy: read
+
+- name: Update the rules associated to an ACL token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: some_client_token
+ rules:
+ - event: "bbq"
+ policy: write
+ - key: "foo"
+ policy: read
+ - key: "private"
+ policy: deny
+ - keyring: write
+ - node: "hgs4"
+ policy: write
+ - operator: read
+ - query: ""
+ policy: write
+ - service: "consul"
+ policy: write
+ - session: "standup"
+ policy: write
+
+- name: Remove a token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
+ state: absent
+"""
+
+RETURN = """
+token:
+ description: the token associated to the ACL (the ACL's ID)
+ returned: success
+ type: str
+ sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
+rules:
+ description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
+ Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
+  returned: I(state) == "present"
+ type: str
+ sample: {
+ "key": {
+ "foo": {
+ "policy": "write"
+ },
+ "bar": {
+ "policy": "deny"
+ }
+ }
+ }
+operation:
+ description: the operation performed on the ACL
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+try:
+ import consul
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+try:
+ from requests.exceptions import ConnectionError
+ has_requests = True
+except ImportError:
+ has_requests = False
+
+from collections import defaultdict
+from ansible.module_utils.basic import to_text, AnsibleModule
+
+
+RULE_SCOPES = ["agent", "event", "key", "keyring", "node", "operator", "query", "service", "session"]
+
+MANAGEMENT_PARAMETER_NAME = "mgmt_token"
+HOST_PARAMETER_NAME = "host"
+SCHEME_PARAMETER_NAME = "scheme"
+VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
+NAME_PARAMETER_NAME = "name"
+PORT_PARAMETER_NAME = "port"
+RULES_PARAMETER_NAME = "rules"
+STATE_PARAMETER_NAME = "state"
+TOKEN_PARAMETER_NAME = "token"
+TOKEN_TYPE_PARAMETER_NAME = "token_type"
+
+PRESENT_STATE_VALUE = "present"
+ABSENT_STATE_VALUE = "absent"
+
+CLIENT_TOKEN_TYPE_VALUE = "client"
+MANAGEMENT_TOKEN_TYPE_VALUE = "management"
+
+REMOVE_OPERATION = "remove"
+UPDATE_OPERATION = "update"
+CREATE_OPERATION = "create"
+
+_POLICY_JSON_PROPERTY = "policy"
+_RULES_JSON_PROPERTY = "Rules"
+_TOKEN_JSON_PROPERTY = "ID"
+_TOKEN_TYPE_JSON_PROPERTY = "Type"
+_NAME_JSON_PROPERTY = "Name"
+_POLICY_YML_PROPERTY = "policy"
+_POLICY_HCL_PROPERTY = "policy"
+
+_ARGUMENT_SPEC = {
+ MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
+ HOST_PARAMETER_NAME: dict(default='localhost'),
+ SCHEME_PARAMETER_NAME: dict(required=False, default='http'),
+ VALIDATE_CERTS_PARAMETER_NAME: dict(required=False, type='bool', default=True),
+ NAME_PARAMETER_NAME: dict(required=False),
+ PORT_PARAMETER_NAME: dict(default=8500, type='int'),
+ RULES_PARAMETER_NAME: dict(default=None, required=False, type='list'),
+ STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
+ TOKEN_PARAMETER_NAME: dict(required=False),
+ TOKEN_TYPE_PARAMETER_NAME: dict(required=False, choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
+ default=CLIENT_TOKEN_TYPE_VALUE)
+}
+
+
+def set_acl(consul_client, configuration):
+ """
+ Sets an ACL based on the given configuration.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of setting the ACL
+ """
+ acls_as_json = decode_acls_as_json(consul_client.acl.list())
+ existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
+ existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
+ if None in existing_acls_mapped_by_token:
+ raise AssertionError("expecting ACL list to be associated to a token: %s" %
+ existing_acls_mapped_by_token[None])
+
+ if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
+ # No token but name given so can get token from name
+ configuration.token = existing_acls_mapped_by_name[configuration.name].token
+
+ if configuration.token and configuration.token in existing_acls_mapped_by_token:
+ return update_acl(consul_client, configuration)
+ else:
+ if configuration.token in existing_acls_mapped_by_token:
+ raise AssertionError()
+ if configuration.name in existing_acls_mapped_by_name:
+ raise AssertionError()
+ return create_acl(consul_client, configuration)
+
+
+def update_acl(consul_client, configuration):
+ """
+ Updates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the update
+ """
+ existing_acl = load_acl_with_token(consul_client, configuration.token)
+ changed = existing_acl.rules != configuration.rules
+
+ if changed:
+ name = configuration.name if configuration.name is not None else existing_acl.name
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
+ updated_token = consul_client.acl.update(
+ configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
+ if updated_token != configuration.token:
+ raise AssertionError()
+
+ return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
+
+
+def create_acl(consul_client, configuration):
+ """
+ Creates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the creation
+ """
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
+ token = consul_client.acl.create(
+ name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
+ rules = configuration.rules
+ return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
+
+
+def remove_acl(consul, configuration):
+ """
+ Removes an ACL.
+ :param consul: the consul client
+ :param configuration: the run configuration
+ :return: the output of the removal
+ """
+ token = configuration.token
+ changed = consul.acl.info(token) is not None
+ if changed:
+ consul.acl.destroy(token)
+ return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
+
+
+def load_acl_with_token(consul, token):
+ """
+ Loads the ACL with the given token (token == rule ID).
+ :param consul: the consul client
+ :param token: the ACL "token"/ID (not name)
+ :return: the ACL associated to the given token
+ :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
+ """
+ acl_as_json = consul.acl.info(token)
+ if acl_as_json is None:
+ raise ConsulACLNotFoundException(token)
+ return decode_acl_as_json(acl_as_json)
+
+
+def encode_rules_as_hcl_string(rules):
+ """
+ Converts the given rules into the equivalent HCL (string) representation.
+ :param rules: the rules
+    :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
+ note for justification)
+ """
+ if len(rules) == 0:
+        # Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rules` property will be an
+        # empty string if there are no rules...
+ return None
+ rules_as_hcl = ""
+ for rule in rules:
+ rules_as_hcl += encode_rule_as_hcl_string(rule)
+ return rules_as_hcl
+
+
+def encode_rule_as_hcl_string(rule):
+ """
+ Converts the given rule into the equivalent HCL (string) representation.
+ :param rule: the rule
+ :return: the equivalent HCL (string) representation of the rule
+ """
+ if rule.pattern is not None:
+ return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
+ else:
+ return '%s = "%s"\n' % (rule.scope, rule.policy)
+
+
+def decode_rules_as_hcl_string(rules_as_hcl):
+ """
+ Converts the given HCL (string) representation of rules into a list of rule domain models.
+ :param rules_as_hcl: the HCL (string) representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules_as_hcl = to_text(rules_as_hcl)
+ rules_as_json = hcl.loads(rules_as_hcl)
+ return decode_rules_as_json(rules_as_json)
+
+
+def decode_rules_as_json(rules_as_json):
+ """
+ Converts the given JSON representation of rules into a list of rule domain models.
+ :param rules_as_json: the JSON representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ for scope in rules_as_json:
+ if not isinstance(rules_as_json[scope], dict):
+ rules.add(Rule(scope, rules_as_json[scope]))
+ else:
+ for pattern, policy in rules_as_json[scope].items():
+ rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
+ return rules
+
+
+def encode_rules_as_json(rules):
+ """
+ Converts the given rules into the equivalent JSON representation according to the documentation:
+ https://www.consul.io/docs/guides/acl.html#rule-specification.
+ :param rules: the rules
+ :return: JSON representation of the given rules
+ """
+ rules_as_json = defaultdict(dict)
+ for rule in rules:
+ if rule.pattern is not None:
+ if rule.pattern in rules_as_json[rule.scope]:
+ raise AssertionError()
+ rules_as_json[rule.scope][rule.pattern] = {
+ _POLICY_JSON_PROPERTY: rule.policy
+ }
+ else:
+ if rule.scope in rules_as_json:
+ raise AssertionError()
+ rules_as_json[rule.scope] = rule.policy
+ return rules_as_json
+
+
+def decode_rules_as_yml(rules_as_yml):
+ """
+ Converts the given YAML representation of rules into a list of rule domain models.
+ :param rules_as_yml: the YAML representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ if rules_as_yml:
+ for rule_as_yml in rules_as_yml:
+ rule_added = False
+ for scope in RULE_SCOPES:
+ if scope in rule_as_yml:
+ if rule_as_yml[scope] is None:
+ raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
+ policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
+ else rule_as_yml[scope]
+ pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
+ rules.add(Rule(scope, policy, pattern))
+ rule_added = True
+ break
+ if not rule_added:
+ raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
+ return rules
+
+
+def decode_acl_as_json(acl_as_json):
+ """
+ Converts the given JSON representation of an ACL into the equivalent domain model.
+ :param acl_as_json: the JSON representation of an ACL
+ :return: the equivalent domain model to the given ACL
+ """
+ rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
+ rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
+ else RuleCollection()
+ return ACL(
+ rules=rules,
+ token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
+ token=acl_as_json[_TOKEN_JSON_PROPERTY],
+ name=acl_as_json[_NAME_JSON_PROPERTY]
+ )
+
+
+def decode_acls_as_json(acls_as_json):
+ """
+ Converts the given JSON representation of ACLs into a list of ACL domain models.
+ :param acls_as_json: the JSON representation of a collection of ACLs
+ :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
+ """
+ return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
+
+
+class ConsulACLNotFoundException(Exception):
+ """
+    Exception raised if an ACL is not found.
+ """
+
+
+class Configuration:
+ """
+ Configuration for this module.
+ """
+
+ def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
+ rules=None, state=None, token=None, token_type=None):
+ self.management_token = management_token # type: str
+ self.host = host # type: str
+ self.scheme = scheme # type: str
+ self.validate_certs = validate_certs # type: bool
+ self.name = name # type: str
+ self.port = port # type: int
+ self.rules = rules # type: RuleCollection
+ self.state = state # type: str
+ self.token = token # type: str
+ self.token_type = token_type # type: str
+
+
+class Output:
+ """
+ Output of an action of this module.
+ """
+
+ def __init__(self, changed=None, token=None, rules=None, operation=None):
+ self.changed = changed # type: bool
+ self.token = token # type: str
+ self.rules = rules # type: RuleCollection
+ self.operation = operation # type: str
+
+
+class ACL:
+ """
+ Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
+ """
+
+ def __init__(self, rules, token_type, token, name):
+ self.rules = rules
+ self.token_type = token_type
+ self.token = token
+ self.name = name
+
+ def __eq__(self, other):
+ return other \
+ and isinstance(other, self.__class__) \
+ and self.rules == other.rules \
+ and self.token_type == other.token_type \
+ and self.token == other.token \
+ and self.name == other.name
+
+ def __hash__(self):
+ return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
+
+
+class Rule:
+ """
+ ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
+ """
+
+ def __init__(self, scope, policy, pattern=None):
+ self.scope = scope
+ self.policy = policy
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.scope == other.scope \
+ and self.policy == other.policy \
+ and self.pattern == other.pattern
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
+
+ def __str__(self):
+ return encode_rule_as_hcl_string(self)
+
+
+class RuleCollection:
+ """
+ Collection of ACL rules, which are part of a Consul ACL.
+ """
+
+ def __init__(self):
+ self._rules = {}
+ for scope in RULE_SCOPES:
+ self._rules[scope] = {}
+
+ def __iter__(self):
+ all_rules = []
+ for scope, pattern_keyed_rules in self._rules.items():
+ for pattern, rule in pattern_keyed_rules.items():
+ all_rules.append(rule)
+ return iter(all_rules)
+
+ def __len__(self):
+ count = 0
+ for scope in RULE_SCOPES:
+ count += len(self._rules[scope])
+ return count
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and set(self) == set(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return encode_rules_as_hcl_string(self)
+
+ def add(self, rule):
+ """
+ Adds the given rule to this collection.
+ :param rule: model of a rule
+ :raises ValueError: raised if there already exists a rule for a given scope and pattern
+ """
+ if rule.pattern in self._rules[rule.scope]:
+ patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
+ raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info))
+ self._rules[rule.scope][rule.pattern] = rule
+
+
+def get_consul_client(configuration):
+ """
+ Gets a Consul client for the given configuration.
+
+ Does not check if the Consul client can connect.
+ :param configuration: the run configuration
+ :return: Consul client
+ """
+ token = configuration.management_token
+ if token is None:
+ token = configuration.token
+ if token is None:
+ raise AssertionError("Expecting the management token to always be set")
+ return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
+ verify=configuration.validate_certs, token=token)
+
+
+def check_dependencies():
+ """
+ Checks that the required dependencies have been imported.
+ :exception ImportError: if it is detected that any of the required dependencies have not been imported
+ """
+ if not python_consul_installed:
+ raise ImportError("python-consul required for this module. "
+ "See: https://python-consul.readthedocs.io/en/latest/#installation")
+
+ if not pyhcl_installed:
+ raise ImportError("pyhcl required for this module. "
+ "See: https://pypi.org/project/pyhcl/")
+
+ if not has_requests:
+ raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
+
+
+def main():
+ """
+ Main method.
+ """
+ module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
+
+ try:
+ check_dependencies()
+ except ImportError as e:
+ module.fail_json(msg=str(e))
+
+ configuration = Configuration(
+ management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
+ host=module.params.get(HOST_PARAMETER_NAME),
+ scheme=module.params.get(SCHEME_PARAMETER_NAME),
+ validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
+ name=module.params.get(NAME_PARAMETER_NAME),
+ port=module.params.get(PORT_PARAMETER_NAME),
+ rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
+ state=module.params.get(STATE_PARAMETER_NAME),
+ token=module.params.get(TOKEN_PARAMETER_NAME),
+ token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
+ )
+ consul_client = get_consul_client(configuration)
+
+ try:
+ if configuration.state == PRESENT_STATE_VALUE:
+ output = set_acl(consul_client, configuration)
+ else:
+ output = remove_acl(consul_client, configuration)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ configuration.host, configuration.port, str(e)))
+ raise
+
+ return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
+ if output.rules is not None:
+ return_values["rules"] = encode_rules_as_json(output.rules)
+ module.exit_json(**return_values)
+
+
+if __name__ == "__main__":
+ main()
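
encode_rule_as_hcl_string() above emits two HCL shapes: a block for rules that carry a
pattern (key, service, node and similar scopes) and a bare assignment for pattern-less
scopes such as keyring or operator. A standalone illustration of those two shapes, with a
stand-in Rule class rather than the module's own:

    class Rule(object):
        def __init__(self, scope, policy, pattern=None):
            self.scope, self.policy, self.pattern = scope, policy, pattern

    def encode(rule):
        # Same two output shapes as the module's encoder.
        if rule.pattern is not None:
            return '%s "%s" {\n  policy = "%s"\n}\n' % (rule.scope, rule.pattern, rule.policy)
        return '%s = "%s"\n' % (rule.scope, rule.policy)

    print(encode(Rule('key', 'read', 'foo')))
    # key "foo" {
    #   policy = "read"
    # }
    print(encode(Rule('operator', 'read')))
    # operator = "read"
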
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py
new file mode 100644
index 00000000..ee5c3970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_kv.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# (c) 2018 Genome Research Ltd.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster
+description:
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as C(value).
+ - If the C(key) represents a prefix then note that when a value is removed, the existing
+ value if any is returned as part of the results.
+ - See http://www.consul.io/docs/agent/http.html#kv for more details.
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+options:
+ state:
+ description:
+      - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key
+        contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was
+        different to the current contents. If the state is C(present) and I(value) is not set, the existing value
+        associated to the key will be returned. The state C(absent) will remove the key/value pair,
+        again C(changed) will be set to C(true) only if the key actually existed
+        prior to the removal. An attempt can be made to obtain or free the
+        lock associated with a key/value pair with the states C(acquire) or
+        C(release) respectively. A valid session must be supplied to make the
+        attempt; C(changed) will be C(true) if the attempt is successful, C(false)
+        otherwise.
+ choices: [ absent, acquire, present, release ]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value should be associated with the given key, required if C(state)
+ is C(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to C(yes).
+ type: bool
+ retrieve:
+ description:
+ - If the I(state) is C(present) and I(value) is set, perform a
+ read after setting the value and return this value.
+ default: True
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock
+ associated with a key/value pair.
+ type: str
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to
+ the key value pair
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ Consul will only put the key if it does not already exist. If the
+ C(cas) value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the consul agent.
+ type: bool
+ default: 'yes'
+'''
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
+# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+ community.general.consul_kv:
+ key: somekey
+ register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+ community.general.consul_kv:
+ key: somekey
+ value: somevalue
+
+- name: Remove a key from the store
+ community.general.consul_kv:
+ key: somekey
+ state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+ community.general.consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: top_secret
+
+- name: Register a key/value pair with an associated session
+ community.general.consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
+'''
+
+from ansible.module_utils._text import to_text
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
+
+
+def _has_value_changed(consul_client, key, target_value):
+ """
+ Uses the given Consul client to determine if the value associated to the given key is different to the given target
+ value.
+ :param consul_client: Consul connected client
+ :param key: key in Consul
+ :param target_value: value to be associated to the key
+ :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+ value has changed (i.e. the stored value is not the target value)
+ """
+ index, existing = consul_client.kv.get(key)
+ if not existing:
+ return index, True
+ try:
+ changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+ return index, changed
+ except UnicodeError:
+ # Existing value was not decodable but all values we set are valid utf-8
+ return index, True
+
+
+def execute(module):
+ state = module.params.get('state')
+
+ if state == 'acquire' or state == 'release':
+ lock(module, state)
+ elif state == 'present':
+ if module.params.get('value') is NOT_SET:
+ get_value(module)
+ else:
+ set_value(module)
+ elif state == 'absent':
+ remove_value(module)
+ else:
+ module.exit_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+        module.fail_json(
+            msg='%s of lock for %s requested but no session supplied' %
+            (state, key))
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def get_value(module):
+ consul_api = get_consul_api(module)
+ key = module.params.get('key')
+
+ index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+ module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if value is NOT_SET:
+ raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+ stored = None
+ if module.params.get('retrieve'):
+ index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+ ''' remove the value associated with the given key. if the recurse parameter
+ is set then any key prefixed with the given key will be removed. '''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+ changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module, token=None):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cas=dict(type='str'),
+ flags=dict(type='str'),
+ key=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ port=dict(type='int', default=8500),
+ recurse=dict(type='bool'),
+ retrieve=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
+ token=dict(type='str', no_log=True),
+ value=dict(type='str', default=NOT_SET),
+ session=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py
new file mode 100644
index 00000000..f28d3a5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/consul_session.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+    to implement distributed locks. In-depth documentation for working with
+ sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+author:
+- Steve Gargan (@sgargan)
+options:
+ id:
+ description:
+      - ID of the session, required when I(state) is either C(info) or
+        C(absent).
+ type: str
+ state:
+ description:
+      - Whether the session should be present, i.e. created if it doesn't
+        exist, or absent, i.e. removed if present. If created, the I(id) for the
+        session is returned in the output. If C(absent), I(id) is
+        required to remove the session. Info for a single session, all the
+        sessions for a node or all available sessions can be retrieved by
+        specifying C(info), C(node) or C(list) for the I(state); for C(node)
+        or C(info), the node I(name) or session I(id) is required as a parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it
+        is created. Locks for invalidated sessions are blocked from being
+ acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+      - The name of the node with which the session will be associated.
+        By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+ - Checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+        associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ type: list
+ host:
+ description:
+      - The host of the consul agent, defaults to C(localhost).
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: True
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+ community.general.consul_session:
+ name: session1
+
+- name: Register a session with an existing check
+ community.general.consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: Register a session with lock_delay
+ community.general.consul_session:
+ name: session_with_delay
+    delay: 20
+
+- name: Retrieve info about session by id
+ community.general.consul_session:
+ id: session_id
+ state: info
+
+- name: Retrieve active sessions
+ community.general.consul_session:
+ state: list
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
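+    # python-consul has no session update call, so a new session is created on
+    # every run and the task always reports changed=True with the new session id.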
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list'),
+        delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+            ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py
new file mode 100644
index 00000000..3b43b443
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cpanm.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny <franck@lumberjaph.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies.
+description:
+ - Manage Perl library dependencies.
+options:
+ name:
+ type: str
+ description:
+ - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ aliases: ["pkg"]
+ from_path:
+ type: path
+ description:
+      - The local directory from which to install.
+ notest:
+ description:
+ - Do not run unit tests
+ type: bool
+ default: no
+ locallib:
+ description:
+ - Specify the install base to install modules
+ type: path
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use
+ type: str
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB
+ type: bool
+ default: no
+ installdeps:
+ description:
+ - Only install dependencies
+ type: bool
+ default: no
+ version:
+ description:
+      - Minimum version of the Perl module to consider acceptable.
+ type: str
+ system_lib:
+ description:
+ - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+      - This uses the cpanm command-line option C(--sudo), which has nothing to do with Ansible privilege escalation.
+ type: bool
+ default: no
+ aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ type: path
+notes:
+  - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+author: "Franck Cuny (@fcuny)"
+'''
+
+EXAMPLES = '''
+- name: Install Dancer perl package
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install version 0.99_05 of the Plack perl package
+ community.general.cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
+
+- name: Install Dancer into the specified locallib
+ community.general.cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install perl dependencies from local directory
+ community.general.cpanm:
+ from_path: /srv/webapps/my_app/src/
+
+- name: Install Dancer perl package without running the unit tests in indicated locallib
+ community.general.cpanm:
+ name: Dancer
+ notest: True
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install Dancer perl package from a specific mirror
+ community.general.cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
+
+- name: Install Dancer perl package into the system root path
+ community.general.cpanm:
+ name: Dancer
+ system_lib: yes
+
+- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
+ community.general.cpanm:
+ name: Dancer
+ version: '1.0'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _is_package_installed(module, name, locallib, cpanm, version):
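+    # The check shells out to `perl -e 'use <name> <version>;'`: `use` exits non-zero
+    # when the module is missing or older than the requested minimum version, so a
+    # zero return code means the module is already installed.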
+ cmd = ""
+ if locallib:
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "%s perl -e ' use %s" % (cmd, name)
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
+ res, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return res == 0
+
+
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
+ # this code should use "%s" like everything else and just return early but not fixing all of it now.
+ # don't copy stuff like this
+ if from_path:
+ cmd = cpanm + " " + from_path
+ else:
+ cmd = cpanm + " " + name
+
+ if notest is True:
+ cmd = cmd + " -n"
+
+ if locallib is not None:
+ cmd = cmd + " -l " + locallib
+
+ if mirror is not None:
+ cmd = cmd + " --mirror " + mirror
+
+ if mirror_only is True:
+ cmd = cmd + " --mirror-only"
+
+ if installdeps is True:
+ cmd = cmd + " --installdeps"
+
+ if use_sudo is True:
+ cmd = cmd + " --sudo"
+
+ return cmd
+
+
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('cpanm', True)
+ return result
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, required=False, aliases=['pkg']),
+ from_path=dict(default=None, required=False, type='path'),
+ notest=dict(default=False, type='bool'),
+ locallib=dict(default=None, required=False, type='path'),
+ mirror=dict(default=None, required=False),
+ mirror_only=dict(default=False, type='bool'),
+ installdeps=dict(default=False, type='bool'),
+ system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[['name', 'from_path']],
+ )
+
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
+ mirror_only = module.params['mirror_only']
+ installdeps = module.params['installdeps']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
+
+ changed = False
+
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
+
+ if not installed:
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+
+ rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
+
+ if rc_cpanm != 0:
+ module.fail_json(msg=err_cpanm, cmd=cmd)
+
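+        # cpanm prints "<module> is up to date" when nothing needed installing; if that
+        # phrase is absent from both stdout and stderr, report the task as changed.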
+ if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
+ changed = True
+
+ module.exit_json(changed=changed, binary=cpanm, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py
new file mode 100644
index 00000000..a76f6a78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/cronvar.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later. This is required to be
+# present in order for this plugin to find/modify the variable
+
+# This module is based on the crontab module.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables.
+ - This module allows you to create, update, or delete cron variable definitions.
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value to set this variable to.
+ - Required if C(state=present).
+ type: str
+ insertafter:
+ description:
+ - If specified, the variable will be inserted after the variable specified.
+ - Used with C(state=present).
+ type: str
+ insertbefore:
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ type: str
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - This parameter defaults to C(root) when unset.
+ type: str
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
+ - With a leading C(/), this is taken as absolute.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ type: bool
+ default: no
+requirements:
+ - cron
+author:
+- Doug Luce (@dougluce)
+'''
+
+EXAMPLES = r'''
+- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
+ community.general.cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+
+- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
+ community.general.cronvar:
+ name: LEGACY
+ state: absent
+
+- name: Add a variable to a file under /etc/cron.d
+ community.general.cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
+'''
+
+import os
+import platform
+import pwd
+import re
+import shlex
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronVarError(Exception):
+ pass
+
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
+                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
+                                 not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
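+        # A crontab variable line has the form NAME=value. shlex tokenizes the line
+        # (with '=' excluded from word characters), so the first token is the name and
+        # the second token must be '=' for this to count as a variable definition.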
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
+
+ def find_variable(self, name):
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ (var_name, _) = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
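+        # Without an anchor the variable is inserted at the top of the file. With
+        # insertbefore/insertafter it is placed next to the named variable; if no
+        # line defines that variable, nothing is added.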
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+# ==================================================
+
+def main():
+ # The following example playbooks:
+ #
+ # - community.general.cronvar: name="SHELL" value="/bin/bash"
+ #
+ # - name: Set the email
+ # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+ #
+ # - name: Get rid of the old new host variable
+ # community.general.cronvar: name="NEW_HOST" state=absent
+ #
+ # Would produce:
+ # SHELL = /bin/bash
+ # EMAILTO = doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ user=dict(type='str'),
+ cron_file=dict(type='str'),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ backup=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+ # retain the backup only if crontab or cron file have changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py
new file mode 100644
index 00000000..9841a786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/crypttab.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Steve <yo@groks.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+ optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ type: str
+ required: yes
+ state:
+ description:
+ - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ if already present.
+ - Use I(absent) to remove a line with matching I(name).
+ - Use I(opts_present) to add options to those already present; options with
+ different values will be updated.
+ - Use I(opts_absent) to remove options from the existing set.
+ type: str
+ required: yes
+ choices: [ absent, opts_absent, opts_present, present ]
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=).
+ type: str
+ password:
+ description:
+ - Encryption password, the path to a file containing the password, or
+ C(-) or unset if the password should be entered at boot.
+ type: path
+ opts:
+ description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+ type: str
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab).
+ - This might be useful in a chroot environment.
+ type: path
+ default: /etc/crypttab
+author:
+- Steve (@groks)
+'''
+
+EXAMPLES = r'''
+- name: Set the options explicitly for a device which must already exist
+ community.general.crypttab:
+ name: luks-home
+ state: present
+ opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ community.general.crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ loop: '{{ ansible_mounts }}'
+  when: "'/dev/mapper/luks-' in item.device"
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+ backing_device=dict(type='str'),
+ password=dict(type='path'),
+ opts=dict(type='str'),
+ path=dict(type='path', default='/etc/crypttab')
+ ),
+ supports_check_mode=True,
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception as e:
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+ exception=traceback.format_exc(), **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+    if changed and not module.check_mode:
+        f = open(path, 'wb')
+        try:
+            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
+        finally:
+            f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+
+    def __init__(self, path):
+        self._lines = []
+        self.path = path
+ if not os.path.exists(path):
+ if not os.path.exists(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ open(path, 'a').close()
+
+        f = open(path, 'r')
+        try:
+            for line in f.readlines():
+                self._lines.append(Line(line))
+        finally:
+            f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ self.line = self.line.rstrip('\n')
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+ if self.password is not None or self.opts:
+ if self.password is not None:
+ fields.append(self.password)
+ else:
+ fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
+
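+    # Key insertion order is preserved via self.itemlist (keys/items/__iter__ are
+    # overridden), so the rendered crypttab options keep the order they were given in.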
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+ if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+ if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
new file mode 100644
index 00000000..27bfc1a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright: (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+ - This can be used to check for migrations in a cluster.
+ This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+  - If waiting for migrations is not desired, simply poll until
+    port 3000 is available or C(asinfo -v status) returns ok.
+author: "Albert Autin (@Alb0t)"
+options:
+ host:
+ description:
+ - Which host do we use as seed for info connection
+ required: False
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port)
+ required: False
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds)
+ required: False
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations"
+ consecutively before returning OK back to ansible?
+ required: False
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: False
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: False
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node
+ before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: True
+ type: bool
+ min_cluster_size:
+ description:
+        - The check will report failure until the cluster size is met
+            or until the tries limit is exhausted.
+ required: False
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+        - Fail if the cluster key changes.
+            If something else is changing the cluster, we may want to fail.
+ required: False
+ type: bool
+ default: True
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: False
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+ - When all aerospike builds in the cluster are greater than
+ version 4.3, then the C(cluster-stable) info command will be used.
+ Inside this command, you can optionally specify what the target
+ cluster size is - but it is not necessary. You can still rely on
+ min_cluster_size if you don't want to use this option.
+ - If this option is specified on a cluster that has at least 1
+ host <4.3 then it will be ignored until the min version reaches
+ 4.3.
+ required: False
+ type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+ community.general.aerospike_migrations:
+ host: "localhost"
+ connect_timeout: 2000
+ consecutive_good_checks: 5
+ sleep_between_checks: 15
+ tries_limit: 600
+ local_only: False
+
+# example playbook:
+---
+- name: Upgrade aerospike
+ hosts: all
+ become: true
+ serial: 1
+ tasks:
+ - name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - python
+ - python-pip
+ - python-setuptools
+ state: latest
+ - name: Setup aerospike
+ ansible.builtin.pip:
+ name: aerospike
+# check for migrations every (sleep_between_checks)
+# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
+# Will exit if any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be:
+# Tries Limit * Sleep Between Checks * delay * retries
+ - name: Wait for aerospike migrations
+ community.general.aerospike_migrations:
+ local_only: True
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
+ register: migrations_check
+ until: migrations_check is succeeded
+ changed_when: false
+ delay: 60
+ retries: 120
+ - name: Another thing
+ ansible.builtin.shell: |
+ echo foo
+ - name: Reboot
+ ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+ import aerospike
+ from time import sleep
+ import re
+except ImportError as ie:
+ LIB_FOUND = False
+ LIB_FOUND_ERR = traceback.format_exc()
+else:
+ LIB_FOUND = True
+
+
+def run_module():
+ """run ansible module"""
+ module_args = dict(
+ host=dict(type='str', required=False, default='localhost'),
+ port=dict(type='int', required=False, default=3000),
+ connect_timeout=dict(type='int', required=False, default=1000),
+ consecutive_good_checks=dict(type='int', required=False, default=3),
+ sleep_between_checks=dict(type='int', required=False, default=60),
+ tries_limit=dict(type='int', required=False, default=300),
+ local_only=dict(type='bool', required=True),
+ min_cluster_size=dict(type='int', required=False, default=1),
+ target_cluster_size=dict(type='int', required=False, default=None),
+ fail_on_cluster_change=dict(type='bool', required=False, default=True),
+ migrate_tx_key=dict(type='str', required=False,
+ default="migrate_tx_partitions_remaining"),
+ migrate_rx_key=dict(type='str', required=False,
+ default="migrate_rx_partitions_remaining")
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not LIB_FOUND:
+ module.fail_json(msg=missing_required_lib('aerospike'),
+ exception=LIB_FOUND_ERR)
+
+ try:
+ if module.check_mode:
+ has_migrations, skip_reason = False, None
+ else:
+ migrations = Migrations(module)
+ has_migrations, skip_reason = migrations.has_migs(
+ module.params['local_only']
+ )
+
+ if has_migrations:
+ module.fail_json(msg="Failed.", skip_reason=skip_reason)
+ except Exception as e:
+ module.fail_json(msg="Error: {0}".format(e))
+
+ module.exit_json(**result)
+
+
+class Migrations:
+ """ Check or wait for migrations between nodes """
+
+ def __init__(self, module):
+ self.module = module
+ self._client = self._create_client().connect()
+ self._nodes = {}
+ self._update_nodes_list()
+ self._cluster_statistics = {}
+ self._update_cluster_statistics()
+ self._namespaces = set()
+ self._update_cluster_namespace_list()
+ self._build_list = set()
+ self._update_build_list()
+ self._start_cluster_key = \
+ self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+ def _create_client(self):
+ """ TODO: add support for auth, tls, and other special features
+ I won't use those features, so I'll wait until somebody complains
+ or does it for me (Cross fingers)
+ create the client object"""
+ config = {
+ 'hosts': [
+ (self.module.params['host'], self.module.params['port'])
+ ],
+ 'policies': {
+ 'timeout': self.module.params['connect_timeout']
+ }
+ }
+ return aerospike.client(config)
+
+ def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+ """delimiter is for separate stats that come back, NOT for kv
+ separation which is ="""
+ if node is None: # If no node passed, use the first one (local)
+ node = self._nodes[0]
+ data = self._client.info_node(cmd, node)
+ data = data.split("\t")
+ if len(data) != 1 and len(data) != 2:
+ self.module.fail_json(
+ msg="Unexpected number of values returned in info command: " +
+ str(len(data))
+ )
+        # data will be in format 'command\toutput'
+ data = data[-1]
+ data = data.rstrip("\n\r")
+ data_arr = data.split(delimiter)
+
+ # some commands don't return in kv format
+        # so we don't want a dict from those.
+ if '=' in data:
+ retval = dict(
+ metric.split("=", 1) for metric in data_arr
+ )
+ else:
+ # if only 1 element found, and not kv, return just the value.
+ if len(data_arr) == 1:
+ retval = data_arr[0]
+ else:
+ retval = data_arr
+ return retval
+
+ def _update_build_list(self):
+ """creates self._build_list which is a unique list
+ of build versions."""
+ self._build_list = set()
+ for node in self._nodes:
+ build = self._info_cmd_helper('build', node)
+ self._build_list.add(build)
+
+ # just checks to see if the version is 4.3 or greater
+ def _can_use_cluster_stable(self):
+ # if version <4.3 we can't use cluster-stable info cmd
+ # regex hack to check for versions beginning with 0-3 or
+ # beginning with 4.0,4.1,4.2
+ if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+ return False
+ return True
+
+ def _update_cluster_namespace_list(self):
+ """ make a unique list of namespaces
+ TODO: does this work on a rolling namespace add/deletion?
+        thankfully if it doesn't, we don't need this on builds >=4.3"""
+ self._namespaces = set()
+ for node in self._nodes:
+ namespaces = self._info_cmd_helper('namespaces', node)
+ for namespace in namespaces:
+ self._namespaces.add(namespace)
+
+ def _update_cluster_statistics(self):
+ """create a dict of nodes with their related stats """
+ self._cluster_statistics = {}
+ for node in self._nodes:
+ self._cluster_statistics[node] = \
+ self._info_cmd_helper('statistics', node)
+
+ def _update_nodes_list(self):
+ """get a fresh list of all the nodes"""
+ self._nodes = self._client.get_nodes()
+ if not self._nodes:
+ self.module.fail_json("Failed to retrieve at least 1 node.")
+
+ def _namespace_has_migs(self, namespace, node=None):
+ """returns a True or False.
+ Does the namespace have migrations for the node passed?
+ If no node passed, uses the local node or the first one in the list"""
+ namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+ try:
+ namespace_tx = \
+ int(namespace_stats[self.module.params['migrate_tx_key']])
+            namespace_rx = \
+                int(namespace_stats[self.module.params['migrate_rx_key']])
+ except KeyError:
+ self.module.fail_json(
+ msg="Did not find partition remaining key:" +
+ self.module.params['migrate_tx_key'] +
+ " or key:" +
+ self.module.params['migrate_rx_key'] +
+ " in 'namespace/" +
+ namespace +
+ "' output."
+ )
+ except TypeError:
+ self.module.fail_json(
+ msg="namespace stat returned was not numerical"
+ )
+ return namespace_tx != 0 or namespace_rx != 0
+
+ def _node_has_migs(self, node=None):
+ """just calls namespace_has_migs and
+ if any namespace has migs returns true"""
+ migs = 0
+ self._update_cluster_namespace_list()
+ for namespace in self._namespaces:
+ if self._namespace_has_migs(namespace, node):
+ migs += 1
+ return migs != 0
+
+ def _cluster_key_consistent(self):
+ """create a dictionary to store what each node
+ returns the cluster key as. we should end up with only 1 dict key,
+ with the key being the cluster key."""
+ cluster_keys = {}
+ for node in self._nodes:
+ cluster_key = self._cluster_statistics[node][
+ 'cluster_key']
+ if cluster_key not in cluster_keys:
+ cluster_keys[cluster_key] = 1
+ else:
+ cluster_keys[cluster_key] += 1
+ if len(cluster_keys.keys()) == 1 and \
+ self._start_cluster_key in cluster_keys:
+ return True
+ return False
+
+ def _cluster_migrates_allowed(self):
+ """ensure all nodes have 'migrate_allowed' in their stats output"""
+ for node in self._nodes:
+ node_stats = self._info_cmd_helper('statistics', node)
+ allowed = node_stats['migrate_allowed']
+ if allowed == "false":
+ return False
+ return True
+
+ def _cluster_has_migs(self):
+ """calls node_has_migs for each node"""
+ migs = 0
+ for node in self._nodes:
+ if self._node_has_migs(node):
+ migs += 1
+ if migs == 0:
+ return False
+ return True
+
+ def _has_migs(self, local):
+ if local:
+ return self._local_node_has_migs()
+ return self._cluster_has_migs()
+
+ def _local_node_has_migs(self):
+ return self._node_has_migs(None)
+
+ def _is_min_cluster_size(self):
+ """checks that all nodes in the cluster are returning the
+ minimum cluster size specified in their statistics output"""
+ sizes = set()
+ for node in self._cluster_statistics:
+ sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+ if (len(sizes)) > 1: # if we are getting more than 1 size, lets say no
+ return False
+ if (min(sizes)) >= self.module.params['min_cluster_size']:
+ return True
+ return False
+
+ def _cluster_stable(self):
+ """Added 4.3:
+ cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
+ Returns the current 'cluster_key' when the following are satisfied:
+
+ If 'size' is specified then the target node's 'cluster-size'
+ must match size.
+ If 'ignore-migrations' is either unspecified or 'false' then
+ the target node's migrations counts must be zero for the provided
+ 'namespace' or all namespaces if 'namespace' is not provided."""
+ cluster_key = set()
+ cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+ cmd = "cluster-stable:"
+ target_cluster_size = self.module.params['target_cluster_size']
+ if target_cluster_size is not None:
+ cmd = cmd + "size=" + str(target_cluster_size) + ";"
+ for node in self._nodes:
+ try:
+ cluster_key.add(self._info_cmd_helper(cmd, node))
+ except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception
+ if 'unstable-cluster' in e.msg:
+ return False
+ raise e
+ if len(cluster_key) == 1:
+ return True
+ return False
+
+ def _cluster_good_state(self):
+ """checks a few things to make sure we're OK to say the cluster
+        has no migs. It could be in an unhealthy condition that does not allow
+ migs, or a split brain"""
+ if self._cluster_key_consistent() is not True:
+ return False, "Cluster key inconsistent."
+ if self._is_min_cluster_size() is not True:
+ return False, "Cluster min size not reached."
+ if self._cluster_migrates_allowed() is not True:
+ return False, "migrate_allowed is false somewhere."
+ return True, "OK."
+
+ def has_migs(self, local=True):
+ """returns a boolean, False if no migrations otherwise True"""
+ consecutive_good = 0
+ try_num = 0
+ skip_reason = list()
+ while \
+ try_num < int(self.module.params['tries_limit']) and \
+ consecutive_good < \
+ int(self.module.params['consecutive_good_checks']):
+
+ self._update_nodes_list()
+ self._update_cluster_statistics()
+
+ # These checks are outside of the while loop because
+ # we probably want to skip & sleep instead of failing entirely
+ stable, reason = self._cluster_good_state()
+ if stable is not True:
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + reason
+ )
+ else:
+ if self._can_use_cluster_stable():
+ if self._cluster_stable():
+ consecutive_good += 1
+ else:
+ consecutive_good = 0
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " cluster_stable"
+ )
+ elif self._has_migs(local):
+ # print("_has_migs")
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " migrations"
+ )
+ consecutive_good = 0
+ else:
+ consecutive_good += 1
+ if consecutive_good == self.module.params[
+ 'consecutive_good_checks']:
+ break
+ try_num += 1
+ sleep(self.module.params['sleep_between_checks'])
+ # print(skip_reason)
+ if consecutive_good == self.module.params['consecutive_good_checks']:
+ return False, None
+ return True, skip_reason
+
+
+def main():
+ """main method for ansible module"""
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
new file mode 100644
index 00000000..7b798c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ state:
+ description:
+ - Determines if the database should be created or destroyed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+
+- name: Destroy database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ ssl: yes
+ validate_certs: yes
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ database_name = influxdb.database_name
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
new file mode 100644
index 00000000..d9cf5007
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_query
+short_description: Query data points from InfluxDB
+description:
+ - Query data points from InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ query:
+ description:
+ - Query to be executed.
+ required: true
+ type: str
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Query connections
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections"
+ register: connection
+
+- name: Query connections with tag filters
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections where region='zue01' and host='server01'"
+ register: connection
+
+- name: Print results from the query
+ ansible.builtin.debug:
+ var: connection.query_results
+'''
+
+RETURN = r'''
+query_results:
+ description: Result from the query
+ returned: success
+ type: list
+ sample:
+ - mean: 1245.5333333333333
+ time: "1970-01-01T00:00:00Z"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBRead(InfluxDb):
+
+ def read_by_query(self, query):
+ client = self.connect_to_influxdb()
+ try:
+ rs = client.query(query)
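+ # client.query() returns a ResultSet; get_points() yields one dict per
+ # data point, e.g. {'time': '1970-01-01T00:00:00Z', 'mean': 1245.53},
+ # matching the query_results sample documented above.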
+ if rs:
+ return list(rs.get_points())
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ query=dict(type='str', required=True),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influx = AnsibleInfluxDBRead(module)
+ query = module.params.get('query')
+ results = influx.read_by_query(query)
+ module.exit_json(changed=True, query_results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
new file mode 100644
index 00000000..0774915f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Name of the retention policy.
+ required: true
+ type: str
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data.
+ required: true
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ required: true
+ type: int
+ default:
+ description:
+ - Sets the retention policy as default retention policy.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: yes
+ validate_certs: yes
+
+- name: Create 1 day retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+
+- name: Create 1 week retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+
+- name: Create infinite retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: INF
+ replication: 1
+ ssl: no
+ validate_certs: no
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils._text import to_native
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ hostname = module.params['hostname']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+ return retention_policy
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+
+ if not module.check_mode:
+ try:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
+ changed = False
+
+ duration_lookup = duration_regexp.search(duration)
+
+ if duration_lookup.group(2) == 'h':
+ influxdb_duration_format = '%s0m0s' % duration
+ elif duration_lookup.group(2) == 'd':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
+ elif duration_lookup.group(2) == 'w':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
+ elif duration == 'INF':
+ influxdb_duration_format = '0'
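+ # Worked examples of the conversion above: '1h' -> '1h0m0s',
+ # '1d' -> '24h0m0s', '1w' -> '168h0m0s', 'INF' -> '0'. This is the format
+ # used in the existing policy's 'duration' field, so the comparison below
+ # only triggers an alter when something actually differs.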
+
+ if (not retention_policy['duration'] == influxdb_duration_format or
+ not retention_policy['replicaN'] == int(replication) or
+ not retention_policy['default'] == default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ duration=dict(required=True, type='str'),
+ replication=dict(required=True, type='int'),
+ default=dict(default=False, type='bool')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ retention_policy = find_retention_policy(module, client)
+
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
new file mode 100644
index 00000000..e17e3753
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
+# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+ - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ user_name:
+ description:
+ - Name of the user.
+ required: True
+ type: str
+ user_password:
+ description:
+ - Password to be set for the user.
+ required: false
+ type: str
+ admin:
+ description:
+ - Whether the user should be in the admin role or not.
+ - Since version 2.8, the role will also be updated.
+ default: no
+ type: bool
+ state:
+ description:
+ - State of the user.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ grants:
+ description:
+ - Privileges to grant to this user.
+ - Takes a list of dicts containing the "database" and "privilege" keys.
+ - If this argument is not provided, the current grants will be left alone.
+ - If an empty list is provided, all grants for the user will be removed.
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ admin: yes
+ hostname: "{{ influxdb_hostname }}"
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants:
+ - database: 'collectd'
+ privilege: 'WRITE'
+ - database: 'graphite'
+ privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ state: absent
+'''
+
+RETURN = r'''
+#only defaults
+'''
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+ user_result = None
+
+ try:
+ users = client.get_list_users()
+ for user in users:
+ if user['user'] == user_name:
+ user_result = user
+ break
+ except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
+ module.fail_json(msg=to_native(e))
+ return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+ try:
+ client.switch_user(user_name, user_password)
+ client.get_list_users()
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 401:
+ return False
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ finally:
+ # restore previous user
+ client.switch_user(module.params['username'], module.params['password'])
+ return True
+
+
+def set_user_password(module, client, user_name, user_password):
+ if not module.check_mode:
+ try:
+ client.set_user_password(user_name, user_password)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+ if not module.check_mode:
+ try:
+ client.create_user(user_name, user_password, admin)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+ if not module.check_mode:
+ try:
+ client.drop_user(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+ changed = False
+
+ try:
+ current_grants = client.get_list_privileges(user_name)
+ # Fix privileges wording
+ for i, v in enumerate(current_grants):
+ if v['privilege'] == 'ALL PRIVILEGES':
+ v['privilege'] = 'ALL'
+ current_grants[i] = v
+ elif v['privilege'] == 'NO PRIVILEGES':
+ del(current_grants[i])
+
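+ # Example of the reconciliation below: with desired
+ # grants = [{'database': 'collectd', 'privilege': 'WRITE'}] and a current
+ # grant of ALL on 'collectd' (normalized from 'ALL PRIVILEGES' above),
+ # ALL is revoked, WRITE is granted, and changed is reported as True.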
+ # check if the current grants are included in the desired ones
+ for current_grant in current_grants:
+ if current_grant not in grants:
+ if not module.check_mode:
+ client.revoke_privilege(current_grant['privilege'],
+ current_grant['database'],
+ user_name)
+ changed = True
+
+ # check if the desired grants are included in the current ones
+ for grant in grants:
+ if grant not in current_grants:
+ if not module.check_mode:
+ client.grant_privilege(grant['privilege'],
+ grant['database'],
+ user_name)
+ changed = True
+
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ return changed
+
+
+def main():
+ argument_spec = influx.InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+ admin=dict(default='False', type='bool'),
+ grants=dict(type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ user_name = module.params['user_name']
+ user_password = module.params['user_password']
+ admin = module.params['admin']
+ grants = module.params['grants']
+ influxdb = influx.InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ user = find_user(module, client, user_name)
+
+ changed = False
+
+ if state == 'present':
+ if user:
+ if not check_user_password(module, client, user_name, user_password) and user_password is not None:
+ set_user_password(module, client, user_name, user_password)
+ changed = True
+
+ try:
+ if admin and not user['admin']:
+ if not module.check_mode:
+ client.grant_admin_privileges(user_name)
+ changed = True
+ elif not admin and user['admin']:
+ if not module.check_mode:
+ client.revoke_admin_privileges(user_name)
+ changed = True
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=to_native(e))
+
+ else:
+ user_password = user_password or ''
+ create_user(module, client, user_name, user_password, admin)
+ changed = True
+
+ if grants is not None:
+ if set_user_grants(module, client, user_name, grants):
+ changed = True
+
+ module.exit_json(changed=changed)
+
+ if state == 'absent':
+ if user:
+ drop_user(module, client, user_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py
new file mode 100644
index 00000000..0dc063a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_write.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_write
+short_description: Write data points into InfluxDB
+description:
+ - Write data points into InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ data_points:
+ description:
+ - Data points as a list of dicts to write into the database.
+ required: true
+ type: list
+ elements: dict
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Write points into database
+ community.general.influxdb_write:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ data_points:
+ - measurement: connections
+ tags:
+ host: server01
+ region: us-west
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 2000
+ - measurement: connections
+ tags:
+ host: server02
+ region: us-east
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 3000
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBWrite(InfluxDb):
+
+ def write_data_point(self, data_points):
+ client = self.connect_to_influxdb()
+
+ try:
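+ # write_points() expects the list-of-dicts format shown in EXAMPLES:
+ # each dict carries a 'measurement' and 'fields', plus optional 'tags'
+ # and 'time'.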
+ client.write_points(data_points)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ data_points=dict(required=True, type='list', elements='dict'),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ influx = AnsibleInfluxDBWrite(module)
+ data_points = module.params.get('data_points')
+ influx.write_data_point(data_points)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py
new file mode 100644
index 00000000..27a67406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/elasticsearch_plugin.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+author:
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a file://
+ URL to install from a local file, or a remote URL. If this is not set, the plugin
+ location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+ - Is only used if the state would change, which is solely checked based on the name
+ parameter. If, for example, the plugin is already installed, changing this has no
+ effect.
+ - For ES 1.x use url.
+ required: False
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x).
+ - For ES 2.x and higher, use src.
+ required: False
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch 5.0 and later.
+ default: 1m
+ force:
+ description:
+ - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
+ default: False
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - The default changed in Ansible 2.4 to None.
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If the plugin already exists with a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+ community.general.elasticsearch_plugin:
+ name: ingest-geoip
+ state: present
+ force: yes
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+PLUGIN_BIN_PATHS = tuple([
+ '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+ '/usr/share/elasticsearch/bin/plugin'
+])
+
+
+def parse_plugin_repo(string):
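+ # For example: 'mobz/elasticsearch-head' -> 'head', 'es-foo' -> 'foo',
+ # 'analysis-icu' -> 'analysis-icu'.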
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_name, plugin_dir):
+ return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+ reason = "ERROR: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+ is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+ # Timeout and version are only valid for plugin, not elasticsearch-plugin
+ if is_old_command:
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ if version:
+ plugin_name = plugin_name + '/' + version
+ cmd_args[2] = plugin_name
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ # Legacy ES 1.x
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if force:
+ cmd_args.append("--batch")
+ if src:
+ cmd_args.append(src)
+ else:
+ cmd_args.append(plugin_name)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def get_plugin_bin(module, plugin_bin=None):
+ # Use the plugin_bin that was supplied first before trying other options
+ valid_plugin_bin = None
+ if plugin_bin and os.path.isfile(plugin_bin):
+ valid_plugin_bin = plugin_bin
+
+ else:
+ # Add the plugin_bin passed into the module to the top of the list of paths to test,
+ # testing for that binary name first before falling back to the default paths.
+ bin_paths = list(PLUGIN_BIN_PATHS)
+ if plugin_bin and plugin_bin not in bin_paths:
+ bin_paths.insert(0, plugin_bin)
+
+ # Get separate lists of dirs and binary names from the full paths to the
+ # plugin binaries.
+ plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
+ plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
+
+ # Check for the binary names in the default system paths as well as the path
+ # specified in the module arguments.
+ for bin_file in plugin_bins:
+ valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+ if valid_plugin_bin:
+ break
+
+ if not valid_plugin_bin:
+ module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+ return valid_plugin_bin
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ src=dict(default=None),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ force=dict(type='bool', default=False),
+ plugin_bin=dict(type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ mutually_exclusive=[("src", "url")],
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ src = module.params["src"]
+ timeout = module.params["timeout"]
+ force = module.params["force"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ # Search provided path and system paths for valid binary
+ plugin_bin = get_plugin_bin(module, plugin_bin)
+
+ repo = parse_plugin_repo(name)
+ present = is_plugin_present(repo, plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py
new file mode 100644
index 00000000..e84d8a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/kibana_plugin.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Thierno IB. BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+ - For local file, prefix its absolute path with file://
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h etc."
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the Kibana binary.
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana.
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+ - If the plugin already exists with a previous version, it will NOT be updated unless C(force) is set to yes.
+ force:
+ description:
+ - Delete and re-install the plugin. Can be useful for plugin updates.
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+
+- name: Install specific version of a plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+- name: Uninstall Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+ description: the launched command during plugin management (install / remove)
+ returned: success
+ type: str
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: str
+url:
+ description: the url from where the plugin is installed from
+ returned: success
+ type: str
+timeout:
+ description: the timeout for plugin download
+ returned: success
+ type: str
+stdout:
+ description: the command stdout
+ returned: success
+ type: str
+stderr:
+ description: the command stderr
+ returned: success
+ type: str
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: str
+'''
+
+import os
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "install"]
+ if url:
+ cmd_args.append(url)
+ else:
+ cmd_args.append(plugin_name)
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "remove", plugin_name]
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def get_kibana_version(module, plugin_bin):
+ cmd_args = [plugin_bin, '--version']
+ cmd = " ".join(cmd_args)
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to get Kibana version : %s" % err)
+
+ return out.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ changed, cmd, out, err = False, '', '', ''
+
+ kibana_version = get_kibana_version(module, plugin_bin)
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py
new file mode 100644
index 00000000..313a7f70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/odbc.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Westcott <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: odbc
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "1.0.0"
+short_description: Execute SQL via ODBC
+description:
+ - Read/Write info via ODBC drivers.
+options:
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: yes
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: yes
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is C(true) to support legacy module behavior.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+requirements:
+ - "python >= 2.6"
+ - "pyodbc"
+
+notes:
+ - "Like the command module, this module always returns changed = yes whether or not the query would change the database."
+ - "To alter this behavior you can use C(changed_when): [yes or no]."
+ - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
+'''
+
+EXAMPLES = '''
+- name: Set some values in the test db
+ community.general.odbc:
+ dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+ query: "Select * from table_a where column1 = ?"
+ params:
+ - "value1"
+ commit: false
+ changed_when: no
+'''
+
+RETURN = '''
+results:
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
+description:
+ description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
+row_count:
+ description: "The number of rows selected or modified according to the cursor defaults to -1. See notes."
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+HAS_PYODBC = None
+try:
+ import pyodbc
+ HAS_PYODBC = True
+except ImportError as e:
+ HAS_PYODBC = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dsn=dict(type='str', required=True, no_log=True),
+ query=dict(type='str', required=True),
+ params=dict(type='list', elements='str'),
+ commit=dict(type='bool', default=True),
+ ),
+ )
+
+ dsn = module.params.get('dsn')
+ query = module.params.get('query')
+ params = module.params.get('params')
+ commit = module.params.get('commit')
+
+ if not HAS_PYODBC:
+ module.fail_json(msg=missing_required_lib('pyodbc'))
+
+ # Try to make a connection with the DSN
+ connection = None
+ try:
+ connection = pyodbc.connect(dsn)
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
+
+ result = dict(
+ changed=True,
+ description=[],
+ row_count=-1,
+ results=[],
+ )
+
+ try:
+ cursor = connection.cursor()
+
+ if params:
+ cursor.execute(query, params)
+ else:
+ cursor.execute(query)
+ if commit:
+ cursor.commit()
+ try:
+ # Get the rows out into an 2d array
+ for row in cursor.fetchall():
+ new_row = []
+ for column in row:
+ new_row.append("{0}".format(column))
+ result['results'].append(new_row)
+
+ # Return additional information from the cursor
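+ # Per DB-API 2.0, each cursor.description entry is a 7-item sequence:
+ # (name, type_code, display_size, internal_size, precision, scale, null_ok),
+ # which maps directly onto the dict built below.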
+ for row_description in cursor.description:
+ description = {}
+ description['name'] = row_description[0]
+ description['type'] = row_description[1].__name__
+ description['display_size'] = row_description[2]
+ description['internal_size'] = row_description[3]
+ description['precision'] = row_description[4]
+ description['scale'] = row_description[5]
+ description['nullable'] = row_description[6]
+ result['description'].append(description)
+
+ result['row_count'] = cursor.rowcount
+ except pyodbc.ProgrammingError as pe:
+ pass
+ except Exception as e:
+ module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
+
+ cursor.close()
+ except Exception as e:
+ module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
+ finally:
+ connection.close()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py
new file mode 100644
index 00000000..5ffbd7db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+ - Unified utility to interact with redis instances.
+options:
+ command:
+ description:
+ - The selected redis command.
+ - C(config) (new in 1.6) ensures a configuration setting on an instance.
+ - C(flush) flushes all the instance or a specified db.
+ - C(slave) sets a redis instance in slave or master mode.
+ choices: [ config, flush, slave ]
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with (usually not used)
+ type: str
+ login_host:
+ description:
+ - The host running the database
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The port to connect to
+ default: 6379
+ type: int
+ master_host:
+ description:
+ - The host of the master instance [slave command]
+ type: str
+ master_port:
+ description:
+ - The port of the master instance [slave command]
+ type: int
+ slave_mode:
+ description:
+ - The mode of the redis instance [slave command]
+ default: slave
+ choices: [ master, slave ]
+ type: str
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ type: int
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ default: all
+ choices: [ all, db ]
+ type: str
+ name:
+ description:
+ - A redis config key.
+ type: str
+ value:
+ description:
+ - A redis config value. When memory size is needed, it is possible
+ to specify it in the usal form of 1KB, 2M, 400MB where the base is 1024.
+ Units are case insensitive i.e. 1m = 1mb = 1M = 1MB.
+ type: str
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+ install it with pip (pip install redis) or with a package manager.
+ https://github.com/andymccurdy/redis-py
+ - If the redis master instance that we are making a slave of is password protected,
+ this needs to be in the redis.conf in the masterauth variable.
+
+seealso:
+ - module: community.general.redis_info
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be slave of melee.island on port 6377
+ community.general.redis:
+ command: slave
+ master_host: melee.island
+ master_port: 6377
+
+- name: Deactivate slave mode
+ community.general.redis:
+ command: slave
+ slave_mode: master
+
+- name: Flush all the redis db
+ community.general.redis:
+ command: flush
+ flush_mode: all
+
+- name: Flush only one db in a redis instance
+ community.general.redis:
+ command: flush
+ db: 1
+ flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+ community.general.redis:
+ command: config
+ name: maxclients
+ value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+ community.general.redis:
+ command: config
+ name: maxmemory
+ value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+ community.general.redis:
+ command: config
+ name: lua-time-limit
+ value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ import redis
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ redis_found = False
+else:
+ redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils._text import to_native
+import re
+
+
+# Redis module specific support methods.
+def set_slave_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if not isinstance(db, int):
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(type='str', choices=['config', 'flush', 'slave']),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ master_host=dict(type='str'),
+ master_port=dict(type='int'),
+ slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
+ db=dict(type='int'),
+ flush_mode=dict(type='str', default='all', choices=['all', 'db']),
+ name=dict(type='str'),
+ value=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ if not redis_found:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ command = module.params['command']
+
+ # Slave Command section -----------
+ if command == "slave":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['slave_mode']
+
+ # Check if we have all the data
+ if mode == "slave": # Only need data if we want to be slave
+ if not master_host:
+ module.fail_json(msg='In slave mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(msg='In slave mode master port must be provided')
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Check if we are already in the mode that we want
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
+ status = dict(
+ status=mode,
+ master_host=master_host,
+ master_port=master_port,
+ )
+ module.exit_json(changed=False, mode=status)
+ else:
+ # Do the stuff
+ # (Check Check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "slave":
+ if module.check_mode or\
+ set_slave_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set slave mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ # Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(msg="In db mode the db number must be provided")
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Do the stuff
+ # (Check Check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+
+ try: # try to parse the value as if it were the memory size
+ if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
+ value = str(human_to_bytes(module.params['value'].upper()))
+ else:
+ value = module.params['value']
+ except ValueError:
+ value = module.params['value']
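+ # For example, value '4GB' is normalized by human_to_bytes to '4294967296'
+ # (base 1024), so it compares correctly against the byte count that
+ # CONFIG GET reports for settings such as maxmemory.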
+
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py
new file mode 100644
index 00000000..b615addb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/redis_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+- Gathers information and statistics about Redis servers.
+options:
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to.
+ type: int
+ default: 6379
+ login_password:
+ description:
+ - The password used to authenticate with, when authentication is enabled for the Redis server.
+ type: str
+notes:
+- Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+seealso:
+- module: community.general.redis
+requirements: [ redis ]
+author: "Pavlo Bashynskyi (@levonet)"
+'''
+
+EXAMPLES = r'''
+- name: Get server information
+ community.general.redis_info:
+ register: result
+
+- name: Print server information
+ ansible.builtin.debug:
+ var: result.info
+'''
+
+RETURN = r'''
+info:
+ description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success
+ type: dict
+ sample: {
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 932409344,
+ "allocator_allocated": 932062792,
+ "allocator_frag_bytes": 346552,
+ "allocator_frag_ratio": 1.0,
+ "allocator_resident": 947253248,
+ "allocator_rss_bytes": 14843904,
+ "allocator_rss_ratio": 1.02,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_enabled": 0,
+ "aof_last_bgrewrite_status": "ok",
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_last_write_status": "ok",
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "atomicvar_api": "atomic-builtin",
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 4,
+ "client_recent_max_output_buffer": 0,
+ "cluster_enabled": 0,
+ "config_file": "",
+ "configured_hz": 10,
+ "connected_clients": 4,
+ "connected_slaves": 0,
+ "db0": {
+ "avg_ttl": 1945628530,
+ "expires": 16,
+ "keys": 3341411
+ },
+ "evicted_keys": 0,
+ "executable": "/data/redis-server",
+ "expired_keys": 9,
+ "expired_stale_perc": 1.72,
+ "expired_time_cap_reached_count": 0,
+ "gcc_version": "9.2.0",
+ "hz": 10,
+ "instantaneous_input_kbps": 0.0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0.0,
+ "keyspace_hits": 0,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 0,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 11603632,
+ "master_repl_offset": 118831417,
+ "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e",
+ "master_replid2": "0000000000000000000000000000000000000000",
+ "maxmemory": 0,
+ "maxmemory_human": "0B",
+ "maxmemory_policy": "noeviction",
+ "mem_allocator": "jemalloc-5.1.0",
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 49694,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 12355480,
+ "mem_fragmentation_ratio": 1.01,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 1048576,
+ "migrate_cached_sockets": 0,
+ "multiplexing_api": "epoll",
+ "number_of_cached_scripts": 0,
+ "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64",
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 671,
+ "rdb_current_bgsave_time_sec": -1,
+ "rdb_last_bgsave_status": "ok",
+ "rdb_last_bgsave_time_sec": -1,
+ "rdb_last_cow_size": 0,
+ "rdb_last_save_time": 1588702236,
+ "redis_build_id": "a31260535f820267",
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "redis_mode": "standalone",
+ "redis_version": "999.999.999",
+ "rejected_connections": 0,
+ "repl_backlog_active": 1,
+ "repl_backlog_first_byte_offset": 118707937,
+ "repl_backlog_histlen": 123481,
+ "repl_backlog_size": 1048576,
+ "role": "master",
+ "rss_overhead_bytes": -3051520,
+ "rss_overhead_ratio": 1.0,
+ "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4",
+ "second_repl_offset": 118830003,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 885,
+ "total_connections_received": 10,
+ "total_net_input_bytes": 802709255,
+ "total_net_output_bytes": 31754,
+ "total_system_memory": 135029538816,
+ "total_system_memory_human": "125.76G",
+ "uptime_in_days": 53,
+ "uptime_in_seconds": 4631778,
+ "used_cpu_sys": 4.668282,
+ "used_cpu_sys_children": 0.002191,
+ "used_cpu_user": 4.21088,
+ "used_cpu_user_children": 0.0,
+ "used_memory": 931908760,
+ "used_memory_dataset": 910774306,
+ "used_memory_dataset_perc": "97.82%",
+ "used_memory_human": "888.74M",
+ "used_memory_lua": 37888,
+ "used_memory_lua_human": "37.00K",
+ "used_memory_overhead": 21134454,
+ "used_memory_peak": 932015216,
+ "used_memory_peak_human": "888.84M",
+ "used_memory_peak_perc": "99.99%",
+ "used_memory_rss": 944201728,
+ "used_memory_rss_human": "900.46M",
+ "used_memory_scripts": 0,
+ "used_memory_scripts_human": "0B",
+ "used_memory_startup": 791264
+ }
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ from redis import StrictRedis
+ HAS_REDIS_PACKAGE = True
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def redis_client(**client_params):
+ return StrictRedis(**client_params)
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ login_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_REDIS_PACKAGE:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_password = module.params['login_password']
+
+ # Connect and check
+ client = redis_client(host=login_host, port=login_port, password=login_password)
+ try:
+ client.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ info = client.info()
+ module.exit_json(changed=False, info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py
new file mode 100644
index 00000000..848a5e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/misc/riak.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+ - This module can be used to join nodes to a cluster and check
+ the status of the cluster.
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ config_dir:
+ description:
+ - The path to the Riak configuration directory.
+ default: /etc/riak
+ http_conn:
+ description:
+ - The IP address and port that are listening for Riak HTTP queries.
+ default: 127.0.0.1:8098
+ target_node:
+ description:
+ - The target node for certain operations (join, ping)
+ default: riak@127.0.0.1
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: "Join's a Riak node to another node"
+ community.general.riak:
+ command: join
+ target_node: riak@10.1.1.1
+
+- name: Wait for handoffs to finish. Use with async and poll.
+ community.general.riak:
+ wait_for_handoffs: yes
+
+- name: Wait for riak_kv service to start up
+ community.general.riak:
+ wait_for_service: kv
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
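+
+# For illustration only (hypothetical node names): on a healthy cluster
+# `riak-admin ringready` prints a line like
+#   TRUE All nodes agree on the ring ['riak@10.0.0.1', 'riak@10.0.0.2']
+# so the substring check above returns True; on a cluster that has not yet
+# converged the line starts with FALSE and the check returns False.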
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+ wait_for_handoffs=dict(default=False, type='int'),
+ wait_for_ring=dict(default=False, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+ validate_certs=dict(default=True, type='bool'))
+ )
+
+ command = module.params.get('command')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+
+ # make sure riak commands are on the path
+ riak_bin = module.get_bin_path('riak')
+ riak_admin_bin = module.get_bin_path('riak-admin')
+
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+ # Attempt to parse the fetched stats as JSON.
+ try:
+ stats = json.loads(stats_raw)
+ except Exception:
+ module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+ rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % (riak_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+# This could take a while; it is recommended to run this task in async mode.
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py
new file mode 100644
index 00000000..e6c5f183
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/mssql/mssql_db.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host.
+description:
+ - Add or remove MSSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove.
+ required: true
+ aliases: [ db ]
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with
+ type: str
+ login_host:
+ description:
+ - Host running the database
+ type: str
+ required: true
+ login_port:
+ description:
+ - Port of the MSSQL server. Requires I(login_host) to be defined as something other than localhost when I(login_port) is used.
+ default: '1433'
+ type: str
+ state:
+ description:
+ - The database state
+ default: present
+ choices: [ "present", "absent", "import" ]
+ type: str
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+ files (C(.sql)) are supported.
+ type: str
+ autocommit:
+ description:
+ - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+ within a transaction.
+ type: bool
+ default: 'no'
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as C(pip install pymssql) (see M(ansible.builtin.pip)).
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig (@vedit)
+'''
+
+EXAMPLES = '''
+- name: Create a new database with name 'jackdata'
+ community.general.mssql_db:
+ name: jackdata
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file to remote host
+ ansible.builtin.copy:
+ src: dump.sql
+ dest: /tmp
+
+- name: Restore the dump file to database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import traceback
+
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ mssql_found = False
+else:
+ mssql_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+ try:
+ cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+ except Exception:
+ pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ with open(target, 'r') as backup:
+ sqlQuery = "USE [%s]\n" % db
+ for line in backup:
+ if line is None:
+ break
+ elif line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+ module.fail_json(msg="when supplying login_user arguments login_password must be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py
new file mode 100644
index 00000000..bf66f3d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_copy.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+ - The name of the table or an SQL query whose data will be copied to I(copy_to).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - The name of the table into which data from I(copy_from) will be copied.
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See block Examples and PROGRAM arg description U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: no
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- The COPY command is allowed only for database superusers.
+- If I(check_mode=yes), the module just checks the src/dst table availability
+ and returns the COPY query that has not actually been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+ will execute it and roll the transaction back, but be aware that
+ this can affect database performance (e.g., if the SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.general.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+ community.general.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.general.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: yes
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.general.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+ copy_to: /tmp/data.txt
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+class PgCopyData(object):
+
+ """Implements behavior of COPY FROM, COPY TO PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- something was changed after execution or not
+ executed_queries (list) -- executed queries
+ dst (str) -- data destination table (when copy_from)
+ src (str) -- data source table (when copy_to)
+ opt_need_quotes (tuple) -- values of these options must be passed
+ to SQL in quotes
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.changed = False
+ self.dst = ''
+ self.src = ''
+ self.opt_need_quotes = (
+ 'DELIMITER',
+ 'NULL',
+ 'QUOTE',
+ 'ESCAPE',
+ 'ENCODING',
+ )
+
+ def copy_from(self):
+ """Implements COPY FROM command behavior."""
+ self.src = self.module.params['copy_from']
+ self.dst = self.module.params['dst']
+
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('FROM')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.src)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
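+
+ # For illustration only (hypothetical parameter values): with dst='acme',
+ # columns=['id', 'name'], copy_from='/tmp/data.csv' and options={'format': 'csv'},
+ # the fragments below join into
+ #   COPY "acme" (id,name) FROM '/tmp/data.csv' (format csv)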
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.dst)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def copy_to(self):
+ """Implements COPY TO command behavior."""
+ self.src = self.module.params['src']
+ self.dst = self.module.params['copy_to']
+
+ if 'SELECT ' in self.src.upper():
+ # If src is SQL SELECT statement:
+ query_fragments = ['COPY (%s)' % self.src]
+ else:
+ # If src is a table:
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('TO')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.dst)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.src)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def __transform_options(self):
+ """Transform options dict into a suitable string."""
+ for (key, val) in iteritems(self.module.params['options']):
+ if key.upper() in self.opt_need_quotes:
+ self.module.params['options'][key] = "'%s'" % val
+
+ opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+ return '(%s)' % ', '.join(opt)
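+
+ # For illustration only (hypothetical options): {'format': 'csv', 'delimiter': '|'}
+ # becomes "(format csv, delimiter '|')"; DELIMITER is in opt_need_quotes, so its
+ # value is wrapped in single quotes, while format is passed through unquoted.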
+
+ def __check_table(self, table):
+ """Check table or SQL in transaction mode for check_mode.
+
+ Return True if it is OK.
+
+ Arguments:
+ table (str) - Table name that needs to be checked.
+ It can be SQL SELECT statement that was passed
+ instead of the table name.
+ """
+ if 'SELECT ' in table.upper():
+ # In this case table is actually SQL SELECT statement.
+ # If SQL fails, it's handled by exec_sql():
+ exec_sql(self, table, add_to_executed=False)
+ # If exec_sql did not fail, everything is OK:
+ return True
+
+ exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+ add_to_executed=False)
+ # If SQL was executed successfully:
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ copy_to=dict(type='path', aliases=['to']),
+ copy_from=dict(type='path', aliases=['from']),
+ src=dict(type='str', aliases=['source']),
+ dst=dict(type='str', aliases=['destination']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ options=dict(type='dict'),
+ program=dict(type='bool', default=False),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['copy_from', 'copy_to'],
+ ['copy_from', 'src'],
+ ['copy_to', 'dst'],
+ ]
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ opt_list = None
+ if module.params['options']:
+ opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
+
+ check_input(module,
+ module.params['copy_to'],
+ module.params['copy_from'],
+ module.params['src'],
+ module.params['dst'],
+ opt_list,
+ module.params['columns'],
+ module.params['session_role'])
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+ if module.params.get('copy_from') and not module.params.get('dst'):
+ module.fail_json(msg='dst param is necessary with copy_from')
+
+ elif module.params.get('copy_to') and not module.params.get('src'):
+ module.fail_json(msg='src param is necessary with copy_to')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = PgCopyData(module, cursor)
+
+ # Note: parameters like dst, src, etc. are read
+ # from the module object into the PgCopyData object,
+ # so there is no need to pass them as arguments to the methods below.
+ # Note: check mode is implemented inside the methods below
+ # by checking passed module.check_mode arg.
+ if module.params.get('copy_to'):
+ data.copy_to()
+
+ elif module.params.get('copy_from'):
+ data.copy_from()
+
+ # Finish:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Return some values:
+ module.exit_json(
+ changed=data.changed,
+ queries=data.executed_queries,
+ src=data.src,
+ dst=data.dst,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py
new file mode 100644
index 00000000..8fde39ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_db.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host.
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect to (if needed).
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that some versions of pg_dump, the PostgreSQL utility used by the module,
+ return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf),
+ so the module returns changed=True although the dump has not actually been done. Please make sure that your version of
+ pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ maintenance_db:
+ description:
+ - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like C(--format=d).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+notes:
+- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.general.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale # settings.
+ community.general.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.general.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.general.postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands the module tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = 'DROP DATABASE "%s"' % db
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE "%s"' % db]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE "%s"' % template)
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE "%s"' % tablespace)
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
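+ # For illustration only (hypothetical values): db='acme', owner='bob',
+ # encoding='UTF-8' and conn_limit='100' assemble into
+ #   CREATE DATABASE "acme" OWNER "bob" ENCODING %(enc)s CONNECTION LIMIT 100
+ # where %(enc)s is bound from the params dict by psycopg2 at execute time.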
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+ 'Changing LC_CTYPE is not supported.'
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
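+ # In short: .tar and .pgc select a pg_dump output format, while .gz, .bz2 and
+ # .xz select an external compressor (pigz or gzip, bzip2, xz) that the dump is
+ # piped through below; a plain .sql target uses neither.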
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump
+ # Using shell pipe has no way to return the code of the first command
+ # in a portable way.
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
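+ # For illustration only (hypothetical paths), the composed shell line looks like
+ #   gzip </tmp/.../pg_fifo > /tmp/acme.sql.gz & pg_dump ... acme >/tmp/.../pg_fifo
+ # so the compressor reads from the fifo in the background while pg_dump writes
+ # into it, and pg_dump's exit status is the one the module sees.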
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ env = os.environ.copy()
+ if password:
+ env = {"PGPASSWORD": password}
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
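+
+# For illustration only (hypothetical connection values):
+#   login_flags('acme', 'db1.example.com', 5432, 'admin')
+# returns [' --dbname=acme', ' --host=db1.example.com', ' --port=5432', ' --username=admin'],
+# which "".join(...) later turns into one argument string appended to the command.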
+
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+ return rc, stdout, stderr, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+ trust_input = module.params['trust_input']
+
+ # Check input
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)
+
+ raw_connection = state in ("dump", "restore")
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+ # To use defaults values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+ method = state == "dump" and db_dump or db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py
new file mode 100644
index 00000000..3fa82dac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ext.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: no
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified, the latest extension version will be created.
+ - It can't downgrade an extension version.
+ When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
+ - Set I(version=latest) to update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- The default authentication assumes that you are either logging in as
+ or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+ then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+ and C(python-psycopg2) packages on the remote host before using this module.
+- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+requirements: [ psycopg2 ]
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension from the database acme
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 with cascade
+ community.general.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme cascade
+ community.general.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: yes
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it if it's already created
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Assuming extension foo is created, update it to the latest version
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+query:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+from distutils.version import LooseVersion
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+
+def ext_delete(cursor, ext, cascade):
+ if ext_exists(cursor, ext):
+ query = "DROP EXTENSION \"%s\"" % ext
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def ext_update_version(cursor, ext, version):
+ """Update extension version.
+
+ Return True if success.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ version (str) -- extension version
+ """
+ query = "ALTER EXTENSION \"%s\" UPDATE" % ext
+ params = {}
+
+ if version != 'latest':
+ query += " TO %(ver)s"
+ params['ver'] = version
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+
+ return True
+
+
+def ext_create(cursor, ext, schema, cascade, version):
+ query = "CREATE EXTENSION \"%s\"" % ext
+ params = {}
+
+ if schema:
+ query += " WITH SCHEMA \"%s\"" % schema
+ if version:
+ query += " VERSION %(ver)s"
+ params['ver'] = version
+ if cascade:
+ query += " CASCADE"
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+ return True
+
+
+def ext_get_versions(cursor, ext):
+ """
+ Get the current created extension version and available versions.
+
+ Return tuple (current_version, [list of available versions]).
+
+ Note: the list of available versions contains only versions
+ that are higher than the currently created version.
+ If the extension is not created, this list will contain all
+ available versions.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ """
+
+ # 1. Get the current extension version:
+ query = ("SELECT extversion FROM pg_catalog.pg_extension "
+ "WHERE extname = %(ext)s")
+
+ current_version = '0'
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchone()
+ if res:
+ current_version = res[0]
+
+ # 2. Get available versions:
+ query = ("SELECT version FROM pg_available_extension_versions "
+ "WHERE name = %(ext)s")
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchall()
+
+ available_versions = parse_ext_versions(current_version, res)
+
+ if current_version == '0':
+ current_version = False
+
+ return (current_version, available_versions)
+
+
+def parse_ext_versions(current_version, ext_ver_list):
+ """Parse ext versions.
+
+ Args:
+ current_version (str) -- version to compare elements of ext_ver_list with
+ ext_ver_list (list) -- list containing dicts with versions
+
+ Return a sorted list with versions that are higher than current_version.
+
+ Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped.
+ """
+ available_versions = []
+
+ for line in ext_ver_list:
+ if line['version'] == 'unpackaged':
+ continue
+
+ try:
+ if LooseVersion(line['version']) > LooseVersion(current_version):
+ available_versions.append(line['version'])
+ except Exception:
+ # When a version cannot be compared, skip it
+ # (there's a note in the documentation)
+ continue
+
+ return sorted(available_versions, key=LooseVersion)
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ ext=dict(type="str", required=True, aliases=["name"]),
+ schema=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ cascade=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ version=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ ext = module.params["ext"]
+ schema = module.params["schema"]
+ state = module.params["state"]
+ cascade = module.params["cascade"]
+ version = module.params["version"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ changed = False
+
+ if not trust_input:
+ check_input(module, ext, schema, version, session_role)
+
+ if version and state == 'absent':
+ module.warn("Parameter version is ignored when state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ # Get extension info and available versions:
+ curr_version, available_versions = ext_get_versions(cursor, ext)
+
+ if state == "present":
+ if version == 'latest':
+ if available_versions:
+ version = available_versions[-1]
+ else:
+ version = ''
+
+ if version:
+ # If the specific version is passed and it is not available for update:
+ if version not in available_versions:
+ if not curr_version:
+ module.fail_json(msg="Passed version '%s' is not available" % version)
+
+ elif LooseVersion(curr_version) == LooseVersion(version):
+ changed = False
+
+ else:
+ module.fail_json(msg="Passed version '%s' is lower than "
+ "the current created version '%s' or "
+ "the passed version is not available" % (version, curr_version))
+
+ # If the specific version is passed and it is higher than the current version:
+ if curr_version:
+ if LooseVersion(curr_version) < LooseVersion(version):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_update_version(cursor, ext, version)
+
+ # If the specific version is passed and it is created now:
+ if curr_version == version:
+ changed = False
+
+ # If the ext doesn't exist in the DB but is available for installation:
+ elif not curr_version and available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If version is not passed:
+ else:
+ if not curr_version:
+ # If the ext doesn't exist in the DB but is available for installation:
+ if available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If the ext doesn't exist and is not available for installation:
+ else:
+ module.fail_json(msg="Extension %s is not installed" % ext)
+
+ elif state == "absent":
+ if curr_version:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_delete(cursor, ext, cascade)
+ else:
+ changed = False
+
+ except Exception as e:
+ db_connection.close()
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py
new file mode 100644
index 00000000..6ffee31d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_idx.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+ - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+ - List of index columns that need to be covered by index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+ - Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the building process.
+ For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+ - If the building process is interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+ In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=yes).
+ type: bool
+ default: yes
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: no
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ required: false
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+ - It is used only with I(state=absent).
+ - Mutually exclusive with I(concurrent=yes).
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.general.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below, the pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.general.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: no
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: yes
+ concurrent: no
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.general.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: yes
+ concurrent: no
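+
+# An illustrative task (not part of the original examples), assuming any of the
+# supported index types (btree, hash, gist, spgist, gin, brin) can be passed via idxtype:
+- name: Create brin index events_ts_idx concurrently on column created_at of table events
+  community.general.postgresql_idx:
+    db: acme
+    table: events
+    columns: created_at
+    idxname: events_ts_idx
+    idxtype: brin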
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+ description: Query that the module tried to execute.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Index(object):
+
+ """Class for working with PostgreSQL indexes.
+
+ TODO:
+ 1. Add possibility to change ownership
+ 2. Add possibility to change tablespace
+ 3. Add list called executed_queries (executed_query should be left too)
+ 4. Use self.module instead of passing arguments to the methods whenever possible
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+ exists (bool) -- flag the index exists in the DB or not
+ info (dict) -- dict that contains information about the index
+ executed_query (str) -- executed query
+ """
+
+ def __init__(self, module, cursor, schema, name):
+ self.name = name
+ if schema:
+ self.schema = schema
+ else:
+ self.schema = 'public'
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'name': self.name,
+ 'state': 'absent',
+ 'schema': '',
+ 'tblname': '',
+ 'tblspace': '',
+ 'valid': True,
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_query = ''
+
+ def get_info(self):
+ """Refresh index info.
+
+ Return self.info dict.
+ """
+ self.__exists_in_db()
+ return self.info
+
+ def __exists_in_db(self):
+ """Check index existence, collect info, add it to self.info dict.
+
+ Return True if the index exists, otherwise, return False.
+ """
+ query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
+ "pi.indisvalid, c.reloptions "
+ "FROM pg_catalog.pg_indexes AS i "
+ "JOIN pg_catalog.pg_class AS c "
+ "ON i.indexname = c.relname "
+ "JOIN pg_catalog.pg_index AS pi "
+ "ON c.oid = pi.indexrelid "
+ "WHERE i.indexname = %(name)s")
+
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ name=self.name,
+ state='present',
+ schema=res[0][0],
+ tblname=res[0][1],
+ tblspace=res[0][2] if res[0][2] else '',
+ valid=res[0][3],
+ storage_params=res[0][4] if res[0][4] else [],
+ )
+ return True
+
+ else:
+ self.exists = False
+ return False
+
+ def create(self, tblname, idxtype, columns, cond, tblspace,
+ storage_params, concurrent=True, unique=False):
+ """Create PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Args:
+ tblname (str) -- name of a table for the index
+ idxtype (str) -- type of the index like BTREE, BRIN, etc
+ columns (str) -- string of comma-separated columns that need to be covered by index
+ cond (str) -- condition (WHERE clause) for a partial index
+ tblspace (str) -- tablespace for storing the index
+ storage_params (str) -- string of comma-separated storage parameters
+
+ Kwargs:
+ concurrent (bool) -- build index in concurrent mode, default True
+ unique (bool) -- create a unique index, default False
+ """
+ if self.exists:
+ return False
+
+ if idxtype is None:
+ idxtype = "BTREE"
+
+ query = 'CREATE'
+
+ if unique:
+ query += ' UNIQUE'
+
+ query += ' INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"' % self.name
+
+ query += ' ON "%s"."%s" ' % (self.schema, tblname)
+
+ query += 'USING %s (%s)' % (idxtype, columns)
+
+ if storage_params:
+ query += ' WITH (%s)' % storage_params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if cond:
+ query += ' WHERE %s' % cond
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+ def drop(self, cascade=False, concurrent=True):
+ """Drop PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Kwargs:
+ cascade (bool) -- automatically drop objects that depend on the index,
+ default False
+ concurrent (bool) -- drop the index in concurrent mode, default True
+ """
+ if not self.exists:
+ return False
+
+ query = 'DROP INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"."%s"' % (self.schema, self.name)
+
+ if cascade:
+ query += ' CASCADE'
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ idxname=dict(type='str', required=True, aliases=['name']),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ concurrent=dict(type='bool', default=True),
+ unique=dict(type='bool', default=False),
+ table=dict(type='str'),
+ idxtype=dict(type='str', aliases=['type']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ cond=dict(type='str'),
+ session_role=dict(type='str'),
+ tablespace=dict(type='str'),
+ storage_params=dict(type='list', elements='str'),
+ cascade=dict(type='bool', default=False),
+ schema=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ idxname = module.params["idxname"]
+ state = module.params["state"]
+ concurrent = module.params["concurrent"]
+ unique = module.params["unique"]
+ table = module.params["table"]
+ idxtype = module.params["idxtype"]
+ columns = module.params["columns"]
+ cond = module.params["cond"]
+ tablespace = module.params["tablespace"]
+ storage_params = module.params["storage_params"]
+ cascade = module.params["cascade"]
+ schema = module.params["schema"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, idxname, session_role, schema, table, columns,
+ tablespace, storage_params, cond)
+
+ if concurrent and cascade:
+ module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
+
+ if unique and (idxtype and idxtype != 'btree'):
+ module.fail_json(msg="Only btree currently supports unique indexes")
+
+ if state == 'present':
+ if not table:
+ module.fail_json(msg="Table must be specified")
+ if not columns:
+ module.fail_json(msg="At least one column must be specified")
+ else:
+ if table or columns or cond or idxtype or tablespace:
+ module.fail_json(msg="Index %s is going to be removed, so it does not "
+ "make sense to pass a table name, columns, conditions, "
+ "index type, or tablespace" % idxname)
+
+ if cascade and state != 'absent':
+ module.fail_json(msg="cascade parameter used only with state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Set defaults:
+ changed = False
+
+ # Do job:
+ index = Index(module, cursor, schema, idxname)
+ kw = index.get_info()
+ kw['query'] = ''
+
+ #
+ # check_mode start
+ if module.check_mode:
+ if state == 'present' and index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'present' and not index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+
+ elif state == 'absent' and not index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'absent' and index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+ # check_mode end
+ #
+
+ if state == "present":
+ if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
+ module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
+
+ columns = ','.join(columns)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
+
+ if changed:
+ kw = index.get_info()
+ kw['state'] = 'present'
+ kw['query'] = index.executed_query
+
+ else:
+ changed = index.drop(cascade, concurrent)
+
+ if changed:
+ kw['state'] = 'absent'
+ kw['query'] = index.executed_query
+
+ if not kw['valid']:
+ db_connection.rollback()
+ module.warn("Index %s is invalid! ROLLBACK" % idxname)
+
+ if not concurrent:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py
new file mode 100644
index 00000000..aeec8651
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_info.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+ - Limit the collected information by comma separated string or YAML list.
+ - Allowable values are C(version),
+ C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+ C(replications), C(repl_slots).
+ - By default, collects all subsets.
+ - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples).
+ - You can use '!' before value (for example, C(!settings)) to exclude it from the information.
+ - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
+ the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_ping
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 and lower, become the pgsql user
+# and pass "postgres" as the database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: yes
+ become_user: pgsql
+ community.general.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter:
+ - "!databases"
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+in_recovery:
+ description: Indicates if the service is in recovery mode or not.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0 } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+ description: Extension version.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if the pg_stat_replication view exists
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: User is a superuser or not.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+ - List of settings that are pending restart to be set.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+ or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+ - True if the value has been changed in the configuration file but requires a restart to take effect; false otherwise.
+ - Returns only if C(settings) is passed.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgDbConn(object):
+ """Auxiliary class for working with PostgreSQL connection objects.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class that
+ contains connection parameters.
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.db_conn = None
+ self.cursor = None
+
+ def connect(self):
+ """Connect to a PostgreSQL database and return a cursor object.
+
+ Note: connection parameters are passed by self.module object.
+ """
+ conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False)
+ self.db_conn = connect_to_db(self.module, conn_params)
+ return self.db_conn.cursor(cursor_factory=DictCursor)
+
+ def reconnect(self, dbname):
+ """Reconnect to another database and return a PostgreSQL cursor object.
+
+ Arguments:
+ dbname (string): Database name to connect to.
+ """
+ self.db_conn.close()
+
+ self.module.params['database'] = dbname
+ return self.connect()
+
+
+class PgClusterInfo(object):
+ """Class for collection information about a PostgreSQL instance.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class.
+ db_conn_obj (psycopg2.connect): PostgreSQL connection object.
+ """
+
+ def __init__(self, module, db_conn_obj):
+ self.module = module
+ self.db_obj = db_conn_obj
+ self.cursor = db_conn_obj.connect()
+ self.pg_info = {
+ "version": {},
+ "in_recovery": None,
+ "tablespaces": {},
+ "databases": {},
+ "replications": {},
+ "repl_slots": {},
+ "settings": {},
+ "roles": {},
+ "pending_restart_settings": [],
+ }
+
+ def collect(self, val_list=False):
+ """Collect information based on 'filter' option."""
+ subset_map = {
+ "version": self.get_pg_version,
+ "in_recovery": self.get_recovery_state,
+ "tablespaces": self.get_tablespaces,
+ "databases": self.get_db_info,
+ "replications": self.get_repl_info,
+ "repl_slots": self.get_rslot_info,
+ "settings": self.get_settings,
+ "roles": self.get_role_info,
+ }
+
+ incl_list = []
+ excl_list = []
+ # Notice: incl_list and excl_list
+ # don't make sense together, therefore,
+ # if incl_list is not empty, we collect
+ # only values from it:
+ if val_list:
+ for i in val_list:
+ if i[0] != '!':
+ incl_list.append(i)
+ else:
+ excl_list.append(i.lstrip('!'))
+
+ if incl_list:
+ for s in subset_map:
+ for i in incl_list:
+ if fnmatch(s, i):
+ subset_map[s]()
+ break
+ elif excl_list:
+ found = False
+ # Collect info:
+ for s in subset_map:
+ for e in excl_list:
+ if fnmatch(s, e):
+ found = True
+
+ if not found:
+ subset_map[s]()
+ else:
+ found = False
+
+ # Default behaviour, if include or exclude is not passed:
+ else:
+ # Just collect info for each item:
+ for s in subset_map:
+ subset_map[s]()
+
+ return self.pg_info
+
+ def get_pub_info(self):
+ """Get publication statistics."""
+ query = ("SELECT p.*, r.rolname AS ownername "
+ "FROM pg_catalog.pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ publications = {}
+
+ for elem in result:
+ if not publications.get(elem['pubname']):
+ publications[elem['pubname']] = {}
+
+ for key, val in iteritems(elem):
+ if key != 'pubname':
+ publications[elem['pubname']][key] = val
+
+ return publications
+
+ def get_subscr_info(self):
+ """Get subscription statistics."""
+ query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname "
+ "FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ subscr_info = {}
+
+ for elem in result:
+ if not subscr_info.get(elem['dbname']):
+ subscr_info[elem['dbname']] = {}
+
+ if not subscr_info[elem['dbname']].get(elem['subname']):
+ subscr_info[elem['dbname']][elem['subname']] = {}
+
+ for key, val in iteritems(elem):
+ if key not in ('subname', 'dbname'):
+ subscr_info[elem['dbname']][elem['subname']][key] = val
+
+ return subscr_info
+
+ def get_tablespaces(self):
+ """Get information about tablespaces."""
+ # Check spcoption exists:
+ opt = self.__exec_sql("SELECT column_name "
+ "FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'")
+
+ if not opt:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+ else:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+
+ res = self.__exec_sql(query)
+ ts_dict = {}
+ for i in res:
+ ts_name = i[0]
+ ts_info = dict(
+ spcowner=i[1],
+ spcacl=i[2] if i[2] else '',
+ )
+ if opt:
+ ts_info['spcoptions'] = i[3] if i[3] else []
+
+ ts_dict[ts_name] = ts_info
+
+ self.pg_info["tablespaces"] = ts_dict
+
+ def get_ext_info(self):
+ """Get information about existing extensions."""
+ # Check that pg_extension exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_extension')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
+ "FROM pg_catalog.pg_extension AS e "
+ "LEFT JOIN pg_catalog.pg_namespace AS n "
+ "ON n.oid = e.extnamespace "
+ "LEFT JOIN pg_catalog.pg_description AS c "
+ "ON c.objoid = e.oid "
+ "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
+ res = self.__exec_sql(query)
+ ext_dict = {}
+ for i in res:
+ ext_ver = i[1].split('.')
+
+ ext_dict[i[0]] = dict(
+ extversion=dict(
+ major=int(ext_ver[0]),
+ minor=int(ext_ver[1]),
+ ),
+ nspname=i[2],
+ description=i[3],
+ )
+
+ return ext_dict
+
+ def get_role_info(self):
+ """Get information about roles (in PgSQL groups and users are roles)."""
+ query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
+ "r.rolvaliduntil, "
+ "ARRAY(SELECT b.rolname "
+ "FROM pg_catalog.pg_auth_members AS m "
+ "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) AS memberof "
+ "FROM pg_catalog.pg_roles AS r "
+ "WHERE r.rolname !~ '^pg_'")
+
+ res = self.__exec_sql(query)
+ rol_dict = {}
+ for i in res:
+ rol_dict[i[0]] = dict(
+ superuser=i[1],
+ canlogin=i[2],
+ valid_until=i[3] if i[3] else '',
+ member_of=i[4] if i[4] else [],
+ )
+
+ self.pg_info["roles"] = rol_dict
+
+ def get_rslot_info(self):
+ """Get information about replication slots if exist."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_replication_slots')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT slot_name, plugin, slot_type, database, "
+ "active FROM pg_replication_slots")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ rslot_dict = {}
+ for i in res:
+ rslot_dict[i[0]] = dict(
+ plugin=i[1],
+ slot_type=i[2],
+ database=i[3],
+ active=i[4],
+ )
+
+ self.pg_info["repl_slots"] = rslot_dict
+
+ def get_settings(self):
+ """Get server settings."""
+ # Check pending restart column exists:
+ pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_settings' "
+ "AND column_name = 'pending_restart'")
+ if not pend_rest_col_exists:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile "
+ "FROM pg_settings")
+ else:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile, pending_restart "
+ "FROM pg_settings")
+
+ res = self.__exec_sql(query)
+
+ set_dict = {}
+ for i in res:
+ val_in_bytes = None
+ setting = i[1]
+ if i[2]:
+ unit = i[2]
+ else:
+ unit = ''
+
+ if unit == 'kB':
+ val_in_bytes = int(setting) * 1024
+
+ elif unit == '8kB':
+ val_in_bytes = int(setting) * 1024 * 8
+
+ elif unit == 'MB':
+ val_in_bytes = int(setting) * 1024 * 1024
+
+ if val_in_bytes is not None and val_in_bytes < 0:
+ val_in_bytes = 0
+
+ setting_name = i[0]
+ pretty_val = self.__get_pretty_val(setting_name)
+
+ pending_restart = None
+ if pend_rest_col_exists:
+ pending_restart = i[9]
+
+ set_dict[setting_name] = dict(
+ setting=setting,
+ unit=unit,
+ context=i[3],
+ vartype=i[4],
+ boot_val=i[5] if i[5] else '',
+ min_val=i[6] if i[6] else '',
+ max_val=i[7] if i[7] else '',
+ sourcefile=i[8] if i[8] else '',
+ pretty_val=pretty_val,
+ )
+ if val_in_bytes is not None:
+ set_dict[setting_name]['val_in_bytes'] = val_in_bytes
+
+ if pending_restart is not None:
+ set_dict[setting_name]['pending_restart'] = pending_restart
+ if pending_restart:
+ self.pg_info["pending_restart_settings"].append(setting_name)
+
+ self.pg_info["settings"] = set_dict
+
+ def get_repl_info(self):
+ """Get information about replication if the server is a master."""
+ # Check that pg_stat_replication exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_stat_replication')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
+ "r.client_hostname, r.backend_start::text, r.state "
+ "FROM pg_stat_replication AS r "
+ "JOIN pg_authid AS a ON r.usesysid = a.oid")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ repl_dict = {}
+ for i in res:
+ repl_dict[i[0]] = dict(
+ usename=i[1],
+ app_name=i[2] if i[2] else '',
+ client_addr=i[3],
+ client_hostname=i[4] if i[4] else '',
+ backend_start=i[5],
+ state=i[6],
+ )
+
+ self.pg_info["replications"] = repl_dict
+
+ def get_lang_info(self):
+ """Get information about current supported languages."""
+ query = ("SELECT l.lanname, a.rolname, l.lanacl "
+ "FROM pg_language AS l "
+ "JOIN pg_authid AS a ON l.lanowner = a.oid")
+ res = self.__exec_sql(query)
+ lang_dict = {}
+ for i in res:
+ lang_dict[i[0]] = dict(
+ lanowner=i[1],
+ lanacl=i[2] if i[2] else '',
+ )
+
+ return lang_dict
+
+ def get_namespaces(self):
+ """Get information about namespaces."""
+ query = ("SELECT n.nspname, a.rolname, n.nspacl "
+ "FROM pg_catalog.pg_namespace AS n "
+ "JOIN pg_authid AS a ON a.oid = n.nspowner")
+ res = self.__exec_sql(query)
+
+ nsp_dict = {}
+ for i in res:
+ nsp_dict[i[0]] = dict(
+ nspowner=i[1],
+ nspacl=i[2] if i[2] else '',
+ )
+
+ return nsp_dict
+
+ def get_pg_version(self):
+ """Get major and minor PostgreSQL server version."""
+ query = "SELECT version()"
+ raw = self.__exec_sql(query)[0][0]
+ raw = raw.split()[1].split('.')
+ self.pg_info["version"] = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
+
+ def get_recovery_state(self):
+ """Get if the service is in recovery mode."""
+ self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]
+
+ def get_db_info(self):
+ """Get information about the current database."""
+ # Following query returns:
+ # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size
+ query = ("SELECT d.datname, "
+ "pg_catalog.pg_get_userbyid(d.datdba), "
+ "pg_catalog.pg_encoding_to_char(d.encoding), "
+ "d.datcollate, "
+ "d.datctype, "
+ "pg_catalog.array_to_string(d.datacl, E'\n'), "
+ "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
+ "THEN pg_catalog.pg_database_size(d.datname)::text "
+ "ELSE 'No Access' END, "
+ "t.spcname "
+ "FROM pg_catalog.pg_database AS d "
+ "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
+ "WHERE d.datname != 'template0'")
+
+ res = self.__exec_sql(query)
+
+ db_dict = {}
+ for i in res:
+ db_dict[i[0]] = dict(
+ owner=i[1],
+ encoding=i[2],
+ collate=i[3],
+ ctype=i[4],
+ access_priv=i[5] if i[5] else '',
+ size=i[6],
+ )
+
+ if self.cursor.connection.server_version >= 100000:
+ subscr_info = self.get_subscr_info()
+
+ for datname in db_dict:
+ self.cursor = self.db_obj.reconnect(datname)
+ db_dict[datname]['namespaces'] = self.get_namespaces()
+ db_dict[datname]['extensions'] = self.get_ext_info()
+ db_dict[datname]['languages'] = self.get_lang_info()
+ if self.cursor.connection.server_version >= 100000:
+ db_dict[datname]['publications'] = self.get_pub_info()
+ db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})
+
+ self.pg_info["databases"] = db_dict
+
+ def __get_pretty_val(self, setting):
+ """Get setting's value represented by SHOW command."""
+ return self.__exec_sql("SHOW %s" % setting)[0][0]
+
+ def __exec_sql(self, query):
+ """Execute SQL and return the result."""
+ try:
+ self.cursor.execute(query)
+ res = self.cursor.fetchall()
+ if res:
+ return res
+ except Exception as e:
+ self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ self.cursor.close()
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params['filter']
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ db_conn_obj = PgDbConn(module)
+
+ # Do job:
+ pg_info = PgClusterInfo(module, db_conn_obj)
+
+ module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py
new file mode 100644
index 00000000..8b28cd9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_lang.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+ the database from being removed. In that case, you can specify I(cascade=yes) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'no'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'no'
+ fail_on_drop:
+ description:
+ - If C(yes), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (used by the db-system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'yes'
+ cascade:
+ description:
+ - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'no'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.general.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: yes
+ force_trust: yes
+
+- name: Remove language pltclu from database testdb
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: yes
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+ community.general.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+ cursor.execute(query, {'trust': trust, 'lang': lang})
+ executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+ return True
+
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+ else:
+ query = "DROP LANGUAGE \"%s\"" % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
+
+
+def get_lang_owner(cursor, lang):
+ """Get language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ """
+ query = ("SELECT r.rolname FROM pg_language l "
+ "JOIN pg_roles r ON l.lanowner = r.oid "
+ "WHERE l.lanname = %(lang)s")
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def set_lang_owner(cursor, lang, owner):
+ """Set language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ owner (str): name of new owner.
+ """
+ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ lang=dict(type="str", required=True, aliases=["name"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust=dict(type="bool", default="no"),
+ force_trust=dict(type="bool", default="no"),
+ cascade=dict(type="bool", default="no"),
+ fail_on_drop=dict(type="bool", default="yes"),
+ session_role=dict(type="str"),
+ owner=dict(type="str"),
+ trust_input=dict(type="bool", default="yes")
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ db = module.params["db"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+ owner = module.params["owner"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, lang, session_role, owner)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor()
+
+ changed = False
+ kw = {'db': db, 'lang': lang, 'trust': trust}
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+ if (lang_trusted and not trust) or (not lang_trusted and trust):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = ("unable to drop language, use cascade "
+ "to delete dependencies or fail_on_drop=no to ignore")
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if owner and state == 'present':
+ if lang_exists(cursor, lang):
+ if owner != get_lang_owner(cursor, lang):
+ changed = set_lang_owner(cursor, lang, owner)
+
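+ # Commit the open transaction only when a change was made outside of check
+ # mode; in check mode no DDL was executed, so rolling back leaves the
+ # database untouched.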
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py
new file mode 100644
index 00000000..3292a6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_membership.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) with the M(community.general.postgresql_user) module using I(role_attr_flags=NOLOGIN)
+- 2) grant the desired privileges to it with the M(community.general.postgresql_privs) module
+- 3) add the desired PostgreSQL users to the new group (groups) with this module (see the last example below)
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
+ default: yes
+ type: bool
+ state:
+ description:
+ - Membership state.
+ - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.general.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# You can also pass the role list as a comma-separated string, for example target_roles: alice,bob
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.general.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: no
+ state: absent
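+
+# A minimal sketch of the common use case from the description above; the role,
+# database, and privilege names are placeholders.
+- name: Create group new_group without LOGIN
+ community.general.postgresql_user:
+ name: new_group
+ role_attr_flags: NOLOGIN
+
+- name: Grant SELECT on all tables in schema public of database acme to new_group
+ community.general.postgresql_privs:
+ database: acme
+ roles: new_group
+ type: table
+ objs: ALL_IN_SCHEMA
+ privs: SELECT
+
+- name: Add alice and bob to new_group
+ community.general.postgresql_membership:
+ groups: new_group
+ target_roles:
+ - alice
+ - bob
+ state: present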
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+ description: Membership state that the module tried to set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
+ target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
+ fail_on_role=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ groups = module.params['groups']
+ target_roles = module.params['target_roles']
+ fail_on_role = module.params['fail_on_role']
+ state = module.params['state']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, groups, target_roles, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+
+ pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
+
+ if state == 'present':
+ pg_membership.grant()
+
+ elif state == 'absent':
+ pg_membership.revoke()
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ return_dict = dict(
+ changed=pg_membership.changed,
+ state=state,
+ groups=pg_membership.groups,
+ target_roles=pg_membership.target_roles,
+ queries=pg_membership.executed_queries,
+ )
+
+ if state == 'present':
+ return_dict['granted'] = pg_membership.granted
+ elif state == 'absent':
+ return_dict['revoked'] = pg_membership.revoked
+
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py
new file mode 100644
index 00000000..06a09c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_owner.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change the owner of a PostgreSQL database object
+description:
+- Change the owner of a PostgreSQL database object.
+- Also allows reassigning the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+ - Role (user/group) to set as an I(obj_name) owner.
+ type: str
+ required: yes
+ obj_name:
+ description:
+ - Name of a database object to change ownership.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+ - The list of role names. The ownership of all the objects within the current database,
+ and of all shared objects (databases, tablespaces), owned by these roles will be reassigned to I(new_owner).
+ - Pay attention - this reassigns all objects owned by these roles in the I(db)!
+ - If the roles exist, the module always returns C(changed=true).
+ - Cannot reassign ownership of objects that are required by the database system.
+ - Mutually exclusive with I(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(yes), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: yes
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.general.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all objects in database bar owned by bob to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all objects in database bar owned by bob and bill to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+ """Class for changing ownership of PostgreSQL objects.
+
+ Arguments:
+ module (AnsibleModule): Object of Ansible module class.
+ cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+ role (str): Role name to set as a new owner of objects.
+
+ Important:
+ If you want to add handling of a new type of database objects:
+ 1. Add a specific method for this like self.__set_db_owner(), etc.
+ 2. Add a condition with a check of ownership for new type objects to self.__is_owner()
+ 3. Add a condition with invocation of the specific method to self.set_owner()
+ 4. Add the information to the module documentation
+ That's all.
+ """
+
+ def __init__(self, module, cursor, role):
+ self.module = module
+ self.cursor = cursor
+ self.check_role_exists(role)
+ self.role = role
+ self.changed = False
+ self.executed_queries = []
+ self.obj_name = ''
+ self.obj_type = ''
+
+ def check_role_exists(self, role, fail_on_role=True):
+ """Check the role exists or not.
+
+ Arguments:
+ role (str): Role name.
+ fail_on_role (bool): If True, fail when the role does not exist.
+ Otherwise just warn and continue.
+ """
+ if not self.__role_exists(role):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % role)
+ else:
+ self.module.warn("Role '%s' does not exist, pass" % role)
+
+ return False
+
+ else:
+ return True
+
+ def reassign(self, old_owners, fail_on_role):
+ """Implements REASSIGN OWNED BY command.
+
+ If success, set self.changed as True.
+
+ Arguments:
+ old_owners (list): The ownership of all the objects within
+ the current database, and of all shared objects (databases, tablespaces),
+ owned by these roles will be reassigned to self.role.
+ fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+ Otherwise just warn and continue.
+ """
+ roles = []
+ for r in old_owners:
+ if self.check_role_exists(r, fail_on_role):
+ roles.append('"%s"' % r)
+
+ # Roles do not exist, nothing to do, exit:
+ if not roles:
+ return False
+
+ old_owners = ','.join(roles)
+
+ query = ['REASSIGN OWNED BY']
+ query.append(old_owners)
+ query.append('TO "%s"' % self.role)
+ query = ' '.join(query)
+
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, obj_type, obj_name):
+ """Change owner of a database object.
+
+ Arguments:
+ obj_type (str): Type of object (like database, table, view, etc.).
+ obj_name (str): Object name.
+ """
+ self.obj_name = obj_name
+ self.obj_type = obj_type
+
+ # if a new_owner is the object owner now,
+ # nothing to do:
+ if self.__is_owner():
+ return False
+
+ if obj_type == 'database':
+ self.__set_db_owner()
+
+ elif obj_type == 'function':
+ self.__set_func_owner()
+
+ elif obj_type == 'sequence':
+ self.__set_seq_owner()
+
+ elif obj_type == 'schema':
+ self.__set_schema_owner()
+
+ elif obj_type == 'table':
+ self.__set_table_owner()
+
+ elif obj_type == 'tablespace':
+ self.__set_tablespace_owner()
+
+ elif obj_type == 'view':
+ self.__set_view_owner()
+
+ elif obj_type == 'matview':
+ self.__set_mat_view_owner()
+
+ def __is_owner(self):
+ """Return True if self.role is the current object owner."""
+ if self.obj_type == 'table':
+ query = ("SELECT 1 FROM pg_tables "
+ "WHERE tablename = %(obj_name)s "
+ "AND tableowner = %(role)s")
+
+ elif self.obj_type == 'database':
+ query = ("SELECT 1 FROM pg_database AS d "
+ "JOIN pg_roles AS r ON d.datdba = r.oid "
+ "WHERE d.datname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'function':
+ query = ("SELECT 1 FROM pg_proc AS f "
+ "JOIN pg_roles AS r ON f.proowner = r.oid "
+ "WHERE f.proname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'sequence':
+ query = ("SELECT 1 FROM pg_class AS c "
+ "JOIN pg_roles AS r ON c.relowner = r.oid "
+ "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'schema':
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %(obj_name)s "
+ "AND schema_owner = %(role)s")
+
+ elif self.obj_type == 'tablespace':
+ query = ("SELECT 1 FROM pg_tablespace AS t "
+ "JOIN pg_roles AS r ON t.spcowner = r.oid "
+ "WHERE t.spcname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'view':
+ query = ("SELECT 1 FROM pg_views "
+ "WHERE viewname = %(obj_name)s "
+ "AND viewowner = %(role)s")
+
+ elif self.obj_type == 'matview':
+ query = ("SELECT 1 FROM pg_matviews "
+ "WHERE matviewname = %(obj_name)s "
+ "AND matviewowner = %(role)s")
+
+ query_params = {'obj_name': self.obj_name, 'role': self.role}
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+ def __set_db_owner(self):
+ """Set the database owner."""
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_func_owner(self):
+ """Set the function owner."""
+ query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_seq_owner(self):
+ """Set the sequence owner."""
+ query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_schema_owner(self):
+ """Set the schema owner."""
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_table_owner(self):
+ """Set the table owner."""
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_tablespace_owner(self):
+ """Set the tablespace owner."""
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_view_owner(self):
+ """Set the view owner."""
+ query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_mat_view_owner(self):
+ """Set the materialized view owner."""
+ query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __role_exists(self, role):
+ """Return True if role exists, otherwise return False."""
+ query_params = {'role': role}
+ query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ new_owner=dict(type='str', required=True),
+ obj_name=dict(type='str'),
+ obj_type=dict(type='str', aliases=['type'], choices=[
+ 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+ reassign_owned_by=dict(type='list', elements='str'),
+ fail_on_role=dict(type='bool', default=True),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['obj_name', 'reassign_owned_by'],
+ ['obj_type', 'reassign_owned_by'],
+ ['obj_name', 'fail_on_role'],
+ ['obj_type', 'fail_on_role'],
+ ],
+ supports_check_mode=True,
+ )
+
+ new_owner = module.params['new_owner']
+ obj_name = module.params['obj_name']
+ obj_type = module.params['obj_type']
+ reassign_owned_by = module.params['reassign_owned_by']
+ fail_on_role = module.params['fail_on_role']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ pg_ownership = PgOwnership(module, cursor, new_owner)
+
+ # if we want to change ownership:
+ if obj_name:
+ pg_ownership.set_owner(obj_type, obj_name)
+
+ # if we want to reassign objects owned by roles:
+ elif reassign_owned_by:
+ pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(
+ changed=pg_ownership.changed,
+ queries=pg_ownership.executed_queries,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py
new file mode 100644
index 00000000..1f484bcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_pg_hba.py
@@ -0,0 +1,745 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+ - The fundamental function of the module is to create or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+ If they are not unique and the key matches the rule being changed, only one of the matching lines will remain for C(state=present), or none for C(state=absent).
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+ The location of the backup is returned in the (backup) variable by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+ - Write backup to a specific backupfile rather than a temp file.
+ type: str
+ create:
+ description:
+ - Create a C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl ]
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+ choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.general 3.0.0.
+ The sort order is now hardcoded to sdu.
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+ - This module also returns the pg_hba info. You can use this module to only retrieve it, by specifying only I(dest).
+ The info can be found in the returned data under key pg_hba, which is a list containing a dict per rule.
+ - This module will sort the resulting C(pg_hba) file if a rule change is required.
+ This could give unexpected results with manually created hba files that were improperly sorted.
+ For example, a rule might have been created for a network first and for an IP address within that network afterwards.
+ In that situation the IP-specific rule never matches and is effectively obsolete in the C(pg_hba) file.
+ After the C(pg_hba) file is rewritten by the M(community.general.postgresql_pg_hba) module, the IP-specific rule is sorted above the network rule.
+ It then starts to match, which can give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+ - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
+'''
+
+RETURN = r'''
+msgs:
+ description: List of textual messages describing what was done
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+ "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+ '''
+ This exception is raised when parsing the pg_hba file ends in an error.
+ '''
+
+
+class PgHbaRuleError(PgHbaError):
+ '''
+ This exception is raised when parsing or handling a pg_hba rule ends in an error.
+ '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+ '''
+ This exception is raised when a new parsed rule is a changed version of an existing rule.
+ '''
+
+
+class PgHbaValueError(PgHbaError):
+ '''
+ This exception is raised when a value in the pg_hba file is invalid.
+ '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+ '''
+ This exception is raised when a value in a pg_hba rule is invalid.
+ '''
+
+
+class PgHba(object):
+ """
+ PgHba object to read/write entries to/from.
+ pg_hba_file - the pg_hba.conf file, typically located in the PostgreSQL data directory
+ """
+ def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
+ if order not in PG_HBA_ORDERS:
+ msg = "invalid order setting {0} (should be one of '{1}')."
+ raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+ self.pg_hba_file = pg_hba_file
+ self.rules = None
+ self.comment = None
+ self.order = order
+ self.backup = backup
+ self.last_backup = None
+ self.create = create
+ self.unchanged()
+ # self.databases will be updated by add_rule and gives some idea of the number of databases
+ # (at least those that are handled by this pg_hba)
+ self.databases = set(['postgres', 'template0', 'template1'])
+
+ # self.users will be updated by add_rule and gives some idea of the number of users
+ # (at least those that are handled by this pg_hba); since these might also be groups with
+ # multiple users, the count might be off, but at least it is some info...
+ self.users = set(['postgres'])
+
+ self.read()
+
+ def unchanged(self):
+ '''
+ This method resets self.diff to an empty default
+ '''
+ self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+ 'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+ def read(self):
+ '''
+ Read in the pg_hba from the system
+ '''
+ self.rules = {}
+ self.comment = []
+ # read the pg_hbafile
+ try:
+ with open(self.pg_hba_file, 'r') as file:
+ for line in file:
+ line = line.strip()
+ # uncomment
+ if '#' in line:
+ line, comment = line.split('#', 1)
+ self.comment.append('#' + comment)
+ try:
+ self.add_rule(PgHbaRule(line=line))
+ except PgHbaRuleError:
+ pass
+ self.unchanged()
+ except IOError:
+ pass
+
+ def write(self, backup_file=''):
+ '''
+ This method writes the PgHba rules (back) to a file.
+ '''
+ if not self.changed():
+ return False
+
+ contents = self.render()
+ if self.pg_hba_file:
+ if not (os.path.isfile(self.pg_hba_file) or self.create):
+ raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+ "Use create option to autocreate.".format(self.pg_hba_file))
+ if self.backup and os.path.isfile(self.pg_hba_file):
+ if backup_file:
+ self.last_backup = backup_file
+ else:
+ __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+ shutil.copy(self.pg_hba_file, self.last_backup)
+ fileh = open(self.pg_hba_file, 'w')
+ else:
+ filed, __path = tempfile.mkstemp(prefix='pg_hba')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(contents)
+ self.unchanged()
+ fileh.close()
+ return True
+
+ def add_rule(self, rule):
+ '''
+ This method can be used to add a rule to the list of rules in this PgHba object
+ '''
+ key = rule.key()
+ try:
+ try:
+ oldrule = self.rules[key]
+ except KeyError:
+ raise PgHbaRuleChanged
+ ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+ ekeys.remove('line')
+ for k in ekeys:
+ if oldrule.get(k) != rule.get(k):
+ raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+ except PgHbaRuleChanged:
+ self.rules[key] = rule
+ self.diff['after']['pg_hba'].append(rule.line())
+ if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+ databases = set(rule['db'].split(','))
+ self.databases.update(databases)
+ if rule['usr'] != 'all':
+ user = rule['usr']
+ if user[0] == '+':
+ user = user[1:]
+ self.users.add(user)
+
+ def remove_rule(self, rule):
+ '''
+ This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+ the rule with the same key.
+ '''
+ keys = rule.key()
+ try:
+ del self.rules[keys]
+ self.diff['before']['pg_hba'].append(rule.line())
+ except KeyError:
+ pass
+
+ def get_rules(self, with_lines=False):
+ '''
+ This method returns all the rules of the PgHba object
+ '''
+ rules = sorted(self.rules.values())
+ for rule in rules:
+ ret = {}
+ for key, value in rule.items():
+ ret[key] = value
+ if not with_lines:
+ if 'line' in ret:
+ del ret['line']
+ else:
+ ret['line'] = rule.line()
+
+ yield ret
+
+ def render(self):
+ '''
+ This method renders the content of the PgHba rules and comments.
+ The returning value can be used directly to write to a new file.
+ '''
+ comment = '\n'.join(self.comment)
+ rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
+ result = comment + '\n' + rule_lines
+ # End it properly with a linefeed (if not already).
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def changed(self):
+ '''
+ This method can be called to detect if the PgHba file has been changed.
+ '''
+ return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
+class PgHbaRule(dict):
+ '''
+ This class represents one rule as defined in a line in a PgHbaFile.
+ '''
+
+ def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+ method=None, options=None, line=None):
+ '''
+ A rule can be built either from the separate fields (contype, databases, users,
+ source, netmask, method, options) or by parsing a raw pg_hba line passed via the
+ line argument.
+ '''
+
+ super(PgHbaRule, self).__init__()
+
+ if line:
+ # Read values from line if parsed
+ self.fromline(line)
+
+ # read rule cols from parsed items
+ rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ # Some sanity checks
+ for key in ['method', 'type']:
+ if key not in self:
+ raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+ if self['method'] not in PG_HBA_METHODS:
+ msg = "invalid method {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+ if self['type'] not in PG_HBA_TYPES:
+ msg = "invalid connection type {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+ if self['type'] == 'local':
+ self.unset('src')
+ self.unset('mask')
+ elif 'src' not in self:
+ raise PgHbaRuleError('Missing src in rule {0}'.format(self))
+ elif '/' in self['src']:
+ self.unset('mask')
+ else:
+ self['src'] = str(self.source())
+ self.unset('mask')
+
+ def unset(self, key):
+ '''
+ This method is used to unset certain columns if they exist
+ '''
+ if key in self:
+ del self[key]
+
+ def line(self):
+ '''
+ This method can be used to return (or generate) the line
+ '''
+ try:
+ return self['line']
+ except KeyError:
+ self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+ return self['line']
+
+ def fromline(self, line):
+ '''
+ split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
+ '''
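+ # For example "host  all  joe  192.168.0.0  255.255.255.0  md5" is split into
+ # {'type': 'host', 'db': 'all', 'usr': 'joe', 'src': '192.168.0.0',
+ # 'mask': '255.255.255.0', 'method': 'md5'}.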
+ if WHITESPACES_RE.sub('', line) == '':
+ # empty line. skip this one...
+ return
+ cols = WHITESPACES_RE.split(line)
+ if len(cols) < 4:
+ msg = "Rule {0} has too few columns."
+ raise PgHbaValueError(msg.format(line))
+ if cols[0] not in PG_HBA_TYPES:
+ msg = "Rule {0} has unknown type: {1}."
+ raise PgHbaValueError(msg.format(line, cols[0]))
+ if cols[0] == 'local':
+ cols.insert(3, None) # No address
+ cols.insert(3, None) # No IP-mask
+ if len(cols) < 6:
+ cols.insert(4, None) # No IP-mask
+ elif cols[5] not in PG_HBA_METHODS:
+ cols.insert(4, None) # No IP-mask
+ if cols[5] not in PG_HBA_METHODS:
+ raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+ if len(cols) < 7:
+ cols.insert(6, None) # No auth-options
+ else:
+ cols[6] = " ".join(cols[6:]) # combine all auth-options
+ rule = dict(zip(PG_HBA_HDR, cols[:7]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ def key(self):
+ '''
+ This method can be used to get the key from a rule.
+ '''
+ if self['type'] == 'local':
+ source = 'local'
+ else:
+ source = str(self.source())
+ return (source, self['db'], self['usr'])
+
+ def source(self):
+ '''
+ This method is used to get the source of a rule as an ipaddress object if possible.
+ '''
+ if 'mask' in self.keys():
+ try:
+ ipaddress.ip_address(u'{0}'.format(self['src']))
+ except ValueError:
+ raise PgHbaValueError('Mask was specified, but source "{0}" '
+ 'is no valid ip'.format(self['src']))
+ # The ipaddress module cannot work with an ipv6 netmask, so let's convert it to a prefixlen.
+ # Furthermore, ipv4 with a bad netmask throws a "Rule {} doesn't seem to be an ip, but has a
+ # mask" error that doesn't describe what is actually going on.
+ try:
+ mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+ except ValueError:
+ raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+ binvalue = "{0:b}".format(int(mask_as_ip))
+ if '01' in binvalue:
+ raise PgHbaValueError('IP mask {0} seems invalid '
+ '(binary value has 1 after 0)'.format(self['mask']))
+ prefixlen = binvalue.count('1')
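+ # e.g. mask 255.255.255.0 has 24 leading '1' bits, so prefixlen becomes 24 and
+ # a src of 192.168.0.0 is turned into the network 192.168.0.0/24 below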
+ sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+ try:
+ return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+ except ValueError:
+ raise PgHbaValueError('{0} is no valid address range'.format(sourcenw))
+
+ try:
+ return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+ except ValueError:
+ return self['src']
+
+ def __lt__(self, other):
+ """This function helps sorted to decide how to sort.
+
+ It just checks itself against the other and decides on some key values
+ if it should be sorted higher or lower in the list.
+ The way it works:
+ For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+ Therefore I chose to use prefix as the weight.
+ So a single IP (/32) should have twice the weight of a /16 network.
+ To keep everything in the same weight scale,
+ - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+ - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+ Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+ which corresponds to ipv6 (0-128).
+ """
+ myweight = self.source_weight()
+ hisweight = other.source_weight()
+ if myweight != hisweight:
+ return myweight > hisweight
+
+ myweight = self.db_weight()
+ hisweight = other.db_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+
+ myweight = self.user_weight()
+ hisweight = other.user_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+ try:
+ return self['src'] < other['src']
+ except TypeError:
+ return self.source_type_weight() < other.source_type_weight()
+ except Exception:
+ # When all else fails, just compare the exact line.
+ return self.line() < other.line()
+
+ def source_weight(self):
+ """Report the weight of this source net.
+
+ Basically this is the netmask, where IPv4 is normalized to IPv6
+ (IPv4/32 has the same weight as IPv6/128).
+ """
+ if self['type'] == 'local':
+ return 130
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return sourceobj.prefixlen * 4
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return sourceobj.prefixlen
+ if isinstance(sourceobj, str):
+ # You can also write all to match any IP address,
+ # samehost to match any of the server's own IP addresses,
+ # or samenet to match any address in any subnet that the server is connected to.
+ if sourceobj == 'all':
+ # (all is considered the full range of all ips, which has a weight of 0)
+ return 0
+ if sourceobj == 'samehost':
+ # (sort samehost second after local)
+ return 129
+ if sourceobj == 'samenet':
+ # Might write some fancy code to determine all prefix's
+ # from all interfaces and find a sane value for this one.
+ # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+ return 96
+ if sourceobj[0] == '.':
+ # suffix matching (domain name), let's assume a very large scale
+ # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+ return 64
+ # hostname, let's assume only one host matches, which is
+ # IPv4/32 or IPv6/128 (both have weight 128)
+ return 128
+ raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
+
+ def source_type_weight(self):
+ """Give a weight on the type of this source.
+
+ Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+ This is a 'when all else fails' solution in __lt__.
+ """
+ if self['type'] == 'local':
+ return 3
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return 2
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return 1
+ if isinstance(sourceobj, str):
+ return 0
+ raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+ def db_weight(self):
+ """Report the weight of the database.
+
+ Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
+ """
+ if self['db'] == 'all':
+ return 100000
+ if self['db'] == 'replication':
+ return 0
+ if self['db'] in ['samerole', 'samegroup']:
+ return 1
+ return 1 + self['db'].count(',')
+
+ def user_weight(self):
+ """Report weight when comparing users."""
+ if self['usr'] == 'all':
+ return 1000000
+ return 1
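+
+
+# Illustrative, commented-out usage of the PgHba and PgHbaRule classes above
+# (never executed by the module); the path is a placeholder:
+#
+# hba = PgHba('/var/lib/postgresql/data/pg_hba.conf')
+# for rule in hba.get_rules():
+#     print(rule)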
+
+
+def main():
+ '''
+ This function is the main function of this module
+ '''
+ # argument_spec = postgres_common_argument_spec()
+ argument_spec = dict()
+ argument_spec.update(
+ address=dict(type='str', default='samehost', aliases=['source', 'src']),
+ backup=dict(type='bool', default=False),
+ backup_file=dict(type='str'),
+ contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
+ create=dict(type='bool', default=False),
+ databases=dict(type='str', default='all'),
+ dest=dict(type='path', required=True),
+ method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
+ netmask=dict(type='str'),
+ options=dict(type='str'),
+ order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ users=dict(type='str', default='all')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ if IPADDRESS_IMP_ERR is not None:
+ module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
+
+ contype = module.params["contype"]
+ create = bool(module.params["create"] or module.check_mode)
+ if module.check_mode:
+ backup = False
+ else:
+ backup = module.params['backup']
+ backup_file = module.params['backup_file']
+ databases = module.params["databases"]
+ dest = module.params["dest"]
+
+ method = module.params["method"]
+ netmask = module.params["netmask"]
+ options = module.params["options"]
+ order = module.params["order"]
+ source = module.params["address"]
+ state = module.params["state"]
+ users = module.params["users"]
+
+ ret = {'msgs': []}
+ try:
+ pg_hba = PgHba(dest, order, backup=backup, create=create)
+ except PgHbaError as error:
+ module.fail_json(msg='Error reading file:\n{0}'.format(error))
+
+ if contype:
+ try:
+ for database in databases.split(','):
+ for user in users.split(','):
+ rule = PgHbaRule(contype, database, user, source, netmask, method, options)
+ if state == "present":
+ ret['msgs'].append('Adding')
+ pg_hba.add_rule(rule)
+ else:
+ ret['msgs'].append('Removing')
+ pg_hba.remove_rule(rule)
+ except PgHbaError as error:
+ module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
+ file_args = module.load_file_common_arguments(module.params)
+ ret['changed'] = changed = pg_hba.changed()
+ if changed:
+ ret['msgs'].append('Changed')
+ ret['diff'] = pg_hba.diff
+
+ if not module.check_mode:
+ ret['msgs'].append('Writing')
+ try:
+ if pg_hba.write(backup_file):
+ module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
+ expand=False)
+ except PgHbaError as error:
+ module.fail_json(msg='Error writing file:\n{0}'.format(error))
+ if pg_hba.last_backup:
+ ret['backup_file'] = pg_hba.last_backup
+
+ ret['pg_hba'] = list(pg_hba.get_rules())
+ module.exit_json(**ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py
new file mode 100644
index 00000000..240cea57
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_ping.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_info
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# In the example below you need to generate the certificates beforehand.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
+ community.general.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 10, minor: 1 }
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgPing(object):
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.is_available = False
+ self.version = {}
+
+ def do(self):
+ self.get_pg_version()
+ return (self.is_available, self.version)
+
+ def get_pg_version(self):
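+ # "SELECT version()" typically returns a string like
+ # "PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc ...";
+ # the second word is split on '.' to build {'major': 10, 'minor': 1}.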
+ query = "SELECT version()"
+ raw = exec_sql(self, query, add_to_executed=False)[0][0]
+ if raw:
+ self.is_available = True
+ raw = raw.split()[1].split('.')
+ self.version = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ # Set some default values:
+ cursor = False
+ db_connection = False
+ result = dict(
+ changed=False,
+ is_available=False,
+ server_version=dict(),
+ )
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
+
+ if db_connection is not None:
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Do job:
+ pg_ping = PgPing(module, cursor)
+ if cursor:
+ # If connection established:
+ result["is_available"], result["server_version"] = pg_ping.do()
+ db_connection.rollback()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py
new file mode 100644
index 00000000..e8d64f36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_privs.py
@@ -0,0 +1,1171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+    - The C(procedure) choice is supported since collection version 1.3.0 and requires PostgreSQL 11 or later.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+               group, language, table, tablespace, schema, sequence, type, procedure]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+      the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8)
+ - C(procedure) is supported since PostgreSQL 11 and M(community.general) collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is I(function) or I(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+    - Note that for built-in types, when I(type=type),
+      I(schema) can be C(pg_catalog) or C(information_schema).
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+    - A list of existing role (user/group) names. The specified default
+      privileges will apply to database objects subsequently created by these roles.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases:
+ - ssl_rootcert
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+ - When adding default privileges, the module always implicitly adds ``USAGE ON TYPES``.
+ - To avoid this behavior, set I(usage_on_types) to C(no).
+    - Added to preserve backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: yes
+ version_added: '1.2.0'
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+  access via privileges granted to any role that R is a member of, including C(PUBLIC).
+- Note that when you use the C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- community.general.postgres
+
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.general.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ community.general.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.general.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.general.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set to ALL_DEFAULT or to one or more of TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object classes and privileges, see the next examples
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# Objs must be set to ALL_DEFAULT or to one or more of TABLES/SEQUENCES/TYPES/FUNCTIONS
+# ALL_DEFAULT works only with privs=ALL
+# For specific object classes, the privileges must be valid for that class
+# (for example SELECT for TABLES/SEQUENCES and USAGE for TYPES, as below)
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.general.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.general.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.general 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.general 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+    type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# GRANT SELECT privileges on new TABLES objects created by librarian by
+# default to the role reader. The target_roles parameter selects the
+# specific creating role(s).
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# REVOKE SELECT privileges on new TABLES objects created by librarian by
+# default from the role reader. The target_roles parameter selects the
+# specific creating role(s).
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.general 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.general.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
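+
+# Available since collection version 1.2.0
+# An illustrative sketch (database and role names are placeholders):
+# skip the implicit GRANT USAGE ON TYPES while adding default privileges
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader without USAGE ON TYPES
+  community.general.postgresql_privs:
+    db: library
+    objs: TABLES
+    privs: SELECT
+    type: default_privs
+    role: reader
+    usage_on_types: no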
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+    """Check whether the given role exists."""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# Simple replacement for functools.partial (kept because the original module
+# had to support Python < 2.5)
+def partial(f, *args, **kwargs):
+    """Partial function application"""
+
+    def g(*g_args, **g_kwargs):
+        new_kwargs = kwargs.copy()
+        new_kwargs.update(g_kwargs)
+        # Pass the merged keyword arguments, not just the call-time ones
+        return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
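+
+# In this module, partial() is used as partial(self.get_table_acls,
+# schema_qualifier): the result is a callable that only needs the remaining
+# list argument, mirroring functools.partial.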
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+        # To use default values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+ self.pg_version = self.connection.server_version
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s")
+
+ if self.pg_version >= 110000:
+ query += " and p.prokind = 'f'"
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ def get_all_procedures_in_schema(self, schema):
+ if self.pg_version < 110000:
+            raise Error("PostgreSQL version must be >= 11 for type=procedure")
+
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s and p.prokind = 'p'")
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+ # suffice for change detection, we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
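+    #
+    # For illustration, a relacl entry such as
+    #   {postgres=arwdDxt/postgres,reader=r/postgres}
+    # is treated as an opaque string and simply compared before and after.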
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+                                 Ignored for other types.
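+        :param fail_on_role: If True, fail when a target role does not exist;
+                             otherwise warn and skip that role.
+        :param usage_on_types: Only used with "default_privs": if True,
+                               USAGE ON TYPES is granted implicitly as well.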
+ """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type in ('function', 'procedure'):
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type in ('function', 'procedure'):
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function / procedure signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join(obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type not in ('function', 'procedure'):
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+                    self.module.warn("Role '%s' does not exist, skipping it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ if schema_qualifier:
+ schema_qualifier = '"%s"' % schema_qualifier
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .usage_on_types(usage_on_types) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ if roles == 'PUBLIC':
+ return True
+
+ status_after = get_status(objs)
+
+ def nonesorted(e):
+            # Under Python 3, sort() fails when comparing None with other
+            # types, so treat None as an empty string for sorting.
+ if e is None:
+ return ''
+ return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
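+# QueryBuilder assembles the final GRANT/REVOKE/ALTER DEFAULT PRIVILEGES
+# statements from the fragments prepared in Connection.manipulate_privs().
+# As a rough illustration (not captured output), state=present with
+# type=table yields a statement along the lines of:
+#   GRANT SELECT,INSERT ON table "public"."books" TO "reader";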
+class QueryBuilder(object):
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self._usage_on_types = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def usage_on_types(self, usage_on_types):
+ self._usage_on_types = usage_on_types
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+    def build(self):
+        if self._state == 'present':
+            self.build_present()
+        else:
+            self.build_absent()
+        return '\n'.join(self.query)
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ elif self._grant_option is False:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ elif not self._obj_type == 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ else:
+ self.query[-1] += ';'
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+
+ if self._usage_on_types:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'procedure',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ trust_input=dict(type='bool', default=True),
+ usage_on_types=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+ usage_on_types = module.params['usage_on_types']
+
+ # Create type object as namespace for module params
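+    # (attribute access such as p.database then stands in for
+    # module.params['database'] throughout the rest of main())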
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Check input
+ if not p.trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, p.roles, p.target_roles, p.session_role, p.schema)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_procedures_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
+ if p.type in ('function', 'procedure'):
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles.upper() == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+                if fail_on_role:
+                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+
+                module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+                module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+ if p.target_roles and not p.type == 'default_privs':
+                module.warn('"target_roles" will be ignored. '
+                            'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ usage_on_types=usage_on_types,
+ )
+
+ except Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except psycopg2.Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e))
+
+ if module.check_mode or not changed:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py
new file mode 100644
index 00000000..1db80adc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_publication.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+    - If no value is set, all tables are targeted.
+    - If the publication already exists for specific tables and I(tables) is not passed,
+      nothing will be changed. If you need to add all tables to the publication with the same name,
+      drop the existing publication and create a new one without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(tables), I(owner),
+ I(session_role), I(params) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version must be 10 or greater.
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+    Create publication "acme", set user alice as the owner, targeting all tables.
+    Allowed DML operations are INSERT and UPDATE only
+ community.general.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
+- name: Remove publication "acme" if exists in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
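+
+# An illustrative sketch (database and publication names are placeholders):
+# drop the publication together with objects that depend on it.
+- name: Remove publication "acme" and objects depending on it from database "test".
+  community.general.postgresql_publication:
+    db: test
+    name: acme
+    state: absent
+    cascade: yes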
+'''
+
+RETURN = r'''
+exists:
+ description:
+  - Flag indicating whether the publication exists at the end of module execution.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+ - If all tables are published, returns empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+  - Flag indicating that all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+
+################################
+# Module functions and classes #
+################################
+
+def transform_tables_representation(tbl_list):
+ """Add 'public.' to names of tables where a schema identifier is absent
+ and add quotes to each element.
+
+ Args:
+ tbl_list (list): List of table names.
+
+ Returns:
+ tbl_list (list): Changed list.
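+
+    Example (an illustrative sketch, not captured output):
+        transform_tables_representation(['prices', 'store.vehicles'])
+        returns ['"public"."prices"', '"store"."vehicles"'].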
+ """
+ for i, table in enumerate(tbl_list):
+ if '.' not in table:
+ tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
+ else:
+ tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
+
+ return tbl_list
+
+
+class PgPublication():
+ """Class to work with PostgreSQL publication.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the publication.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of the publication.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with publication attributes.
+ exists (bool): Flag indicates the publication exists or not.
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.executed_queries = []
+ self.attrs = {
+ 'alltables': False,
+ 'tables': [],
+ 'parameters': {},
+ 'owner': '',
+ }
+ self.exists = self.check_pub()
+
+ def get_info(self):
+ """Refresh the publication information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_pub()
+ return self.attrs
+
+ def check_pub(self):
+ """Check the publication and refresh ``self.attrs`` publication attribute.
+
+ Returns:
+ True if the publication with ``self.name`` exists, False otherwise.
+ """
+
+ pub_info = self.__get_general_pub_info()
+
+ if not pub_info:
+ # Publication does not exist:
+ return False
+
+ self.attrs['owner'] = pub_info.get('pubowner')
+
+ # Publication DML operations:
+ self.attrs['parameters']['publish'] = {}
+ self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
+ self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
+ self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
+ if pub_info.get('pubtruncate'):
+ self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
+
+ # If alltables flag is False, get the list of targeted tables:
+ if not pub_info.get('puballtables'):
+ table_info = self.__get_tables_pub_info()
+ # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
+ # for better representation:
+ for i, schema_and_table in enumerate(table_info):
+ table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
+
+ self.attrs['tables'] = table_info
+ else:
+ self.attrs['alltables'] = True
+
+ # Publication exists:
+ return True
+
+ def create(self, tables, params, owner, check_mode=True):
+ """Create the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be added to the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been created, otherwise False.
+ """
+ changed = True
+
+ query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
+
+ if tables:
+ query_fragments.append("FOR TABLE %s" % ', '.join(tables))
+ else:
+ query_fragments.append("FOR ALL TABLES")
+
+ if params:
+ params_list = []
+ # Make list ["param = 'value'", ...] from params dict:
+ for (key, val) in iteritems(params):
+ params_list.append("%s = '%s'" % (key, val))
+
+ # Add the list to query_fragments:
+ query_fragments.append("WITH (%s)" % ', '.join(params_list))
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ if owner:
+ # If check_mode, just add possible SQL to
+ # executed_queries and return:
+ self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def update(self, tables, params, owner, check_mode=True):
+ """Update the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be presented in the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been updated, otherwise False.
+ """
+ changed = False
+
+        # Add or drop tables from the set of published tables:
+ if tables and not self.attrs['alltables']:
+
+            # 1. Add tables that are not yet in the publication:
+            for tbl in tables:
+                if tbl not in self.attrs['tables']:
+ changed = self.__pub_add_table(tbl, check_mode=check_mode)
+
+            # 2. Drop tables that are published but not present
+            # in the passed tables:
+ for tbl in self.attrs['tables']:
+ if tbl not in tables:
+ changed = self.__pub_drop_table(tbl, check_mode=check_mode)
+
+ elif tables and self.attrs['alltables']:
+ changed = self.__pub_set_tables(tables, check_mode=check_mode)
+
+ # Update pub parameters:
+ if params:
+ for key, val in iteritems(params):
+ if self.attrs['parameters'].get(key):
+
+                    # In PostgreSQL 10/11 only the optional parameter 'publish' is present.
+ if key == 'publish':
+ # 'publish' value can be only a string with comma-separated items
+ # of allowed DML operations like 'insert,update' or
+ # 'insert,update,delete', etc.
+ # Make dictionary to compare with current attrs later:
+ val_dict = self.attrs['parameters']['publish'].copy()
+ val_list = val.split(',')
+ for v in val_dict:
+ if v in val_list:
+ val_dict[v] = True
+ else:
+ val_dict[v] = False
+
+ # Compare val_dict and the dict with current 'publish' parameters,
+ # if they're different, set new values:
+ if val_dict != self.attrs['parameters']['publish']:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Default behavior for other cases:
+ elif self.attrs['parameters'][key] != val:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ else:
+ # If the parameter was not set before:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Update pub owner:
+ if owner:
+ if owner != self.attrs['owner']:
+ changed = self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the publication.
+
+ Kwargs:
+ cascade (bool): Flag indicates that publication needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been updated, otherwise False.
+ """
+ if self.exists:
+ query_fragments = []
+ query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def __get_general_pub_info(self):
+ """Get and return general publication information.
+
+ Returns:
+ Dict with publication information if successful, False otherwise.
+ """
+ # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
+ pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_publication' "
+ "AND column_name = 'pubtruncate'"), add_to_executed=False)
+
+ if pgtrunc_sup:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+ else:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+
+ result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __get_tables_pub_info(self):
+ """Get and return tables that are published by the publication.
+
+ Returns:
+ List of dicts with published tables.
+ """
+ query = ("SELECT schemaname, tablename "
+ "FROM pg_publication_tables WHERE pubname = %(pname)s")
+ return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+
+ def __pub_add_table(self, table, check_mode=False):
+ """Add a table to the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_drop_table(self, table, check_mode=False):
+ """Drop a table from the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_tables(self, tables, check_mode=False):
+        """Set the table set that needs to be published by the publication.
+
+ Args:
+ tables (list): List of tables.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
+ query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ ', '.join(quoted_tables)))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_param(self, param, value, check_mode=False):
+ """Set an optional publication parameter.
+
+ Args:
+ param (str): Name of the parameter.
+ value (str): Parameter value.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
+ param, value))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_owner(self, role, check_mode=False):
+ """Set a publication owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ('ALTER PUBLICATION %s '
+ 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+ Note: If we need just to get information from the database,
+ we use ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(required=True),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tables=dict(type='list', elements='str'),
+ parameters=dict(type='dict'),
+ owner=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ name = module.params['name']
+ state = module.params['state']
+ tables = module.params['tables']
+ params = module.params['parameters']
+ owner = module.params['owner']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not params:
+ params_list = None
+ else:
+ params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
+
+ check_input(module, name, tables, owner, session_role, params_list)
+
+ if state == 'absent':
+ if tables:
+ module.warn('parameter "tables" is ignored when "state=absent"')
+ if params:
+ module.warn('parameter "parameters" is ignored when "state=absent"')
+ if owner:
+ module.warn('parameter "owner" is ignored when "state=absent"')
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when "state=present"')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ # We check publication state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Nothing was changed by default:
+ changed = False
+
+ ###################################
+ # Create object and do rock'n'roll:
+ publication = PgPublication(module, cursor, name)
+
+ if tables:
+ tables = transform_tables_representation(tables)
+
+ # If module.check_mode=True, nothing will be changed:
+ if state == 'present':
+ if not publication.exists:
+ changed = publication.create(tables, params, owner, check_mode=module.check_mode)
+
+ else:
+ changed = publication.update(tables, params, owner, check_mode=module.check_mode)
+
+ elif state == 'absent':
+ changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
+
+ # Get final publication info:
+ pub_fin_info = {}
+ if state == 'present' or (state == 'absent' and module.check_mode):
+ pub_fin_info = publication.get_info()
+ elif state == 'absent' and not module.check_mode:
+ publication.exists = False
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Update publication info and return ret values:
+ module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py
new file mode 100644
index 00000000..e231fbd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_query.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(community.general.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+seealso:
+- module: community.general.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.general.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.general.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.general.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.general.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.general.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.general.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+ Insert data to the column of array type using positional_args.
+ Note that we use quotes here, the same as for passing JSON, etc.
+ community.general.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from the test table, looking into the app1 schema first; then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the public schema
+- name: Select from test using search_path
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+  - When reading several queries from a file, it contains the message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+  - When running queries from a file, returns the result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+ it contains a total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+    # ProgrammingError is needed for checking 'no results to fetch' in main();
+    # psycopg2 availability will be checked by connect_to_db() in
+    # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
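+
+    For example, ``[1, 2, 3]`` is converted to the string ``'{1, 2, 3}'``.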
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
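+
+    For example, ``{'ids': [1, 2]}`` becomes ``{'ids': '{1, 2}'}``.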
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
+
+
+def set_search_path(cursor, search_path):
+ """Set session's search_path.
+
+ Args:
+ cursor (Psycopg2 cursor): Database cursor object.
+ search_path (str): String containing comma-separated schema names.
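+
+    For example, passing 'app1,public' runs ``SET search_path TO app1,public``.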
+ """
+ cursor.execute('SET search_path TO %s' % search_path)
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ search_path=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ search_path = module.params["search_path"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, session_role)
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ query_list = []
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
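+                # Split the script into separate statements on semicolons,
+                # skipping fragments that are only a newline: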
+ if ';' in query:
+ query_list = [q for q in query.split(';') if q != '\n']
+ else:
+ query_list.append(query)
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+ else:
+ query_list.append(query)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if search_path:
+ set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ query_all_results = []
+ rowcount = 0
+ statusmessage = ''
+
+ # Execute query:
+ for query in query_list:
+ try:
+ cursor.execute(query, arguments)
+ statusmessage = cursor.statusmessage
+ if cursor.rowcount > 0:
+ rowcount += cursor.rowcount
+
+ try:
+ query_result = [dict(row) for row in cursor.fetchall()]
+
+ except Psycopg2ProgrammingError as e:
+ if to_native(e) == 'no results to fetch':
+ query_result = {}
+
+ except Exception as e:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ query_all_results.append(query_result)
+
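+            # Infer 'changed' from the status message: for DML it looks like
+            # 'INSERT 0 1', 'UPDATE 3' or 'DELETE 2', where the last number
+            # is the affected row count: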
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if s[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if s[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ query_list=query_list,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ query_all_results=query_all_results,
+ rowcount=rowcount,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py
new file mode 100644
index 00000000..e7f28ecf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_schema.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove a PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+ - Name of the database to connect to and add or remove the schema.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.general.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.general.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.general.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: yes
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def set_owner(cursor, schema, owner):
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (
+ pg_quote_identifier(schema, 'schema'), owner)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+
+
+def get_schema_info(cursor, schema):
+ query = ("SELECT schema_owner AS owner "
+ "FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.fetchone()
+
+
+def schema_exists(cursor, schema):
+ query = ("SELECT schema_name FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.rowcount == 1
+
+
+def schema_delete(cursor, schema, cascade):
+ if schema_exists(cursor, schema):
+ query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def schema_create(cursor, schema, owner):
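+    # Create the schema if it does not exist; otherwise only change the owner when it differs.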
+ if not schema_exists(cursor, schema):
+ query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
+ if owner:
+ query_fragments.append('AUTHORIZATION "%s"' % owner)
+ query = ' '.join(query_fragments)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return set_owner(cursor, schema, owner)
+ else:
+ return False
+
+
+def schema_matches(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ return False
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return False
+ else:
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ schema=dict(type="str", required=True, aliases=['name']),
+ owner=dict(type="str", default=""),
+ database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
+ cascade_drop=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ session_role=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ schema = module.params["schema"]
+ owner = module.params["owner"]
+ state = module.params["state"]
+ cascade_drop = module.params["cascade_drop"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, schema, owner, session_role)
+
+ changed = False
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = not schema_exists(cursor, schema)
+ elif state == "present":
+ changed = not schema_matches(cursor, schema, owner)
+ module.exit_json(changed=changed, schema=schema)
+
+ if state == "absent":
+ try:
+ changed = schema_delete(cursor, schema, cascade_drop)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = schema_create(cursor, schema, owner)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py
new file mode 100644
index 00000000..50cd628a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_sequence.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows creating, dropping, or changing the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+    - If I(state=absent), other options will be ignored except I(name) and
+      I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+    - The schema of the I(sequence). This will be used to create and relocate
+      a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, the sequence will be created in the
+  database named postgres.
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.general.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.general.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+    preallocating 10 sequence numbers in cache
+ community.general.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.general.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.general.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.general.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+  description: List of queries that the module tried to execute.
+  returned: always
+  type: list
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+  description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+  description: Shows whether the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class Sequence(object):
+ """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- something was changed after execution or not
+ executed_queries (list) -- executed queries
+ name (str) -- name of the sequence
+ owner (str) -- name of the owner of the sequence
+ schema (str) -- name of the schema (default: public)
+ data_type (str) -- data type of the sequence
+ start_value (int) -- value of the sequence start
+ minvalue (int) -- minimum value of the sequence
+ maxvalue (int) -- maximum value of the sequence
+ increment (int) -- increment value of the sequence
+ cycle (bool) -- sequence can cycle or not
+ new_name (str) -- name of the renamed sequence
+ new_schema (str) -- name of the new schema
+ exists (bool) -- sequence exists or not
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.name = self.module.params['sequence']
+ self.owner = ''
+ self.schema = self.module.params['schema']
+ self.data_type = ''
+ self.start_value = ''
+ self.minvalue = ''
+ self.maxvalue = ''
+ self.increment = ''
+ self.cycle = ''
+ self.new_name = ''
+ self.new_schema = ''
+ self.exists = False
+ # Collect info
+ self.get_info()
+
+ def get_info(self):
+ """Getter to refresh and get sequence info"""
+ query = ("SELECT "
+ "s.sequence_schema AS schemaname, "
+ "s.sequence_name AS sequencename, "
+ "pg_get_userbyid(c.relowner) AS sequenceowner, "
+ "s.data_type::regtype AS data_type, "
+ "s.start_value AS start_value, "
+ "s.minimum_value AS min_value, "
+ "s.maximum_value AS max_value, "
+ "s.increment AS increment_by, "
+ "s.cycle_option AS cycle "
+ "FROM information_schema.sequences s "
+ "JOIN pg_class c ON c.relname = s.sequence_name "
+ "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE NOT pg_is_other_temp_schema(n.oid) "
+ "AND c.relkind = 'S'::\"char\" "
+ "AND sequence_name = %(name)s "
+ "AND sequence_schema = %(schema)s")
+
+ res = exec_sql(self, query,
+ query_params={'name': self.name, 'schema': self.schema},
+ add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res:
+ self.exists = True
+ self.schema = res[0]['schemaname']
+ self.name = res[0]['sequencename']
+ self.owner = res[0]['sequenceowner']
+ self.data_type = res[0]['data_type']
+ self.start_value = res[0]['start_value']
+ self.minvalue = res[0]['min_value']
+ self.maxvalue = res[0]['max_value']
+ self.increment = res[0]['increment_by']
+ self.cycle = res[0]['cycle']
+
+ def create(self):
+ """Implements CREATE SEQUENCE command behavior."""
+ query = ['CREATE SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('data_type'):
+ query.append('AS %s' % self.module.params['data_type'])
+
+ if self.module.params.get('increment'):
+ query.append('INCREMENT BY %s' % self.module.params['increment'])
+
+ if self.module.params.get('minvalue'):
+ query.append('MINVALUE %s' % self.module.params['minvalue'])
+
+ if self.module.params.get('maxvalue'):
+ query.append('MAXVALUE %s' % self.module.params['maxvalue'])
+
+ if self.module.params.get('start'):
+ query.append('START WITH %s' % self.module.params['start'])
+
+ if self.module.params.get('cache'):
+ query.append('CACHE %s' % self.module.params['cache'])
+
+ if self.module.params.get('cycle'):
+ query.append('CYCLE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def drop(self):
+ """Implements DROP SEQUENCE command behavior."""
+ query = ['DROP SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('cascade'):
+ query.append('CASCADE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def rename(self):
+ """Implements ALTER SEQUENCE RENAME TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('RENAME TO "%s"' % self.module.params['rename_to'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_owner(self):
+ """Implements ALTER SEQUENCE OWNER TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('OWNER TO "%s"' % self.module.params['owner'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_schema(self):
+ """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def __add_schema(self):
+ return '"%s"."%s"' % (self.schema, self.name)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ sequence=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
+ increment=dict(type='int'),
+ minvalue=dict(type='int', aliases=['min']),
+ maxvalue=dict(type='int', aliases=['max']),
+ start=dict(type='int'),
+ cache=dict(type='int'),
+ cycle=dict(type='bool', default=False),
+ schema=dict(type='str', default='public'),
+ cascade=dict(type='bool', default=False),
+ rename_to=dict(type='str'),
+ owner=dict(type='str'),
+ newschema=dict(type='str'),
+ db=dict(type='str', default='', aliases=['login_db', 'database']),
+ session_role=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['rename_to', 'data_type'],
+ ['rename_to', 'increment'],
+ ['rename_to', 'minvalue'],
+ ['rename_to', 'maxvalue'],
+ ['rename_to', 'start'],
+ ['rename_to', 'cache'],
+ ['rename_to', 'cycle'],
+ ['rename_to', 'cascade'],
+ ['rename_to', 'owner'],
+ ['rename_to', 'newschema'],
+ ['cascade', 'data_type'],
+ ['cascade', 'increment'],
+ ['cascade', 'minvalue'],
+ ['cascade', 'maxvalue'],
+ ['cascade', 'start'],
+ ['cascade', 'cache'],
+ ['cascade', 'cycle'],
+ ['cascade', 'owner'],
+ ['cascade', 'newschema'],
+ ]
+ )
+
+ if not module.params["trust_input"]:
+ check_input(
+ module,
+ module.params['sequence'],
+ module.params['schema'],
+ module.params['rename_to'],
+ module.params['owner'],
+ module.params['newschema'],
+ module.params['session_role'],
+ )
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+
+ # Change autocommit to False if check_mode:
+ autocommit = not module.check_mode
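+    # (with autocommit off, all statements run in one transaction that is rolled back at the end in check mode)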
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = Sequence(module, cursor)
+
+ # Set defaults:
+ changed = False
+
+ # Create new sequence
+ if not data.exists and module.params['state'] == 'present':
+ if module.params.get('rename_to'):
+ module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
+ if module.params.get('newschema'):
+ module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
+
+ changed = data.create()
+
+ # Drop non-existing sequence
+ elif not data.exists and module.params['state'] == 'absent':
+ # Nothing to do
+ changed = False
+
+ # Drop existing sequence
+ elif data.exists and module.params['state'] == 'absent':
+ changed = data.drop()
+
+ # Rename sequence
+ if data.exists and module.params.get('rename_to'):
+ if data.name != module.params['rename_to']:
+ changed = data.rename()
+ if changed:
+ data.new_name = module.params['rename_to']
+
+ # Refresh information
+ if module.params['state'] == 'present':
+ data.get_info()
+
+ # Change owner, schema and settings
+ if module.params['state'] == 'present' and data.exists:
+ # change owner
+ if module.params.get('owner'):
+ if data.owner != module.params['owner']:
+ changed = data.set_owner()
+
+ # Set schema
+ if module.params.get('newschema'):
+ if data.schema != module.params['newschema']:
+ changed = data.set_schema()
+ if changed:
+ data.new_schema = module.params['newschema']
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ sequence=data.name,
+ queries=data.executed_queries,
+ schema=data.schema,
+ data_type=data.data_type,
+ increment=data.increment,
+ minvalue=data.minvalue,
+ maxvalue=data.maxvalue,
+ start=data.start_value,
+ cycle=data.cycle,
+ owner=data.owner,
+ )
+
+ if module.params['state'] == 'present':
+ if data.new_name:
+ kw['newname'] = data.new_name
+ if data.new_schema:
+ kw['newschema'] = data.new_schema
+
+ elif module.params['state'] == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py
new file mode 100644
index 00000000..737bded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_set.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+  - Allows changing a PostgreSQL server configuration parameter.
+  - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+ - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+ - It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
+ - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+ which is read in addition to postgresql.conf.
+  - The module allows resetting a parameter to boot_val (the cluster initial value) with I(reset=yes), or removing the parameter
+    string from postgresql.auto.conf and reloading the configuration with I(value=default) (for settings with postmaster context a restart is required).
+  - After a change you can see the previous and the new parameter values and other information in the Ansible output,
+    using the returned values and the M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+    - To remove the parameter string from postgresql.auto.conf and
+      reload the server configuration, you must pass I(value=default).
+      With I(value=default) the task always reports C(changed=true).
+ type: str
+ reset:
+ description:
+    - Restore the parameter to its initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL 9.4 and later are supported.
+- Pay attention: changing a setting with the 'postmaster' context can return C(changed=true)
+  when actually nothing changes, because the same value may be presented in
+  several different forms, for example, 1024MB, 1GB, etc. However, in the pg_settings
+  system view it can be defined as 131072 pages of 8kB.
+  The final check cannot compare the parameter value because the server has
+  not been restarted and the value in pg_settings has not been updated yet.
+- For some parameters restart of PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: community.general.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.general.postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB and show what has changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.general.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that a restart of the PostgreSQL server is required for some parameters to take effect.
+# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if the value you passed differs from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.general.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.general.postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+  description: Whether a restart of the PostgreSQL server is required to apply the change.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+  description: Previous value of the parameter in a human-readable form.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+  description: Value of the parameter after the change, in a human-readable form.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+  - Dictionary that contains the current parameter value (at the time the task finishes).
+  - Note that some parameters require a restart of the PostgreSQL server for the change to take effect.
+  - Returns the current value in check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow to set value like 1mb instead of 1MB, etc:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
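+    # Return: (pretty value, raw value normalized to bytes where applicable, unit, boot value, context)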
+ return (val[0], raw_val, unit, boot_val, context)
+
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
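+    # For example, '4MB' is converted to 4194304, while '10' is returned unchanged.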
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
+
+
+def param_set(cursor, module, name, value, context):
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ reset = module.params['reset']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, name, value, session_role)
+
+    # Allow passing values like 1mb instead of 1MB, etc. (e.g. '32mb' is normalized to '32MB'):
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
+
+ if value is not None and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if value is None and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+ # Anyway returns current raw value in the check_mode:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param (value can be an empty string):
+ if value is not None and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+
+ if restart_required and changed:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py
new file mode 100644
index 00000000..435a6c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_slot.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: yes
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+    - I(state=absent) implies the slot must be removed from the system if present.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+    - Optional parameter that, when C(yes), specifies that the LSN for this replication slot is reserved
+      immediately; otherwise, with the default C(no), the LSN is reserved on the first connection
+      from a streaming replication client.
+    - Available from PostgreSQL version 9.6.
+    - Used only with I(slot_type=physical).
+    - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: no
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create physical_one physical slot if doesn't exist
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove physical_one slot if exists
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_one logical slot to the database acme if doesn't exist
+ community.general.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot if exists from the cluster running on another host and non-standard port
+ community.general.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
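+# Illustrative sketch: assumes a PostgreSQL 9.6 or later server,
+# where reserving the LSN immediately is supported.
+- name: Create physical slot reserved_one and reserve its LSN immediately
+  become_user: postgres
+  community.general.postgresql_slot:
+    name: reserved_one
+    slot_type: physical
+    immediately_reserve: yes
+    db: ansible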
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgSlot(object):
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.kind = ''
+ self.__slot_exists()
+ self.changed = False
+ self.executed_queries = []
+
+ def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
+ if self.exists:
+ if self.kind == kind:
+ return False
+ else:
+ self.module.warn("slot with name '%s' already exists "
+ "but has another type '%s'" % (self.name, self.kind))
+ return False
+
+ if just_check:
+ return None
+
+ if kind == 'physical':
+            # Check server version (immediately_reserve needs 9.6 or later):
+ if self.cursor.connection.server_version < 96000:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+ else:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'i_reserve': immediately_reserve},
+ return_bool=True)
+
+ elif kind == 'logical':
+ query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
+
+ def drop(self):
+ if not self.exists:
+ return False
+
+ query = "SELECT pg_drop_replication_slot(%(name)s)"
+ self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
+
+ def __slot_exists(self):
+ query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", aliases=["login_db"]),
+ name=dict(type="str", required=True, aliases=["slot_name"]),
+ slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+ immediately_reserve=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ output_plugin=dict(type="str", default="test_decoding"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ slot_type = module.params["slot_type"]
+ immediately_reserve = module.params["immediately_reserve"]
+ state = module.params["state"]
+ output_plugin = module.params["output_plugin"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ if immediately_reserve and slot_type == 'logical':
+ module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+ # When slot_type is logical and parameter db is not passed,
+ # the default database will be used to create the slot and
+ # the user should know about this.
+ # When the slot type is physical,
+ # it doesn't matter which database will be used
+ # because physical slots are global objects.
+ if slot_type == 'logical':
+ warn_db_default = True
+ else:
+ warn_db_default = False
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##################################
+ # Create an object and do main job
+ pg_slot = PgSlot(module, cursor, name)
+
+ changed = False
+
+ if module.check_mode:
+ if state == "present":
+ if not pg_slot.exists:
+ changed = True
+
+ pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
+
+ elif state == "absent":
+ if pg_slot.exists:
+ changed = True
+ else:
+ if state == "absent":
+ pg_slot.drop()
+
+ elif state == "present":
+ pg_slot.create(slot_type, immediately_reserve, output_plugin)
+
+ changed = pg_slot.changed
+
+ db_connection.close()
+ module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py
new file mode 100644
index 00000000..0e2b3612
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_subscription.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: yes
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: yes
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+      Fetch missing table information from the publisher. Always returns ``changed`` as ``True``.
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+    - The dict of connection parameters (param: value) used to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+    - For updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+    - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+seealso:
+- module: community.general.postgresql_publication
+- module: community.general.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=yes)
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: yes
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: no
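+# Illustrative sketch: assumes the acme subscription and the role bob already exist.
+- name: Change the owner of the acme subscription to bob
+  community.general.postgresql_subscription:
+    db: mydb
+    name: acme
+    state: present
+    owner: bob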
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+  - Flag indicating whether the subscription exists at the end of module execution.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
+def convert_conn_params(conn_dict):
+ """Converts the passed connection dictionary to string.
+
+ Args:
+        conn_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Connection string.
+ """
+ conn_list = []
+ for (param, val) in iteritems(conn_dict):
+ conn_list.append('%s=%s' % (param, val))
+
+ return ' '.join(conn_list)
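+# For example, convert_conn_params({'host': '127.0.0.1', 'port': 5432}) returns
+# a space-separated libpq string such as "host=127.0.0.1 port=5432"
+# (key order follows dict iteration order).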
+
+
+def convert_subscr_params(params_dict):
+ """Converts the passed params dictionary to string.
+
+ Args:
+        params_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Parameters string.
+ """
+ params_list = []
+ for (param, val) in iteritems(params_dict):
+ if val is False:
+ val = 'false'
+ elif val is True:
+ val = 'true'
+
+ params_list.append('%s = %s' % (param, val))
+
+ return ', '.join(params_list)
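+# For example, convert_subscr_params({'enabled': True, 'copy_data': False}) returns
+# a comma-separated string such as "enabled = true, copy_data = false"
+# (key order follows dict iteration order).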
+
+
+class PgSubscription():
+ """Class to work with PostgreSQL subscription.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the subscription.
+ db (str): The database name the subscription will be associated with.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of subscription.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with subscription attributes.
+        exists (bool): Flag indicating whether the subscription exists.
+ """
+
+ def __init__(self, module, cursor, name, db):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.db = db
+ self.executed_queries = []
+ self.attrs = {
+ 'owner': None,
+ 'enabled': None,
+ 'synccommit': None,
+ 'conninfo': {},
+ 'slotname': None,
+ 'publications': [],
+ }
+ self.empty_attrs = deepcopy(self.attrs)
+ self.exists = self.check_subscr()
+
+ def get_info(self):
+ """Refresh the subscription information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_subscr()
+ return self.attrs
+
+ def check_subscr(self):
+ """Check the subscription and refresh ``self.attrs`` subscription attribute.
+
+ Returns:
+ True if the subscription with ``self.name`` exists, False otherwise.
+ """
+
+ subscr_info = self.__get_general_subscr_info()
+
+ if not subscr_info:
+ # The subscription does not exist:
+ self.attrs = deepcopy(self.empty_attrs)
+ return False
+
+ self.attrs['owner'] = subscr_info.get('rolname')
+ self.attrs['enabled'] = subscr_info.get('subenabled')
+        self.attrs['synccommit'] = subscr_info.get('subsynccommit')
+ self.attrs['slotname'] = subscr_info.get('subslotname')
+ self.attrs['publications'] = subscr_info.get('subpublications')
+ if subscr_info.get('subconninfo'):
+ for param in subscr_info['subconninfo'].split(' '):
+ tmp = param.split('=')
+ try:
+ self.attrs['conninfo'][tmp[0]] = int(tmp[1])
+ except ValueError:
+ self.attrs['conninfo'][tmp[0]] = tmp[1]
+
+ return True
+
+ def create(self, connparams, publications, subsparams, check_mode=True):
+ """Create the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (str): Parameters string in WITH () clause style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been created, otherwise False.
+ """
+ query_fragments = []
+ query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
+ "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
+
+ if subsparams:
+ query_fragments.append("WITH (%s)" % subsparams)
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ return changed
+
+ def update(self, connparams, publications, subsparams, check_mode=True):
+ """Update the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (dict): Dictionary of optional parameters.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if subscription has been updated, otherwise False.
+ """
+ changed = False
+
+ if connparams:
+ if connparams != self.attrs['conninfo']:
+ changed = self.__set_conn_params(convert_conn_params(connparams),
+ check_mode=check_mode)
+
+ if publications:
+ if sorted(self.attrs['publications']) != sorted(publications):
+ changed = self.__set_publications(publications, check_mode=check_mode)
+
+ if subsparams:
+ params_to_update = []
+
+ for (param, value) in iteritems(subsparams):
+ if param == 'enabled':
+ if self.attrs['enabled'] and value is False:
+ changed = self.enable(enabled=False, check_mode=check_mode)
+ elif not self.attrs['enabled'] and value is True:
+ changed = self.enable(enabled=True, check_mode=check_mode)
+
+ elif param == 'synchronous_commit':
+ if self.attrs['synccommit'] is True and value is False:
+ params_to_update.append("%s = false" % param)
+ elif self.attrs['synccommit'] is False and value is True:
+ params_to_update.append("%s = true" % param)
+
+ elif param == 'slot_name':
+ if self.attrs['slotname'] and self.attrs['slotname'] != value:
+ params_to_update.append("%s = %s" % (param, value))
+
+ else:
+ self.module.warn("Parameter '%s' is not in params supported "
+ "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
+
+ if params_to_update:
+ changed = self.__set_params(params_to_update, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the subscription.
+
+ Kwargs:
+ cascade (bool): Flag indicates that the subscription needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been removed, otherwise False.
+ """
+ if self.exists:
+ query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def set_owner(self, role, check_mode=True):
+ """Set a subscription owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a subscription owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def refresh(self, check_mode=True):
+ """Refresh publication.
+
+ Fetches missing table info from publisher.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_params(self, params_to_update, check_mode=True):
+ """Update optional subscription parameters.
+
+ Args:
+ params_to_update (list): Parameters with values to update.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_conn_params(self, connparams, check_mode=True):
+ """Update connection parameters.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_publications(self, publications, check_mode=True):
+ """Update publications.
+
+ Args:
+ publications (list): Publications on the master to use.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def enable(self, enabled=True, check_mode=True):
+ """Enable or disable the subscription.
+
+ Kwargs:
+            enabled (bool): Flag indicating whether the subscription needs
+                to be enabled or disabled.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if enabled:
+ query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
+ else:
+ query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
+
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __get_general_subscr_info(self):
+ """Get and return general subscription information.
+
+ Returns:
+ Dict with subscription information if successful, False otherwise.
+ """
+ query = ("SELECT d.datname, r.rolname, s.subenabled, "
+ "s.subconninfo, s.subslotname, s.subsynccommit, "
+ "s.subpublications FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid "
+ "WHERE s.subname = %(name)s AND d.datname = %(db)s")
+
+ result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+        Note: If we just need to get information from the database,
+        we use the ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', required=True, aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
+ publications=dict(type='list', elements='str'),
+ connparams=dict(type='dict'),
+ cascade=dict(type='bool', default=False),
+ owner=dict(type='str'),
+ subsparams=dict(type='dict'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ db = module.params['db']
+ name = module.params['name']
+ state = module.params['state']
+ publications = module.params['publications']
+ cascade = module.params['cascade']
+ owner = module.params['owner']
+ subsparams = module.params['subsparams']
+ connparams = module.params['connparams']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not subsparams:
+ subsparams_str = None
+ else:
+ subsparams_str = convert_subscr_params(subsparams)
+
+ if not connparams:
+ connparams_str = None
+ else:
+ connparams_str = convert_conn_params(connparams)
+
+ check_input(module, name, publications, owner, session_role,
+ connparams_str, subsparams_str)
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when state is not absent')
+
+ if state != 'present':
+ if owner:
+ module.warn("parameter 'owner' is ignored when state is not 'present'")
+ if publications:
+ module.warn("parameter 'publications' is ignored when state is not 'present'")
+ if connparams:
+ module.warn("parameter 'connparams' is ignored when state is not 'present'")
+ if subsparams:
+ module.warn("parameter 'subsparams' is ignored when state is not 'present'")
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We check subscription state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Set defaults:
+ changed = False
+ initial_state = {}
+ final_state = {}
+
+ ###################################
+ # Create object and do rock'n'roll:
+ subscription = PgSubscription(module, cursor, name, db)
+
+ if subscription.exists:
+ initial_state = deepcopy(subscription.attrs)
+ final_state = deepcopy(initial_state)
+
+ if state == 'present':
+ if not subscription.exists:
+ if subsparams:
+ subsparams = convert_subscr_params(subsparams)
+
+ if connparams:
+ connparams = convert_conn_params(connparams)
+
+ changed = subscription.create(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ else:
+ changed = subscription.update(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ if owner and subscription.attrs['owner'] != owner:
+ changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
+
+ elif state == 'absent':
+ changed = subscription.drop(cascade, check_mode=module.check_mode)
+
+ elif state == 'refresh':
+ if not subscription.exists:
+ module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
+
+ # Always returns True:
+ changed = subscription.refresh(check_mode=module.check_mode)
+
+ # Get final subscription info:
+ final_state = subscription.get_info()
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Return ret values and exit:
+ module.exit_json(changed=changed,
+ name=name,
+ exists=subscription.exists,
+ queries=subscription.executed_queries,
+ initial_state=initial_state,
+ final_state=final_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py
new file mode 100644
index 00000000..5260853d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_table.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows you to create, drop, rename, or truncate a table, or change some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+      I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+    - Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
+      Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+    - Column definitions for the table, for example C(id bigserial primary key).
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+    - Name of the database to connect to and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the db parameter, tables will be created in the database
+  named postgres.
+- PostgreSQL allows creating a columnless table, so the columns param is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: community.general.postgresql_sequence
+- module: community.general.postgresql_idx
+- module: community.general.postgresql_info
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.general.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.general.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.general.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.general.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.general.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.general.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.general.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.general.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.general.postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ community.general.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.general.postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
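+# Illustrative sketch of the columnless-table case mentioned in the notes above;
+# assumes the acme database exists.
+- name: Create a columnless table empty_tbl in the acme database
+  community.general.postgresql_table:
+    db: acme
+    name: empty_tbl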
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+            if tblspace == 'pg_default' and not self.info['tblspace']:
+ pass # Because they have the same meaning
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, username):
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, return_bool=True)
+
+ def set_tblspace(self, tblspace):
+ query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
+ return exec_sql(self, query, return_bool=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params['table']
+ state = module.params['state']
+ tablespace = module.params['tablespace']
+ owner = module.params['owner']
+ unlogged = module.params['unlogged']
+ like = module.params['like']
+ including = module.params['including']
+ newname = module.params['rename']
+ storage_params = module.params['storage_params']
+ truncate = module.params['truncate']
+ columns = module.params['columns']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, table, tablespace, owner, like, including,
+ newname, storage_params, columns, session_role)
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note, if table has been renamed, it gets info by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py
new file mode 100644
index 00000000..2062e6a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_tablespace.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+    - Ensure that the location exists and has the right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+      I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+    - If this option is not specified, the role that creates the tablespace becomes its owner.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+    - When C(reset) is passed as an option's value, the option will be removed if it was set previously.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+ - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(settings_list) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+ support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+  cannot be run inside a transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.general.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.general.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.general.postgresql_tablespace:
+ name: bloat
+ state: absent
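+# Illustrative sketch: assumes the acme tablespace and the role alice already exist.
+- name: Change the owner of the acme tablespace to alice
+  community.general.postgresql_tablespace:
+    name: acme
+    owner: alice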
+'''
+
+RETURN = r'''
+queries:
+  description: List of queries that the module tried to execute.
+  returned: always
+  type: list
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+ description: New tablespace name
+ returned: if existent
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgTablespace(object):
+
+ """Class for working with PostgreSQL tablespaces.
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+ exists (bool) -- flag that indicates whether the tablespace exists in the DB
+ owner (str) -- tablespace owner
+ location (str) -- path to the tablespace directory in the file system
+ executed_queries (list) -- list of executed queries
+ new_name (str) -- new name for the tablespace
+ opt_not_supported (bool) -- flag that indicates whether tablespace options are supported
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.owner = ''
+ self.settings = {}
+ self.location = ''
+ self.executed_queries = []
+ self.new_name = ''
+ self.opt_not_supported = False
+ # Collect info:
+ self.get_info()
+
+ def get_info(self):
+ """Get tablespace information."""
+ # Check that spcoptions exists:
+ opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'", add_to_executed=False)
+
+ # For version 9.1 and earlier:
+ location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spclocation'", add_to_executed=False)
+ if location:
+ location = 'spclocation'
+ else:
+ location = 'pg_tablespace_location(t.oid)'
+
+ if not opt:
+ self.opt_not_supported = True
+ query = ("SELECT r.rolname, (SELECT Null), %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+ else:
+ query = ("SELECT r.rolname, t.spcoptions, %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+
+ res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
+ query_params={'name': self.name}, add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res[0][0]:
+ self.exists = True
+ self.owner = res[0][0]
+
+ if res[0][1]:
+ # Options exist:
+ for i in res[0][1]:
+ i = i.split('=')
+ self.settings[i[0]] = i[1]
+
+ if res[0][2]:
+ # Location exists:
+ self.location = res[0][2]
+
+ def create(self, location):
+ """Create tablespace.
+
+ Return True on success, otherwise return False.
+
+ args:
+ location (str) -- tablespace directory path in the FS
+ """
+ query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self):
+ """Drop tablespace.
+
+ Return True on success, otherwise return False.
+ """
+ return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
+
+ def set_owner(self, new_owner):
+ """Set tablespace owner.
+
+ Return True on success, otherwise return False.
+
+ args:
+ new_owner (str) -- name of a new owner for the tablespace
+ """
+ if new_owner == self.owner:
+ return False
+
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ """Rename tablespace.
+
+ Return True on success, otherwise return False.
+
+ args:
+ newname (str) -- new name for the tablespace
+ """
+ query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
+ self.new_name = newname
+ return exec_sql(self, query, return_bool=True)
+
+ def set_settings(self, new_settings):
+ """Set tablespace settings (options).
+
+ If any setting has been changed, set changed = True.
+ After the whole settings list has been handled, return changed.
+
+ args:
+ new_settings (list) -- list of new settings
+ """
+ # settings must be a dict {'key': 'value'}
+ if self.opt_not_supported:
+ return False
+
+ changed = False
+
+ # Apply new settings:
+ for i in new_settings:
+ if new_settings[i] == 'reset':
+ if i in self.settings:
+ changed = self.__reset_setting(i)
+ self.settings[i] = None
+
+ elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+ changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+ return changed
+
+ def __reset_setting(self, setting):
+ """Reset tablespace setting.
+
+ Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+ def __set_setting(self, setting):
+ """Set tablespace setting.
+
+ Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ tablespace=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ location=dict(type='path', aliases=['path']),
+ owner=dict(type='str'),
+ set=dict(type='dict'),
+ rename_to=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ tablespace = module.params["tablespace"]
+ state = module.params["state"]
+ location = module.params["location"]
+ owner = module.params["owner"]
+ rename_to = module.params["rename_to"]
+ settings = module.params["set"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if state == 'absent' and (location or owner or rename_to or settings):
+ module.fail_json(msg="state=absent is mutually exclusive location, "
+ "owner, rename_to, and set")
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not settings:
+ settings_list = None
+ else:
+ settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
+
+ check_input(module, tablespace, location, owner,
+ rename_to, session_role, settings_list)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Change autocommit to False if check_mode:
+ if module.check_mode:
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=False)
+ else:
+ db_connection.set_isolation_level(READ_COMMITTED)
+
+ # Set defaults:
+ autocommit = False
+ changed = False
+
+ ##############
+ # Create PgTablespace object and do main job:
+ tblspace = PgTablespace(module, cursor, tablespace)
+
+ # If tablespace exists with different location, exit:
+ if tblspace.exists and location and location != tblspace.location:
+ module.fail_json(msg="Tablespace '%s' exists with "
+ "different location '%s'" % (tblspace.name, tblspace.location))
+
+ # Create new tablespace:
+ if not tblspace.exists and state == 'present':
+ if rename_to:
+ module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+ if not location:
+ module.fail_json(msg="'location' parameter must be passed with "
+ "state=present if the tablespace doesn't exist")
+
+ # Because CREATE TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.create(location)
+
+ # Drop non-existing tablespace:
+ elif not tblspace.exists and state == 'absent':
+ # Nothing to do:
+ module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
+
+ # Drop existing tablespace:
+ elif tblspace.exists and state == 'absent':
+ # Because DROP TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.drop()
+
+ # Rename tablespace:
+ elif tblspace.exists and rename_to:
+ if tblspace.name != rename_to:
+ changed = tblspace.rename(rename_to)
+
+ if state == 'present':
+ # Refresh information:
+ tblspace.get_info()
+
+ # Change owner and settings:
+ if state == 'present' and tblspace.exists:
+ if owner:
+ changed = tblspace.set_owner(owner)
+
+ if settings:
+ changed = tblspace.set_settings(settings)
+
+ tblspace.get_info()
+
+ # Rollback if it's possible and check_mode:
+ if not autocommit:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ tablespace=tblspace.name,
+ owner=tblspace.owner,
+ queries=tblspace.executed_queries,
+ options=tblspace.settings,
+ location=tblspace.location,
+ )
+
+ if state == 'present':
+ kw['state'] = 'present'
+
+ if tblspace.new_name:
+ kw['newname'] = tblspace.new_name
+
+ elif state == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py
new file mode 100644
index 00000000..79c987a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+ - Set the user's password; before Ansible 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+ - If C(yes), fails when the user (role) cannot be removed. Otherwise just logs and continues.
+ default: yes
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+ - To create a simple role to be used as a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=yes) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(no) by default.
+ default: yes
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+ - If set to C('infinity'), the user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(yes), does not inspect the database for password changes.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: no
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), checks whether values of options I(name), I(password), I(priv), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections through the options are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+ On earlier versions, the whole hashed string is used as the password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- Supports C(check_mode).
+seealso:
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- module: community.general.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.general.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ community.general.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ community.general.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.general.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ community.general.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.general.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
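+
+# An additional illustrative example (not part of the original upstream examples):
+# on hosted databases such as AWS RDS the pg_authid catalog is not readable,
+# so no_password_changes: yes skips the password comparison entirely.
+- name: Alter the rails user attributes without inspecting the current password
+ community.general.postgresql_user:
+ name: rails
+ role_attr_flags: NOSUPERUSER,NOCREATEDB
+ no_password_changes: yes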
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+import ansible_collections.community.general.plugins.module_utils.saslprep as saslprep
+
+try:
+ # pbkdf2_hmac is missing on Python 2.6; we can safely assume
+ # that a PostgreSQL 10 capable instance has at least Python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
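+ # Illustrative shape of the assembled query (comment added for clarity): for user='django',
+ # encrypted='ENCRYPTED', an expiry date and role_attr_flags='LOGIN', the pieces join to
+ # 'CREATE USER "django" WITH ENCRYPTED PASSWORD %(password)s VALID UNTIL %(expires)s LOGIN',
+ # with password and expires supplied separately as query parameters.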
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+ Compare the proposed password with the existing one, comparing
+ hashes if encrypted. If we cannot access it, assume yes.
+ """
+
+ if current_role_attrs is None:
+ # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume the password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+
+ # SCRAM hashes are represented as a special object, containing hash data:
+ # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
+ # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
+ elif current_role_attrs['rolpassword'] is not None \
+ and pbkdf2_found \
+ and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):
+
+ r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
+ try:
+ # extract SCRAM params from rolpassword
+ it = int(r.group(1))
+ salt = b64decode(r.group(2))
+ server_key = b64decode(r.group(4))
+ # we'll never need `storedKey` as it is only used for server auth in SCRAM
+ # storedKey = b64decode(r.group(3))
+
+ # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
+ # SaltedPassword := Hi(Normalize(password), salt, i)
+ # ServerKey := HMAC(SaltedPassword, "Server Key")
+ normalized_password = saslprep.saslprep(to_text(password))
+ salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)
+
+ server_key_verifier = hmac.new(salted_password, digestmod=sha256)
+ server_key_verifier.update(b'Server Key')
+
+ if server_key_verifier.digest() != server_key:
+ pwchanging = True
+ except Exception:
+ # We assume the password is not scram encrypted
+ # or we cannot check it properly, e.g. due to missing dependencies
+ pwchanging = True
+
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+ # When the provided password looks like an MD5 hash, the value of
+ # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
+
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+ # AWS RDS instances do not allow the user to access pg_authid,
+ # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
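+ # Note (comment added for clarity): the table name may be schema-qualified, e.g.
+ # 'sales.orders' is looked up as table 'orders' in schema 'sales'; unqualified
+ # names default to the 'public' schema.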
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
+
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
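+ # Illustrative note (comment added for clarity): datacl is an ACL array rendered as text,
+ # such as '{=Tc/postgres,django=CTc/postgres}'; the regex below extracts the C/T/c letters
+ # granted to this particular user.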
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
+
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
+
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
+
+
+def get_valid_flags_by_version(cursor):
+ """
+ Some role attributes were introduced after certain versions. We want to
+ compile a list of valid flags against the current Postgres version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default=True),
+ no_password_changes=dict(type='bool', default=False, no_log=False),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+ session_role = module.params['session_role']
+
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, user, password, privs, expires,
+ role_attr_flags, groups, comment, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py
new file mode 100644
index 00000000..9d03408e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/postgresql/postgresql_user_obj_stat_info.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+ - Limit the collected information with a comma-separated string or a YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+ - Restrict the output to a certain schema.
+ type: str
+ db:
+ description:
+ - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), checks whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- The returned C(size) and C(total_size) values are in bytes.
+- To track function statistics, the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+seealso:
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
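+
+# An additional illustrative example (not part of the original upstream examples):
+# the filter option also accepts a YAML list, and the output can be limited to one schema.
+- name: Collect only function statistics from the public schema
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ schema: public
+ filter:
+ - functions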
+'''
+
+RETURN = r'''
+indexes:
+ description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgUserObjStatInfo():
+ """Class to collect information about PostgreSQL user objects.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ executed_queries (list): List of executed queries.
+ info (dict): Statistics dictionary.
+ obj_func_mapping (dict): Mapping of object types to corresponding functions.
+ schema (str): Name of a schema to restrict stat collecting.
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'functions': {},
+ 'indexes': {},
+ 'tables': {},
+ }
+ self.obj_func_mapping = {
+ 'functions': self.get_func_stat,
+ 'indexes': self.get_idx_stat,
+ 'tables': self.get_tbl_stat,
+ }
+ self.schema = None
+
+ def collect(self, filter_=None, schema=None):
+ """Collect statistics information of user objects.
+
+ Kwargs:
+ filter_ (list): List of subsets which need to be collected.
+ schema (str): Restrict stat collecting by certain schema.
+
+ Returns:
+ ``self.info``.
+ """
+ if schema:
+ self.set_schema(schema)
+
+ if filter_:
+ for obj_type in filter_:
+ obj_type = obj_type.strip()
+ obj_func = self.obj_func_mapping.get(obj_type)
+
+ if obj_func is not None:
+ obj_func()
+ else:
+ self.module.warn("Unknown filter option '%s'" % obj_type)
+
+ else:
+ for obj_func in self.obj_func_mapping.values():
+ obj_func()
+
+ return self.info
+
+ def get_func_stat(self):
+ """Get function statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_functions"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='functions',
+ schema_key='schemaname',
+ name_key='funcname')
+
+ def get_idx_stat(self):
+ """Get index statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_indexes"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='indexes',
+ schema_key='schemaname',
+ name_key='indexrelname')
+
+ def get_tbl_stat(self):
+ """Get table statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_tables"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='tables',
+ schema_key='schemaname',
+ name_key='relname')
+
+ def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
+ # Convert result to a list of dicts to make it easier to handle:
+ result = [dict(row) for row in result]
+
+ for elem in result:
+ # Add schema name as a key if not already present:
+ if not self.info[info_key].get(elem[schema_key]):
+ self.info[info_key][elem[schema_key]] = {}
+
+ # Add object name key as a subkey
+ # (names must be unique within a schema, so no additional checks are needed):
+ self.info[info_key][elem[schema_key]][elem[name_key]] = {}
+
+ # Add the remaining attributes of this object:
+ for key, val in iteritems(elem):
+ if key not in (schema_key, name_key):
+ self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
+
+ if info_key in ('tables', 'indexes'):
+ schemaname = elem[schema_key]
+ if self.schema:
+ schemaname = self.schema
+
+ relname = '%s.%s' % (schemaname, elem[name_key])
+
+ result = exec_sql(self, "SELECT pg_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
+
+ if info_key == 'tables':
+ result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
+
+ def set_schema(self, schema):
+ """If schema exists, sets self.schema, otherwise fails."""
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %s")
+ result = exec_sql(self, query, query_params=(schema,),
+ add_to_executed=False)
+
+ if result and result[0][0]:
+ self.schema = schema
+ else:
+ self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ schema=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params["filter"]
+ schema = module.params["schema"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We don't need to commit anything, so, set it to False:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ############################
+ # Create object and do work:
+ pg_obj_info = PgUserObjStatInfo(module, cursor)
+
+ info_dict = pg_obj_info.collect(filter_, schema)
+
+ # Clean up:
+ cursor.close()
+ db_connection.close()
+
+ # Return information:
+ module.exit_json(**info_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py
new file mode 100644
index 00000000..3d0788e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_configuration.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters.
+description:
+ - Updates Vertica configuration parameters.
+options:
+ name:
+ description:
+ - Name of the parameter to update.
+ required: true
+ aliases: [parameter]
+ type: str
+ value:
+ description:
+ - Value of the parameter to be set.
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Update the failovertostandbyafter configuration parameter
+ community.general.vertica_configuration: name=failovertostandbyafter value='8 hours'
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_facts.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
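+
+# vertica_users, vertica_roles, vertica_configuration and vertica_nodes are returned
+# alongside vertica_schemas; a sketch printing another of them:
+- name: Print roles
+  ansible.builtin.debug:
+    msg: "{{ result.vertica_roles }}"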
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_info.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
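+
+# The full set of returned keys is vertica_schemas, vertica_users, vertica_roles,
+# vertica_configuration and vertica_nodes; a sketch looping over the returned users:
+- name: Print every collected user name
+  ansible.builtin.debug:
+    msg: "{{ item.value.name }}"
+  loop: "{{ result.vertica_users | dict2items }}"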
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py
new file mode 100644
index 00000000..bba411d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_role.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them.
+description:
+ - Adds or removes Vertica database role and, optionally, assign other roles.
+options:
+ name:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ state:
+ description:
+      - Whether to create C(present) or drop C(absent) a role.
+ choices: ['present', 'absent']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica role
+ community.general.vertica_role: name=role_name db=db_name state=present
+
+- name: Creating a new vertica role with other role assigned
+ community.general.vertica_role: name=role_name assigned_role=other_role_name state=present
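+
+# YAML-style sketches of the same tasks (role and database names are placeholders):
+- name: Creating a role with an assigned role (YAML form)
+  community.general.vertica_role:
+    name: role_name
+    assigned_roles: other_role_name
+    db: db_name
+    state: present
+
+- name: Removing a role
+  community.general.vertica_role:
+    name: role_name
+    db: db_name
+    state: absent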
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
+ return False
+ return True
+
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ role = module.params['role']
+ assigned_roles = []
+ if module.params['assigned_roles']:
+ assigned_roles = module.params['assigned_roles'].split(',')
+ assigned_roles = filter(None, assigned_roles)
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py
new file mode 100644
index 00000000..424de564
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_schema.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles.
+description:
+ - Adds or removes Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all the objects have been dropped.
+ - In such a situation, if the module tries to remove the schema it
+ will fail and only remove roles created for the schema if they have
+ no dependencies.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ state:
+ description:
+ - Whether to create C(present), or drop C(absent) a schema.
+ default: present
+ choices: ['present', 'absent']
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+ community.general.vertica_schema: name=schema_name db=db_name state=present
+
+- name: Creating a new schema with specific schema owner
+ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: Creating a new schema with roles
+ community.general.vertica_schema:
+ name=schema_name
+ create_roles=schema_name_all
+ usage_roles=schema_name_ro,schema_name_rw
+ db=db_name
+ state=present
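+
+# The same task written in YAML form (a sketch; schema, role and database names are
+# the placeholders used above):
+- name: Creating a new schema with roles (YAML form)
+  community.general.vertica_schema:
+    name: schema_name
+    create_roles: schema_name_all
+    usage_roles: schema_name_ro,schema_name_rw
+    db: db_name
+    state: present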
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
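+    # Drop roles that are no longer wanted at all, revoke CREATE where it is no
+    # longer required, create and grant USAGE to newly required roles, then grant
+    # CREATE to the roles that need it.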
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() == schema_facts[schema_key]['owner'].lower():
+ return False
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+ return False
+ if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+ return False
+ return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+ sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(default=None, aliases=['usage_role']),
+ create_roles=dict(default=None, aliases=['create_role']),
+ owner=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+ usage_roles = filter(None, usage_roles)
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+ create_roles = filter(None, create_roles)
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py
new file mode 100644
index 00000000..f550f190
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/database/vertica/vertica_user.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles.
+description:
+ - Adds or removes Vertica database user and, optionally, assigns roles.
+ - A user will not be removed until all the dependencies have been dropped.
+ - In such a situation, if the module tries to remove the user it
+ will fail and only remove roles granted to the user.
+options:
+ name:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ profile:
+ description:
+ - Sets the user's profile.
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ password:
+ description:
+ - The user's password encrypted by the MD5 algorithm.
+ - The password must be generated with the format C("md5" + md5[password + username]),
+ resulting in a total of 35 characters. An easy way to do this is by querying
+ the Vertica database with select 'md5'||md5('<user_password><user_name>').
+ expired:
+ description:
+ - Sets the user's password expiration.
+ type: bool
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+ - The user will be created with password expired and set to I($ldap$).
+ type: bool
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ state:
+ description:
+ - Whether to create C(present), drop C(absent) or lock C(locked) a user.
+ choices: ['present', 'absent', 'locked']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica user with password
+ community.general.vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
+
+- name: Creating a new vertica user authenticated via ldap with roles assigned
+ community.general.vertica_user:
+ name=user_name
+ ldap=true
+ db=db_name
+ roles=schema_name_ro
+ state=present
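+
+# The password option above expects a pre-hashed value; a sketch of the query the
+# option documentation describes (placeholders kept) and of using the result in YAML form:
+#   select 'md5' || md5('<user_password><user_name>');
+- name: Creating a locked user with a pre-hashed password (YAML form)
+  community.general.vertica_user:
+    name: user_name
+    password: md5<encrypted_password>
+    db: db_name
+    state: locked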
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
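+    # Revoke roles that are no longer required, grant the missing ones, then reset
+    # the user's default role list to exactly the required set.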
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+ if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
+ ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ return False
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ return False
+ return True
+
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(default=None),
+ resource_pool=dict(default=None),
+ password=dict(default=None, no_log=True),
+ expired=dict(type='bool', default=None),
+ ldap=dict(type='bool', default=None),
+ roles=dict(default=None, aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+ roles = filter(None, roles)
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py
new file mode 100644
index 00000000..a6327dde
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_event.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+- "Allows to post events to Datadog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+options:
+ api_key:
+ type: str
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ type: str
+ description: ["Your DataDog app key."]
+ required: true
+ title:
+ type: str
+ description: ["The event title."]
+ required: true
+ text:
+ type: str
+ description: ["The body of the event."]
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ priority:
+ type: str
+ description: ["The priority of the event."]
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ tags:
+ type: list
+ description: ["Comma separated list of tags to apply to the event."]
+ alert_type:
+ type: str
+ description: ["Type of alert."]
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description: ["An arbitrary string to use for aggregation."]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Post an event with low priority
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+
+- name: Post an event with several tags
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
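+
+# A sketch using a few of the other options documented above (the API and app keys
+# are the same placeholder values as in the examples above):
+- name: Post a warning event with an explicit host and aggregation key
+  community.general.datadog_event:
+    title: Testing from ansible
+    text: Test
+    alert_type: warning
+    host: "{{ inventory_hostname }}"
+    aggregation_key: ansible-test
+    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN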
+'''
+
+import platform
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(required=False, default=None, type='int'),
+ priority=dict(
+ required=False, default='normal', choices=['normal', 'low']
+ ),
+ host=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ alert_type=dict(
+ required=False, default='info',
+ choices=['error', 'warning', 'info', 'success']
+ ),
+ aggregation_key=dict(required=False, default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ if module.params['host'] is None:
+ module.params['host'] = platform.node().split('.')[0]
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ host=module.params['host'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ date_happened=module.params['date_happened'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py
new file mode 100644
index 00000000..f6020c2b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/datadog_monitor.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+ - Manages monitors within Datadog.
+ - Options as described on https://docs.datadoghq.com/api/.
+author: Sebastian Kornehl (@skornehl)
+requirements: [datadog]
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ type:
+ description:
+ - The type of the monitor.
+ choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert']
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+ - Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ type: str
+ aliases: [ 'message' ]
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ default: ""
+ notify_no_data:
+ description:
+ - Whether this monitor will notify when data stops reporting.
+ type: bool
+ default: 'no'
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status.
+ - It will only re-notify if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification syntax allowed elsewhere.
+ - Not applicable if I(renotify_interval=None).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users will be notified on changes to this monitor.
+ type: bool
+ default: 'no'
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: 'no'
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+ - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+ - If set, will be used instead of the name to locate the alert.
+ type: str
+ include_tags:
+ description:
+ - Whether notifications from this monitor automatically insert its triggering tags into the title.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+ community.general.datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+ notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Deletes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Mutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Unmutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Use datadoghq.eu platform instead of datadoghq.com
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_host: https://api.datadoghq.eu
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(required=False),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert', 'log alert']),
+ name=dict(required=True),
+ query=dict(required=False),
+ notification_message=dict(required=False, no_log=True, default=None, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ silenced=dict(required=False, default=None, type='dict'),
+ notify_no_data=dict(required=False, default=False, type='bool'),
+ no_data_timeframe=dict(required=False, default=None),
+ timeout_h=dict(required=False, default=None),
+ renotify_interval=dict(required=False, default=None),
+ escalation_message=dict(required=False, default=None),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool'),
+ require_full_window=dict(required=False, default=None, type='bool'),
+ new_host_delay=dict(required=False, default=None),
+ evaluation_delay=dict(required=False, default=None),
+ id=dict(required=False),
+ include_tags=dict(required=False, default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ if 'message' in module.params:
+ module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'")
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'api_host': module.params['api_host'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ # Check whether api_key and app_key are valid; if not, fail here.
+ response = api.Monitor.get_all()
+ if isinstance(response, dict):
+ msg = response.get('errors', None)
+ if msg:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
+
+def _fix_template_vars(message):
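+ # Translate the documented '[[' / ']]' placeholders into Datadog's native '{{' / '}}'
+ # template variables, since literal braces would clash with Jinja2 templating in playbooks.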
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
+
+
+def _get_monitor(module):
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == _fix_template_vars(module.params['name']):
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
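+ # Treat two monitor dicts as equal when, ignoring the listed keys, they contain the same
+ # keys with identical values - used to decide whether an update actually changed anything.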
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window": module.params['require_full_window'],
+ "new_host_delay": module.params['new_host_delay'],
+ "evaluation_delay": module.params['evaluation_delay'],
+ "include_tags": module.params['include_tags'],
+ }
+
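+ # Datadog expects explicit thresholds for service checks; fall back to the documented
+ # default of 1/1/1 when the user did not provide any.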
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py
new file mode 100644
index 00000000..49c42432
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dconf.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Branko Majic <branko@majic.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: dconf
+author:
+ - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+ - This module allows modifications and reading of the dconf database. The
+ module is implemented as a wrapper around the dconf tool. Please see the
+ dconf(1) man page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module
+ will try to detect an existing session and reuse it, or run the tool via
+ C(dbus-run-session).
+notes:
+ - This module depends on the C(psutil) Python library (version 4.0.0 and
+ upwards) and the C(dconf), C(dbus-send), and C(dbus-run-session) binaries.
+ Depending on the distribution you are using, you may need to install
+ additional packages to have these available.
+ - Detection of an existing, running D-Bus session, required to change settings
+ via C(dconf), is not 100% reliable due to implementation details of the D-Bus
+ daemon itself. This might lead to running applications not picking up
+ changes on the fly if options are changed via Ansible and
+ C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+ utilises an unusual syntax for the values (GVariant). For example, if you
+ wanted to provide a string value, the correct syntax would be
+ C(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ value.
+ - When using loops in combination with a value like
+ :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
+ type conversions. Applying a filter :code:`"{{ item.value | string }}"`
+ to the parameter variable can avoid potential conversion problems.
+ - The easiest way to figure out the exact syntax/value you need to provide for a
+ key is by making the configuration change in the application affected by the
+ key, and then having a look at the value set via the commands C(dconf dump
+ /path/to/dir/) or C(dconf read /path/to/key).
+options:
+ key:
+ type: str
+ required: true
+ description:
+ - A dconf key to modify or read from the dconf database.
+ value:
+ type: str
+ required: false
+ description:
+ - Value to set for the specified dconf key. Value should be specified in
+ GVariant format. Due to the complexity of this format, it is best to have a
+ look at existing values in the dconf database. Required for
+ C(state=present).
+ state:
+ type: str
+ required: false
+ default: present
+ choices:
+ - read
+ - present
+ - absent
+ description:
+ - The action to take upon the key/value.
+'''
+
+RETURN = """
+value:
+ description: value associated with the requested key
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
+"""
+
+EXAMPLES = """
+- name: Configure available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ value: "[('xkb', 'us'), ('xkb', 'se')]"
+ state: present
+
+- name: Read currently available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: absent
+
+- name: Configure available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ value: "['us', 'se']"
+ state: present
+
+- name: Read currently available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: absent
+
+- name: Disable desktop effects in Cinnamon
+ community.general.dconf:
+ key: "/org/cinnamon/desktop-effects"
+ value: "false"
+ state: present
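+
+# A sketch of writing a plain string value; note the nested quotes required by the
+# GVariant syntax described in the notes. The key used here is only an illustrative path.
+- name: Set a string value (GVariant strings need the inner single quotes)
+ community.general.dconf:
+ key: "/org/gnome/desktop/interface/gtk-theme"
+ value: "'Adwaita'"
+ state: present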
+"""
+
+
+import os
+import traceback
+
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ psutil_found = True
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+ psutil_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class DBusWrapper(object):
+ """
+ Helper class that can be used for running a command with a working D-Bus
+ session.
+
+ If possible, command will be run against an existing D-Bus session,
+ otherwise the session will be spawned via dbus-run-session.
+
+ Example usage:
+
+ dbus_wrapper = DBusWrapper(ansible_module)
+ dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
+ """
+
+ def __init__(self, module):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+ """
+
+ # Store passed-in arguments and set-up some defaults.
+ self.module = module
+
+ # Try to extract existing D-Bus session address.
+ self.dbus_session_bus_address = self._get_existing_dbus_session()
+
+ # If no existing D-Bus session was detected, check if dbus-run-session
+ # is available.
+ if self.dbus_session_bus_address is None:
+ self.module.get_bin_path('dbus-run-session', required=True)
+
+ def _get_existing_dbus_session(self):
+ """
+ Detects and returns an existing D-Bus session bus address.
+
+ :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
+ """
+
+ # We'll be checking the processes of current user only.
+ uid = os.getuid()
+
+ # Go through all the pids for this user, try to extract the D-Bus
+ # session bus address from environment, and ensure it is possible to
+ # connect to it.
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
+
+ for pid in psutil.pids():
+ process = psutil.Process(pid)
+ process_real_uid, _, _ = process.uids()
+ try:
+ if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
+ dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
+ self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
+ command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
+ rc, _, _ = self.module.run_command(command)
+
+ if rc == 0:
+ self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
+
+ return dbus_session_bus_address_candidate
+
+ # This can happen with things like SSH sessions etc.
+ except psutil.AccessDenied:
+ pass
+
+ self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
+
+ return None
+
+ def run_command(self, command):
+ """
+ Runs the specified command within a functional D-Bus session. The command is
+ effectively passed on to the AnsibleModule.run_command() method, with the
+ modification of using dbus-run-session if necessary.
+
+ :param command: Command to run, including parameters. Each element of the list should be a string.
+ :type command: list
+
+ :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
+ """
+
+ if self.dbus_session_bus_address is None:
+ self.module.debug("Using dbus-run-session wrapper for running commands.")
+ command = ['dbus-run-session'] + command
+ rc, out, err = self.module.run_command(command)
+
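+ # An exit status of 127 here indicates dbus-run-session itself failed (its documented
+ # error status), rather than the wrapped command failing for its own reasons.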
+ if self.dbus_session_bus_address is None and rc == 127:
+ self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
+ else:
+ extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
+ rc, out, err = self.module.run_command(command, environ_update=extra_environment)
+
+ return rc, out, err
+
+
+class DconfPreference(object):
+
+ def __init__(self, module, check_mode=False):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+
+ :param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
+ :type check_mode: bool
+ """
+
+ self.module = module
+ self.check_mode = check_mode
+
+ def read(self, key):
+ """
+ Retrieves current value associated with the dconf key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
+ """
+
+ command = ["dconf", "read", key]
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)
+
+ if out == '':
+ value = None
+ else:
+ value = out.rstrip('\n')
+
+ return value
+
+ def write(self, key, value):
+ """
+ Writes the value for the specified key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key for which the value should be set. Should be a full path.
+ :type key: str
+
+ :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+ :type value: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # If no change is needed (or won't be done due to check_mode), notify
+ # caller straight away.
+ if value == self.read(key):
+ return False
+ elif self.check_mode:
+ return True
+
+ # Set up the command to run. Since D-Bus is needed for the write operation,
+ # run the dconf command through the DBusWrapper (existing session or dbus-run-session).
+ command = ["dconf", "write", key, value]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+ def reset(self, key):
+ """
+ Resets the value for the specified key (removes it from the user configuration).
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key to reset. Should be a full path.
+ :type key: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # Read the current value first.
+ current_value = self.read(key)
+
+ # If the key is not set at all, no change is needed; otherwise, in check mode,
+ # only report that a change would be made.
+ if current_value is None:
+ return False
+ elif self.check_mode:
+ return True
+
+ # Set up the command to run. Since D-Bus is needed for the reset operation,
+ # run the dconf command through the DBusWrapper (existing session or dbus-run-session).
+ command = ["dconf", "reset", key]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent', 'read']),
+ key=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not psutil_found:
+ module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
+
+ # If present state was specified, value must be provided.
+ if module.params['state'] == 'present' and module.params['value'] is None:
+ module.fail_json(msg='State "present" requires "value" to be set.')
+
+ # Create wrapper instance.
+ dconf = DconfPreference(module, module.check_mode)
+
+ # Process based on different states.
+ if module.params['state'] == 'read':
+ value = dconf.read(module.params['key'])
+ module.exit_json(changed=False, value=value)
+ elif module.params['state'] == 'present':
+ changed = dconf.write(module.params['key'], module.params['value'])
+ module.exit_json(changed=changed)
+ elif module.params['state'] == 'absent':
+ changed = dconf.reset(module.params['key'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py
new file mode 100644
index 00000000..641cc1d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/deploy_helper.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ type: path
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ type: str
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+ C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with C(state=absent))
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ type: str
+ description:
+ - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ type: str
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ type: path
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ type: path
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ type: str
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ type: int
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ community.general.deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ ansible.builtin.git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ community.general.deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+ path: /path/to/root
+- ansible.builtin.debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
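+ # Keep the reserved (newly finalized) release plus the newest keep_releases entries,
+ # ordered by creation time; anything older is deleted.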
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
new file mode 100644
index 00000000..2187ceaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_network
+short_description: Create, update, and delete MCP 1.0 & 2.0 networks
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Create, update, and delete MCP 1.0 & 2.0 networks
+author: 'Aimon Bustardo (@aimonb)'
+options:
+ name:
+ description:
+ - The name of the network domain to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Additional description of the network domain.
+ required: false
+ type: str
+ service_plan:
+ description:
+ - The service plan, either "ESSENTIALS" or "ADVANCED".
+ - MCP 2.0 Only.
+ choices: [ESSENTIALS, ADVANCED]
+ default: ESSENTIALS
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an MCP 1.0 network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA5
+ name: mynet
+
+- name: Create an MCP 2.0 network
+ community.general.dimensiondata_network:
+ region: na
+ mcp_user: my_user
+ mcp_password: my_password
+ location: NA9
+ name: mynet
+ service_plan: ADVANCED
+
+- name: Delete a network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA1
+ name: mynet
+ state: absent
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only)
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only)
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only)
+ type: bool
+ sample: false
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
+from ansible.module_utils._text import to_native
+
+if HAS_LIBCLOUD:
+ from libcloud.compute.base import NodeLocation
+ from libcloud.common.dimensiondata import DimensionDataAPIException
+
+
+class DimensionDataNetworkModule(DimensionDataModule):
+ """
+ The dimensiondata_network module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data network module.
+ """
+
+ super(DimensionDataNetworkModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.service_plan = self.module.params['service_plan']
+ self.state = self.module.params['state']
+
+ def state_present(self):
+ network = self._get_network()
+
+ if network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network already exists',
+ network=self._network_to_dict(network)
+ )
+
+ network = self._create_network()
+
+ self.module.exit_json(
+ changed=True,
+ msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
+ network=self._network_to_dict(network)
+ )
+
+ def state_absent(self):
+ network = self._get_network()
+
+ if not network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network "%s" does not exist' % self.name,
+ network=self._network_to_dict(network)
+ )
+
+ self._delete_network(network)
+
+ def _get_network(self):
+ if self.mcp_version == '1.0':
+ networks = self.driver.list_networks(location=self.location)
+ else:
+ networks = self.driver.ex_list_network_domains(location=self.location)
+
+ matched_network = [network for network in networks if network.name == self.name]
+ if matched_network:
+ return matched_network[0]
+
+ return None
+
+ def _network_to_dict(self, network):
+ network_dict = dict(
+ id=network.id,
+ name=network.name,
+ description=network.description
+ )
+
+ if isinstance(network.location, NodeLocation):
+ network_dict['location'] = network.location.id
+ else:
+ network_dict['location'] = network.location
+
+ if self.mcp_version == '1.0':
+ network_dict['private_net'] = network.private_net
+ network_dict['multicast'] = network.multicast
+ network_dict['status'] = None
+ else:
+ network_dict['private_net'] = None
+ network_dict['multicast'] = None
+ network_dict['status'] = network.status
+
+ return network_dict
+
+ def _create_network(self):
+
+ # Make sure service_plan argument is defined
+ if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
+ self.module.fail_json(
+ msg='service_plan required when creating network and location is MCP 2.0'
+ )
+
+ # Create network
+ try:
+ if self.mcp_version == '1.0':
+ network = self.driver.ex_create_network(
+ self.location,
+ self.name,
+ description=self.description
+ )
+ else:
+ network = self.driver.ex_create_network_domain(
+ self.location,
+ self.name,
+ self.module.params['service_plan'],
+ description=self.description
+ )
+ except DimensionDataAPIException as e:
+
+ self.module.fail_json(
+ msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ if self.module.params['wait'] is True:
+ network = self._wait_for_network_state(network.id, 'NORMAL')
+
+ return network
+
+ def _delete_network(self, network):
+ try:
+ if self.mcp_version == '1.0':
+ deleted = self.driver.ex_delete_network(network)
+ else:
+ deleted = self.driver.ex_delete_network_domain(network)
+
+ if deleted:
+ self.module.exit_json(
+ changed=True,
+ msg="Deleted network with id %s" % network.id
+ )
+
+ self.module.fail_json(
+ msg="Unexpected failure deleting network with id %s" % network.id
+ )
+
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ def _wait_for_network_state(self, net_id, state_to_wait_for):
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_network_domain,
+ self.module.params['wait_poll_interval'],
+ self.module.params['wait_time'],
+ net_id
+ )
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+ exception=traceback.format_exc()
+ )
+
+
+def main():
+ module = DimensionDataNetworkModule()
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
new file mode 100644
index 00000000..26c621f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
@@ -0,0 +1,568 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+#
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_vlan
+short_description: Manage a VLAN in a Cloud Control network domain.
+extends_documentation_fragment:
+- community.general.dimensiondata
+- community.general.dimensiondata_wait
+
+description:
+ - Manage VLANs in Cloud Control network domains.
+author: 'Adam Friedman (@tintoy)'
+options:
+ name:
+ description:
+ - The name of the target VLAN.
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the VLAN.
+ type: str
+ network_domain:
+ description:
+ - The Id or name of the target network domain.
+ required: true
+ type: str
+ private_ipv4_base_address:
+ description:
+ - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ type: str
+ private_ipv4_prefix_size:
+ description:
+ - The size of the IPv4 address space, e.g. 24.
+ - Required if C(private_ipv4_base_address) is specified.
+ type: int
+ state:
+ description:
+ - The desired state for the target VLAN.
+ - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ choices: [present, absent, readonly]
+ default: present
+ type: str
+ allow_expand:
+ description:
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
+ - If C(False), the module will fail under these conditions.
+ - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Add or update VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ description: A test VLAN
+ private_ipv4_base_address: 192.168.23.0
+ private_ipv4_prefix_size: 24
+ state: present
+ wait: yes
+
+- name: Read / get VLAN details
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ state: readonly
+ wait: yes
+
+- name: Delete a VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan_1
+ state: absent
+ wait: yes
+'''
+
+RETURN = '''
+vlan:
+ description: Dictionary describing the VLAN.
+ returned: On success when I(state) is 'present'
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+ from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+ HAS_LIBCLOUD = True
+
+except ImportError:
+ DimensionDataVlan = None
+
+ HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+ """
+ The dimensiondata_vlan module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data VLAN module.
+ """
+
+ super(DimensionDataVlanModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ network_domain=dict(required=True, type='str'),
+ private_ipv4_base_address=dict(default='', type='str'),
+ private_ipv4_prefix_size=dict(default=0, type='int'),
+ allow_expand=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'readonly'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.network_domain_selector = self.module.params['network_domain']
+ self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+ self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+ self.state = self.module.params['state']
+ self.allow_expand = self.module.params['allow_expand']
+
+ if self.wait and self.state != 'present':
+ self.module.fail_json(
+ msg='The wait parameter is only supported when state is "present".'
+ )
+
+ def state_present(self):
+ """
+ Ensure that the target VLAN is present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ vlan = self._create_vlan(network_domain)
+ self.module.exit_json(
+ msg='Created VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+ else:
+ diff = VlanDiff(vlan, self.module.params)
+ if not diff.has_changes():
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+
+ return
+
+ try:
+ diff.ensure_legal_change()
+ except InvalidVlanChangeError as invalid_vlan_change:
+ self.module.fail_json(
+ msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
+ self.name, self.network_domain_selector, invalid_vlan_change
+ )
+ )
+
+ if diff.needs_expand() and not self.allow_expand:
+ self.module.fail_json(
+ msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
+ self.private_ipv4_prefix_size
+ ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
+ vlan.private_ipv4_range_size
+ ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
+ )
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ if diff.needs_edit():
+ vlan.name = self.name
+ vlan.description = self.description
+
+ self.driver.ex_update_vlan(vlan)
+
+ if diff.needs_expand():
+ vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
+ self.driver.ex_expand_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Updated VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ def state_readonly(self):
+ """
+ Read the target VLAN's state.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if vlan:
+ self.module.exit_json(
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+ else:
+ self.module.fail_json(
+ msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ )
+ )
+
+ def state_absent(self):
+ """
+ Ensure that the target VLAN is not present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=False
+ )
+
+ return
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ self._delete_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ def _get_vlan(self, network_domain):
+ """
+ Retrieve the target VLAN details from CloudControl.
+
+ :param network_domain: The target network domain.
+ :return: The VLAN, or None if the target VLAN was not found.
+ :rtype: DimensionDataVlan
+ """
+
+ vlans = self.driver.ex_list_vlans(
+ location=self.location,
+ network_domain=network_domain
+ )
+ matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
+ if matching_vlans:
+ return matching_vlans[0]
+
+ return None
+
+ def _create_vlan(self, network_domain):
+ vlan = self.driver.ex_create_vlan(
+ network_domain,
+ self.name,
+ self.private_ipv4_base_address,
+ self.description,
+ self.private_ipv4_prefix_size
+ )
+
+ if self.wait:
+ vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
+
+ return vlan
+
+ def _delete_vlan(self, vlan):
+ try:
+ self.driver.ex_delete_vlan(vlan)
+
+ # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
+ if self.wait:
+ self._wait_for_vlan_state(vlan, 'NOT_FOUND')
+
+ except DimensionDataAPIException as api_exception:
+ self.module.fail_json(
+ msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
+ vlan.id, api_exception.msg
+ )
+ )
+
+ def _wait_for_vlan_state(self, vlan, state_to_wait_for):
+ network_domain = self._get_network_domain()
+
+ wait_poll_interval = self.module.params['wait_poll_interval']
+ wait_time = self.module.params['wait_time']
+
+ # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
+
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_vlan,
+ wait_poll_interval,
+ wait_time,
+ vlan
+ )
+
+ except DimensionDataAPIException as api_exception:
+ if api_exception.code != 'RESOURCE_NOT_FOUND':
+ raise
+
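+            # A RESOURCE_NOT_FOUND response while polling means the VLAN no longer exists,
+            # so return a synthetic VLAN record with status NOT_FOUND instead of failing.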
+ return DimensionDataVlan(
+ id=vlan.id,
+ status='NOT_FOUND',
+ name='',
+ description='',
+ private_ipv4_range_address='',
+ private_ipv4_range_size=0,
+ ipv4_gateway='',
+ ipv6_range_address='',
+ ipv6_range_size=0,
+ ipv6_gateway='',
+ location=self.location,
+ network_domain=network_domain
+ )
+
+ def _get_network_domain(self):
+ """
+ Retrieve the target network domain from the Cloud Control API.
+
+ :return: The network domain.
+ """
+
+ try:
+ return self.get_network_domain(
+ self.network_domain_selector, self.location
+ )
+ except UnknownNetworkError:
+ self.module.fail_json(
+ msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
+ self.network_domain_selector, self.location
+ )
+ )
+
+ return None
+
+
+class InvalidVlanChangeError(Exception):
+ """
+ Error raised when an illegal change to VLAN state is attempted.
+ """
+
+ pass
+
+
+class VlanDiff(object):
+ """
+ Represents differences between VLAN information (from CloudControl) and module parameters.
+ """
+
+ def __init__(self, vlan, module_params):
+ """
+
+ :param vlan: The VLAN information from CloudControl.
+ :type vlan: DimensionDataVlan
+ :param module_params: The module parameters.
+ :type module_params: dict
+ """
+
+ self.vlan = vlan
+ self.module_params = module_params
+
+ self.name_changed = module_params['name'] != vlan.name
+ self.description_changed = module_params['description'] != vlan.description
+ self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
+ self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
+
+ # Is configured prefix size greater than or less than the actual prefix size?
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+ self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+ self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
+
+ def has_changes(self):
+ """
+ Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+        :return: True, if there are changes; otherwise, False.
+ """
+
+ return self.needs_edit() or self.needs_expand()
+
+ def ensure_legal_change(self):
+ """
+ Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+ - private_ipv4_base_address cannot be changed
+ - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
+
+ :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+ """
+
+ # Cannot change base address for private IPv4 network.
+ if self.private_ipv4_base_address_changed:
+ raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
+
+ # Cannot shrink private IPv4 network (by increasing prefix size).
+ if self.private_ipv4_prefix_size_increased:
+ raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
+
+ def needs_edit(self):
+ """
+ Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+ :return: True, if an Edit operation is required; otherwise, False.
+ """
+
+ return self.name_changed or self.description_changed
+
+ def needs_expand(self):
+ """
+ Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+ The VLAN's network is expanded by reducing the size of its network prefix.
+
+ :return: True, if an Expand operation is required; otherwise, False.
+ """
+
+ return self.private_ipv4_prefix_size_decreased
+
+
+def vlan_to_dict(vlan):
+ return {
+ 'id': vlan.id,
+ 'name': vlan.name,
+ 'description': vlan.description,
+ 'location': vlan.location.id,
+ 'private_ipv4_base_address': vlan.private_ipv4_range_address,
+ 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
+ 'private_ipv4_gateway_address': vlan.ipv4_gateway,
+ 'ipv6_base_address': vlan.ipv6_range_address,
+ 'ipv6_prefix_size': vlan.ipv6_range_size,
+ 'ipv6_gateway_address': vlan.ipv6_gateway,
+ 'status': vlan.status
+ }
+
+
+def main():
+ module = DimensionDataVlanModule()
+
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'readonly':
+ module.state_readonly()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py
new file mode 100644
index 00000000..10161c04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/django_manage.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: django_manage
+short_description: Manages a Django application.
+description:
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
+ C(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+options:
+ command:
+ description:
+      - The name of the Django management command to run. Built-in commands are C(cleanup), C(collectstatic),
+ C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may
+ prompt for user input should be run with the C(--noinput) flag.
+ - The module will perform some basic parameter validation (when applicable) to the commands C(cleanup),
+ C(collectstatic), C(createcachetable), C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ type: str
+ required: true
+ project_path:
+ description:
+ - The path to the root of the Django application where B(manage.py) lives.
+ type: path
+ required: true
+ aliases: [app_path, chdir]
+ settings:
+ description:
+ - The Python path to the application's settings module, such as C(myapp.settings).
+ type: path
+ required: false
+ pythonpath:
+ description:
+ - A directory to add to the Python path. Typically used to include the settings module if it is located
+ external to the application directory.
+ type: path
+ required: false
+ aliases: [python_path]
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) installation to use while running the manage application.
+ type: path
+ aliases: [virtual_env]
+ apps:
+ description:
+ - A list of space-delimited apps to target. Used by the C(test) command.
+ type: str
+ required: false
+ cache_table:
+ description:
+ - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ type: str
+ required: false
+ clear:
+ description:
+ - Clear the existing files before trying to copy or link the original file.
+ - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
+ required: false
+ default: no
+ type: bool
+ database:
+ description:
+ - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
+ and C(migrate) commands.
+ type: str
+ required: false
+ failfast:
+ description:
+ - Fail the command immediately if a test fails. Used by the C(test) command.
+ required: false
+ default: false
+ type: bool
+ aliases: [fail_fast]
+ fixtures:
+ description:
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ type: str
+ required: false
+ skip:
+ description:
+      - Will skip over out-of-order missing migrations. This parameter can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ merge:
+ description:
+      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can only be
+        used with the C(migrate) command.
+ required: false
+ type: bool
+ link:
+ description:
+      - Will create links to the files instead of copying them. This parameter can only be used with the
+        C(collectstatic) command.
+ required: false
+ type: bool
+ liveserver:
+ description:
+      - This parameter was implemented a long time ago in a galaxy far away. It probably relates to the
+ django-liveserver package, which is no longer updated.
+ - Hence, it will be considered DEPRECATED and should be removed in a future release.
+ type: str
+ required: false
+ aliases: [live_server]
+ testrunner:
+ description:
+ - "From the Django docs: Controls the test runner class that is used to execute tests."
+ - This parameter is passed as-is to C(manage.py).
+ type: str
+ required: false
+ aliases: [test_runner]
+notes:
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
+ is specified.
+ - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
+ exist at the given location.
+ - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ unfortunately.
+ - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added
+ as an app in your settings.
+ - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
+ i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+requirements: [ "virtualenv", "django" ]
+author: "Scott Anderson (@tastychutney)"
+'''
+
+EXAMPLES = """
+- name: Run cleanup on the application installed in django_dir
+ community.general.django_manage:
+ command: cleanup
+ project_path: "{{ django_dir }}"
+
+- name: Load the initial_data fixture into the application
+ community.general.django_manage:
+ command: loaddata
+ project_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
+
+- name: Run syncdb on the application
+ community.general.django_manage:
+ command: syncdb
+ project_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
+
+- name: Run the SmokeTest test case from the main app. Useful for testing deploys
+ community.general.django_manage:
+ command: test
+ project_path: "{{ django_dir }}"
+ apps: main.SmokeTest
+
+- name: Create an initial superuser
+ community.general.django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ project_path: "{{ django_dir }}"
+"""
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+        msg += "\nstderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
+
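+    # Create the virtualenv if it does not already exist, then prepend its bin directory to
+    # PATH and export VIRTUAL_ENV so manage.py and django-admin resolve inside the virtualenv.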
+ venv_param = module.params['virtualenv']
+ if venv_param is None:
+ return
+
+ vbin = os.path.join(venv_param, 'bin')
+ activate = os.path.join(vbin, 'activate')
+
+ if not os.path.exists(activate):
+ virtualenv = module.get_bin_path('virtualenv', True)
+ vcmd = [virtualenv, venv_param]
+ rc, out_venv, err_venv = module.run_command(vcmd)
+ if rc != 0:
+ _fail(module, vcmd, out_venv, err_venv)
+
+ os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+ os.environ["VIRTUAL_ENV"] = venv_param
+
+
+def createcachetable_check_changed(output):
+ return "already exists" not in output
+
+
+def flush_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+ return ("Creating table " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+ return ("Migrating forwards " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line) \
+ or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+ return line and "0 static files" not in line
+
+
+def main():
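+    # Map of supported management commands to the command-specific module parameters they
+    # accept; supplying any other command-specific parameter triggers an early failure below.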
+ command_allowed_param_map = dict(
+ cleanup=(),
+ createcachetable=('cache_table', 'database', ),
+ flush=('database', ),
+ loaddata=('database', 'fixtures', ),
+ syncdb=('database', ),
+ test=('failfast', 'testrunner', 'liveserver', 'apps', ),
+ validate=(),
+ migrate=('apps', 'skip', 'merge', 'database',),
+ collectstatic=('clear', 'link', ),
+ )
+
+ command_required_param_map = dict(
+ loaddata=('fixtures', ),
+ )
+
+ # forces --noinput on every command that needs it
+ noinput_commands = (
+ 'flush',
+ 'syncdb',
+ 'migrate',
+ 'test',
+ 'collectstatic',
+ )
+
+ # These params are allowed for certain commands only
+ specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
+
+ # These params are automatically added to the command if present
+ general_params = ('settings', 'pythonpath', 'database',)
+ specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+ end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True, type='str'),
+ project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
+ settings=dict(default=None, required=False, type='path'),
+ pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']),
+ virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
+
+ apps=dict(default=None, required=False),
+ cache_table=dict(default=None, required=False, type='str'),
+ clear=dict(default=False, required=False, type='bool'),
+ database=dict(default=None, required=False, type='str'),
+ failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']),
+ fixtures=dict(default=None, required=False, type='str'),
+ liveserver=dict(default=None, required=False, type='str', aliases=['live_server'],
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']),
+ skip=dict(default=None, required=False, type='bool'),
+ merge=dict(default=None, required=False, type='bool'),
+ link=dict(default=None, required=False, type='bool'),
+ ),
+ )
+
+ command = module.params['command']
+ project_path = module.params['project_path']
+ virtualenv = module.params['virtualenv']
+
+ for param in specific_params:
+ value = module.params[param]
+ if param in specific_boolean_params:
+ value = module.boolean(value)
+ if value and param not in command_allowed_param_map[command]:
+ module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
+
+ for param in command_required_param_map.get(command, ()):
+ if not module.params[param]:
+ module.fail_json(msg='%s param is required for command=%s' % (param, command))
+
+ _ensure_virtualenv(module)
+
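+    # manage.py is executed relative to project_path (run_command below is given
+    # cwd=project_path), hence the requirement that it be executable with a valid shebang.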
+ cmd = "./manage.py %s" % (command, )
+
+ if command in noinput_commands:
+ cmd = '%s --noinput' % cmd
+
+ for param in general_params:
+ if module.params[param]:
+ cmd = '%s --%s=%s' % (cmd, param, module.params[param])
+
+ for param in specific_boolean_params:
+ if module.boolean(module.params[param]):
+ cmd = '%s --%s' % (cmd, param)
+
+ # these params always get tacked on the end of the command
+ for param in end_of_command_params:
+ if module.params[param]:
+ cmd = '%s %s' % (cmd, module.params[param])
+
+ rc, out, err = module.run_command(cmd, cwd=project_path)
+ if rc != 0:
+ if command == 'createcachetable' and 'table' in err and 'already exists' in err:
+ out = 'already exists.'
+ else:
+ if "Unknown command:" in err:
+ _fail(module, cmd, err, "Unknown django command: %s" % command)
+ _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
+
+ changed = False
+
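+    # Change detection is heuristic: a per-command *_filter_output function scans stdout for
+    # lines indicating that work was done, and a *_check_changed function (createcachetable)
+    # can override the result based on the full output.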
+ lines = out.split('\n')
+ filt = globals().get(command + "_filter_output", None)
+ if filt:
+ filtered_output = list(filter(filt, lines))
+ if len(filtered_output):
+ changed = True
+ check_changed = globals().get("{0}_check_changed".format(command), None)
+ if check_changed:
+ changed = check_changed(out)
+
+ module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path,
+ virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py
new file mode 100644
index 00000000..1c814a9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsimple.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+notes:
+  - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0, which uses the v2 API.
+options:
+ account_email:
+ description:
+ - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+ type: str
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for more information.
+ type: str
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ type: str
+ record:
+ description:
+      - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
+ type: str
+ record_ids:
+ description:
+      - List of record IDs to ensure either exist (I(state=present)) or do not exist (I(state=absent)).
+ type: list
+ type:
+ description:
+ - The type of DNS record to create.
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ type: str
+ ttl:
+ description:
+ - The TTL to give the new record in seconds.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Record value.
+ - Must be specified when trying to ensure a record exists.
+ type: str
+ priority:
+ description:
+ - Record priority.
+ type: int
+ state:
+ description:
+      - Whether the record should exist or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+      - Only use when C(state) is set to C(present) on a record.
+ type: 'bool'
+ default: no
+requirements:
+ - "dnsimple >= 1.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+ community.general.dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+
+- name: Fetch my.com domain records
+ community.general.dnsimple:
+ domain: my.com
+ state: present
+ delegate_to: localhost
+ register: records
+
+- name: Delete a domain
+ community.general.dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+ community.general.dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
+ register: record
+
+- name: Delete record using record_ids
+ community.general.dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ state: absent
+ delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
+
+- name: Change TTL value for a record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
+
+- name: Delete the record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r"""# """
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+DNSIMPLE_IMP_ERR = None
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import __version__ as dnsimple_version
+ from dnsimple.dnsimple import DNSimpleException
+ HAS_DNSIMPLE = True
+except ImportError:
+ DNSIMPLE_IMP_ERR = traceback.format_exc()
+ HAS_DNSIMPLE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_email=dict(type='str'),
+ account_api_token=dict(type='str', no_log=True),
+ domain=dict(type='str'),
+ record=dict(type='str'),
+ record_ids=dict(type='list'),
+ type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL']),
+ ttl=dict(type='int', default=3600),
+ value=dict(type='str'),
+ priority=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ solo=dict(type='bool', default=False),
+ ),
+ required_together=[
+ ['record', 'value']
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR)
+
+ if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'):
+ module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated."
+ " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." % dnsimple_version)
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
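+    # Credential resolution order: explicit module parameters, then the DNSIMPLE_EMAIL and
+    # DNSIMPLE_API_TOKEN environment variables, then the library's own .dnsimple file.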
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
+
+ try:
+ # Let's figure out what operation we want to do
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
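+            # The domain selector may be either the numeric DNSimple ID or the domain name.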
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+
+ # state is absent
+ else:
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain), params={'name': record})]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
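+            # An exact match on name, type and content means the record already exists;
+            # TTL and priority differences are handled below as an update.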
+ rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['priority'] != priority:
+ data = {}
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'type': record_type,
+ 'content': value,
+ }
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+
+ # state is absent
+ else:
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+
+ # state is absent
+ else:
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ except DNSimpleException as e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
new file mode 100644
index 00000000..75135c82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
+description:
+ - >
+ Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+ monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ type: str
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
+ required: true
+ type: str
+
+ sandbox:
+ description:
+ - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+ type: bool
+ default: 'no'
+
+ record_name:
+ description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+ of the state argument.
+ type: str
+
+ record_type:
+ description:
+ - Record type.
+ choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ type: str
+
+ record_value:
+ description:
+ - >
+ Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+ SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
+ - >
+        If record_value is not specified, no changes will be made and the record will be returned in 'result'
+ (in other words, this module can be used to fetch a record's current id, type, and ttl)
+ type: str
+
+ record_ttl:
+ description:
+      - The record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ default: 1800
+ type: int
+
+ state:
+ description:
+      - Whether the record should exist or not.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+ monitor:
+ description:
+ - If C(yes), add or change the monitor. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ systemDescription:
+ description:
+ - Description used by the monitor.
+ default: ''
+ type: str
+
+ maxEmails:
+ description:
+ - Number of emails sent to the contact list by the monitor.
+ default: 1
+ type: int
+
+ protocol:
+ description:
+ - Protocol used by the monitor.
+ default: 'HTTP'
+ choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+ type: str
+
+ port:
+ description:
+ - Port used by the monitor.
+ default: 80
+ type: int
+
+ sensitivity:
+ description:
+      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
+ default: 'Medium'
+ choices: ['Low', 'Medium', 'High']
+ type: str
+
+ contactList:
+ description:
+ - Name or id of the contact list that the monitor will notify.
+ - The default C('') means the Account Owner.
+ default: ''
+ type: str
+
+ httpFqdn:
+ description:
+ - The fully qualified domain name used by the monitor.
+ type: str
+
+ httpFile:
+ description:
+ - The file at the Fqdn that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ httpQueryString:
+ description:
+ - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ failover:
+ description:
+ - If C(yes), add or change the failover. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ autoFailover:
+ description:
+ - If true, fallback to the primary IP address is manual after a failover.
+ - If false, fallback to the primary IP address is automatic after a failover.
+ type: bool
+ default: 'no'
+
+ ip1:
+ description:
+ - Primary IP address for the failover.
+ - Required if adding or changing the monitor or failover.
+ type: str
+
+ ip2:
+ description:
+ - Secondary IP address for the failover.
+ - Required if adding or changing the failover.
+ type: str
+
+ ip3:
+ description:
+ - Tertiary IP address for the failover.
+ type: str
+
+ ip4:
+ description:
+ - Quaternary IP address for the failover.
+ type: str
+
+ ip5:
+ description:
+ - Quinary IP address for the failover.
+ type: str
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
+ - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+    These values can be registered and used in your playbooks.
+ - Only A records can have a monitor or failover.
+ - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+ - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+ - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
+
+EXAMPLES = '''
+- name: Fetch my.com domain records
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ register: response
+
+- name: Create a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+
+- name: Update the previously created record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
+
+- name: Fetch a specific record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ register: response
+
+- name: Delete a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ record_type: A
+ state: absent
+ record_name: test
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ ip3: 127.0.0.4
+ ip4: 127.0.0.5
+ ip5: 127.0.0.6
+
+- name: Add a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: my contact list
+
+- name: Add a monitor with http options
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: 1174 # contact list id
+ httpFqdn: http://my.com
+ httpFile: example
+ httpQueryString: some string
+
+- name: Add a monitor and a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ monitor: yes
+ protocol: HTTPS
+ port: 443
+ maxEmails: 1
+ systemDescription: monitoring my.com status
+ contactList: emergencycontacts
+
+- name: Remove a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: no
+
+- name: Remove a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: no
+'''
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import json
+import hashlib
+import hmac
+import locale
+from time import strftime, gmtime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six import string_types
+
+
+class DME2(object):
+
+ def __init__(self, apikey, secret, domain, sandbox, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+
+ if sandbox:
+ self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
+ self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
+ else:
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+ self.contactList_map = None # ["contactList_name"] => ID
+
+ # Lookup the domain ID if passed as a domain name vs. ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+ self.monitor_url = 'monitor'
+ self.contactList_url = 'contactList'
+
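+    # Every request is authenticated with the account API key plus an HMAC-SHA1 of the
+    # request date signed with the secret key, as required by the DNS Made Easy v2 API.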
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ locale.setlocale(locale.LC_TIME, 'C')
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
+
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, string_types):
+ data = urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
+ try:
+ return json.load(response)
+ except Exception:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ # Note that TXT records are surrounded by quotes in the API response.
+ elif record_type == "TXT":
+ value = '"{0}"'.format(record_value)
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+ def _instMap(self, type):
+ # @TODO cache this call so it's executed only once per ansible execution
+ map = {}
+ results = {}
+
+ # iterate over e.g. self.getDomains() || self.getRecords()
+ for result in getattr(self, 'get' + type.title() + 's')():
+
+ map[result['name']] = result['id']
+ results[result['id']] = result
+
+ # e.g. self.domain_map || self.record_map
+ setattr(self, type + '_map', map)
+ setattr(self, type + 's', results) # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+        # @TODO remove record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+ def getMonitor(self, record_id):
+ return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+ def updateMonitor(self, record_id, data):
+ return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+ def prepareMonitor(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def getContactList(self, contact_list_id):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.contactLists.get(contact_list_id, False)
+
+ def getContactlists(self):
+ return self.query(self.contactList_url, 'GET')['data']
+
+ def getContactListByName(self, name):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True, no_log=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ sandbox=dict(default=False, type='bool'),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ monitor=dict(default=False, type='bool'),
+ systemDescription=dict(default=''),
+ maxEmails=dict(default=1, type='int'),
+ protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
+ port=dict(default=80, type='int'),
+ sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
+ contactList=dict(default=None),
+ httpFqdn=dict(required=False),
+ httpFile=dict(required=False),
+ httpQueryString=dict(required=False),
+ failover=dict(default=False, type='bool'),
+ autoFailover=dict(default=False, type='bool'),
+ ip1=dict(required=False),
+ ip2=dict(required=False),
+ ip3=dict(required=False),
+ ip4=dict(required=False),
+ ip5=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_together=[
+ ['record_value', 'record_ttl', 'record_type']
+ ],
+ required_if=[
+ ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
+ ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
+ ]
+ )
+
+ protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
+ sensitivities = dict(Low=8, Medium=5, High=3)
+
+ DME = DME2(module.params["account_key"], module.params[
+ "account_secret"], module.params["domain"], module.params["sandbox"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
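+    # Copy record_value/record_type/record_ttl into the payload, stripping the "record_"
+    # prefix so the keys match the DNS Made Easy API field names (value, type, ttl).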
+ for i in ["record_value", "record_type", "record_ttl"]:
+ if not module.params[i] is None:
+ new_record[i[len("record_"):]] = module.params[i]
+ # Special handling for mx record
+ if new_record["type"] == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Fetch existing monitor if the A record indicates it should exist and build the new monitor
+ current_monitor = dict()
+ new_monitor = dict()
+ if current_record and current_record['type'] == 'A':
+ current_monitor = DME.getMonitor(current_record['id'])
+
+ # Build the new monitor
+ for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
+ 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
+ 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
+ if module.params[i] is not None:
+ if i == 'protocol':
+ # The API requires protocol to be a numeric in the range 1-6
+ new_monitor['protocolId'] = protocols[module.params[i]]
+ elif i == 'sensitivity':
+ # The API requires sensitivity to be a numeric of 8, 5, or 3
+ new_monitor[i] = sensitivities[module.params[i]]
+ elif i == 'contactList':
+ # The module accepts either the name or the id of the contact list
+ contact_list_id = module.params[i]
+ if not contact_list_id.isdigit() and contact_list_id != '':
+ contact_list = DME.getContactListByName(contact_list_id)
+ if not contact_list:
+ module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
+ contact_list_id = contact_list.get('id', '')
+ new_monitor['contactListId'] = contact_list_id
+ else:
+ # The module option names match the API field names
+ new_monitor[i] = module.params[i]
+
+ # Compare new record against existing one
+ record_changed = False
+ if current_record:
+ for i in new_record:
+ # Remove leading and trailing quote character from values because TXT records
+ # are surrounded by quotes.
+ if str(current_record[i]).strip('"') != str(new_record[i]):
+ record_changed = True
+ new_record['id'] = str(current_record['id'])
+
+ monitor_changed = False
+ if current_monitor:
+ for i in new_monitor:
+ if str(current_monitor.get(i)) != str(new_monitor[i]):
+ monitor_changed = True
+
+ # Follow Keyword Controlled Behavior
+ if state == 'present':
+ # return the record if no value is specified
+ if "value" not in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ # create record and monitor as the record does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ if new_monitor.get('monitor') and record_type == "A":
+ monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
+ module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
+ else:
+ module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
+
+ # update the record
+ updated = False
+ if record_changed:
+ DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
+ updated = True
+ if monitor_changed:
+ DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
+ updated = True
+ if updated:
+ module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ elif state == 'absent':
+ changed = False
+ # delete the record (and the monitor/failover) if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=changed)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_compose.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up), C(docker-compose stop) (with I(stopped)),
+        or C(docker-compose restart) (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up), C(docker-compose stop) (with I(stopped)), or C(docker-compose restart)
+        (with I(restarted)) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory referenced by project_src in the examples below.
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+        that: "not output.changed"
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+              description: MAC address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
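+# Maps the module's connection and TLS parameters to the equivalent docker-compose command line options.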
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
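+# docker-compose writes progress and error output directly to stdout/stderr. An Ansible module may
+# only emit JSON on stdout, so that output is redirected to temporary files and read back when a
+# failure needs to be reported.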
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+ finally:
+ sys.stderr = old_fh
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+ # assume either the exception body (if present) or the last warning was the 'most'
+ # fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
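+        # Overwrite the defaults above with the module parameters supplied by the user.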
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
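+                    # service.build() may return a short image ID while the stored ID is the full digest, hence the substring check.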
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+            self.client.fail("Error scaling %s - expected int, got %s" %
+                             (service_name, str(type(self.scale[service_name]))))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py
new file mode 100644
index 00000000..5e7e426c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_config.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs.
+
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds to the metadata of new configs 'ansible_key', a SHA-224 hash of the data, which is then used
+ in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
+ unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+options:
+ data:
+ description:
+ - The value of the config. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
+ - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.general.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the config data
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.general.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove config foo
+ community.general.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
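+            # The SHA-224 digest of the data is stored as the 'ansible_key' label so later runs can detect changes.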
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ for config in configs:
+ if config['Spec']['Name'] == self.name:
+ return config
+ return None
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ config_id = self.client.create_config(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ config = self.get_config()
+ if config:
+ self.results['config_id'] = config['ID']
+ data_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ config = self.get_config()
+ if config:
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py
new file mode 100644
index 00000000..30033ebf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container.py
@@ -0,0 +1,3563 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: Manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+
+notes:
+ - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify *all* options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+ Please note that several options have default values; if the container to be restarted uses different values for
+ these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
+ I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
+ can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
+ community.general 3.0.0 on.
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: no
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ type: raw
+ comparisons:
+ description:
+ - Allows to specify how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as C(networks).
+ - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which isn't present in the
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to *all* comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems with
+ containers which use different values for these options.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 3.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(container_default_behavior) to an explicit
+ value.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: '0.2.0'
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ version_added: '0.2.0'
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_requests:
+ description:
+ - Allows to request additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+          - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+ - If variable also present in I(env), then the I(env) value will override.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: no
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+        that looks like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+ - Consecutive number of failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+ - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+ container to requested configuration. The evaluation includes the image version. If the image
+ version in the registry does not match the container, the container will be recreated. You can
+ stop this behavior by setting I(ignore_image) to C(True).
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ type: bool
+ default: no
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - This option requires Docker API >= 1.25.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: yes
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key-value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+ - Setting this will force the container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+ - If not set, the value will remain the same if the container exists and will be inherited
+ from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+ - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from community.general 3.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+ - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - Note that as opposed to C(docker run ...), M(community.general.docker_container) does not remove the default
+ network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
+ Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from community.general 2.0.0 on.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "When networks are provided to the module via the I(networks) option, the module
+ behaves differently than C(docker run --network): C(docker run --network other)
+ will create a container with network C(other) attached, but the default network
+ not attached. This module with I(networks: {name: other}) will create a container
+ with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
+ the C(default) network will be removed afterwards."
+ - "If I(networks_cli_compatible) is set to C(yes), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for community.general 3.0.0: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ - The current default value is C(no). A new default of C(yes) will be set in community.general 2.0.0.
+ type: bool
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+ - If set to true, output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+ type: bool
+ default: no
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+ Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ privileged:
+ description:
+ - Give extended privileges to the container.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for the C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+ is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+ to resolve hostnames."
+ - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+ - If the I(networks) parameter is provided, the module will inspect each network to see if there exists
+ a bridge network with the optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image
+ when missing.
+ - "*Note:* images are only pulled when specified by name. If the image is specified
+ as a image ID (hash), it cannot be pulled."
+ type: bool
+ default: no
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in the I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+ - When removing an existing container, the docker daemon API call returns after the container
+ is scheduled for removal. Removal is usually very fast, but it can happen that during high I/O
+ load, removal can take longer. By default, the module will wait until the container has been
+ removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ version_added: '0.2.0'
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+ - Place quotes around the C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+ - Use with restart policy to control the maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To prevent the
+ image version from being taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+ - When the container is stopped, this value will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ trust_image_content:
+ description:
+ - If C(yes), skip image verification.
+ - The option has never been used by the module. It will be removed in community.general 3.0.0.
+ type: bool
+ default: no
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key-value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.general.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.general.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.general.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.general.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.general.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.general.docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.general.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.general.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag".
+ # For older docker installs, use "syslog-tag" instead.
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.general.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.general.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.general.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ community.general.docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ community.general.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.general.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.general.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ - # Add NVIDIA GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # We have one OR condition: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
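+
+# A minimal illustrative sketch (not part of the module's original examples) of the
+# mounts option documented above. The volume name "app_data" and the mount targets
+# are placeholders; tmpfs mounts need no source, and named volumes are created by
+# the Docker daemon if they do not exist yet.
+- name: Start a container with a named volume and a tmpfs mount
+ community.general.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ mounts:
+ - target: /data
+ source: app_data
+ type: volume
+ - target: /scratch
+ type: tmpfs
+ tmpfs_size: "64M"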
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_container). Note that the returned fact will be removed in
+ community.general 2.0.0.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+ - Empty if I(state) is C(absent).
+ - If I(detached) is C(false), will include an C(Output) attribute containing any output from the container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+ clean_dict_booleans_for_docker_api,
+ omit_none_from_dict,
+ parse_healthcheck,
+ DOCKER_COMMON_ARGS,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version
+ if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+ from docker.types import Ulimit, LogConfig
+ from docker import types as docker_types
+ else:
+ from docker.utils.types import Ulimit, LogConfig
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
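+# Module options whose values are human-readable sizes (for example "512M" or "1G")
+# and are converted to integer byte counts with human_to_bytes() before being
+# passed on to the Docker daemon.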
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'kernel_memory',
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
+
+def is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
+
+
+def parse_port_range(range_or_port, client):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+ Returns a list of integers, one for each port in the range.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ client.fail('Invalid port: "{0}"'.format(range_or_port))
+
+
+def split_colon_ipv6(text, client):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
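+ # Walk the string looking for bracketed IPv6 addresses: text outside brackets is
+ # split on ':', while each '[...]' block ends up as a single component so the
+ # colons inside the address are not treated as separators.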
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ if k == len(text):
+ result.append('')
+ break
+ start = k + 1
+ return result
+
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.auto_remove = None
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cap_drop = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpus = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.detach = None
+ self.debug = None
+ self.devices = None
+ self.device_read_bps = None
+ self.device_write_bps = None
+ self.device_read_iops = None
+ self.device_write_iops = None
+ self.device_requests = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.domainname = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.healthcheck = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.init = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.output_logs = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.mounts = None
+ self.name = None
+ self.network_mode = None
+ self.userns_mode = None
+ self.networks = None
+ self.networks_cli_compatible = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.pids_limit = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.removal_wait_timeout = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.runtime = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.tmpfs = None
+ self.trust_image_content = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+ self.working_dir = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+ self.comparisons = client.comparisons
+
+ # If state is 'absent', parameters do not have to be parsed or interpreted.
+ # Only the container's name is needed.
+ if self.state == 'absent':
+ return
+
+ if self.cpus is not None:
+ self.cpus = int(round(self.cpus * 1E9))
+
+ if self.groups:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports in ('all', 'ALL'):
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.tmpfs = self._parse_tmpfs()
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.sysctls = self._parse_sysctls()
+ self.log_config = self._parse_log_config()
+ try:
+ self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
+ except ValueError as e:
+ self.fail(to_native(e))
+
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+ self.pid_mode = self._replace_container_names(self.pid_mode)
+ self.ipc_mode = self._replace_container_names(self.ipc_mode)
+ self.network_mode = self._replace_container_names(self.network_mode)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ if self.mac_address:
+ # Ensure the MAC address uses colons instead of hyphens for later comparison
+ self.mac_address = self.mac_address.replace('-', ':')
+
+ if self.entrypoint:
+ # convert from list to str.
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])
+
+ if self.command:
+ # convert from list to str
+ if isinstance(self.command, list):
+ self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])
+
+ self.mounts_opt, self.expected_mounts = self._process_mounts()
+
+ self._check_mount_target_collisions()
+
+ for param_name in ["device_read_bps", "device_write_bps"]:
+ if client.module.params.get(param_name):
+ self._process_rate_bps(option=param_name)
+
+ for param_name in ["device_read_iops", "device_write_iops"]:
+ if client.module.params.get(param_name):
+ self._process_rate_iops(option=param_name)
+
+ if self.device_requests:
+ for dr_index, dr in enumerate(self.device_requests):
+ # Make sure that capabilities are lists of lists of strings
+ if dr['capabilities']:
+ for or_index, or_list in enumerate(dr['capabilities']):
+ for and_index, and_term in enumerate(or_list):
+ if not isinstance(and_term, string_types):
+ self.fail(
+ "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
+ dr_index, or_index, and_index))
+ or_list[and_index] = to_native(and_term)
+ # Make sure that options is a dictionary mapping strings to strings
+ if dr['options']:
+ dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
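+ # Mapping of Docker SDK update call keyword arguments to module option names;
+ # only options supported by the installed SDK/daemon versions are passed on.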
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ cpuset_mems='cpuset_mems',
+ mem_limit='memory',
+ mem_reservation='memory_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory',
+ restart_policy='restart_policy',
+ )
+
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
+ restart_policy = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+ result[key] = restart_policy
+ elif self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+ return result
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ domainname='domainname',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ working_dir='working_dir',
+ stop_timeout='stop_timeout',
+ healthcheck='healthcheck',
+ )
+
+ if self.client.docker_py_version < LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ create_params['cpu_shares'] = 'cpu_shares'
+ create_params['volume_driver'] = 'volume_driver'
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+
+ if self.disable_healthcheck:
+ # Make sure image's health check is overridden
+ result['healthcheck'] = {'test': ['NONE']}
+
+ if self.networks_cli_compatible and self.networks:
+ network = self.networks[0]
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if network.get(para):
+ params[para] = network[para]
+ network_config = dict()
+ network_config[network['name']] = self.client.create_endpoint_config(**params)
+ result['networking_config'] = self.client.create_networking_config(network_config)
+ return result
+
+ def _expand_host_paths(self):
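+ # Expand relative or '~'-prefixed host paths in volume specs such as
+ # "./data:/data:rw" to absolute paths; other specs are passed through unchanged.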
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
+
+ def _get_mounts(self):
+ '''
+ Return a list of container mounts.
+ :return:
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ # Only pass anonymous volumes to create container
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
+
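+ # Mapping of create_host_config() keyword arguments to module option names;
+ # entries are only passed on when the installed SDK/daemon support them.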
+ host_config_params = dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ dns='dns_servers',
+ dns_opt='dns_opts',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ userns_mode='userns_mode',
+ cap_add='capabilities',
+ cap_drop='cap_drop',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ sysctls='sysctls',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ oom_kill_disable='oom_killer',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode',
+ tmpfs='tmpfs',
+ init='init',
+ uts_mode='uts',
+ runtime='runtime',
+ auto_remove='auto_remove',
+ device_read_bps='device_read_bps',
+ device_write_bps='device_write_bps',
+ device_read_iops='device_read_iops',
+ device_write_iops='device_write_iops',
+ pids_limit='pids_limit',
+ mounts='mounts',
+ nano_cpus='cpus',
+ )
+
+ if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
+ # blkio_weight can always be updated, but can only be set on creation
+ # when Docker SDK for Python and Docker API are new enough
+ host_config_params['blkio_weight'] = 'blkio_weight'
+
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ host_config_params['cpu_shares'] = 'cpu_shares'
+ host_config_params['volume_driver'] = 'volume_driver'
+
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ if 'mounts' in params:
+ params['mounts'] = self.mounts_opt
+
+ if self.device_requests is not None:
+ params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
+
+ return self.client.create_host_config(**params)
+
+ @property
+ def default_host_ip(self):
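+ # Default bind IP for published ports: 0.0.0.0, unless one of the requested
+ # networks is a bridge network that sets the
+ # com.docker.network.bridge.host_binding_ipv4 option.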
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ try:
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ except NotFound as nfe:
+ self.client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
+ exception=traceback.format_exc()
+ )
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ return 'all'
+
+ default_ip = self.default_host_ip
+
+ binds = {}
+ for port in self.published_ports:
+ parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = parse_port_range(container_port, self.client)
+
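+ # parts is one of [<container_port>], [<host_port>, <container_port>]
+ # or [<host_ip>, <host_port>, <container_port>]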
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(default_ip,)]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(default_ip, parts[0])]
+ else:
+ port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ return binds
+
+ def _get_volume_binds(self, volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
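+ # Exposed port is a range such as "9010-9020"; check whether the
+ # published port falls inside that range.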
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+ Turn links into a dictionary
+ '''
+ if links is None:
+ return None
+
+ result = []
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result.append((parsed_link[0], parsed_link[1]))
+ else:
+ result.append((parsed_link[0], parsed_link[0]))
+ return result
+
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
+ def _parse_sysctls(self):
+ '''
+ Turn sysctls into a hash of Sysctl objects
+ '''
+ return self.sysctls
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config=dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = dict()
+ for k, v in self.log_options.items():
+ if not isinstance(v, string_types):
+ self.client.module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ self.log_options[k] = v
+ options['Config'][k] = v
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _parse_tmpfs(self):
+ '''
+ Turn tmpfs into a hash of Tmpfs objects
+ '''
+ result = dict()
+ if self.tmpfs is None:
+ return result
+
+ for tmpfs_spec in self.tmpfs:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return result
+
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if self.env:
+ for name, value in self.env.items():
+ if not isinstance(value, string_types):
+ self.fail("Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+ return network_id
+
+ def _process_mounts(self):
+ if self.mounts is None:
+ return None, None
+ mounts_list = []
+ mounts_expected = []
+ for mount in self.mounts:
+ target = mount['target']
+ datatype = mount['type']
+ mount_dict = dict(mount)
+ # Sanity checks (so we don't wait for docker-py to barf on input)
+ if mount_dict.get('source') is None and datatype != 'tmpfs':
+ self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+ mount_option_types = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+ )
+ for option, req_datatype in mount_option_types.items():
+ if mount_dict.get(option) is not None and datatype != req_datatype:
+ self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+ # Handle volume_driver and volume_options
+ volume_driver = mount_dict.pop('volume_driver')
+ volume_options = mount_dict.pop('volume_options')
+ if volume_driver:
+ if volume_options:
+ volume_options = clean_dict_booleans_for_docker_api(volume_options)
+ mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict.get('tmpfs_size') is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+ if mount_dict.get('tmpfs_mode') is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+ self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+ # Fill expected mount dict
+ mount_expected = dict(mount)
+ mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+ mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+ # Add result to lists
+ mounts_list.append(docker_types.Mount(**mount_dict))
+ mounts_expected.append(omit_none_from_dict(mount_expected))
+ return mounts_list, mounts_expected
+
+ def _process_rate_bps(self, option):
+ """
+ Format device_read_bps and device_write_bps option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _process_rate_iops(self, option):
+ """
+ Format device_read_iops and device_write_iops option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
+
+ def _check_mount_target_collisions(self):
+ last = dict()
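+ # Maps each mount target to the option (mounts or volumes) that already uses it,
+ # so duplicate targets can be reported with the offending option name(s).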
+
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ self.parameters.expected_device_requests = None
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+ self.parameters_map['expected_device_requests'] = 'device_requests'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return True if self.container else False
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return False
+
+ def _compare(self, a, b, compare):
+ '''
+ Compare values a and b as described in compare.
+ '''
+ return compare_generic(a, b, compare['comparison'], compare['type'])
+
+ def _decode_mounts(self, mounts):
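+ # Convert the 'Mounts' entries from container inspection into the flat
+ # dictionary form used by the module's mounts option, for comparison.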
+ if not mounts:
+ return mounts
+ result = []
+ empty_dict = dict()
+ for mount in mounts:
+ res = dict()
+ res['type'] = mount.get('Type')
+ res['source'] = mount.get('Source')
+ res['target'] = mount.get('Target')
+ res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
+ res['consistency'] = mount.get('Consistency')
+ res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
+ res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
+ res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
+ res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
+ res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
+ res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
+ res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
+ result.append(res)
+ return result
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_config_diff: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming that if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API version 1.25 already supports it is unclear.
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')),
+ cpus=host_config.get('NanoCpus'),
+ )
+ # Options which don't make sense without their accompanying option
+ if self.parameters.log_driver:
+ config_mapping['log_driver'] = log_config.get('Type')
+ config_mapping['log_options'] = log_config.get('Config')
+
+ if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
+ # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
+ # it has a default value, which is why we have to jump through hoops here.
+ config_mapping['auto_remove'] = host_config.get('AutoRemove')
+
+ if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
+ # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
+ # stop_timeout has a hybrid role, in that it used to be something only used
+ # for stopping containers, and is now also used as a container property.
+ # That's why it needs special handling here.
+ config_mapping['stop_timeout'] = config.get('StopTimeout')
+
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # For docker API < 1.22, update_container() is not supported. Thus
+ # we need to handle all limits which are usually handled by
+ # update_container() as configuration changes which require a container
+ # restart.
+ restart_policy = host_config.get('RestartPolicy', dict())
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ config_mapping.update(dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ ))
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
+ if not minimal_version.get('supported', True):
+ continue
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
+ if getattr(self.parameters, key, None) is not None:
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ p = getattr(self.parameters, key)
+ c = value
+ if compare['type'] == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif compare['type'] == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if key == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
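+ # Each device may be given as 'PathOnHost', 'PathOnHost:PathInContainer' or
+ # 'PathOnHost:PathInContainer:CgroupPermissions', e.g. '/dev/sda:/dev/xvda:rwm' (illustrative).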
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+ def _get_expected_entrypoint(self):
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if self.parameters.published_ports is None:
+ return None
+ expected_bound_ports = {}
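+ # Illustrative shapes of published_ports entries: {80: (8080,)} yields
+ # [{'HostIp': '0.0.0.0', 'HostPort': 8080}], while {'443/tcp': ('1.2.3.4', 443)} yields
+ # [{'HostIp': '1.2.3.4', 'HostPort': '443'}].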
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ return expected_bound_ports
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
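+ # Illustrative: for container name 'web', the link ('db', 'database') becomes
+ # '/db:/web/database', matching the format of HostConfig.Links in 'docker inspect'.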
+ for link, alias in self.parameters.links:
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = parts + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_device_requests(self):
+ if self.parameters.device_requests is None:
+ return None
+ device_requests = []
+ for dr in self.parameters.device_requests:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
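+ # Illustrative input: {'/data': {'bind': '/mnt/data', 'mode': 'ro'}} -> ['/data:/mnt/data:ro'];
+ # entries without a 'bind' key are skipped and the mode defaults to 'rw'.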
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
+ expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = dict()
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Env'):
+ for env_var in image[self.parameters.client.image_inspect_source]['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
+ image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_sysctls(self, config_sysctls):
+ self.log('_get_expected_sysctls')
+ if config_sysctls is None:
+ return None
+ result = dict()
+ for key, value in config_sysctls.items():
+ result[key] = to_text(value, errors='surrogate_or_strict')
+ return result
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
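+ # Illustrative: for etc_hosts, {'db.example.com': '10.0.0.1'} becomes ['db.example.com:10.0.0.1'],
+ # matching the HostConfig.ExtraHosts format.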
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
+
+ def _normalize_port(self, port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
+
+ def _get_expected_healthcheck(self):
+ self.log('_get_expected_healthcheck')
+ expected_healthcheck = dict()
+
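+ # Convert module option keys to the CamelCase keys used by the Docker API,
+ # e.g. 'start_period' -> 'StartPeriod' and 'interval' -> 'Interval' (illustrative).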
+ if self.parameters.healthcheck:
+ expected_healthcheck.update([(k.title().replace("_", ""), v)
+ for k, v in self.parameters.healthcheck.items()])
+
+ return expected_healthcheck
+
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
+ client.module.warn('log_options is ignored when log_driver is not specified')
+ if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
+ client.module.warn('healthcheck is ignored when test is not specified')
+ if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
+ client.module.warn('restart_retries is ignored when restart_policy is not specified')
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['ansible_facts'] = {'docker_container': self.facts}
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; in the former case, in case the
+ # container needs to be restarted, we use the existing container's
+ # image ID.
+ image = self._get_image()
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.parameters.image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if self.parameters.comparisons['image']['comparison'] == 'strict':
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.parameters.image
+ if not image_to_use and container and container.Image:
+ image_to_use = container.Image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(image_to_use, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.Id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
+ self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.parameters.paused:
+ self.client.pause(container=container.Id)
+ else:
+ self.client.unpause(container=container.Id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
+ ))
+ container = self._get_container(container.Id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.parameters.paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.Id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.fail(msg, **kwargs)
+
+ def _output_logs(self, msg):
+ self.client.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ if is_image_name_id(self.parameters.image):
+ image = self.client.find_image_by_id(self.parameters.image)
+ else:
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not image or self.parameters.pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.client.pull_image(repository, tag)
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
+ has_network_differences, network_differences = container.has_network_differences()
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if diff['parameter'].get(para):
+ params[para] = diff['parameter'][para]
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ self.client.report_warnings(new_container)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.parameters.detach is False:
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ status = self.client.wait(container_id)['StatusCode']
+ else:
+ status = self.client.wait(container_id)
+ if self.parameters.auto_remove:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output, status=status)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ dummy = self.client.restart(container_id)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+
+def detect_ipvX_address_usage(client):
+ '''
+ Helper function to detect whether any specified network uses ipv4_address or ipv6_address
+ '''
+ for network in client.module.params.get("networks") or []:
+ if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None:
+ return True
+ return False
+
+
+class AnsibleDockerClientContainer(AnsibleDockerClient):
+ # A list of module options which are not docker container properties
+ __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
+ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
+ 'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
+ 'output_logs', 'paused', 'removal_wait_timeout'
+ ] + list(DOCKER_COMMON_ARGS.keys()))
+
+ def _parse_comparisons(self):
+ comparisons = {}
+ comp_aliases = {}
+ # Put in defaults
+ explicit_types = dict(
+ command='list',
+ devices='set(dict)',
+ device_requests='set(dict)',
+ dns_search_domains='list',
+ dns_servers='list',
+ env='set',
+ entrypoint='list',
+ etc_hosts='set',
+ mounts='set(dict)',
+ networks='set(dict)',
+ ulimits='set(dict)',
+ device_read_bps='set(dict)',
+ device_write_bps='set(dict)',
+ device_read_iops='set(dict)',
+ device_write_iops='set(dict)',
+ )
+ all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
+ default_values = dict(
+ stop_timeout='ignore',
+ )
+ for option, data in self.module.argument_spec.items():
+ all_options.add(option)
+ for alias in data.get('aliases', []):
+ all_options.add(alias)
+ # Ignore options which aren't used as container properties
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
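+ # Illustrative value of the comparisons option:
+ #   comparisons: {'*': 'ignore', 'image': 'strict', 'env': 'allow_more_present'}
+ # The '*' wildcard sets the default mode; individual options (or their aliases) override it.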
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+ if key in all_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
+
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
+ self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
+
+ def __init__(self, **kwargs):
+ option_minimal_versions = dict(
+ # internal options
+ log_config=dict(),
+ publish_all_ports=dict(),
+ ports=dict(),
+ volume_binds=dict(),
+ name=dict(),
+ # normal options
+ device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
+ dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
+ ipc_mode=dict(docker_api_version='1.25'),
+ mac_address=dict(docker_api_version='1.25'),
+ oom_score_adj=dict(docker_api_version='1.22'),
+ shm_size=dict(docker_api_version='1.22'),
+ stop_signal=dict(docker_api_version='1.21'),
+ tmpfs=dict(docker_api_version='1.22'),
+ volume_driver=dict(docker_api_version='1.21'),
+ memory_reservation=dict(docker_api_version='1.21'),
+ kernel_memory=dict(docker_api_version='1.21'),
+ auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
+ init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
+ userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
+ pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
+ # specials
+ ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
+ detect_usage=detect_ipvX_address_usage,
+ usage_msg='ipv4_address or ipv6_address in networks'),
+ stop_timeout=dict(), # see _get_additional_minimal_versions()
+ )
+
+ super(AnsibleDockerClientContainer, self).__init__(
+ option_minimal_versions=option_minimal_versions,
+ option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
+ **kwargs
+ )
+
+ self.image_inspect_source = 'Config'
+ if self.docker_api_version < LooseVersion('1.21'):
+ self.image_inspect_source = 'ContainerConfig'
+
+ self._get_additional_minimal_versions()
+ self._parse_comparisons()
+
+ if self.module.params['container_default_behavior'] is None:
+ self.module.params['container_default_behavior'] = 'compatibility'
+ self.module.deprecate(
+ 'The container_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 3.0.0. To remove this warning, please specify an explicit value for it now',
+ version='3.0.0', collection_name='community.general' # was Ansible 2.14
+ )
+ if self.module.params['container_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory="0",
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+
+def main():
+ argument_spec = dict(
+ auto_remove=dict(type='bool'),
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='raw'),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpus=dict(type='float'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ detach=dict(type='bool'),
+ devices=dict(type='list', elements='str'),
+ device_read_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_write_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_read_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_write_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_requests=dict(type='list', elements='dict', options=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ dns_servers=dict(type='list', elements='str'),
+ dns_opts=dict(type='list', elements='str'),
+ dns_search_domains=dict(type='list', elements='str'),
+ domainname=dict(type='str'),
+ entrypoint=dict(type='list', elements='str'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ init=dict(type='bool'),
+ interactive=dict(type='bool'),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list', elements='str'),
+ log_driver=dict(type='str'),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ mounts=dict(type='list', elements='dict', options=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ )),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ )),
+ networks_cli_compatible=dict(type='bool'),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pid_mode=dict(type='str'),
+ pids_limit=dict(type='int'),
+ privileged=dict(type='bool'),
+ published_ports=dict(type='list', elements='str', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int'),
+ runtime=dict(type='str'),
+ security_opts=dict(type='list', elements='str'),
+ shm_size=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ sysctls=dict(type='dict'),
+ tmpfs=dict(type='list', elements='str'),
+ trust_image_content=dict(type='bool', default=False, removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ ulimits=dict(type='list', elements='str'),
+ user=dict(type='str'),
+ userns_mode=dict(type='str'),
+ uts=dict(type='str'),
+ volume_driver=dict(type='str'),
+ volumes=dict(type='list', elements='str'),
+ volumes_from=dict(type='list', elements='str'),
+ working_dir=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClientContainer(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']:
+ client.module.deprecate(
+ 'Please note that docker_container handles networks slightly differently than the docker CLI. '
+ 'If you specify networks, the default network will still be attached as the first network. '
+ '(You can specify purge_networks to remove all networks not explicitly listed.) '
+ 'This behavior will change in community.general 2.0.0. You can change the behavior now by setting '
+ 'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting '
+ 'it to `no`',
+ version='2.0.0', collection_name='community.general', # was Ansible 2.12
+ )
+ if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
+ client.module.deprecate(
+ 'Please note that the default value for `network_mode` will change from not specified '
+ '(which is equal to `default`) to the name of the first network in `networks` if '
+ '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
+ 'change the behavior now by explicitly setting `network_mode` to the name of the first '
+ 'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
+ 'Please make sure that the value you set to `network_mode` equals the inspection result '
+ 'for existing containers, otherwise the module will recreate them. You can find out the '
+ 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+ version='3.0.0', collection_name='community.general', # was Ansible 2.14
+ )
+
+ try:
+ cm = ContainerManager(client)
+ client.module.exit_json(**sanitize_result(cm.results))
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py
new file mode 100644
index 00000000..80025067
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.general.docker_container)
+ returns for a non-absent container.
+
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+    - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get infos on container
+ community.general.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
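+
+# An illustrative sketch (the container ID is hypothetical): as noted in the name option
+# documentation, the module also accepts a long or short container ID instead of a name.
+- name: Get info on container by (short) container ID
+  community.general.docker_container_info:
+    name: 8e47bf643eb9
+  register: result_by_id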
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+    - Will be C(none) if the container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(container),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py
new file mode 100644
index 00000000..674f8ad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists of objects of its services.
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+  - The module also allows listing object names for containers, images, networks and volumes,
+    and querying information on disk usage.
+  - The output differs depending on the API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+      - Summary information on disk space used by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+      - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+        the output will contain verbose information about objects matching the full output of the corresponding API method.
+        For details see the documentation of your version of the Docker API at L(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the
+        corresponding I(_info) module for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.general.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.general.docker_host_info:
+ images: yes
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.general.docker_host_info:
+ images: yes
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.general.docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.general.docker_host_info:
+ disk_usage: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.host_info
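+
+# An illustrative sketch: list only containers matching a filter. The C(status) filter used here is a
+# standard filter for listing containers; adjust the filters dictionary to your needs.
+- name: Get info on docker host and list running containers
+  community.general.docker_host_info:
+    containers: yes
+    containers_filters:
+      status: running
+  register: result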
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+      Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(yes)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+      Keys match the C(docker network ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(yes)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+      Keys match the C(docker container ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(yes)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+      Keys match the C(docker image ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(images) is C(yes)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+    - Summary information on disk usage by images, containers and volumes on the docker host,
+ unless I(verbose_output=yes). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(yes)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+ items = None
+ items_list = []
+
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ items = self.client.containers(**filter_arg)
+ elif docker_object == 'networks':
+ items = self.client.networks(**filter_arg)
+ elif docker_object == 'images':
+ items = self.client.images(**filter_arg)
+ elif docker_object == 'volumes':
+ items = self.client.volumes(**filter_arg)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ if docker_object != 'volumes':
+ return items
+ else:
+ return items['Volumes']
+
+ if docker_object == 'volumes':
+ items = items['Volumes']
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ option_minimal_versions = dict(
+        networks_filters=dict(docker_py_version='2.0.2'),
+ disk_usage=dict(docker_py_version='2.2.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py
new file mode 100644
index 00000000..1e2976be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images.
+
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+ image into a repository and archiving an image to a .tar file.
+ - Since Ansible 2.8, it is recommended to explicitly specify the image's source (I(source) can be C(build),
+ C(load), C(pull) or C(local)). This will be required from community.general 2.0.0 on.
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon, i.e. do not try to build, pull or load the image."
+ - "Before community.general 2.0.0, the value of this option will be auto-detected
+ to be backwards compatible, but a warning will be issued if it is not
+ explicitly specified. From community.general 2.0.0 on, auto-detection will be disabled
+ and this option will be made mandatory."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+          - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: yes
+ pull:
+ description:
+          - When building an image, download any updates to the FROM image in the Dockerfile.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: yes
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: no
+ etc_hosts:
+ description:
+          - Extra hosts to add to C(/etc/hosts) in containers created during the build, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ - Needs Docker SDK for Python >= 3.7.0.
+ type: bool
+ target:
+ description:
+          - When building an image, specifies an intermediate build stage by
+            name to use as the final stage for the resulting image.
+ type: str
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a .tar file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a .tar file.
+ - Set I(source) to C(load) if you want to load the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used (except
+ if I(path) is specified as well, in which case building will take precedence).
+ From community.general 2.0.0 on, you have to set I(source) to C(load).
+ type: path
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ - Please use I(build.dockerfile) instead. This option will be removed in community.general 2.0.0.
+ type: str
+ force:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name. Use with state
+ C(present) to build, load or pull an image when the image already exists. Also use with state C(present)
+ to force tagging an image.
+ - Please stop using this option, and use the more specialized force options
+ I(force_source), I(force_absent) and I(force_tag) instead.
+ - This option will be removed in community.general 2.0.0.
+ type: bool
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ - Please use I(build.http_timeout) instead. This option will be removed in community.general 2.0.0.
+ type: int
+ name:
+ description:
+ - "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
+ When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
+ - Note that image IDs (hashes) are not supported.
+ type: str
+ required: yes
+ path:
+ description:
+      - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ - Set I(source) to C(build) if you want to build the image. The option will
+ be set automatically before community.general 2.0.0 if this option is used. From community.general 2.0.0
+ on, you have to set I(source) to C(build).
+ - Please use I(build.path) instead. This option will be removed in community.general 2.0.0.
+ type: path
+ aliases:
+ - build_path
+ pull:
+ description:
+      - When building an image, download any updates to the FROM image in the Dockerfile.
+ - Please use I(build.pull) instead. This option will be removed in community.general 2.0.0.
+ - The default is currently C(yes). This will change to C(no) in community.general 2.0.0.
+ type: bool
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: no
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ - Please use I(build.rm) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: yes
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ - Please use I(build.nocache) instead. This option will be removed in community.general 2.0.0.
+ type: bool
+ default: no
+ repository:
+ description:
+ - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
+ format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+      - When C(absent), an image will be removed. Use the force option to un-tag and remove all images
+        matching the provided name.
+      - When C(present), check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ - By default the image will be pulled from Docker Hub, or the registry specified in the image's name. Note that
+ this will change in community.general 2.0.0, so to make sure that you are pulling, set I(source) to C(pull). To build
+ the image, provide a I(path) value set to a directory containing a context and Dockerfile, and set I(source)
+ to C(build). To load an image, specify I(load_path) to provide a path to an archive file. To tag an image to
+ a repository, provide a I(repository) path. If the name contains a repository path, it will be pushed.
+ - "*Note:* C(state=build) is DEPRECATED and will be removed in community.general 2.0.0. Specifying C(build) will behave the
+ same as C(present)."
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ - build
+ tag:
+ description:
+ - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+ I(latest).
+ - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
+ type: str
+ default: latest
+ buildargs:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ - Please use I(build.args) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ - Please use I(build.container_limits) instead. This option will be removed in community.general 2.0.0.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_tls:
+ description:
+ - "DEPRECATED. Whether to use tls to connect to the docker daemon. Set to
+ C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that
+ the server's certificate is valid for the server."
+ - "*Note:* If you specify this option, it will set the value of the I(tls) or
+ I(validate_certs) parameters if not set to C(no)."
+ - Will be removed in community.general 2.0.0.
+ type: str
+ choices:
+ - 'no'
+ - 'encrypt'
+ - 'verify'
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.general.docker_image:
+ name: pacur/centos-7
+ source: pull
+
+- name: Tag and push to docker hub
+ community.general.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: yes
+ source: local
+
+- name: Tag and push to local registry
+ community.general.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: yes
+ source: local
+
+- name: Add tag latest to image
+ community.general.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+    # As 'latest' is usually already present, we need to enable overwriting of existing tags:
+ force_tag: yes
+ source: local
+
+- name: Remove image
+ community.general.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
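+
+# An illustrative variant: per the force_absent option documented above, this forces removal of
+# all images matching the specified name.
+- name: Remove image, un-tagging and removing all matching images
+  community.general.docker_image:
+    state: absent
+    name: registry.ansible.com/chouseknecht/sinatra
+    tag: v1
+    force_absent: yes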
+
+- name: Build an image and push it to a private repo
+ community.general.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: yes
+ source: build
+
+- name: Archive image
+ community.general.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.general.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: yes
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build an image with build args
+ community.general.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.general.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
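+
+# An illustrative sketch (image, stage and host names are hypothetical): build only up to a named
+# multi-stage target and add an extra /etc/hosts entry during the build, using the
+# build.target and build.etc_hosts options documented above.
+- name: Build an image up to an intermediate stage with an extra build-time host entry
+  community.general.docker_image:
+    name: myimage:builder
+    build:
+      path: /path/to/build/dir
+      target: builder
+      etc_hosts:
+        artifacts.example.com: 10.1.2.3
+    source: build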
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.3.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ clean_dict_booleans_for_docker_api,
+ docker_version,
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+ try:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.auth import resolve_repository_name
+ else:
+ from docker.auth.auth import resolve_repository_name
+ from docker.utils.utils import parse_repository_tag
+ from docker.errors import DockerException
+ except ImportError:
+ # missing Docker SDK for Python handled in module_utils.docker.common
+ pass
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ self.archive_path = parameters.get('archive_path')
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters.get('force_source')
+ self.force_absent = parameters.get('force_absent')
+ self.force_tag = parameters.get('force_tag')
+ self.load_path = parameters.get('load_path')
+ self.name = parameters.get('name')
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters.get('repository')
+ self.rm = build.get('rm', True)
+ self.state = parameters.get('state')
+ self.tag = parameters.get('tag')
+ self.http_timeout = build.get('http_timeout')
+ self.push = parameters.get('push')
+ self.buildargs = build.get('args')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+ depending on user provided parameters.
+
+ :returns None
+ '''
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.remove_image(name, force=self.force_absent)
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, str(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+ :param name - name of the image. Type: str
+ :return None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ image = self.client.find_image(name=name, tag=tag)
+ if not image:
+ self.log("archive image: image %s:%s not found" % (name, tag))
+ return
+
+ image_name = "%s:%s" % (name, tag)
+ self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ image = self.client.get_image(image_name)
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, str(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
+ if self.client.docker_py_version >= LooseVersion('3.0.0'):
+ for chunk in image:
+ fd.write(chunk)
+ else:
+ for chunk in image.stream(2048, decode_content=False):
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
+
+ image = self.client.find_image(name=name, tag=tag)
+ if image:
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
+ registry, repo_name = resolve_repository_name(repository)
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if re.search('unauthorized', str(exc)):
+ if re.search('authentication required', str(exc)):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, str(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, str(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ if not self.check_mode:
+ try:
+                    # Finding the image does not always work, especially when running against a localhost registry. In those
+ # cases, if we don't set force=True, it errors.
+ image_name = name
+ if tag and not re.search(tag, name):
+ image_name = "%s:%s" % (name, tag)
+ tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
+ if not tag_status:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % str(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ params = dict(
+ path=self.build_path,
+ tag=self.name,
+ rm=self.rm,
+ nocache=self.nocache,
+ timeout=self.http_timeout,
+ pull=self.pull,
+ forcerm=self.rm,
+ dockerfile=self.dockerfile,
+ decode=True,
+ )
+ if self.client.docker_py_version < LooseVersion('3.0.0'):
+ params['stream'] = True
+
+ if self.tag:
+ params['tag'] = "%s:%s" % (self.name, self.tag)
+ if self.container_limits:
+ params['container_limits'] = self.container_limits
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ self.buildargs[key] = to_native(value)
+ params['buildargs'] = self.buildargs
+ if self.cache_from:
+ params['cache_from'] = self.cache_from
+ if self.network:
+ params['network_mode'] = self.network
+ if self.extra_hosts:
+ params['extra_hosts'] = self.extra_hosts
+ if self.use_config_proxy:
+ params['use_config_proxy'] = self.use_config_proxy
+ # Due to a bug in docker-py, it will crash if
+ # use_config_proxy is True and buildargs is None
+ if 'buildargs' not in params:
+ params['buildargs'] = {}
+ if self.target:
+ params['target'] = self.target
+
+ build_output = []
+ for line in self.client.build(**params):
+ # line = json.loads(line)
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ build_line = line.get("stream") or line.get("status") or ''
+ build_output.append(build_line)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {"stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag)}
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ output = self.client.load_image(image_tar)
+ if output is not None:
+                    # Old versions of the Docker SDK for Python (before version 2.5.0) do not return anything.
+ # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+                    # Note that before that commit, something other than None was returned, but that was also
+ # only introduced in a commit that first appeared in 2.5.0 (see
+ # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+ # So the above check works for every released version of Docker SDK for Python.
+ has_output = True
+ for line in output:
+ self.log(line, pretty_print=True)
+ if "stream" in line or "status" in line:
+ load_line = line.get("stream") or line.get("status") or ''
+ load_output.append(load_line)
+ else:
+ if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+ self.client.module.warn(
+ 'The installed version of the Docker SDK for Python does not return the loading results'
+ ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+                        ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+ ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
+ ' (2.5.0 was released in August 2017).'
+ )
+ else:
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+                        ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ expected_image = '%s:%s' % (self.name, self.tag)
+ if expected_image not in loaded_images:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(["'%s'" % image for image in sorted(loaded_images)]), ))
+
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool'),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ )),
+ archive_path=dict(type='path'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ dockerfile=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ http_timeout=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ nocache=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ path=dict(type='path', aliases=['build_path'], removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ pull=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ rm=dict(type='bool', default=True, removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ state=dict(type='str', default='present', choices=['absent', 'present', 'build']),
+ tag=dict(type='str', default='latest'),
+ use_tls=dict(type='str', choices=['no', 'encrypt', 'verify'], removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ buildargs=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ )
+
+ required_if = [
+ # ('state', 'present', ['source']), -- enable in community.general 2.0.0
+ # ('source', 'build', ['build']), -- enable in community.general 2.0.0
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_build_cache_from(client):
+ return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
+
+ def detect_build_network(client):
+ return client.module.params['build'] and client.module.params['build'].get('network') is not None
+
+ def detect_build_target(client):
+ return client.module.params['build'] and client.module.params['build'].get('target') is not None
+
+ def detect_use_config_proxy(client):
+ return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+ option_minimal_versions = dict()
+ option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
+ option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
+ option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
+ option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
+ option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.20',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if client.module.params['state'] == 'build':
+ client.module.deprecate('The "build" state has been deprecated for a long time. '
+ 'Please use "present", which has the same meaning as "build".',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+ client.module.params['state'] = 'present'
+ if client.module.params['use_tls']:
+ client.module.deprecate('The "use_tls" option has been deprecated for a long time. '
+ 'Please use the "tls" and "validate_certs" options instead.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.11
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ build_options = dict(
+ container_limits='container_limits',
+ dockerfile='dockerfile',
+ http_timeout='http_timeout',
+ nocache='nocache',
+ path='path',
+ pull='pull',
+ rm='rm',
+ buildargs='args',
+ )
+ for option, build_option in build_options.items():
+ default_value = None
+ if option in ('rm', ):
+ default_value = True
+ elif option in ('nocache', ):
+ default_value = False
+ if client.module.params[option] != default_value:
+ if client.module.params['build'] is None:
+ client.module.params['build'] = dict()
+ if client.module.params['build'].get(build_option, default_value) != default_value:
+ client.fail('Cannot specify both %s and build.%s!' % (option, build_option))
+ client.module.params['build'][build_option] = client.module.params[option]
+ client.module.deprecate('Please specify build.%s instead of %s. The %s option '
+ 'has been renamed' % (build_option, option, option),
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ if client.module.params['source'] == 'build':
+ if (not client.module.params['build'] or not client.module.params['build'].get('path')):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+ if client.module.params['build'].get('pull') is None:
+ client.module.deprecate("The default for build.pull is currently 'yes', but will be changed to "
+ "'no' in community.general 2.0.0. Please set build.pull explicitly to the value you need",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+ client.module.params['build']['pull'] = True # TODO: change to False in community.general 2.0.0
+
+ if client.module.params['state'] == 'present' and client.module.params['source'] is None:
+ # Autodetection. To be removed in community.general 2.0.0.
+ if (client.module.params['build'] or dict()).get('path'):
+ client.module.params['source'] = 'build'
+ elif client.module.params['load_path']:
+ client.module.params['source'] = 'load'
+ else:
+ client.module.params['source'] = 'pull'
+ client.module.deprecate('The value of the "source" option was determined to be "%s". '
+ 'Please set the "source" option explicitly. Autodetection will '
+ 'be removed in community.general 2.0.0.' % client.module.params['source'],
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ if client.module.params['force']:
+ client.module.params['force_source'] = True
+ client.module.params['force_absent'] = True
+ client.module.params['force_tag'] = True
+ client.module.deprecate('The "force" option will be removed in community.general 2.0.0. Please '
+ 'use the "force_source", "force_absent" or "force_tag" option '
+ 'instead, depending on what you want to force.',
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_facts.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+        where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can also
+        be used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
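+
+# An illustrative sketch: as described above, omitting the name option returns inspection results
+# for all images available locally.
+- name: Inspect all images available locally
+  community.general.docker_image_info:
+  register: all_images
+
+- name: Print the number of locally available images
+  ansible.builtin.debug:
+    msg: "{{ all_images.images | length }} images found"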
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Look up and inspect each image name found in the names parameter.
+
+ :returns: list of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
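The tag handling in get_facts() above relies on the Docker SDK's repository/tag split, falling back to 'latest' when no tag is given. A minimal sketch of that behaviour, assuming the Docker SDK for Python is installed:

    from docker import utils

    repository, tag = utils.parse_repository_tag('pacur/centos-7')  # -> ('pacur/centos-7', None)
    if not tag:
        tag = 'latest'  # same fallback as get_facts()
    print('%s:%s' % (repository, tag))  # pacur/centos-7:latest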
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py
new file mode 100644
index 00000000..8cf08ef0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_image_info.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.general.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can be used instead
+ of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.general.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.general.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Look up and inspect each image name found in the names parameter.
+
+ :returns: list of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_image_facts', 'community.general.docker_image_facts'):
+ client.module.deprecate("The 'docker_image_facts' module has been renamed to 'docker_image_info'",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
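The "does this image exist locally" pattern described in the documentation above (empty result list versus one element) can also be expressed directly against the Docker SDK. A rough equivalent, assuming the SDK is installed and a local daemon is reachable:

    import docker
    from docker.errors import ImageNotFound

    client = docker.from_env()
    try:
        image = client.images.get('pacur/centos-7')  # inspect by name or ID
        print('exists locally: %s' % image.id)
    except ImageNotFound:
        print('not present locally; docker_image with source=pull would fetch it')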
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py
new file mode 100644
index 00000000..6522e642
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_login.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+ - Provides functionality similar to the "docker login" command.
+ - Authenticate with a docker registry and add the credentials to your local Docker config file or to the
+ credentials store associated with the registry. Adding the credentials to the config file or the credential
+ store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+ and the Docker SDK for Python without needing to provide credentials again.
+ - Running in check mode will perform the authentication without updating the config file.
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ email:
+ description:
+ - Does nothing, do not use.
+ - Will be removed in community.general 3.0.0.
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: no
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+ - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+ - To log out you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "L(Python bindings for docker credentials store API) >= 0.2.1
+ (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+ - "Docker API >= 1.20"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.general.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+ community.general.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.general.docker_login:
+ state: absent
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when state='present'
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from docker.errors import DockerException
+ from docker import auth
+
+ # Earlier versions of docker/docker-py put decode_auth
+ # in docker.auth.auth instead of docker.auth
+ if hasattr(auth, 'decode_auth'):
+ from docker.auth import decode_auth
+ else:
+ from docker.auth.auth import decode_auth
+
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ HAS_DOCKER_PY,
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+ EMAIL_REGEX,
+ RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+ try:
+ from docker.credentials.errors import StoreError, CredentialsNotFound
+ from docker.credentials import Store
+ except ImportError:
+ try:
+ from dockerpycreds.errors import StoreError, CredentialsNotFound
+ from dockerpycreds.store import Store
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+ # docker-pycreds missing, so we need to create some place holder classes
+ # to allow instantiation.
+
+ class StoreError(Exception):
+ pass
+
+ class CredentialsNotFound(Exception):
+ pass
+
+
+class DockerFileStore(object):
+ '''
+ A custom credential store class that implements only the functionality we need to
+ update the docker config file when no credential helper is provided.
+ '''
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found or an invalid config found so we'll ignore it.
+ config = dict()
+
+ # Update our internal config with what ever was loaded.
+ self._config.update(config)
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+ Otherwise raise a `StoreError`
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+ # Make sure directory exists
+ dir = os.path.dirname(self._config_path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+ Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.email = parameters.get('email')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+ Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ if self.email and not re.match(EMAIL_REGEX, self.email):
+ self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+ "/%s/" % (EMAIL_REGEX))
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=self.reauthorize,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+ # If user is already logged in, then response contains password for user
+ if 'password' in response:
+ # The registry returns the stored (correct) password if the user is already logged in,
+ # even when a wrong password is given. So if the returned password differs from the one
+ # we passed and the user did not request to reauthorize, re-authenticate anyway.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=True,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+ If the authorization is not stored, attempt to store the authorization values via
+ the appropriate credential helper or in the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+ Return an instance of docker.credentials.Store used by the given registry,
+ falling back to DockerFileStore when no credential helper is configured.
+
+ :return: A Store or DockerFileStore instance
+ :rtype: Union[docker.credentials.Store, DockerFileStore]
+ '''
+
+ # Older versions of docker-py don't have this feature.
+ try:
+ credstore_env = self.client.credstore_env
+ except AttributeError:
+ credstore_env = None
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ if hasattr(auth, 'get_credential_store'):
+ store_name = auth.get_credential_store(config, registry)
+ elif 'credsStore' in config:
+ store_name = config['credsStore']
+ else:
+ store_name = None
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str', removed_in_version='3.0.0', removed_from_collection='community.general'), # was Ansible 2.14
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
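The DockerFileStore fallback above writes the same "auths" structure that "docker login" produces: one base64-encoded "username:password" pair per registry. A small sketch of that encoding, standard library only and with illustrative values:

    import base64
    import json

    username, password = 'testuser', 's3cret'  # illustrative values
    auth = base64.b64encode(('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
    config = {'auths': {'https://index.docker.io/v1/': {'auth': auth}}}
    print(json.dumps(config, indent=4, sort_keys=True))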
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py
new file mode 100644
index 00000000..f70cc67d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the "docker network" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: yes
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: no
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: no
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_options:
+ description:
+ - Dictionary of IPAM options.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
+ options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
+ the IPAM config, not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
+ parameter.
+ type: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ option to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+ It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+ connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+ network, use the M(community.general.docker_container) module in a loop over your containers to make sure they are connected properly.
+ - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.general.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.general.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.general.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: yes
+
+- name: Create a network with driver options
+ community.general.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.general.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+
+- name: Create a network with labels
+ community.general.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.general.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.general.docker_network:
+ name: network_one
+ state: absent
+ force: yes
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ docker_version,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.types import IPAMPool, IPAMConfig
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
+ """
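+ # For example: validate_cidr('172.3.27.0/24') -> 'ipv4',
+ # validate_cidr('fdd1:ac8c:0557:7ce1::/64') -> 'ipv6',
+ # validate_cidr('not-a-cidr') raises ValueError.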
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+ :return: Ansible module key
+ :rtype: str
+ """
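+ # For example: 'Subnet' -> 'subnet', 'Gateway' -> 'gateway',
+ # 'AuxiliaryAddresses' -> 'aux_addresses' (special-cased below).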
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
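+ # For example (illustrative IPAM config entries):
+ # a={'subnet': '172.3.27.0/24', 'gateway': None}, b={'subnet': '172.3.27.0/24', 'gateway': '172.3.27.2'} -> True
+ # a={'subnet': '172.3.28.0/24'}, b={'subnet': '172.3.27.0/24'} -> False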
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
+ if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
+ self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
+ self.parameters.ipam_config = [self.parameters.ipam_options]
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(str(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+ if self.parameters.ipam_config is not None and self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ params = dict(
+ driver=self.parameters.driver,
+ options=self.parameters.driver_options,
+ )
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ ipam_pools.append(IPAMPool(**ipam_pool))
+ else:
+ ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add ipam parameter if a driver was specified or if IPAM parameters
+ # were specified. Leaving this parameter away can significantly speed up
+ # creation; on my machine creation with this option needs ~15 seconds,
+ # and without just a few seconds.
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools,
+ options=self.parameters.ipam_driver_options)
+ else:
+ params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools)
+
+ if self.parameters.enable_ipv6 is not None:
+ params['enable_ipv6'] = self.parameters.enable_ipv6
+ if self.parameters.internal is not None:
+ params['internal'] = self.parameters.internal
+ if self.parameters.scope is not None:
+ params['scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ params['attachable'] = self.parameters.attachable
+ if self.parameters.labels:
+ params['labels'] = self.parameters.labels
+
+ if not self.check_mode:
+ resp = self.client.create_network(self.parameters.name, **params)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.remove_network(self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ self.client.connect_container_to_network(name, self.parameters.name)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name),
+ parameter=True,
+ active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ self.client.disconnect_container_from_network(container_name, self.parameters.name)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['ansible_facts'] = {u'docker_network': network_facts}
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_options=dict(type='dict', default={}, options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ ), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ mutually_exclusive = [
+ ('ipam_config', 'ipam_options')
+ ]
+
+ option_minimal_versions = dict(
+ scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
+ labels=dict(docker_api_version='1.23'),
+ ipam_driver_options=dict(docker_py_version='2.0.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.22',
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
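With Docker SDK for Python >= 2.0.0, the IPAM handling in create_network() above boils down to building IPAMPool/IPAMConfig objects. A condensed sketch with illustrative values; the commented create_network call assumes an APIClient instance named client:

    from docker.types import IPAMPool, IPAMConfig

    pool = IPAMPool(subnet='172.3.27.0/24', gateway='172.3.27.2',
                    iprange='172.3.27.0/26', aux_addresses={'host1': '172.3.27.3'})
    ipam = IPAMConfig(driver='default', pool_configs=[pool])
    # client.create_network('network_three', driver='bridge', ipam=ipam)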
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py
new file mode 100644
index 00000000..feeff6a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_network_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about a Docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.general.docker_network)
+ returns for a non-absent network.
+
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+ - When identifying an existing network, the name may be a network name or a long or short network ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on network
+ community.general.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if the network does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if network else False),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
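The exists/network pair returned above mirrors a plain SDK lookup. Roughly, assuming the SDK is installed and the daemon is reachable:

    import docker
    from docker.errors import NotFound

    client = docker.from_env()
    try:
        network = client.networks.get('mydata').attrs  # same data as 'docker network inspect mydata'
        exists = True
    except NotFound:
        network, exists = None, False
    print(exists)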
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py
new file mode 100644
index 00000000..12980e5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+ - Manages Docker nodes via a Swarm manager.
+ - This module allows changing the node's role and availability, and modifying, adding or removing node labels.
+options:
+ hostname:
+ description:
+ - The hostname or ID of the node as registered in Swarm.
+ - If more than one node is registered using the same hostname, the ID must be used;
+ otherwise the module will fail.
+ type: str
+ required: yes
+ labels:
+ description:
+ - User-defined key/value metadata that will be assigned as a node attribute.
+ - Label operations in this module apply to the docker swarm node specified by I(hostname).
+ Use the M(community.general.docker_swarm) module to add/modify/remove swarm cluster labels.
+ - The actual state of labels assigned to the node when the module completes its work depends on
+ the I(labels_state) and I(labels_to_remove) parameter values. See their descriptions below.
+ type: dict
+ labels_state:
+ description:
+ - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If the label provided on the list is not assigned to the node, the entry is ignored.
+ - If a label is present both in I(labels_to_remove) and I(labels), then the value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty, then all labels assigned to the
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.general.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.general.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.general.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.general.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.general.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
+'''
+
+RETURN = '''
+node:
+ description: Information about the node after the update operation.
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+ if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+            self.client.fail("Failed to get node information: %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+ if not node_info['Spec']['Role'] == self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+            if node_info['Spec']['Availability'] != self.parameters.availability:
+                node_spec['Availability'] = self.parameters.availability
+                changed = True
+
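+        # Build the desired label set. With 'replace', the labels from I(labels) are taken verbatim
+        # (or all labels are cleared when I(labels) is empty); with 'merge', the existing labels are
+        # kept and only the keys listed in I(labels) are added or updated.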
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
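+        # Remove the labels listed in I(labels_to_remove); keys that are also present in I(labels)
+        # are kept (with a warning), so explicitly provided label values always win.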
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+                    self.client.fail("Failed to update node: %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py
new file mode 100644
index 00000000..c01edadc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a docker swarm node from the Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the node to inspect.
+      - The list of node names to inspect.
+      - If empty, information about all nodes in the Swarm cluster is returned.
+      - When identifying the node, use either the hostname of the node (as registered in Swarm) or the node ID.
+      - If I(self) is C(true), this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+      - If C(true), I(name) is ignored.
+      - If C(false), the query depends on whether I(name) is provided and on its value.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.general.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.general.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.general.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.general.docker_node_info:
+ self: true
+ register: result
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+ - Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided.
+ - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+ at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm
+ managers and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
+ results = []
+
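+    # Three query modes: the node the docker daemon runs on (self=true), all swarm nodes (no name given),
+    # or only the nodes listed in 'name' (missing nodes are silently skipped).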
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py
new file mode 100644
index 00000000..025c6130
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_prune.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows pruning various docker objects
+
+description:
+  - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+ and C(docker volume prune) via the Docker API.
+
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ - Requires version 3.3.0 of the Docker SDK for Python or newer.
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.general.docker_prune:
+ containers: yes
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+ community.general.docker_prune:
+ containers: yes
+ images: yes
+ images_filters:
+ dangling: false
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: '0'
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: '0'
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: '0'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+try:
+ from ansible_collections.community.general.plugins.module_utils.docker.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ min_docker_api_version='1.25',
+ min_docker_version='2.1.0',
+ )
+
+ # Version checks
+ cache_min_version = '3.3.0'
+ if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for the builder_cache option is %s. Use `pip install --upgrade docker` to upgrade."
+ client.fail(msg % (docker_version, cache_min_version))
+
+ try:
+ result = dict()
+
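+        # Each enabled prune type calls the corresponding Docker SDK prune_* method; the filter
+        # dictionaries are normalized first because the Docker API expects filter values as strings.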
+ if client.module.params['containers']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+ res = client.prune_containers(filters=filters)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ res = client.prune_images(filters=filters)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ res = client.prune_networks(filters=filters)
+ result['networks'] = res.get('NetworksDeleted') or []
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ res = client.prune_volumes(filters=filters)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['builder_cache']:
+ res = client.prune_builds()
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py
new file mode 100644
index 00000000..b6ce7f28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets.
+
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds an C(ansible_key) label to the metadata of new secrets, containing a hash of the secret data, which is
+    then used in future runs to test if a secret has changed. If the C(ansible_key) label is not present, a secret
+    will not be updated unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+options:
+ data:
+ description:
+ - The value of the secret. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.general.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the secret data
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.general.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove secret foo
+ community.general.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
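+    # Dispatch on the requested state; for 'present' a SHA-224 hash of the secret data is computed
+    # first and later stored as the 'ansible_key' label for change detection.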
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ for secret in secrets:
+ if secret['Spec']['Name'] == self.name:
+ return secret
+ return None
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
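+    # state == 'present': an existing secret is compared via its 'ansible_key' hash label and its other
+    # labels; any difference (or force=true) triggers a remove-and-recreate, since secrets cannot be
+    # updated in place.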
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ secret = self.get_secret()
+ if secret:
+ self.results['secret_id'] = secret['ID']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ secret = self.get_secret()
+ if secret:
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.1.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py
new file mode 100644
index 00000000..96f89e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_service.py
@@ -0,0 +1,1155 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up) (or C(docker-compose stop) when I(stopped) is true,
+        or C(docker-compose restart) when I(restarted) is true).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up) (or C(docker-compose stop) when I(stopped) is true,
+        or C(docker-compose restart) when I(restarted) is true) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.general.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: "not output.changed "
+
+ - name: Stop all services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.general.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.general.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.general.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly. The service's name is the variable with which the container dictionary
+ can be accessed. Note that the returned facts will be removed in community.general 2.0.0.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
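+# Maps the Ansible connection/TLS parameters onto the docker-compose CLI-style options understood
+# by project_from_options().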
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
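+# docker-compose writes progress and errors to stdout/stderr; redirecting both into temporary files
+# keeps the module's JSON output intact and allows failures to be parsed afterwards.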
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+ finally:
+ sys.stderr = old_fh
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+    # Assume either the exception body (if present) or the last captured error is the most
+    # meaningful message.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
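+        # When the compose file was generated from an inline definition, clean up the temporary
+        # project directory created in __init__.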
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], ansible_facts=dict(), services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
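+        # Ask compose for a convergence plan for each selected service; any action other than 'noop'
+        # marks the task as changed and is recorded under 'actions'.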
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['ansible_facts'][service.name] = service_facts
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, str(type(self.scale[service_name])))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module._name in ('docker_service', 'community.general.docker_service'):
+ client.module.deprecate("The 'docker_service' module has been renamed to 'docker_compose'.",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py
new file mode 100644
index 00000000..d3089e20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: Manage docker stacks
+description:
+ - Manage docker stacks using the 'docker stack' command
+ on the target node (see examples).
+options:
+ name:
+ description:
+            - Stack name.
+ type: str
+ required: yes
+ state:
+ description:
+            - Stack state.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+            - List of compose definitions. Each element may either be a string
+              referring to the path of a compose file on the target host,
+              or the YAML contents of a compose file nested as a dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+            - If true, will add the C(--prune) option to the C(docker stack deploy) command.
+ This will have docker remove the services not present in the
+ current stack definition.
+ type: bool
+ default: no
+ with_registry_auth:
+ description:
+            - If true, will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+ This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: no
+ resolve_image:
+ description:
+            - If set, will add the C(--resolve-image) option to the C(docker stack deploy) command.
+              This will have docker query the registry to resolve image digest and
+              supported platforms. If not set, docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+ - If C(>0) and I(state) is C(absent) the module will retry up to
+ I(absent_retries) times to delete the stack until all the
+ resources have been effectively deleted.
+ If the last try still reports the stack as not completely
+ removed the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+
+notes:
+ - Return values I(out) and I(err) have been deprecated and will be removed in community.general 3.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+    Dictionary containing the differences between the 'Spec' field
+ of the stack services before and after applying the new stack
+ definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.general.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.general.docker_stack:
+ name: mystack
+ state: absent
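+
+  # A hedged sketch, not part of the original examples: retry removal when the
+  # stack's resources take a few seconds to disappear, using the documented
+  # absent_retries and absent_retries_interval options.
+  - name: Remove stack and retry until all resources are gone
+    community.general.docker_stack:
+      name: mystack
+      state: absent
+      absent_retries: 5
+      absent_retries_interval: 2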
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
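+    # 'docker stack rm' may return before every resource is gone; keep re-running
+    # it until the CLI reports nothing left in the stack or retries are exhausted.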
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
+ compose_files = []
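+        # Compose definitions given as dictionaries are dumped to temporary YAML
+        # files so they can be passed to 'docker stack deploy' via --compose-file,
+        # alongside any entries that are already file paths.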
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+ module.fail_json(msg="compose element '%s' must be a " +
+ "string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="docker stack up deploy command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
+        for k in list(before_after_differences.keys()):
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+ module.fail_json(msg="'docker stack down' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ err=err, # Deprecated
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py
new file mode 100644
index 00000000..74a3648d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_info.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on docker stacks
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+version_added: "1.0.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of stacks found on the
+    target node.
+ sample: >
+ "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
+ returned: always
+ type: list
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
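+
+  # A hedged sketch, not part of the original examples: iterate over the
+  # returned list of stack dictionaries one entry at a time.
+  - name: Show each stack entry
+    ansible.builtin.debug:
+      var: item
+    loop: "{{ result.results }}"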
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=False
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py
new file mode 100644
index 00000000..966a4266
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_stack_task_info.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information of the tasks on a docker stack
+description:
+ - Retrieve information on docker stacks tasks using the C(docker stack) command
+ on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: yes
+version_added: "1.1.0"
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of tasks associated
+    with a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.general.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
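+
+  # A hedged sketch, not part of the original examples: keep only running tasks,
+  # relying on the CurrentState key shown in the sample above.
+  - name: Show running tasks only
+    ansible.builtin.debug:
+      msg: "{{ result.results | selectattr('CurrentState', 'equalto', 'Running') | list }}"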
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=False
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py
new file mode 100644
index 00000000..52f37643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm.py
@@ -0,0 +1,675 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+      - Set to C(inspect) to display swarm information.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ - inspect
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+      - Maximum number of task history entries stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+ - Amount of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+ - Amount of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+      - Automatic expiry for node certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(community.general.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(community.general.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.general.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.general.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.general.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.general.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.general.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.general.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Inspect swarm
+ community.general.docker_swarm:
+ state: inspect
+ register: swarm_info
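+
+# A hedged sketch, not part of the original examples: initialise a swarm with a
+# custom default address pool (requires API version >= 1.39, as documented above).
+- name: Init a new swarm with a custom address pool
+  community.general.docker_swarm:
+    state: present
+    default_addr_pool:
+      - 10.20.0.0/16
+    subnet_size: 24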
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description: Token to create a new *worker* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description: Token to create a new *manager* node
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ "inspect": self.inspect_swarm
+ }
+
+ if self.state == 'inspect':
+ self.client.module.deprecate(
+ "The 'inspect' state is deprecated, please use 'docker_swarm_info' to inspect swarm cluster",
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
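+            # Updating a swarm requires the current Spec version index; read it
+            # from the freshly inspected state before computing the differences.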
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove', 'inspect']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py
new file mode 100644
index 00000000..f6d5fad1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster.
+
+description:
+ - Retrieves facts about a Docker Swarm.
+  - Returns lists of swarm object names for nodes, services, and tasks.
+ - The output differs depending on API version available on docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does return boolean flags on both error and success which indicate whether
+ the docker daemon can be communicated with, whether it is in Swarm mode, and
+ whether it is a Swarm Manager node.
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: no
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: no
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+      - Whether to list the swarm tasks.
+ type: bool
+ default: no
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+ - When set to C(yes) and I(nodes), I(services) or I(tasks) is set to C(yes), then the module output will
+ contain verbose information about objects matching the full output of API method.
+ - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the I(_info) module
+        for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.general.docker_swarm_info:
+ ignore_errors: yes
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.general.docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.general.docker_swarm_info:
+ unlock_key: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
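+
+# A hedged sketch, not part of the original examples: gather services and their
+# tasks in a single call using the documented services and tasks options.
+- name: Get info on swarm services and tasks
+  community.general.docker_swarm_info:
+    services: yes
+    tasks: yes
+  register: result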
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+    - The module will fail unless this one is C(true).
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+    - Contains tokens to connect to the Swarm.
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(yes)
+ type: list
+ elements: dict
+services:
+ description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(services) is C(yes)
+ type: list
+ elements: dict
+tasks:
+ description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(yes)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
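+        # With verbose_output the raw API structures are returned unchanged;
+        # otherwise each item is reduced to the essential fields extracted below.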
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Returning container ID to not trigger another connection to host
+ # Container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+ # Number of replicas have to be updated in calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py
new file mode 100644
index 00000000..7c6f23a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service.py
@@ -0,0 +1,3004 @@
+#!/usr/bin/python
+#
+# (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+short_description: Manage docker swarm services
+description:
+ - Manages docker services via a swarm manager node.
+options:
+ args:
+ description:
+      - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+      - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(placement.constraints) instead.
+ type: list
+ elements: str
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+      - If passed a list, each item needs to be in the format of C(KEY=VALUE).
+      - If passed a dictionary, values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+      - If a variable is also present in I(env), then the I(env) value will override.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: bool
+ default: no
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Requires API version >= 1.25.
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+          - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ version_added: '0.2.0'
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ limit_cpu:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.cpus) instead.
+ type: float
+ limit_memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(limits.memory) instead.
+ type: str
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ log_driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.driver) instead.
+ type: str
+ log_driver_options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(logging.options) instead.
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+      - List of the service network names or dictionaries.
+      - When passed as dictionaries, valid sub-options are I(name), which is required, and
+        I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+      - Mount the container's root filesystem as read only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+      - If set to C(-1), and the service is not present, service replicas will be set to C(1).
+      - If set to C(-1), and the service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ reserve_cpu:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.cpus) instead.
+ type: float
+ reserve_memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(reservations.memory) instead.
+ type: str
+ resolve_image:
+ description:
+      - Whether the current image digest should be resolved from the registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+          - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ restart_policy:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.condition) instead.
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ restart_policy_attempts:
+ description:
+ - Maximum number of service restarts.
+      - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.max_attempts) instead.
+ type: int
+ restart_policy_delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.delay) instead.
+ type: raw
+ restart_policy_window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(restart_config.window) instead.
+ type: raw
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+          - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ update_delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(10).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.delay) instead.
+ type: raw
+ update_parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ - Before Ansible 2.8, the default value for this option was C(1).
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.parallelism) instead.
+ type: int
+ update_failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.failure_action) instead.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ update_monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a duration as an integer in nanoseconds or as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.monitor) instead.
+ type: raw
+ update_max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.max_failure_ratio) instead.
+ type: float
+ update_order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ - Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter C(update_config.order) instead.
+ type: str
+ choices:
+ - stop-first
+ - start-first
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+    When using older versions, use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+    - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
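+# Illustrative sketch (not part of the original examples): the env option also accepts
+# a list of KEY=VALUE strings, as described in the option documentation above.
+- name: Set environment variables from a list
+  community.general.docker_swarm_service:
+    name: myservice
+    image: alpine
+    env:
+      - "ENVVAR1=envvar1"
+      - "ENVVAR2=envvar2"
+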
+- name: Set fluentd logging
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.general.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+      # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.general.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
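+# Illustrative sketch (not part of the original examples): publishing a port with the
+# documented publish sub-options. The port numbers and mode below are placeholders.
+- name: Publish a port
+  community.general.docker_swarm_service:
+    name: myservice
+    image: nginx:1.13
+    publish:
+      - published_port: 8080
+        target_port: 80
+        protocol: tcp
+        mode: ingress
+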
+- name: Remove service
+ community.general.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import operator
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+    Will return a list of "KEY=VALUE" items. The supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
+
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
+
+
+def get_nanoseconds_from_raw_option(name, value):
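+    # Accepts None, an integer (already in nanoseconds), or a string; numeric strings
+    # are cast to int, other strings (for example '1m30s') are converted via
+    # convert_duration_to_nanosecond.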
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
+
+
+def get_value(key, values, default=None):
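+    # Like dict.get, but falls back to the default when the stored value is None,
+    # not only when the key is missing.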
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict while
+ ignoring keys in old_dict which are None in new_dict.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+    Check if two lists have differences. Lists are sorted by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting list'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
+
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return (
+ self.docker_api_version >= LooseVersion('1.25') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ default=params['restart_policy']
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ default=params['restart_policy_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ default=params['restart_policy_attempts']
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ default=params['restart_policy_window']
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ default=params['update_parallelism']
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ default=params['update_delay']
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ default=params['update_failure_action']
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ default=params['update_monitor']
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ default=params['update_max_failure_ratio']
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ default=params['update_order']
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ default=params['log_driver']
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ default=params['log_driver_options']
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ default=params['limit_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ default=params['limit_memory']
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ default=params['reserve_cpu']
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ default=params['reserve_memory']
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement,
+ default=params['constraints']
+ )
+
+ preferences = placement.get('preferences')
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
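+        # force_update is implemented by storing a value derived from the current
+        # time, so it changes on every run, makes the service spec differ and
+        # thereby forces an update.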
+ if ap['force_update']:
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
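+        # replicas == -1 means: keep the existing replica count if the service
+        # already exists, otherwise default to 1 (the documented behaviour).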
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+    def has_healthcheck_changed(self, old_service):
+        if self.healthcheck_disabled is False and self.healthcheck is None:
+            return False
+        if self.healthcheck_disabled:
+            if old_service.healthcheck is None:
+                return False
+            if old_service.healthcheck.get('test') == ['NONE']:
+                return False
+        return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
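+        # If the requested image is not pinned to a digest, ignore the digest of the
+        # currently running image when comparing.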
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.placement_preferences is not None:
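+            # Each preference such as {'spread': 'node.labels.az'} is mapped to the API
+            # form {'Spread': {'SpreadDescriptor': 'node.labels.az'}}.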
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ resources_args = {}
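+        # The Docker API expresses CPU limits and reservations in NanoCPUs, so the
+        # fractional CPU count is scaled by 10**9.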
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
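+        # A 'global' service runs one task per node and does not accept a replica count.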
+ if self.mode == 'global':
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'PublishedPort': port['published_port'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
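+            # Entries may be formatted either as 'hostname:IP' or as 'IP hostname';
+            # normalize both forms to (ip, hostname) pairs before building the dict.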
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': int(port['PublishedPort']),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+        # Prior to Docker SDK for Python 4.0.0 no warnings were returned, so there is nothing to report there.
+ # (see https://github.com/docker/docker-py/pull/2272)
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
+ def get_image_digest(self, name, resolve=False):
+        if not name or not resolve:
+ return name
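+        # Default the tag to 'latest' and resolve the fully qualified name to its digest.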
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret ids by looking them up by name
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config ids by looking them up by name
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
+ def run(self):
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, e)
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], e)
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % e
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
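+                # compare() returns (changed, differences, needs_rebuild, force_update); a
+                # rebuild removes and recreates the service instead of updating it in place.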
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ update_failure_action = client.module.params['update_failure_action']
+ failure_action = rollback_config_failure_action or update_failure_action
+ return failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', options=dict(
+ secret_id=dict(type='str'),
+ secret_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ log_driver=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ log_driver_options=dict(type='dict', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=True),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ )),
+ constraints=dict(type='list', elements='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ limit_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ limit_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ reserve_cpu=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ reserve_memory=dict(type='str', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ restart_policy=dict(
+ type='str',
+ choices=['none', 'on-failure', 'any'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ restart_policy_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_attempts=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ restart_policy_window=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_delay=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_parallelism=dict(type='int', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ update_monitor=dict(type='raw', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_max_failure_ratio=dict(type='float', removed_in_version='2.0.0',
+ removed_from_collection='community.general'), # was Ansible 2.12
+ update_order=dict(
+ type='str',
+ choices=['stop-first', 'start-first'],
+ removed_in_version='2.0.0',
+ removed_from_collection='community.general', # was Ansible 2.12
+ ),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ constraints=dict(docker_py_version='2.4.0'),
+ dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ update_max_failure_ratio=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_monitor=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ update_order=dict(docker_py_version='2.7.0', docker_api_version='1.29'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ # specials
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ docker_api_version='1.25',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py
new file mode 100644
index 00000000..130be7b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_swarm_service_info.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+  - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.general.docker_swarm_service_info:
+ name: myservice
+ register: result
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+    - Will be C(none) if the service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ RequestException,
+)
+
+from ansible_collections.community.general.plugins.module_utils.docker.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py
new file mode 100644
index 00000000..dca92df5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: yes
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+ type: dict
+
+ labels:
+ description:
+      - Dictionary of label key/value pairs to set for the volume.
+ type: dict
+
+ force:
+ description:
+ - With state C(present) causes the volume to be deleted and recreated if the volume already
+        exists and the driver, driver options or labels differ. This will cause any data in the existing
+ volume to be lost.
+ - Deprecated. Will be removed in community.general 2.0.0. Set I(recreate) to C(options-changed) instead
+        to get the same behavior as setting I(force) to C(yes).
+ type: bool
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause **any data in the existing volume
+ to be lost!** The volume will be deleted and a new volume with the same name will be
+ created.
+ - The value C(always) forces the volume to be always recreated.
+ - The value C(never) makes sure the volume will not be recreated.
+ - The value C(options-changed) makes sure the volume will be recreated if the volume
+        already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.9.0"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.general.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.general.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.general.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
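+
+# A hypothetical example (not part of the original module documentation): recreate the
+# volume when the requested driver options differ from the existing ones. Any data in
+# the existing volume is lost when it is recreated.
+- name: Recreate a volume when its options changed
+  community.general.docker_volume:
+    name: volume_two
+    driver_options:
+      type: btrfs
+      device: /dev/sda3
+    recreate: options-changed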
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
+ are also accessible directly as C(docker_volume). Note that the returned fact will be removed in community.general 2.0.0.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ DockerBaseClass,
+ AnsibleDockerClient,
+ DifferenceTracker,
+ RequestException,
+)
+from ansible.module_utils.six import iteritems, text_type
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.force = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
+ if self.force is not None:
+ if self.recreate != 'never':
+ client.fail('Cannot use the deprecated "force" '
+ 'option when "recreate" is set. Please stop '
+ 'using the force option.')
+ client.module.warn('The "force" option of docker_volume has been deprecated '
+ 'in Ansible 2.8. Please use the "recreate" '
+ 'option, which provides the same functionality as "force".')
+ self.recreate = 'options-changed' if self.force else 'never'
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.volumes()
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing volume.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
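+            # 'Labels' may be null in the API response; fall back to an empty dict.
+            # Only labels requested in the task are compared; extra labels on the volume are ignored.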
+            existing_labels = self.existing_volume.get('Labels') or {}
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ params = dict(
+ driver=self.parameters.driver,
+ driver_opts=self.parameters.driver_options,
+ )
+
+ if self.parameters.labels is not None:
+ params['labels'] = self.parameters.labels
+
+ resp = self.client.create_volume(self.parameters.volume_name, **params)
+ self.existing_volume = self.client.inspect_volume(resp['Name'])
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.remove_volume(self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['ansible_facts'] = {u'docker_volume': volume_facts}
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ force=dict(type='bool', removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ # "The docker server >= 1.9.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py
new file mode 100644
index 00000000..c00c2425
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/docker_volume_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: yes
+ aliases:
+ - volume_name
+
+extends_documentation_fragment:
+- community.general.docker
+- community.general.docker.docker_py_1_documentation
+
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+ community.general.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+    - Will be C(none) if the volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.inspect_volume(volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % exc)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if volume else False),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py
new file mode 100644
index 00000000..b7b57fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/dpkg_divert.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2020, Yann Amar <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a Debian package's version of a file
+version_added: '0.2.0'
+author:
+ - quidame (@quidame)
+description:
+  - A diversion is how C(dpkg) records that only a given package
+    (or the local administrator) is allowed to install a file at a given
+    location. Other packages shipping their own version of this file will
+    be forced to I(divert) it, i.e. to install it at another location. This
+    allows one to keep changes to a file provided by a Debian package by
+    preventing it from being overwritten on package upgrade.
+  - This module manages diversions of Debian package files using the
+    C(dpkg-divert) commandline tool. It can either create or remove a
+    diversion for a given file, and can also update an existing diversion
+    to modify its I(holder) and/or its I(divert) location.
+options:
+ path:
+ description:
+ - The original and absolute path of the file to be diverted or
+ undiverted. This path is unique, i.e. it is not possible to get
+ two diversions for the same I(path).
+ required: true
+ type: path
+ state:
+ description:
+ - When I(state=absent), remove the diversion of the specified
+ I(path); when I(state=present), create the diversion if it does
+ not exist, or update its package I(holder) or I(divert) location,
+ if it already exists.
+ type: str
+ default: present
+ choices: [absent, present]
+ holder:
+ description:
+ - The name of the package whose copy of file is not diverted, also
+ known as the diversion holder or the package the diversion belongs
+ to.
+ - The actual package does not have to be installed or even to exist
+ for its name to be valid. If not specified, the diversion is hold
+ by 'LOCAL', that is reserved by/for dpkg for local diversions.
+ - This parameter is ignored when I(state=absent).
+ type: str
+ divert:
+ description:
+      - The location where the versions of the file will be diverted.
+ - Default is to add suffix C(.distrib) to the file path.
+ - This parameter is ignored when I(state=absent).
+ type: path
+ rename:
+ description:
+ - Actually move the file aside (when I(state=present)) or back (when
+ I(state=absent)), but only when changing the state of the diversion.
+ This parameter has no effect when attempting to add a diversion that
+        already exists or when removing a nonexistent one.
+      - Unless I(force=true), renaming fails if the destination file already
+        exists (this safeguard is a dpkg-divert feature, while bypassing it is
+        a module feature).
+ type: bool
+ default: no
+ force:
+ description:
+ - When I(rename=true) and I(force=true), renaming is performed even if
+ the target of the renaming exists, i.e. the existing contents of the
+ file at this location will be lost.
+ - This parameter is ignored when I(rename=false).
+ type: bool
+ default: no
+notes:
+ - This module supports I(check_mode) and I(diff).
+requirements:
+ - dpkg-divert >= 1.15.0 (Debian family)
+'''
+
+EXAMPLES = r'''
+- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+
+- name: Divert /usr/bin/busybox by package 'branding'
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ holder: branding
+
+- name: Divert and rename busybox to busybox.dpkg-divert
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/bin/busybox.dpkg-divert
+ rename: yes
+
+- name: Remove the busybox diversion and move the diverted file back
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ state: absent
+ rename: yes
+ force: yes
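+
+# Extra example (for illustration only): an existing diversion can also be
+# updated in place to change its 'divert' target or 'holder' (the module then
+# removes and re-creates the diversion internally); the divert path below is
+# an arbitrary placeholder.
+- name: Update the busybox diversion to point at another location
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/lib/busybox/busybox.orig
+ rename: yes
+ force: yes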
+'''
+
+RETURN = r'''
+commands:
+ description: The dpkg-divert commands ran internally by the module.
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc",
+ "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc"
+ ]
+messages:
+ description: The dpkg-divert relevant messages (stdout or stderr).
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'",
+ "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'"
+ ]
+diversion:
+ description: The status of the diversion after task execution.
+ type: dict
+ returned: always
+ contains:
+ divert:
+ description: The location of the diverted file.
+ type: str
+ holder:
+ description: The package holding the diversion.
+ type: str
+ path:
+ description: The path of the file to divert/undivert.
+ type: str
+ state:
+ description: The state of the diversion.
+ type: str
+ sample: |-
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+ "path": "/etc/foobarrc"
+ "state": "present"
+ }
+'''
+
+
+import re
+import os
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def diversion_state(module, command, path):
+ diversion = dict(path=path, state='absent', divert=None, holder=None)
+ rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
+ if out:
+ diversion['state'] = 'present'
+ diversion['holder'] = out.rstrip()
+ rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
+ diversion['divert'] = out.rstrip()
+ return diversion
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
+ holder=dict(required=False, type='str'),
+ divert=dict(required=False, type='path'),
+ rename=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ state = module.params['state']
+ holder = module.params['holder']
+ divert = module.params['divert']
+ rename = module.params['rename']
+ force = module.params['force']
+
+ diversion_wanted = dict(path=path, state=state)
+ changed = False
+
+ DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
+ MAINCOMMAND = [DPKG_DIVERT]
+
+ # Option --listpackage is needed and comes with 1.15.0
+ rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
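+ # Pick the whitespace-separated token of the first output line that looks
+ # like a version number.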
+ [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
+ if LooseVersion(current_version) < LooseVersion("1.15.0"):
+ module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
+ no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ path_exists = os.path.exists(b_path)
+ # Used for things not doable with a single dpkg-divert command (such as
+ # forced renaming of files, and updates of a diversion's 'holder' or 'divert').
+ target_exists = False
+ truename_exists = False
+
+ diversion_before = diversion_state(module, DPKG_DIVERT, path)
+ if diversion_before['state'] == 'present':
+ b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
+ truename_exists = os.path.exists(b_divert)
+
+ # Append options as requested in the task parameters, but ignore some of
+ # them when removing the diversion.
+ if rename:
+ MAINCOMMAND.append('--rename')
+ elif no_rename_is_supported:
+ MAINCOMMAND.append('--no-rename')
+
+ if state == 'present':
+ if holder and holder != 'LOCAL':
+ MAINCOMMAND.extend(['--package', holder])
+ diversion_wanted['holder'] = holder
+ else:
+ MAINCOMMAND.append('--local')
+ diversion_wanted['holder'] = 'LOCAL'
+
+ if divert:
+ MAINCOMMAND.extend(['--divert', divert])
+ target = divert
+ else:
+ target = '%s.distrib' % path
+
+ MAINCOMMAND.extend(['--add', path])
+ diversion_wanted['divert'] = target
+ b_target = to_bytes(target, errors='surrogate_or_strict')
+ target_exists = os.path.exists(b_target)
+
+ else:
+ MAINCOMMAND.extend(['--remove', path])
+ diversion_wanted['divert'] = None
+ diversion_wanted['holder'] = None
+
+ # Start to populate the returned objects.
+ diversion = diversion_before.copy()
+ maincommand = ' '.join(MAINCOMMAND)
+ commands = [maincommand]
+
+ if module.check_mode or diversion_wanted == diversion_before:
+ MAINCOMMAND.insert(1, '--test')
+ diversion_after = diversion_wanted
+
+ # Just try and see
+ rc, stdout, stderr = module.run_command(MAINCOMMAND)
+
+ if rc == 0:
+ messages = [stdout.rstrip()]
+
+ # else... cases of failure with dpkg-divert are:
+ # - The diversion does not belong to the same package (or LOCAL)
+ # - The divert filename is not the same (e.g. path.distrib != path.divert)
+ # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
+ # diverted file exist)
+
+ elif state != diversion_before['state']:
+ # There should be no case with 'divert' and 'holder' when creating the
+ # diversion from none, and they're ignored when removing the diversion.
+ # So this is all about renaming...
+ if rename and path_exists and (
+ (state == 'absent' and truename_exists) or
+ (state == 'present' and target_exists)):
+ if not force:
+ msg = "Set 'force' param to True to force renaming of files."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ else:
+ msg = "Unexpected error while changing state of the diversion."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+ to_remove = path
+ if state == 'present':
+ to_remove = target
+
+ if not module.check_mode:
+ try:
+ b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
+ os.unlink(b_remove)
+ except OSError as e:
+ msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+
+ messages = [stdout.rstrip()]
+
+ # The situation is that we want to modify the settings (holder or divert)
+ # of an existing diversion. dpkg-divert does not handle this, and we have
+ # to remove the existing diversion first, and then set a new one.
+ else:
+ RMDIVERSION = [DPKG_DIVERT, '--remove', path]
+ if no_rename_is_supported:
+ RMDIVERSION.insert(1, '--no-rename')
+ rmdiversion = ' '.join(RMDIVERSION)
+
+ if module.check_mode:
+ RMDIVERSION.insert(1, '--test')
+
+ if rename:
+ MAINCOMMAND.remove('--rename')
+ if no_rename_is_supported:
+ MAINCOMMAND.insert(1, '--no-rename')
+ maincommand = ' '.join(MAINCOMMAND)
+
+ commands = [rmdiversion, maincommand]
+ rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
+
+ if module.check_mode:
+ messages = [rmdout.rstrip(), 'Running in check mode']
+ else:
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+ messages = [rmdout.rstrip(), stdout.rstrip()]
+
+ # Avoid if possible to orphan files (i.e. to dereference them in diversion
+ # database but let them in place), but do not make renaming issues fatal.
+ # BTW, this module is not about state of files involved in the diversion.
+ old = diversion_before['divert']
+ new = diversion_wanted['divert']
+ if new != old:
+ b_old = to_bytes(old, errors='surrogate_or_strict')
+ b_new = to_bytes(new, errors='surrogate_or_strict')
+ if os.path.exists(b_old) and not os.path.exists(b_new):
+ try:
+ os.rename(b_old, b_new)
+ except OSError as e:
+ pass
+
+ if not module.check_mode:
+ diversion_after = diversion_state(module, DPKG_DIVERT, path)
+
+ diversion = diversion_after.copy()
+ diff = dict()
+ if module._diff:
+ diff['before'] = diversion_before
+ diff['after'] = diversion_after
+
+ if diversion_after != diversion_before:
+ changed = True
+
+ if diversion_after == diversion_wanted:
+ module.exit_json(changed=changed, diversion=diversion,
+ commands=commands, messages=messages, diff=diff)
+ else:
+ msg = "Unexpected error: see stdout and stderr for details."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py
new file mode 100644
index 00000000..5e1d7930
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/easy_install.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: easy_install
+short_description: Installs Python libraries
+description:
+ - Installs Python libraries, optionally in a I(virtualenv)
+options:
+ name:
+ type: str
+ description:
+ - A Python library name
+ required: true
+ virtualenv:
+ type: str
+ description:
+ - An optional I(virtualenv) directory path to install into. If the
+ I(virtualenv) does not exist, it is created automatically.
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect; the environment must be deleted and newly
+ created.
+ type: bool
+ default: 'no'
+ virtualenv_command:
+ type: str
+ description:
+ - The command to create the virtual environment with. For example
+ C(pyvenv), C(virtualenv), C(virtualenv2).
+ default: virtualenv
+ executable:
+ type: str
+ description:
+ - The explicit executable or a pathname to the executable to be used to
+ run easy_install for a specific version of Python installed in the
+ system. For example C(easy_install-3.3), if there are both Python 2.7
+ and 3.3 installations in the system and you want to run easy_install
+ for the Python 3.3 installation.
+ default: easy_install
+ state:
+ type: str
+ description:
+ - The desired state of the library. C(latest) ensures that the latest version is installed.
+ choices: [present, latest]
+ default: present
+notes:
+ - Please note that the C(easy_install) module can only install Python
+ libraries. Thus this module is not able to remove libraries. It is
+ generally recommended to use the M(ansible.builtin.pip) module which you can first install
+ using M(community.general.easy_install).
+ - Also note that I(virtualenv) must be installed on the remote host if the
+ C(virtualenv) parameter is specified.
+requirements: [ "virtualenv" ]
+author: "Matt Wright (@mattupstate)"
+'''
+
+EXAMPLES = '''
+- name: Install or update pip
+ community.general.easy_install:
+ name: pip
+ state: latest
+
+- name: Install Bottle into the specified virtualenv
+ community.general.easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
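+
+# A minimal sketch of the pattern suggested in the notes: bootstrap pip with
+# easy_install, then manage further packages with the pip module.
+- name: Bootstrap pip with easy_install
+ community.general.easy_install:
+ name: pip
+
+- name: Install Bottle using the freshly installed pip
+ ansible.builtin.pip:
+ name: bottle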
+'''
+
+import os
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+
+
+def install_package(module, name, easy_install, executable_arguments):
+ cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
+ rc, out, err = module.run_command(cmd)
+ return rc, out, err
+
+
+def _is_package_installed(module, name, easy_install, executable_arguments):
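+ # Heuristic: a '--dry-run' install only mentions 'Downloading' when the
+ # package would have to be fetched, i.e. when it is not installed yet.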
+ # Copy and add to the arguments
+ executable_arguments = executable_arguments[:]
+ executable_arguments.append('--dry-run')
+ rc, out, err = install_package(module, name, easy_install, executable_arguments)
+ if rc:
+ module.fail_json(msg=err)
+ return 'Downloading' not in out
+
+
+def _get_easy_install(module, env=None, executable=None):
+ candidate_easy_inst_basenames = ['easy_install']
+ easy_install = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ easy_install = executable
+ else:
+ candidate_easy_inst_basenames.insert(0, executable)
+ if easy_install is None:
+ if env is None:
+ opt_dirs = []
+ else:
+ # Try easy_install with the virtualenv directory first.
+ opt_dirs = ['%s/bin' % env]
+ for basename in candidate_easy_inst_basenames:
+ easy_install = module.get_bin_path(basename, False, opt_dirs)
+ if easy_install is not None:
+ break
+ # easy_install should have been found by now. The final call to
+ # get_bin_path will trigger fail_json.
+ if easy_install is None:
+ basename = candidate_easy_inst_basenames[0]
+ easy_install = module.get_bin_path(basename, True, opt_dirs)
+ return easy_install
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'latest'],
+ type='str'),
+ virtualenv=dict(default=None, required=False),
+ virtualenv_site_packages=dict(default=False, type='bool'),
+ virtualenv_command=dict(default='virtualenv', required=False),
+ executable=dict(default='easy_install', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ env = module.params['virtualenv']
+ executable = module.params['executable']
+ site_packages = module.params['virtualenv_site_packages']
+ virtualenv_command = module.params['virtualenv_command']
+ executable_arguments = []
+ if module.params['state'] == 'latest':
+ executable_arguments.append('--upgrade')
+
+ rc = 0
+ err = ''
+ out = ''
+
+ if env:
+ virtualenv = module.get_bin_path(virtualenv_command, True)
+
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ command = '%s %s' % (virtualenv, env)
+ if site_packages:
+ command += ' --system-site-packages'
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
+
+ rc += rc_venv
+ out += out_venv
+ err += err_venv
+
+ easy_install = _get_easy_install(module, env, executable)
+
+ cmd = None
+ changed = False
+ installed = _is_package_installed(module, name, easy_install, executable_arguments)
+
+ if not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
+
+ rc += rc_easy_inst
+ out += out_easy_inst
+ err += err_easy_inst
+
+ changed = True
+
+ if rc != 0:
+ module.fail_json(msg=err, cmd=cmd)
+
+ module.exit_json(changed=changed, binary=easy_install,
+ name=name, virtualenv=env)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py
new file mode 100644
index 00000000..be63c920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ejabberd_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+options:
+ username:
+ type: str
+ description:
+ - the name of the user to manage
+ required: true
+ host:
+ type: str
+ description:
+ - the ejabberd host associated with this username
+ required: true
+ password:
+ type: str
+ description:
+ - the password to assign to the username
+ required: false
+ logging:
+ description:
+ - enables or disables the local syslog facility for this module
+ required: false
+ default: false
+ type: bool
+ state:
+ type: str
+ description:
+ - describe the desired state of the user to be managed
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+ - Password parameter is required for state == present only
+ - Passwords must be stored in clear text for this release
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage users state.
+
+- name: Create a user if it does not exist
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: Delete a user if it exists
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ state: absent
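+
+# For illustration: changing the password of an existing user uses the same
+# parameters; enabling 'logging' additionally records the ejabberdctl calls
+# in syslog.
+- name: Update the password for an existing user
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: newpassword
+ logging: yes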
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUserException(Exception):
+ """ Base exception for EjabberdUser class object """
+ pass
+
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+ * ejabberdctl unregister
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+ changed. It will return True if the user does not match the supplied
+ credentials and False if it does not
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('check_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return rc
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('check_account', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return not bool(int(rc))
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ if not all(options):
+ raise EjabberdUserException
+
+ cmd = 'ejabberdctl %s ' % cmd
+ cmd += " ".join(options)
+ self.log('command: %s' % cmd)
+ return self.module.run_command(cmd.split())
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('change_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('register', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('unregister', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(default=None, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict(changed=False)
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
new file mode 100644
index 00000000..27a67406
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+# (c) 2017, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+author:
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a file://
+ URL to install from a local file, or a remote URL. If this is not set, the plugin
+ location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+ - Is only used if the state would change, which is solely checked based on the name
+ parameter. If, for example, the plugin is already installed, changing this has no
+ effect.
+ - For ES 1.x use url.
+ required: False
+ url:
+ description:
+ - Set exact URL to download the plugin from (Only works for ES 1.x).
+ - For ES 2.x and higher, use src.
+ required: False
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+ - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch >= 5.0.
+ default: 1m
+ force:
+ description:
+ - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
+ default: False
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - The default changed in Ansible 2.4 to None.
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Elasticsearch
+ default: /usr/share/elasticsearch/plugins/
+ proxy_host:
+ description:
+ - Proxy host to use during plugin installation
+ proxy_port:
+ description:
+ - Proxy port to use during plugin installation
+ version:
+ description:
+ - Version of the plugin to be installed.
+ If the plugin exists with a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+ community.general.elasticsearch_plugin:
+ name: ingest-geoip
+ state: present
+ force: yes
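+
+# Illustrative only: proxy_host and proxy_port are passed to the plugin tool as
+# -DproxyHost/-DproxyPort; the proxy address below is a made-up placeholder.
+- name: Install a plugin through an HTTP proxy
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+ proxy_host: proxy.example.com
+ proxy_port: 8080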
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+PLUGIN_BIN_PATHS = tuple([
+ '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+ '/usr/share/elasticsearch/bin/plugin'
+])
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_name, plugin_dir):
+ return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+ reason = "ERROR: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+ is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+ # Timeout and version are only valid for plugin, not elasticsearch-plugin
+ if is_old_command:
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ if version:
+ plugin_name = plugin_name + '/' + version
+ cmd_args[2] = plugin_name
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ # Legacy ES 1.x
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if force:
+ cmd_args.append("--batch")
+ if src:
+ cmd_args.append(src)
+ else:
+ cmd_args.append(plugin_name)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def get_plugin_bin(module, plugin_bin=None):
+ # Use the plugin_bin that was supplied first before trying other options
+ valid_plugin_bin = None
+ if plugin_bin and os.path.isfile(plugin_bin):
+ valid_plugin_bin = plugin_bin
+
+ else:
+ # Add the plugin_bin passed into the module to the top of the list of paths to test,
+ # testing for that binary name first before falling back to the default paths.
+ bin_paths = list(PLUGIN_BIN_PATHS)
+ if plugin_bin and plugin_bin not in bin_paths:
+ bin_paths.insert(0, plugin_bin)
+
+ # Get separate lists of dirs and binary names from the full paths to the
+ # plugin binaries.
+ plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
+ plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
+
+ # Check for the binary names in the default system paths as well as the path
+ # specified in the module arguments.
+ for bin_file in plugin_bins:
+ valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+ if valid_plugin_bin:
+ break
+
+ if not valid_plugin_bin:
+ module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+ return valid_plugin_bin
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ src=dict(default=None),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ force=dict(type='bool', default=False),
+ plugin_bin=dict(type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ mutually_exclusive=[("src", "url")],
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ src = module.params["src"]
+ timeout = module.params["timeout"]
+ force = module.params["force"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ # Search provided path and system paths for valid binary
+ plugin_bin = get_plugin_bin(module, plugin_bin)
+
+ repo = parse_plugin_repo(name)
+ present = is_plugin_present(repo, plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
new file mode 100644
index 00000000..dfac03ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: emc_vnx_sg_member
+
+short_description: Manage storage group member on EMC VNX
+
+
+description:
+ - "This module manages the members of an existing storage group."
+
+extends_documentation_fragment:
+- community.general.emc.emc_vnx
+
+
+options:
+ name:
+ description:
+ - Name of the Storage group to manage.
+ required: true
+ lunid:
+ description:
+ - LUN id to be added.
+ required: true
+ state:
+ description:
+ - Indicates the desired lunid state.
+ - C(present) ensures specified lunid is present in the Storage Group.
+ - C(absent) ensures specified lunid is absent from Storage Group.
+ default: present
+ choices: [ "present", "absent"]
+
+
+author:
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+'''
+
+EXAMPLES = '''
+- name: Add lun to storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: present
+
+- name: Remove lun from storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: absent
+'''
+
+RETURN = '''
+hluid:
+ description: LUNID that hosts attached to the storage group will see.
+ type: int
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
+
+LIB_IMP_ERR = None
+try:
+ from storops import VNXSystem
+ from storops.exception import VNXCredentialError, VNXStorageGroupError, \
+ VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
+ HAS_LIB = True
+except Exception:
+ LIB_IMP_ERR = traceback.format_exc()
+ HAS_LIB = False
+
+
+def run_module():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ lunid=dict(type='int', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_args.update(emc_vnx_argument_spec)
+
+ result = dict(
+ changed=False,
+ hluid=None
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
+ exception=LIB_IMP_ERR)
+
+ sp_user = module.params['sp_user']
+ sp_address = module.params['sp_address']
+ sp_password = module.params['sp_password']
+ alu = module.params['lunid']
+
+ # if the user is working with this module in only check mode we do not
+ # want to make any changes to the environment, just return the current
+ # state with no modifications
+ if module.check_mode:
+ module.exit_json(**result)
+
+ try:
+ vnx = VNXSystem(sp_address, sp_user, sp_password)
+ sg = vnx.get_sg(module.params['name'])
+ if sg.existed:
+ if module.params['state'] == 'present':
+ if not sg.has_alu(alu):
+ try:
+ result['hluid'] = sg.attach_alu(alu)
+ result['changed'] = True
+ except VNXAluAlreadyAttachedError:
+ result['hluid'] = sg.get_hlu(alu)
+ except (VNXAttachAluError, VNXStorageGroupError) as e:
+ module.fail_json(msg='Error attaching {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ result['hluid'] = sg.get_hlu(alu)
+ if module.params['state'] == 'absent' and sg.has_alu(alu):
+ try:
+ sg.detach_alu(alu)
+ result['changed'] = True
+ except VNXDetachAluNotFoundError:
+ # being not attached when using absent is OK
+ pass
+ except VNXStorageGroupError as e:
+ module.fail_json(msg='Error detaching alu {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ module.fail_json(msg='No such storage group named '
+ '{0}'.format(module.params['name']),
+ **result)
+ except VNXCredentialError as e:
+ module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py
new file mode 100644
index 00000000..78838429
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/etcd3.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+#
+# (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: "Set or delete key value pairs from an etcd3 cluster"
+requirements:
+ - etcd3
+description:
+ - Sets or deletes values in an etcd3 cluster using its v3 API.
+ - Needs the python etcd3 library to work.
+options:
+ key:
+ description:
+ - the key where the information is stored in the cluster
+ required: true
+ value:
+ description:
+ - the information stored
+ required: true
+ host:
+ description:
+ - the IP address of the cluster
+ default: 'localhost'
+ port:
+ description:
+ - the port number used to connect to the cluster
+ default: 2379
+ state:
+ description:
+ - the state of the value for the key.
+ - can be present or absent
+ required: true
+ choices: [ present, absent ]
+ user:
+ description:
+ - The etcd user to authenticate with.
+ password:
+ description:
+ - The password to use for authentication.
+ - Required if I(user) is defined.
+ ca_cert:
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if I(client_cert) and I(client_key) are defined.
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if I(client_key) is defined.
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if I(client_cert) is defined.
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+author:
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+'''
+
+EXAMPLES = """
+- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ host: "localhost"
+ port: 2379
+ state: "present"
+
+- name: Authenticate using user/password combination with a timeout of 10 seconds
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ user: "someone"
+ password: "password123"
+ timeout: 10
+
+- name: Authenticate using TLS certificates
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ ca_cert: "/etc/ssl/certs/CA_CERT.pem"
+ client_cert: "/etc/ssl/certs/cert.crt"
+ client_key: "/etc/ssl/private/key.pem"
+"""
+
+RETURN = '''
+key:
+ description: The key that was queried
+ returned: always
+ type: str
+old_value:
+ description: The previous value in the cluster
+ returned: always
+ type: str
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ ETCD_IMP_ERR = traceback.format_exc()
+ HAS_ETCD = False
+
+
+def run_module():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=2379),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ user=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ ca_cert=dict(type='path'),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ timeout=dict(type='int'),
+ )
+
+ # seed the result dict in the object
+ # we primarily care about changed and state
+ # change is if this module effectively modified the target
+ # state will include any data that you want your module to pass back
+ # for consumption, for example, in a subsequent task
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[['client_cert', 'client_key'], ['user', 'password']],
+ )
+
+ # It is possible to set `ca_cert` to verify the server identity without
+ # setting `client_cert` or `client_key` to authenticate the client
+ # so required_together is enough
+ # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
+ # of either `client_cert` or `client_key` is enough
+ if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
+ module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
+
+ result['key'] = module.params.get('key')
+ module.params['cert_cert'] = module.params.pop('client_cert')
+ module.params['cert_key'] = module.params.pop('client_key')
+
+ if not HAS_ETCD:
+ module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
+
+ allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
+ 'timeout', 'user', 'password']
+ # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
+ # the minimum supported version
+ # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
+ client_params = dict()
+ for key, value in module.params.items():
+ if key in allowed_keys:
+ client_params[key] = value
+ try:
+ etcd = etcd3.client(**client_params)
+ except Exception as exp:
+ module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+ try:
+ cluster_value = etcd.get(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+
+ # Make the cluster_value[0] a string for string comparisons
+ result['old_value'] = to_native(cluster_value[0])
+
+ if module.params['state'] == 'absent':
+ if cluster_value[0] is not None:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.delete(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ elif module.params['state'] == 'present':
+ if result['old_value'] != module.params['value']:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.put(module.params['key'], module.params['value'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="State not recognized")
+
+ # manipulate or modify the state as needed (this is going to be the
+ # part where your module will do what it needs to do)
+
+ # during the execution of the module, if there is an exception or a
+ # conditional state that effectively causes a failure, run
+ # AnsibleModule.fail_json() to pass in the message and the result
+
+ # in the event of a successful module execution, you will want to
+ # simple AnsibleModule.exit_json(), passing the key/value results
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py
new file mode 100644
index 00000000..abd2ebc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/facter.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+# ansible www.example.net -m facter
+
+- name: Execute facter no arguments
+ community.general.facter:
+
+- name: Execute facter with arguments
+ community.general.facter:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(required=False, type='list', elements='str')
+ )
+ )
+
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py
new file mode 100644
index 00000000..2872b5ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/archive.py
@@ -0,0 +1,572 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
+# Sponsored by Oomph, Inc. http://www.oomphinc.com
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: archive
+short_description: Creates a compressed archive of one or more files or trees
+extends_documentation_fragment: files
+description:
+ - Creates or extends an archive.
+ - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
+ - Source files can be deleted after archival by specifying I(remove=True).
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ type: list
+ elements: path
+ required: true
+ format:
+ description:
+ - The type of compression to use.
+ - Support for xz was added in Ansible 2.5.
+ type: str
+ choices: [ bz2, gz, tar, xz, zip ]
+ default: gz
+ dest:
+ description:
+ - The file name of the destination archive. The parent directory must exist on the remote host.
+ - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ type: path
+ exclude_path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
+ type: list
+ elements: path
+ force_archive:
+ description:
+ - Allows you to force the module to treat this as an archive even if only a single file is specified.
+ - By default the behaviour is maintained, i.e. when a single file is specified it is compressed only (not archived).
+ type: bool
+ default: false
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ default: no
+notes:
+ - Requires tarfile, zipfile, gzip and bzip2 packages on target host.
+ - Requires lzma or backports.lzma if using xz format.
+ - Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
+seealso:
+- module: ansible.builtin.unarchive
+author:
+- Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+ community.general.archive:
+ path: /path/to/foo
+ remove: yes
+
+- name: Create a zip archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+ community.general.archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/bar
+ - /path/to/foo/baz
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/ba*
+ format: bz2
+
+- name: Use gzip to compress a single file (i.e. don't archive it first with tar)
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.gz
+ format: gz
+
+- name: Create a tar.gz archive of a single file.
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.tar.gz
+ format: gz
+ force_archive: true
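+
+# For illustration: xz output depends on the lzma/backports.lzma requirement
+# mentioned in the notes above.
+- name: Create a tar.xz archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tar.xz
+ format: xz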
+'''
+
+RETURN = r'''
+state:
+ description:
+ The current state of the archived file.
+ If 'absent', then no source files were found and the archive does not exist.
+ If 'compress', then the source file is in the compressed state.
+ If 'archive', then the source file or paths are currently archived.
+ If 'incomplete', then an archive was created, but not all source paths were found.
+ type: str
+ returned: always
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: str
+ returned: always
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
+expanded_exclude_paths:
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+'''
+
+import bz2
+import filecmp
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from traceback import format_exc
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.six import PY3
+
+
+LZMA_IMP_ERR = None
+if PY3:
+ try:
+ import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+else:
+ try:
+ from backports import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ check_mode = module.check_mode
+ paths = params['path']
+ dest = params['dest']
+ b_dest = None if not dest else to_bytes(dest, errors='surrogate_or_strict')
+ exclude_paths = params['exclude_path']
+ remove = params['remove']
+
+ b_expanded_paths = []
+ b_expanded_exclude_paths = []
+ fmt = params['format']
+ b_fmt = to_bytes(fmt, errors='surrogate_or_strict')
+ force_archive = params['force_archive']
+ globby = False
+ changed = False
+ state = 'absent'
+
+ # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
+ archive = False
+ b_successes = []
+
+ # Fail early
+ if not HAS_LZMA and fmt == 'xz':
+ module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
+ exception=LZMA_IMP_ERR)
+ module.fail_json(msg="lzma or backports.lzma is required when using xz format.")
+
+ for path in paths:
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_path or b'?' in b_path):
+ b_expanded_paths.extend(glob.glob(b_path))
+ globby = True
+
+ # If there are no glob characters the path is added to the expanded paths
+ # whether the path exists or not
+ else:
+ b_expanded_paths.append(b_path)
+
+ # Only attempt to expand the exclude paths if it exists
+ if exclude_paths:
+ for exclude_path in exclude_paths:
+ b_exclude_path = to_bytes(exclude_path, errors='surrogate_or_strict')
+
+ # Expand any glob characters. If found, add the expanded glob to the
+ # list of expanded_paths, which might be empty.
+ if (b'*' in b_exclude_path or b'?' in b_exclude_path):
+ b_expanded_exclude_paths.extend(glob.glob(b_exclude_path))
+
+ # If there are no glob character the exclude path is added to the expanded
+ # exclude paths whether the path exists or not.
+ else:
+ b_expanded_exclude_paths.append(b_exclude_path)
+
+ if not b_expanded_paths:
+ return module.fail_json(
+ path=', '.join(paths),
+ expanded_paths=to_native(b', '.join(b_expanded_paths), errors='surrogate_or_strict'),
+ msg='Error, no source paths were found'
+ )
+
+ # Only try to determine if we are working with an archive or not if we haven't set archive to true
+ if not force_archive:
+ # If we actually matched multiple files or TRIED to, then
+ # treat this as a multi-file archive
+ archive = globby or os.path.isdir(b_expanded_paths[0]) or len(b_expanded_paths) > 1
+ else:
+ archive = True
+
+ # Default created file name (for single-file archives) to
+ # <file>.<format>
+ if not b_dest and not archive:
+ b_dest = b'%s.%s' % (b_expanded_paths[0], b_fmt)
+
+ # Force archives to specify 'dest'
+ if archive and not b_dest:
+ module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')
+
+ b_sep = to_bytes(os.sep, errors='surrogate_or_strict')
+
+ b_archive_paths = []
+ b_missing = []
+ b_arcroot = b''
+
+ for b_path in b_expanded_paths:
+ # Use the longest common directory name among all the files
+ # as the archive root path
+ if b_arcroot == b'':
+ b_arcroot = os.path.dirname(b_path) + b_sep
+ else:
+ for i in range(len(b_arcroot)):
+ if b_path[i] != b_arcroot[i]:
+ break
+
+ if i < len(b_arcroot):
+ b_arcroot = os.path.dirname(b_arcroot[0:i + 1])
+
+ b_arcroot += b_sep
+
+ # Don't allow archives to be created anywhere within paths to be removed
+ if remove and os.path.isdir(b_path):
+ b_path_dir = b_path
+ if not b_path.endswith(b'/'):
+ b_path_dir += b'/'
+
+ if b_dest.startswith(b_path_dir):
+ module.fail_json(
+ path=', '.join(paths),
+ msg='Error, created archive can not be contained in source paths when remove=True'
+ )
+
+ if os.path.lexists(b_path) and b_path not in b_expanded_exclude_paths:
+ b_archive_paths.append(b_path)
+ else:
+ b_missing.append(b_path)
+
+ # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
+ if len(b_missing) == len(b_expanded_paths) and b_dest and os.path.exists(b_dest):
+ # Just check the filename to know if it's an archive or simple compressed file
+ if re.search(br'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(b_dest), re.IGNORECASE):
+ state = 'archive'
+ else:
+ state = 'compress'
+
+ # Multiple files, or globbiness
+ elif archive:
+ if not b_archive_paths:
+ # No source files were found, but the archive is there.
+ if os.path.lexists(b_dest):
+ state = 'archive'
+ elif b_missing:
+ # SOME source files were found, but not all of them
+ state = 'incomplete'
+
+ archive = None
+ size = 0
+ errors = []
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ if state != 'archive':
+ if check_mode:
+ changed = True
+
+ else:
+ try:
+ # Slightly more difficult (and less efficient!) compression using zipfile module
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+
+ # Easier compression using tarfile module
+ elif fmt == 'gz' or fmt == 'bz2':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w|' + fmt)
+
+ # python3 tarfile module allows xz format but for python2 we have to create the tarfile
+ # in memory and then compress it with lzma.
+ elif fmt == 'xz':
+ arcfileIO = io.BytesIO()
+ arcfile = tarfile.open(fileobj=arcfileIO, mode='w')
+
+ # Or plain tar archiving
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+
+ b_match_root = re.compile(br'^%s' % re.escape(b_arcroot))
+ for b_path in b_archive_paths:
+ if os.path.isdir(b_path):
+ # Recurse into directories
+ for b_dirpath, b_dirnames, b_filenames in os.walk(b_path, topdown=True):
+ if not b_dirpath.endswith(b_sep):
+ b_dirpath += b_sep
+
+ for b_dirname in b_dirnames:
+ b_fullpath = b_dirpath + b_dirname
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ except Exception as e:
+ errors.append('%s: %s' % (n_fullpath, to_native(e)))
+
+ for b_filename in b_filenames:
+ b_fullpath = b_dirpath + b_filename
+ n_fullpath = to_native(b_fullpath, errors='surrogate_or_strict', encoding='ascii')
+ n_arcname = to_native(b_match_root.sub(b'', b_fullpath), errors='surrogate_or_strict')
+
+ try:
+ if fmt == 'zip':
+ arcfile.write(n_fullpath, n_arcname)
+ else:
+ arcfile.add(n_fullpath, n_arcname, recursive=False)
+
+ b_successes.append(b_fullpath)
+ except Exception as e:
+ errors.append('Adding %s: %s' % (to_native(b_path), to_native(e)))
+ else:
+ path = to_native(b_path, errors='surrogate_or_strict', encoding='ascii')
+ arcname = to_native(b_match_root.sub(b'', b_path), errors='surrogate_or_strict')
+ if fmt == 'zip':
+ arcfile.write(path, arcname)
+ else:
+ arcfile.add(path, arcname, recursive=False)
+
+ b_successes.append(b_path)
+
+ except Exception as e:
+ expanded_fmt = 'zip' if fmt == 'zip' else ('tar.' + fmt)
+ module.fail_json(
+ msg='Error when writing %s archive at %s: %s' % (expanded_fmt, dest, to_native(e)),
+ exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ state = 'archive'
+
+ if fmt == 'xz':
+ with lzma.open(b_dest, 'wb') as f:
+ f.write(arcfileIO.getvalue())
+ arcfileIO.close()
+
+ if errors:
+ module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))
+
+ if state in ['archive', 'incomplete'] and remove:
+ for b_path in b_successes:
+ try:
+ if os.path.isdir(b_path):
+ shutil.rmtree(b_path)
+ elif not check_mode:
+ os.remove(b_path)
+ except OSError as e:
+ errors.append(to_native(b_path))
+
+ if errors:
+ module.fail_json(dest=dest, msg='Error deleting some source files: ', files=errors)
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if not check_mode and os.path.getsize(b_dest) != size:
+ changed = True
+
+ if b_successes and state != 'incomplete':
+ state = 'archive'
+
+ # Simple, single-file compression
+ else:
+ b_path = b_expanded_paths[0]
+
+ # No source or compressed file
+ if not (os.path.exists(b_path) or os.path.lexists(b_dest)):
+ state = 'absent'
+
+ # if it already exists and the source file isn't there, consider this done
+ elif not os.path.lexists(b_path) and os.path.lexists(b_dest):
+ state = 'compress'
+
+ else:
+ if module.check_mode:
+ if not os.path.exists(b_dest):
+ changed = True
+ else:
+ size = 0
+ f_in = f_out = arcfile = None
+
+ if os.path.lexists(b_dest):
+ size = os.path.getsize(b_dest)
+
+ try:
+ if fmt == 'zip':
+ arcfile = zipfile.ZipFile(
+ to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'),
+ 'w',
+ zipfile.ZIP_DEFLATED,
+ True
+ )
+ arcfile.write(
+ to_native(b_path, errors='surrogate_or_strict', encoding='ascii'),
+ to_native(b_path[len(b_arcroot):], errors='surrogate_or_strict')
+ )
+ arcfile.close()
+ state = 'archive' # because all zip files are archives
+ elif fmt == 'tar':
+ arcfile = tarfile.open(to_native(b_dest, errors='surrogate_or_strict', encoding='ascii'), 'w')
+ arcfile.add(to_native(b_path, errors='surrogate_or_strict', encoding='ascii'))
+ arcfile.close()
+ else:
+ f_in = open(b_path, 'rb')
+
+ n_dest = to_native(b_dest, errors='surrogate_or_strict', encoding='ascii')
+ if fmt == 'gz':
+ f_out = gzip.open(n_dest, 'wb')
+ elif fmt == 'bz2':
+ f_out = bz2.BZ2File(n_dest, 'wb')
+ elif fmt == 'xz':
+ f_out = lzma.LZMAFile(n_dest, 'wb')
+ else:
+ raise OSError("Invalid format")
+
+ shutil.copyfileobj(f_in, f_out)
+
+ b_successes.append(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ dest=dest,
+ msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc()
+ )
+
+ if arcfile:
+ arcfile.close()
+ if f_in:
+ f_in.close()
+ if f_out:
+ f_out.close()
+
+ # Rudimentary check: If size changed then file changed. Not perfect, but easy.
+ if os.path.getsize(b_dest) != size:
+ changed = True
+
+ state = 'compress'
+
+ if remove and not check_mode:
+ try:
+ os.remove(b_path)
+
+ except OSError as e:
+ module.fail_json(
+ path=to_native(b_path),
+ msg='Unable to remove source file: %s' % to_native(e), exception=format_exc()
+ )
+
+ try:
+ file_args = module.load_file_common_arguments(params, path=b_dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ params['path'] = b_dest
+ file_args = module.load_file_common_arguments(params)
+
+ if not check_mode:
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ module.exit_json(
+ archived=[to_native(p, errors='surrogate_or_strict') for p in b_successes],
+ dest=dest,
+ changed=changed,
+ state=state,
+ arcroot=to_native(b_arcroot, errors='surrogate_or_strict'),
+ missing=[to_native(p, errors='surrogate_or_strict') for p in b_missing],
+ expanded_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_paths],
+ expanded_exclude_paths=[to_native(p, errors='surrogate_or_strict') for p in b_expanded_exclude_paths],
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py
new file mode 100644
index 00000000..0beaca9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/ini_file.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ini_file
+short_description: Tweak settings in INI files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual settings in an INI-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+ - Adds missing sections if they don't exist.
+  - Before Ansible 2.0, comments were discarded when the source file was read, and therefore did not show up in the destination file.
+ - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ no other modifications need to be applied.
+options:
+ path:
+ description:
+ - Path to the INI-style file; this file is created if required.
+ - Before Ansible 2.3 this option was only usable as I(dest).
+ type: path
+ required: true
+ aliases: [ dest ]
+ section:
+ description:
+      - Section name in the INI file. If C(state=present), this section is added
+        automatically when a single value is being set.
+ - If left empty or set to C(null), the I(option) will be placed before the first I(section).
+ - Using C(null) is also required if the config format does not support sections.
+ type: str
+ required: true
+ option:
+ description:
+ - If set (required for changing a I(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole I(section).
+ type: str
+ value:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ type: str
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ state:
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ no_extra_spaces:
+ description:
+      - Do not insert spaces before and after the '=' symbol.
+ type: bool
+ default: no
+ create:
+ description:
+ - If set to C(no), the module will fail if the file does not already exist.
+ - By default it will create the file if it is missing.
+ type: bool
+ default: yes
+ allow_no_value:
+ description:
+      - Allow an option without a value and without the '=' symbol.
+ type: bool
+ default: no
+notes:
+ - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade" is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: '0600'
+ backup: yes
+
+- name: Ensure "temperature=cold" is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: yes
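+
+# Additional illustrative tasks (not from the original examples above). They rely only
+# on options documented above; the option name "skip_ssl" is made up for illustration.
+- name: Remove the "temperature" option from section "[drinks]" in specified file
+  community.general.ini_file:
+    path: /etc/anotherconf
+    section: drinks
+    option: temperature
+    state: absent
+
+- name: Add an option without a value to section "[drinks]"
+  community.general.ini_file:
+    path: /etc/conf
+    section: drinks
+    option: skip_ssl
+    allow_no_value: yes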
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def match_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def match_active_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def do_ini(module, filename, section=None, option=None, value=None,
+ state='present', backup=False, no_extra_spaces=False, create=True,
+ allow_no_value=False):
+
+ diff = dict(
+ before='',
+ after='',
+ before_header='%s (content)' % filename,
+ after_header='%s (content)' % filename,
+ )
+
+ if not os.path.exists(filename):
+ if not create:
+            module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ ini_file = open(filename, 'r')
+ try:
+ ini_lines = ini_file.readlines()
+ finally:
+ ini_file.close()
+
+ if module._diff:
+ diff['before'] = ''.join(ini_lines)
+
+ changed = False
+
+ # ini file could be empty
+ if not ini_lines:
+ ini_lines.append('\n')
+
+ # last line of file may not contain a trailing newline
+ if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
+ ini_lines[-1] += '\n'
+ changed = True
+
+ # append fake section lines to simplify the logic
+ # At top:
+    # Use a fake section name that does not match any other section in the file
+ # Using commit hash as fake section name
+ fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+ # Insert it at the beginning
+ ini_lines.insert(0, '[%s]' % fake_section_name)
+
+    # At bottom:
+ ini_lines.append('[')
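+    # A lone '[' acts as a sentinel "start of the next section", so the loop below
+    # can close out the last real section with the same end-of-section logic.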
+
+ # If no section is defined, fake section is used
+ if not section:
+ section = fake_section_name
+
+ within_section = not section
+ section_start = 0
+ msg = 'OK'
+ if no_extra_spaces:
+ assignment_format = '%s=%s\n'
+ else:
+ assignment_format = '%s = %s\n'
+
+ for index, line in enumerate(ini_lines):
+ if line.startswith('[%s]' % section):
+ within_section = True
+ section_start = index
+ elif line.startswith('['):
+ if within_section:
+ if state == 'present':
+ # insert missing option line at the end of the section
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ if not value and allow_no_value:
+ ini_lines.insert(i, '%s\n' % option)
+ else:
+ ini_lines.insert(i, assignment_format % (option, value))
+ msg = 'option added'
+ changed = True
+ break
+ elif state == 'absent' and not option:
+ # remove the entire section
+ del ini_lines[section_start:index]
+ msg = 'section removed'
+ changed = True
+ break
+ else:
+ if within_section and option:
+ if state == 'present':
+ # change the existing option line
+ if match_opt(option, line):
+ if not value and allow_no_value:
+ newline = '%s\n' % option
+ else:
+ newline = assignment_format % (option, value)
+ option_changed = ini_lines[index] != newline
+ changed = changed or option_changed
+ if option_changed:
+ msg = 'option changed'
+ ini_lines[index] = newline
+ if option_changed:
+ # remove all possible option occurrences from the rest of the section
+ index = index + 1
+ while index < len(ini_lines):
+ line = ini_lines[index]
+ if line.startswith('['):
+ break
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ else:
+ index = index + 1
+ break
+ elif state == 'absent':
+ # delete the existing line
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ changed = True
+ msg = 'option changed'
+ break
+
+ # remove the fake section line
+ del ini_lines[0]
+ del ini_lines[-1:]
+
+ if not within_section and option and state == 'present':
+ ini_lines.append('[%s]\n' % section)
+ if not value and allow_no_value:
+ ini_lines.append('%s\n' % option)
+ else:
+ ini_lines.append(assignment_format % (option, value))
+ changed = True
+ msg = 'section and option added'
+
+ if module._diff:
+ diff['after'] = ''.join(ini_lines)
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if backup:
+ backup_file = module.backup_local(filename)
+
+ try:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'w')
+ f.writelines(ini_lines)
+ f.close()
+ except IOError:
+            module.fail_json(msg="Unable to create temporary file in %s" % module.tmpdir, traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+                             traceback=traceback.format_exc())
+
+ return (changed, backup_file, diff, msg)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest']),
+ section=dict(type='str', required=True),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ no_extra_spaces=dict(type='bool', default=False),
+ allow_no_value=dict(type='bool', default=False),
+ create=dict(type='bool', default=True)
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ section = module.params['section']
+ option = module.params['option']
+ value = module.params['value']
+ state = module.params['state']
+ backup = module.params['backup']
+ no_extra_spaces = module.params['no_extra_spaces']
+ allow_no_value = module.params['allow_no_value']
+ create = module.params['create']
+
+ (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+
+ if not module.check_mode and os.path.exists(path):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ results = dict(
+ changed=changed,
+ diff=diff,
+ msg=msg,
+ path=path,
+ )
+ if backup_file is not None:
+ results['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py
new file mode 100644
index 00000000..bf6359b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_create.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+    - This module is used to generate an ISO file from specified files or folders.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+requirements:
+- "pycdlib"
+- "python >= 2.7"
+version_added: '0.2.0'
+
+options:
+ src_files:
+ description:
+    - This is a list of absolute paths of source files or folders which will be contained in the newly generated ISO file.
+    - Will fail if a specified file or folder in C(src_files) does not exist on the local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
+ underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
+ names are limited to 255 characters.'
+ type: list
+ required: yes
+ elements: path
+ dest_iso:
+ description:
+    - The absolute path, including the file name, of the newly generated ISO file on the local machine.
+    - Will create intermediate folders when they do not exist.
+ type: path
+ required: yes
+ interchange_level:
+ description:
+    - The ISO9660 interchange level to use; it dictates the rules on the names of files.
+    - Levels C(1), C(2), C(3) and C(4) are supported.
+    - The default is level C(1), which is the most conservative; level C(3) is recommended.
+ - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+    - The volume identification string to use on the newly generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+    - Whether to add the Rock Ridge extensions to this ISO.
+    - Valid values are C(1.09), C(1.10) or C(1.12); the specified Rock Ridge version is added to the ISO.
+    - If unsure, set C(1.09) to ensure maximum compatibility.
+    - If not specified, the Rock Ridge extension is not added to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are C(1), C(2), or C(3).
+ - Level C(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+ - If set to C(True), then version 2.60 of the UDF spec is used.
+ - If not specified or set to C(False), then no UDF support is added.
+ type: bool
+ default: False
+'''
+
+EXAMPLES = r'''
+- name: Create an ISO file
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ interchange_level: 3
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ rock_ridge: 1.09
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - ./windows_config/Autounattend.xml
+ dest_iso: ./test.iso
+ interchange_level: 3
+ joliet: 3
+ vol_ident: WIN_AUTOINSTALL
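+
+# Additional illustrative task (not from the original examples above): enable UDF
+# support as documented in the 'udf' option. The destination path is made up.
+- name: Create an ISO file with UDF 2.60 support
+  community.general.iso_create:
+    src_files:
+    - /root/testfolder
+    dest_iso: /tmp/test_udf.iso
+    udf: yes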
+'''
+
+RETURN = r'''
+source_file:
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
+created_iso:
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
+interchange_level:
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
+vol_ident:
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
+joliet:
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
+rock_ridge:
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
+udf:
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: False
+'''
+
+import os
+import traceback
+
+PYCDLIB_IMP_ERR = None
+try:
+ import pycdlib
+ HAS_PYCDLIB = True
+except ImportError:
+ PYCDLIB_IMP_ERR = traceback.format_exc()
+ HAS_PYCDLIB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot,
+ # followed by a maximum 3 character extension, followed by a semicolon and a version
+ file_name = os.path.basename(file_path)
+ if '.' not in file_name:
+ file_in_iso_path = file_path.upper() + '.;1'
+ else:
+ file_in_iso_path = file_path.upper() + ';1'
+ if rock_ridge:
+ rr_name = file_name
+ if use_joliet:
+ joliet_path = file_path
+ if use_udf:
+ udf_path = file_path
+ try:
+ iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ iso_dir_path = dir_path.upper()
+ if rock_ridge:
+ rr_name = os.path.basename(dir_path)
+ if use_joliet:
+ joliet_path = iso_dir_path
+ if use_udf:
+ udf_path = iso_dir_path
+ try:
+ iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+        module.fail_json(msg="Failed to add directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+ argument_spec = dict(
+ src_files=dict(type='list', required=True, elements='path'),
+ dest_iso=dict(type='path', required=True),
+ interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ vol_ident=dict(type='str'),
+ rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+ joliet=dict(type='int', choices=[1, 2, 3]),
+ udf=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_PYCDLIB:
+ module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+ src_file_list = module.params.get('src_files')
+    if not src_file_list:
+ module.fail_json(msg='Please specify source file and/or directory list using src_files parameter.')
+ for src_file in src_file_list:
+ if not os.path.exists(src_file):
+ module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+ dest_iso = module.params.get('dest_iso')
+    if not dest_iso:
+        module.fail_json(msg='Please specify the absolute path of the newly created ISO file using the dest_iso parameter.')
+
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ # will create intermediate dir for new ISO file
+ try:
+ os.makedirs(dest_iso_dir)
+ except OSError as err:
+ module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+ volume_id = module.params.get('vol_ident')
+ if volume_id is None:
+ volume_id = ''
+ inter_level = module.params.get('interchange_level')
+ rock_ridge = module.params.get('rock_ridge')
+ use_joliet = module.params.get('joliet')
+ use_udf = None
+ if module.params['udf']:
+ use_udf = '2.60'
+
+ result = dict(
+ changed=False,
+ source_file=src_file_list,
+ created_iso=dest_iso,
+ interchange_level=inter_level,
+ vol_ident=volume_id,
+ rock_ridge=rock_ridge,
+ joliet=use_joliet,
+ udf=use_udf
+ )
+ if not module.check_mode:
+ iso_file = pycdlib.PyCdlib()
+ iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+ for src_file in src_file_list:
+            # if a directory is specified, walk it to add its files and subdirectories
+ if os.path.isdir(src_file):
+ dir_list = []
+ file_list = []
+ src_file = src_file.rstrip('/')
+ dir_name = os.path.basename(src_file)
+ add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+
+ # get dir list and file list
+ for path, dirs, files in os.walk(src_file):
+ for filename in files:
+ file_list.append(os.path.join(path, filename))
+                for dir_entry in dirs:
+                    dir_list.append(os.path.join(path, dir_entry))
+ for new_dir in dir_list:
+ add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1],
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+ for new_file in file_list:
+ add_file(module, iso_file=iso_file, src_file=new_file,
+ file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+            # if a file is specified, add it directly under the '/' path in the ISO
+ else:
+ add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file),
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+
+ iso_file.write(dest_iso)
+ iso_file.close()
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py
new file mode 100644
index 00000000..0c73ac96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/iso_extract.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Jeroen Hoekx (@jhoekx)
+- Matt Robinson (@ribbons)
+- Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+- This module has two possible ways of operation.
+- If 7zip is installed on the system, this module extracts files from an ISO
+ into a temporary directory and copies files to a given destination,
+ if needed.
+- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+ mounts the ISO image to a temporary location, and copies files to a given
+ destination, if needed.
+requirements:
+- Either 7z (from I(7zip) or I(p7zip) package)
+- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+options:
+ image:
+ description:
+ - The ISO image to extract files from.
+ type: path
+ required: yes
+ aliases: [ path, src ]
+ dest:
+ description:
+ - The destination directory to extract files to.
+ type: path
+ required: yes
+ files:
+ description:
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
+ type: list
+ elements: str
+ required: yes
+ force:
+ description:
+    - If C(yes), the remote file will be replaced when its contents differ from the source.
+ - If C(no), the file will only be extracted and copied if the destination does not already exist.
+ - Alias C(thirsty) has been deprecated and will be removed in community.general 3.0.0.
+ type: bool
+ default: yes
+ aliases: [ thirsty ]
+ executable:
+ description:
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ type: path
+ default: '7z'
+notes:
+- Only the file checksum (content) is taken into account when extracting files
+  from the ISO image. If C(force=no), only the presence of the file is checked.
+- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
+ requiring root access. This is no longer needed with the introduction of 7zip
+ for extraction.
+'''
+
+EXAMPLES = r'''
+- name: Extract kernel and ramdisk from a LiveCD
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ - isolinux/initrd.cgz
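+
+# Additional illustrative task (not from the original examples above): only extract
+# files that are missing from the destination and point at a specific 7z binary.
+# The executable path shown is made up for illustration.
+- name: Extract the kernel only if it is not already present
+  community.general.iso_extract:
+    image: /tmp/rear-test.iso
+    dest: /tmp/virt-rear/
+    files:
+    - isolinux/kernel
+    force: no
+    executable: /usr/local/bin/7z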
+'''
+
+RETURN = r'''
+#
+'''
+
+import os.path
+import shutil
+import tempfile
+
+try: # python 3.3+
+ from shlex import quote
+except ImportError: # older python
+ from pipes import quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(type='path', required=True, aliases=['path', 'src']),
+ dest=dict(type='path', required=True),
+ files=dict(type='list', elements='str', required=True),
+ force=dict(type='bool', default=True, aliases=['thirsty']),
+ executable=dict(type='path'), # No default on purpose
+ ),
+ supports_check_mode=True,
+ )
+ image = module.params['image']
+ dest = module.params['dest']
+ files = module.params['files']
+ force = module.params['force']
+ executable = module.params['executable']
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = dict(
+ changed=False,
+ dest=dest,
+ image=image,
+ )
+
+ # We want to know if the user provided it or not, so we set default here
+ if executable is None:
+ executable = '7z'
+
+ binary = module.get_bin_path(executable, None)
+
+    # When an executable was provided but the binary was not found, warn the user
+ if module.params['executable'] is not None and not binary:
+ module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
+
+ if not os.path.exists(dest):
+ module.fail_json(msg="Directory '%s' does not exist" % dest)
+
+ if not os.path.exists(os.path.dirname(image)):
+ module.fail_json(msg="ISO image '%s' does not exist" % image)
+
+ result['files'] = []
+ extract_files = list(files)
+
+ if not force:
+ # Check if we have to process any files based on existence
+ for f in files:
+ dest_file = os.path.join(dest, os.path.basename(f))
+ if os.path.exists(dest_file):
+ result['files'].append(dict(
+ checksum=None,
+ dest=dest_file,
+ src=f,
+ ))
+ extract_files.remove(f)
+
+ if not extract_files:
+ module.exit_json(**result)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ # Use 7zip when we have a binary, otherwise try to mount
+ if binary:
+ cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
+ else:
+ cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ result.update(dict(
+ cmd=cmd,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ ))
+ shutil.rmtree(tmp_dir)
+
+ if binary:
+ module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
+ else:
+ module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
+
+ try:
+ for f in extract_files:
+ tmp_src = os.path.join(tmp_dir, f)
+ if not os.path.exists(tmp_src):
+ module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
+
+ src_checksum = module.sha1(tmp_src)
+
+ dest_file = os.path.join(dest, os.path.basename(f))
+
+ if os.path.exists(dest_file):
+ dest_checksum = module.sha1(dest_file)
+ else:
+ dest_checksum = None
+
+ result['files'].append(dict(
+ checksum=src_checksum,
+ dest=dest_file,
+ src=f,
+ ))
+
+ if src_checksum != dest_checksum:
+ if not module.check_mode:
+ shutil.copy(tmp_src, dest_file)
+
+ result['changed'] = True
+ finally:
+ if not binary:
+ module.run_command('umount "%s"' % tmp_dir)
+
+ shutil.rmtree(tmp_dir)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py
new file mode 100644
index 00000000..7100d378
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/read_csv.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: read_csv
+short_description: Read a CSV file
+description:
+- Read a CSV file and return a list or a dictionary, containing one dictionary per row.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ path:
+ description:
+ - The CSV filename to read data from.
+ type: path
+ required: yes
+ aliases: [ filename ]
+ key:
+ description:
+ - The column name used as a key for the resulting dictionary.
+ - If C(key) is unset, the module returns a list of dictionaries,
+ where each dictionary is a row in the CSV file.
+ type: str
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ unique:
+ description:
+ - Whether the C(key) used is expected to be unique.
+ type: bool
+ default: yes
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+ - Whether to ignore any whitespaces immediately following the delimiter.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+notes:
+- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ community.general.read_csv:
+ path: users.csv
+ key: name
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access an entry by list index
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ fieldnames: name,uid,gid
+ delimiter: ';'
+ register: users
+ delegate_to: localhost
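+
+# Additional illustrative task (not from the original examples above): parse the
+# same users.csv with the 'unix' dialect documented in the 'dialect' option.
+- name: Read users from CSV file using the unix dialect
+  community.general.read_csv:
+    path: users.csv
+    dialect: unix
+  register: users
+  delegate_to: localhost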
+'''
+
+RETURN = r'''
+dict:
+ description: The CSV content as a dictionary.
+ returned: success
+ type: dict
+ sample:
+ dag:
+ name: dag
+ uid: 500
+ gid: 500
+ jeroen:
+ name: jeroen
+ uid: 501
+ gid: 500
+list:
+ description: The CSV content as a list.
+ returned: success
+ type: list
+ sample:
+ - name: dag
+ uid: 500
+ gid: 500
+ - name: jeroen
+ uid: 501
+ gid: 500
+'''
+
+import csv
+from io import BytesIO, StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import PY3
+
+
+# Add Unix dialect from Python 3
+class unix_dialect(csv.Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+
+csv.register_dialect("unix", unix_dialect)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['filename']),
+ dialect=dict(type='str', default='excel'),
+ key=dict(type='str'),
+ fieldnames=dict(type='list', elements='str'),
+ unique=dict(type='bool', default=True),
+ delimiter=dict(type='str'),
+ skipinitialspace=dict(type='bool'),
+ strict=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ dialect = module.params['dialect']
+ key = module.params['key']
+ fieldnames = module.params['fieldnames']
+ unique = module.params['unique']
+
+ if dialect not in csv.list_dialects():
+ module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect)
+
+ dialect_options = dict(
+ delimiter=module.params['delimiter'],
+ skipinitialspace=module.params['skipinitialspace'],
+ strict=module.params['strict'],
+ )
+
+ # Create a dictionary from only set options
+ dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None)
+ if dialect_params:
+ try:
+ csv.register_dialect('custom', dialect, **dialect_params)
+ except TypeError as e:
+ module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e))
+ dialect = 'custom'
+
+ try:
+ with open(path, 'rb') as f:
+ data = f.read()
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unable to open file: %s" % to_text(e))
+
+ if PY3:
+ # Manually decode on Python3 so that we can use the surrogateescape error handler
+ data = to_text(data, errors='surrogate_or_strict')
+ fake_fh = StringIO(data)
+ else:
+ fake_fh = BytesIO(data)
+
+ reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
+
+ if key and key not in reader.fieldnames:
+ module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+ data_dict = dict()
+ data_list = list()
+
+ if key is None:
+ try:
+ for row in reader:
+ data_list.append(row)
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+ else:
+ try:
+ for row in reader:
+ if unique and row[key] in data_dict:
+ module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+ data_dict[row[key]] = row
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+
+ module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py
new file mode 100644
index 00000000..8b1449be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xattr.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user defined extended attributes
+description:
+ - Manages filesystem user defined extended attributes.
+ - Requires that extended attributes are enabled on the target filesystem
+ and that the setfattr/getfattr utilities are present.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ - Before 2.3 this option was only usable as I(name).
+ type: path
+ required: true
+ aliases: [ name ]
+ namespace:
+ description:
+ - Namespace of the named name/key.
+ type: str
+ default: user
+ key:
+ description:
+ - The name of a specific Extended attribute key to set/retrieve.
+ type: str
+ value:
+ description:
+    - The value to set the named name/key to. Setting this implies C(state=present).
+ type: str
+ state:
+ description:
+    - Defines the action to perform.
+    - C(read) retrieves the current value for a C(key) (default).
+    - C(present) sets C(name) to C(value); this is the default if C(value) is set.
+    - C(all) dumps all data.
+    - C(keys) retrieves all keys.
+    - C(absent) deletes the key.
+ type: str
+ choices: [ absent, all, keys, present, read ]
+ default: read
+ follow:
+ description:
+ - If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
+ otherwise acts on symlink itself.
+ type: bool
+ default: yes
+notes:
+ - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Obtain the extended attributes of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+
+- name: Set the key 'user.foo' to value 'bar'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ value: bar
+
+- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ value: "0x817b94343f164f199e5b573b4ea1f914"
+
+- name: Remove the key 'user.foo'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ state: absent
+
+- name: Remove the key 'trusted.glusterfs.volume-id'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ state: absent
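+
+# Additional illustrative task (not from the original examples above): list only
+# the attribute keys via the 'keys' state. The register variable name is made up.
+- name: List the extended attribute keys of /etc/foo.conf
+  community.general.xattr:
+    path: /etc/foo.conf
+    state: keys
+  register: foo_xattr_keys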
+'''
+
+import os
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_xattr_keys(module, path, follow):
+ cmd = [module.get_bin_path('getfattr', True)]
+    # prevents a warning; not sure why it is not the default
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def get_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('getfattr', True)]
+    # prevents a warning; not sure why it is not the default
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ if key is None:
+ cmd.append('-d')
+ else:
+ cmd.append('-n %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def set_xattr(module, path, key, value, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-n %s' % key)
+ cmd.append('-v %s' % value)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def rm_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-x %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def _run_xattr(module, cmd, check_rc=True):
+
+ try:
+ (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
+ except Exception as e:
+ module.fail_json(msg="%s!" % to_native(e))
+
+ # result = {'raw': out}
+ result = {}
+ for line in out.splitlines():
+ if line.startswith('#') or line == '':
+ pass
+ elif '=' in line:
+            (key, val) = line.split('=', 1)
+ result[key] = val.strip('"')
+ else:
+ result[line] = ''
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name']),
+ namespace=dict(type='str', default='user'),
+ key=dict(type='str'),
+ value=dict(type='str'),
+ state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
+ follow=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ path = module.params.get('path')
+ namespace = module.params.get('namespace')
+ key = module.params.get('key')
+ value = module.params.get('value')
+ state = module.params.get('state')
+ follow = module.params.get('follow')
+
+ if not os.path.exists(path):
+ module.fail_json(msg="path not found or not accessible!")
+
+ changed = False
+ msg = ""
+ res = {}
+
+ if key is None and state in ['absent', 'present']:
+ module.fail_json(msg="%s needs a key parameter" % state)
+
+ # Prepend the key with the namespace if defined
+ if (
+ key is not None and
+ namespace is not None and
+ len(namespace) > 0 and
+ not (namespace == 'user' and key.startswith('user.'))):
+ key = '%s.%s' % (namespace, key)
+
+ if (state == 'present' or value is not None):
+ current = get_xattr(module, path, key, follow)
+ if current is None or key not in current or value != current[key]:
+ if not module.check_mode:
+ res = set_xattr(module, path, key, value, follow)
+ changed = True
+ res = current
+ msg = "%s set to %s" % (key, value)
+ elif state == 'absent':
+ current = get_xattr(module, path, key, follow)
+ if current is not None and key in current:
+ if not module.check_mode:
+ res = rm_xattr(module, path, key, follow)
+ changed = True
+ res = current
+ msg = "%s removed" % (key)
+ elif state == 'keys':
+ res = get_xattr_keys(module, path, follow)
+ msg = "returning all keys"
+ elif state == 'all':
+ res = get_xattr(module, path, None, follow)
+ msg = "dumping all"
+ else:
+ res = get_xattr(module, path, key, follow)
+ msg = "returning %s" % key
+
+ module.exit_json(changed=changed, msg=msg, xattr=res)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py
new file mode 100644
index 00000000..1733e657
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/files/xml.py
@@ -0,0 +1,958 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (eg. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
+- Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- ansible.builtin.debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ ansible.builtin.debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ children: []
+
+# In case of namespaces, like in below XML, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ community.general.xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
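+
+# Additional illustrative task (not from the original examples above): operate on an
+# XML string instead of a file. Per the return values, the modified document comes
+# back in the 'xmlstring' key; the register variable name is made up.
+- name: Set the 'name' element in an XML string and pretty-print the result
+  community.general.xml:
+    xmlstring: '<business type="bar"><name>Tasty Beverage Co.</name></business>'
+    xpath: /business/name
+    value: Tasty Beverage Co. (Downtown)
+    pretty_print: yes
+  register: xml_out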
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+  sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the XPath trick of concatenating strings wrapped in the other
+# delimiter when a string needs to contain both ' and ", especially not as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
+
+
+def has_changed(doc):
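+    # Compares the current tree against the pristine copy kept in the module-level
+    # global 'orig_doc', which main() saves before any modifications are applied.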
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ changed = False
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ changed = True
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the
+ first xpath hit, with insertafter, it is inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
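+    # Illustrative behaviour, derived from the regular expressions above:
+    #   '/a/b/c'              -> ('/a/b', [('c', None)])
+    #   '/a/b/c/text()="foo"' -> ('/a/b', [('c', 'foo')])
+    #   '/a/b/@id="x1"'       -> ('/a/b', [('@id', 'x1')])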
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
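+    # e.g. (illustrative): nsnameToClark('x:bar', {'x': 'http://x.test'}) -> '{http://x.test}bar'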
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change !
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in community.general 2.0.0 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in community.general 2.0.0 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+        module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute'; only 'xpath' is used. Please remove the entry." % attribute,
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py
new file mode 100644
index 00000000..e78eec4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/filesystem.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: filesystem
+short_description: Makes a filesystem
+description:
+ - This module creates a filesystem.
+options:
+ state:
+ description:
+      - If C(state=present), the filesystem is created if it doesn't already
+        exist; this is the default behaviour if I(state) is omitted.
+ - If C(state=absent), filesystem signatures on I(dev) are wiped if it
+ contains a filesystem (as known by C(blkid)).
+ - When C(state=absent), all other options but I(dev) are ignored, and the
+ module doesn't fail if the device I(dev) doesn't actually exist.
+ - C(state=absent) is not supported and will fail on FreeBSD systems.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ version_added: 1.3.0
+ fstype:
+ choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
+ description:
+ - Filesystem type to be created. This option is required with
+ C(state=present) (or if I(state) is omitted).
+ - reiserfs support was added in 2.2.
+ - lvm support was added in 2.5.
+ - since 2.5, I(dev) can be an image file.
+      - vfat support was added in 2.5.
+      - ocfs2 support was added in 2.6.
+      - f2fs support was added in 2.7.
+      - swap support was added in 2.8.
+ type: str
+ aliases: [type]
+ dev:
+ description:
+ - Target path to device or image file.
+ type: path
+ required: yes
+ aliases: [device]
+ force:
+ description:
+      - If C(yes), allows creating a new filesystem on a device that already has a filesystem.
+ type: bool
+ default: 'no'
+ resizefs:
+ description:
+      - If C(yes) and the block device and filesystem sizes differ, grow the filesystem into the available space.
+      - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
+      - XFS will only grow if mounted. Currently, the module relies on commands
+        from the C(util-linux) package to perform operations, so resizing of XFS is
+        not supported on FreeBSD systems.
+      - VFAT will likely fail if C(fatresize) < 1.04.
+ type: bool
+ default: 'no'
+ opts:
+ description:
+ - List of options to be passed to mkfs command.
+ type: str
+requirements:
+  - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. When I(resizefs) is enabled, the C(blockdev) command is required too.
+notes:
+  - Potential filesystems on I(dev) are checked using C(blkid). If C(blkid) is not able to detect an existing filesystem,
+    that filesystem will be overwritten even if I(force) is C(no).
+ - This module supports I(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create an ext2 filesystem on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
+
+- name: Create an ext4 filesystem on /dev/sdb1 and check disk blocks
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
+
+- name: Blank filesystem signature on /dev/sdb1
+ community.general.filesystem:
+ dev: /dev/sdb1
+ state: absent
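+
+# Illustrative sketch, not part of the original examples: grow an existing ext4 filesystem
+# after its underlying device has been enlarged.
+- name: Grow the ext4 filesystem on /dev/sdb1 to fill the device
+  community.general.filesystem:
+    fstype: ext4
+    dev: /dev/sdb1
+    resizefs: yes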
+'''
+
+from distutils.version import LooseVersion
+import os
+import platform
+import re
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Device(object):
+ def __init__(self, module, path):
+ self.module = module
+ self.path = path
+
+ def size(self):
+ """ Return size in bytes of device. Returns int """
+ statinfo = os.stat(self.path)
+ if stat.S_ISBLK(statinfo.st_mode):
+ blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
+ _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
+ return int(devsize_in_bytes)
+ elif os.path.isfile(self.path):
+ return os.path.getsize(self.path)
+ else:
+ self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
+
+ def get_mountpoint(self):
+ """Return (first) mountpoint of device. Returns None when not mounted."""
+ cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
+
+ # find mountpoint
+ rc, mountpoint, _ = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
+ "TARGET", "--source", self.path], check_rc=False)
+ if rc != 0:
+ mountpoint = None
+ else:
+ mountpoint = mountpoint.split('\n')[0]
+
+ return mountpoint
+
+ def __str__(self):
+ return self.path
+
+
+class Filesystem(object):
+
+ GROW = None
+ MKFS = None
+ MKFS_FORCE_FLAGS = ''
+
+ LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+
+ def __init__(self, module):
+ self.module = module
+
+ @property
+ def fstype(self):
+ return type(self).__name__
+
+ def get_fs_size(self, dev):
+ """ Return size in bytes of filesystem on device. Returns int """
+ raise NotImplementedError()
+
+ def create(self, opts, dev):
+ if self.module.check_mode:
+ return
+
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ if opts is None:
+ cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
+ else:
+ cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
+ self.module.run_command(cmd, check_rc=True)
+
+ def wipefs(self, dev):
+ if platform.system() == 'FreeBSD':
+ msg = "module param state=absent is currently not supported on this OS (FreeBSD)."
+ self.module.fail_json(msg=msg)
+
+ if self.module.check_mode:
+ return
+
+ # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
+ # so it is not supported on FreeBSD. Even the use of dd as a fallback is
+ # not doable here if it needs get_mountpoint() (to prevent corruption of
+ # a mounted filesystem), since 'findmnt' is not available on FreeBSD.
+ wipefs = self.module.get_bin_path('wipefs', required=True)
+ cmd = [wipefs, "--all", dev.__str__()]
+ self.module.run_command(cmd, check_rc=True)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ return [cmd, str(dev)]
+
+ def grow(self, dev):
+ """Get dev and fs size and compare. Returns stdout of used command."""
+ devsize_in_bytes = dev.size()
+
+ try:
+ fssize_in_bytes = self.get_fs_size(dev)
+ except NotImplementedError:
+ self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
+
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+ elif self.module.check_mode:
+ self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
+ else:
+ _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
+ return out
+
+
+class Ext(Filesystem):
+ MKFS_FORCE_FLAGS = '-F'
+ GROW = 'resize2fs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('tune2fs', required=True)
+ # Get Block count and Block size
+ _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ return block_size * block_count
+
+
+class Ext2(Ext):
+ MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+ MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+ MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+ MKFS = 'mkfs.xfs'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'xfs_growfs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('xfs_info', required=True)
+
+ mountpoint = dev.get_mountpoint()
+ if mountpoint:
+ rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV)
+ else:
+ # Recent GNU/Linux distros support access to unmounted XFS filesystems
+ rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV)
+ if rc != 0:
+ self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err)
+
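+        # A typical xfs_info 'data' line looks like (illustrative, values vary):
+        #   data     =                       bsize=4096   blocks=262144, imaxpct=25
+        # so splitting on '=' yields the block size from col[2] and the block count from col[3].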
+ for line in out.splitlines():
+ col = line.split('=')
+ if col[0].strip() == 'data':
+ if col[1].strip() != 'bsize':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")')
+ if col[2].split()[1] != 'blocks':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")')
+ block_size = int(col[2].split()[0])
+ block_count = int(col[3].split(',')[0])
+ return block_size * block_count
+
+ def grow_cmd(self, dev):
+ # Check first if growing is needed, and then if it is doable or not.
+ devsize_in_bytes = dev.size()
+ fssize_in_bytes = self.get_fs_size(dev)
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ # xfs filesystem needs to be mounted
+ self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev)
+
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+
+ return [cmd, str(mountpoint)]
+
+
+class Reiserfs(Filesystem):
+ MKFS = 'mkfs.reiserfs'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+class Btrfs(Filesystem):
+ MKFS = 'mkfs.btrfs'
+
+ def __init__(self, module):
+ super(Btrfs, self).__init__(module)
+ _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
+ match = re.search(r" v([0-9.]+)", stdout)
+ if not match:
+ # v0.20-rc1 use stderr
+ match = re.search(r" v([0-9.]+)", stderr)
+ if match:
+            # v0.20-rc1 doesn't have the --force parameter; it was added later, in v3.12
+ if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+ self.MKFS_FORCE_FLAGS = '-f'
+ else:
+ self.MKFS_FORCE_FLAGS = ''
+ else:
+ # assume version is greater or equal to 3.12
+ self.MKFS_FORCE_FLAGS = '-f'
+ self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+
+class Ocfs2(Filesystem):
+ MKFS = 'mkfs.ocfs2'
+ MKFS_FORCE_FLAGS = '-Fx'
+
+
+class F2fs(Filesystem):
+ MKFS = 'mkfs.f2fs'
+ GROW = 'resize.f2fs'
+
+ @property
+ def MKFS_FORCE_FLAGS(self):
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ cmd = "%s %s" % (mkfs, os.devnull)
+ _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
+ # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+ # mkfs.f2fs displays version since v1.2.0
+ match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+ if match is not None:
+            # Since 1.9.0, mkfs.f2fs checks for an existing filesystem before creating a new one;
+            # before that version the -f switch wasn't used
+ if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+ return '-f'
+
+ return ''
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('dump.f2fs', required=True)
+ # Get sector count and sector size
+ _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ sector_size = None
+ sector_count = None
+ for line in dump.splitlines():
+ if 'Info: sector size = ' in line:
+ # expected: 'Info: sector size = 512'
+ sector_size = int(line.split()[4])
+ elif 'Info: total FS sectors = ' in line:
+ # expected: 'Info: total FS sectors = 102400 (50 MB)'
+ sector_count = int(line.split()[5])
+
+ if None not in (sector_size, sector_count):
+ break
+ else:
+            self.module.warn("Unable to process dump.f2fs output '%s'" % dump)
+ self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
+
+ return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+ if platform.system() == 'FreeBSD':
+ MKFS = "newfs_msdos"
+ else:
+ MKFS = 'mkfs.vfat'
+ GROW = 'fatresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in output.splitlines()[1:]:
+ param, value = line.split(':', 1)
+ if param.strip() == 'Size':
+ return int(value.strip())
+ self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW)
+ return [cmd, "-s", str(dev.size()), str(dev.path)]
+
+
+class LVM(Filesystem):
+ MKFS = 'pvcreate'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'pvresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('pvs', required=True)
+ _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+ block_count = int(size)
+ return block_count
+
+
+class Swap(Filesystem):
+ MKFS = 'mkswap'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+FILESYSTEMS = {
+ 'ext2': Ext2,
+ 'ext3': Ext3,
+ 'ext4': Ext4,
+ 'ext4dev': Ext4,
+ 'f2fs': F2fs,
+ 'reiserfs': Reiserfs,
+ 'xfs': XFS,
+ 'btrfs': Btrfs,
+ 'vfat': VFAT,
+ 'ocfs2': Ocfs2,
+ 'LVM2_member': LVM,
+ 'swap': Swap,
+}
+
+
+def main():
+ friendly_names = {
+ 'lvm': 'LVM2_member',
+ }
+
+ fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
+ dev=dict(type='path', required=True, aliases=['device']),
+ opts=dict(type='str'),
+ force=dict(type='bool', default=False),
+ resizefs=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ('state', 'present', ['fstype'])
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.params['force']
+ resizefs = module.params['resizefs']
+
+ changed = False
+
+ if not os.path.exists(dev):
+ msg = "Device %s not found." % dev
+ if state == "present":
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(msg=msg)
+
+ dev = Device(module, dev)
+
+ cmd = module.get_bin_path('blkid', required=True)
+ rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+    # If blkid isn't able to identify an existing filesystem, the device is considered empty,
+    # so that existing filesystem would be overwritten even if force isn't enabled.
+ fs = raw_fs.strip()
+
+ if state == "present":
+ if fstype in friendly_names:
+ fstype = friendly_names[fstype]
+
+ try:
+ klass = FILESYSTEMS[fstype]
+ except KeyError:
+ module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+ filesystem = klass(module)
+
+ same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+ if same_fs and not resizefs and not force:
+ module.exit_json(changed=False)
+ elif same_fs and resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
+
+ # create fs
+ filesystem.create(opts, dev)
+ changed = True
+
+ elif fs:
+ # wipe fs signatures
+ filesystem = Filesystem(module)
+ filesystem.wipefs(dev)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py
new file mode 100644
index 00000000..1be1a722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+- Allows users to add or remove flatpaks.
+- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: path
+ default: flatpak
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The name of the flatpak to manage.
+ - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a
+ C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+ - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ format.
+ - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the
+ installed flatpak based on the name of the flatpakref to remove it. However, there is no
+ guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+ flatpak do match.
+ type: str
+ required: true
+ remote:
+ description:
+ - The flatpak remote (repository) to install the flatpak from.
+ - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
+ you can use this.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ type: str
+ default: flathub
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ type: str
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+ community.general.flatpak:
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ state: present
+
+- name: Install the gedit flatpak package
+ community.general.flatpak:
+ name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+ state: present
+
+- name: Install the gedit package from flathub for current user
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: present
+ method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+ community.general.flatpak:
+ name: org.gnome.Calendar
+ state: present
+ remote: gnome
+
+- name: Remove the gedit flatpak
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
+'''
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.basic import AnsibleModule
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, name, method):
+ """Add a new flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ if name.startswith('http://') or name.startswith('https://'):
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, name]
+ else:
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, remote, name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def uninstall_flat(module, binary, name, method):
+ """Remove an existing flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ installed_flat_name = _match_installed_flat_name(module, binary, name, method)
+    command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, installed_flat_name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def flatpak_exists(module, binary, name, method):
+ """Check if the flatpak is installed."""
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ name = _parse_flatpak_name(name).lower()
+ if name in output.lower():
+ return True
+ return False
+
+
+def _match_installed_flat_name(module, binary, name, method):
+ # This is a difficult function, since if the user supplies a flatpakref url,
+ # we have to rely on a naming convention:
+ # The flatpakref file name needs to match the flatpak name
+ global result
+ parsed_name = _parse_flatpak_name(name)
+ # Try running flatpak list with columns feature
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ _flatpak_command(module, False, command, ignore_failure=True)
+ if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+ # Probably flatpak before 1.2
+ matched_flatpak_name = \
+            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+ else:
+ # Probably flatpak >= 1.2
+ matched_flatpak_name = \
+            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+ if matched_flatpak_name:
+ return matched_flatpak_name
+ else:
+ result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+ "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+ "If you used a URL, try using the reverse DNS name of the flatpak"
+ module.fail_json(**result)
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+ global result
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() == row.lower():
+ return row
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+ global result
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() in row.lower():
+ return row.split()[0]
+
+
+def _parse_flatpak_name(name):
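+    # Illustrative behaviour, not part of the original module:
+    #   'https://example.com/repo/org.gnome.gedit.flatpakref' -> 'org.gnome.gedit'
+    #   'org.gnome.gedit'                                      -> 'org.gnome.gedit'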
+ if name.startswith('http://') or name.startswith('https://'):
+ file_name = urlparse(name).path.split('/')[-1]
+ file_name_without_extension = file_name.split('.')[0:-1]
+ common_name = ".".join(file_name_without_extension)
+ else:
+ common_name = name
+ return common_name
+
+
+def _flatpak_version(module, binary):
+ global result
+ command = [binary, "--version"]
+ output = _flatpak_command(module, False, command)
+ version_number = output.split()[1]
+ return version_number
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=not ignore_failure
+ )
+ return result['stdout']
+
+
+def main():
+ # This module supports check mode
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ remote=dict(type='str', default='flathub'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ executable=dict(type='path', default='flatpak')
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ remote = module.params['remote']
+ method = module.params['method']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ if state == 'present' and not flatpak_exists(module, binary, name, method):
+ install_flat(module, binary, remote, name, method)
+ elif state == 'absent' and flatpak_exists(module, binary, name, method):
+ uninstall_flat(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py
new file mode 100644
index 00000000..dbb211c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flatpak_remote.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(community.general.flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: str
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ type: str
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+ command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
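+    # The remote name is expected in the first whitespace-separated column of the
+    # 'flatpak remote-list -d' output (compare the 'stdout' sample in RETURN above).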
+ command = [binary, "remote-list", "-d", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
+
+def _flatpak_command(module, noop, command):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=True
+ )
+ return result['stdout']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py
new file mode 100644
index 00000000..a1842c5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/flowdock.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+ - Send a message to a Flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat).
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ type:
+ type: str
+ description:
+ - Whether to post to 'inbox' or 'chat'
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ type: str
+ description:
+ - Content of the message
+ required: true
+ tags:
+ type: str
+ description:
+ - Tags of the message, separated by commas.
+ required: false
+ external_user_name:
+ type: str
+ description:
+ - (chat only - required) Name of the "user" sending the message
+ required: false
+ from_address:
+ type: str
+ description:
+ - (inbox only - required) Email address of the message sender
+ required: false
+ source:
+ type: str
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
+ required: false
+ subject:
+ type: str
+ description:
+ - (inbox only - required) Subject line of the message
+ required: false
+ from_name:
+ type: str
+ description:
+ - (inbox only) Name of the message sender
+ required: false
+ reply_to:
+ type: str
+ description:
+ - (inbox only) Email address for replies
+ required: false
+ project:
+ type: str
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization
+ required: false
+ link:
+ type: str
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
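+
+# Illustrative only: the token, addresses and URL below are placeholders. This
+# sketch shows the optional inbox parameters documented above (from_name,
+# reply_to, project, link, tags) in a single task.
+- name: Send a message to a flowdock team inbox with optional metadata
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ from_name: Build Bot
+ reply_to: ci@example.com
+ source: my cool app
+ project: my cool project
+ msg: build 1234 finished
+ subject: build status
+ link: https://ci.example.com/builds/1234
+ tags: build,ci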
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox", "chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+ module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in ['from_address', 'source', 'subject']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in ['from_name', 'reply_to', 'project', 'link']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py
new file mode 100644
index 00000000..b209b05a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/foreman.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: foreman
+short_description: Manage Foreman Resources
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Foreman resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ verify_ssl:
+ description:
+ - Whether to verify an SSL connection to Foreman server.
+ type: bool
+ default: False
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ required: true
+ params:
+ description:
+ - Parameters associated with the entity resource to set or edit in dictionary format (e.g. name, description).
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create CI Organization
+ community.general.foreman:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: organization
+ params:
+ name: My Cool New Organization
+ delegate_to: localhost
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+try:
+ from nailgun import entities
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+
+ def find_organization(self, name, **params):
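+ """Return the organization matching name, or None if there is not exactly one match."""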
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+
+ return None
+
+ def organization(self, params):
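+ """Create the organization if it does not exist, otherwise update it with the given params."""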
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True),
+ verify_ssl=dict(type='bool', default=False),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+ # Let's make a connection to the server with the username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e),
+ exception=traceback.format_exc())
+
+ if entity == 'organization':
+ ng.organization(params)
+ module.exit_json(changed=True, result="%s updated" % entity)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py
new file mode 100644
index 00000000..52ca18fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gc_storage.py
@@ -0,0 +1,497 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gc_storage
+short_description: Manages objects/buckets in Google Cloud Storage
+description:
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
+
+options:
+ bucket:
+ type: str
+ description:
+ - Bucket name.
+ required: true
+ object:
+ type: path
+ description:
+ - Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
+ src:
+ type: str
+ description:
+ - The source file path when performing a PUT operation.
+ dest:
+ type: path
+ description:
+ - The destination file path when downloading an object/key with a GET operation.
+ overwrite:
+ description:
+ - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ type: bool
+ default: 'yes'
+ aliases: [ 'force' ]
+ permission:
+ type: str
+ description:
+ - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
+ default: private
+ choices: ['private', 'public-read', 'authenticated-read']
+ headers:
+ type: dict
+ description:
+ - Headers to attach to object.
+ default: {}
+ expiration:
+ type: int
+ default: 600
+ description:
+ - Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only
+ available when public-read is the ACL for the object.
+ aliases: [expiry]
+ mode:
+ type: str
+ description:
+ - Switches the module behaviour between upload, download, get_url (return download URL), get_str (download object as string), create (bucket) and
+ delete (bucket).
+ required: true
+ choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
+ gs_secret_key:
+ type: str
+ description:
+ - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used.
+ required: true
+ gs_access_key:
+ type: str
+ description:
+ - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used.
+ required: true
+ region:
+ type: str
+ description:
+ - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations)
+ default: 'US'
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ type: bool
+ default: false
+
+requirements:
+ - "python >= 2.6"
+ - "boto >= 2.9"
+
+author:
+- Benno Joy (@bennojoy)
+- Lukas Beumer (@Nitaco)
+
+'''
+
+EXAMPLES = '''
+- name: Upload some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ mode: put
+ permission: public-read
+
+- name: Upload some headers
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ src: /usr/local/myfile.txt
+ headers: '{"Content-Encoding": "gzip"}'
+
+- name: Download some content
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ dest: /usr/local/myfile.txt
+ mode: get
+
+- name: Download an object as a string to use elsewhere in your playbook
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_str
+
+- name: Create an empty bucket
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: create
+
+- name: Create a bucket with key as directory
+ community.general.gc_storage:
+ bucket: mybucket
+ object: /my/directory/path
+ mode: create
+
+- name: Delete a bucket and all contents
+ community.general.gc_storage:
+ bucket: mybucket
+ mode: delete
+
+- name: Create a bucket with versioning enabled
+ community.general.gc_storage:
+ bucket: "mybucket"
+ versioning: yes
+ mode: create
+
+- name: Create a bucket located in the eu
+ community.general.gc_storage:
+ bucket: "mybucket"
+ region: "europe-west3"
+ mode: create
+
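+# Illustrative only: the bucket and object names are placeholders. mode=get_url
+# returns a time-limited download URL (controlled by the expiration option
+# documented above) without transferring the object itself.
+- name: Get a download URL for an object, valid for one hour
+ community.general.gc_storage:
+ bucket: mybucket
+ object: key.txt
+ mode: get_url
+ expiration: 3600
+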
+'''
+
+import os
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def grant_check(module, gs, obj):
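+ """Ensure the object's ACL matches the requested canned permission; set it and exit with changed=True if an update was needed."""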
+ try:
+ acp = obj.get_acl()
+ if module.params.get('permission') == 'public-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
+ if not grant:
+ obj.set_acl('public-read')
+ module.exit_json(changed=True, result="The objects permission as been set to public-read")
+ if module.params.get('permission') == 'authenticated-read':
+ grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
+ if not grant:
+ obj.set_acl('authenticated-read')
+ module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ return True
+
+
+def key_check(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if key_check:
+ grant_check(module, gs, key_check)
+ return True
+ else:
+ return False
+
+
+def keysum(module, gs, bucket, obj):
+ bucket = gs.lookup(bucket)
+ key_check = bucket.get_key(obj)
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+ module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
+ return md5_remote
+
+
+def bucket_check(module, gs, bucket):
+ try:
+ result = gs.lookup(bucket)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if result:
+ grant_check(module, gs, result)
+ return True
+ else:
+ return False
+
+
+def create_bucket(module, gs, bucket):
+ try:
+ bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region'))
+ bucket.set_acl(module.params.get('permission'))
+ bucket.configure_versioning(module.params.get('versioning'))
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+ if bucket:
+ return True
+
+
+def delete_bucket(module, gs, bucket):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket_contents = bucket.list()
+ for key in bucket_contents:
+ bucket.delete_key(key.name)
+ bucket.delete()
+ return True
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_key(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ bucket.delete_key(obj)
+ module.exit_json(msg="Object deleted from bucket ", changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def create_dirkey(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_string('')
+ module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def path_check(path):
+ if os.path.exists(path):
+ return True
+ else:
+ return False
+
+
+def transform_headers(headers):
+ """
+ Boto url-encodes values unless we convert the value to `str`, so doing
+ this prevents 'max-age=100000' from being converted to "max-age%3D100000".
+
+ :param headers: Headers to convert
+ :type headers: dict
+ :rtype: dict
+
+ """
+
+ for key, value in headers.items():
+ headers[key] = str(value)
+ return headers
+
+
+def upload_gsfile(module, gs, bucket, obj, src, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.new_key(obj)
+ key.set_contents_from_filename(
+ filename=src,
+ headers=transform_headers(module.params.get('headers'))
+ )
+ key.set_acl(module.params.get('permission'))
+ url = key.generate_url(expiry)
+ module.exit_json(msg="PUT operation complete", url=url, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsfile(module, gs, bucket, obj, dest):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ key.get_contents_to_filename(dest)
+ module.exit_json(msg="GET operation complete", changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def download_gsstr(module, gs, bucket, obj):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ contents = key.get_contents_as_string()
+ module.exit_json(msg="GET operation complete", contents=contents, changed=True)
+ except gs.provider.storage_copy_error as e:
+ module.fail_json(msg=str(e))
+
+
+def get_download_url(module, gs, bucket, obj, expiry):
+ try:
+ bucket = gs.lookup(bucket)
+ key = bucket.lookup(obj)
+ url = key.generate_url(expiry)
+ module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
+ except gs.provider.storage_response_error as e:
+ module.fail_json(msg=str(e))
+
+
+def handle_get(module, gs, bucket, obj, overwrite, dest):
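+ """Download the object to dest unless the local file already matches the remote checksum."""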
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(dest)
+ if md5_local == md5_remote:
+ module.exit_json(changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
+ else:
+ download_gsfile(module, gs, bucket, obj, dest)
+
+
+def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
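+ """Upload src to the bucket/object, creating the bucket if needed and honouring the overwrite flag."""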
+ # Let's check whether the bucket exists to get ground truth.
+ bucket_rc = bucket_check(module, gs, bucket)
+ key_rc = key_check(module, gs, bucket, obj)
+
+ # Let's check the key state. If the key exists, compute the etag md5sum.
+ if bucket_rc and key_rc:
+ md5_remote = keysum(module, gs, bucket, obj)
+ md5_local = module.md5(src)
+ if md5_local == md5_remote:
+ module.exit_json(msg="Local and remote object are identical", changed=False)
+ if md5_local != md5_remote and not overwrite:
+ module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
+ else:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ if not bucket_rc:
+ create_bucket(module, gs, bucket)
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+ # If bucket exists but key doesn't, just upload.
+ if bucket_rc and not key_rc:
+ upload_gsfile(module, gs, bucket, obj, src, expiration)
+
+
+def handle_delete(module, gs, bucket, obj):
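+ """Delete a single object when obj is given, otherwise delete the bucket together with all of its keys."""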
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket))
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ if bucket and obj:
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, obj):
+ module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
+ else:
+ module.exit_json(msg="Object does not exist.", changed=False)
+ else:
+ module.exit_json(msg="Bucket does not exist.", changed=False)
+ else:
+ module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
+
+
+def handle_create(module, gs, bucket, obj):
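+ """Create the bucket, and optionally a "virtual directory" key inside it."""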
+ if bucket and not obj:
+ if bucket_check(module, gs, bucket):
+ module.exit_json(msg="Bucket already exists.", changed=False)
+ else:
+ module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
+ if bucket and obj:
+ if obj.endswith('/'):
+ dirobj = obj
+ else:
+ dirobj = obj + "/"
+
+ if bucket_check(module, gs, bucket):
+ if key_check(module, gs, bucket, dirobj):
+ module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
+ else:
+ create_dirkey(module, gs, bucket, dirobj)
+ else:
+ create_bucket(module, gs, bucket)
+ create_dirkey(module, gs, bucket, dirobj)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bucket=dict(required=True),
+ object=dict(default=None, type='path'),
+ src=dict(default=None),
+ dest=dict(default=None, type='path'),
+ expiration=dict(type='int', default=600, aliases=['expiry']),
+ mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
+ permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
+ headers=dict(type='dict', default={}),
+ gs_secret_key=dict(no_log=True, required=True),
+ gs_access_key=dict(required=True),
+ overwrite=dict(default=True, type='bool', aliases=['force']),
+ region=dict(default='US', type='str'),
+ versioning=dict(default=False, type='bool')
+ ),
+ )
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto 2.9+ is required for this module. Try: pip install --upgrade boto')
+
+ bucket = module.params.get('bucket')
+ obj = module.params.get('object')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ mode = module.params.get('mode')
+ expiry = module.params.get('expiration')
+ gs_secret_key = module.params.get('gs_secret_key')
+ gs_access_key = module.params.get('gs_access_key')
+ overwrite = module.params.get('overwrite')
+
+ if mode == 'put':
+ if not src or not obj:
+ module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
+ if mode == 'get':
+ if not dest or not obj:
+ module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
+
+ try:
+ gs = boto.connect_gs(gs_access_key, gs_secret_key)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg=str(e))
+
+ if mode == 'get':
+ if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
+ module.fail_json(msg="Target bucket/key cannot be found", failed=True)
+ if not path_check(dest):
+ download_gsfile(module, gs, bucket, obj, dest)
+ else:
+ handle_get(module, gs, bucket, obj, overwrite, dest)
+
+ if mode == 'put':
+ if not path_check(src):
+ module.fail_json(msg="Local object for PUT does not exist", failed=True)
+ handle_put(module, gs, bucket, obj, overwrite, src, expiry)
+
+ # Support for deleting an object if we have both params.
+ if mode == 'delete':
+ handle_delete(module, gs, bucket, obj)
+
+ if mode == 'create':
+ handle_create(module, gs, bucket, obj)
+
+ if mode == 'get_url':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ get_download_url(module, gs, bucket, obj, expiry)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+ # --------------------------- Get the String contents of an Object -------------------------
+ if mode == 'get_str':
+ if bucket and obj:
+ if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
+ download_gsstr(module, gs, bucket, obj)
+ else:
+ module.fail_json(msg="Key/Bucket does not exist", failed=True)
+ else:
+ module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py
new file mode 100644
index 00000000..b97377b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_record.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_resource_record_set) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given resource record should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ type: str
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ zone_id:
+ type: str
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+ - If both I(zone) and I(zone_id) are specified, I(zone_id) will be
+ used.
+ type:
+ type: str
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ type: list
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(True), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+ field (e.g., CNAME, PTR, SRV, etc.) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ type: int
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ type: bool
+ default: 'no'
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_zone).
+ - This module's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+- name: Create an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+- name: Update an existing record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+- name: Remove an A record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+- name: Create a CNAME record. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.'
+
+- name: Create an MX record with a custom TTL. Note the trailing dot of value
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.'
+
+- name: Create multiple A records with the same name
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Change the value of an existing record with multiple record_data
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Safely remove a multi-line record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+- name: Unconditionally remove a record
+ community.general.gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+- name: Create an AAAA record
+ community.general.gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+- name: Create a PTR record
+ community.general.gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+- name: Create an NS record
+ community.general.gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+- name: Create a TXT record
+ community.general.gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+ description: Whether the module was allowed to overwrite the record
+ returned: success
+ type: bool
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: str
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: str
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: str
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+ description: The DNS name of the zone
+ returned: success
+ type: str
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: str
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR']
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+ """Creates or overwrites a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ record_name = module.params['record']
+ record_type = module.params['type']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+ data = dict(ttl=ttl, rrdatas=record_data)
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # If we found a record, we need to check if the values match.
+ if record is not None:
+ # If the record matches, we obviously don't have to change anything.
+ if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ return False
+
+ # The record doesn't match, so we need to check if we can overwrite it.
+ if not overwrite:
+ module.fail_json(
+ msg='cannot overwrite existing record, overwrite protection enabled',
+ changed=False
+ )
+
+ # The record either doesn't exist, or it exists and we can overwrite it.
+ if record is None and not module.check_mode:
+ # There's no existing record, so we'll just create it.
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The resource record name and type are valid by themselves, but
+ # not when combined (e.g., an 'A' record with "www.example.com"
+ # as its value).
+ module.fail_json(
+ msg='value is invalid for the given type: ' +
+ "%s, got value: %s" % (record_type, record_data),
+ changed=False
+ )
+
+ elif error.code == 'cnameResourceRecordSetConflict':
+ # We're attempting to create a CNAME resource record when we
+ # already have another type of resource record with the same
+ # domain name.
+ module.fail_json(
+ msg="non-CNAME resource record already exists: %s" % record_name,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+ elif record is not None and not module.check_mode:
+ # The Google provider in libcloud doesn't support updating a record in
+ # place, so if the record already exists, we need to delete it and
+ # recreate it using the new information.
+ gcdns.delete_record(record)
+
+ try:
+ gcdns.create_record(record_name, zone, record_type, data)
+ except InvalidRequestError:
+ # Something blew up when creating the record. This will usually be a
+ # result of invalid value data in the new record. Unfortunately, we
+ # already changed the state of the record by deleting the old one,
+ # so we'll try to roll back before failing out.
+ try:
+ gcdns.create_record(record.name, record.zone, record.type, record.data)
+ module.fail_json(
+ msg='error updating record, the original record was restored',
+ changed=False
+ )
+ except LibcloudError:
+ # We deleted the old record, couldn't create the new record, and
+ # couldn't roll back. That really sucks. We'll dump the original
+ # record to the failure output so the user can restore it if
+ # necessary.
+ module.fail_json(
+ msg='error updating record, and could not restore original record, ' +
+ "original name: %s " % record.name +
+ "original zone: %s " % record.zone +
+ "original type: %s " % record.type +
+ "original data: %s" % record.data,
+ changed=True)
+
+ return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg='cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed=False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+ # To create a zone, we need to supply a domain name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on domain
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg='TTL cannot be less than zero, got: %d' % ttl,
+ changed=False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid A record value, got: %s' % value,
+ changed=False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg='invalid AAAA record value, got: %s' % value,
+ changed=False
+ )
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg='CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg="wildcard NS records not allowed, got: %s" % record_name,
+ changed=False
+ )
+
+ # Values for txt records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+ if value[0] != '"' and value[-1] != '"':
+ module.fail_json(
+ msg='TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed=False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg='CNAME records cannot match the zone name',
+ changed=False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg='cannot delete root NS records',
+ changed=False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg='cannot update existing root NS records',
+ changed=False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg='non-root SOA records are not permitted, got: %s' % record_name,
+ changed=False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ record=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(type='str'),
+ zone_id=dict(type='str'),
+ type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+ record_data=dict(aliases=['value'], type='list'),
+ ttl=dict(default=300, type='int'),
+ overwrite=dict(default=False, type='bool'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ required_if=[
+ ('state', 'present', ['record_data']),
+ ('overwrite', False, ['record_data'])
+ ],
+ required_one_of=[['zone', 'zone_id']],
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ zone_name = module.params['zone']
+ zone_id = module.params['zone_id']
+
+ json_output = dict(
+ state=state,
+ record=record_name,
+ zone=zone_name,
+ zone_id=zone_id,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl,
+ overwrite=module.boolean(module.params['overwrite'])
+ )
+
+ # Google Cloud DNS wants the trailing dot on all DNS names.
+ if zone_name is not None and zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+ if record_name[-1] != '.':
+ record_name = record_name + '.'
+
+ # Build a connection object that we can use to connect with Google Cloud
+ # DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check that the zone we're creating a record for actually
+ # exists.
+ zone = _get_zone(gcdns, zone_name, zone_id)
+ if zone is None and zone_name is not None:
+ module.fail_json(
+ msg='zone name was not found: %s' % zone_name,
+ changed=False
+ )
+ elif zone is None and zone_id is not None:
+ module.fail_json(
+ msg='zone id was not found: %s' % zone_id,
+ changed=False
+ )
+
+ # Populate the returns with the actual zone information.
+ json_output['zone'] = zone.domain
+ json_output['zone_id'] = zone.id
+
+ # We also need to check if the record we want to create or remove actually
+ # exists.
+ try:
+ record = _get_record(gcdns, zone, record_type, record_name)
+ except InvalidRequestError:
+ # We gave Google Cloud DNS an invalid DNS record name.
+ module.fail_json(
+ msg='record name is invalid: %s' % record_name,
+ changed=False
+ )
+
+ _additional_sanity_checks(module, zone)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if record is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ record=record.data['name'],
+ type=record.data['type'],
+ record_data=record.data['rrdatas'],
+ ttl=record.data['ttl']
+ )
+ diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+ # Create, remove, or modify the record.
+ if state == 'present':
+ diff['after'] = dict(
+ record=record_name,
+ type=record_type,
+ record_data=module.params['record_data'],
+ ttl=ttl
+ )
+ diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+ changed = create_record(module, gcdns, zone, record)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_record(module, gcdns, record)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py
new file mode 100644
index 00000000..6f66b5fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcdns_zone.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+################################################################################
+# Documentation
+################################################################################
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+author: "William Albert (@walbert947)"
+requirements:
+ - "apache-libcloud >= 0.19.0"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_dns_managed_zone) instead.
+options:
+ state:
+ type: str
+ description:
+ - Whether the given zone should or should not be present.
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ type: str
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ type: str
+ description:
+ - An arbitrary text string to use for the zone description.
+ default: ""
+ service_account_email:
+ type: str
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+notes:
+ - See also M(community.general.gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters
+ community.general.gcdns_zone:
+ zone: example.com
+
+# Zone removal example.
+- name: Remove a zone
+ community.general.gcdns_zone:
+ zone: example.com
+ state: absent
+
+# Zone creation with description.
+- name: Create a zone with a description
+ community.general.gcdns_zone:
+ zone: example.com
+ description: This is an awesome zone
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: str
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: str
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: str
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ # The libcloud Google Cloud DNS provider.
+ PROVIDER = Provider.GOOGLE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+ PROVIDER = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcdns import gcdns_connect
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+
+def create_zone(module, gcdns, zone):
+ """Creates a new Google Cloud DNS zone."""
+
+ description = module.params['description']
+ extra = dict(description=description)
+ zone_name = module.params['zone']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ # If we got a zone back, then the domain exists.
+ if zone is not None:
+ return False
+
+ # The zone doesn't exist yet.
+ try:
+ if not module.check_mode:
+ gcdns.create_zone(domain=zone_name, extra=extra)
+ return True
+
+ except ResourceExistsError:
+ # The zone already exists. We checked for this already, so either
+ # Google is lying, or someone was a ninja and created the zone
+ # within milliseconds of us checking for its existence. In any case,
+ # the zone has already been created, so we have nothing more to do.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'invalid':
+ # The zone name or a parameter might be completely invalid. This is
+ # typically caused by an illegal DNS name (e.g. foo..com).
+ module.fail_json(
+ msg="zone name is not a valid DNS name: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'managedZoneDnsNameNotAvailable':
+ # Google Cloud DNS will refuse to create zones with certain domain
+ # names, such as TLDs, ccTLDs, or special domain names such as
+ # example.com.
+ module.fail_json(
+ msg="zone name is reserved or already in use: %s" % zone_name,
+ changed=False
+ )
+
+ elif error.code == 'verifyManagedZoneDnsNameOwnership':
+ # This domain name needs to be verified before Google will create
+ # it. This occurs when a user attempts to create a zone which shares
+ # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+ module.fail_json(
+ msg="ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def remove_zone(module, gcdns, zone):
+ """Removes an existing Google Cloud DNS zone."""
+
+ # If there's no zone, then we're obviously done.
+ if zone is None:
+ return False
+
+ # An empty zone will have two resource records:
+ # 1. An NS record with a list of authoritative name servers
+ # 2. An SOA record
+ # If any additional resource records are present, Google Cloud DNS will
+ # refuse to remove the zone.
+ if len(zone.list_records()) > 2:
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ try:
+ if not module.check_mode:
+ gcdns.delete_zone(zone)
+ return True
+
+ except ResourceNotFoundError:
+ # When we performed our check, the zone existed. It may have been
+ # deleted by something else. It's gone, so whatever.
+ return False
+
+ except InvalidRequestError as error:
+ if error.code == 'containerNotEmpty':
+ # When we performed our check, the zone existed and was empty. In
+ # the milliseconds between the check and the removal command,
+ # records were added to the zone.
+ module.fail_json(
+ msg="zone is not empty and cannot be removed: %s" % zone.domain,
+ changed=False
+ )
+
+ else:
+ # The error is something else that we don't know how to handle,
+ # so we'll just re-raise the exception.
+ raise
+
+
+def _get_zone(gcdns, zone_name):
+ """Gets the zone object for a given domain name."""
+
+ # To create a zone, we need to supply a zone name. However, to delete a
+ zone, we need to supply a zone ID. Zone IDs are often based on zone
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _sanity_check(module):
+ """Run module sanity checks."""
+
+ zone_name = module.params['zone']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg='This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed=False
+ )
+
+ # Google Cloud DNS does not support the creation of TLDs.
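+ # For example (illustrative), 'com' or 'com.' would be rejected by the check below, while 'example.com' passes.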
+ if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
+ module.fail_json(
+ msg='cannot create top-level domain: %s' % zone_name,
+ changed=False
+ )
+
+################################################################################
+# Main
+################################################################################
+
+
+def main():
+ """Main function"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ zone=dict(required=True, aliases=['name'], type='str'),
+ description=dict(default='', type='str'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ _sanity_check(module)
+
+ zone_name = module.params['zone']
+ state = module.params['state']
+
+ # Google Cloud DNS wants the trailing dot on the domain name.
+ if zone_name[-1] != '.':
+ zone_name = zone_name + '.'
+
+ json_output = dict(
+ state=state,
+ zone=zone_name,
+ description=module.params['description']
+ )
+
+ # Build a connection object that we can use to connect with Google
+ # Cloud DNS.
+ gcdns = gcdns_connect(module, provider=PROVIDER)
+
+ # We need to check if the zone we're attempting to create already exists.
+ zone = _get_zone(gcdns, zone_name)
+
+ diff = dict()
+
+ # Build the 'before' diff
+ if zone is None:
+ diff['before'] = ''
+ diff['before_header'] = '<absent>'
+ else:
+ diff['before'] = dict(
+ zone=zone.domain,
+ description=zone.extra['description']
+ )
+ diff['before_header'] = zone_name
+
+ # Create or remove the zone.
+ if state == 'present':
+ diff['after'] = dict(
+ zone=zone_name,
+ description=module.params['description']
+ )
+ diff['after_header'] = zone_name
+
+ changed = create_zone(module, gcdns, zone)
+
+ elif state == 'absent':
+ diff['after'] = ''
+ diff['after_header'] = '<absent>'
+
+ changed = remove_zone(module, gcdns, zone)
+
+ module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py
new file mode 100644
index 00000000..7e658786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce.py
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce
+short_description: create or terminate GCE instances
+description:
+ - Creates or terminates Google Compute Engine (GCE) instances. See
+ U(https://cloud.google.com/compute) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_instance) instead.
+options:
+ image:
+ type: str
+ description:
+ - image string to use for the instance (default will follow latest
+ stable debian image)
+ default: "debian-8"
+ image_family:
+ type: str
+ description:
+ - image family from which to select the image. The most recent
+ non-deprecated image in the family will be used.
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+ instance_names:
+ type: str
+ description:
+ - a comma-separated list of instance names to create or destroy
+ machine_type:
+ type: str
+ description:
+ - machine type to use for the instance; 'n1-standard-1' is used by default
+ default: "n1-standard-1"
+ metadata:
+ type: str
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ name:
+ type: str
+ description:
+ - either a name of a single instance or when used with 'num_instances',
+ the base name of a cluster of nodes
+ aliases: ['base_name']
+ num_instances:
+ type: int
+ description:
+ - can be used with 'name', specifies
+ the number of nodes to provision using 'name'
+ as a base name
+ network:
+ type: str
+ description:
+ - name of the network, 'default' will be used if not specified
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - name of the subnetwork in which the instance should be created
+ persistent_boot_disk:
+ description:
+ - if set, create the instance with a persistent boot disk
+ type: bool
+ default: 'no'
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ zone:
+ type: str
+ description:
+ - the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
+ default: "us-central1-a"
+ ip_forward:
+ description:
+ - set to C(yes) if the instance can forward ip packets (useful for
+ gateways)
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
+ default: "ephemeral"
+ disk_auto_delete:
+ description:
+ - if set, the boot disk will be removed after instance destruction
+ type: bool
+ default: 'yes'
+ preemptible:
+ description:
+ - if set to C(yes), instances will be preemptible and time-limited.
+ (requires libcloud >= 0.20.0)
+ type: bool
+ disk_size:
+ type: int
+ description:
+ - The size of the boot disk created for this instance (in GB)
+ default: 10
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - Either I(instance_names) or I(name) is required.
+ - JSON credentials strongly preferred.
+author:
+ - Eric Johnson (@erjohnso) <erjohnso@google.com>
+ - Tom Melendez (@supertom) <supertom@google.com>
+'''
+
+EXAMPLES = '''
+# Basic provisioning example. Create a single Debian 8 instance in the
+# us-central1-a Zone of the n1-standard-1 machine type.
+# Create multiple instances by specifying multiple names, separated by
+# commas in the instance_names field
+# (e.g. my-test-instance1,my-test-instance2)
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single instance of an image from the "my-base-image" image family
+# in the us-central1-a Zone of the n1-standard-1 machine type.
+# This image family is in the "my-other-project" GCP project.
+ - community.general.gce:
+ instance_names: my-test-instance1
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image_family: my-base-image
+ external_projects:
+ - my-other-project
+ state: present
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ disk_size: 32
+
+# Create a single Debian 8 instance in the us-central1-a Zone
+# Use existing disks, custom network/subnetwork, set service account permissions
+# add tags and metadata.
+ - community.general.gce:
+ instance_names: my-test-instance
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ state: present
+ metadata: '{"db":"postgres", "group":"qa", "id":500}'
+ tags:
+ - http-server
+ - my-other-tag
+ disks:
+ - name: disk-2
+ mode: READ_WRITE
+ - name: disk-3
+ mode: READ_ONLY
+ disk_auto_delete: false
+ network: foobar-network
+ subnetwork: foobar-subnetwork-1
+ preemptible: true
+ ip_forward: true
+ service_account_permissions:
+ - storage-full
+ - taskqueue
+ - bigquery
+ - https://www.googleapis.com/auth/ndev.clouddns.readwrite
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+
+---
+# Example Playbook
+- name: Compute Engine Instance Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create multiple instances
+ # Basic provisioning example. Create multiple Debian 8 instances in the
+ # us-central1-a Zone of n1-standard-1 machine type.
+ community.general.gce:
+ instance_names: test1,test2,test3
+ zone: us-central1-a
+ machine_type: n1-standard-1
+ image: debian-8
+ state: present
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ metadata : '{ "startup-script" : "apt-get update" }'
+ register: gce
+
+ - name: Save host data
+ ansible.builtin.add_host:
+ hostname: "{{ item.public_ip }}"
+ groupname: gce_instances_ips
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Wait for SSH for instances
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ip }}"
+ port: 22
+ state: started
+ timeout: 30
+ with_items: "{{ gce.instance_data }}"
+
+ - name: Configure Hosts
+ hosts: gce_instances_ips
+ become: yes
+ become_method: sudo
+ roles:
+ - my-role-one
+ - my-role-two
+ tags:
+ - config
+
+ - name: Delete test-instances
+ # Basic termination of instance.
+ community.general.gce:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ instance_names: "{{ gce.instance_names }}"
+ zone: us-central1-a
+ state: absent
+ tags:
+ - delete
+'''
+
+import socket
+import logging
+
+try:
+ from ast import literal_eval
+
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+from ansible_collections.community.general.plugins.module_utils.gcp import get_valid_location
+from ansible.module_utils.six.moves import reduce
+
+
+def get_instance_info(inst):
+ """Retrieves instance information from an instance object and returns it
+ as a dictionary.
+
+ """
+ metadata = {}
+ if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
+ for md in inst.extra['metadata']['items']:
+ metadata[md['key']] = md['value']
+
+ try:
+ netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ except Exception:
+ netname = None
+ try:
+ subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+ except Exception:
+ subnetname = None
+ if 'disks' in inst.extra:
+ disk_names = [disk_info['source'].split('/')[-1]
+ for disk_info
+ in sorted(inst.extra['disks'],
+ key=lambda disk_info: disk_info['index'])]
+ else:
+ disk_names = []
+
+ if len(inst.public_ips) == 0:
+ public_ip = None
+ else:
+ public_ip = inst.public_ips[0]
+
+ return ({
+ 'image': inst.image is not None and inst.image.split('/')[-1] or None,
+ 'disks': disk_names,
+ 'machine_type': inst.size,
+ 'metadata': metadata,
+ 'name': inst.name,
+ 'network': netname,
+ 'subnetwork': subnetname,
+ 'private_ip': inst.private_ips[0],
+ 'public_ip': public_ip,
+ 'status': ('status' in inst.extra) and inst.extra['status'] or None,
+ 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
+ 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
+ })
+
+
+def create_instances(module, gce, instance_names, number, lc_zone):
+ """Creates new instances. Attributes other than instance_names are picked
+ up from 'module'
+
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ instance_names: python list of instance names to create
+ number: number of instances to create
+ lc_zone: GCEZone object
+
+ Returns:
+ A list of dictionaries with instance information
+ about the instances that were launched.
+
+ """
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ machine_type = module.params.get('machine_type')
+ metadata = module.params.get('metadata')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ persistent_boot_disk = module.params.get('persistent_boot_disk')
+ disks = module.params.get('disks')
+ tags = module.params.get('tags')
+ ip_forward = module.params.get('ip_forward')
+ external_ip = module.params.get('external_ip')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ preemptible = module.params.get('preemptible')
+ disk_size = module.params.get('disk_size')
+ service_account_permissions = module.params.get('service_account_permissions')
+
+ if external_ip == "none":
+ instance_external_ip = None
+ elif external_ip != "ephemeral":
+ instance_external_ip = external_ip
+ try:
+ # check if instance_external_ip is an ip or a name
+ try:
+ socket.inet_aton(instance_external_ip)
+ instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
+ except socket.error:
+ instance_external_ip = gce.ex_get_address(instance_external_ip)
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
+ else:
+ instance_external_ip = external_ip
+
+ new_instances = []
+ changed = False
+
+ lc_disks = []
+ disk_modes = []
+ for i, disk in enumerate(disks or []):
+ if isinstance(disk, dict):
+ lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
+ disk_modes.append(disk['mode'])
+ else:
+ lc_disks.append(gce.ex_get_volume(disk, lc_zone))
+ # boot disk is implicitly READ_WRITE
+ disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
+ lc_network = gce.ex_get_network(network)
+ lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
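+ # Illustrative example (hypothetical input): metadata='{"db": "postgres"}' parses to
+ # {'db': 'postgres'}, and on libcloud < 0.15 is wrapped as {'items': [{'key': 'db', 'value': 'postgres'}]}.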
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+
+ lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
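+ # Resulting structure (illustrative): [{'email': 'default', 'scopes': ['storage-full', 'bigquery']}]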
+
+ # These variables all have default values but check just in case
+ if not lc_network or not lc_machine_type or not lc_zone:
+ module.fail_json(msg='Missing required create instance variable',
+ changed=False)
+
+ gce_args = dict(
+ location=lc_zone,
+ ex_network=network, ex_tags=tags, ex_metadata=metadata,
+ ex_can_ip_forward=ip_forward,
+ external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
+ ex_service_accounts=ex_sa_perms
+ )
+ if preemptible is not None:
+ gce_args['ex_preemptible'] = preemptible
+ if subnetwork is not None:
+ gce_args['ex_subnetwork'] = subnetwork
+
+ if isinstance(instance_names, str) and not number:
+ instance_names = [instance_names]
+
+ if isinstance(instance_names, str) and number:
+ instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
+ lc_image(), number, **gce_args)
+ for resp in instance_responses:
+ n = resp
+ if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
+ try:
+ n = gce.ex_get_node(n.name, lc_zone)
+ except ResourceNotFoundError:
+ pass
+ else:
+ # At least one node was created successfully, so mark the run as changed.
+ changed = True
+ new_instances.append(n)
+ else:
+ for instance in instance_names:
+ pd = None
+ if lc_disks:
+ pd = lc_disks[0]
+ elif persistent_boot_disk:
+ try:
+ pd = gce.ex_get_volume("%s" % instance, lc_zone)
+ except ResourceNotFoundError:
+ pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
+ gce_args['ex_boot_disk'] = pd
+
+ inst = None
+ try:
+ inst = gce.ex_get_node(instance, lc_zone)
+ except ResourceNotFoundError:
+ inst = gce.create_node(
+ instance, lc_machine_type, lc_image(), **gce_args
+ )
+ changed = True
+ except GoogleBaseError as e:
+ module.fail_json(msg='Unexpected error attempting to create ' +
+ 'instance %s, error: %s' % (instance, e.value))
+ if inst:
+ new_instances.append(inst)
+
+ for inst in new_instances:
+ for i, lc_disk in enumerate(lc_disks):
+ # Check whether the disk is already attached
+ if (len(inst.extra['disks']) > i):
+ attached_disk = inst.extra['disks'][i]
+ if attached_disk['source'] != lc_disk.extra['selfLink']:
+ module.fail_json(
+ msg=("Disk at index %d does not match: requested=%s found=%s" % (
+ i, lc_disk.extra['selfLink'], attached_disk['source'])))
+ elif attached_disk['mode'] != disk_modes[i]:
+ module.fail_json(
+ msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
+ i, disk_modes[i], attached_disk['mode'])))
+ else:
+ continue
+ gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
+ # Work around libcloud bug: attached volumes don't get added
+ # to the instance metadata. get_instance_info() only cares about
+ # source and index.
+ if len(inst.extra['disks']) != i + 1:
+ inst.extra['disks'].append(
+ {'source': lc_disk.extra['selfLink'], 'index': i})
+
+ instance_names = []
+ instance_json_data = []
+ for inst in new_instances:
+ d = get_instance_info(inst)
+ instance_names.append(d['name'])
+ instance_json_data.append(d)
+
+ return (changed, instance_json_data, instance_names)
+
+
+def change_instance_state(module, gce, instance_names, number, zone, state):
+ """Changes the state of a list of instances. For example,
+ change from started to stopped, or started to absent.
+
+ module: Ansible module object
+ gce: authenticated GCE connection object
+ instance_names: a list of instance names whose state will be changed
+ zone: GCEZone object where the instances reside prior to termination
+ state: 'state' parameter passed into module as argument
+
+ Returns a dictionary of instance names that were changed.
+
+ """
+ changed = False
+ nodes = []
+ state_instance_names = []
+
+ if isinstance(instance_names, str) and number:
+ node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
+ elif isinstance(instance_names, str) and not number:
+ node_names = [instance_names]
+ else:
+ node_names = instance_names
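+ # For example (illustrative), instance_names='web' with number=3 produces
+ # node_names == ['web-000', 'web-001', 'web-002']; with number unset the name is used as-is.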
+
+ for name in node_names:
+ inst = None
+ try:
+ inst = gce.ex_get_node(name, zone)
+ except ResourceNotFoundError:
+ state_instance_names.append(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ else:
+ nodes.append(inst)
+ state_instance_names.append(name)
+
+ if state in ['absent', 'deleted'] and number:
+ changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
+ changed = reduce(lambda x, y: x or y, changed_nodes)
+ else:
+ for node in nodes:
+ if state in ['absent', 'deleted']:
+ gce.destroy_node(node)
+ changed = True
+ elif state == 'started' and node.state == libcloud.compute.types.NodeState.STOPPED:
+ gce.ex_start_node(node)
+ changed = True
+ elif state in ['stopped', 'terminated'] and node.state == libcloud.compute.types.NodeState.RUNNING:
+ gce.ex_stop_node(node)
+ changed = True
+
+ return (changed, state_instance_names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(default='debian-8'),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ instance_names=dict(),
+ machine_type=dict(default='n1-standard-1'),
+ metadata=dict(),
+ name=dict(aliases=['base_name']),
+ num_instances=dict(type='int'),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ persistent_boot_disk=dict(type='bool', default=False),
+ disks=dict(type='list'),
+ state=dict(choices=['active', 'present', 'absent', 'deleted',
+ 'started', 'stopped', 'terminated'],
+ default='present'),
+ tags=dict(type='list'),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ disk_auto_delete=dict(type='bool', default=True),
+ disk_size=dict(type='int', default=10),
+ preemptible=dict(type='bool', default=None),
+ ),
+ mutually_exclusive=[('instance_names', 'name')]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ instance_names = module.params.get('instance_names')
+ name = module.params.get('name')
+ number = module.params.get('num_instances')
+ subnetwork = module.params.get('subnetwork')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+ preemptible = module.params.get('preemptible')
+ changed = False
+
+ inames = None
+ if isinstance(instance_names, list):
+ inames = instance_names
+ elif isinstance(instance_names, str):
+ inames = instance_names.split(',')
+ if name:
+ inames = name
+ if not inames:
+ module.fail_json(msg='Must specify a "name" or "instance_names"',
+ changed=False)
+ if not zone:
+ module.fail_json(msg='Must specify a "zone"', changed=False)
+
+ lc_zone = get_valid_location(module, gce, zone)
+ if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
+ module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
+ changed=False)
+
+ if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
+ changed=False)
+
+ json_output = {'zone': zone}
+ if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
+ json_output['state'] = state
+ (changed, state_instance_names) = change_instance_state(
+ module, gce, inames, number, lc_zone, state)
+
+ # based on what user specified, return the same variable, although
+ # value could be different if an instance could not be destroyed
+ if instance_names or name and number:
+ json_output['instance_names'] = state_instance_names
+ elif name:
+ json_output['name'] = name
+
+ elif state in ['active', 'present']:
+ json_output['state'] = 'present'
+ (changed, instance_data, instance_name_list) = create_instances(
+ module, gce, inames, number, lc_zone)
+ json_output['instance_data'] = instance_data
+ if instance_names:
+ json_output['instance_names'] = instance_name_list
+ elif name:
+ json_output['name'] = name
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+class LazyDiskImage:
+ """
+ Object for lazy instantiation of disk image
+ gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
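+ Illustrative usage: build the object once (lc_image = LazyDiskImage(module, gce, image, lc_disks))
+ and call lc_image() only when an image is actually needed, so the lookup happens at most once.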
+ """
+
+ def __init__(self, module, gce, name, has_pd, family=None, projects=None):
+ self.image = None
+ self.was_called = False
+ self.gce = gce
+ self.name = name
+ self.has_pd = has_pd
+ self.module = module
+ self.family = family
+ self.projects = projects
+
+ def __call__(self):
+ if not self.was_called:
+ self.was_called = True
+ if not self.has_pd:
+ if self.family:
+ self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
+ else:
+ self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
+ if not self.image:
+ self.module.fail_json(msg='image or disks missing for create instance', changed=False)
+ return self.image
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py
new file mode 100644
index 00000000..b5fd4bf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_eip.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gce_eip
+short_description: Create or Destroy Global or Regional External IP addresses.
+description:
+ - Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
+ U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+notes:
+ - Global addresses can only be used with Global Forwarding Rules.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of Address.
+ required: true
+ region:
+ type: str
+ description:
+ - Region to create the address in. Set to 'global' to create a global address.
+ required: true
+ state:
+ type: str
+ description: The state the address should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: path
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Create a Global external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-global-ip
+ region: global
+ state: present
+
+- name: Create a Regional external IP address
+ community.general.gce_eip:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ name: my-regional-ip
+ region: us-east1
+ state: present
+'''
+
+RETURN = '''
+address:
+ description: IP address being operated on
+ returned: always
+ type: str
+ sample: "35.186.222.233"
+name:
+ description: name of the address being operated on
+ returned: always
+ type: str
+ sample: "my-address"
+region:
+ description: The region the address belongs to.
+ returned: always
+ type: str
+ sample: "global"
+'''
+
+USER_AGENT_VERSION = 'v1'
+USER_AGENT_PRODUCT = 'Ansible-gce_eip'
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import gcp_connect
+
+
+def get_address(gce, name, region):
+ """
+ Get an Address from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Address.
+ :type name: ``str``
+
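+ :param region: Region of the Address, or 'global' for a global address.
+ :type region: ``str``
+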
+ :return: A GCEAddress object or None.
+ :rtype: :class: `GCEAddress` or None
+ """
+ try:
+ return gce.ex_get_address(name=name, region=region)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_address(gce, params):
+ """
+ Create a new Address.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+
+ address = gce.ex_create_address(
+ name=params['name'], region=params['region'])
+
+ if address:
+ changed = True
+ return_data = address.address
+
+ return (changed, return_data)
+
+
+def delete_address(address):
+ """
+ Delete an Address.
+
+ :param address: The GCEAddress object to destroy.
+ :type address: :class: `GCEAddress`
+
+ :return: Tuple with changed status and address.
+ :rtype: tuple in the format of (bool, str)
+ """
+ changed = False
+ return_data = []
+ if address.destroy():
+ changed = True
+ return_data = address.address
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ region=dict(required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.19+) required for this module.')
+
+ gce = gcp_connect(module, Provider.GCE, get_driver,
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['name'] = module.params.get('name')
+ params['region'] = module.params.get('region')
+
+ changed = False
+ json_output = {'state': params['state']}
+ address = get_address(gce, params['name'], region=params['region'])
+
+ if params['state'] == 'absent':
+ if not address:
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown address: %s" %
+ (params['name']))
+ else:
+ # Delete
+ (changed, json_output['address']) = delete_address(address)
+ else:
+ if not address:
+ # Create
+ (changed, json_output['address']) = create_address(gce,
+ params)
+ else:
+ changed = False
+ json_output['address'] = address.address
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py
new file mode 100644
index 00000000..c4705098
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_img.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+"""An Ansible module to utilize GCE image resources."""
+
+DOCUMENTATION = '''
+---
+module: gce_img
+short_description: utilize GCE image resources
+description:
+ - This module can create and delete GCE private images from a gzipped
+ tarball containing raw disk data or from existing detached
+ disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ type: str
+ description:
+ - the name of the image to create or delete
+ required: true
+ description:
+ type: str
+ description:
+ - an optional description
+ family:
+ type: str
+ description:
+ - an optional family name
+ source:
+ type: str
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ state:
+ type: str
+ description:
+ - desired state of the image
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ type: str
+ description:
+ - the zone of the disk specified by source
+ default: "us-central1-a"
+ timeout:
+ type: int
+ description:
+ - timeout for the operation
+ default: 180
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (@supertom)"
+'''
+
+EXAMPLES = '''
+- name: Create an image named test-image from the disk 'test-disk' in zone us-central1-a
+ community.general.gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+- name: Create an image named test-image from a tarball in Google Cloud Storage
+ community.general.gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+- name: Alternatively use the gs scheme
+ community.general.gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+- name: Delete an image named test-image
+ community.general.gce_img:
+ name: test-image
+ state: absent
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+GCS_URI = 'https://storage.googleapis.com/'
+
+
+def create_image(gce, name, module):
+ """Create an image with the specified name."""
+ source = module.params.get('source')
+ zone = module.params.get('zone')
+ desc = module.params.get('description')
+ timeout = module.params.get('timeout')
+ family = module.params.get('family')
+
+ if not source:
+ module.fail_json(msg='Must supply a source', changed=False)
+
+ if source.startswith(GCS_URI):
+ # source is a Google Cloud Storage URI
+ volume = source
+ elif source.startswith('gs://'):
+ # libcloud only accepts https URI.
+ volume = source.replace('gs://', GCS_URI)
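+ # e.g. gs://bucket/path/image.tgz becomes https://storage.googleapis.com/bucket/path/image.tgz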
+ else:
+ try:
+ volume = gce.ex_get_volume(source, zone)
+ except ResourceNotFoundError:
+ module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
+ changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ gce_extra_args = {}
+ if family is not None:
+ gce_extra_args['family'] = family
+
+ old_timeout = gce.connection.timeout
+ try:
+ gce.connection.timeout = timeout
+ gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
+ return True
+ except ResourceExistsError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+ finally:
+ gce.connection.timeout = old_timeout
+
+
+def delete_image(gce, name, module):
+ """Delete a specific image resource by name."""
+ try:
+ gce.ex_delete_image(name)
+ return True
+ except ResourceNotFoundError:
+ return False
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ family=dict(),
+ description=dict(),
+ source=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ zone=dict(default='us-central1-a'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ project_id=dict(),
+ timeout=dict(type='int', default=180)
+ )
+ )
+
+ if not has_libcloud:
+ module.fail_json(msg='libcloud with GCE support is required.')
+
+ gce = gce_connect(module)
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ family = module.params.get('family')
+ changed = False
+
+ if family is not None and hasattr(libcloud, '__version__') and libcloud.__version__ <= '0.20.1':
+ module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
+ changed=False)
+
+ # user wants to create an image.
+ if state == 'present':
+ changed = create_image(gce, name, module)
+
+ # user wants to delete the image.
+ if state == 'absent':
+ changed = delete_image(gce, name, module)
+
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py
new file mode 100644
index 00000000..04ddacce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_instance_template.py
@@ -0,0 +1,605 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_instance_template
+short_description: create or destroy Compute Engine instance templates in GCP.
+description:
+ - Creates or destroys Google Compute Engine instance templates
+ on Google Cloud Platform.
+options:
+ state:
+ type: str
+ description:
+ - The desired state for the instance template.
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ type: str
+ description:
+ - The name of the GCE instance template.
+ required: True
+ aliases: [base_name]
+ size:
+ type: str
+ description:
+ - The desired machine type for the instance template.
+ default: "f1-micro"
+ source:
+ type: str
+ description:
+ - A source disk to attach to the instance.
+ Cannot specify both I(image) and I(source).
+ image:
+ type: str
+ description:
+ - The image to use to create the instance.
+ Cannot specify both I(image) and I(source).
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the instance.
+ If I(image) has been used I(image_family) is ignored.
+ Cannot specify both I(image) and I(source).
+ default: debian-8
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ choices:
+ - pd-standard
+ - pd-ssd
+ default: pd-standard
+ disk_auto_delete:
+ description:
+ - Indicate that the boot disk should be
+ deleted when the Node is deleted.
+ default: true
+ type: bool
+ network:
+ type: str
+ description:
+ - The network to associate with the instance.
+ default: "default"
+ subnetwork:
+ type: str
+ description:
+ - The Subnetwork resource name for this instance.
+ can_ip_forward:
+ description:
+ - Set to C(yes) to allow instance to
+ send/receive non-matching src/dst packets.
+ type: bool
+ default: 'no'
+ external_ip:
+ type: str
+ description:
+ - The external IP address to use.
+ If C(ephemeral), a new non-static address will be
+ used. If C(None), then no external address will
+ be used. To use an existing static IP address
+ specify address name.
+ default: "ephemeral"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ automatic_restart:
+ description:
+ - Defines whether the instance should be
+ automatically restarted when it is
+ terminated by Compute Engine.
+ type: bool
+ preemptible:
+ description:
+ - Defines whether the instance is preemptible.
+ type: bool
+ tags:
+ type: list
+ description:
+ - a comma-separated list of tags to associate with the instance
+ metadata:
+ description:
+ - a hash/dictionary of custom data for the instance;
+ '{"key":"value", ...}'
+ description:
+ type: str
+ description:
+ - description of instance template
+ disks:
+ type: list
+ description:
+ - a list of persistent disks to attach to the instance; a string value
+ gives the name of the disk; alternatively, a dictionary value can
+ define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
+ will be the boot disk (which must be READ_WRITE).
+ nic_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted networkInterfaces[] structure.
+ disks_gce_struct:
+ type: list
+ description:
+ - Support passing in the GCE-specific
+ formatted disks[] structure. Case sensitive.
+ See U(https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource) for detailed information.
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use 'credentials_file'.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ subnetwork_region:
+ type: str
+ description:
+ - Region that subnetwork resides in. (Required for subnetwork to successfully complete)
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
+ >= 0.20.0 if using preemptible option"
+notes:
+ - JSON credentials strongly preferred.
+author: "Gwenael Pellen (@GwenaelPellenArkeup) <gwenael.pellen@arkeup.com>"
+'''
+
+EXAMPLES = '''
+# Usage
+- name: Create instance template named foo
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "your-project-name"
+ credentials_file: "/path/to/your-key.json"
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+
+# Example Playbook
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ - name: Delete instance template
+ community.general.gce_instance_template:
+ name: my-test-instance-template
+ size: n1-standard-1
+ image_family: ubuntu-1604-lts
+ state: absent
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+
+# Example playbook using disks_gce_struct
+- name: Compute Engine Instance Template Examples
+ hosts: localhost
+ vars:
+ service_account_email: "your-sa@your-project-name.iam.gserviceaccount.com"
+ credentials_file: "/path/to/your-key.json"
+ project_id: "your-project-name"
+ tasks:
+ - name: Create instance template
+ community.general.gce_instance_template:
+ name: foo
+ size: n1-standard-1
+ state: present
+ project_id: "{{ project_id }}"
+ credentials_file: "{{ credentials_file }}"
+ service_account_email: "{{ service_account_email }}"
+ disks_gce_struct:
+ - device_name: /dev/sda
+ boot: true
+ autoDelete: true
+ initializeParams:
+ diskSizeGb: 30
+ diskType: pd-ssd
+ sourceImage: projects/debian-cloud/global/images/family/debian-8
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible.module_utils._text import to_native
+
+
+def get_info(inst):
+ """Retrieves instance template information
+ """
+ return({
+ 'name': inst.name,
+ 'extra': inst.extra,
+ })
+
+
+def create_instance_template(module, gce):
+ """Create an instance template
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ # get info from module
+ name = module.params.get('name')
+ size = module.params.get('size')
+ source = module.params.get('source')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ disk_type = module.params.get('disk_type')
+ disk_auto_delete = module.params.get('disk_auto_delete')
+ network = module.params.get('network')
+ subnetwork = module.params.get('subnetwork')
+ subnetwork_region = module.params.get('subnetwork_region')
+ can_ip_forward = module.params.get('can_ip_forward')
+ external_ip = module.params.get('external_ip')
+ service_account_permissions = module.params.get(
+ 'service_account_permissions')
+ service_account_email = module.params.get('service_account_email')
+ on_host_maintenance = module.params.get('on_host_maintenance')
+ automatic_restart = module.params.get('automatic_restart')
+ preemptible = module.params.get('preemptible')
+ tags = module.params.get('tags')
+ metadata = module.params.get('metadata')
+ description = module.params.get('description')
+ disks_gce_struct = module.params.get('disks_gce_struct')
+ changed = False
+
+ # args of ex_create_instancetemplate
+ gce_args = dict(
+ name="instance",
+ size="f1-micro",
+ source=None,
+ image=None,
+ disk_type='pd-standard',
+ disk_auto_delete=True,
+ network='default',
+ subnetwork=None,
+ can_ip_forward=None,
+ external_ip='ephemeral',
+ service_accounts=None,
+ on_host_maintenance=None,
+ automatic_restart=None,
+ preemptible=None,
+ tags=None,
+ metadata=None,
+ description=None,
+ disks_gce_struct=None,
+ nic_gce_struct=None
+ )
+
+ gce_args['name'] = name
+ gce_args['size'] = size
+
+ if source is not None:
+ gce_args['source'] = source
+
+ if image:
+ gce_args['image'] = image
+ else:
+ if image_family:
+ image = gce.ex_get_image_from_family(image_family)
+ gce_args['image'] = image
+ else:
+ gce_args['image'] = "debian-8"
+
+ gce_args['disk_type'] = disk_type
+ gce_args['disk_auto_delete'] = disk_auto_delete
+
+ gce_network = gce.ex_get_network(network)
+ gce_args['network'] = gce_network
+
+ if subnetwork is not None:
+ gce_args['subnetwork'] = gce.ex_get_subnetwork(subnetwork, region=subnetwork_region)
+
+ if can_ip_forward is not None:
+ gce_args['can_ip_forward'] = can_ip_forward
+
+ if external_ip == "ephemeral":
+ instance_external_ip = external_ip
+ elif external_ip == "none":
+ instance_external_ip = None
+ else:
+ try:
+ instance_external_ip = gce.ex_get_address(external_ip)
+ except GoogleBaseError as err:
+ # external_ip could not be resolved to a reserved address; pass the value through unchanged.
+ instance_external_ip = external_ip
+ gce_args['external_ip'] = instance_external_ip
+
+ ex_sa_perms = []
+ bad_perms = []
+ if service_account_permissions:
+ for perm in service_account_permissions:
+ if perm not in gce.SA_SCOPES_MAP:
+ bad_perms.append(perm)
+ if len(bad_perms) > 0:
+ module.fail_json(msg='bad permissions: %s' % str(bad_perms))
+ if service_account_email is not None:
+ ex_sa_perms.append({'email': str(service_account_email)})
+ else:
+ ex_sa_perms.append({'email': "default"})
+ ex_sa_perms[0]['scopes'] = service_account_permissions
+ gce_args['service_accounts'] = ex_sa_perms
+
+ if on_host_maintenance is not None:
+ gce_args['on_host_maintenance'] = on_host_maintenance
+
+ if automatic_restart is not None:
+ gce_args['automatic_restart'] = automatic_restart
+
+ if preemptible is not None:
+ gce_args['preemptible'] = preemptible
+
+ if tags is not None:
+ gce_args['tags'] = tags
+
+ if disks_gce_struct is not None:
+ gce_args['disks_gce_struct'] = disks_gce_struct
+
+ # Try to convert the user's metadata value into the format expected
+ # by GCE. First try to ensure user has proper quoting of a
+ # dictionary-like syntax using 'literal_eval', then convert the python
+ # dict into a python list of 'key' / 'value' dicts. Should end up
+ # with:
+ # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
+ if metadata:
+ if isinstance(metadata, dict):
+ md = metadata
+ else:
+ try:
+ md = literal_eval(str(metadata))
+ if not isinstance(md, dict):
+ raise ValueError('metadata must be a dict')
+ except ValueError as e:
+ module.fail_json(msg='bad metadata: %s' % str(e))
+ except SyntaxError as e:
+ module.fail_json(msg='bad metadata syntax')
+
+ if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
+ items = []
+ for k, v in md.items():
+ items.append({"key": k, "value": v})
+ metadata = {'items': items}
+ else:
+ metadata = md
+ gce_args['metadata'] = metadata
+
+ if description is not None:
+ gce_args['description'] = description
+
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ except ResourceNotFoundError:
+ try:
+ instance = gce.ex_create_instancetemplate(**gce_args)
+ changed = True
+ except GoogleBaseError as err:
+ module.fail_json(
+ msg='Unexpected error attempting to create instance {0}, error: {1}'
+ .format(
+ instance,
+ err.value
+ )
+ )
+
+ if instance:
+ json_data = get_info(instance)
+ else:
+ module.fail_json(msg="no instance template!")
+
+ return (changed, json_data, name)
+
+
+def delete_instance_template(module, gce):
+ """ Delete instance template.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ instance template information
+ """
+ name = module.params.get('name')
+ current_state = "absent"
+ changed = False
+
+ # get instance template
+ instance = None
+ try:
+ instance = gce.ex_get_instancetemplate(name)
+ current_state = "present"
+ except GoogleBaseError as e:
+ json_data = dict(msg='instance template does not exist: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ if current_state == "present":
+ rc = instance.destroy()
+ if rc:
+ changed = True
+ else:
+ module.fail_json(
+ msg='instance template destroy failed'
+ )
+
+ json_data = {}
+ return (changed, json_data, name)
+
+
+def module_controller(module, gce):
+ ''' Control module state parameter.
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+ nothing
+ Exit:
+ AnsibleModule object exit with json data.
+ '''
+ json_output = dict()
+ state = module.params.get("state")
+ if state == "present":
+ (changed, output, name) = create_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+ elif state == "absent":
+ (changed, output, name) = delete_instance_template(module, gce)
+ json_output['changed'] = changed
+ json_output['msg'] = output
+
+ module.exit_json(**json_output)
+
+
+def check_if_system_state_would_be_changed(module, gce):
+    ''' Check whether the system state would be changed (used for check mode).
+ module : AnsibleModule object
+ gce: authenticated GCE libcloud driver
+ Returns:
+        (changed, output) describing whether and how the state would change
+ '''
+ changed = False
+ current_state = "absent"
+
+ state = module.params.get("state")
+ name = module.params.get("name")
+
+    try:
+        gce.ex_get_instancetemplate(name)
+        current_state = "present"
+    except ResourceNotFoundError:
+        # The template does not exist yet; current_state stays "absent".
+        pass
+    except GoogleBaseError as e:
+        module.fail_json(msg='GCE get instancetemplate problem: %s' % to_native(e),
+                         exception=traceback.format_exc())
+
+ if current_state != state:
+ changed = True
+
+ if current_state == "absent":
+ if changed:
+ output = 'instance template {0} will be created'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+ if current_state == "present":
+ if changed:
+ output = 'instance template {0} will be destroyed'.format(name)
+ else:
+ output = 'nothing to do for instance template {0} '.format(name)
+
+ return (changed, output)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ name=dict(required=True, aliases=['base_name']),
+ size=dict(default='f1-micro'),
+ source=dict(),
+ image=dict(),
+ image_family=dict(default='debian-8'),
+ disk_type=dict(choices=['pd-standard', 'pd-ssd'], default='pd-standard', type='str'),
+ disk_auto_delete=dict(type='bool', default=True),
+ network=dict(default='default'),
+ subnetwork=dict(),
+ can_ip_forward=dict(type='bool', default=False),
+ external_ip=dict(default='ephemeral'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ automatic_restart=dict(type='bool', default=None),
+ preemptible=dict(type='bool', default=None),
+ tags=dict(type='list'),
+ metadata=dict(),
+ description=dict(),
+ disks=dict(type='list'),
+ nic_gce_struct=dict(type='list'),
+ project_id=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ subnetwork_region=dict(),
+ disks_gce_struct=dict(type='list')
+ ),
+ mutually_exclusive=[['source', 'image']],
+ required_one_of=[['image', 'image_family']],
+ supports_check_mode=True
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ try:
+ gce = gce_connect(module)
+ except GoogleBaseError as e:
+ module.fail_json(msg='GCE Connection failed %s' % to_native(e), exception=traceback.format_exc())
+
+ if module.check_mode:
+ (changed, output) = check_if_system_state_would_be_changed(module, gce)
+ module.exit_json(
+ changed=changed,
+ msg=output
+ )
+ else:
+ module_controller(module, gce)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py
new file mode 100644
index 00000000..dced7599
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_labels.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_labels
+short_description: Create, Update or Destroy GCE Labels.
+description:
+ - Create, Update or Destroy GCE Labels on instances, disks, snapshots, etc.
+ When specifying the GCE resource, users may specify the full URL for
+ the resource (its 'self_link'), or the individual parameters of the
+ resource (type, location, name). Examples for the two options can be
+ seen in the documentation.
+ See U(https://cloud.google.com/compute/docs/label-or-tag-resources) for
+ more information about GCE Labels. Labels are gradually being added to
+ more GCE resources, so this module will need to be updated as new
+ resources are added to the GCE (v1) API.
+requirements:
+ - 'python >= 2.6'
+ - 'google-api-python-client >= 1.6.2'
+ - 'google-auth >= 1.0.0'
+ - 'google-auth-httplib2 >= 0.0.2'
+notes:
+ - Labels support resources such as instances, disks, images, etc. See
+ U(https://cloud.google.com/compute/docs/labeling-resources) for the list
+ of resources available in the GCE v1 API (not alpha or beta).
+author:
+ - 'Eric Johnson (@erjohnso) <erjohnso@google.com>'
+options:
+ labels:
+ type: dict
+ description:
+      - A dictionary of labels (key/value pairs) to add to or remove from the resource.
+ required: false
+ resource_url:
+ type: str
+ description:
+ - The 'self_link' for the resource (instance, disk, snapshot, etc)
+ required: false
+ resource_type:
+ type: str
+ description:
+ - The type of resource (instances, disks, snapshots, images)
+ required: false
+ resource_location:
+ type: str
+ description:
+ - The location of resource (global, us-central1-f, etc.)
+ required: false
+ resource_name:
+ type: str
+ description:
+ - The name of resource.
+ required: false
+ state:
+ type: str
+ description: The state the labels should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+      - service account permissions
+'''
+
+EXAMPLES = '''
+- name: Add labels on an existing instance (using resource_url)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: present
+- name: Add labels on an image (using resource params)
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ webserver-frontend: homepage
+ environment: test
+ experiment-name: kennedy
+ resource_type: images
+ resource_location: global
+ resource_name: my-custom-image
+ state: present
+- name: Remove specified labels from the GCE instance
+ community.general.gce_labels:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ labels:
+ environment: prod
+ experiment-name: kennedy
+ resource_url: https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance
+ state: absent
+'''
+
+RETURN = '''
+labels:
+  description: Dictionary of labels that exist on the resource.
+ returned: Always.
+ type: dict
+  sample: { 'webserver-frontend': 'homepage', 'environment': 'test', 'experiment-name': 'kennedy' }
+resource_url:
+ description: The 'self_link' of the GCE resource.
+ returned: Always.
+ type: str
+ sample: 'https://www.googleapis.com/compute/beta/projects/myproject/zones/us-central1-f/instances/example-instance'
+resource_type:
+ description: The type of the GCE resource.
+ returned: Always.
+ type: str
+ sample: instances
+resource_location:
+ description: The location of the GCE resource.
+ returned: Always.
+ type: str
+ sample: us-central1-f
+resource_name:
+ description: The name of the GCE resource.
+ returned: Always.
+ type: str
+ sample: my-happy-little-instance
+state:
+ description: state of the labels
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+
+
+UA_PRODUCT = 'ansible-gce_labels'
+UA_VERSION = '0.0.1'
+GCE_API_VERSION = 'v1'
+
+# TODO(all): As Labels are added to more GCE resources, this list will need to
+# be updated (along with some code changes below). The list can *only* include
+# resources from the 'v1' GCE API and will *not* work with 'beta' or 'alpha'.
+KNOWN_RESOURCES = ['instances', 'disks', 'snapshots', 'images']
+
+
+def _fetch_resource(client, module):
+ params = module.params
+ if params['resource_url']:
+ if not params['resource_url'].startswith('https://www.googleapis.com/compute'):
+ module.fail_json(
+ msg='Invalid self_link url: %s' % params['resource_url'])
+ else:
+ parts = params['resource_url'].split('/')[8:]
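+            # For example (illustration only), a zonal self_link such as
+            # .../projects/myproject/zones/us-central1-f/instances/example-instance
+            # splits (from element 8 onward) into ['us-central1-f', 'instances', 'example-instance'],
+            # while a global URL like .../projects/myproject/global/images/my-image
+            # yields ['images', 'my-image'] and defaults to the 'global' location.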
+ if len(parts) == 2:
+ resource_type, resource_name = parts
+ resource_location = 'global'
+ else:
+ resource_location, resource_type, resource_name = parts
+ else:
+ if not params['resource_type'] or not params['resource_location'] \
+ or not params['resource_name']:
+ module.fail_json(msg='Missing required resource params.')
+ resource_type = params['resource_type'].lower()
+ resource_name = params['resource_name'].lower()
+ resource_location = params['resource_location'].lower()
+
+ if resource_type not in KNOWN_RESOURCES:
+ module.fail_json(msg='Unsupported resource_type: %s' % resource_type)
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if resource_type == 'instances':
+ resource = client.instances().get(project=params['project_id'],
+ zone=resource_location,
+ instance=resource_name).execute()
+ elif resource_type == 'disks':
+ resource = client.disks().get(project=params['project_id'],
+ zone=resource_location,
+ disk=resource_name).execute()
+ elif resource_type == 'snapshots':
+ resource = client.snapshots().get(project=params['project_id'],
+ snapshot=resource_name).execute()
+ elif resource_type == 'images':
+ resource = client.images().get(project=params['project_id'],
+ image=resource_name).execute()
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % resource_type)
+
+ return resource.get('labelFingerprint', ''), {
+ 'resource_name': resource.get('name'),
+ 'resource_url': resource.get('selfLink'),
+ 'resource_type': resource_type,
+ 'resource_location': resource_location,
+ 'labels': resource.get('labels', {})
+ }
+
+
+def _set_labels(client, new_labels, module, ri, fingerprint):
+ params = module.params
+ result = err = None
+ labels = {
+ 'labels': new_labels,
+ 'labelFingerprint': fingerprint
+ }
+
+ # TODO(all): See the comment above for KNOWN_RESOURCES. As labels are
+ # added to the v1 GCE API for more resources, some minor code work will
+ # need to be added here.
+ if ri['resource_type'] == 'instances':
+ req = client.instances().setLabels(project=params['project_id'],
+ instance=ri['resource_name'],
+ zone=ri['resource_location'],
+ body=labels)
+ elif ri['resource_type'] == 'disks':
+ req = client.disks().setLabels(project=params['project_id'],
+ zone=ri['resource_location'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'snapshots':
+ req = client.snapshots().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ elif ri['resource_type'] == 'images':
+ req = client.images().setLabels(project=params['project_id'],
+ resource=ri['resource_name'],
+ body=labels)
+ else:
+ module.fail_json(msg='Unsupported resource type: %s' % ri['resource_type'])
+
+ # TODO(erjohnso): Once Labels goes GA, we'll be able to use the GCPUtils
+ # method to poll for the async request/operation to complete before
+ # returning. However, during 'beta', we are in an odd state where
+ # API requests must be sent to the 'compute/beta' API, but the python
+ # client library only allows for *Operations.get() requests to be
+ # sent to 'compute/v1' API. The response operation is in the 'beta'
+ # API-scope, but the client library cannot find the operation (404).
+ # result = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ # return result, err
+ result = req.execute()
+ return True, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ labels=dict(required=False, type='dict', default={}),
+ resource_url=dict(required=False, type='str'),
+ resource_name=dict(required=False, type='str'),
+ resource_location=dict(required=False, type='str'),
+ resource_type=dict(required=False, type='str'),
+ project_id=dict()
+ ),
+ required_together=[
+ ['resource_name', 'resource_location', 'resource_type']
+ ],
+ mutually_exclusive=[
+ ['resource_url', 'resource_name'],
+ ['resource_url', 'resource_location'],
+ ['resource_url', 'resource_type']
+ ]
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ client, cparams = get_google_api_client(module, 'compute',
+ user_agent_product=UA_PRODUCT,
+ user_agent_version=UA_VERSION,
+ api_version=GCE_API_VERSION)
+
+ # Get current resource info including labelFingerprint
+ fingerprint, resource_info = _fetch_resource(client, module)
+ new_labels = resource_info['labels'].copy()
+
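+    # For example (illustration only): with existing labels {'env': 'test'}, state=absent
+    # and labels={'env': 'test'} removes the key and triggers an update, while
+    # state=present and labels={'tier': 'web'} adds the missing key.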
+ update_needed = False
+ if module.params['state'] == 'absent':
+ for k, v in module.params['labels'].items():
+ if k in new_labels:
+ if new_labels[k] == v:
+ update_needed = True
+ new_labels.pop(k, None)
+ else:
+ module.fail_json(msg="Could not remove unmatched label pair '%s':'%s'" % (k, v))
+ else:
+ for k, v in module.params['labels'].items():
+ if k not in new_labels:
+ update_needed = True
+ new_labels[k] = v
+
+ changed = False
+ json_output = {'state': module.params['state']}
+ if update_needed:
+ changed, err = _set_labels(client, new_labels, module, resource_info,
+ fingerprint)
+ json_output['changed'] = changed
+
+ # TODO(erjohnso): probably want to re-fetch the resource to return the
+ # new labelFingerprint, check that desired labels match updated labels.
+ # BUT! Will need to wait for setLabels() to hit v1 API so we can use the
+ # GCPUtils feature to poll for the operation to be complete. For now,
+ # we'll just update the output with what we have from the original
+ # state of the resource.
+ json_output.update(resource_info)
+ json_output.update(module.params)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py
new file mode 100644
index 00000000..50e26a58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_lb.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_lb
+short_description: create/destroy GCE load-balancer resources
+description:
+ - This module can create and destroy Google Compute Engine C(loadbalancer)
+ and C(httphealthcheck) resources. The primary LB resource is the
+ C(load_balancer) resource and the health check parameters are all
+ prefixed with I(httphealthcheck).
+ The full documentation for Google Compute Engine load balancing is at
+ U(https://developers.google.com/compute/docs/load-balancing/). However,
+ the ansible module simplifies the configuration by following the
+ libcloud model.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ httphealthcheck_name:
+ type: str
+ description:
+ - the name identifier for the HTTP health check
+ httphealthcheck_port:
+ type: int
+ description:
+ - the TCP port to use for HTTP health checking
+ default: 80
+ httphealthcheck_path:
+ type: str
+ description:
+ - the url path to use for HTTP health checking
+ default: "/"
+ httphealthcheck_interval:
+ type: int
+ description:
+ - the duration in seconds between each health check request
+ default: 5
+ httphealthcheck_timeout:
+ type: int
+ description:
+ - the timeout in seconds before a request is considered a failed check
+ default: 5
+ httphealthcheck_unhealthy_count:
+ type: int
+ description:
+ - number of consecutive failed checks before marking a node unhealthy
+ default: 2
+ httphealthcheck_healthy_count:
+ type: int
+ description:
+ - number of consecutive successful checks before marking a node healthy
+ default: 2
+ httphealthcheck_host:
+ type: str
+ description:
+ - host header to pass through on HTTP check requests
+ name:
+ type: str
+ description:
+ - name of the load-balancer resource
+ protocol:
+ type: str
+ description:
+ - the protocol used for the load-balancer packet forwarding, tcp or udp
+ - "the available choices are: C(tcp) or C(udp)."
+ default: "tcp"
+ region:
+ type: str
+ description:
+ - the GCE region where the load-balancer is defined
+ external_ip:
+ type: str
+ description:
+ - the external static IPv4 (or auto-assigned) address for the LB
+ port_range:
+ type: str
+ description:
+      - the port (range) to forward, for example C(80) or C(8000-8888); defaults to all ports
+ members:
+ type: list
+ description:
+      - a list of zone/nodename pairs, for example C(['us-central1-a/www-a', ...])
+ state:
+ type: str
+ description:
+ - desired state of the LB
+ - "the available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple example of creating a new LB, adding members, and a health check
+ local_action:
+ module: gce_lb
+ name: testlb
+ region: us-central1
+ members: ["us-central1-a/www-a", "us-central1-b/www-b"]
+ httphealthcheck_name: hc
+ httphealthcheck_port: 80
+ httphealthcheck_path: "/up"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.loadbalancer.types import Provider as Provider_lb
+ from libcloud.loadbalancer.providers import get_driver as get_driver_lb
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import USER_AGENT_PRODUCT, USER_AGENT_VERSION, gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ httphealthcheck_name=dict(),
+ httphealthcheck_port=dict(default=80, type='int'),
+ httphealthcheck_path=dict(default='/'),
+ httphealthcheck_interval=dict(default=5, type='int'),
+ httphealthcheck_timeout=dict(default=5, type='int'),
+ httphealthcheck_unhealthy_count=dict(default=2, type='int'),
+ httphealthcheck_healthy_count=dict(default=2, type='int'),
+ httphealthcheck_host=dict(),
+ name=dict(),
+ protocol=dict(default='tcp'),
+ region=dict(),
+ external_ip=dict(),
+ port_range=dict(),
+ members=dict(type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
+
+ gce = gce_connect(module)
+
+ httphealthcheck_name = module.params.get('httphealthcheck_name')
+ httphealthcheck_port = module.params.get('httphealthcheck_port')
+ httphealthcheck_path = module.params.get('httphealthcheck_path')
+ httphealthcheck_interval = module.params.get('httphealthcheck_interval')
+ httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
+ httphealthcheck_unhealthy_count = module.params.get('httphealthcheck_unhealthy_count')
+ httphealthcheck_healthy_count = module.params.get('httphealthcheck_healthy_count')
+ httphealthcheck_host = module.params.get('httphealthcheck_host')
+ name = module.params.get('name')
+ protocol = module.params.get('protocol')
+ region = module.params.get('region')
+ external_ip = module.params.get('external_ip')
+ port_range = module.params.get('port_range')
+ members = module.params.get('members')
+ state = module.params.get('state')
+
+ try:
+ gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
+ gcelb.connection.user_agent_append("%s/%s" % (
+ USER_AGENT_PRODUCT, USER_AGENT_VERSION))
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ changed = False
+ json_output = {'name': name, 'state': state}
+
+ if not name and not httphealthcheck_name:
+        module.fail_json(msg='Nothing to do, please specify a "name" or "httphealthcheck_name" parameter', changed=False)
+
+ if state in ['active', 'present']:
+ # first, create the httphealthcheck if requested
+ hc = None
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
+ host=httphealthcheck_host, path=httphealthcheck_path,
+ port=httphealthcheck_port,
+ interval=httphealthcheck_interval,
+ timeout=httphealthcheck_timeout,
+ unhealthy_threshold=httphealthcheck_unhealthy_count,
+ healthy_threshold=httphealthcheck_healthy_count)
+ changed = True
+ except ResourceExistsError:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if hc is not None:
+ json_output['httphealthcheck_host'] = hc.extra['host']
+ json_output['httphealthcheck_path'] = hc.path
+ json_output['httphealthcheck_port'] = hc.port
+ json_output['httphealthcheck_interval'] = hc.interval
+ json_output['httphealthcheck_timeout'] = hc.timeout
+ json_output['httphealthcheck_unhealthy_count'] = hc.unhealthy_threshold
+ json_output['httphealthcheck_healthy_count'] = hc.healthy_threshold
+
+ # create the forwarding rule (and target pool under the hood)
+ lb = None
+ if name:
+ if not region:
+ module.fail_json(msg='Missing required region name',
+ changed=False)
+ nodes = []
+ output_nodes = []
+ json_output['name'] = name
+ # members is a python list of 'zone/inst' strings
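+            # For example (illustration only), 'us-central1-a/www-a' splits into the
+            # zone 'us-central1-a' and the instance name 'www-a'; malformed entries are skipped.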
+ if members:
+ for node in members:
+ try:
+ zone, node_name = node.split('/')
+ nodes.append(gce.ex_get_node(node_name, zone))
+ output_nodes.append(node)
+ except Exception:
+ # skip nodes that are badly formatted or don't exist
+ pass
+ try:
+ if hc is not None:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_healthchecks=[hc],
+ ex_address=external_ip)
+ else:
+ lb = gcelb.create_balancer(name, port_range, protocol,
+ None, nodes, ex_region=region, ex_address=external_ip)
+ changed = True
+ except ResourceExistsError:
+ lb = gcelb.get_balancer(name)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if lb is not None:
+ json_output['members'] = output_nodes
+ json_output['protocol'] = protocol
+ json_output['region'] = region
+ json_output['external_ip'] = lb.ip
+ json_output['port_range'] = lb.port
+ hc_names = []
+ if 'healthchecks' in lb.extra:
+ for hc in lb.extra['healthchecks']:
+ hc_names.append(hc.name)
+ json_output['httphealthchecks'] = hc_names
+
+ if state in ['absent', 'deleted']:
+ # first, delete the load balancer (forwarding rule and target pool)
+ # if specified.
+ if name:
+ json_output['name'] = name
+ try:
+ lb = gcelb.get_balancer(name)
+ gcelb.destroy_balancer(lb)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # destroy the health check if specified
+ if httphealthcheck_name:
+ json_output['httphealthcheck_name'] = httphealthcheck_name
+ try:
+ hc = gce.ex_get_healthcheck(httphealthcheck_name)
+ gce.ex_destroy_healthcheck(hc)
+ changed = True
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py
new file mode 100644
index 00000000..42db08bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_mig.py
@@ -0,0 +1,904 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_mig
+short_description: Create, Update or Destroy a Managed Instance Group (MIG).
+description:
+ - Create, Update or Destroy a Managed Instance Group (MIG). See
+ U(https://cloud.google.com/compute/docs/instance-groups) for an overview.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.2.0"
+notes:
+ - Resizing and Recreating VM are also supported.
+ - An existing instance template is required in order to create a
+ Managed Instance Group.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ name:
+ type: str
+ description:
+ - Name of the Managed Instance Group.
+ required: true
+ template:
+ type: str
+ description:
+ - Instance Template to be used in creating the VMs. See
+ U(https://cloud.google.com/compute/docs/instance-templates) to learn more
+ about Instance Templates. Required for creating MIGs.
+ size:
+ type: int
+ description:
+ - Size of Managed Instance Group. If MIG already exists, it will be
+ resized to the number provided here. Required for creating MIGs.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - GCE project ID
+ state:
+ type: str
+ description:
+ - desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+ zone:
+ type: str
+ description:
+ - The GCE zone to use for this Managed Instance Group.
+ required: true
+ autoscaling:
+ type: dict
+ description:
+ - A dictionary of configuration for the autoscaler. 'enabled (bool)', 'name (str)'
+ and policy.max_instances (int) are required fields if autoscaling is used. See
+ U(https://cloud.google.com/compute/docs/reference/beta/autoscalers) for more information
+ on Autoscaling.
+ named_ports:
+ type: list
+ description:
+      - Define named ports that backend services can forward data to. Format is a list of
+        name:port dictionaries.
+ recreate_instances:
+ type: bool
+ default: no
+ description:
+ - Recreate MIG instances.
+'''
+
+EXAMPLES = '''
+# The following playbook creates, rebuilds instances, resizes, and then deletes a MIG.
+# Notes:
+# - Two valid Instance Templates must exist in your GCE project in order to run
+# this playbook. Change the fields to match the templates used in your
+# project.
+# - The use of the 'pause' module is not required, it is just for convenience.
+- name: Managed Instance Group Example
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Create MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 1
+ template: my-instance-template-1
+ named_ports:
+ - name: http
+ port: 80
+ - name: foobar
+ port: 82
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Recreate MIG Instances with Instance Template change.
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ template: my-instance-template-2-small
+ recreate_instances: yes
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Resize MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+
+ - name: Update MIG with Autoscaler
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: present
+ size: 3
+ template: my-instance-template-2-small
+ recreate_instances: yes
+ autoscaling:
+ enabled: yes
+ name: my-autoscaler
+ policy:
+ min_instances: 2
+ max_instances: 5
+ cool_down_period: 37
+ cpu_utilization:
+ target: .39
+ load_balancing_utilization:
+ target: 0.4
+
+ - name: Pause for 30 seconds
+ ansible.builtin.pause:
+ seconds: 30
+
+ - name: Delete MIG
+ community.general.gce_mig:
+ name: ansible-mig-example
+ zone: us-central1-c
+ state: absent
+ autoscaling:
+ enabled: no
+ name: my-autoscaler
+'''
+RETURN = '''
+zone:
+ description: Zone in which to launch MIG.
+ returned: always
+ type: str
+ sample: "us-central1-b"
+
+template:
+ description: Instance Template to use for VMs. Must exist prior to using with MIG.
+ returned: changed
+ type: str
+ sample: "my-instance-template"
+
+name:
+ description: Name of the Managed Instance Group.
+ returned: changed
+ type: str
+ sample: "my-managed-instance-group"
+
+named_ports:
+ description: list of named ports acted upon
+ returned: when named_ports are initially set or updated
+ type: list
+ sample: [{ "name": "http", "port": 80 }, { "name": "foo", "port": 82 }]
+
+size:
+ description: Number of VMs in Managed Instance Group.
+ returned: changed
+ type: int
+ sample: 4
+
+created_instances:
+ description: Names of instances created.
+ returned: When instances are created.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+deleted_instances:
+ description: Names of instances deleted.
+ returned: When instances are deleted.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_created_instances:
+ description: Names of instances created during resizing.
+ returned: When a resize results in the creation of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+resize_deleted_instances:
+ description: Names of instances deleted during resizing.
+ returned: When a resize results in the deletion of instances.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+recreated_instances:
+ description: Names of instances recreated.
+ returned: When instances are recreated.
+ type: list
+ sample: ["ansible-mig-new-0k4y", "ansible-mig-new-0zk5", "ansible-mig-new-kp68"]
+
+created_autoscaler:
+ description: True if Autoscaler was attempted and created. False otherwise.
+ returned: When the creation of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+updated_autoscaler:
+ description: True if an Autoscaler update was attempted and succeeded.
+ False returned if update failed.
+ returned: When the update of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+deleted_autoscaler:
+ description: True if an Autoscaler delete attempted and succeeded.
+ False returned if delete failed.
+ returned: When the delete of an Autoscaler was attempted.
+ type: bool
+ sample: true
+
+set_named_ports:
+ description: True if the named_ports have been set
+ returned: named_ports have been set
+ type: bool
+ sample: true
+
+updated_named_ports:
+ description: True if the named_ports have been updated
+ returned: named_ports have been updated
+ type: bool
+ sample: true
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _check_params(params, field_list):
+ """
+ Helper to validate params.
+
+ Use this in function definitions if they require specific fields
+ to be present.
+
+ :param params: structure that contains the fields
+ :type params: ``dict``
+
+    :param field_list: list of dict representing the fields
+                       [{'name': str, 'required': True/False, 'type': cls}]
+ :type field_list: ``list`` of ``dict``
+
+    :return: Tuple of (True, '') if the params are valid, otherwise
+             (False, error message).
+    :rtype: ``(bool, str)``
+ """
+ for d in field_list:
+        if d['name'] not in params:
+ if d['required'] is True:
+ return (False, "%s is required and must be of type: %s" %
+ (d['name'], str(d['type'])))
+ else:
+ if not isinstance(params[d['name']], d['type']):
+ return (False,
+ "%s must be of type: %s" % (d['name'], str(d['type'])))
+
+ return (True, '')
+
+
+def _validate_autoscaling_params(params):
+ """
+ Validate that the minimum configuration is present for autoscaling.
+
+ :param params: Ansible dictionary containing autoscaling configuration
+ It is expected that autoscaling config will be found at the
+ key 'autoscaling'.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if autoscaler
+ is valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['autoscaling']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['autoscaling'], dict):
+ return (False,
+ 'autoscaling: configuration expected to be a dictionary.')
+
+ # check first-level required fields
+ as_req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'enabled', 'required': True, 'type': bool},
+ {'name': 'policy', 'required': True, 'type': dict}
+ ] # yapf: disable
+
+ (as_req_valid, as_req_msg) = _check_params(params['autoscaling'],
+ as_req_fields)
+ if not as_req_valid:
+ return (False, as_req_msg)
+
+ # check policy configuration
+ as_policy_fields = [
+ {'name': 'max_instances', 'required': True, 'type': int},
+ {'name': 'min_instances', 'required': False, 'type': int},
+ {'name': 'cool_down_period', 'required': False, 'type': int}
+ ] # yapf: disable
+
+ (as_policy_valid, as_policy_msg) = _check_params(
+ params['autoscaling']['policy'], as_policy_fields)
+ if not as_policy_valid:
+ return (False, as_policy_msg)
+
+ # TODO(supertom): check utilization fields
+
+ return (True, '')
+
+
+def _validate_named_port_params(params):
+ """
+ Validate the named ports parameters
+
+ :param params: Ansible dictionary containing named_ports configuration
+ It is expected that autoscaling config will be found at the
+ key 'named_ports'. That key should contain a list of
+ {name : port} dictionaries.
+ :type params: ``dict``
+
+ :return: Tuple containing a boolean and a string. True if params
+ are valid, False otherwise, plus str for message.
+ :rtype: ``(``bool``, ``str``)``
+ """
+ if not params['named_ports']:
+ # It's optional, so if not set at all, it's valid.
+ return (True, '')
+ if not isinstance(params['named_ports'], list):
+ return (False, 'named_ports: expected list of name:port dictionaries.')
+ req_fields = [
+ {'name': 'name', 'required': True, 'type': str},
+ {'name': 'port', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ for np in params['named_ports']:
+ (valid_named_ports, np_msg) = _check_params(np, req_fields)
+ if not valid_named_ports:
+ return (False, np_msg)
+
+ return (True, '')
+
+
+def _get_instance_list(mig, field='name', filter_list=None):
+ """
+ Helper to grab field from instances response.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param field: Field name in list_managed_instances response. Defaults
+ to 'name'.
+ :type field: ``str``
+
+ :param filter_list: list of 'currentAction' strings to filter on. Only
+ items that match a currentAction in this list will
+ be returned. Default is "['NONE']".
+ :type filter_list: ``list`` of ``str``
+
+ :return: List of strings from list_managed_instances response.
+ :rtype: ``list``
+ """
+ filter_list = ['NONE'] if filter_list is None else filter_list
+
+ return [x[field] for x in mig.list_managed_instances()
+ if x['currentAction'] in filter_list]
+
+
+def _gen_gce_as_policy(as_params):
+ """
+ Take Autoscaler params and generate GCE-compatible policy.
+
+ :param as_params: Dictionary in Ansible-playbook format
+ containing policy arguments.
+ :type as_params: ``dict``
+
+ :return: GCE-compatible policy dictionary
+ :rtype: ``dict``
+ """
+ asp_data = {}
+ asp_data['maxNumReplicas'] = as_params['max_instances']
+ if 'min_instances' in as_params:
+ asp_data['minNumReplicas'] = as_params['min_instances']
+ if 'cool_down_period' in as_params:
+ asp_data['coolDownPeriodSec'] = as_params['cool_down_period']
+ if 'cpu_utilization' in as_params and 'target' in as_params[
+ 'cpu_utilization']:
+ asp_data['cpuUtilization'] = {'utilizationTarget':
+ as_params['cpu_utilization']['target']}
+ if 'load_balancing_utilization' in as_params and 'target' in as_params[
+ 'load_balancing_utilization']:
+ asp_data['loadBalancingUtilization'] = {
+ 'utilizationTarget':
+ as_params['load_balancing_utilization']['target']
+ }
+
+ return asp_data
+
+
+def create_autoscaler(gce, mig, params):
+ """
+ Create a new Autoscaler for a MIG.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param mig: An initialized GCEInstanceGroupManager.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+    :return: True if the Autoscaler was created, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ as_policy = _gen_gce_as_policy(params['policy'])
+ autoscaler = gce.ex_create_autoscaler(name=params['name'], zone=mig.zone,
+ instance_group=mig, policy=as_policy)
+ if autoscaler:
+ changed = True
+ return changed
+
+
+def update_autoscaler(gce, autoscaler, params):
+ """
+ Update an Autoscaler.
+
+ Takes an existing Autoscaler object, and updates it with
+ the supplied params before calling libcloud's update method.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param autoscaler: An initialized GCEAutoscaler.
+ :type autoscaler: :class: `GCEAutoscaler`
+
+ :param params: Dictionary of autoscaling parameters.
+ :type params: ``dict``
+
+ :return: True if changes, False otherwise.
+ :rtype: ``bool``
+ """
+ as_policy = _gen_gce_as_policy(params['policy'])
+ if autoscaler.policy != as_policy:
+ autoscaler.policy = as_policy
+ autoscaler = gce.ex_update_autoscaler(autoscaler)
+ if autoscaler:
+ return True
+ return False
+
+
+def delete_autoscaler(autoscaler):
+ """
+ Delete an Autoscaler. Does not affect MIG.
+
+    :param autoscaler: An initialized GCEAutoscaler.
+    :type autoscaler: :class: `GCEAutoscaler`
+
+    :return: True if the Autoscaler was deleted, False otherwise.
+    :rtype: ``bool``
+ """
+ changed = False
+ if autoscaler.destroy():
+ changed = True
+ return changed
+
+
+def get_autoscaler(gce, name, zone):
+ """
+ Get an Autoscaler from GCE.
+
+    If the Autoscaler is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Autoscaler.
+ :type name: ``str``
+
+ :param zone: Zone that the Autoscaler is located in.
+ :type zone: ``str``
+
+ :return: A GCEAutoscaler object or None.
+ :rtype: :class: `GCEAutoscaler` or None
+ """
+ try:
+ # Does the Autoscaler already exist?
+ return gce.ex_get_autoscaler(name, zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def create_mig(gce, params):
+ """
+ Create a new Managed Instance Group.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING']
+
+ mig = gce.ex_create_instancegroupmanager(
+ name=params['name'], size=params['size'], template=params['template'],
+ zone=params['zone'])
+
+ if mig:
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def delete_mig(mig):
+ """
+    Delete a Managed Instance Group. All VMs in that MIG are also deleted.
+
+ :param mig: Managed Instance Group Object from Libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['NONE', 'CREATING', 'RECREATING', 'DELETING',
+ 'ABANDONING', 'RESTARTING', 'REFRESHING']
+ instance_names = _get_instance_list(mig, filter_list=actions_filter)
+ if mig.destroy():
+ changed = True
+ return_data = instance_names
+
+ return (changed, return_data)
+
+
+def recreate_instances_in_mig(mig):
+ """
+ Recreate the instances for a Managed Instance Group.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['RECREATING']
+
+ if mig.recreate_instances():
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def resize_mig(mig, size):
+ """
+ Resize a Managed Instance Group.
+
+ Based on the size provided, GCE will automatically create and delete
+ VMs as needed.
+
+ :param mig: Managed Instance Group Object from libcloud.
+    :type mig: :class: `GCEInstanceGroupManager`
+
+    :param size: Desired number of instances in the group.
+    :type size: ``int``
+
+ :return: Tuple with changed stats and a list of affected instances.
+ :rtype: tuple in the format of (bool, list)
+ """
+ changed = False
+ return_data = []
+ actions_filter = ['CREATING', 'DELETING']
+
+ if mig.resize(size):
+ changed = True
+ return_data = _get_instance_list(mig, filter_list=actions_filter)
+
+ return (changed, return_data)
+
+
+def get_mig(gce, name, zone):
+ """
+ Get a Managed Instance Group from GCE.
+
+    If the MIG is not found, None is returned.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Managed Instance Group.
+ :type name: ``str``
+
+ :param zone: Zone that the Managed Instance Group is located in.
+ :type zone: ``str``
+
+ :return: A GCEInstanceGroupManager object or None.
+ :rtype: :class: `GCEInstanceGroupManager` or None
+ """
+ try:
+ # Does the MIG already exist?
+ return gce.ex_get_instancegroupmanager(name=name, zone=zone)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def update_named_ports(mig, named_ports):
+ """
+ Set the named ports on a Managed Instance Group.
+
+    Sort the existing and desired named ports. If they differ, update them.
+    This also implicitly allows for the removal of named ports.
+
+ :param mig: Managed Instance Group Object from libcloud.
+ :type mig: :class: `GCEInstanceGroupManager`
+
+    :param named_ports: list of dictionaries in the format of {'name': ..., 'port': ...}
+ :type named_ports: ``list`` of ``dict``
+
+    :return: True if the named ports were updated, False otherwise
+ :rtype: ``bool``
+ """
+ changed = False
+ existing_ports = []
+ new_ports = []
+ if hasattr(mig.instance_group, 'named_ports'):
+ existing_ports = sorted(mig.instance_group.named_ports,
+ key=lambda x: x['name'])
+ if named_ports is not None:
+ new_ports = sorted(named_ports, key=lambda x: x['name'])
+
+ if existing_ports != new_ports:
+ if mig.instance_group.set_named_ports(named_ports):
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ name=dict(required=True),
+ template=dict(),
+ recreate_instances=dict(type='bool', default=False),
+ # Do not set a default size here. For Create and some update
+ # operations, it is required and should be explicitly set.
+ # Below, we set it to the existing value if it has not been set.
+ size=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ zone=dict(required=True),
+ autoscaling=dict(type='dict', default=None),
+ named_ports=dict(type='list', default=None),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Managed Instance Group support (1.2+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['zone'] = module.params.get('zone')
+ params['name'] = module.params.get('name')
+ params['size'] = module.params.get('size')
+ params['template'] = module.params.get('template')
+ params['recreate_instances'] = module.params.get('recreate_instances')
+ params['autoscaling'] = module.params.get('autoscaling', None)
+ params['named_ports'] = module.params.get('named_ports', None)
+
+ (valid_autoscaling, as_msg) = _validate_autoscaling_params(params)
+ if not valid_autoscaling:
+ module.fail_json(msg=as_msg, changed=False)
+
+ if params['named_ports'] is not None and not hasattr(
+ gce, 'ex_instancegroup_set_named_ports'):
+ module.fail_json(
+ msg="Apache Libcloud 1.3.0+ is required to use 'named_ports' option",
+ changed=False)
+
+ (valid_named_ports, np_msg) = _validate_named_port_params(params)
+ if not valid_named_ports:
+ module.fail_json(msg=np_msg, changed=False)
+
+ changed = False
+ json_output = {'state': params['state'], 'zone': params['zone']}
+ mig = get_mig(gce, params['name'], params['zone'])
+
+ if not mig:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown managed instance group: %s" %
+ (params['name']))
+ else:
+ # Create MIG
+ req_create_fields = [
+ {'name': 'template', 'required': True, 'type': str},
+ {'name': 'size', 'required': True, 'type': int}
+ ] # yapf: disable
+
+ (valid_create_fields, valid_create_msg) = _check_params(
+ params, req_create_fields)
+ if not valid_create_fields:
+ module.fail_json(msg=valid_create_msg, changed=False)
+
+ (changed, json_output['created_instances']) = create_mig(gce,
+ params)
+ if params['autoscaling'] and params['autoscaling'][
+ 'enabled'] is True:
+ # Fetch newly-created MIG and create Autoscaler for it.
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to fetch MIG %s to create autoscaler \
+ in zone: %s' % (params['name'], params['zone']),
+ changed=False)
+
+ json_output['created_autoscaler'] = True
+ # Add named ports if available
+ if params['named_ports']:
+ mig = get_mig(gce, params['name'], params['zone'])
+ if not mig:
+ module.fail_json(
+ msg='Unable to fetch created MIG %s to create \
+ autoscaler in zone: %s' % (
+ params['name'], params['zone']), changed=False)
+ json_output['set_named_ports'] = update_named_ports(
+ mig, params['named_ports'])
+ if json_output['set_named_ports']:
+ json_output['named_ports'] = params['named_ports']
+
+ elif params['state'] == 'absent':
+ # Delete MIG
+
+ # First, check and remove the autoscaler, if present.
+        # Note: multiple autoscalers can be associated with a single MIG. We
+ # only handle the one that is named, but we might want to think about this.
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ module.fail_json(msg='Unable to fetch autoscaler %s to delete \
+ in zone: %s' % (params['autoscaling']['name'], params['zone']),
+ changed=False)
+
+ changed = delete_autoscaler(autoscaler)
+ json_output['deleted_autoscaler'] = changed
+
+ # Now, delete the MIG.
+ (changed, json_output['deleted_instances']) = delete_mig(mig)
+
+ else:
+ # Update MIG
+
+ # If we're going to update a MIG, we need a size and template values.
+ # If not specified, we use the values from the existing MIG.
+ if not params['size']:
+ params['size'] = mig.size
+
+ if not params['template']:
+ params['template'] = mig.template.name
+
+ if params['template'] != mig.template.name:
+ # Update Instance Template.
+ new_template = gce.ex_get_instancetemplate(params['template'])
+ mig.set_instancetemplate(new_template)
+ json_output['updated_instancetemplate'] = True
+ changed = True
+ if params['recreate_instances'] is True:
+ # Recreate Instances.
+ (changed, json_output['recreated_instances']
+ ) = recreate_instances_in_mig(mig)
+
+ if params['size'] != mig.size:
+ # Resize MIG.
+ keystr = 'created' if params['size'] > mig.size else 'deleted'
+ (changed, json_output['resize_%s_instances' %
+ (keystr)]) = resize_mig(mig, params['size'])
+
+ # Update Autoscaler
+ if params['autoscaling']:
+ autoscaler = get_autoscaler(gce, params['autoscaling']['name'],
+ params['zone'])
+ if not autoscaler:
+ # Try to create autoscaler.
+ # Note: this isn't perfect, if the autoscaler name has changed
+ # we wouldn't know that here.
+ if not create_autoscaler(gce, mig, params['autoscaling']):
+ module.fail_json(
+ msg='Unable to create autoscaler %s for existing MIG %s\
+ in zone: %s' % (params['autoscaling']['name'],
+ params['name'], params['zone']),
+ changed=False)
+ json_output['created_autoscaler'] = True
+ changed = True
+ else:
+ if params['autoscaling']['enabled'] is False:
+ # Delete autoscaler
+ changed = delete_autoscaler(autoscaler)
+ json_output['delete_autoscaler'] = changed
+ else:
+ # Update policy, etc.
+ changed = update_autoscaler(gce, autoscaler,
+ params['autoscaling'])
+ json_output['updated_autoscaler'] = changed
+ named_ports = params['named_ports'] or []
+ json_output['updated_named_ports'] = update_named_ports(mig,
+ named_ports)
+ if json_output['updated_named_ports']:
+ json_output['named_ports'] = named_ports
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py
new file mode 100644
index 00000000..48971ae7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_net.py
@@ -0,0 +1,511 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_net
+short_description: create/destroy GCE networks and firewall rules
+description:
+ - This module can create and destroy Google Compute Engine networks and
+ firewall rules U(https://cloud.google.com/compute/docs/networking).
+ The I(name) parameter is reserved for referencing a network while the
+ I(fwname) parameter is used to reference firewall rules.
+ IPv4 Address ranges must be specified using the CIDR
+ U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ allowed:
+ type: str
+ description:
+      - the protocol:ports to allow (I(tcp:80) or I(tcp:80,443) or I(tcp:80-800;udp:1-25)).
+        This parameter is mandatory when creating or updating a firewall rule.
+ ipv4_range:
+ type: str
+ description:
+      - the IPv4 address range in CIDR notation for the network.
+        This parameter is not required when you reference an existing network in the I(name)
+        parameter, but it is required when creating a new network.
+ fwname:
+ type: str
+ description:
+ - name of the firewall rule
+ name:
+ type: str
+ description:
+ - name of the network
+ src_range:
+ type: list
+ description:
+ - the source IPv4 address range in CIDR notation
+ default: []
+ src_tags:
+ type: list
+ description:
+ - the source instance tags for creating a firewall rule
+ default: []
+ target_tags:
+ type: list
+ description:
+ - the target instance tags for creating a firewall rule
+ default: []
+ state:
+ type: str
+ description:
+ - desired state of the network or firewall
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+      - path to the pem file associated with the service account email.
+        This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ mode:
+ type: str
+ description:
+      - network mode for Google Cloud.
+        C(legacy) indicates a network with an IP address range;
+        C(auto) automatically generates subnetworks in different regions;
+        C(custom) uses networks to group subnets of user specified IP address ranges.
+        See U(https://cloud.google.com/compute/docs/networking#network_types) for details.
+ default: "legacy"
+ choices: ["legacy", "auto", "custom"]
+ subnet_name:
+ type: str
+ description:
+ - name of subnet to create
+ subnet_region:
+ type: str
+ description:
+ - region of subnet to create
+ subnet_desc:
+ type: str
+ description:
+ - description of subnet to create
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>, Tom Melendez (@supertom) <supertom@google.com>"
+'''
+
+EXAMPLES = '''
+# Create a 'legacy' Network
+- name: Create Legacy Network
+ community.general.gce_net:
+ name: legacynet
+ ipv4_range: '10.24.17.0/24'
+ mode: legacy
+ state: present
+
+# Create an 'auto' Network
+- name: Create Auto Network
+ community.general.gce_net:
+ name: autonet
+ mode: auto
+ state: present
+
+# Create a 'custom' Network
+- name: Create Custom Network
+ community.general.gce_net:
+ name: customnet
+ mode: custom
+ subnet_name: "customsubnet"
+ subnet_region: us-east1
+ ipv4_range: '10.240.16.0/24'
+ state: "present"
+
+# Create Firewall Rule with Source Tags
+- name: Create Firewall Rule w/Source Tags
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_tags: "foo,bar"
+
+# Create Firewall Rule with Source Range
+- name: Create Firewall Rule w/Source Range
+ community.general.gce_net:
+ name: default
+ fwname: "my-firewall-rule"
+ allowed: tcp:80
+ state: "present"
+ src_range: ['10.1.1.1/32']
+
+# Create Custom Subnetwork
+- name: Create Custom Subnetwork
+ community.general.gce_net:
+ name: privatenet
+ mode: custom
+ subnet_name: subnet_example
+ subnet_region: us-central1
+ ipv4_range: '10.0.0.0/16'
+'''
+
+RETURN = '''
+allowed:
+ description: Rules (ports and protocols) specified by this firewall rule.
+ returned: When specified
+ type: str
+ sample: "tcp:80;icmp"
+
+fwname:
+ description: Name of the firewall rule.
+ returned: When specified
+ type: str
+ sample: "my-fwname"
+
+ipv4_range:
+ description: IPv4 range of the specified network or subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "10.0.0.0/16"
+
+name:
+ description: Name of the network.
+ returned: always
+ type: str
+ sample: "my-network"
+
+src_range:
+ description: IP address blocks a firewall rule applies to.
+ returned: when specified
+ type: list
+ sample: [ '10.1.1.12/8' ]
+
+src_tags:
+ description: Instance tags the firewall rule applies to.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+
+state:
+ description: State of the item operated on.
+ returned: always
+ type: str
+ sample: "present"
+
+subnet_name:
+ description: Name of the subnetwork.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "my-subnetwork"
+
+subnet_region:
+ description: Region of the specified subnet.
+ returned: when specified or when a subnetwork is created
+ type: str
+ sample: "us-east1"
+
+target_tags:
+ description: Instances with these tags receive traffic allowed by the firewall rule.
+ returned: when specified while creating a firewall rule
+ type: list
+ sample: [ 'foo', 'bar' ]
+'''
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def format_allowed_section(allowed):
+ """Format each section of the allowed list"""
+ if allowed.count(":") == 0:
+ protocol = allowed
+ ports = []
+ elif allowed.count(":") == 1:
+ protocol, ports = allowed.split(":")
+ else:
+ return []
+ if ports.count(","):
+ ports = ports.split(",")
+ elif ports:
+ ports = [ports]
+ return_val = {"IPProtocol": protocol}
+ if ports:
+ return_val["ports"] = ports
+ return return_val
+
+
+def format_allowed(allowed):
+ """Format the 'allowed' value so that it is GCE compatible."""
+ return_value = []
+ if allowed.count(";") == 0:
+ return [format_allowed_section(allowed)]
+ else:
+ sections = allowed.split(";")
+ for section in sections:
+ return_value.append(format_allowed_section(section))
+ return return_value
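+
+# Illustrative sketch (editor's note, not part of the upstream module): with the
+# documented 'allowed' syntax, format_allowed() produces GCE-style structures such as
+#   format_allowed("tcp:80")              -> [{'IPProtocol': 'tcp', 'ports': ['80']}]
+#   format_allowed("tcp:80,443;udp:1-25") -> [{'IPProtocol': 'tcp', 'ports': ['80', '443']},
+#                                             {'IPProtocol': 'udp', 'ports': ['1-25']}]
+#   format_allowed("icmp")                -> [{'IPProtocol': 'icmp'}]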
+
+
+def sorted_allowed_list(allowed_list):
+ """Sort allowed_list (output of format_allowed) by protocol and port."""
+ # sort by protocol
+ allowed_by_protocol = sorted(allowed_list, key=lambda x: x['IPProtocol'])
+ # sort the ports list
+ return sorted(allowed_by_protocol, key=lambda y: sorted(y.get('ports', [])))
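+
+# Illustrative sketch (editor's note): sorted_allowed_list() gives a canonical ordering so the
+# requested rules can be compared against fw.allowed regardless of order, e.g.
+#   sorted_allowed_list([{'IPProtocol': 'udp', 'ports': ['53']},
+#                        {'IPProtocol': 'tcp', 'ports': ['443', '80']}])
+#   -> [{'IPProtocol': 'tcp', 'ports': ['443', '80']}, {'IPProtocol': 'udp', 'ports': ['53']}]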
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ allowed=dict(),
+ ipv4_range=dict(),
+ fwname=dict(),
+ name=dict(),
+ src_range=dict(default=[], type='list'),
+ src_tags=dict(default=[], type='list'),
+ target_tags=dict(default=[], type='list'),
+ state=dict(default='present'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ mode=dict(default='legacy', choices=['legacy', 'auto', 'custom']),
+ subnet_name=dict(),
+ subnet_region=dict(),
+ subnet_desc=dict(),
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ allowed = module.params.get('allowed')
+ ipv4_range = module.params.get('ipv4_range')
+ fwname = module.params.get('fwname')
+ name = module.params.get('name')
+ src_range = module.params.get('src_range')
+ src_tags = module.params.get('src_tags')
+ target_tags = module.params.get('target_tags')
+ state = module.params.get('state')
+ mode = module.params.get('mode')
+ subnet_name = module.params.get('subnet_name')
+ subnet_region = module.params.get('subnet_region')
+ subnet_desc = module.params.get('subnet_desc')
+
+ changed = False
+ json_output = {'state': state}
+
+ if state in ['active', 'present']:
+ network = None
+ subnet = None
+ try:
+ network = gce.ex_get_network(name)
+ json_output['name'] = name
+ if mode == 'legacy':
+ json_output['ipv4_range'] = network.cidr
+ if network and mode == 'custom' and subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork'):
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = subnet.cidr
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants to create a new network that doesn't yet exist
+ if name and not network:
+ if not ipv4_range and mode != 'auto':
+ module.fail_json(msg="Network '" + name + "' is not found. To create network in legacy or custom mode, 'ipv4_range' parameter is required",
+ changed=False)
+ args = [ipv4_range if mode == 'legacy' else None]
+ kwargs = {}
+ if mode != 'legacy':
+ kwargs['mode'] = mode
+
+ try:
+ network = gce.ex_create_network(name, *args, **kwargs)
+ json_output['name'] = name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except TypeError:
+ module.fail_json(msg="Update libcloud to a more recent version (>1.0) that supports network 'mode' parameter", changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ if (subnet_name or ipv4_range) and not subnet and mode == 'custom':
+ if not hasattr(gce, 'ex_create_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ if not subnet_name or not ipv4_range or not subnet_region:
+ module.fail_json(msg="subnet_name, ipv4_range, and subnet_region required for custom mode", changed=changed)
+
+ try:
+ subnet = gce.ex_create_subnetwork(subnet_name, cidr=ipv4_range, network=name, region=subnet_region, description=subnet_desc)
+ json_output['subnet_name'] = subnet_name
+ json_output['ipv4_range'] = ipv4_range
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=changed)
+
+ if fwname:
+ # user creating a firewall rule
+ if not allowed and not src_range and not src_tags:
+ if changed and network:
+ module.fail_json(
+ msg="Network created, but missing required " + "firewall rule parameter(s)", changed=True)
+ module.fail_json(
+ msg="Missing required firewall rule parameter(s)",
+ changed=False)
+
+ allowed_list = format_allowed(allowed)
+
+ # Fetch existing rule and if it exists, compare attributes
+ # update if attributes changed. Create if doesn't exist.
+ try:
+ fw_changed = False
+ fw = gce.ex_get_firewall(fwname)
+
+ # If old and new attributes are different, we update the firewall rule.
+ # This implicitly lets us clear out attributes as well.
+ # allowed_list is required and must not be None for firewall rules.
+ if allowed_list and (sorted_allowed_list(allowed_list) != sorted_allowed_list(fw.allowed)):
+ fw.allowed = allowed_list
+ fw_changed = True
+
+ # source_ranges might not be set in the project; cast it to an empty list
+ fw.source_ranges = fw.source_ranges or []
+
+ # If these attributes are lists, we sort them first, then compare.
+ # Otherwise, we update if they differ.
+ if fw.source_ranges != src_range:
+ if isinstance(src_range, list):
+ if sorted(fw.source_ranges) != sorted(src_range):
+ fw.source_ranges = src_range
+ fw_changed = True
+ else:
+ fw.source_ranges = src_range
+ fw_changed = True
+
+ # source_tags might not be set in the project; cast it to an empty list
+ fw.source_tags = fw.source_tags or []
+
+ if fw.source_tags != src_tags:
+ if isinstance(src_tags, list):
+ if sorted(fw.source_tags) != sorted(src_tags):
+ fw.source_tags = src_tags
+ fw_changed = True
+ else:
+ fw.source_tags = src_tags
+ fw_changed = True
+
+ # target_tags might not be set in the project; cast it to an empty list
+ fw.target_tags = fw.target_tags or []
+
+ if fw.target_tags != target_tags:
+ if isinstance(target_tags, list):
+ if sorted(fw.target_tags) != sorted(target_tags):
+ fw.target_tags = target_tags
+ fw_changed = True
+ else:
+ fw.target_tags = target_tags
+ fw_changed = True
+
+ if fw_changed is True:
+ try:
+ gce.ex_update_firewall(fw)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # Firewall rule not found so we try to create it.
+ except ResourceNotFoundError:
+ try:
+ gce.ex_create_firewall(fwname, allowed_list, network=name,
+ source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
+ changed = True
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ json_output['fwname'] = fwname
+ json_output['allowed'] = allowed
+ json_output['src_range'] = src_range
+ json_output['src_tags'] = src_tags
+ json_output['target_tags'] = target_tags
+
+ if state in ['absent', 'deleted']:
+ if fwname:
+ json_output['fwname'] = fwname
+ fw = None
+ try:
+ fw = gce.ex_get_firewall(fwname)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if fw:
+ gce.ex_destroy_firewall(fw)
+ changed = True
+ elif subnet_name:
+ if not hasattr(gce, 'ex_get_subnetwork') or not hasattr(gce, 'ex_destroy_subnetwork'):
+ module.fail_json(msg='Update libcloud to a more recent version (>1.0) that supports subnetwork creation', changed=changed)
+ json_output['name'] = subnet_name
+ subnet = None
+ try:
+ subnet = gce.ex_get_subnetwork(subnet_name, region=subnet_region)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if subnet:
+ gce.ex_destroy_subnetwork(subnet)
+ changed = True
+ elif name:
+ json_output['name'] = name
+ network = None
+ try:
+ network = gce.ex_get_network(name)
+
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ if network:
+ try:
+ gce.ex_destroy_network(network)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py
new file mode 100644
index 00000000..7e60285f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_pd.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_pd
+short_description: utilize GCE persistent disk resources
+description:
+ - This module can create and destroy unformatted GCE persistent disks
+ U(https://developers.google.com/compute/docs/disks#persistentdisks).
+ It also supports attaching and detaching disks from running instances.
+ Full install/configuration instructions for the gce* modules can
+ be found in the comments of ansible/test/gce_tests.py.
+options:
+ detach_only:
+ description:
+ - do not destroy the disk, merely detach it from an instance
+ type: bool
+ instance_name:
+ type: str
+ description:
+ - instance name if you wish to attach or detach the disk
+ mode:
+ type: str
+ description:
+ - GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
+ default: "READ_ONLY"
+ choices: ["READ_WRITE", "READ_ONLY"]
+ name:
+ type: str
+ description:
+ - name of the disk
+ required: true
+ size_gb:
+ type: str
+ description:
+ - whole integer size of disk (in GB) to create, default is 10 GB
+ default: "10"
+ image:
+ type: str
+ description:
+ - the source image to use for the disk
+ snapshot:
+ type: str
+ description:
+ - the source snapshot to use for the disk
+ state:
+ type: str
+ description:
+ - desired state of the persistent disk
+ - "Available choices are: C(active), C(present), C(absent), C(deleted)."
+ default: "present"
+ zone:
+ type: str
+ description:
+ - zone in which to create the disk
+ default: "us-central1-b"
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ pem_file:
+ type: path
+ description:
+ - path to the pem file associated with the service account email.
+ This option is deprecated. Use C(credentials_file) instead.
+ credentials_file:
+ type: path
+ description:
+ - path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ disk_type:
+ type: str
+ description:
+ - Specify a C(pd-standard) disk or C(pd-ssd) for an SSD disk.
+ default: "pd-standard"
+ delete_on_termination:
+ description:
+ - If C(yes), deletes the volume when instance is terminated
+ type: bool
+ image_family:
+ type: str
+ description:
+ - The image family to use to create the disk.
+ If I(image) has been used, I(image_family) is ignored.
+ Cannot specify both I(image) and I(snapshot).
+ external_projects:
+ type: list
+ description:
+ - A list of other projects (accessible with the provisioning credentials)
+ to be searched for the image.
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
+author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
+'''
+
+EXAMPLES = '''
+- name: Simple attachment action to an existing instance
+ local_action:
+ module: gce_pd
+ instance_name: notlocalhost
+ size_gb: 5
+ name: pd
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, ResourceExistsError, ResourceNotFoundError, ResourceInUseError
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect, unexpected_error_msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ delete_on_termination=dict(type='bool'),
+ detach_only=dict(type='bool'),
+ instance_name=dict(),
+ mode=dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
+ name=dict(required=True),
+ size_gb=dict(default=10),
+ disk_type=dict(default='pd-standard'),
+ image=dict(),
+ image_family=dict(),
+ external_projects=dict(type='list'),
+ snapshot=dict(),
+ state=dict(default='present'),
+ zone=dict(default='us-central1-b'),
+ service_account_email=dict(),
+ pem_file=dict(type='path'),
+ credentials_file=dict(type='path'),
+ project_id=dict(),
+ )
+ )
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ delete_on_termination = module.params.get('delete_on_termination')
+ detach_only = module.params.get('detach_only')
+ instance_name = module.params.get('instance_name')
+ mode = module.params.get('mode')
+ name = module.params.get('name')
+ size_gb = module.params.get('size_gb')
+ disk_type = module.params.get('disk_type')
+ image = module.params.get('image')
+ image_family = module.params.get('image_family')
+ external_projects = module.params.get('external_projects')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+ zone = module.params.get('zone')
+
+ if delete_on_termination and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when requesting delete on termination',
+ changed=False)
+
+ if detach_only and not instance_name:
+ module.fail_json(
+ msg='Must specify an instance name when detaching a disk',
+ changed=False)
+
+ disk = inst = None
+ changed = is_attached = False
+
+ json_output = {'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type}
+ if detach_only:
+ json_output['detach_only'] = True
+ json_output['detached_from_instance'] = instance_name
+
+ if instance_name:
+ # user wants to attach/detach from an existing instance
+ try:
+ inst = gce.ex_get_node(instance_name, zone)
+ # is the disk attached?
+ for d in inst.extra['disks']:
+ if d['deviceName'] == name:
+ is_attached = True
+ json_output['attached_mode'] = d['mode']
+ json_output['attached_to_instance'] = inst.name
+ except Exception:
+ pass
+
+ # find disk if it already exists
+ try:
+ disk = gce.ex_get_volume(name)
+ json_output['size_gb'] = int(disk.size)
+ except ResourceNotFoundError:
+ pass
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+
+ # user wants a disk to exist. If "instance_name" is supplied the user
+ # also wants it attached
+ if state in ['active', 'present']:
+
+ if not size_gb:
+ module.fail_json(msg="Must supply a size_gb", changed=False)
+ try:
+ size_gb = int(round(float(size_gb)))
+ if size_gb < 1:
+ raise Exception
+ except Exception:
+ module.fail_json(msg="Must supply a size_gb larger than 1 GB",
+ changed=False)
+
+ if instance_name and inst is None:
+ module.fail_json(msg='Instance %s does not exist in zone %s' % (
+ instance_name, zone), changed=False)
+
+ if not disk:
+ if image is not None and snapshot is not None:
+ module.fail_json(
+ msg='Cannot give both image (%s) and snapshot (%s)' % (
+ image, snapshot), changed=False)
+ lc_image = None
+ lc_snapshot = None
+ if image_family is not None:
+ lc_image = gce.ex_get_image_from_family(image_family, ex_project_list=external_projects)
+ elif image is not None:
+ lc_image = gce.ex_get_image(image, ex_project_list=external_projects)
+ elif snapshot is not None:
+ lc_snapshot = gce.ex_get_snapshot(snapshot)
+ try:
+ disk = gce.create_volume(
+ size_gb, name, location=zone, image=lc_image,
+ snapshot=lc_snapshot, ex_disk_type=disk_type)
+ except ResourceExistsError:
+ pass
+ except QuotaExceededError:
+ module.fail_json(msg='Requested disk size exceeds quota',
+ changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['size_gb'] = size_gb
+ if image is not None:
+ json_output['image'] = image
+ if snapshot is not None:
+ json_output['snapshot'] = snapshot
+ changed = True
+ if inst and not is_attached:
+ try:
+ gce.attach_volume(inst, disk, device=name, ex_mode=mode,
+ ex_auto_delete=delete_on_termination)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ json_output['attached_to_instance'] = inst.name
+ json_output['attached_mode'] = mode
+ if delete_on_termination:
+ json_output['delete_on_termination'] = True
+ changed = True
+
+ # user wants to delete a disk (or perhaps just detach it).
+ if state in ['absent', 'deleted'] and disk:
+
+ if inst and is_attached:
+ try:
+ gce.detach_volume(disk, ex_node=inst)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+ if not detach_only:
+ try:
+ gce.destroy_volume(disk)
+ except ResourceInUseError as e:
+ module.fail_json(msg=str(e.value), changed=False)
+ except Exception as e:
+ module.fail_json(msg=unexpected_error_msg(e), changed=False)
+ changed = True
+
+ json_output['changed'] = changed
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py
new file mode 100644
index 00000000..4fca1b05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_snapshot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gce_snapshot
+short_description: Create or destroy snapshots for GCE storage volumes
+description:
+ - Manages snapshots for the storage volumes of a GCE compute instance.
+ If the instance has multiple volumes, each snapshot name is prefixed
+ with the name of the disk it was taken from.
+options:
+ instance_name:
+ type: str
+ description:
+ - The GCE instance to snapshot
+ required: True
+ snapshot_name:
+ type: str
+ description:
+ - The name of the snapshot to manage
+ required: True
+ disks:
+ type: list
+ description:
+ - A list of disks to create snapshots for. If none is provided,
+ all of the volumes will have snapshots created.
+ required: False
+ state:
+ type: str
+ description:
+ - Whether a snapshot should be C(present) or C(absent)
+ required: false
+ default: present
+ choices: [present, absent]
+ service_account_email:
+ type: str
+ description:
+ - GCP service account email for the project where the instance resides
+ credentials_file:
+ type: path
+ description:
+ - The path to the credentials file associated with the service account
+ project_id:
+ type: str
+ description:
+ - The GCP project ID to use
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+author: Rob Wagner (@robwagner33)
+'''
+
+EXAMPLES = '''
+- name: Create gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+- name: Delete gce snapshot
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: absent
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+
+# This example creates snapshots for only two of the available disks as
+# disk0-example-snapshot and disk1-example-snapshot
+- name: Create snapshots of specific disks
+ community.general.gce_snapshot:
+ instance_name: example-instance
+ snapshot_name: example-snapshot
+ state: present
+ disks:
+ - disk0
+ - disk1
+ service_account_email: project_name@appspot.gserviceaccount.com
+ credentials_file: /path/to/credentials
+ project_id: project_name
+ delegate_to: localhost
+'''
+
+RETURN = '''
+snapshots_created:
+ description: List of newly created snapshots
+ returned: When snapshots are created
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_deleted:
+ description: List of destroyed snapshots
+ returned: When snapshots are deleted
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_existing:
+ description: List of snapshots that already existed (no-op)
+ returned: When snapshots were already present
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+
+snapshots_absent:
+ description: List of snapshots that were already absent (no-op)
+ returned: When snapshots were already absent
+ type: list
+ sample: "[disk0-example-snapshot, disk1-example-snapshot]"
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def find_snapshot(volume, name):
+ '''
+ Check if there is a snapshot already created with the given name for
+ the passed in volume.
+
+ Args:
+ volume: A gce StorageVolume object to manage
+ name: The name of the snapshot to look for
+
+ Returns:
+ The VolumeSnapshot object if one is found
+ '''
+ found_snapshot = None
+ snapshots = volume.list_snapshots()
+ for snapshot in snapshots:
+ if name == snapshot.name:
+ found_snapshot = snapshot
+ return found_snapshot
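+
+# Illustrative usage sketch (editor's note, names are hypothetical):
+#   volume = gce.ex_get_volume('disk0', 'us-central1-a')
+#   if find_snapshot(volume, 'disk0-example-snapshot') is None:
+#       volume.snapshot('disk0-example-snapshot')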
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(required=True),
+ snapshot_name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ disks=dict(default=None, type='list'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='path'),
+ project_id=dict(type='str')
+ )
+ )
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
+
+ gce = gce_connect(module)
+
+ instance_name = module.params.get('instance_name')
+ snapshot_name = module.params.get('snapshot_name')
+ disks = module.params.get('disks')
+ state = module.params.get('state')
+
+ json_output = dict(
+ changed=False,
+ snapshots_created=[],
+ snapshots_deleted=[],
+ snapshots_existing=[],
+ snapshots_absent=[]
+ )
+
+ snapshot = None
+
+ instance = gce.ex_get_node(instance_name, 'all')
+ instance_disks = instance.extra['disks']
+
+ for instance_disk in instance_disks:
+ disk_snapshot_name = snapshot_name
+ disk_info = gce._get_components_from_path(instance_disk['source'])
+ device_name = disk_info['name']
+ device_zone = disk_info['zone']
+ if disks is None or device_name in disks:
+ volume_obj = gce.ex_get_volume(device_name, device_zone)
+
+ # If we have more than one disk to snapshot, prepend the disk name
+ if len(instance_disks) > 1:
+ disk_snapshot_name = device_name + "-" + disk_snapshot_name
+
+ snapshot = find_snapshot(volume_obj, disk_snapshot_name)
+
+ if snapshot and state == 'present':
+ json_output['snapshots_existing'].append(disk_snapshot_name)
+
+ elif snapshot and state == 'absent':
+ snapshot.destroy()
+ json_output['changed'] = True
+ json_output['snapshots_deleted'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'present':
+ volume_obj.snapshot(disk_snapshot_name)
+ json_output['changed'] = True
+ json_output['snapshots_created'].append(disk_snapshot_name)
+
+ elif not snapshot and state == 'absent':
+ json_output['snapshots_absent'].append(disk_snapshot_name)
+
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py
new file mode 100644
index 00000000..1e36ed4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gce_tag.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+short_description: add or remove tag(s) to/from GCE instances
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/label-or-tag-resources#tags)
+ to/from GCE instances. Use C(instance_pattern) to update multiple instances in a specific zone.
+options:
+ instance_name:
+ type: str
+ description:
+ - The name of the GCE instance to add or remove tags from.
+ - Required if C(instance_pattern) is not specified.
+ instance_pattern:
+ type: str
+ description:
+ - The pattern of GCE instance names to match for adding/removing tags. Full Python regular expressions are supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
+ - If C(instance_name) is not specified, this field is required.
+ tags:
+ type: list
+ description:
+ - Comma-separated list of tags to add or remove.
+ required: yes
+ state:
+ type: str
+ description:
+ - Desired state of the tags.
+ choices: [ absent, present ]
+ default: present
+ zone:
+ type: str
+ description:
+ - The zone of the instance (or instances matching I(instance_pattern)).
+ default: us-central1-a
+ service_account_email:
+ type: str
+ description:
+ - Service account email.
+ pem_file:
+ type: path
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - Your GCE project ID.
+requirements:
+ - python >= 2.6
+ - apache-libcloud >= 0.17.0
+notes:
+ - Either I(instance_name) or I(instance_pattern) is required.
+author:
+ - Do Hoang Khiem (@dohoangkhiem) <dohoangkhiem@gmail.com>
+ - Tom Melendez (@supertom)
+'''
+
+EXAMPLES = '''
+- name: Add tags to instance
+ community.general.gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+- name: Remove tags from instance in default zone (us-central1-a)
+ community.general.gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+- name: Add tags to instances in zone that match pattern
+ community.general.gce_tag:
+ instance_pattern: test-server-*
+ tags: foo,bar
+ zone: us-central1-a
+ state: present
+'''
+
+import re
+import traceback
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+
+
+def _union_items(baselist, comparelist):
+ """Combine two lists, removing duplicates."""
+ return list(set(baselist) | set(comparelist))
+
+
+def _intersect_items(baselist, comparelist):
+ """Return matching items in both lists."""
+ return list(set(baselist) & set(comparelist))
+
+
+def _get_changed_items(baselist, comparelist):
+ """Return changed items as they relate to baselist."""
+ return list(set(baselist) & set(set(baselist) ^ set(comparelist)))
+
+
+def modify_tags(gce, module, node, tags, state='present'):
+ """Modify tags on an instance."""
+
+ existing_tags = node.extra['tags']
+ tags = [x.lower() for x in tags]
+ tags_changed = []
+
+ if state == 'absent':
+ # tags changed are any that intersect
+ tags_changed = _intersect_items(existing_tags, tags)
+ if not tags_changed:
+ return False, None
+ # update instance with tags in existing tags that weren't specified
+ node_tags = _get_changed_items(existing_tags, tags)
+ else:
+ # tags changed are any that in the new list that weren't in existing
+ tags_changed = _get_changed_items(tags, existing_tags)
+ if not tags_changed:
+ return False, None
+ # update instance with the combined list
+ node_tags = _union_items(existing_tags, tags)
+
+ try:
+ gce.ex_set_node_tags(node, node_tags)
+ return True, tags_changed
+ except (GoogleBaseError, InvalidRequestError) as e:
+ module.fail_json(msg=str(e), changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_name=dict(type='str'),
+ instance_pattern=dict(type='str'),
+ tags=dict(type='list', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ zone=dict(type='str', default='us-central1-a'),
+ service_account_email=dict(type='str'),
+ pem_file=dict(type='path'),
+ project_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['instance_name', 'instance_pattern']
+ ],
+ required_one_of=[
+ ['instance_name', 'instance_pattern']
+ ],
+ )
+
+ instance_name = module.params.get('instance_name')
+ instance_pattern = module.params.get('instance_pattern')
+ state = module.params.get('state')
+ tags = module.params.get('tags')
+ zone = module.params.get('zone')
+ changed = False
+
+ if not HAS_LIBCLOUD:
+ module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
+
+ gce = gce_connect(module)
+
+ # Create list of nodes to operate on
+ matching_nodes = []
+ try:
+ if instance_pattern:
+ instances = gce.list_nodes(ex_zone=zone)
+ # no instances in zone
+ if not instances:
+ module.exit_json(changed=False, tags=tags, zone=zone, instances_updated=[])
+ try:
+ # Python regex fully supported: https://docs.python.org/2/library/re.html
+ p = re.compile(instance_pattern)
+ matching_nodes = [i for i in instances if p.search(i.name) is not None]
+ except re.error as e:
+ module.fail_json(msg='Regex error for pattern %s: %s' % (instance_pattern, e), changed=False)
+ else:
+ matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
+ except ResourceNotFoundError:
+ module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
+ except GoogleBaseError as e:
+ module.fail_json(msg=str(e), changed=False, exception=traceback.format_exc())
+
+ # Tag nodes
+ instance_pattern_matches = []
+ tags_changed = []
+ for node in matching_nodes:
+ changed, tags_changed = modify_tags(gce, module, node, tags, state)
+ if changed:
+ instance_pattern_matches.append({'instance_name': node.name, 'tags_changed': tags_changed})
+ if instance_pattern:
+ module.exit_json(changed=changed, instance_pattern=instance_pattern, tags=tags_changed, zone=zone, instances_updated=instance_pattern_matches)
+ else:
+ module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py
new file mode 100644
index 00000000..b1df1da8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gconftool2.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Edit GNOME Configurations
+description:
+ - This module allows for the manipulation of GNOME 2 Configuration via
+ gconftool-2. Please see the gconftool-2(1) man pages for more details.
+options:
+ key:
+ type: str
+ description:
+ - A GConf preference key is an element in the GConf repository
+ that corresponds to an application preference. See man gconftool-2(1)
+ required: yes
+ value:
+ type: str
+ description:
+ - Preference keys typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". See man gconftool-2(1)
+ value_type:
+ type: str
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ choices: [ bool, float, int, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the key/value.
+ required: yes
+ choices: [ absent, get, present ]
+ config_source:
+ type: str
+ description:
+ - Specify a configuration source to use rather than the default path.
+ See man gconftool-2(1)
+ direct:
+ description:
+ - Access the config database directly, bypassing server. If direct is
+ specified then the config_source must be specified as well.
+ See man gconftool-2(1)
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = """
+- name: Change the widget font to "Serif 12"
+ community.general.gconftool2:
+ key: "/desktop/gnome/interface/font_name"
+ value_type: "string"
+ value: "Serif 12"
+"""
+
+RETURN = '''
+ key:
+ description: The key specified in the module parameters
+ returned: success
+ type: str
+ sample: /desktop/gnome/interface/font_name
+ value_type:
+ description: The type of the value that was changed
+ returned: success
+ type: str
+ sample: string
+ value:
+ description: The value of the preference key after executing the module
+ returned: success
+ type: str
+ sample: "Serif 12"
+...
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class GConf2Preference(object):
+ def __init__(self, ansible, key, value_type, value,
+ direct=False, config_source=""):
+ self.ansible = ansible
+ self.key = key
+ self.value_type = value_type
+ self.value = value
+ self.config_source = config_source
+ self.direct = direct
+
+ def value_already_set(self):
+ return False
+
+ def call(self, call_type, fail_onerr=True):
+ """ Helper function to perform gconftool-2 operations """
+ config_source = ''
+ direct = ''
+ changed = False
+ out = ''
+
+ # If the configuration source is different from the default, create
+ # the argument
+ if self.config_source is not None and len(self.config_source) > 0:
+ config_source = "--config-source " + self.config_source
+
+ # If direct is true, create the argument
+ if self.direct:
+ direct = "--direct"
+
+ # Execute the call
+ cmd = "gconftool-2 "
+ try:
+ # If the call is "get", then we don't need as many parameters and
+ # we can ignore some
+ if call_type == 'get':
+ cmd += "--get {0}".format(self.key)
+ # Otherwise, we will use all relevant parameters
+ elif call_type == 'set':
+ cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
+ config_source,
+ self.value_type,
+ call_type,
+ self.key,
+ self.value)
+ elif call_type == 'unset':
+ cmd += "--unset {0}".format(self.key)
+
+ # Start external command
+ rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
+
+ if len(err) > 0:
+ if fail_onerr:
+ self.ansible.fail_json(msg='gconftool-2 failed with '
+ 'error: %s' % (str(err)))
+ else:
+ changed = True
+
+ except OSError as exception:
+ self.ansible.fail_json(msg='gconftool-2 failed with exception: '
+ '%s' % exception)
+ return changed, out.rstrip()
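+
+# Illustrative sketch (editor's note): for the key/value from the EXAMPLES block, call()
+# assembles command lines roughly like (with direct=True and <source> a placeholder):
+#   call('get')   -> gconftool-2 --get /desktop/gnome/interface/font_name
+#   call('set')   -> gconftool-2 --direct --config-source <source> --type string
+#                    --set /desktop/gnome/interface/font_name "Serif 12"
+#   call('unset') -> gconftool-2 --unset /desktop/gnome/interface/font_name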
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ key=dict(type='str', required=True),
+ value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+ value=dict(type='str'),
+ state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ direct=dict(type='bool', default=False),
+ config_source=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+ # Assign module values to dictionary values
+ key = module.params['key']
+ value_type = module.params['value_type']
+ # Normalize boolean-like values; guard against 'value' being omitted (e.g. state=get).
+ value = module.params['value']
+ if value is not None and value.lower() == "true":
+ value = "true"
+ elif value is not None and value.lower() == "false":
+ value = "false"
+
+ state = state_values[module.params['state']]
+ direct = module.params['direct']
+ config_source = module.params['config_source']
+
+ # Initialize some variables for later
+ change = False
+ new_value = ''
+
+ if state != "get":
+ if value is None or value == "":
+ module.fail_json(msg='State %s requires "value" to be set'
+ % str(state))
+ elif value_type is None or value_type == "":
+ module.fail_json(msg='State %s requires "value_type" to be set'
+ % str(state))
+
+ if direct and config_source is None:
+ module.fail_json(msg='If "direct" is "yes" then the ' +
+ '"config_source" must be specified')
+ elif not direct and config_source is not None:
+ module.fail_json(msg='If the "config_source" is specified ' +
+ 'then "direct" must be "yes"')
+
+ # Create a gconf2 preference
+ gconf_pref = GConf2Preference(module, key, value_type,
+ value, direct, config_source)
+ # Now we get the current value, if not found don't fail
+ _, current_value = gconf_pref.call("get", fail_onerr=False)
+
+ # Check if the current value equals the value we want to set. If not, make
+ # a change
+ if current_value != value:
+ # If check mode, we know a change would have occurred.
+ if module.check_mode:
+ # So we will set the change to True
+ change = True
+ # And set the new_value to the value that would have been set
+ new_value = value
+ # If not check mode make the change.
+ else:
+ change, new_value = gconf_pref.call(state)
+ # If the value we want to set is the same as the current_value, we will
+ # set the new_value to the current_value for reporting
+ else:
+ new_value = current_value
+
+ facts = dict(gconftool2={'changed': change,
+ 'key': key,
+ 'value_type': value_type,
+ 'new_value': new_value,
+ 'previous_value': current_value,
+ 'playbook_value': module.params['value']})
+
+ module.exit_json(changed=change, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py
new file mode 100644
index 00000000..ee564ae0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_backend_service.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: gcp_backend_service
+short_description: Create or Destroy a Backend Service.
+description:
+ - Create or Destroy a Backend Service. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/backend-service) for an overview.
+ Full install/configuration instructions for the Google Cloud modules can
+ be found in the comments of ansible/test/gce_tests.py.
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 1.3.0"
+notes:
+ - Update is not currently supported.
+ - Only global backend services are currently supported; regional backend services are not.
+ - Internal load balancing is not currently supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_backend_service) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ backend_service_name:
+ type: str
+ description:
+ - Name of the Backend Service.
+ required: true
+ backends:
+ type: list
+ description:
+ - List of backends that make up the backend service. A backend is made up of
+ an instance group and optionally several other parameters. See
+ U(https://cloud.google.com/compute/docs/reference/latest/backendServices)
+ for details.
+ required: true
+ healthchecks:
+ type: list
+ description:
+ - List of healthchecks. Only one healthcheck is supported.
+ required: true
+ enable_cdn:
+ description:
+ - If true, enable Cloud CDN for this Backend Service.
+ type: bool
+ port_name:
+ type: str
+ description:
+ - Name of the port on the managed instance group (MIG) that backend
+ services can forward data to. Required for external load balancing.
+ protocol:
+ type: str
+ description:
+ - The protocol this Backend Service uses to communicate with backends.
+ Possible values are HTTP, HTTPS, TCP, and SSL. The default is TCP.
+ choices: [HTTP, HTTPS, TCP, SSL]
+ default: TCP
+ required: false
+ timeout:
+ type: int
+ description:
+ - How many seconds to wait for the backend before considering it a failed
+ request. Default is 30 seconds. Valid range is 1-86400.
+ required: false
+ service_account_email:
+ type: str
+ description:
+ - Service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email.
+ pem_file:
+ type: str
+ description:
+ - Path to the PEM file associated with the service account email.
+ project_id:
+ type: str
+ description:
+ - GCE project ID.
+ state:
+ type: str
+ description:
+ - Desired state of the resource
+ default: "present"
+ choices: ["absent", "present"]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum Backend Service
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+
+- name: Create BES with extended backend parameters
+ community.general.gcp_backend_service:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ backend_service_name: "{{ bes }}"
+ backends:
+ - instance_group: managed_instance_group_1
+ max_utilization: 0.6
+ max_rate: 10
+ - instance_group: managed_instance_group_2
+ max_utilization: 0.5
+ max_rate: 4
+ healthchecks:
+ - healthcheck_name_for_backend_service
+ port_name: myhttpport
+ state: present
+ timeout: 60
+'''
+
+RETURN = '''
+backend_service_created:
+ description: Indicator Backend Service was created.
+ returned: When a Backend Service is created.
+ type: bool
+ sample: "True"
+backend_service_deleted:
+ description: Indicator Backend Service was deleted.
+ returned: When a Backend Service is deleted.
+ type: bool
+ sample: "True"
+backend_service_name:
+ description: Name of the Backend Service.
+ returned: Always.
+ type: str
+ sample: "my-backend-service"
+backends:
+ description: List of backends (comprised of instance_group) that
+ make up a Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ { 'instance_group': 'mig_one', 'zone': 'us-central1-b'} ]"
+enable_cdn:
+ description: If Cloud CDN is enabled. null if not set.
+ returned: When a backend service exists.
+ type: bool
+ sample: "True"
+healthchecks:
+ description: List of healthchecks applied to the Backend Service.
+ returned: When a Backend Service exists.
+ type: list
+ sample: "[ 'my-healthcheck' ]"
+protocol:
+ description: Protocol used to communicate with the Backends.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "HTTP"
+port_name:
+ description: Name of Backend Port.
+ returned: When a Backend Service exists.
+ type: str
+ sample: "myhttpport"
+timeout:
+ description: In seconds, how long before a request sent to a backend is
+ considered failed.
+ returned: If specified.
+ type: int
+ sample: "myhttpport"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceInUseError, ResourceNotFoundError
+ from libcloud.compute.drivers.gce import GCEAddress
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gce import gce_connect
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params
+
+
+def _validate_params(params):
+ """
+ Validate backend_service params.
+
+ This function calls _validate_backend_params to verify
+ the backend-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'timeout', 'type': int, 'min': 1, 'max': 86400},
+ ]
+ check_params(params, fields)
+ _validate_backend_params(params['backends'])
+
+ return (True, '')
+
+
+def _validate_backend_params(backends):
+ """
+ Validate configuration for backends.
+
+ :param backends: Ansible dictionary containing backends configuration (only).
+ :type backends: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'balancing_mode', 'type': str, 'values': ['UTILIZATION', 'RATE', 'CONNECTION']},
+ {'name': 'max_utilization', 'type': float},
+ {'name': 'max_connections', 'type': int},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+
+ if not backends:
+ raise ValueError('backends should be a list.')
+
+ for backend in backends:
+ check_params(backend, fields)
+
+ if 'max_rate' in backend and 'max_rate_per_instance' in backend:
+ raise ValueError('max_rate and max_rate_per_instance cannot both be set.')
+
+ return (True, '')
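+
+# Illustrative sketch (editor's note): a 'backends' value of the shape these checks expect,
+# mirroring the extended task in EXAMPLES (instance group names are placeholders):
+#   [{'instance_group': 'managed_instance_group_1', 'balancing_mode': 'RATE', 'max_rate': 10},
+#    {'instance_group': 'managed_instance_group_2', 'max_utilization': 0.5}]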
+
+
+def get_backend_service(gce, name):
+ """
+ Get a Backend Service from GCE.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param name: Name of the Backend Service.
+ :type name: ``str``
+
+ :return: A GCEBackendService object or None.
+ :rtype: :class: `GCEBackendService` or None
+ """
+ try:
+ # Does the Backend Service already exist?
+ return gce.ex_get_backendservice(name=name)
+
+ except ResourceNotFoundError:
+ return None
+
+
+def get_healthcheck(gce, name):
+ return gce.ex_get_healthcheck(name)
+
+
+def get_instancegroup(gce, name, zone=None):
+ return gce.ex_get_instancegroup(name=name, zone=zone)
+
+
+def create_backend_service(gce, params):
+ """
+ Create a new Backend Service.
+
+ :param gce: An initialized GCE driver object.
+ :type gce: :class: `GCENodeDriver`
+
+ :param params: Dictionary of parameters needed by the module.
+ :type params: ``dict``
+
+ :return: Tuple with changed stats
+ :rtype: tuple in the format of (bool, bool)
+ """
+ from copy import deepcopy
+
+ changed = False
+ return_data = False
+ # only one healthcheck is currently supported
+ hc_name = params['healthchecks'][0]
+ hc = get_healthcheck(gce, hc_name)
+ backends = []
+ for backend in params['backends']:
+ ig = get_instancegroup(gce, backend['instance_group'],
+ backend.get('zone', None))
+ kwargs = deepcopy(backend)
+ kwargs['instance_group'] = ig
+ backends.append(gce.ex_create_backend(
+ **kwargs))
+
+ bes = gce.ex_create_backendservice(
+ name=params['backend_service_name'], healthchecks=[hc], backends=backends,
+ enable_cdn=params['enable_cdn'], port_name=params['port_name'],
+ timeout_sec=params['timeout'], protocol=params['protocol'])
+
+ if bes:
+ changed = True
+ return_data = True
+
+ return (changed, return_data)
+
+
+def delete_backend_service(bes):
+ """
+ Delete a Backend Service. The Instance Groups are NOT destroyed.
+ """
+ changed = False
+ return_data = False
+ if bes.destroy():
+ changed = True
+ return_data = True
+ return (changed, return_data)
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ backends=dict(type='list', required=True),
+ backend_service_name=dict(required=True),
+ healthchecks=dict(type='list', required=True),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ enable_cdn=dict(type='bool'),
+ port_name=dict(type='str'),
+ protocol=dict(type='str', default='TCP',
+ choices=['HTTP', 'HTTPS', 'SSL', 'TCP']),
+ timeout=dict(type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.')
+
+ gce = gce_connect(module)
+ if not hasattr(gce, 'ex_create_instancegroupmanager'):
+ module.fail_json(
+ msg='libcloud with GCE Backend Service support (1.3+) required for this module.',
+ changed=False)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['backend_service_name'] = module.params.get('backend_service_name')
+ params['backends'] = module.params.get('backends')
+ params['healthchecks'] = module.params.get('healthchecks')
+ params['enable_cdn'] = module.params.get('enable_cdn', None)
+ params['port_name'] = module.params.get('port_name', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['timeout'] = module.params.get('timeout', None)
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+ module.fail_json(msg=str(e), changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ bes = get_backend_service(gce, params['backend_service_name'])
+
+ if not bes:
+ if params['state'] == 'absent':
+ # Doesn't exist and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown backend service: %s" %
+ (params['backend_service_name']))
+ else:
+ # Create
+ (changed, json_output['backend_service_created']) = create_backend_service(gce,
+ params)
+ elif params['state'] == 'absent':
+ # Delete
+ (changed, json_output['backend_service_deleted']) = delete_backend_service(bes)
+ else:
+ # TODO(supertom): Add update support when it is available in libcloud.
+ changed = False
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py
new file mode 100644
index 00000000..56dbfa7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_forwarding_rule.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_forwarding_rule
+short_description: Create, Update or Destroy a Forwarding_Rule.
+description:
+ - Create, Update or Destroy a Forwarding_Rule. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Global Forwarding_Rule API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules)
+ More details on the Forwarding Rules API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/forwardingRules)
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_forwarding_rule) or M(google.cloud.gcp_compute_global_forwarding_rule) instead.
+notes:
+ - Currently only supports global forwarding rules.
+ As such, Load Balancing Scheme is always EXTERNAL.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ address:
+ type: str
+ description:
+ - IPv4 or named IP address. Must be of the same scope (regional, global).
+ Reserved addresses can (and probably should) be used for global
+ forwarding rules. You may reserve IPs from the console or
+ via the gce_eip module.
+ required: false
+ forwarding_rule_name:
+ type: str
+ description:
+ - Name of the Forwarding_Rule.
+ required: true
+ port_range:
+ type: str
+ description:
+ - For global forwarding rules, must be set to 80 or 8080 for TargetHttpProxy, and
+ 443 for TargetHttpsProxy or TargetSslProxy.
+ required: false
+ protocol:
+ type: str
+ description:
+ - For global forwarding rules the GCP API accepts TCP, UDP, ESP, AH, SCTP or ICMP, but this module currently only accepts TCP (the default).
+ required: false
+ choices: [TCP]
+ default: TCP
+ region:
+ type: str
+ description:
+ - The region for this forwarding rule. Currently, only 'global' is supported.
+ required: true
+ state:
+ type: str
+ description:
+ - The state of the Forwarding Rule, either C(present) or C(absent).
+ required: true
+ choices: ["present", "absent"]
+ target:
+ type: str
+ description:
+ - Target resource for forwarding rule. For global proxy, this is a Global
+ TargetProxy resource. Required for external load balancing (including Global load balancing)
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ load_balancing_scheme:
+ type: str
+ choices: [EXTERNAL]
+ default: EXTERNAL
+ description:
+ - Load balancing scheme. At the moment the only choice is EXTERNAL.
+'''
+
+EXAMPLES = '''
+- name: Create Minimum GLOBAL Forwarding_Rule
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ region: global
+ target: my-target-proxy
+ state: present
+
+- name: Create Forwarding_Rule w/reserved static address
+ community.general.gcp_forwarding_rule:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ forwarding_rule_name: my-forwarding_rule
+ protocol: TCP
+ port_range: 80
+ address: my-reserved-static-address-name
+ region: global
+ target: my-target-proxy
+ state: present
+'''
+
+RETURN = '''
+forwarding_rule_name:
+ description: Name of the Forwarding_Rule
+ returned: Always
+ type: str
+  sample: my-forwarding_rule
+forwarding_rule:
+ description: GCP Forwarding_Rule dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-forwarding_rule", "target": "..." }
+region:
+ description: Region for Forwarding Rule.
+ returned: Always
+  type: str
+  sample: global
+state:
+ description: state of the Forwarding_Rule
+ returned: Always.
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-forwarding_rule'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_global_forwarding_rule_dict(params, project_id=None):
+ """
+    Reformat Ansible params into a GCP Global Forwarding Rule dict.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'forwarding_rule_name')
+ if 'target' in gcp_dict:
+ gcp_dict['target'] = '%s/global/targetHttpProxies/%s' % (url,
+ gcp_dict['target'])
+ if 'address' in gcp_dict:
+ gcp_dict['IPAddress'] = '%s/global/addresses/%s' % (url,
+ gcp_dict['address'])
+ del gcp_dict['address']
+ if 'protocol' in gcp_dict:
+ gcp_dict['IPProtocol'] = gcp_dict['protocol']
+ del gcp_dict['protocol']
+ return gcp_dict
+
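+# Illustrative sketch for _build_global_forwarding_rule_dict above (comment only, not
+# executed): assuming GCPUtils.params_to_gcp_dict camelCases keys and uses
+# 'forwarding_rule_name' as the resource name, params such as
+#   {'forwarding_rule_name': 'my-rule', 'target': 'my-proxy', 'address': 'my-ip',
+#    'protocol': 'TCP', 'port_range': '80'}
+# would roughly become
+#   {'name': 'my-rule', 'portRange': '80', 'IPProtocol': 'TCP',
+#    'target': '<api base>/global/targetHttpProxies/my-proxy',
+#    'IPAddress': '<api base>/global/addresses/my-ip'}
+# where '<api base>' stands for whatever GCPUtils.build_googleapi_url(project_id) returns.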
+
+def get_global_forwarding_rule(client, name, project_id=None):
+ """
+ Get a Global Forwarding Rule from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.globalForwardingRules().get(
+ project=project_id, forwardingRule=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_global_forwarding_rule(client, params, project_id):
+ """
+ Create a new Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+ try:
+ req = client.globalForwardingRules().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_global_forwarding_rule(client, name, project_id):
+ """
+ Delete a Global Forwarding Rule.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.globalForwardingRules().delete(
+ project=project_id, forwardingRule=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_global_forwarding_rule(client, forwarding_rule, params, name, project_id):
+ """
+ Update a Global Forwarding_Rule. Currently, only a target can be updated.
+
+ If the forwarding_rule has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param forwarding_rule: Existing Global Forwarding Rule resource as returned by the API.
+ :type forwarding_rule: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Global Forwarding Rule.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_global_forwarding_rule_dict(params, project_id)
+
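+    # Only the target can be changed via setTarget(), so the update decision below
+    # compares just the 'target' field of the existing rule against the rebuilt dict.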
+ GCPUtils.are_params_equal(forwarding_rule, gcp_dict)
+ if forwarding_rule['target'] == gcp_dict['target']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.globalForwardingRules().setTarget(project=project_id,
+ forwardingRule=name,
+ body={'target': gcp_dict['target']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ forwarding_rule_name=dict(required=True),
+ region=dict(required=True),
+ target=dict(required=False),
+ address=dict(type='str', required=False),
+ protocol=dict(required=False, default='TCP', choices=['TCP']),
+ port_range=dict(required=False),
+ load_balancing_scheme=dict(
+ required=False, default='EXTERNAL', choices=['EXTERNAL']),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['forwarding_rule_name'] = module.params.get('forwarding_rule_name')
+ params['region'] = module.params.get('region')
+ params['target'] = module.params.get('target', None)
+ params['protocol'] = module.params.get('protocol', None)
+ params['port_range'] = module.params.get('port_range')
+ if module.params.get('address', None):
+ params['address'] = module.params.get('address', None)
+
+ if params['region'] != 'global':
+ # This module currently doesn't support regional rules.
+ module.fail_json(
+ msg=("%s - Only global forwarding rules currently supported. "
+ "Be sure to specify 'global' for the region option.") %
+ (params['forwarding_rule_name']))
+
+ changed = False
+ json_output = {'state': params['state']}
+ forwarding_rule = None
+ if params['region'] == 'global':
+ forwarding_rule = get_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ if not forwarding_rule:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown forwarding_rule: %s" %
+ (params['forwarding_rule_name']))
+ else:
+ # Create
+ changed, json_output['forwarding_rule'] = create_global_forwarding_rule(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['forwarding_rule'] = delete_global_forwarding_rule(client,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['forwarding_rule'] = update_global_forwarding_rule(client,
+ forwarding_rule=forwarding_rule,
+ params=params,
+ name=params['forwarding_rule_name'],
+ project_id=conn_params['project_id'])
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py
new file mode 100644
index 00000000..19b28653
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_healthcheck.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_healthcheck
+short_description: Create, Update or Destroy a Healthcheck.
+description:
+ - Create, Update or Destroy a Healthcheck. Currently only HTTP and
+ HTTPS Healthchecks are supported. Healthchecks are used to monitor
+ individual instances, managed instance groups and/or backend
+     services. Healthchecks are reusable.
+ - Visit
+ U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
+ for an overview of Healthchecks on GCP.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
+ API details on HTTP Healthchecks.
+ - See
+ U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
+     for more details on the HTTPS Healthcheck API.
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports HTTP and HTTPS Healthchecks currently.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: >
+ Use M(google.cloud.gcp_compute_health_check), M(google.cloud.gcp_compute_http_health_check) or
+ M(google.cloud.gcp_compute_https_health_check) instead.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ check_interval:
+ type: int
+ description:
+ - How often (in seconds) to send a health check.
+ default: 5
+ healthcheck_name:
+ type: str
+ description:
+ - Name of the Healthcheck.
+ required: true
+ healthcheck_type:
+ type: str
+ description:
+ - Type of Healthcheck.
+ required: true
+ choices: ["HTTP", "HTTPS"]
+ host_header:
+ type: str
+ description:
+ - The value of the host header in the health check request. If left
+ empty, the public IP on behalf of which this health
+ check is performed will be used.
+ default: ""
+ port:
+ type: int
+ description:
+ - The TCP port number for the health check request. The default value is
+ 443 for HTTPS and 80 for HTTP.
+ request_path:
+ type: str
+ description:
+ - The request path of the HTTPS health check request.
+ required: false
+ default: "/"
+ state:
+ type: str
+ description: State of the Healthcheck.
+ choices: ["present", "absent"]
+ default: present
+ timeout:
+ type: int
+ description:
+ - How long (in seconds) to wait for a response before claiming
+ failure. It is invalid for timeout
+ to have a greater value than check_interval.
+ default: 5
+ unhealthy_threshold:
+ type: int
+ description:
+ - A so-far healthy instance will be marked unhealthy after this
+ many consecutive failures.
+ default: 2
+ healthy_threshold:
+ type: int
+ description:
+ - A so-far unhealthy instance will be marked healthy after this
+ many consecutive successes.
+ default: 2
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions (see
+ U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
+ --scopes section for detailed information)
+ - >
+ Available choices are:
+ C(bigquery), C(cloud-platform), C(compute-ro), C(compute-rw),
+ C(useraccounts-ro), C(useraccounts-rw), C(datastore), C(logging-write),
+ C(monitoring), C(sql-admin), C(storage-full), C(storage-ro),
+ C(storage-rw), C(taskqueue), C(userinfo-email).
+ credentials_file:
+ type: str
+ description:
+ - Path to the JSON file associated with the service account email
+ project_id:
+ type: str
+ description:
+ - Your GCP project ID
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+ state: present
+- name: Create HTTP HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: my-healthcheck
+ healthcheck_type: HTTP
+    host_header: my-host
+ request_path: /hc
+ check_interval: 10
+    timeout: 10
+    unhealthy_threshold: 2
+    healthy_threshold: 1
+ state: present
+- name: Create HTTPS HealthCheck
+ community.general.gcp_healthcheck:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ healthcheck_name: "{{ https_healthcheck }}"
+ healthcheck_type: HTTPS
+ host_header: my-host
+ request_path: /hc
+ check_interval: 5
+ timeout: 5
+ unhealthy_threshold: 2
+ healthy_threshold: 1
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Healthcheck
+ returned: Always.
+ type: str
+ sample: present
+healthcheck_name:
+ description: Name of the Healthcheck
+ returned: Always
+ type: str
+  sample: my-healthcheck
+healthcheck_type:
+ description: Type of the Healthcheck
+ returned: Always
+ type: str
+ sample: HTTP
+healthcheck:
+ description: GCP Healthcheck dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-healthcheck'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_healthcheck_params(params):
+ """
+ Validate healthcheck params.
+
+    Simple validation is already handled by AnsibleModule.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ if params['timeout'] > params['check_interval']:
+ raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
+ params['timeout'], params['check_interval']))
+
+ return (True, '')
+
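+# Illustrative behaviour of _validate_healthcheck_params above (comment only, not executed):
+#   {'timeout': 5, 'check_interval': 5}    ->  (True, '')
+#   {'timeout': 30, 'check_interval': 10}  ->  ValueError("timeout (30) is greater than check_interval (10)")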
+
+def _build_healthcheck_dict(params):
+ """
+    Reformat Ansible params into a GCP Healthcheck dict.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :return: dictionary suitable for submission to GCP
+ HealthCheck (HTTP/HTTPS) API.
+    :rtype: ``dict``
+ """
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
+ if 'timeout' in gcp_dict:
+ gcp_dict['timeoutSec'] = gcp_dict['timeout']
+ del gcp_dict['timeout']
+
+ if 'checkInterval' in gcp_dict:
+ gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
+ del gcp_dict['checkInterval']
+
+ if 'hostHeader' in gcp_dict:
+ gcp_dict['host'] = gcp_dict['hostHeader']
+ del gcp_dict['hostHeader']
+
+ if 'healthcheckType' in gcp_dict:
+ del gcp_dict['healthcheckType']
+ return gcp_dict
+
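+# Illustrative sketch for _build_healthcheck_dict above (comment only, not executed):
+# assuming GCPUtils.params_to_gcp_dict camelCases keys and uses 'healthcheck_name' as the
+# resource name, params such as
+#   {'healthcheck_name': 'my-hc', 'host_header': 'my-host', 'request_path': '/hc',
+#    'check_interval': 5, 'timeout': 5}
+# would roughly become
+#   {'name': 'my-hc', 'host': 'my-host', 'requestPath': '/hc',
+#    'checkIntervalSec': 5, 'timeoutSec': 5}
+# matching the renames performed above (timeout -> timeoutSec, checkInterval ->
+# checkIntervalSec, hostHeader -> host, healthcheckType dropped).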
+
+def _get_req_resource(client, resource_type):
+ if resource_type == 'HTTPS':
+ return (client.httpsHealthChecks(), 'httpsHealthCheck')
+ else:
+ return (client.httpHealthChecks(), 'httpHealthCheck')
+
+
+def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
+ """
+ Get a Healthcheck from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.get(**args)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_healthcheck(client, params, project_id, resource_type='HTTP'):
+ """
+ Create a new Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ try:
+ resource, _ = _get_req_resource(client, resource_type)
+ args = {'project': project_id, 'body': gcp_dict}
+ req = resource.insert(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
+ """
+ Delete a Healthcheck.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name}
+ req = resource.delete(**args)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_healthcheck(client, healthcheck, params, name, project_id,
+ resource_type='HTTP'):
+ """
+ Update a Healthcheck.
+
+ If the healthcheck has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param healthcheck: Existing Healthcheck resource as returned by the API.
+ :type healthcheck: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+    :param name: Name of the Healthcheck.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_healthcheck_dict(params)
+ ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
+ try:
+ resource, entity_name = _get_req_resource(client, resource_type)
+ args = {'project': project_id, entity_name: name, 'body': gcp_dict}
+ req = resource.update(**args)
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ healthcheck_name=dict(required=True),
+ healthcheck_type=dict(required=True,
+ choices=['HTTP', 'HTTPS']),
+ request_path=dict(required=False, default='/'),
+ check_interval=dict(required=False, type='int', default=5),
+ healthy_threshold=dict(required=False, type='int', default=2),
+ unhealthy_threshold=dict(required=False, type='int', default=2),
+ host_header=dict(required=False, type='str', default=''),
+ timeout=dict(required=False, type='int', default=5),
+ port=dict(required=False, type='int'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+
+ params['healthcheck_name'] = module.params.get('healthcheck_name')
+ params['healthcheck_type'] = module.params.get('healthcheck_type')
+ params['request_path'] = module.params.get('request_path')
+ params['check_interval'] = module.params.get('check_interval')
+ params['healthy_threshold'] = module.params.get('healthy_threshold')
+ params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
+ params['host_header'] = module.params.get('host_header')
+ params['timeout'] = module.params.get('timeout')
+ params['port'] = module.params.get('port', None)
+ params['state'] = module.params.get('state')
+
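+    # Apply protocol-specific default ports when none is given: 80 for HTTP, 443 for HTTPS.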
+ if not params['port']:
+ params['port'] = 80
+ if params['healthcheck_type'] == 'HTTPS':
+ params['port'] = 443
+ try:
+ _validate_healthcheck_params(params)
+ except Exception as e:
+ module.fail_json(msg=e.message, changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ healthcheck = get_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+
+ if not healthcheck:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown healthcheck: %s" %
+ (params['healthcheck_name']))
+ else:
+ # Create
+ changed, json_output['healthcheck'] = create_healthcheck(client,
+ params=params,
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['healthcheck'] = delete_healthcheck(client,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ else:
+ changed, json_output['healthcheck'] = update_healthcheck(client,
+ healthcheck=healthcheck,
+ params=params,
+ name=params['healthcheck_name'],
+ project_id=conn_params['project_id'],
+ resource_type=params['healthcheck_type'])
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py
new file mode 100644
index 00000000..611cee04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_target_proxy.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_target_proxy
+short_description: Create, Update or Destroy a Target_Proxy.
+description:
+ - Create, Update or Destroy a Target_Proxy. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) for an overview.
+ More details on the Target_Proxy API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies#resource-representations).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_target_http_proxy) instead.
+notes:
+ - Currently only supports global HTTP proxy.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ target_proxy_name:
+ type: str
+ description:
+ - Name of the Target_Proxy.
+ required: true
+ target_proxy_type:
+ type: str
+ description:
+ - Type of Target_Proxy. HTTP, HTTPS or SSL. Only HTTP is currently supported.
+ required: true
+ choices: [HTTP]
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url Map. Required if type is HTTP or HTTPS proxy.
+ required: false
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ pem_file:
+ type: str
+ description:
+      - path to the pem file associated with the service account email.
+      - This option is deprecated. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the target proxy should be in. C(present) or C(absent) are the only valid options.
+ required: true
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimum HTTP Target_Proxy
+ community.general.gcp_target_proxy:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ target_proxy_name: my-target_proxy
+ target_proxy_type: HTTP
+ url_map_name: my-url-map
+ state: present
+'''
+
+RETURN = '''
+state:
+ description: state of the Target_Proxy
+ returned: Always.
+ type: str
+ sample: present
+updated_target_proxy:
+ description: True if the target_proxy has been updated. Will not appear on
+ initial target_proxy creation.
+ returned: if the target_proxy has been updated.
+ type: bool
+ sample: true
+target_proxy_name:
+ description: Name of the Target_Proxy
+ returned: Always
+ type: str
+ sample: my-target-proxy
+target_proxy_type:
+ description: Type of Target_Proxy. One of HTTP, HTTPS or SSL.
+ returned: Always
+ type: str
+ sample: HTTP
+target_proxy:
+ description: GCP Target_Proxy dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-target-proxy", "urlMap": "..." }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import get_google_api_client, GCPUtils
+
+
+USER_AGENT_PRODUCT = 'ansible-target_proxy'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _build_target_proxy_dict(params, project_id=None):
+ """
+    Reformat Ansible params into a GCP Target Proxy dict.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+    :return: dictionary suitable for submission to GCP targetHttpProxies API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'target_proxy_name')
+ if 'urlMap' in gcp_dict:
+ gcp_dict['urlMap'] = '%s/global/urlMaps/%s' % (url,
+ gcp_dict['urlMap'])
+ return gcp_dict
+
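+# Illustrative sketch for _build_target_proxy_dict above (comment only, not executed):
+# assuming GCPUtils.params_to_gcp_dict camelCases keys and uses 'target_proxy_name' as the
+# resource name, params such as {'target_proxy_name': 'my-proxy', 'url_map': 'my-url-map'}
+# would roughly become
+#   {'name': 'my-proxy', 'urlMap': '<api base>/global/urlMaps/my-url-map'}
+# where '<api base>' stands for whatever GCPUtils.build_googleapi_url(project_id) returns.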
+
+def get_target_http_proxy(client, name, project_id=None):
+ """
+ Get a Target HTTP Proxy from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ req = client.targetHttpProxies().get(project=project_id,
+ targetHttpProxy=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+
+
+def create_target_http_proxy(client, params, project_id):
+ """
+ Create a new Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+ try:
+ req = client.targetHttpProxies().insert(project=project_id,
+ body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_target_http_proxy(client, name, project_id):
+ """
+ Delete a Target_Proxy.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.targetHttpProxies().delete(
+ project=project_id, targetHttpProxy=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_target_http_proxy(client, target_proxy, params, name, project_id):
+ """
+ Update a HTTP Target_Proxy. Currently only the Url Map can be updated.
+
+ If the target_proxy has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param target_proxy: Existing Target Proxy resource as returned by the API.
+ :type target_proxy: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Target Proxy.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_target_proxy_dict(params, project_id)
+
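+    # Only the url map can be changed via setUrlMap(), so the update decision below
+    # compares just the 'urlMap' field of the existing proxy against the rebuilt dict.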
+ GCPUtils.are_params_equal(target_proxy, gcp_dict)
+ if target_proxy['urlMap'] == gcp_dict['urlMap']:
+ return (False, 'no update necessary')
+
+ try:
+ req = client.targetHttpProxies().setUrlMap(project=project_id,
+ targetHttpProxy=name,
+ body={"urlMap": gcp_dict['urlMap']})
+ return_data = GCPUtils.execute_api_client_req(
+ req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ target_proxy_name=dict(required=True),
+ target_proxy_type=dict(required=True, choices=['HTTP']),
+ url_map_name=dict(required=False),
+ state=dict(required=True, choices=['absent', 'present']),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), )
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['target_proxy_name'] = module.params.get('target_proxy_name')
+ params['target_proxy_type'] = module.params.get('target_proxy_type')
+ params['url_map'] = module.params.get('url_map_name', None)
+
+ changed = False
+ json_output = {'state': params['state']}
+ target_proxy = get_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+
+ if not target_proxy:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown target_proxy: %s" %
+ (params['target_proxy_name']))
+ else:
+ # Create
+ changed, json_output['target_proxy'] = create_target_http_proxy(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['target_proxy'] = delete_target_http_proxy(client,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['target_proxy'] = update_target_http_proxy(client,
+ target_proxy=target_proxy,
+ params=params,
+ name=params['target_proxy_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_target_proxy'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py
new file mode 100644
index 00000000..3fc2c96b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcp_url_map.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# Copyright 2017 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcp_url_map
+short_description: Create, Update or Destroy a Url_Map.
+description:
+ - Create, Update or Destroy a Url_Map. See
+ U(https://cloud.google.com/compute/docs/load-balancing/http/url-map) for an overview.
+ More details on the Url_Map API can be found at
+ U(https://cloud.google.com/compute/docs/reference/latest/urlMaps#resource).
+requirements:
+ - "python >= 2.6"
+ - "google-api-python-client >= 1.6.2"
+ - "google-auth >= 0.9.0"
+ - "google-auth-httplib2 >= 0.0.2"
+notes:
+ - Only supports global Backend Services.
+ - Url_Map tests are not currently supported.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_compute_url_map) instead.
+options:
+ url_map_name:
+ type: str
+ description:
+ - Name of the Url_Map.
+ required: true
+ default_service:
+ type: str
+ description:
+ - Default Backend Service if no host rules match.
+ required: true
+ host_rules:
+ type: list
+ description:
+ - The list of HostRules to use against the URL. Contains
+ a list of hosts and an associated path_matcher.
+ - The 'hosts' parameter is a list of host patterns to match. They
+ must be valid hostnames, except * will match any string of
+ ([a-z0-9-.]*). In that case, * must be the first character
+      and must be followed in the pattern by either '-' or '.'.
+ - The 'path_matcher' parameter is name of the PathMatcher to use
+ to match the path portion of the URL if the hostRule matches the URL's
+ host portion.
+ required: false
+ path_matchers:
+ type: list
+ description:
+ - The list of named PathMatchers to use against the URL. Contains
+ path_rules, which is a list of paths and an associated service. A
+ default_service can also be specified for each path_matcher.
+    - The 'name' parameter is the name by which this path_matcher is referred
+      to by the host_rule.
+ - The 'default_service' parameter is the name of the
+ BackendService resource. This will be used if none of the path_rules
+ defined by this path_matcher is matched by the URL's path portion.
+ - The 'path_rules' parameter is a list of dictionaries containing a
+ list of paths and a service to direct traffic to. Each path item must
+ start with / and the only place a * is allowed is at the end following
+ a /. The string fed to the path matcher does not include any text after
+ the first ? or #, and those chars are not allowed here.
+ required: false
+ project_id:
+ type: str
+ description:
+ - The Google Cloud Platform project ID to use.
+ pem_file:
+ type: str
+ description:
+ - The path to the PEM file associated with the service account email.
+ - This option is deprecated and may be removed in a future release. Use I(credentials_file) instead.
+ credentials_file:
+ type: str
+ description:
+ - The path to the JSON file associated with the service account email.
+ service_account_email:
+ type: str
+ description:
+ - service account email
+ service_account_permissions:
+ type: list
+ description:
+ - service account permissions
+ state:
+ type: str
+ description: The state the URL map should be in. C(present) or C(absent) are the only valid options.
+ default: present
+ required: false
+ choices: [present, absent]
+'''
+
+EXAMPLES = '''
+- name: Create Minimal Url_Map
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url_map
+ default_service: my-backend-service
+ state: present
+- name: Create UrlMap with pathmatcher
+ community.general.gcp_url_map:
+ service_account_email: "{{ service_account_email }}"
+ credentials_file: "{{ credentials_file }}"
+ project_id: "{{ project_id }}"
+ url_map_name: my-url-map-pm
+ default_service: default-backend-service
+ path_matchers:
+ - name: 'path-matcher-one'
+ description: 'path matcher one'
+ default_service: 'bes-pathmatcher-one-default'
+ path_rules:
+ - service: 'my-one-bes'
+ paths:
+ - '/data'
+ - '/aboutus'
+ host_rules:
+ - hosts:
+ - '*.'
+ path_matcher: 'path-matcher-one'
+ state: "present"
+'''
+
+RETURN = '''
+host_rules:
+ description: List of HostRules.
+ returned: If specified.
+ type: dict
+ sample: [ { hosts: ["*."], "path_matcher": "my-pm" } ]
+path_matchers:
+ description: The list of named PathMatchers to use against the URL.
+ returned: If specified.
+ type: dict
+ sample: [ { "name": "my-pm", "path_rules": [ { "paths": [ "/data" ] } ], "service": "my-service" } ]
+state:
+ description: state of the Url_Map
+ returned: Always.
+ type: str
+ sample: present
+updated_url_map:
+ description: True if the url_map has been updated. Will not appear on
+ initial url_map creation.
+ returned: if the url_map has been updated.
+ type: bool
+ sample: true
+url_map_name:
+ description: Name of the Url_Map
+ returned: Always
+ type: str
+ sample: my-url-map
+url_map:
+ description: GCP Url_Map dictionary
+ returned: Always. Refer to GCP documentation for detailed field descriptions.
+ type: dict
+ sample: { "name": "my-url-map", "hostRules": [...], "pathMatchers": [...] }
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_params, get_google_api_client, GCPUtils
+from ansible.module_utils.six import string_types
+
+
+USER_AGENT_PRODUCT = 'ansible-url_map'
+USER_AGENT_VERSION = '0.0.1'
+
+
+def _validate_params(params):
+ """
+ Validate url_map params.
+
+ This function calls _validate_host_rules_params to verify
+ the host_rules-specific parameters.
+
+ This function calls _validate_path_matchers_params to verify
+ the path_matchers-specific parameters.
+
+ :param params: Ansible dictionary containing configuration.
+ :type params: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'host_rules', 'type': list},
+ {'name': 'path_matchers', 'type': list},
+ ]
+ try:
+ check_params(params, fields)
+ if 'path_matchers' in params and params['path_matchers'] is not None:
+ _validate_path_matcher_params(params['path_matchers'])
+ if 'host_rules' in params and params['host_rules'] is not None:
+ _validate_host_rules_params(params['host_rules'])
+ except Exception:
+ raise
+
+ return (True, '')
+
+
+def _validate_path_matcher_params(path_matchers):
+ """
+ Validate configuration for path_matchers.
+
+ :param path_matchers: Ansible dictionary containing path_matchers
+ configuration (only).
+ :type path_matchers: ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'name', 'type': str, 'required': True},
+ {'name': 'default_service', 'type': str, 'required': True},
+ {'name': 'path_rules', 'type': list, 'required': True},
+ {'name': 'max_rate', 'type': int},
+ {'name': 'max_rate_per_instance', 'type': float},
+ ]
+ pr_fields = [
+ {'name': 'service', 'type': str, 'required': True},
+ {'name': 'paths', 'type': list, 'required': True},
+ ]
+
+ if not path_matchers:
+ raise ValueError(('path_matchers should be a list. %s (%s) provided'
+ % (path_matchers, type(path_matchers))))
+
+ for pm in path_matchers:
+ try:
+ check_params(pm, fields)
+ for pr in pm['path_rules']:
+ check_params(pr, pr_fields)
+ for path in pr['paths']:
+ if not path.startswith('/'):
+ raise ValueError("path for %s must start with /" % (
+ pm['name']))
+ except Exception:
+ raise
+
+ return (True, '')
+
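+# Illustrative sketch of a path_matcher entry accepted above (comment only, not executed):
+#   {'name': 'path-matcher-one', 'default_service': 'bes-pathmatcher-one-default',
+#    'path_rules': [{'service': 'my-one-bes', 'paths': ['/data', '/aboutus']}]}
+# Every path must start with '/', otherwise a ValueError is raised.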
+
+def _validate_host_rules_params(host_rules):
+ """
+ Validate configuration for host_rules.
+
+ :param host_rules: Ansible dictionary containing host_rules
+ configuration (only).
+ :type host_rules ``dict``
+
+ :return: True or raises ValueError
+ :rtype: ``bool`` or `class:ValueError`
+ """
+ fields = [
+ {'name': 'path_matcher', 'type': str, 'required': True},
+ ]
+
+ if not host_rules:
+ raise ValueError('host_rules should be a list.')
+
+ for hr in host_rules:
+ try:
+ check_params(hr, fields)
+ for host in hr['hosts']:
+ if not isinstance(host, string_types):
+ raise ValueError("host in hostrules must be a string")
+ elif '*' in host:
+ if host.index('*') != 0:
+ raise ValueError("wildcard must be first char in host, %s" % (
+ host))
+ else:
+ if host[1] not in ['.', '-', ]:
+                            raise ValueError("wildcard must be followed by a '.' or '-', %s" % (
+ host))
+
+ except Exception:
+ raise
+
+ return (True, '')
+
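+# Illustrative examples of the host rules enforced above (comment only, not executed):
+#   {'hosts': ['*.example.com'], 'path_matcher': 'pm1'}   ->  valid (wildcard first, then '.')
+#   {'hosts': ['www.example.com'], 'path_matcher': 'pm1'}  ->  valid (no wildcard)
+#   {'hosts': ['foo*'], 'path_matcher': 'pm1'}             ->  ValueError (wildcard not first)
+#   {'hosts': ['*example'], 'path_matcher': 'pm1'}         ->  ValueError ('*' not followed by '.' or '-')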
+
+def _build_path_matchers(path_matcher_list, project_id):
+ """
+ Reformat services in path matchers list.
+
+ Specifically, builds out URLs.
+
+    :param path_matcher_list: List of path matcher dictionaries.
+ :type path_matcher_list: ``list`` of ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: list suitable for submission to GCP
+ UrlMap API Path Matchers list.
+    :rtype: ``list`` of ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ for pm in path_matcher_list:
+ if 'defaultService' in pm:
+ pm['defaultService'] = '%s/global/backendServices/%s' % (url,
+ pm['defaultService'])
+ if 'pathRules' in pm:
+ for rule in pm['pathRules']:
+ if 'service' in rule:
+ rule['service'] = '%s/global/backendServices/%s' % (url,
+ rule['service'])
+ return path_matcher_list
+
+
+def _build_url_map_dict(params, project_id=None):
+ """
+ Reformat services in Ansible Params.
+
+ :param params: Params from AnsibleModule object
+ :type params: ``dict``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: dictionary suitable for submission to GCP UrlMap API.
+    :rtype: ``dict``
+ """
+ url = ''
+ if project_id:
+ url = GCPUtils.build_googleapi_url(project_id)
+ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'url_map_name')
+ if 'defaultService' in gcp_dict:
+ gcp_dict['defaultService'] = '%s/global/backendServices/%s' % (url,
+ gcp_dict['defaultService'])
+ if 'pathMatchers' in gcp_dict:
+ gcp_dict['pathMatchers'] = _build_path_matchers(gcp_dict['pathMatchers'], project_id)
+
+ return gcp_dict
+
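+# Illustrative sketch for _build_url_map_dict above (comment only, not executed): assuming
+# GCPUtils.params_to_gcp_dict camelCases keys and uses 'url_map_name' as the resource name,
+# params such as {'url_map_name': 'my-url-map', 'default_service': 'my-bes'}
+# would roughly become
+#   {'name': 'my-url-map', 'defaultService': '<api base>/global/backendServices/my-bes'}
+# and any pathMatchers entries get their defaultService/service values expanded the same
+# way by _build_path_matchers above.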
+
+def get_url_map(client, name, project_id=None):
+ """
+ Get a Url_Map from GCP.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: A dict resp from the respective GCP 'get' request.
+ :rtype: ``dict``
+ """
+ try:
+ req = client.urlMaps().get(project=project_id, urlMap=name)
+ return GCPUtils.execute_api_client_req(req, raise_404=False)
+ except Exception:
+ raise
+
+
+def create_url_map(client, params, project_id):
+ """
+ Create a new Url_Map.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+ try:
+ req = client.urlMaps().insert(project=project_id, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
+ if not return_data:
+ return_data = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=project_id)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def delete_url_map(client, name, project_id):
+ """
+ Delete a Url_Map.
+
+    :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ try:
+ req = client.urlMaps().delete(project=project_id, urlMap=name)
+ return_data = GCPUtils.execute_api_client_req(req, client)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def update_url_map(client, url_map, params, name, project_id):
+ """
+ Update a Url_Map.
+
+ If the url_map has not changed, the update will not occur.
+
+ :param client: An initialized GCE Compute Discovery resource.
+ :type client: :class: `googleapiclient.discovery.Resource`
+
+    :param url_map: Existing Url Map resource as returned by the API.
+ :type url_map: ``dict``
+
+ :param params: Dictionary of arguments from AnsibleModule.
+ :type params: ``dict``
+
+ :param name: Name of the Url Map.
+ :type name: ``str``
+
+ :param project_id: The GCP project ID.
+ :type project_id: ``str``
+
+ :return: Tuple with changed status and response dict
+ :rtype: ``tuple`` in the format of (bool, dict)
+ """
+ gcp_dict = _build_url_map_dict(params, project_id)
+
+ ans = GCPUtils.are_params_equal(url_map, gcp_dict)
+ if ans:
+ return (False, 'no update necessary')
+
+ gcp_dict['fingerprint'] = url_map['fingerprint']
+ try:
+ req = client.urlMaps().update(project=project_id,
+ urlMap=name, body=gcp_dict)
+ return_data = GCPUtils.execute_api_client_req(req, client=client, raw=False)
+ return (True, return_data)
+ except Exception:
+ raise
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ url_map_name=dict(required=True),
+ state=dict(choices=['absent', 'present'], default='present'),
+ default_service=dict(required=True),
+ path_matchers=dict(type='list', required=False),
+ host_rules=dict(type='list', required=False),
+ service_account_email=dict(),
+ service_account_permissions=dict(type='list'),
+ pem_file=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ), required_together=[
+ ['path_matchers', 'host_rules'], ])
+
+ client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
+ user_agent_version=USER_AGENT_VERSION)
+
+ params = {}
+ params['state'] = module.params.get('state')
+ params['url_map_name'] = module.params.get('url_map_name')
+ params['default_service'] = module.params.get('default_service')
+ if module.params.get('path_matchers'):
+ params['path_matchers'] = module.params.get('path_matchers')
+ if module.params.get('host_rules'):
+ params['host_rules'] = module.params.get('host_rules')
+
+ try:
+ _validate_params(params)
+ except Exception as e:
+ module.fail_json(msg=e.message, changed=False)
+
+ changed = False
+ json_output = {'state': params['state']}
+ url_map = get_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+
+ if not url_map:
+ if params['state'] == 'absent':
+ # Doesn't exist in GCE, and state==absent.
+ changed = False
+ module.fail_json(
+ msg="Cannot delete unknown url_map: %s" %
+ (params['url_map_name']))
+ else:
+ # Create
+ changed, json_output['url_map'] = create_url_map(client,
+ params=params,
+ project_id=conn_params['project_id'])
+ elif params['state'] == 'absent':
+ # Delete
+ changed, json_output['url_map'] = delete_url_map(client,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ else:
+ changed, json_output['url_map'] = update_url_map(client,
+ url_map=url_map,
+ params=params,
+ name=params['url_map_name'],
+ project_id=conn_params['project_id'])
+ json_output['updated_url_map'] = changed
+
+ json_output['changed'] = changed
+ json_output.update(params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py
new file mode 100644
index 00000000..de257503
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcpubsub
+short_description: Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub
+description:
+ - Create and Delete Topics/Subscriptions, Publish and pull messages on PubSub.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+requirements:
+ - google-auth >= 0.5.0
+ - google-cloud-pubsub >= 0.22.0
+notes:
+ - Subscription pull happens before publish. You cannot publish and pull in the same task.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name.
+ - Only the name, not the full path, is required.
+ required: yes
+ subscription:
+ type: dict
+ description:
+ - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+        For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediately are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
+ suboptions:
+ name:
+ description:
+ - Subfield of subscription. Required if subscription is specified. See examples.
+ ack_deadline:
+ description:
+ - Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
+ pull:
+ description:
+ - Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the
+ provided subscription name. max_messages (int; default None; max number of messages to pull),
+ message_ack (bool; default False; acknowledge the message) and return_immediately
+ (bool; default True, don't wait for messages to appear). If the messages are acknowledged,
+ changed is set to True, otherwise, changed is False.
+ push_endpoint:
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ publish:
+ type: list
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ state:
+ type: str
+ description:
+ - State of the topic or queue.
+ - Applies to the most granular resource.
+      - If subscription is specified, we remove it.
+ - If only topic is specified, that is what is removed.
+ - NOTE - A topic can be removed without first removing the subscription.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+# Messages will be pushed; there is no check to see if the message was pushed before.
+- name: Create a topic and publish a message to it
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: present
+
+# Subscriptions associated with topic are not deleted.
+- name: Delete Topic
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ state: absent
+
+# Setting state to absent would keep the messages from being sent.
+- name: Publish multiple messages, with attributes (key:value available with the message)
+ community.general.gcpubsub:
+ topic: '{{ topic_name }}'
+ state: present
+ publish:
+ - message: this is message 1
+ attributes:
+ mykey1: myvalue
+ mykey2: myvalu2
+ mykey3: myvalue3
+ - message: this is message 2
+ attributes:
+ server: prod
+ sla: "99.9999"
+ owner: fred
+
+- name: Create Subscription (pull)
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+ state: present
+
+# pull is default, ack_deadline is not required
+- name: Create Subscription with ack_deadline and push endpoint
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+ ack_deadline: "60"
+ push_endpoint: http://pushendpoint.example.com
+ state: present
+
+# Setting push_endpoint to "None" converts subscription to pull.
+- name: Subscription change from push to pull
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: mysub
+ push_endpoint: "None"
+
+# Topic will not be deleted.
+- name: Delete subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+      name: mysub
+    state: absent
+
+# only pull keyword is required.
+- name: Pull messages from subscription
+ community.general.gcpubsub:
+ topic: ansible-topic-example
+ subscription:
+ name: ansible-topic-example-sub
+ pull:
+ message_ack: yes
+ max_messages: "100"
+'''
+
+RETURN = '''
+publish:
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
+ returned: Only when specified
+ type: list
+ sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
+
+pulled_messages:
+ description: list of dictionaries containing message info. Fields are ack_id, attributes, data, message_id.
+ returned: Only when subscription.pull is specified
+ type: list
+ sample: [{ "ack_id": "XkASTCcYREl...","attributes": {"key1": "val1",...}, "data": "this is message 1", "message_id": "49107464153705"},..]
+
+state:
+ description: The state of the topic or subscription. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+subscription:
+ description: Name of subscription.
+ returned: When subscription fields are specified
+ type: str
+ sample: "mysubscription"
+
+topic:
+ description: Name of topic.
+ returned: Always
+ type: str
+ sample: "mytopic"
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+CLOUD_CLIENT = 'google-cloud-pubsub'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.22.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-pubsub-0.1'
+
+
+def publish_messages(message_list, topic):
+ with topic.batch() as batch:
+ for message in message_list:
+ msg = message['message']
+ attrs = {}
+ if 'attributes' in message:
+ attrs = message['attributes']
+ batch.publish(bytes(msg), **attrs)
+ return True
+
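+# Illustrative sketch for publish_messages above (comment only, not executed): it expects a
+# list of dicts, each with a required 'message' and optional 'attributes', e.g.
+#   [{'message': 'this is message 1', 'attributes': {'mykey1': 'myvalue'}},
+#    {'message': 'this is message 2'}]
+# mirroring the 'publish' examples in the module documentation above.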
+
+def pull_messages(pull_params, sub):
+ """
+ :rtype: tuple (output, changed)
+ """
+ changed = False
+ max_messages = pull_params.get('max_messages', None)
+ message_ack = pull_params.get('message_ack', 'no')
+ return_immediately = pull_params.get('return_immediately', False)
+
+ output = []
+ pulled = sub.pull(return_immediately=return_immediately, max_messages=max_messages)
+
+ for ack_id, msg in pulled:
+ msg_dict = {'message_id': msg.message_id,
+ 'attributes': msg.attributes,
+ 'data': msg.data,
+ 'ack_id': ack_id}
+ output.append(msg_dict)
+
+ if message_ack:
+ ack_ids = [m['ack_id'] for m in output]
+ if ack_ids:
+ sub.acknowledge(ack_ids)
+ changed = True
+ return (output, changed)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ topic=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ publish=dict(type='list'),
+ subscription=dict(type='dict'),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" % (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['publish'] = module.params.get('publish')
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['subscription'] = module.params.get('subscription')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = CLOUD_CLIENT_USER_AGENT
+
+ changed = False
+ json_output = {}
+
+ t = None
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ s = None
+ if mod_params['subscription']:
+ # Note: default ack deadline cannot be changed without deleting/recreating subscription
+ s = t.subscription(mod_params['subscription']['name'],
+ ack_deadline=mod_params['subscription'].get('ack_deadline', None),
+ push_endpoint=mod_params['subscription'].get('push_endpoint', None))
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If subscription is specified
+ # we remove it. If only topic is specified, that is what is removed.
+ # Note that a topic can be removed without first removing the subscription.
+ # TODO(supertom): Enhancement: Provide an option to only delete a topic
+ # if there are no subscriptions associated with it (which the API does not support).
+ if s is not None:
+ if s.exists():
+ s.delete()
+ changed = True
+ else:
+ if t.exists():
+ t.delete()
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not t.exists():
+ t.create()
+ changed = True
+ if s:
+ if not s.exists():
+ s.create()
+ s.reload()
+ changed = True
+ else:
+ # Subscription operations
+ # TODO(supertom): if more 'update' operations arise, turn this into a function.
+ s.reload()
+ push_endpoint = mod_params['subscription'].get('push_endpoint', None)
+ if push_endpoint is not None:
+ if push_endpoint != s.push_endpoint:
+ if push_endpoint == 'None':
+ push_endpoint = None
+ s.modify_push_configuration(push_endpoint=push_endpoint)
+ s.reload()
+ changed = push_endpoint == s.push_endpoint
+
+ if 'pull' in mod_params['subscription']:
+ if s.push_endpoint is not None:
+ module.fail_json(msg="Cannot pull messages, push_endpoint is configured.")
+ (json_output['pulled_messages'], changed) = pull_messages(
+ mod_params['subscription']['pull'], s)
+
+ # publish messages to the topic
+ if mod_params['publish'] and len(mod_params['publish']) > 0:
+ changed = publish_messages(mod_params['publish'], t)
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_facts.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+ - The C(list) state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py
new file mode 100644
index 00000000..dbb8d359
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcpubsub_info.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gcpubsub_info
+short_description: List Topics/Subscriptions and Messages from Google PubSub.
+description:
+ - List Topics/Subscriptions from Google PubSub. Use the gcpubsub module for
+ topic/subscription management.
+ See U(https://cloud.google.com/pubsub/docs) for an overview.
+ - This module was called C(gcpubsub_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python >= 2.6"
+ - "google-auth >= 0.5.0"
+ - "google-cloud-pubsub >= 0.22.0"
+notes:
+ - The C(list) state enables the user to list topics or subscriptions in the project. See examples for details.
+author:
+ - "Tom Melendez (@supertom) <tom@supertom.com>"
+options:
+ topic:
+ type: str
+ description:
+ - GCP pubsub topic name. Only the name, not the full path, is required.
+ required: False
+ view:
+ type: str
+ description:
+ - Choices are 'topics' or 'subscriptions'
+ choices: [topics, subscriptions]
+ default: topics
+ state:
+ type: str
+ description:
+ - list is the only valid option.
+ required: False
+ choices: [list]
+ default: list
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: List all Topics in a project
+ community.general.gcpubsub_info:
+ view: topics
+ state: list
+
+- name: List all Subscriptions in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ state: list
+
+- name: List all Subscriptions for a Topic in a project
+ community.general.gcpubsub_info:
+ view: subscriptions
+ topic: my-topic
+ state: list
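+
+# A hypothetical usage sketch, not part of the original examples: register the output
+# and act on the returned list. The pubsub_info variable name is an assumption.
+- name: List all Topics and show their names
+  community.general.gcpubsub_info:
+    view: topics
+    state: list
+  register: pubsub_info
+
+- name: Show topic names
+  ansible.builtin.debug:
+    var: pubsub_info.topics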
+'''
+
+RETURN = '''
+subscriptions:
+ description: List of subscriptions.
+ returned: When view is set to subscriptions.
+ type: list
+ sample: ["mysubscription", "mysubscription2"]
+topic:
+ description: Name of topic. Used to filter subscriptions.
+ returned: Always
+ type: str
+ sample: "mytopic"
+topics:
+ description: List of topics.
+ returned: When view is set to topics.
+ type: list
+ sample: ["mytopic", "mytopic2"]
+'''
+
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import pubsub
+ HAS_GOOGLE_CLOUD_PUBSUB = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_PUBSUB = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+
+
+def list_func(data, member='name'):
+ """Used for state=list."""
+ return [getattr(x, member) for x in data]
+
+
+def main():
+ module = AnsibleModule(argument_spec=dict(
+ view=dict(choices=['topics', 'subscriptions'], default='topics'),
+ topic=dict(required=False),
+ state=dict(choices=['list'], default='list'),
+ service_account_email=dict(),
+ credentials_file=dict(),
+ project_id=dict(), ),)
+ if module._name in ('gcpubsub_facts', 'community.general.gcpubsub_facts'):
+ module.deprecate("The 'gcpubsub_facts' module has been renamed to 'gcpubsub_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_PUBSUB:
+ module.fail_json(msg="Please install google-cloud-pubsub library.")
+
+ CLIENT_MINIMUM_VERSION = '0.22.0'
+ if not check_min_pkg_version('google-cloud-pubsub', CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install google-cloud-pubsub library version %s" % CLIENT_MINIMUM_VERSION)
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['topic'] = module.params.get('topic')
+ mod_params['view'] = module.params.get('view')
+
+ creds, params = get_google_cloud_credentials(module)
+ pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds, use_gax=False)
+ pubsub_client.user_agent = 'ansible-pubsub-0.1'
+
+ json_output = {}
+ if mod_params['view'] == 'topics':
+ json_output['topics'] = list_func(pubsub_client.list_topics())
+ elif mod_params['view'] == 'subscriptions':
+ if mod_params['topic']:
+ t = pubsub_client.topic(mod_params['topic'])
+ json_output['subscriptions'] = list_func(t.list_subscriptions())
+ else:
+ json_output['subscriptions'] = list_func(pubsub_client.list_subscriptions())
+
+ json_output['changed'] = False
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py
new file mode 100644
index 00000000..e88fc26b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gcspanner.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gcspanner
+short_description: Create and Delete Instances/Databases on Spanner
+description:
+ - Create and Delete Instances/Databases on Spanner.
+ See U(https://cloud.google.com/spanner/docs) for an overview.
+requirements:
+ - python >= 2.6
+ - google-auth >= 0.5.0
+ - google-cloud-spanner >= 0.23.0
+notes:
+ - Changing the configuration on an existing instance is not supported.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Updated modules released with increased functionality
+ alternative: Use M(google.cloud.gcp_spanner_database) and/or M(google.cloud.gcp_spanner_instance) instead.
+author:
+ - Tom Melendez (@supertom) <tom@supertom.com>
+options:
+ configuration:
+ type: str
+ description:
+ - Configuration the instance should use.
+ - Examples are us-central1, asia-east1 and europe-west1.
+ required: yes
+ instance_id:
+ type: str
+ description:
+ - GCP spanner instance name.
+ required: yes
+ database_name:
+ type: str
+ description:
+ - Name of database contained on the instance.
+ force_instance_delete:
+ description:
+ - To delete an instance, this argument must exist and be true (along with state being equal to absent).
+ type: bool
+ default: 'no'
+ instance_display_name:
+ type: str
+ description:
+ - Name of Instance to display.
+ - If not specified, instance_id will be used instead.
+ node_count:
+ type: int
+ description:
+ - Number of nodes in the instance.
+ default: 1
+ state:
+ type: str
+ description:
+ - State of the instance or database. Applies to the most granular resource.
+ - If a C(database_name) is specified we remove it.
+ - If only C(instance_id) is specified, that is what is removed.
+ choices: [ absent, present ]
+ default: present
+ project_id:
+ type: str
+ description:
+ - your GCE project ID
+ credentials_file:
+ type: str
+ description:
+ - path to the JSON file associated with the service account email
+ service_account_email:
+ type: str
+ description:
+ - service account email
+'''
+
+EXAMPLES = '''
+- name: Create instance
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: present
+ node_count: 1
+
+- name: Create database
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ database_name: '{{ database_name }}'
+ state: present
+
+- name: Delete instance (and all databases)
+ community.general.gcspanner:
+ instance_id: '{{ instance_id }}'
+ configuration: '{{ configuration }}'
+ state: absent
+ force_instance_delete: yes
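+
+# A hypothetical sketch, not part of the original examples: raising node_count on an
+# existing instance triggers an in-place update, reported via the updated and
+# previous_values return values.
+- name: Scale an existing instance to three nodes
+  community.general.gcspanner:
+    instance_id: '{{ instance_id }}'
+    configuration: '{{ configuration }}'
+    node_count: 3
+    state: present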
+'''
+
+RETURN = '''
+state:
+ description: The state of the instance or database. Value will be either 'absent' or 'present'.
+ returned: Always
+ type: str
+ sample: "present"
+
+database_name:
+ description: Name of database.
+ returned: When database name is specified
+ type: str
+ sample: "mydatabase"
+
+instance_id:
+ description: Name of instance.
+ returned: Always
+ type: str
+ sample: "myinstance"
+
+previous_values:
+ description: List of dictionaries containing previous values prior to update.
+ returned: When an instance update has occurred and a field has been modified.
+ type: dict
+ sample: "'previous_values': { 'instance': { 'instance_display_name': 'my-instance', 'node_count': 1 } }"
+
+updated:
+ description: Boolean field to denote an update has occurred.
+ returned: When an update has occurred.
+ type: bool
+ sample: True
+'''
+try:
+ from ast import literal_eval
+ HAS_PYTHON26 = True
+except ImportError:
+ HAS_PYTHON26 = False
+
+try:
+ from google.cloud import spanner
+ from google.gax.errors import GaxError
+ HAS_GOOGLE_CLOUD_SPANNER = True
+except ImportError as e:
+ HAS_GOOGLE_CLOUD_SPANNER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, get_google_cloud_credentials
+from ansible.module_utils.six import string_types
+
+
+CLOUD_CLIENT = 'google-cloud-spanner'
+CLOUD_CLIENT_MINIMUM_VERSION = '0.23.0'
+CLOUD_CLIENT_USER_AGENT = 'ansible-spanner-0.1'
+
+
+def get_spanner_configuration_name(config_name, project_name):
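+ # Spanner expects the fully qualified configuration path,
+ # e.g. projects/<project>/instanceConfigs/regional-us-central1.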
+ config_name = 'projects/%s/instanceConfigs/regional-%s' % (project_name,
+ config_name)
+ return config_name
+
+
+def instance_update(instance):
+ """
+ Call update method on spanner client.
+
+ Note: A ValueError exception is thrown despite the client succeeding.
+ So, we validate the node_count and instance_display_name parameters and then
+ ignore the ValueError exception.
+
+ :param instance: a Spanner instance object
+ :type instance: class `google.cloud.spanner.Instance`
+
+ :returns True on success, raises ValueError on type error.
+ :rtype ``bool``
+ """
+ errmsg = ''
+ if not isinstance(instance.node_count, int):
+ errmsg = 'node_count must be an integer %s (%s)' % (
+ instance.node_count, type(instance.node_count))
+ if instance.display_name and not isinstance(instance.display_name,
+ string_types):
+ errmsg = 'instance_display_name must be a string %s (%s)' % (
+ instance.display_name, type(instance.display_name))
+ if errmsg:
+ raise ValueError(errmsg)
+
+ try:
+ instance.update()
+ except ValueError:
+ # The ValueError here is the one we 'expect'.
+ pass
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ database_name=dict(type='str'),
+ configuration=dict(type='str', required=True),
+ node_count=dict(type='int', default=1),
+ instance_display_name=dict(type='str'),
+ force_instance_delete=dict(type='bool', default=False),
+ service_account_email=dict(type='str'),
+ credentials_file=dict(type='str'),
+ project_id=dict(type='str'),
+ ),
+ )
+
+ if not HAS_PYTHON26:
+ module.fail_json(
+ msg="GCE module requires python's 'ast' module, python v2.6+")
+
+ if not HAS_GOOGLE_CLOUD_SPANNER:
+ module.fail_json(msg="Please install google-cloud-spanner.")
+
+ if not check_min_pkg_version(CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION):
+ module.fail_json(msg="Please install %s client version %s" %
+ (CLOUD_CLIENT, CLOUD_CLIENT_MINIMUM_VERSION))
+
+ mod_params = {}
+ mod_params['state'] = module.params.get('state')
+ mod_params['instance_id'] = module.params.get('instance_id')
+ mod_params['database_name'] = module.params.get('database_name')
+ mod_params['configuration'] = module.params.get('configuration')
+ mod_params['node_count'] = module.params.get('node_count', None)
+ mod_params['instance_display_name'] = module.params.get('instance_display_name')
+ mod_params['force_instance_delete'] = module.params.get('force_instance_delete')
+
+ creds, params = get_google_cloud_credentials(module)
+ spanner_client = spanner.Client(project=params['project_id'],
+ credentials=creds,
+ user_agent=CLOUD_CLIENT_USER_AGENT)
+ changed = False
+ json_output = {}
+
+ i = None
+ if mod_params['instance_id']:
+ config_name = get_spanner_configuration_name(
+ mod_params['configuration'], params['project_id'])
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name)
+ d = None
+ if mod_params['database_name']:
+ # TODO(supertom): support DDL
+ ddl_statements = ''
+ d = i.database(mod_params['database_name'], ddl_statements)
+
+ if mod_params['state'] == 'absent':
+ # Remove the most granular resource. If database is specified
+ # we remove it. If only instance is specified, that is what is removed.
+ if d is not None and d.exists():
+ d.drop()
+ changed = True
+ else:
+ if i.exists():
+ if mod_params['force_instance_delete']:
+ i.delete()
+ else:
+ module.fail_json(
+ msg=(("Cannot delete Spanner instance: "
+ "'force_instance_delete' argument not specified")))
+ changed = True
+ elif mod_params['state'] == 'present':
+ if not i.exists():
+ i = spanner_client.instance(mod_params['instance_id'],
+ configuration_name=config_name,
+ display_name=mod_params['instance_display_name'],
+ node_count=mod_params['node_count'] or 1)
+ i.create()
+ changed = True
+ else:
+ # update instance
+ i.reload()
+ inst_prev_vals = {}
+ if i.display_name != mod_params['instance_display_name']:
+ inst_prev_vals['instance_display_name'] = i.display_name
+ i.display_name = mod_params['instance_display_name']
+ if mod_params['node_count']:
+ if i.node_count != mod_params['node_count']:
+ inst_prev_vals['node_count'] = i.node_count
+ i.node_count = mod_params['node_count']
+ if inst_prev_vals:
+ changed = instance_update(i)
+ json_output['updated'] = changed
+ json_output['previous_values'] = {'instance': inst_prev_vals}
+ if d:
+ if not d.exists():
+ d.create()
+ d.reload()
+ changed = True
+
+ json_output['changed'] = changed
+ json_output.update(mod_params)
+ module.exit_json(**json_output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py
new file mode 100644
index 00000000..516c9b0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gem.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+ - Manage installation and uninstallation of Ruby gems.
+options:
+ name:
+ type: str
+ description:
+ - The name of the gem to be managed.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ required: false
+ choices: [present, absent, latest]
+ default: present
+ gem_source:
+ type: path
+ description:
+ - The path to a local gem used as installation source.
+ required: false
+ include_dependencies:
+ description:
+ - Whether to include dependencies or not.
+ required: false
+ type: bool
+ default: "yes"
+ repository:
+ type: str
+ description:
+ - The repository from which the gem will be installed
+ required: false
+ aliases: [source]
+ user_install:
+ description:
+ - Install gem in user's local gems cache or for all users
+ required: false
+ type: bool
+ default: "yes"
+ executable:
+ type: path
+ description:
+ - Override the path to the gem executable
+ required: false
+ install_dir:
+ type: path
+ description:
+ - Install the gems into a specific directory.
+ These gems will be independent of the globally installed ones.
+ Specifying this requires user_install to be false.
+ required: false
+ env_shebang:
+ description:
+ - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+ required: false
+ default: "no"
+ type: bool
+ version:
+ type: str
+ description:
+ - Version of the gem to be installed/removed.
+ required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: "no"
+ type: bool
+ include_doc:
+ description:
+ - Install with or without docs.
+ required: false
+ default: "no"
+ type: bool
+ build_flags:
+ type: str
+ description:
+ - Allow adding build flags for gem compilation
+ required: false
+ force:
+ description:
+ - Force gem to install, bypassing dependency checks.
+ required: false
+ default: "no"
+ type: bool
+author:
+ - "Ansible Core Team"
+ - "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+- name: Install version 1.0 of vagrant
+ community.general.gem:
+ name: vagrant
+ version: 1.0
+ state: present
+
+- name: Install latest available version of rake
+ community.general.gem:
+ name: rake
+ state: latest
+
+- name: Install rake version 1.0 from a local gem on disk
+ community.general.gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
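+
+# A hypothetical sketch, not part of the original examples: installing into a dedicated
+# directory requires user_install to be disabled, as noted for the install_dir option.
+- name: Install rake into /opt/gems, independent of the global gem path
+  community.general.gem:
+    name: rake
+    install_dir: /opt/gems
+    user_install: no
+    state: present
+
+- name: Remove all installed versions of rake
+  community.general.gem:
+    name: rake
+    state: absent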
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_rubygems_path(module):
+ if module.params['executable']:
+ result = module.params['executable'].split(' ')
+ else:
+ result = [module.get_bin_path('gem', True)]
+ return result
+
+
+def get_rubygems_version(module):
+ cmd = get_rubygems_path(module) + ['--version']
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+
+ match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+ if not match:
+ return None
+
+ return tuple(int(x) for x in match.groups())
+
+
+def get_rubygems_environ(module):
+ if module.params['install_dir']:
+ return {'GEM_HOME': module.params['install_dir']}
+ return None
+
+
+def get_installed_versions(module, remote=False):
+
+ cmd = get_rubygems_path(module)
+ cmd.append('query')
+ if remote:
+ cmd.append('--remote')
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ cmd.append('-n')
+ cmd.append('^%s$' % module.params['name'])
+
+ environ = get_rubygems_environ(module)
+ (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
+ installed_versions = []
+ for line in out.splitlines():
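+ # Output lines look like "rake (13.0.6, 12.3.3)" or "rake (default: 13.0.3)";
+ # capture the comma-separated version list inside the parentheses.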
+ match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
+ if match:
+ versions = match.group(1)
+ for version in versions.split(', '):
+ installed_versions.append(version.split()[0])
+ return installed_versions
+
+
+def exists(module):
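+ # For state=latest, resolve the newest remote version first so the check below
+ # looks for that specific version locally.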
+ if module.params['state'] == 'latest':
+ remoteversions = get_installed_versions(module, remote=True)
+ if remoteversions:
+ module.params['version'] = remoteversions[0]
+ installed_versions = get_installed_versions(module)
+ if module.params['version']:
+ if module.params['version'] in installed_versions:
+ return True
+ else:
+ if installed_versions:
+ return True
+ return False
+
+
+def uninstall(module):
+
+ if module.check_mode:
+ return
+ cmd = get_rubygems_path(module)
+ environ = get_rubygems_environ(module)
+ cmd.append('uninstall')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ else:
+ cmd.append('--all')
+ cmd.append('--executable')
+ cmd.append(module.params['name'])
+ module.run_command(cmd, environ_update=environ, check_rc=True)
+
+
+def install(module):
+
+ if module.check_mode:
+ return
+
+ ver = get_rubygems_version(module)
+ if ver:
+ major = ver[0]
+ else:
+ major = None
+
+ cmd = get_rubygems_path(module)
+ cmd.append('install')
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ if not module.params['include_dependencies']:
+ cmd.append('--ignore-dependencies')
+ else:
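+ # RubyGems >= 2 installs dependencies by default, so the flag is only passed to older versions.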
+ if major and major < 2:
+ cmd.append('--include-dependencies')
+ if module.params['user_install']:
+ cmd.append('--user-install')
+ else:
+ cmd.append('--no-user-install')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+ if module.params['pre_release']:
+ cmd.append('--pre')
+ if not module.params['include_doc']:
+ if major and major < 2:
+ cmd.append('--no-rdoc')
+ cmd.append('--no-ri')
+ else:
+ cmd.append('--no-document')
+ if module.params['env_shebang']:
+ cmd.append('--env-shebang')
+ cmd.append(module.params['gem_source'])
+ if module.params['build_flags']:
+ cmd.extend(['--', module.params['build_flags']])
+ if module.params['force']:
+ cmd.append('--force')
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ install_dir=dict(required=False, type='path'),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
+ force=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+ )
+
+ if module.params['version'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot specify version when state=latest")
+ if module.params['gem_source'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+ if module.params['user_install'] and module.params['install_dir']:
+ module.fail_json(msg="install_dir requires user_install=false")
+
+ if not module.params['gem_source']:
+ module.params['gem_source'] = module.params['name']
+
+ changed = False
+
+ if module.params['state'] in ['present', 'latest']:
+ if not exists(module):
+ install(module)
+ changed = True
+ elif module.params['state'] == 'absent':
+ if exists(module):
+ uninstall(module)
+ changed = True
+
+ result = {}
+ result['name'] = module.params['name']
+ result['state'] = module.params['state']
+ if module.params['version']:
+ result['version'] = module.params['version']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py
new file mode 100644
index 00000000..66ef45f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/git_config.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas <marius@pov.lt>
+# (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - Matthew Gamble (@djmattyg007)
+ - Marius Gedminas (@mgedmin)
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The C(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(ansible.builtin.template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope))
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to local, you must
+ also specify the repo parameter. It defaults to system only when
+ not using I(list_all)=yes.
+ choices: [ "local", "global", "system" ]
+ state:
+ description:
+ - "Indicates the setting should be set/unset.
+ This parameter has higher precedence than I(value) parameter:
+ when I(state)=absent and I(value) is defined, I(value) is discarded."
+ choices: [ 'present', 'absent' ]
+ default: 'present'
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+- name: Remove a setting from ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ state: absent
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+- name: Add a setting system-wide
+ community.general.git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: color.ui
+ value: auto
+
+- name: Keep etckeeper from complaining when it is invoked by cron
+ community.general.git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=yes
+ community.general.git_config:
+ name: alias.diffc
+
+- name: Read all values from git config
+ community.general.git_config:
+ list_all: yes
+ scope: global
+
+- name: When list_all is yes and no scope is specified, you get configuration from all scopes
+ community.general.git_config:
+ list_all: yes
+
+- name: Specify a repository to include local settings
+ community.general.git_config:
+ list_all: yes
+ repo: /path/to/repo.git
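+
+# A hypothetical usage sketch, not part of the original examples: read a single value
+# and reuse it later. The git_email variable name is an assumption.
+- name: Read user.email from the global scope
+  community.general.git_config:
+    name: user.email
+    scope: global
+  register: git_email
+
+- name: Show the configured address
+  ansible.builtin.debug:
+    var: git_email.config_value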
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: str
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dict
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git', True)
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['state'] == 'absent':
+ unset = 'unset'
+ params['value'] = None
+ else:
+ unset = None
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value and not unset:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ elif unset and not out:
+ module.exit_json(changed=False, msg='no setting to unset')
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ if unset:
+ args.insert(len(args) - 1, "--" + unset)
+ cmd = ' '.join(args)
+ else:
+ new_value_quoted = shlex_quote(new_value)
+ cmd = ' '.join(args + [new_value_quoted])
+ try: # try using extra parameter from ansible-base 2.10.4 onwards
+ (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False)
+ except TypeError:
+ # @TODO remove try/except when community.general drop support for 2.10.x
+ if not os.path.isdir(dir):
+ module.fail_json(msg="Cannot find directory '{0}'".format(dir))
+ (rc, out, err) = module.run_command(cmd, cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=cmd)
+
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=(new_value or '') + "\n"
+ ),
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py
new file mode 100644
index 00000000..8836454e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_deploy_key.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_deploy_key
+author: "Ali (@bincyber)"
+short_description: Manages deploy keys for GitHub repositories.
+description:
+ - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
+ username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
+ rights on the repository are required."
+options:
+ github_url:
+ description:
+ - The base URL of the GitHub API
+ required: false
+ type: str
+ version_added: '0.2.0'
+ default: https://api.github.com
+ owner:
+ description:
+ - The name of the individual account or organization that owns the GitHub repository.
+ required: true
+ aliases: [ 'account', 'organization' ]
+ repo:
+ description:
+ - The name of the GitHub repository.
+ required: true
+ aliases: [ 'repository' ]
+ name:
+ description:
+ - The name for the deploy key.
+ required: true
+ aliases: [ 'title', 'label' ]
+ key:
+ description:
+ - The SSH public key to add to the repository as a deploy key.
+ required: true
+ read_only:
+ description:
+ - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - The state of the deploy key.
+ default: "present"
+ choices: [ "present", "absent" ]
+ force:
+ description:
+ - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ type: bool
+ default: 'no'
+ username:
+ description:
+ - The username to authenticate with. Should not be set when using a personal access token.
+ password:
+ description:
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ token:
+ description:
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ otp:
+ description:
+ - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
+ aliases: ['2fa_token']
+notes:
+ - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
+'''
+
+EXAMPLES = '''
+- name: Add a new read-only deploy key to a GitHub repository using basic authentication
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Remove an existing deploy key from a GitHub repository
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ force: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+ state: absent
+
+- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ force: yes
+ token: "ABAQDAwXxn7kIMNWzcDfo..."
+
+- name: Re-add a deploy key to a GitHub repository but with a different name
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "replace-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Add a new deploy key to a GitHub repository using 2FA
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key-2"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ username: "johndoe"
+ password: "supersecretpassword"
+ otp: 123456
+
+- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
+ community.general.github_deploy_key:
+ github_url: "https://api.example.com"
+ owner: "janedoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "janedoe"
+ password: "supersecretpassword"
+'''
+
+RETURN = '''
+msg:
+ description: the status message describing what occurred
+ returned: always
+ type: str
+ sample: "Deploy key added successfully"
+
+http_status_code:
+ description: the HTTP status code returned by the GitHub API
+ returned: failed
+ type: int
+ sample: 400
+
+error:
+ description: the error message returned by the GitHub API
+ returned: failed
+ type: str
+ sample: "key is already in use"
+
+id:
+ description: the key identifier assigned by GitHub for the deploy key
+ returned: changed
+ type: int
+ sample: 24381901
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from re import findall
+
+
+class GithubDeployKey(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.github_url = self.module.params['github_url']
+ self.name = module.params['name']
+ self.key = module.params['key']
+ self.state = module.params['state']
+ self.read_only = module.params.get('read_only', True)
+ self.force = module.params.get('force', False)
+ self.username = module.params.get('username', None)
+ self.password = module.params.get('password', None)
+ self.token = module.params.get('token', None)
+ self.otp = module.params.get('otp', None)
+
+ @property
+ def url(self):
+ owner = self.module.params['owner']
+ repo = self.module.params['repo']
+ return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)
+
+ @property
+ def headers(self):
+ if self.username is not None and self.password is not None:
+ self.module.params['url_username'] = self.username
+ self.module.params['url_password'] = self.password
+ self.module.params['force_basic_auth'] = True
+ if self.otp is not None:
+ return {"X-GitHub-OTP": self.otp}
+ elif self.token is not None:
+ return {"Authorization": "token {0}".format(self.token)}
+ else:
+ return None
+
+ def paginate(self, url):
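+ # Follow GitHub's Link-header pagination: keep requesting the rel="next" URL
+ # until no further page is advertised.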
+ while url:
+ resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")
+
+ if info["status"] == 200:
+ yield self.module.from_json(resp.read())
+
+ links = {}
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+ links[y] = x
+
+ url = links.get('next')
+ else:
+ self.handle_error(method="GET", info=info)
+
+ def get_existing_key(self):
+ for keys in self.paginate(self.url):
+ if keys:
+ for i in keys:
+ existing_key_id = str(i["id"])
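+ # Compare only the key type and base64 body; any trailing comment in the
+ # supplied public key is ignored.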
+ if i["key"].split() == self.key.split()[:2]:
+ return existing_key_id
+ elif i['title'] == self.name and self.force:
+ return existing_key_id
+ else:
+ return None
+
+ def add_new_key(self):
+ request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}
+
+ resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+
+ status_code = info["status"]
+
+ if status_code == 201:
+ response_body = self.module.from_json(resp.read())
+ key_id = response_body["id"]
+ self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
+ elif status_code == 422:
+ self.module.exit_json(changed=False, msg="Deploy key already exists")
+ else:
+ self.handle_error(method="POST", info=info)
+
+ def remove_existing_key(self, key_id):
+ resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")
+
+ status_code = info["status"]
+
+ if status_code == 204:
+ if self.state == 'absent':
+ self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
+ else:
+ self.handle_error(method="DELETE", info=info, key_id=key_id)
+
+ def handle_error(self, method, info, key_id=None):
+ status_code = info['status']
+ body = info.get('body')
+ if body:
+ err = self.module.from_json(body)['message']
+
+ if status_code == 401:
+ self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
+ elif status_code == 404:
+ self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
+ else:
+ if method == "GET":
+ self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+ elif method == "POST":
+ self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
+ elif method == "DELETE":
+ self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ github_url=dict(required=False, type='str', default="https://api.github.com"),
+ owner=dict(required=True, type='str', aliases=['account', 'organization']),
+ repo=dict(required=True, type='str', aliases=['repository']),
+ name=dict(required=True, type='str', aliases=['title', 'label']),
+ key=dict(required=True, type='str'),
+ read_only=dict(required=False, type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ force=dict(required=False, type='bool', default=False),
+ username=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True),
+ token=dict(required=False, type='str', no_log=True)
+ ),
+ mutually_exclusive=[
+ ['password', 'token']
+ ],
+ required_together=[
+ ['username', 'password'],
+ ['otp', 'username', 'password']
+ ],
+ required_one_of=[
+ ['username', 'token']
+ ],
+ supports_check_mode=True,
+ )
+
+ deploy_key = GithubDeployKey(module)
+
+ if module.check_mode:
+ key_id = deploy_key.get_existing_key()
+ if deploy_key.state == "present" and key_id is None:
+ module.exit_json(changed=True)
+ elif deploy_key.state == "present" and key_id is not None:
+ module.exit_json(changed=False)
+
+ # to forcefully modify an existing key, the existing key must be deleted first
+ if deploy_key.state == 'absent' or deploy_key.force:
+ key_id = deploy_key.get_existing_key()
+
+ if key_id is not None:
+ deploy_key.remove_existing_key(key_id)
+ elif deploy_key.state == 'absent':
+ module.exit_json(changed=False, msg="Deploy key does not exist")
+
+ if deploy_key.state == "present":
+ deploy_key.add_new_key()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py
new file mode 100644
index 00000000..e326711d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_hooks.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_hooks
+short_description: Manages GitHub service hooks.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Replaced by more granular modules
+ alternative: Use M(community.general.github_webhook) and M(community.general.github_webhook_info) instead.
+description:
+ - Adds service hooks and removes service hooks that have an error status.
+options:
+ user:
+ description:
+ - GitHub username.
+ required: true
+ oauthkey:
+ description:
+ - The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens"
+ required: true
+ repo:
+ description:
+ - >
+ This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:.
+ Note this is different than the normal repo url.
+ required: true
+ hookurl:
+ description:
+ - When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
+ required: false
+ action:
+ description:
+ - This tells the github_hooks module what you want it to do.
+ required: true
+ choices: [ "create", "cleanall", "list", "clean504" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ content_type:
+ description:
+ - Content type to use for requests made to the webhook
+ required: false
+ default: 'json'
+ choices: ['json', 'form']
+
+author: "Phillip Gentry, CX Inc (@pcgentry)"
+'''
+
+EXAMPLES = '''
+- name: Create a new service hook ignoring duplicates
+ community.general.github_hooks:
+ action: create
+ hookurl: http://11.111.111.111:2222
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+
+# Cleaning all hooks for this repo that had an error on the last update.
+# Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+- name: Clean all hooks
+ community.general.github_hooks:
+ action: cleanall
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: '{{ repo }}'
+ delegate_to: localhost
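+
+# A hypothetical usage sketch, not part of the original examples: list the existing
+# hooks and register the raw API response. The hook_list variable name is an assumption.
+- name: List all service hooks for the repository
+  community.general.github_hooks:
+    action: list
+    user: '{{ gituser }}'
+    oauthkey: '{{ oauthkey }}'
+    repo: '{{ repo }}'
+  register: hook_list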
+'''
+
+import json
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes
+
+
+def request(module, url, user, oauthkey, data='', method='GET'):
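+ # Build a Basic auth header from the username and OAuth key for every request.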
+ auth = base64.b64encode(to_bytes('%s:%s' % (user, oauthkey)).replace('\n', ''))
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method=method)
+ return response, info
+
+
+def _list(module, oauthkey, repo, user):
+ url = "%s/hooks" % repo
+ response, info = request(module, url, user, oauthkey)
+ if info['status'] != 200:
+ return False, ''
+ else:
+ return False, response.read()
+
+
+def _clean504(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] == 504:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _cleanall(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] != 200:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _create(module, hookurl, oauthkey, repo, user, content_type):
+ url = "%s/hooks" % repo
+ values = {
+ "active": True,
+ "name": "web",
+ "config": {
+ "url": "%s" % hookurl,
+ "content_type": "%s" % content_type
+ }
+ }
+ data = json.dumps(values)
+ response, info = request(module, url, user, oauthkey, data=data, method='POST')
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+
+def _delete(module, oauthkey, repo, user, hookid):
+ url = "%s/hooks/%s" % (repo, hookid)
+ response, info = request(module, url, user, oauthkey, method='DELETE')
+ return response.read()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
+ hookurl=dict(required=False),
+ oauthkey=dict(required=True, no_log=True),
+ repo=dict(required=True),
+ user=dict(required=True),
+ validate_certs=dict(default=True, type='bool'),
+ content_type=dict(default='json', choices=['json', 'form']),
+ )
+ )
+
+ action = module.params['action']
+ hookurl = module.params['hookurl']
+ oauthkey = module.params['oauthkey']
+ repo = module.params['repo']
+ user = module.params['user']
+ content_type = module.params['content_type']
+
+ if action == "list":
+ (rc, out) = _list(module, oauthkey, repo, user)
+
+ if action == "clean504":
+ (rc, out) = _clean504(module, oauthkey, repo, user)
+
+ if action == "cleanall":
+ (rc, out) = _cleanall(module, oauthkey, repo, user)
+
+ if action == "create":
+ (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py
new file mode 100644
index 00000000..9c4b558b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_issue.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue.
+description:
+ - View GitHub issue for a given repository and organization.
+options:
+ repo:
+ description:
+ - Name of the repository from which the issue needs to be retrieved.
+ required: true
+ organization:
+ description:
+ - Name of the GitHub organization in which the repository is hosted.
+ required: true
+ issue:
+ description:
+ - Issue number for which information is required.
+ required: true
+ action:
+ description:
+ - Get various details about the issue depending upon the action specified.
+ default: 'get_status'
+ choices:
+ - 'get_status'
+author:
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+RETURN = '''
+get_status:
+ description: State of the GitHub issue
+ type: str
+ returned: success
+ sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+ community.general.github_issue:
+ organization: ansible
+ repo: ansible
+ issue: 23642
+ action: get_status
+ register: r
+
+- name: Take action depending upon issue status
+ ansible.builtin.debug:
+ msg: Do something when issue 23642 is open
+ when: r.issue_status == 'open'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ organization=dict(required=True),
+ repo=dict(required=True),
+ issue=dict(type='int', required=True),
+ action=dict(choices=['get_status'], default='get_status'),
+ ),
+ supports_check_mode=True,
+ )
+
+ organization = module.params['organization']
+ repo = module.params['repo']
+ issue = module.params['issue']
+ action = module.params['action']
+
+ result = dict()
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+
+ url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+ response, info = fetch_url(module, url, headers=headers)
+ if not (200 <= info['status'] < 400):
+ if info['status'] == 404:
+ module.fail_json(msg="Failed to find issue %s" % issue)
+ module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+ gh_obj = json.loads(response.read())
+
+ if action == 'get_status' or action is None:
+ if module.check_mode:
+ result.update(changed=True)
+ else:
+ result.update(changed=True, issue_status=gh_obj['state'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py
new file mode 100644
index 00000000..415065f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_key.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it is different from C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ type: bool
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
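+
+# Illustrative only: removing a previously authorized key by its name,
+# using the documented state=absent behaviour.
+- name: Remove key from GitHub
+  community.general.github_key:
+    name: Access Key for Some Machine
+    token: '{{ github_access_token }}'
+    state: absent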
+'''
+
+
+import json
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ result = []
+ while url:
+ r = session.request('GET', url)
+ result.extend(r.json())
+ url = r.links().get('next')
+ return result
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(module, session, name, pubkey, force, check_mode):
+ all_keys = get_all_keys(session)
+ matching_keys = [k for k in all_keys if k['title'] == name]
+ deleted_keys = []
+
+ new_signature = pubkey.split(' ')[1]
+ for key in all_keys:
+ existing_signature = key['key'].split(' ')[1]
+ if new_signature == existing_signature and key['title'] != name:
+ module.fail_json(msg=(
+ "another key with the same content is already registered "
+ "under the name |{0}|").format(key['title']))
+
+ if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(module, session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py
new file mode 100644
index 00000000..5372d6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_release.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about GitHub Releases
+options:
+ token:
+ description:
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ user:
+ description:
+ - The GitHub account that owns the repository
+ required: true
+ password:
+ description:
+ - The GitHub account password for the user. Mutually exclusive with C(token).
+ repo:
+ description:
+ - Repository name
+ required: true
+ action:
+ description:
+ - Action to perform
+ required: true
+ choices: [ 'latest_release', 'create_release' ]
+ tag:
+ description:
+ - Tag name when creating a release. Required when I(action) is set to C(create_release).
+ target:
+ description:
+ - Target of release when creating a release
+ name:
+ description:
+ - Name of release when creating a release
+ body:
+ description:
+ - Description of the release when creating a release
+ draft:
+ description:
+ - Sets whether the release is a draft.
+ type: 'bool'
+ default: 'no'
+ prerelease:
+ description:
+ - Sets whether the release is a prerelease.
+ type: bool
+ default: 'no'
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+ community.general.github_release:
+ user: ansible
+ repo: ansible
+ action: latest_release
+
+- name: Get latest release of testuser/testrepo
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+
+- name: Get latest release of test repo using username and password (supported since Ansible 2.4)
+ community.general.github_release:
+ user: testuser
+ password: secret123
+ repo: testrepo
+ action: latest_release
+
+- name: Create a new release
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: create_release
+ tag: test
+ target: master
+ name: My Release
+ body: Some description
+
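+# Illustrative only: the documented draft and prerelease flags can be
+# combined with create_release in the same way; the tag and names are placeholders.
+- name: Create a draft prerelease
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: create_release
+    tag: 0.9.0-rc1
+    name: Release candidate
+    draft: yes
+    prerelease: yes
+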
+'''
+
+RETURN = '''
+create_release:
+ description:
+ - Version of the created release
+ - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
+ - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
+ type: str
+ returned: success
+ sample: 1.1.0
+
+latest_release:
+ description: Version of the latest release
+ type: str
+ returned: success
+ sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ action=dict(
+ required=True, choices=['latest_release', 'create_release']),
+ tag=dict(type='str'),
+ target=dict(type='str'),
+ name=dict(type='str'),
+ body=dict(type='str'),
+ draft=dict(type='bool', default=False),
+ prerelease=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('password', 'token'),),
+ required_if=[('action', 'create_release', ['tag']),
+ ('action', 'create_release', ['password', 'token'], True)],
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+ exception=GITHUB_IMP_ERR)
+
+ repo = module.params['repo']
+ user = module.params['user']
+ password = module.params['password']
+ login_token = module.params['token']
+ action = module.params['action']
+ tag = module.params.get('tag')
+ target = module.params.get('target')
+ name = module.params.get('name')
+ body = module.params.get('body')
+ draft = module.params.get('draft')
+ prerelease = module.params.get('prerelease')
+
+ # login to github
+ try:
+ if password:
+ gh_obj = github3.login(user, password=password)
+ elif login_token:
+ gh_obj = github3.login(token=login_token)
+ else:
+ gh_obj = github3.GitHub()
+
+ # test if we're actually logged in
+ if password or login_token:
+ gh_obj.me()
+ except github3.exceptions.AuthenticationFailed as e:
+ module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
+
+ repository = gh_obj.repository(user, repo)
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+ if action == 'create_release':
+ release_exists = repository.release_from_tag(tag)
+ if release_exists:
+ module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
+
+ release = repository.create_release(
+ tag, target, name, body, draft, prerelease)
+ if release:
+ module.exit_json(changed=True, tag=release.tag_name)
+ else:
+ module.exit_json(changed=False, tag=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py
new file mode 100644
index 00000000..ac153689
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook
+short_description: Manage GitHub webhooks
+description:
+ - "Create and delete GitHub webhooks"
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ url:
+ description:
+ - URL to which payloads will be delivered
+ required: true
+ content_type:
+ description:
+ - The media type used to serialize the payloads
+ required: false
+ choices: [ form, json ]
+ default: form
+ secret:
+ description:
+ - The shared secret between GitHub and the payload URL.
+ required: false
+ insecure_ssl:
+ description:
+ - >
+ Flag to indicate that GitHub should skip SSL verification when calling
+ the hook.
+ required: false
+ type: bool
+ default: false
+ events:
+ description:
+ - >
+ A list of GitHub events the hook is triggered for. Events are listed at
+ U(https://developer.github.com/v3/activity/events/types/). Required
+ unless C(state) is C(absent)
+ required: false
+ type: list
+ elements: str
+ active:
+ description:
+ - Whether or not the hook is active
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether the hook should be present or absent
+ required: false
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: Create a new webhook that triggers on push (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ events:
+ - push
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+
+- name: Create a new webhook in a GitHub Enterprise installation with multiple event triggers (token auth)
+ community.general.github_webhook:
+ repository: myorg/myrepo
+ url: https://jenkins.example.com/ghprbhook/
+ content_type: json
+ secret: "{{ github_shared_secret }}"
+ insecure_ssl: True
+ events:
+ - issue_comment
+ - pull_request
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com
+
+- name: Delete a webhook (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ state: absent
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
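+
+# Illustrative only: re-running the module with state=present and a different
+# event list updates the existing hook registered for that URL (token auth).
+- name: Update the events an existing webhook triggers on
+  community.general.github_webhook:
+    repository: ansible/ansible
+    url: https://www.example.com/hooks/
+    events:
+      - push
+      - release
+    user: "{{ github_user }}"
+    token: "{{ github_user_api_token }}"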
+'''
+
+RETURN = '''
+---
+hook_id:
+ description: The GitHub ID of the hook created/updated
+ returned: when state is 'present'
+ type: int
+ sample: 6206
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _create_hook_config(module):
+ return {
+ "url": module.params["url"],
+ "content_type": module.params["content_type"],
+ "secret": module.params.get("secret"),
+ "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
+ }
+
+
+def create_hook(repo, module):
+ config = _create_hook_config(module)
+ try:
+ hook = repo.create_hook(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to create hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return True, data
+
+
+def update_hook(repo, hook, module):
+ config = _create_hook_config(module)
+ try:
+ hook.update()
+ hook.edit(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+
+ changed = hook.update()
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return changed, data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=['repo']),
+ url=dict(type='str', required=True),
+ content_type=dict(
+ type='str',
+ choices=('json', 'form'),
+ required=False,
+ default='form'),
+ secret=dict(type='str', required=False, no_log=True),
+ insecure_ssl=dict(type='bool', required=False, default=False),
+ events=dict(type='list', elements='str', required=False),
+ active=dict(type='bool', required=False, default=True),
+ state=dict(
+ type='str',
+ required=False,
+ choices=('absent', 'present'),
+ default='present'),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'),),
+ required_one_of=(("password", "token"),),
+ required_if=(("state", "present", ("events",)),),
+ )
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ hook = None
+ try:
+ for hook in repo.get_hooks():
+ if hook.config.get("url") == module.params["url"]:
+ break
+ else:
+ hook = None
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
+ module.params["repository"], to_native(err)))
+
+ changed = False
+ data = {}
+ if hook is None and module.params["state"] == "present":
+ changed, data = create_hook(repo, module)
+ elif hook is not None and module.params["state"] == "absent":
+ try:
+ hook.delete()
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to delete hook from repository %s: %s" % (
+ repo.full_name, to_native(err)))
+ else:
+ changed = True
+ elif hook is not None and module.params["state"] == "present":
+ changed, data = update_hook(repo, hook, module)
+ # else, there is no hook and we want there to be no hook
+
+ module.exit_json(changed=changed, **data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py
new file mode 100644
index 00000000..f99a0a03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/github_webhook_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook_info
+short_description: Query information about GitHub webhooks
+description:
+ - "Query information about GitHub webhooks"
+ - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: List hooks for a repository (password auth)
+ community.general.github_webhook_info:
+ repository: ansible/ansible
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+ register: ansible_webhooks
+
+- name: List hooks for a repository on GitHub Enterprise (token auth)
+ community.general.github_webhook_info:
+ repository: myorg/myrepo
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com/api/v3/
+ register: myrepo_webhooks
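+
+# Illustrative only: the registered result exposes the hooks list documented
+# in the RETURN section below.
+- name: Show the URLs of all hooks on the repository
+  ansible.builtin.debug:
+    msg: "{{ ansible_webhooks.hooks | map(attribute='url') | list }}"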
+'''
+
+RETURN = '''
+---
+hooks:
+ description: A list of hooks that exist for the repo
+ returned: always
+ type: list
+ sample: >
+ [{"has_shared_secret": true,
+ "url": "https://jenkins.example.com/ghprbhook/",
+ "events": ["issue_comment", "pull_request"],
+ "insecure_ssl": "1",
+ "content_type": "json",
+ "active": true,
+ "id": 6206,
+ "last_response": {"status": "active", "message": "OK", "code": 200}}]
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def _munge_hook(hook_obj):
+ retval = {
+ "active": hook_obj.active,
+ "events": hook_obj.events,
+ "id": hook_obj.id,
+ "url": hook_obj.url,
+ }
+ retval.update(hook_obj.config)
+ retval["has_shared_secret"] = "secret" in retval
+ if "secret" in retval:
+ del retval["secret"]
+
+ retval["last_response"] = hook_obj.last_response.raw_data
+ return retval
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=["repo"]),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'), ),
+ required_one_of=(("password", "token"), ),
+ supports_check_mode=True)
+ if module._name in ('github_webhook_facts', 'community.general.github_webhook_facts'):
+ module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ try:
+ hooks = [_munge_hook(h) for h in repo.get_hooks()]
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to get hooks from repository %s: %s" %
+ (module.params["repository"], to_native(err)),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, hooks=hooks)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
new file mode 100644
index 00000000..c66a6f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys.
+description:
+ - Adds, updates and removes project deploy keys.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - ID or full path of the project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+ - Deploy key
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: no
+ state:
+ description:
+ - When C(present), the deploy key is added to the project if it does not exist.
+ - When C(absent) it will be removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Update the above deploy key to add push access"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ can_push: yes
+
+- name: "Remove the previous deploy key from the project"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ state: absent
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
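+# Illustrative only: the same task pattern can add several deploy keys in one
+# loop; the titles and key values here are placeholders.
+- name: "Add deploy keys for several CI systems"
+  community.general.gitlab_deploy_key:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ api_token }}"
+    project: "my_group/my_project"
+    title: "{{ item.title }}"
+    key: "{{ item.key }}"
+    state: present
+  loop:
+    - { title: "Jenkins CI", key: "ssh-rsa AAAA..." }
+    - { title: "GitLab Runner", key: "ssh-rsa AAAA..." }
+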
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: key is already in use"
+
+deploy_key:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabDeployKey(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.deployKeyObject = None
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ @param key_key String of the key
+ @param key_can_push Option of the deployKey
+ @param options Deploy key options
+ '''
+ def createOrUpdateDeployKey(self, project, key_title, key_key, options):
+ changed = False
+
+ # Because we have already called existsDeployKey in main()
+ if self.deployKeyObject is None:
+ deployKey = self.createDeployKey(project, {
+ 'title': key_title,
+ 'key': key_key,
+ 'can_push': options['can_push']})
+ changed = True
+ else:
+ changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
+ 'can_push': options['can_push']})
+
+ self.deployKeyObject = deployKey
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+ try:
+ deployKey.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the deployKey
+ '''
+ def createDeployKey(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ deployKey = project.keys.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+ return deployKey
+
+ '''
+ @param deployKey Deploy Key Object
+ @param arguments Attributes of the deployKey
+ '''
+ def updateDeployKey(self, deployKey, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(deployKey, arg_key) != arguments[arg_key]:
+ setattr(deployKey, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, deployKey)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def findDeployKey(self, project, key_title):
+ deployKeys = project.keys.list()
+ for deployKey in deployKeys:
+ if (deployKey.title == key_title):
+ return deployKey
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def existsDeployKey(self, project, key_title):
+ # When project exists, object will be stored in self.projectObject.
+ deployKey = self.findDeployKey(project, key_title)
+ if deployKey:
+ self.deployKeyObject = deployKey
+ return True
+ return False
+
+ def deleteDeployKey(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deployKeyObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
+
+ deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
+
+ if state == 'absent':
+ if deployKey_exists:
+ gitlab_deploy_key.deleteDeployKey()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py
new file mode 100644
index 00000000..0c612733
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes GitLab Groups
+description:
+ - When the group does not exist in GitLab, it will be created.
+ - When the group does exist and state=absent, the group will be deleted.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the group you want to create; this will be api_url/group_path.
+ - If not supplied, the group_name will be used.
+ type: str
+ description:
+ description:
+ - A description for the group.
+ type: str
+ state:
+ description:
+ - Create or delete the group.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ parent:
+ description:
+ - Allows creating subgroups.
+ - ID or full path of the parent group in the form of group/name.
+ type: str
+ visibility:
+ description:
+ - Default visibility of the group
+ choices: ["private", "internal", "public"]
+ default: private
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_group
+ state: absent
+
+- name: "Create GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+
+# The group will be created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group
+- name: "Create GitLab SubGroup"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+ parent: "super_parent/parent"
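+
+# Illustrative only: description and visibility are the documented options
+# above; the values shown are placeholders.
+- name: "Create a public GitLab Group with a description"
+  community.general.gitlab_group:
+    api_url: https://gitlab.example.com/
+    api_token: "{{ access_token }}"
+    name: my_first_group
+    path: my_first_group
+    description: My first group
+    visibility: public
+    state: present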
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+group:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.groupObject = None
+
+ '''
+ @param group Group object
+ '''
+ def getGroupId(self, group):
+ if group is not None:
+ return group.id
+ return None
+
+ '''
+ @param name Name of the group
+ @param parent Parent group full path
+ @param options Group options
+ '''
+ def createOrUpdateGroup(self, name, parent, options):
+ changed = False
+
+ # Because we have already called existsGroup in main()
+ if self.groupObject is None:
+ parent_id = self.getGroupId(parent)
+
+ payload = {
+ 'name': name,
+ 'path': options['path'],
+ 'parent_id': parent_id,
+ 'visibility': options['visibility']
+ }
+ if options.get('description'):
+ payload['description'] = options['description']
+ group = self.createGroup(payload)
+ changed = True
+ else:
+ changed, group = self.updateGroup(self.groupObject, {
+ 'name': name,
+ 'description': options['description'],
+ 'visibility': options['visibility']})
+
+ self.groupObject = group
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+ try:
+ group.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update group: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the group
+ '''
+ def createGroup(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ group = self._gitlab.groups.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+ return group
+
+ '''
+ @param group Group Object
+ @param arguments Attributes of the group
+ '''
+ def updateGroup(self, group, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(group, arg_key) != arguments[arg_key]:
+ setattr(group, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, group)
+
+ def deleteGroup(self):
+ group = self.groupObject
+
+ if len(group.projects.list()) >= 1:
+ self._module.fail_json(
+ msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+ else:
+ if self._module.check_mode:
+ return True
+
+ try:
+ group.delete()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+ '''
+ @param name Name of the group
+ @param full_path Complete path of the Group including parent group path. <parent_path>/<group_path>
+ '''
+ def existsGroup(self, project_identifier):
+ # When group/user exists, object will be stored in self.groupObject.
+ group = findGroup(self._gitlab, project_identifier)
+ if group:
+ self.groupObject = group
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ parent=dict(type='str'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_name = module.params['name']
+ group_path = module.params['path']
+ description = module.params['description']
+ state = module.params['state']
+ parent_identifier = module.params['parent']
+ group_visibility = module.params['visibility']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Define default group_path based on group_name
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ gitlab_group = GitLabGroup(module, gitlab_instance)
+
+ parent_group = None
+ if parent_identifier:
+ parent_group = findGroup(gitlab_instance, parent_identifier)
+ if not parent_group:
+ module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists")
+
+ group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path)
+ else:
+ group_exists = gitlab_group.existsGroup(group_path)
+
+ if state == 'absent':
+ if group_exists:
+ gitlab_group.deleteGroup()
+ module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+ else:
+ module.exit_json(changed=False, msg="Group deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
+ "path": group_path,
+ "description": description,
+ "visibility": group_visibility}):
+ module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
new file mode 100644
index 00000000..8a3da2a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_group_members
+short_description: Manage group members on GitLab Server
+description:
+ - This module allows adding and removing members to/from a group, and changing a member's access level in a group on GitLab.
+version_added: '1.2.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - administrator rights on the GitLab server
+extends_documentation_fragment: community.general.auth_basic
+options:
+ api_token:
+ description:
+ - A personal access token to authenticate with the GitLab API.
+ required: true
+ type: str
+ gitlab_group:
+ description:
+ - The name of the GitLab group the member is added to/removed from.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - The username of the member to add to/remove from the GitLab group.
+ required: true
+ type: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state) is C(present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ state:
+ description:
+ - State of the member in the group.
+ - On C(present), it adds a user to a GitLab group.
+ - On C(absent), it removes a user from a GitLab group.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ state: absent
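+
+# Illustrative only: running the module again with a different access_level
+# updates the existing membership rather than re-adding the user.
+- name: Promote an existing member to maintainer
+  community.general.gitlab_group_members:
+    api_url: 'https://gitlab.example.com'
+    api_token: 'Your-Private-Token'
+    gitlab_group: groupname
+    gitlab_user: username
+    access_level: maintainer
+    state: present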
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+import traceback
+
+try:
+ import gitlab
+ HAS_PY_GITLAB = True
+except ImportError:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_PY_GITLAB = False
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ # get user id if the user exists
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user)
+ if user_exists:
+ return user_exists[0].id
+
+ # get group id if group exists
+ def get_group_id(self, gitlab_group):
+ group_exists = self._gitlab.groups.list(search=gitlab_group)
+ if group_exists:
+ return group_exists[0].id
+
+ # get all members in a group
+ def get_members_in_a_group(self, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ return group.members.list()
+
+ # check if the user is a member of the group
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a group
+ def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ add_member = group.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ if add_member:
+ return add_member.username
+
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e))
+
+ # remove user from a group
+ def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.delete(gitlab_user_id)
+
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ self._module.fail_json(
+ msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e))
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a group
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ try:
+ member.access_level = access_level
+ member.save()
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', required=True, no_log=True),
+ gitlab_group=dict(type='str', required=True),
+ gitlab_user=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level']],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PY_GITLAB:
+ module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
+
+ gitlab_group = module.params['gitlab_group']
+ gitlab_user = module.params['gitlab_user']
+ state = module.params['state']
+ access_level = module.params['access_level']
+
+ # convert access level string input to int
+ if access_level:
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS
+ }
+
+ access_level = access_level_int[access_level]
+
+ # connect to gitlab server
+ gl = gitlabAuthentication(module)
+
+ group = GitLabGroup(module, gl)
+
+ gitlab_user_id = group.get_user_id(gitlab_user)
+ gitlab_group_id = group.get_group_id(gitlab_group)
+
+ # group doesn't exist
+ if not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user)
+ else:
+ module.fail_json(msg="user '%s' not found." % gitlab_user)
+
+ members = group.get_members_in_a_group(gitlab_group_id)
+ is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the group
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the group
+ if not module.check_mode:
+ group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level)
+ module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user)
+ # state as absent
+ else:
+ module.exit_json(changed=False, result="User, '%s', is not a member in the group. No change to report" % gitlab_user)
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = group.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == access_level:
+ module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user)
+ else:
+ # update the access level for the user
+ if not module.check_mode:
+ group.update_user_access_level(members, gitlab_user_id, access_level)
+ module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
+ else:
+ # remove the user from the group
+ if not module.check_mode:
+ group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+ module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
new file mode 100644
index 00000000..dd20a0b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab group variables
+version_added: 1.2.0
+description:
+ - Creates a group variable if it does not exist.
+ - When a group variable does exist, its value will be updated if the values are different.
+ - Variables which exist in the GitLab group but are not listed in the playbook either stay
+ untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+ - Florent Madiot (@scodeman)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete group variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ group:
+ description:
+ - The path and name of the group.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), delete all variables which are not mentioned in the task.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+notes:
+- Supports I(check_mode).
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = r'''
+group_variable:
+ description: Four lists of the variable names which were added, updated, removed or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabGroupVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.group = self.get_group(module.params['group'])
+ self._module = module
+
+ def get_group(self, group_name):
+ return self.repo.groups.get(group_name)
+
+ def list_all_group_variables(self):
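+ # the variables API is paginated: keep requesting the next page until an empty page is returned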
+ page_nb = 1
+ variables = []
+ vars_page = self.group.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.group.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.group.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
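+ # protected, masked or variable_type changed: recreate the variable rather than saving it in place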
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.group.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
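+ # get_id() returns the variable key, so this starts as the list of keys currently defined on the group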
+
+ for key in var_list:
+ if not isinstance(var_list[key], (string_types, integer_types, float, dict)):
+ module.fail_json(msg="Value of %s variable must be of type string, integer, float or dict, passed %s" % (key, var_list[key].__class__.__name__))
+
+ for key in var_list:
+
+ if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
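+ # entries still set were defined on the server but not named in the task: purge them or report them as untouched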
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ group=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py
new file mode 100644
index 00000000..bc4b6ecb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_hook.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks.
+description:
+ - Adds, updates and removes project hooks.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - ID or full path of the project in the form of group/name.
+ required: true
+ type: str
+ hook_url:
+ description:
+ - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+ required: true
+ type: str
+ state:
+ description:
+ - When C(present) the hook will be updated to match the input or created if it doesn't exist.
+ - When C(absent) the hook will be deleted if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ push_events:
+ description:
+ - Trigger hook on push events.
+ type: bool
+ default: yes
+ push_events_branch_filter:
+ description:
+ - Branch name or wildcard to trigger hook on push events.
+ type: str
+ version_added: '0.2.0'
+ issues_events:
+ description:
+ - Trigger hook on issues events.
+ type: bool
+ default: no
+ merge_requests_events:
+ description:
+ - Trigger hook on merge requests events.
+ type: bool
+ default: no
+ tag_push_events:
+ description:
+ - Trigger hook on tag push events.
+ type: bool
+ default: no
+ note_events:
+ description:
+ - Trigger hook on note events or when someone adds a comment.
+ type: bool
+ default: no
+ job_events:
+ description:
+ - Trigger hook on job events.
+ type: bool
+ default: no
+ pipeline_events:
+ description:
+ - Trigger hook on pipeline events.
+ type: bool
+ default: no
+ wiki_page_events:
+ description:
+ - Trigger hook on wiki events.
+ type: bool
+ default: no
+ hook_validate_certs:
+ description:
+ - Whether GitLab will do SSL verification when triggering the hook.
+ type: bool
+ default: no
+ aliases: [ enable_ssl_verification ]
+ token:
+ description:
+ - Secret token to validate hook messages at the receiver.
+ - If this is present it will always result in a change as it cannot be retrieved from GitLab.
+ - Will show up in the X-GitLab-Token HTTP request header.
+ required: false
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: yes
+ tag_push_events: yes
+ hook_validate_certs: no
+ token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+
+- name: "Delete a hook by numeric project id"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: 10
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+hook:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabHook(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.hookObject = None
+
+ '''
+ @param project Project Object
+ @param hook_url URL to call on event
+ @param options Options of the hook
+ '''
+ def createOrUpdateHook(self, project, hook_url, options):
+ changed = False
+
+ # Because we have already call userExists in main()
+ if self.hookObject is None:
+ hook = self.createHook(project, {
+ 'url': hook_url,
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+ changed = True
+ else:
+ changed, hook = self.updateHook(self.hookObject, {
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+
+ self.hookObject = hook
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+ try:
+ hook.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update hook: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the hook
+ '''
+ def createHook(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ hook = project.hooks.create(arguments)
+
+ return hook
+
+ '''
+ @param hook Hook Object
+ @param arguments Attributes of the hook
+ '''
+ def updateHook(self, hook, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(hook, arg_key) != arguments[arg_key]:
+ setattr(hook, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, hook)
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def findHook(self, project, hook_url):
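+ # return the first hook whose URL matches hook_url; falls through to an implicit None when there is no match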
+ hooks = project.hooks.list()
+ for hook in hooks:
+ if (hook.url == hook_url):
+ return hook
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def existsHook(self, project, hook_url):
+ # When the hook exists, the object is stored in self.hookObject.
+ hook = self.findHook(project, hook_url)
+ if hook:
+ self.hookObject = hook
+ return True
+ return False
+
+ def deleteHook(self):
+ if self._module.check_mode:
+ return True
+
+ return self.hookObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ hook_url=dict(type='str', required=True),
+ push_events=dict(type='bool', default=True),
+ push_events_branch_filter=dict(type='str', default=''),
+ issues_events=dict(type='bool', default=False),
+ merge_requests_events=dict(type='bool', default=False),
+ tag_push_events=dict(type='bool', default=False),
+ note_events=dict(type='bool', default=False),
+ job_events=dict(type='bool', default=False),
+ pipeline_events=dict(type='bool', default=False),
+ wiki_page_events=dict(type='bool', default=False),
+ hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
+ token=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ hook_url = module.params['hook_url']
+ push_events = module.params['push_events']
+ push_events_branch_filter = module.params['push_events_branch_filter']
+ issues_events = module.params['issues_events']
+ merge_requests_events = module.params['merge_requests_events']
+ tag_push_events = module.params['tag_push_events']
+ note_events = module.params['note_events']
+ job_events = module.params['job_events']
+ pipeline_events = module.params['pipeline_events']
+ wiki_page_events = module.params['wiki_page_events']
+ enable_ssl_verification = module.params['hook_validate_certs']
+ hook_token = module.params['token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_hook = GitLabHook(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)
+
+ hook_exists = gitlab_hook.existsHook(project, hook_url)
+
+ if state == 'absent':
+ if hook_exists:
+ gitlab_hook.deleteHook()
+ module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+ else:
+ module.exit_json(changed=False, msg="Hook deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_hook.createOrUpdateHook(project, hook_url, {
+ "push_events": push_events,
+ "push_events_branch_filter": push_events_branch_filter,
+ "issues_events": issues_events,
+ "merge_requests_events": merge_requests_events,
+ "tag_push_events": tag_push_events,
+ "note_events": note_events,
+ "job_events": job_events,
+ "pipeline_events": pipeline_events,
+ "wiki_page_events": wiki_page_events,
+ "enable_ssl_verification": enable_ssl_verification,
+ "token": hook_token}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py
new file mode 100644
index 00000000..98631c74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+ - When the project does not exist in GitLab, it will be created.
+ - When the project does exist and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ group:
+ description:
+ - ID or the full path of the group to which this project belongs.
+ type: str
+ name:
+ description:
+ - The name of the project
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ type: str
+ description:
+ description:
+ - A description for the project.
+ type: str
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ wiki_enabled:
+ description:
+ - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ visibility:
+ description:
+ - Private. Project access must be granted explicitly for each user.
+ - Internal. The project can be cloned by any logged in user.
+ - Public. The project can be cloned without any authentication.
+ default: private
+ type: str
+ choices: ["private", "internal", "public"]
+ aliases:
+ - visibility_level
+ import_url:
+ description:
+ - Git repository which will be imported into GitLab.
+ - GitLab server needs read access to this git repository.
+ required: false
+ type: str
+ state:
+ description:
+ - create or delete project.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ merge_method:
+ description:
+ - What requirements are placed upon merges.
+ - Possible values are C(merge) (merge commit), C(rebase_merge) (merge commit with semi-linear history) and C(ff) (fast-forward merges only).
+ type: str
+ choices: ["ff", "merge", "rebase_merge"]
+ default: merge
+ version_added: "1.0.0"
+'''
+
+EXAMPLES = r'''
+- name: Delete GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_project
+ state: absent
+ delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_project
+ group: ansible
+ issues_enabled: False
+ merge_method: rebase_merge
+ wiki_enabled: True
+ snippets_enabled: True
+ import_url: http://git.example.com/example/lab.git
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API.
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+project:
+ description: API object.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication
+
+
+class GitLabProject(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.projectObject = None
+
+ '''
+ @param project_name Name of the project
+ @param namespace Namespace Object (User or Group)
+ @param options Options of the project
+ '''
+ def createOrUpdateProject(self, project_name, namespace, options):
+ changed = False
+
+ # Because existsProject() has already been called in main(), projectObject is set when the project exists
+ if self.projectObject is None:
+ project = self.createProject(namespace, {
+ 'name': project_name,
+ 'path': options['path'],
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility'],
+ 'import_url': options['import_url']})
+ changed = True
+ else:
+ changed, project = self.updateProject(self.projectObject, {
+ 'name': project_name,
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility']})
+
+ self.projectObject = project
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+ try:
+ project.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed update project: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param namespace Namespace Object (User or Group)
+ @param arguments Attributes of the project
+ '''
+ def createProject(self, namespace, arguments):
+ if self._module.check_mode:
+ return True
+
+ arguments['namespace_id'] = namespace.id
+ try:
+ project = self._gitlab.projects.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+ return project
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the project
+ '''
+ def updateProject(self, project, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(project, arg_key) != arguments[arg_key]:
+ setattr(project, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, project)
+
+ def deleteProject(self):
+ if self._module.check_mode:
+ return True
+
+ project = self.projectObject
+
+ return project.delete()
+
+ '''
+ @param namespace User/Group object
+ @param path Path of the project
+ '''
+ def existsProject(self, namespace, path):
+ # When project exists, object will be stored in self.projectObject.
+ project = findProject(self._gitlab, namespace.full_path + '/' + path)
+ if project:
+ self.projectObject = project
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ group=dict(type='str'),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ issues_enabled=dict(type='bool', default=True),
+ merge_requests_enabled=dict(type='bool', default=True),
+ merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]),
+ wiki_enabled=dict(type='bool', default=True),
+ snippets_enabled=dict(default=True, type='bool'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
+ import_url=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_identifier = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ project_description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ merge_method = module.params['merge_method']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ visibility = module.params['visibility']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ gitlab_project = GitLabProject(module, gitlab_instance)
+
+ if group_identifier:
+ group = findGroup(gitlab_instance, group_identifier)
+ if group is None:
+ module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier)
+
+ namespace = gitlab_instance.namespaces.get(group.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+ else:
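+ # no group given: use the personal namespace of the authenticated user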
+ user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
+ namespace = gitlab_instance.namespaces.get(user.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+
+ if state == 'absent':
+ if project_exists:
+ gitlab_project.deleteProject()
+ module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+ else:
+ module.exit_json(changed=False, msg="Project deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_project.createOrUpdateProject(project_name, namespace, {
+ "path": project_path,
+ "description": project_description,
+ "issues_enabled": issues_enabled,
+ "merge_requests_enabled": merge_requests_enabled,
+ "merge_method": merge_method,
+ "wiki_enabled": wiki_enabled,
+ "snippets_enabled": snippets_enabled,
+ "visibility": visibility,
+ "import_url": import_url}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
new file mode 100644
index 00000000..9803f76b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab project variables
+description:
+ - When a project variable does not exist, it will be created.
+ - When a project variable does exist, its value will be updated when the values are different.
+ - Variables which exist in the GitLab project but are not listed in the playbook either stay
+ untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+ - "Markus Bergholz (@markuman)"
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete project variable.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), all variables which are not mentioned in the task will be deleted.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
+ default: {}
+ type: dict
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = '''
+project_variable:
+ description: Four lists of the variable names which were added, updated, removed or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabProjectVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.project = self.get_project(module.params['project'])
+ self._module = module
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def list_all_project_variables(self):
+ page_nb = 1
+ variables = []
+ vars_page = self.project.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.project.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.project.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.project.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+
+ if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+ else:
+ module.fail_json(msg="value must be of type string, integer or dict")
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
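+ # blank out handled keys so that only variables absent from the task remain for the purge/untouched step below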
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
+
+ change, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py
new file mode 100644
index 00000000..70384914
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners.
+description:
+ - Register, update and delete runners with the GitLab API.
+ - All operations are performed using the GitLab API v4.
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+ U(https://$GITLAB_URL/profile/personal_access_tokens).
+ - A valid registration token is required for registering a new runner.
+ To create shared runners, you need to ask your administrator to give you this token.
+ It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+ - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required.
+ - Runners need to have unique descriptions.
+author:
+ - Samy Coenen (@SamyCoenen)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - Your private token to interact with the GitLab API.
+ type: str
+ description:
+ description:
+ - The unique name of the runner.
+ required: True
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name.
+ required: False
+ default: present
+ choices: ["present", "absent"]
+ type: str
+ registration_token:
+ description:
+ - The registration token is used to register new runners.
+ required: True
+ type: str
+ active:
+ description:
+ - Define if the runner is immediately active after creation.
+ required: False
+ default: yes
+ type: bool
+ locked:
+ description:
+ - Determines if the runner is locked or not.
+ required: False
+ default: False
+ type: bool
+ access_level:
+ description:
+ - Determines if a runner can pick up jobs from protected branches.
+ required: False
+ default: ref_protected
+ choices: ["ref_protected", "not_protected"]
+ type: str
+ maximum_timeout:
+ description:
+ - The maximum timeout that a runner has to pick up a specific job.
+ required: False
+ default: 3600
+ type: int
+ run_untagged:
+ description:
+ - Run untagged jobs or not.
+ required: False
+ default: yes
+ type: bool
+ tag_list:
+ description: The tags that apply to the runner.
+ required: False
+ default: []
+ type: list
+'''
+
+EXAMPLES = '''
+- name: "Register runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: Docker Machine t1
+ state: present
+ active: True
+ tag_list: ['docker']
+ run_untagged: False
+ locked: False
+
+- name: "Delete runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+runner:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
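+# Python 3 removed the cmp() builtin; define an equivalent shim, used below to compare sorted tag lists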
+try:
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+
+class GitLabRunner(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.runnerObject = None
+
+ def createOrUpdateRunner(self, description, options):
+ changed = False
+
+ # Because existsRunner() has already been called in main(), runnerObject is set when the runner exists
+ if self.runnerObject is None:
+ runner = self.createRunner({
+ 'description': description,
+ 'active': options['active'],
+ 'token': options['registration_token'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'tag_list': options['tag_list']})
+ changed = True
+ else:
+ changed, runner = self.updateRunner(self.runnerObject, {
+ 'active': options['active'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'access_level': options['access_level'],
+ 'tag_list': options['tag_list']})
+
+ self.runnerObject = runner
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the runner
+ '''
+ def createRunner(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ runner = self._gitlab.runners.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
+
+ return runner
+
+ '''
+ @param runner Runner object
+ @param arguments Attributes of the runner
+ '''
+ def updateRunner(self, runner, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if isinstance(arguments[arg_key], list):
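+ # list attributes (e.g. tag_list) are sorted and compared with cmp() so ordering differences do not trigger an update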
+ list1 = getattr(runner, arg_key)
+ list1.sort()
+ list2 = arguments[arg_key]
+ list2.sort()
+ if cmp(list1, list2):
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+ else:
+ if getattr(runner, arg_key) != arguments[arg_key]:
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, runner)
+
+ '''
+ @param description Description of the runner
+ '''
+ def findRunner(self, description):
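+ # runners.all(as_list=False) yields runners lazily; fetch the full object for the first description match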
+ runners = self._gitlab.runners.all(as_list=False)
+ for runner in runners:
+ if (runner['description'] == description):
+ return self._gitlab.runners.get(runner['id'])
+
+ '''
+ @param description Description of the runner
+ '''
+ def existsRunner(self, description):
+ # When runner exists, object will be stored in self.runnerObject.
+ runner = self.findRunner(description)
+
+ if runner:
+ self.runnerObject = runner
+ return True
+ return False
+
+ def deleteRunner(self):
+ if self._module.check_mode:
+ return True
+
+ runner = self.runnerObject
+
+ return runner.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ description=dict(type='str', required=True, aliases=["name"]),
+ active=dict(type='bool', default=True),
+ tag_list=dict(type='list', default=[]),
+ run_untagged=dict(type='bool', default=True),
+ locked=dict(type='bool', default=False),
+ access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
+ maximum_timeout=dict(type='int', default=3600),
+ registration_token=dict(type='str', required=True, no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ runner_description = module.params['description']
+ runner_active = module.params['active']
+ tag_list = module.params['tag_list']
+ run_untagged = module.params['run_untagged']
+ runner_locked = module.params['locked']
+ access_level = module.params['access_level']
+ maximum_timeout = module.params['maximum_timeout']
+ registration_token = module.params['registration_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_runner = GitLabRunner(module, gitlab_instance)
+ runner_exists = gitlab_runner.existsRunner(runner_description)
+
+ if state == 'absent':
+ if runner_exists:
+ gitlab_runner.deleteRunner()
+ module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, msg="Runner deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_runner.createOrUpdateRunner(runner_description, {
+ "active": runner_active,
+ "tag_list": tag_list,
+ "run_untagged": run_untagged,
+ "locked": runner_locked,
+ "access_level": access_level,
+ "maximum_timeout": maximum_timeout,
+ "registration_token": registration_token}):
+ module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs,
+ msg="Successfully created or updated the runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs,
+ msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py
new file mode 100644
index 00000000..1e8ee65a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gitlab_user.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+ - When the user does not exist in GitLab, it will be created.
+ - When the user exists and state=absent, the user will be deleted.
+ - When the user exists and state=blocked, the user will be blocked.
+ - When changes are made to user, the user will be updated.
+notes:
+ - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the user you want to create.
+ - Required only if C(state) is set to C(present).
+ type: str
+ username:
+ description:
+ - The username of the user.
+ required: true
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - The GitLab server enforces a minimum password length of 8 characters, so set this value to 8 or more characters.
+ - Required only if C(state) is set to C(present).
+ type: str
+ email:
+ description:
+ - The email that belongs to the user.
+ - Required only if C(state) is set to C(present).
+ type: str
+ sshkey_name:
+ description:
+ - The name of the SSH key.
+ type: str
+ sshkey_file:
+ description:
+ - The ssh key itself.
+ type: str
+ group:
+ description:
+ - ID or full path of parent group in the form of group/name.
+ - Add user as a member to this group.
+ type: str
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master (alias for maintainer)
+ - maintainer
+ - owner
+ default: guest
+ type: str
+ choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
+ state:
+ description:
+ - Create, delete or block a user.
+ default: present
+ type: str
+ choices: ["present", "absent", "blocked", "unblocked"]
+ confirm:
+ description:
+ - Require confirmation.
+ type: bool
+ default: yes
+ isadmin:
+ description:
+ - Grant admin privileges to the user.
+ type: bool
+ default: no
+ external:
+ description:
+ - Define external parameter for this user.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: absent
+
+- name: "Create GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Block GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: blocked
+
+- name: "Unblock GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: unblocked
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+user:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabUser(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.userObject = None
+ self.ACCESS_LEVEL = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'master': gitlab.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS}
+
+ '''
+ @param username Username of the user
+ @param options User options
+ '''
+ def createOrUpdateUser(self, username, options):
+ changed = False
+ potentially_changed = False
+
+ # existsUser() was already called in main(), so self.userObject is set if the user exists
+ if self.userObject is None:
+ user = self.createUser({
+ 'name': options['name'],
+ 'username': username,
+ 'password': options['password'],
+ 'email': options['email'],
+ 'skip_confirmation': not options['confirm'],
+ 'admin': options['isadmin'],
+ 'external': options['external']})
+ changed = True
+ else:
+ changed, user = self.updateUser(
+ self.userObject, {
+ # add "normal" parameters here, put uncheckable
+ # params in the dict below
+ 'name': {'value': options['name']},
+ 'email': {'value': options['email']},
+
+ # note: for some attributes like this one the key
+ # from reading back from server is unfortunately
+ # different to the one needed for pushing/writing,
+ # in that case use the optional setter key
+ 'is_admin': {
+ 'value': options['isadmin'], 'setter': 'admin'
+ },
+ 'external': {'value': options['external']},
+ },
+ {
+ # put "uncheckable" params here, this means params
+ # which GitLab accepts for setting but does
+ # not return any information about it
+ 'skip_reconfirmation': {'value': not options['confirm']},
+ 'password': {'value': options['password']},
+ }
+ )
+
+ # note: as we unfortunately have some uncheckable parameters
+ # where it is not possible to determine if the update
+ # changed something or not, we must assume here that a
+ # change happened and that a user object update is needed
+ potentially_changed = True
+
+ # Assign ssh keys
+ if options['sshkey_name'] and options['sshkey_file']:
+ key_changed = self.addSshKeyToUser(user, {
+ 'name': options['sshkey_name'],
+ 'file': options['sshkey_file']})
+ changed = changed or key_changed
+
+ # Assign group
+ if options['group_path']:
+ group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level'])
+ changed = changed or group_changed
+
+ self.userObject = user
+ if (changed or potentially_changed) and not self._module.check_mode:
+ try:
+ user.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
+
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
+ return True
+ else:
+ return False
+
+ '''
+ @param user User object
+ '''
+ def getUserId(self, user):
+ if user is not None:
+ return user.id
+ return None
+
+ '''
+ @param user User object
+ @param sshkey_name Name of the ssh key
+ '''
+ def sshKeyExists(self, user, sshkey_name):
+ keyList = map(lambda k: k.title, user.keys.list())
+
+ return sshkey_name in keyList
+
+ '''
+ @param user User object
+ @param sshkey Dict containing sshkey infos {"name": "", "file": ""}
+ '''
+ def addSshKeyToUser(self, user, sshkey):
+ if not self.sshKeyExists(user, sshkey['name']):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user.keys.create({
+ 'title': sshkey['name'],
+ 'key': sshkey['file']})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to find
+ '''
+ def findMember(self, group, user_id):
+ try:
+ member = group.members.get(user_id)
+ except gitlab.exceptions.GitlabGetError:
+ return None
+ return member
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ '''
+ def memberExists(self, group, user_id):
+ member = self.findMember(group, user_id)
+
+ return member is not None
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ @param access_level GitLab access_level to check
+ '''
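+ # Returns True only when the existing member already has the requested access level.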
+ def memberAsGoodAccessLevel(self, group, user_id, access_level):
+ member = self.findMember(group, user_id)
+
+ return member.access_level == access_level
+
+ '''
+ @param user User object
+ @param group_identifier ID or complete path of the group, including the parent group path (<parent_path>/<group_path>)
+ @param access_level GitLab access_level to assign
+ '''
+ def assignUserToGroup(self, user, group_identifier, access_level):
+ group = findGroup(self._gitlab, group_identifier)
+
+ if self._module.check_mode:
+ return True
+
+ if group is None:
+ return False
+
+ if self.memberExists(group, self.getUserId(user)):
+ member = self.findMember(group, self.getUserId(user))
+ if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
+ member.access_level = self.ACCESS_LEVEL[access_level]
+ member.save()
+ return True
+ else:
+ try:
+ group.members.create({
+ 'user_id': self.getUserId(user),
+ 'access_level': self.ACCESS_LEVEL[access_level]})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param user User object
+ @param arguments User attributes
+ '''
+ def updateUser(self, user, arguments, uncheckable_args):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ av = arg_value['value']
+
+ if av is not None:
+ if getattr(user, arg_key) != av:
+ setattr(user, arg_value.get('setter', arg_key), av)
+ changed = True
+
+ for arg_key, arg_value in uncheckable_args.items():
+ av = arg_value['value']
+
+ if av is not None:
+ setattr(user, arg_value.get('setter', arg_key), av)
+
+ return (changed, user)
+
+ '''
+ @param arguments User attributes
+ '''
+ def createUser(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user = self._gitlab.users.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
+
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def findUser(self, username):
+ users = self._gitlab.users.list(search=username)
+ for user in users:
+ if (user.username == username):
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def existsUser(self, username):
+ # When user exists, object will be stored in self.userObject.
+ user = self.findUser(username)
+ if user:
+ self.userObject = user
+ return True
+ return False
+
+ '''
+ @param username Username of the user
+ '''
+ def isActive(self, username):
+ user = self.findUser(username)
+ return user.attributes['state'] == 'active'
+
+ def deleteUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.delete()
+
+ def blockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.block()
+
+ def unblockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.unblock()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
+ username=dict(type='str', required=True),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str'),
+ sshkey_name=dict(type='str'),
+ sshkey_file=dict(type='str'),
+ group=dict(type='str'),
+ access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
+ confirm=dict(type='bool', default=True),
+ isadmin=dict(type='bool', default=False),
+ external=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['name', 'email', 'password']),
+ )
+ )
+
+ user_name = module.params['name']
+ state = module.params['state']
+ user_username = module.params['username'].lower()
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_path = module.params['group']
+ access_level = module.params['access_level']
+ confirm = module.params['confirm']
+ user_isadmin = module.params['isadmin']
+ user_external = module.params['external']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_user = GitLabUser(module, gitlab_instance)
+ user_exists = gitlab_user.existsUser(user_username)
+ if user_exists:
+ user_is_active = gitlab_user.isActive(user_username)
+ else:
+ user_is_active = False
+
+ if state == 'absent':
+ if user_exists:
+ gitlab_user.deleteUser()
+ module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User deleted or does not exists")
+
+ if state == 'blocked':
+ if user_exists and user_is_active:
+ gitlab_user.blockUser()
+ module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already blocked or does not exists")
+
+ if state == 'unblocked':
+ if user_exists and not user_is_active:
+ gitlab_user.unblockUser()
+ module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User is not blocked or does not exists")
+
+ if state == 'present':
+ if gitlab_user.createOrUpdateUser(user_username, {
+ "name": user_name,
+ "password": user_password,
+ "email": user_email,
+ "sshkey_name": user_sshkey_name,
+ "sshkey_file": user_sshkey_file,
+ "group_path": group_path,
+ "access_level": access_level,
+ "confirm": confirm,
+ "isadmin": user_isadmin,
+ "external": user_external}):
+ module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py
new file mode 100644
index 00000000..46306585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_heal_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_heal_info
+short_description: Gather information on self-heal or rebalance status
+author: "Devyani Kota (@devyanikota)"
+description:
+ - Gather facts about either self-heal or rebalance status.
+ - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)!
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ status_filter:
+ default: "self-heal"
+ choices: ["self-heal", "rebalance"]
+ description:
+ - Determines which facts are to be returned.
+ - If the C(status_filter) is C(self-heal), status of self-heal, along with the number of files still in process are returned.
+ - If the C(status_filter) is C(rebalance), rebalance status is returned.
+requirements:
+ - GlusterFS > 3.2
+'''
+
+EXAMPLES = '''
+- name: Gather self-heal facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: self-heal
+ register: self_heal_status
+- ansible.builtin.debug:
+ var: self_heal_status
+
+- name: Gather rebalance facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: rebalance
+ register: rebalance_status
+- ansible.builtin.debug:
+ var: rebalance_status
+'''
+
+RETURN = '''
+name:
+ description: GlusterFS volume name
+ returned: always
+ type: str
+status_filter:
+ description: Whether self-heal or rebalance status is to be returned
+ returned: always
+ type: str
+heal_info:
+ description: List of files that still need the healing process
+ returned: On success
+ type: list
+rebalance_status:
+ description: Status of rebalance operation
+ returned: On success
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import LooseVersion
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def get_self_heal_status(name):
+ out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ heal_info = []
+ # return files that still need healing.
+ for line in raw_out:
+ if 'Brick' in line:
+ br_dict = {}
+ br_dict['brick'] = line.strip().strip("Brick")
+ elif 'Status' in line:
+ br_dict['status'] = line.split(":")[1].strip()
+ elif 'Number' in line:
+ br_dict['no_of_entries'] = line.split(":")[1].strip()
+ elif line.startswith('/') or line.startswith('<') or '\n' in line:
+ continue
+ else:
+ br_dict and heal_info.append(br_dict)
+ br_dict = {}
+ return heal_info
+
+
+def get_rebalance_status(name):
+ out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ rebalance_status = []
+ # return the files that are either still 'in progress' state or 'completed'.
+ for line in raw_out:
+ line = " ".join(line.split())
+ line_vals = line.split(" ")
+ if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
+ continue
+ node_dict = {}
+ if len(line_vals) == 1 or len(line_vals) == 4:
+ continue
+ node_dict['node'] = line_vals[0]
+ node_dict['rebalanced_files'] = line_vals[1]
+ node_dict['failures'] = line_vals[4]
+ if 'in progress' in line:
+ node_dict['status'] = line_vals[5] + line_vals[6]
+ rebalance_status.append(node_dict)
+ elif 'completed' in line:
+ node_dict['status'] = line_vals[5]
+ rebalance_status.append(node_dict)
+ return rebalance_status
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than the required version,
+ # it is considered invalid and True is returned
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+def main():
+ global module
+ global glusterbin
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
+ ),
+ )
+ is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts')
+ if is_old_facts:
+ module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ glusterbin = module.get_bin_path('gluster', True)
+ required_version = "3.2"
+ status_filter = module.params['status_filter']
+ volume_name = module.params['name']
+ heal_info = ''
+ rebalance_status = ''
+
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+
+ try:
+ if status_filter == "self-heal":
+ heal_info = get_self_heal_status(volume_name)
+ elif status_filter == "rebalance":
+ rebalance_status = get_rebalance_status(volume_name)
+ except Exception as e:
+ module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())
+
+ facts = {}
+ facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=facts)
+ else:
+ module.exit_json(**facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py
new file mode 100644
index 00000000..e9e6fd71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_peer.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_peer
+short_description: Attach/Detach peers to/from the cluster
+description:
+ - Create or shrink a GlusterFS trusted storage pool. A set of nodes can be
+ added to an existing trusted storage pool or a new storage pool can be
+ formed, or nodes can be removed from an existing trusted storage pool.
+author: Sachidananda Urs (@sac)
+options:
+ state:
+ choices: ["present", "absent"]
+ default: "present"
+ description:
+ - Determines whether the nodes should be attached to the pool or
+ removed from the pool. If the state is present, nodes will be
+ attached to the pool. If state is absent, nodes will be detached
+ from the pool.
+ type: str
+ nodes:
+ description:
+ - List of nodes that have to be probed into the pool.
+ required: true
+ type: list
+ force:
+ type: bool
+ default: false
+ description:
+ - Applicable only when removing nodes from the pool. gluster will
+ refuse to detach a node from the pool if any one of the nodes
+ is down; in such cases force can be used.
+requirements:
+ - GlusterFS > 3.2
+notes:
+ - This module does not support check mode.
+'''
+
+EXAMPLES = '''
+- name: Create a trusted storage pool
+ community.general.gluster_peer:
+ state: present
+ nodes:
+ - 10.0.1.5
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool by force
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.0.1
+ force: true
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from distutils.version import LooseVersion
+
+
+class Peer(object):
+ def __init__(self, module):
+ self.module = module
+ self.state = self.module.params['state']
+ self.nodes = self.module.params['nodes']
+ self.glustercmd = self.module.get_bin_path('gluster', True)
+ self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ self.action = ''
+ self.force = ''
+
+ def gluster_peer_ops(self):
+ if not self.nodes:
+ self.module.fail_json(msg="nodes list cannot be empty")
+ self.force = 'force' if self.module.params.get('force') else ''
+ if self.state == 'present':
+ self.nodes = self.get_to_be_probed_hosts(self.nodes)
+ self.action = 'probe'
+ # In case of peer probe, we do not need `force'
+ self.force = ''
+ else:
+ self.action = 'detach'
+ self.call_peer_commands()
+
+ def get_to_be_probed_hosts(self, hosts):
+ peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
+ rc, output, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ peers_in_cluster = [line.split('\t')[1].strip() for
+ line in filter(None, output.split('\n')[1:])]
+ try:
+ peers_in_cluster.remove('localhost')
+ except ValueError:
+ # It is ok not to have localhost in list
+ pass
+ hosts_to_be_probed = [host for host in hosts if host not in
+ peers_in_cluster]
+ return hosts_to_be_probed
+
+ def call_peer_commands(self):
+ result = {}
+ result['msg'] = ''
+ result['changed'] = False
+
+ for node in self.nodes:
+ peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
+ if self.force:
+ peercmd.append(self.force)
+ rc, out, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ if rc:
+ result['rc'] = rc
+ result['msg'] = err
+ # Fail early, do not wait for the loop to finish
+ self.module.fail_json(**result)
+ else:
+ if 'already in peer' in out or \
+ 'localhost not needed' in out:
+ result['changed'] |= False
+ else:
+ result['changed'] = True
+ self.module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', required=False, default=False),
+ nodes=dict(type='list', required=True),
+ state=dict(type='str', choices=['absent', 'present'],
+ default='present'),
+ ),
+ supports_check_mode=False
+ )
+ pops = Peer(module)
+ required_version = "3.2"
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+ pops.gluster_peer_ops()
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than the required version,
+ # it is considered invalid and True is returned
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py
new file mode 100644
index 00000000..d6444ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gluster_volume.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+ - Create, remove, start, stop and tune GlusterFS volumes
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ state:
+ description:
+ - Use present/absent to ensure whether a volume exists.
+ Use started/stopped to control its availability.
+ required: true
+ choices: ['absent', 'present', 'started', 'stopped']
+ cluster:
+ description:
+ - List of hosts to use for probing and brick setup.
+ host:
+ description:
+ - Override local hostname (for peer probing purposes).
+ replicas:
+ description:
+ - Replica count for volume.
+ arbiters:
+ description:
+ - Arbiter count for volume.
+ stripes:
+ description:
+ - Stripe count for volume.
+ disperses:
+ description:
+ - Disperse count for volume.
+ redundancies:
+ description:
+ - Redundancy count for volume.
+ transport:
+ description:
+ - Transport type for volume.
+ default: tcp
+ choices: [ tcp, rdma, 'tcp,rdma' ]
+ bricks:
+ description:
+ - Brick paths on servers. Multiple brick paths can be separated by commas.
+ aliases: [ brick ]
+ start_on_create:
+ description:
+ - Controls whether the volume is started after creation or not.
+ type: bool
+ default: 'yes'
+ rebalance:
+ description:
+ - Controls whether the cluster is rebalanced after changes.
+ type: bool
+ default: 'no'
+ directory:
+ description:
+ - Directory for limit-usage.
+ options:
+ description:
+ - A dictionary/hash with options/settings for the volume.
+ quota:
+ description:
+ - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
+ force:
+ description:
+ - If the brick is being created in the root partition, the module will fail.
+ Set force to true to override this behaviour.
+ type: bool
+ default: false
+notes:
+ - Requires the GlusterFS CLI tools on the servers.
+ - Will add new bricks, but not remove them.
+author:
+- Taneli Leppä (@rosmo)
+'''
+
+EXAMPLES = """
+- name: Create gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ bricks: /bricks/brick1/g1
+ rebalance: yes
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Tune
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ performance.cache-size: 256MB
+
+- name: Set multiple options on GlusterFS volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ { performance.cache-size: 128MB,
+ write-behind: 'off',
+ quick-read: 'on'
+ }
+
+- name: Start gluster volume
+ community.general.gluster_volume:
+ state: started
+ name: test1
+
+- name: Limit usage
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ directory: /foo
+ quota: 20.0MB
+
+- name: Stop gluster volume
+ community.general.gluster_volume:
+ state: stopped
+ name: test1
+
+- name: Remove gluster volume
+ community.general.gluster_volume:
+ state: absent
+ name: test1
+
+- name: Create gluster volume with multiple bricks
+ community.general.gluster_volume:
+ state: present
+ name: test2
+ bricks: /bricks/brick1/g2,/bricks/brick2/g2
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Remove the bricks from gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick1/b1,/bricks/brick2/b2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+
+- name: Reduce cluster configuration
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick3/b1,/bricks/brick4/b2
+ replicas: 2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
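+
+# A supplementary sketch (not part of the original examples): creating a
+# replicated volume with an arbiter brick via the documented arbiters option;
+# hosts and brick paths are illustrative placeholders.
+- name: Create gluster volume with an arbiter (sketch)
+ community.general.gluster_volume:
+ state: present
+ name: test3
+ bricks: /bricks/brick1/g3
+ replicas: 3
+ arbiters: 1
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ - 192.0.2.12
+ run_once: true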
+"""
+
+import re
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def run_gluster_nofail(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ return None
+ return out
+
+
+def get_peers():
+ out = run_gluster(['peer', 'status'])
+ peers = {}
+ hostname = None
+ uuid = None
+ state = None
+ shortNames = False
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'hostname':
+ hostname = value
+ shortNames = False
+ if key.lower() == 'uuid':
+ uuid = value
+ if key.lower() == 'state':
+ state = value
+ peers[hostname] = [uuid, state]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames is True:
+ peers[row] = [uuid, state]
+ elif row == '':
+ shortNames = False
+ return peers
+
+
+def get_volumes():
+ out = run_gluster(['volume', 'info'])
+
+ volumes = {}
+ volume = {}
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'volume name':
+ volume['name'] = value
+ volume['options'] = {}
+ volume['quota'] = False
+ if key.lower() == 'volume id':
+ volume['id'] = value
+ if key.lower() == 'status':
+ volume['status'] = value
+ if key.lower() == 'transport-type':
+ volume['transport'] = value
+ if value.lower().endswith(' (arbiter)'):
+ if 'arbiters' not in volume:
+ volume['arbiters'] = []
+ value = value[:-10]
+ volume['arbiters'].append(value)
+ elif key.lower() == 'number of bricks':
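+ # the last character of the 'Number of bricks' value (for example '3' in '1 x 3 = 3') is used as the replica count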
+ volume['replicas'] = value[-1:]
+ if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
+ if 'bricks' not in volume:
+ volume['bricks'] = []
+ volume['bricks'].append(value)
+ # Volume options
+ if '.' in key:
+ if 'options' not in volume:
+ volume['options'] = {}
+ volume['options'][key] = value
+ if key == 'features.quota' and value == 'on':
+ volume['quota'] = True
+ else:
+ if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
+ if len(volume) > 0:
+ volumes[volume['name']] = volume
+ volume = {}
+ return volumes
+
+
+def get_quotas(name, nofail):
+ quotas = {}
+ if nofail:
+ out = run_gluster_nofail(['volume', 'quota', name, 'list'])
+ if not out:
+ return quotas
+ else:
+ out = run_gluster(['volume', 'quota', name, 'list'])
+ for row in out.split('\n'):
+ if row[:1] == '/':
+ q = re.split(r'\s+', row)
+ quotas[q[0]] = q[1]
+ return quotas
+
+
+def wait_for_peer(host):
+ for x in range(0, 4):
+ peers = get_peers()
+ if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
+ return True
+ time.sleep(1)
+ return False
+
+
+def probe(host, myhostname):
+ global module
+ out = run_gluster(['peer', 'probe', host])
+ if out.find('localhost') == -1 and not wait_for_peer(host):
+ module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
+
+
+def probe_all_peers(hosts, peers, myhostname):
+ for host in hosts:
+ host = host.strip() # Clean up any extra space for exact comparison
+ if host not in peers:
+ probe(host, myhostname)
+
+
+def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
+ args = ['volume', 'create']
+ args.append(name)
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ if arbiter:
+ args.append('arbiter')
+ args.append(str(arbiter))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
+ args.append('transport')
+ args.append(transport)
+ for brick in bricks:
+ for host in hosts:
+ args.append(('%s:%s' % (host, brick)))
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def start_volume(name):
+ run_gluster(['volume', 'start', name])
+
+
+def stop_volume(name):
+ run_gluster(['volume', 'stop', name])
+
+
+def set_volume_option(name, option, parameter):
+ run_gluster(['volume', 'set', name, option, parameter])
+
+
+def add_bricks(name, new_bricks, stripe, replica, force):
+ args = ['volume', 'add-brick', name]
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ args.extend(new_bricks)
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def remove_bricks(name, removed_bricks, force):
+ # max-tries=12 with default_interval=10 secs
+ max_tries = 12
+ retries = 0
+ success = False
+ args = ['volume', 'remove-brick', name]
+ args.extend(removed_bricks)
+ # create a copy of args to use for commit operation
+ args_c = args[:]
+ args.append('start')
+ run_gluster(args)
+ # remove-brick operation needs to be followed by commit operation.
+ if not force:
+ module.fail_json(msg="Force option is mandatory.")
+ else:
+ while retries < max_tries:
+ last_brick = removed_bricks[-1]
+ out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
+ for row in out.split('\n')[1:]:
+ if 'completed' in row:
+ # remove-brick successful, call commit operation.
+ args_c.append('commit')
+ out = run_gluster(args_c)
+ success = True
+ break
+ else:
+ time.sleep(10)
+ if success:
+ break
+ retries += 1
+ if not success:
+ # remove-brick still in process, needs to be committed after completion.
+ module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
+ "Commit operation needs to be followed.")
+
+
+def reduce_config(name, removed_bricks, replicas, force):
+ out = run_gluster(['volume', 'heal', name, 'info'])
+ summary = out.split("\n")
+ for line in summary:
+ if 'Number' in line and int(line.split(":")[1].strip()) != 0:
+ module.fail_json(msg="Operation aborted, self-heal in progress.")
+ args = ['volume', 'remove-brick', name, 'replica', replicas]
+ args.extend(removed_bricks)
+ if force:
+ args.append('force')
+ else:
+ module.fail_json(msg="Force option is mandatory")
+ run_gluster(args)
+
+
+def do_rebalance(name):
+ run_gluster(['volume', 'rebalance', name, 'start'])
+
+
+def enable_quota(name):
+ run_gluster(['volume', 'quota', name, 'enable'])
+
+
+def set_quota(name, directory, value):
+ run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
+
+
+def main():
+ # MAIN
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
+ cluster=dict(type='list'),
+ host=dict(type='str'),
+ stripes=dict(type='int'),
+ replicas=dict(type='int'),
+ arbiters=dict(type='int'),
+ disperses=dict(type='int'),
+ redundancies=dict(type='int'),
+ transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
+ bricks=dict(type='str', aliases=['brick']),
+ start_on_create=dict(type='bool', default=True),
+ rebalance=dict(type='bool', default=False),
+ options=dict(type='dict', default={}),
+ quota=dict(type='str'),
+ directory=dict(type='str'),
+ force=dict(type='bool', default=False),
+ ),
+ )
+
+ global glusterbin
+ glusterbin = module.get_bin_path('gluster', True)
+
+ changed = False
+
+ action = module.params['state']
+ volume_name = module.params['name']
+ cluster = module.params['cluster']
+ brick_paths = module.params['bricks']
+ stripes = module.params['stripes']
+ replicas = module.params['replicas']
+ arbiters = module.params['arbiters']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
+ transport = module.params['transport']
+ myhostname = module.params['host']
+ start_on_create = module.boolean(module.params['start_on_create'])
+ rebalance = module.boolean(module.params['rebalance'])
+ force = module.boolean(module.params['force'])
+
+ if not myhostname:
+ myhostname = socket.gethostname()
+
+ # Clean up if last element is empty. Consider that yml can look like this:
+ # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
+ if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
+ cluster = cluster[0:-1]
+
+ if cluster is None:
+ cluster = []
+
+ if brick_paths is not None and "," in brick_paths:
+ brick_paths = brick_paths.split(",")
+ else:
+ brick_paths = [brick_paths]
+
+ options = module.params['options']
+ quota = module.params['quota']
+ directory = module.params['directory']
+
+ # get current state info
+ peers = get_peers()
+ volumes = get_volumes()
+ quotas = {}
+ if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
+ quotas = get_quotas(volume_name, True)
+
+ # do the work!
+ if action == 'absent':
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ run_gluster(['volume', 'delete', volume_name])
+ changed = True
+
+ if action == 'present':
+ probe_all_peers(cluster, peers, myhostname)
+
+ # create if it doesn't exist
+ if volume_name not in volumes:
+ create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
+ volumes = get_volumes()
+ changed = True
+
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
+ start_volume(volume_name)
+ changed = True
+
+ # switch bricks
+ new_bricks = []
+ removed_bricks = []
+ all_bricks = []
+ bricks_in_volume = volumes[volume_name]['bricks']
+
+ for node in cluster:
+ for brick_path in brick_paths:
+ brick = '%s:%s' % (node, brick_path)
+ all_bricks.append(brick)
+ if brick not in bricks_in_volume:
+ new_bricks.append(brick)
+
+ if not new_bricks and len(all_bricks) > 0 and \
+ len(all_bricks) < len(bricks_in_volume):
+ for brick in bricks_in_volume:
+ if brick not in all_bricks:
+ removed_bricks.append(brick)
+
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, stripes, replicas, force)
+ changed = True
+
+ if removed_bricks:
+ if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
+ reduce_config(volume_name, removed_bricks, str(replicas), force)
+ else:
+ remove_bricks(volume_name, removed_bricks, force)
+ changed = True
+
+ # handle quotas
+ if quota:
+ if not volumes[volume_name]['quota']:
+ enable_quota(volume_name)
+ quotas = get_quotas(volume_name, False)
+ if directory not in quotas or quotas[directory] != quota:
+ set_quota(volume_name, directory, quota)
+ changed = True
+
+ # set options
+ for option in options.keys():
+ if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
+ set_volume_option(volume_name, option, options[option])
+ changed = True
+
+ else:
+ module.fail_json(msg='failed to create volume %s' % volume_name)
+
+ if action != 'absent' and volume_name not in volumes:
+ module.fail_json(msg='volume not found %s' % volume_name)
+
+ if action == 'started':
+ if volumes[volume_name]['status'].lower() != 'started':
+ start_volume(volume_name)
+ changed = True
+
+ if action == 'stopped':
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ changed = True
+
+ if changed:
+ volumes = get_volumes()
+ if rebalance:
+ do_rebalance(volume_name)
+
+ facts = {}
+ facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
+
+ module.exit_json(changed=changed, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py
new file mode 100644
index 00000000..c1816e63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/grove.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: grove
+short_description: Sends a notification to a grove.io channel
+description:
+ - The C(grove) module sends a message for a service to a Grove.io
+ channel.
+options:
+ channel_token:
+ type: str
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ type: str
+ description:
+ - Name of the service (displayed as the "user" in the message)
+ required: false
+ default: ansible
+ message:
+ type: str
+ description:
+ - Message content
+ required: true
+ url:
+ type: str
+ description:
+ - Service URL for the web client
+ required: false
+ icon_url:
+ type: str
+ description:
+ - Icon for the service
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- name: Sends a notification to a grove.io channel
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
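+
+# A supplementary sketch (not part of the original examples): the documented url
+# and icon_url options attach a link and icon to the notification; the URLs are
+# illustrative placeholders.
+- name: Send a notification with a link back to the deployed service (sketch)
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
+ url: https://my-app.example.com/
+ icon_url: https://my-app.example.com/icon.png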
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py
new file mode 100644
index 00000000..57030556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/gunicorn.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings.
+description:
+ - Starts gunicorn with the parameters specified. Common settings for gunicorn
+ configuration are supported. For additional configuration use a config file
+ See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+ options. It's recommended to always use the chdir option to avoid problems
+ with the location of the app.
+requirements: [gunicorn]
+author:
+ - "Alejandro Gomez (@agmezr)"
+options:
+ app:
+ type: str
+ required: true
+ aliases: ['name']
+ description:
+ - The app module. A name refers to a WSGI callable that should be found in the specified module.
+ venv:
+ type: path
+ aliases: ['virtualenv']
+ description:
+ - 'Path to the virtualenv directory.'
+ config:
+ type: path
+ description:
+ - 'Path to the gunicorn configuration file.'
+ aliases: ['conf']
+ chdir:
+ type: path
+ description:
+ - 'Chdir to specified directory before apps loading.'
+ pid:
+ type: path
+ description:
+ - 'A filename to use for the PID file. If not set and not found in the configuration file, a temporary
+ PID file will be created to check for a successful run of gunicorn.'
+ worker:
+ type: str
+ choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ description:
+ - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+ user:
+ type: str
+ description:
+ - 'Switch worker processes to run as this user.'
+notes:
+ - If not specified in the config file, a temporary error log will be created in the /tmp directory.
+ Make sure you have write access to the /tmp directory; it is not required, but it will help you
+ identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: Simple gunicorn run example
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+
+- name: Run gunicorn on a virtualenv
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ venv: '/workspace/example/venv'
+
+- name: Run gunicorn with a config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+
+- name: Run gunicorn as ansible user with specified pid and config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+ venv: '/workspace/example/venv'
+ pid: '/workspace/example/gunicorn.pid'
+ user: 'ansible'
+'''
+
+RETURN = '''
+gunicorn:
+ description: process id of gunicorn
+ returned: changed
+ type: str
+ sample: "1234"
+'''
+
+import os
+import time
+
+# import ansible utils
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+ ''' search in config file for specified option '''
+ if config and os.path.isfile(config):
+ data_config = None
+ with open(config, 'r') as f:
+ for line in f:
+ if option in line:
+ return line
+ return None
+
+
+def remove_tmp_file(file_path):
+ ''' remove temporary files '''
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+def main():
+
+ # available gunicorn options on module
+ gunicorn_options = {
+ 'config': '-c',
+ 'chdir': '--chdir',
+ 'worker': '-k',
+ 'user': '-u',
+ }
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ app=dict(required=True, type='str', aliases=['name']),
+ venv=dict(required=False, type='path', default=None, aliases=['virtualenv']),
+ config=dict(required=False, default=None, type='path', aliases=['conf']),
+ chdir=dict(required=False, type='path', default=None),
+ pid=dict(required=False, type='path', default=None),
+ user=dict(required=False, type='str'),
+ worker=dict(required=False,
+ type='str',
+ choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ ),
+ )
+ )
+
+ # temporary files in case no option provided
+ tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
+ tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
+
+ # remove temp file if exists
+ remove_tmp_file(tmp_pid_file)
+ remove_tmp_file(tmp_error_log)
+
+ # obtain app name and venv
+ params = module.params
+ app = params['app']
+ venv = params['venv']
+ pid = params['pid']
+
+ # use venv path if exists
+ if venv:
+ gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
+ else:
+ gunicorn_command = 'gunicorn'
+
+ # to daemonize the process
+ options = ["-D"]
+
+ # fill options
+ for option in gunicorn_options:
+ param = params[option]
+ if param:
+ options.append(gunicorn_options[option])
+ options.append(param)
+
+ error_log = search_existing_config(params['config'], 'errorlog')
+ if not error_log:
+ # place error log somewhere in case of fail
+ options.append("--error-logfile")
+ options.append(tmp_error_log)
+
+ pid_file = search_existing_config(params['config'], 'pid')
+ if not params['pid'] and not pid_file:
+ pid = tmp_pid_file
+
+ # add option for pid file if not found on config file
+ if not pid_file:
+ options.append('--pid')
+ options.append(pid)
+
+ # put args together
+ args = [gunicorn_command] + options + [app]
+ rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
+
+ if not err:
+ # wait for gunicorn to dump to log
+ time.sleep(0.5)
+ if os.path.isfile(pid):
+ with open(pid, 'r') as f:
+ result = f.readline().strip()
+
+ if not params['pid']:
+ os.remove(pid)
+
+ module.exit_json(changed=True, pid=result, debug=" ".join(args))
+ else:
+ # if user defined own error log, check that
+ if error_log:
+ error = 'Please check your {0}'.format(error_log.strip())
+ else:
+ if os.path.isfile(tmp_error_log):
+ with open(tmp_error_log, 'r') as f:
+ error = f.read()
+ # delete tmp log
+ os.remove(tmp_error_log)
+ else:
+ error = "Log not found"
+
+ module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
+
+ else:
+ module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py
new file mode 100644
index 00000000..848cc1fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/haproxy.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: haproxy
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
+author:
+- Ravi Bhure (@ravibhure)
+description:
+ - Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
+notes:
+ - Enable, disable and drain commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
+ - Depends on netcat (nc) being available; you need to install the appropriate
+ package for your operating system before this module can be used.
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ - If this parameter is unset, it will be auto-detected.
+ type: str
+ drain:
+ description:
+ - Wait until the server has no active connections or until the timeout
+ determined by wait_interval and wait_retries is reached.
+ - Continue only after the status changes to 'MAINT'.
+ - This overrides the shutdown_sessions option.
+ type: bool
+ default: false
+ host:
+ description:
+ - Name of the backend host to change.
+ type: str
+ required: true
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server.
+ - This can be used to terminate long-running sessions after a server is put
+ into maintenance mode. Overridden by the drain option.
+ type: bool
+ default: no
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ type: path
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ - Note that C(drain) state was added in version 2.4.
+ - It is supported only by HAProxy version 1.5 or later.
+ - When used on versions < 1.5, it will be ignored.
+ type: str
+ required: true
+ choices: [ disabled, drain, enabled ]
+ agent:
+ description:
+ - Disable/enable agent checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: 1.0.0
+ health:
+ description:
+ - Disable/enable health checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: "1.0.0"
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist.
+ type: bool
+ default: no
+ wait:
+ description:
+ - Wait until the server reports a status of 'UP' when C(state=enabled),
+ status of 'MAINT' when C(state=disabled) or status of 'DRAIN' when C(state=drain)
+ type: bool
+ default: no
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ type: int
+ default: 5
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ type: int
+ default: 25
+ weight:
+ description:
+ - The value to pass as the server weight.
+ - If the value ends with the `%` sign, then the new weight will be
+ relative to the initially configured weight.
+ - Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Disable server in 'www' backend pool
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Disable server in 'www' backend pool, also stop health/agent checks
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ health: yes
+ agent: yes
+
+- name: Disable server without backend pool name (apply to all available backend pool)
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+
+- name: Disable server, provide socket file
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+
+- name: Disable server, provide socket file, wait until status reports in maintenance
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+
+# Place server in drain mode, providing a socket file. Then check the server's
+# status every wait_interval seconds (here 1) up to wait_retries times (here 60),
+# continuing once it reports maintenance mode and failing otherwise.
+- community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+ drain: yes
+ wait_interval: 1
+ wait_retries: 60
+
+- name: Disable backend server in 'www' backend pool and drop open sessions to it
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: yes
+
+- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: yes
+
+- name: Enable server in 'www' backend pool
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Enable server in 'www' backend pool wait until healthy
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+
+- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+ wait_retries: 10
+ wait_interval: 5
+
+- name: Enable server in 'www' backend pool with change server(s) weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
+
+- name: Set the server in 'www' backend pool to drain mode
+ community.general.haproxy:
+ state: drain
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
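+
+# A supplementary sketch (not part of the original examples): per the weight option
+# documentation, a value ending in % sets a weight relative to the configured one.
+- name: Enable server in 'www' backend pool with a relative weight of 50% (sketch)
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: '50%'
+ backend: www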
+'''
+
+import csv
+import socket
+import time
+from string import Template
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_text
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled', 'drain']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+
+class HAProxy(object):
+ """
+ Used for communicating with HAProxy through its local UNIX socket interface.
+ Perform common HAProxy tasks such as enabling and disabling servers.
+
+ The complete set of external commands Haproxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.agent = self.module.params['agent']
+ self.health = self.module.params['health']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self._drain = self.module.params['drain']
+ self.command_results = {}
+
+ def execute(self, cmd, timeout=200, capture_output=True):
+ """
+ Executes a HAProxy command by sending a message to HAProxy's local
+ UNIX socket and waiting up to 'timeout' milliseconds for the response.
+ """
+ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.client.connect(self.socket)
+ self.client.sendall(to_bytes('%s\n' % cmd))
+
+ result = b''
+ buf = b''
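+ # Read until HAProxy closes the connection, accumulating the full response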
+ buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a tuple of their
+ corresponding pxnames.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
+
+ def discover_version(self):
+ """
+ Attempt to extract the haproxy version.
+ Return a tuple containing major and minor version.
+ """
+ data = self.execute('show info', 200, False)
+ lines = data.splitlines()
+ line = [x for x in lines if 'Version:' in x]
+ try:
+ version_values = line[0].partition(':')[2].strip().split('.', 3)
+ version = (int(version_values[0]), int(version_values[1]))
+ except (ValueError, TypeError, IndexError):
+ version = None
+
+ return version
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends)
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found) and state is None:
+ self.module.fail_json(
+ msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ if state is not None:
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a tuple of dictionaries containing the status, weight and current sessions for those services, or None if nothing was found.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
+ filter(lambda d: (pxname is None or d['pxname']
+ == pxname) and d['svname'] == svname, r)
+ )
+ )
+ return state or None
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+ Wait for a service to reach the specified status. Try wait_retries times
+ with wait_interval seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+ for i in range(self.wait_retries):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
+ if status in state[0]['status']:
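+ # When draining, also require that current sessions (scur) have dropped to zero and the status reports MAINT before considering the operation complete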
+ if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state[0]['status']):
+ return True
+ else:
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
+ (pxname, svname, status, self.wait_retries))
+
+ def enabled(self, host, backend, weight):
+ """
+ Enabled action: marks the server as UP and re-enables checks. It also
+ reports the current weight for the server (default) and sets the weight
+ of the HAProxy backend server when one is provided.
+ """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if self.agent:
+ cmd += "; enable agent $pxname/$svname"
+ if self.health:
+ cmd += "; enable health $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+ def disabled(self, host, backend, shutdown_sessions):
+ """
+ Disabled action: marks the server as DOWN for maintenance. In this mode, no further
+ checks will be performed on the server until it leaves maintenance;
+ it can also shut down active sessions while disabling the backend host server.
+ """
+ cmd = "get weight $pxname/$svname"
+ if self.agent:
+ cmd += "; disable agent $pxname/$svname"
+ if self.health:
+ cmd += "; disable health $pxname/$svname"
+ cmd += "; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+ def drain(self, host, backend, status='DRAIN'):
+ """
+ Drain action: sets the server to DRAIN mode.
+ In this mode, the server will not accept any new connections
+ other than those that are accepted via persistence.
+ """
+ haproxy_version = self.discover_version()
+
+ # check if haproxy version supports DRAIN state (starting with 1.5)
+ if haproxy_version and (1, 5) <= haproxy_version:
+ cmd = "set server $pxname/$svname state drain"
+ self.execute_for_backends(cmd, backend, host, status)
+
+ def act(self):
+ """
+ Figure out what you want to do from ansible, and then do it.
+ """
+ # Get the state before the run
+ self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
+
+ # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled' and self._drain:
+ self.drain(self.host, self.backend, status='MAINT')
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ elif self.state == 'drain':
+ self.drain(self.host, self.backend)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
+
+ # Report change status
+ self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
+
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=ACTION_CHOICES),
+ host=dict(type='str', required=True),
+ backend=dict(type='str'),
+ weight=dict(type='str'),
+ socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(type='bool', default=False),
+ fail_on_not_found=dict(type='bool', default=False),
+ health=dict(type='bool', default=False),
+ agent=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_retries=dict(type='int', default=WAIT_RETRIES),
+ wait_interval=dict(type='int', default=WAIT_INTERVAL),
+ drain=dict(type='bool', default=False),
+ ),
+ )
+
+ if not module.params['socket']:
+ module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py
new file mode 100644
index 00000000..dd592d6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/helm.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# (c) 2016, Flavio Percoco <flavio@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: For more details, see https://github.com/ansible/ansible/issues/61546.
+ alternative: Use M(community.kubernetes.helm) instead.
+module: helm
+short_description: Manages Kubernetes packages with the Helm package manager
+author: "Flavio Percoco (@flaper87)"
+description:
+ - Install, upgrade, delete and list packages with the Helm package manager.
+requirements:
+ - "pyhelm"
+ - "grpcio"
+options:
+ host:
+ description:
+ - Tiller's server host.
+ type: str
+ default: "localhost"
+ port:
+ description:
+ - Tiller's server port.
+ type: int
+ default: 44134
+ namespace:
+ description:
+ - Kubernetes namespace where the chart should be installed.
+ type: str
+ default: "default"
+ name:
+ description:
+ - Release name to manage.
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), remove (C(absent)), or purge (C(purged)) a package.
+ choices: ['absent', 'purged', 'present']
+ type: str
+ default: "present"
+ chart:
+ description:
+ - A map describing the chart to install. See examples for available options.
+ type: dict
+ default: {}
+ values:
+ description:
+ - A map of value options for the chart.
+ type: dict
+ default: {}
+ disable_hooks:
+ description:
+ - Whether to disable hooks during the uninstall process.
+ type: bool
+ default: 'no'
+'''
+
+RETURN = ''' # '''
+
+EXAMPLES = '''
+- name: Install helm chart
+ community.general.helm:
+ host: localhost
+ chart:
+ name: memcached
+ version: 0.4.0
+ source:
+ type: repo
+ location: https://kubernetes-charts.storage.googleapis.com
+ state: present
+ name: my-memcached
+ namespace: default
+
+- name: Uninstall helm chart
+ community.general.helm:
+ host: localhost
+ state: absent
+ name: my-memcached
+
+- name: Install helm chart from a git repo
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/user/helm-chart.git
+ state: present
+ name: my-example
+ namespace: default
+ values:
+ foo: "bar"
+
+- name: Install helm chart from a git repo specifying path
+ community.general.helm:
+ host: localhost
+ chart:
+ source:
+ type: git
+ location: https://github.com/helm/charts.git
+ path: stable/memcached
+ state: present
+ name: my-memcached
+ namespace: default
+ values: "{{ lookup('file', '/path/to/file/values.yaml') | from_yaml }}"
+'''
+
+import traceback
+HELM_IMPORT_ERR = None
+try:
+ import grpc
+ from pyhelm import tiller
+ from pyhelm import chartbuilder
+except ImportError:
+ HELM_IMPORT_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def install(module, tserver):
+ changed = False
+ params = module.params
+ name = params['name']
+ values = params['values']
+ chart = module.params['chart']
+ namespace = module.params['namespace']
+
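+ # Look for an existing release with the same name and namespace; upgrade it when the chart version differs, otherwise install a new release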
+ chartb = chartbuilder.ChartBuilder(chart)
+ r_matches = (x for x in tserver.list_releases()
+ if x.name == name and x.namespace == namespace)
+ installed_release = next(r_matches, None)
+ if installed_release:
+ if installed_release.chart.metadata.version != chart['version']:
+ tserver.update_release(chartb.get_helm_chart(), False,
+ namespace, name=name, values=values)
+ changed = True
+ else:
+ tserver.install_release(chartb.get_helm_chart(), namespace,
+ dry_run=False, name=name,
+ values=values)
+ changed = True
+
+ return dict(changed=changed)
+
+
+def delete(module, tserver, purge=False):
+ changed = False
+ params = module.params
+
+ if not module.params['name']:
+ module.fail_json(msg='Missing required field name')
+
+ name = module.params['name']
+ disable_hooks = params['disable_hooks']
+
+ try:
+ tserver.uninstall_release(name, disable_hooks, purge)
+ changed = True
+ except grpc._channel._Rendezvous as exc:
+ if 'not found' not in str(exc):
+ raise exc
+
+ return dict(changed=changed)
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=44134),
+ name=dict(type='str', default=''),
+ chart=dict(type='dict'),
+ state=dict(
+ choices=['absent', 'purged', 'present'],
+ default='present'
+ ),
+ # Install options
+ values=dict(type='dict'),
+ namespace=dict(type='str', default='default'),
+
+ # Uninstall options
+ disable_hooks=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True)
+
+ if HELM_IMPORT_ERR:
+ module.fail_json(msg=missing_required_lib('pyhelm'), exception=HELM_IMPORT_ERR)
+
+ host = module.params['host']
+ port = module.params['port']
+ state = module.params['state']
+ tserver = tiller.Tiller(host, port)
+
+ if state == 'present':
+ rst = install(module, tserver)
+ elif state == 'absent':
+ rst = delete(module, tserver)
+ elif state == 'purged':
+ rst = delete(module, tserver, True)
+
+ module.exit_json(**rst)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
new file mode 100644
index 00000000..276b5b12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: "Add or delete app collaborators on Heroku"
+description:
+ - Manages collaborators for Heroku apps.
+ - If set to C(present) and the Heroku user is already a collaborator, then do nothing.
+ - If set to C(present) and the Heroku user is not a collaborator, then add the user to the app.
+ - If set to C(absent) and the Heroku user is a collaborator, then delete the user from the app.
+author:
+ - Marcel Arns (@marns93)
+requirements:
+ - heroku3
+options:
+ api_key:
+ type: str
+ description:
+ - Heroku API key
+ apps:
+ type: list
+ description:
+ - List of Heroku App names
+ required: true
+ suppress_invitation:
+ description:
+ - Suppress email invitation when creating collaborator
+ type: bool
+ default: "no"
+ user:
+ type: str
+ description:
+ - User ID or e-mail
+ required: true
+ state:
+ type: str
+ description:
+ - Create or remove the heroku collaborator
+ choices: ["present", "absent"]
+ default: "present"
+notes:
+ - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
+ - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+'''
+
+EXAMPLES = '''
+- name: Create a heroku collaborator
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: max.mustermann@example.com
+ apps: heroku-example-app
+ state: present
+
+- name: An example of using the module in loop
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: '{{ item.user }}'
+ apps: '{{ item.apps | default(apps) }}'
+ suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
+ state: '{{ item.state | default("present") }}'
+ with_items:
+ - { user: 'a.b@example.com' }
+ - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
+ - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
+
+
+def add_or_delete_heroku_collaborator(module, client):
+ user = module.params['user']
+ state = module.params['state']
+ affected_apps = []
+ result_state = False
+
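+ # Iterate over the requested apps, failing early when an app does not exist, and add or remove the collaborator as needed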
+ for app in module.params['apps']:
+ if app not in client.apps():
+ module.fail_json(msg='App {0} does not exist'.format(app))
+
+ heroku_app = client.apps()[app]
+
+ heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
+
+ if state == 'absent' and user in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.remove_collaborator(user)
+ affected_apps += [app]
+ result_state = True
+ elif state == 'present' and user not in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
+ affected_apps += [app]
+ result_state = True
+
+ return result_state, affected_apps
+
+
+def main():
+ argument_spec = HerokuHelper.heroku_argument_spec()
+ argument_spec.update(
+ user=dict(required=True, type='str'),
+ apps=dict(required=True, type='list'),
+ suppress_invitation=dict(default=False, type='bool'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HerokuHelper(module).get_heroku_client()
+
+ has_changed, msg = add_or_delete_heroku_collaborator(module, client)
+ module.exit_json(changed=has_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py
new file mode 100644
index 00000000..a57e0ab8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip
+short_description: Manage Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip_info
+ description: Retrieve information on failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+ state:
+ description:
+ - Defines whether the IP will be routed or not.
+ - If set to C(routed), I(value) must be specified.
+ type: str
+ choices:
+ - routed
+ - unrouted
+ default: routed
+ value:
+ description:
+ - The new value for the failover IP address.
+ - Required when setting I(state) to C(routed).
+ type: str
+ timeout:
+ description:
+ - Timeout to use when routing or unrouting the failover IP.
+ - Note that the API call returns when the failover IP has been
+ successfully routed to the new address, respectively successfully
+ unrouted.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Set value of failover IP 1.2.3.4 to 5.6.7.8
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ value: 5.6.7.8
+
+- name: Set value of failover IP 1.2.3.4 to unrouted
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ state: unrouted
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover,
+ set_failover,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ state=dict(type='str', default='routed', choices=['routed', 'unrouted']),
+ value=dict(type='str'),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'routed', ['value']),
+ ),
+ )
+
+ failover_ip = module.params['failover_ip']
+ value = get_failover(module, failover_ip)
+ changed = False
+ before = get_failover_state(value)
+
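+ # Determine the desired routing target: the requested value when routing, None when unrouting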
+ if module.params['state'] == 'routed':
+ new_value = module.params['value']
+ else:
+ new_value = None
+
+ if value != new_value:
+ if module.check_mode:
+ value = new_value
+ changed = True
+ else:
+ value, changed = set_failover(module, failover_ip, new_value, timeout=module.params['timeout'])
+
+ after = get_failover_state(value)
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ **after
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py
new file mode 100644
index 00000000..4d6f9f37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_failover_ip_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip_info
+short_description: Retrieve information on Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip
+ description: Manage failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+'''
+
+EXAMPLES = r'''
+- name: Get value of failover IP 1.2.3.4
+ community.general.hetzner_failover_ip_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ register: result
+
+- name: Print value of failover IP 1.2.3.4 in case it is routed
+ ansible.builtin.debug:
+ msg: "1.2.3.4 routes to {{ result.value }}"
+ when: result.state == 'routed'
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+failover_ip:
+ description:
+ - The failover IP.
+ returned: success
+ type: str
+ sample: '1.2.3.4'
+failover_netmask:
+ description:
+ - The netmask for the failover IP.
+ returned: success
+ type: str
+ sample: '255.255.255.255'
+server_ip:
+ description:
+ - The main IP of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: str
+server_number:
+ description:
+ - The number of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover_record,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ failover = get_failover_record(module, module.params['failover_ip'])
+ result = get_failover_state(failover['active_server_ip'])
+ result['failover_ip'] = failover['ip']
+ result['failover_netmask'] = failover['netmask']
+ result['server_ip'] = failover['server_ip']
+ result['server_number'] = failover['server_number']
+ result['changed'] = False
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py
new file mode 100644
index 00000000..ade9bd95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall.py
@@ -0,0 +1,509 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall
+version_added: '0.2.0'
+short_description: Manage Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's dedicated server firewall.
+ - Note that the idempotency check for TCP flags simply compares strings and does not
+ try to interpret the rules. This might change in the future.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall_info
+ description: Retrieve information on firewall configuration.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ required: yes
+ type: str
+ port:
+ description:
+ - Switch port of firewall.
+ type: str
+ choices: [ main, kvm ]
+ default: main
+ state:
+ description:
+ - Status of the firewall.
+ - Firewall is active if state is C(present), and disabled if state is C(absent).
+ type: str
+ default: present
+ choices: [ present, absent ]
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ suboptions:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ ip_version:
+ description:
+ - Internet protocol version.
+ - Note that currently, only IPv4 is supported by Hetzner.
+ required: yes
+ type: str
+ choices: [ ipv4, ipv6 ]
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ - Flags supported by Hetzner are C(syn), C(fin), C(rst), C(psh) and C(urg).
+ - They can be combined with C(|) (logical or) and C(&) (logical and).
+ - See L(the documentation,https://wiki.hetzner.de/index.php/Robot_Firewall/en#Parameter)
+ for more information.
+ type: str
+ action:
+ description:
+ - Action if rule matches.
+ required: yes
+ type: str
+ choices: [ accept, discard ]
+ update_timeout:
+ description:
+ - Timeout to use when configuring the firewall.
+ - Note that the API call returns before the firewall has been
+ successfully set up.
+ type: int
+ default: 30
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Configure firewall for server with main IP 1.2.3.4
+ community.general.hetzner_firewall:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ state: present
+ whitelist_hos: yes
+ rules:
+ input:
+ - name: Allow everything to ports 20-23 from 4.3.2.1/24
+ ip_version: ipv4
+ src_ip: 4.3.2.1/24
+ dst_port: '20-23'
+ action: accept
+ - name: Allow everything to port 443
+ ip_version: ipv4
+ dst_port: '443'
+ action: accept
+ - name: Drop everything else
+ ip_version: ipv4
+ action: discard
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated, and
+ I(wait_for_configured) is set to C(no) or I(timeout) to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.compat import ipaddress as compat_ipaddress
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+
+
+RULE_OPTION_NAMES = [
+ 'name', 'ip_version', 'dst_ip', 'dst_port', 'src_ip', 'src_port',
+ 'protocol', 'tcp_flags', 'action',
+]
+
+RULES = ['input']
+
+
+def restrict_dict(dictionary, fields):
+ result = dict()
+ for k, v in dictionary.items():
+ if k in fields:
+ result[k] = v
+ return result
+
+
+def restrict_firewall_config(config):
+ result = restrict_dict(config, ['port', 'status', 'whitelist_hos'])
+ result['rules'] = dict()
+ for ruleset in RULES:
+ result['rules'][ruleset] = [
+ restrict_dict(rule, RULE_OPTION_NAMES)
+ for rule in config['rules'].get(ruleset) or []
+ ]
+ return result
+
+
+def update(before, after, params, name):
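+ # Copy the current value of 'name' into the desired state, override it with the module parameter when one was given, and report whether this changes the configuration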
+ bv = before.get(name)
+ after[name] = bv
+ changed = False
+ pv = params[name]
+ if pv is not None:
+ changed = pv != bv
+ if changed:
+ after[name] = pv
+ return changed
+
+
+def normalize_ip(ip, ip_version):
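+ # Normalize an IP address (optionally with a CIDR suffix) to its compressed form with an explicit prefix length, so that rule comparisons are stable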
+ if ip is None:
+ return ip
+ if '/' in ip:
+ ip, range = ip.split('/')
+ else:
+ ip, range = ip, ''
+ ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
+ if range == '':
+ range = '32' if ip_version.lower() == 'ipv4' else '128'
+ return ip_addr + '/' + range
+
+
+def update_rules(before, after, params, ruleset):
+ before_rules = before['rules'][ruleset]
+ after_rules = after['rules'][ruleset]
+ params_rules = params['rules'][ruleset]
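+ # Compare the requested rules pairwise against the existing ones after normalizing IPs; any difference in length or content marks the ruleset as changed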
+ changed = len(before_rules) != len(params_rules)
+ for no, rule in enumerate(params_rules):
+ rule['src_ip'] = normalize_ip(rule['src_ip'], rule['ip_version'])
+ rule['dst_ip'] = normalize_ip(rule['dst_ip'], rule['ip_version'])
+ if no < len(before_rules):
+ before_rule = before_rules[no]
+ before_rule['src_ip'] = normalize_ip(before_rule['src_ip'], before_rule['ip_version'])
+ before_rule['dst_ip'] = normalize_ip(before_rule['dst_ip'], before_rule['ip_version'])
+ if before_rule != rule:
+ changed = True
+ after_rules.append(rule)
+ return changed
+
+
+def encode_rule(output, rulename, input):
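+ # Flatten the nested rule structure into the form-encoded keys the Robot webservice expects, for example rules[input][0][ip_version]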
+ for i, rule in enumerate(input['rules'][rulename]):
+ for k, v in rule.items():
+ if v is not None:
+ output['rules[{0}][{1}][{2}]'.format(rulename, i, k)] = v
+
+
+def create_default_rules_object():
+ rules = dict()
+ for ruleset in RULES:
+ rules[ruleset] = []
+ return rules
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ port=dict(type='str', default='main', choices=['main', 'kvm']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ whitelist_hos=dict(type='bool'),
+ rules=dict(type='dict', options=dict(
+ input=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ ip_version=dict(type='str', required=True, choices=['ipv4', 'ipv6']),
+ dst_ip=dict(type='str'),
+ dst_port=dict(type='str'),
+ src_ip=dict(type='str'),
+ src_port=dict(type='str'),
+ protocol=dict(type='str'),
+ tcp_flags=dict(type='str'),
+ action=dict(type='str', required=True, choices=['accept', 'discard']),
+ )),
+ )),
+ update_timeout=dict(type='int', default=30),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Sanitize input
+ module.params['status'] = 'active' if (module.params['state'] == 'present') else 'disabled'
+ if module.params['rules'] is None:
+ module.params['rules'] = {}
+ if module.params['rules'].get('input') is None:
+ module.params['rules']['input'] = []
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+ if not firewall_configured(result, error):
+ module.fail_json(msg='Firewall configuration cannot be read as it is not configured.')
+
+ full_before = result['firewall']
+ if not full_before.get('rules'):
+ full_before['rules'] = create_default_rules_object()
+ before = restrict_firewall_config(full_before)
+
+ # Build wanted (after) state and compare
+ after = dict(before)
+ changed = False
+ changed |= update(before, after, module.params, 'port')
+ changed |= update(before, after, module.params, 'status')
+ changed |= update(before, after, module.params, 'whitelist_hos')
+ after['rules'] = create_default_rules_object()
+ if module.params['status'] == 'active':
+ for ruleset in RULES:
+ changed |= update_rules(before, after, module.params, ruleset)
+
+ # Update if different
+ construct_result = True
+ construct_status = None
+ if changed and not module.check_mode:
+ # https://robot.your-server.de/doc/webservice/en.html#post-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(after)
+ data['whitelist_hos'] = str(data['whitelist_hos']).lower()
+ del data['rules']
+ for ruleset in RULES:
+ encode_rule(data, ruleset, after)
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=module.params['update_timeout'],
+ data=urlencode(data),
+ headers=headers,
+ )
+ if module.params['wait_for_configured'] and not firewall_configured(result, error):
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ skip_first=True,
+ )
+ except CheckDoneTimeoutException as e:
+ result, error = e.result, e.error
+ module.warn('Timeout while waiting for firewall to be configured.')
+
+ full_after = result['firewall']
+ if not full_after.get('rules'):
+ full_after['rules'] = create_default_rules_object()
+ construct_status = full_after['status']
+ if construct_status != 'in process':
+ # Only use result if configuration is done, so that diff will be ok
+ after = restrict_firewall_config(full_after)
+ construct_result = False
+
+ if construct_result:
+ # Construct result (used for check mode, and configuration still in process)
+ full_after = dict(full_before)
+ for k, v in after.items():
+ if k != 'rules':
+ full_after[k] = after[k]
+ if construct_status is not None:
+ # We want 'in process' here
+ full_after['status'] = construct_status
+ full_after['rules'] = dict()
+ for ruleset in RULES:
+ full_after['rules'][ruleset] = after['rules'][ruleset]
+
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ firewall=full_after,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py
new file mode 100644
index 00000000..fde06a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hetzner_firewall_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall_info
+version_added: '0.2.0'
+short_description: Retrieve information on Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's dedicated server firewall.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall
+ description: Configure firewall.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ type: str
+ required: yes
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Get firewall configuration for server with main IP 1.2.3.4
+ community.general.hetzner_firewall_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.firewall }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated, and
+ I(wait_for_configured) is set to C(no) or I(timeout) to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+
+ firewall = result['firewall']
+ if not firewall.get('rules'):
+ firewall['rules'] = dict()
+ for ruleset in ['input']:
+ firewall['rules'][ruleset] = []
+
+ module.exit_json(
+ changed=False,
+ firewall=firewall,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py
new file mode 100644
index 00000000..5c084d3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hg.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Yeukhon Wong <yeukhon@acm.org>
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: hg
+short_description: Manages Mercurial (hg) repositories
+description:
+ - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local addresses.
+author: "Yeukhon Wong (@yeukhon)"
+options:
+ repo:
+ description:
+ - The repository address.
+ required: yes
+ aliases: [ name ]
+ dest:
+ description:
+ - Absolute path of where the repository should be cloned to.
+ This parameter is required, unless C(clone) and C(update) are set to C(no).
+ revision:
+ description:
+ - Equivalent C(-r) option in hg command which could be the changeset, revision number,
+ branch name or even tag.
+ aliases: [ version ]
+ force:
+ description:
+ - Discards uncommitted changes. Runs C(hg update -C). Prior to
+ Ansible 1.9, the default was C(yes).
+ type: bool
+ default: 'no'
+ purge:
+ description:
+ - Deletes untracked files. Runs C(hg purge).
+ type: bool
+ default: 'no'
+ update:
+ description:
+ - If C(no), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: 'yes'
+ clone:
+ description:
+ - If C(no), do not clone the repository if it does not exist locally.
+ type: bool
+ default: 'yes'
+ executable:
+ description:
+ - Path to hg executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+notes:
+ - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+ - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
+ if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
+ bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+'''
+
+EXAMPLES = '''
+- name: Ensure the current working copy is inside the stable branch and delete untracked files if any.
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: yes
+
+- name: Get information about the repository whether or not it has already been cloned locally.
+ community.general.hg:
+ repo: git://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: no
+ update: no
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Hg(object):
+ def __init__(self, module, dest, repo, revision, hg_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.hg_path = hg_path
+
+ def _command(self, args_list):
+ (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+ return (rc, out, err)
+
+ def _list_untracked(self):
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+ return self._command(args)
+
+ def get_revision(self):
+ """
+ hg id -b -i -t returns a string in the format:
+ "<changeset>[+] <branch_name> <tag>"
+ This format lists the state of the current working copy,
+ and indicates whether there are uncommitted changes by the
+ plus sign. Otherwise, the sign is omitted.
+
+ Read the full description via hg id --help
+ """
+ (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def has_local_mods(self):
+ now = self.get_revision()
+ if '+' in now:
+ return True
+ else:
+ return False
+
+ def discard(self):
+ before = self.has_local_mods()
+ if not before:
+ return False
+
+ args = ['update', '-C', '-R', self.dest, '-r', '.']
+ (rc, out, err) = self._command(args)
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ after = self.has_local_mods()
+ if before != after and not after: # no more local modification
+ return True
+
+ def purge(self):
+ # before purge, find out if there are any untracked files
+ (rc1, out1, err1) = self._list_untracked()
+ if rc1 != 0:
+ self.module.fail_json(msg=err1)
+
+ # there are some untracked files
+ if out1 != '':
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
+ (rc2, out2, err2) = self._command(args)
+ if rc2 != 0:
+ self.module.fail_json(msg=err2)
+ return True
+ else:
+ return False
+
+ def cleanup(self, force, purge):
+ discarded = False
+ purged = False
+
+ if force:
+ discarded = self.discard()
+ if purge:
+ purged = self.purge()
+ if discarded or purged:
+ return True
+ else:
+ return False
+
+ def pull(self):
+ return self._command(
+ ['pull', '-R', self.dest, self.repo])
+
+ def update(self):
+ if self.revision is not None:
+ return self._command(['update', '-r', self.revision, '-R', self.dest])
+ return self._command(['update', '-R', self.dest])
+
+ def clone(self):
+ if self.revision is not None:
+ return self._command(['clone', self.repo, self.dest, '-r', self.revision])
+ return self._command(['clone', self.repo, self.dest])
+
+ @property
+ def at_revision(self):
+ """
+ There is no point in pulling from a potentially down/slow remote site
+ if the desired changeset is already the current changeset.
+ """
+ if self.revision is None or len(self.revision) < 7:
+ # Assume it's a rev number, tag, or branch
+ return False
+ (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ if out.startswith(self.revision):
+ return True
+ return False
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True, aliases=['name']),
+ dest=dict(type='path'),
+ revision=dict(type='str', default=None, aliases=['version']),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ update=dict(type='bool', default=True),
+ clone=dict(type='bool', default=True),
+ executable=dict(type='str', default=None),
+ ),
+ )
+ repo = module.params['repo']
+ dest = module.params['dest']
+ revision = module.params['revision']
+ force = module.params['force']
+ purge = module.params['purge']
+ update = module.params['update']
+ clone = module.params['clone']
+ hg_path = module.params['executable'] or module.get_bin_path('hg', True)
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
+
+ # initial states
+ before = ''
+ changed = False
+ cleaned = False
+
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
+
+ hg = Hg(module, dest, repo, revision, hg_path)
+
+ # If there is no hgrc file, then assume repo is absent
+ # and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
+ if not os.path.exists(hgrc):
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ before = hg.get_revision()
+ elif hg.at_revision:
+ # no update needed, don't pull
+ before = hg.get_revision()
+
+ # but force and purge if desired
+ cleaned = hg.cleanup(force, purge)
+ else:
+ # get the current state before doing pulling
+ before = hg.get_revision()
+
+ # can perform force and purge
+ cleaned = hg.cleanup(force, purge)
+
+ (rc, out, err) = hg.pull()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ (rc, out, err) = hg.update()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ after = hg.get_revision()
+ if before != after or cleaned:
+ changed = True
+
+ module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py
new file mode 100644
index 00000000..06c9fca4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hipchat.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hipchat
+short_description: Send a message to Hipchat
+description:
+ - Send a message to a Hipchat room, with options to control the formatting.
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - ID or name of the room.
+ required: true
+ msg_from:
+ type: str
+ description:
+ - Name the message will appear to be sent from. Max length is 15
+ characters - above this it will be truncated.
+ default: Ansible
+ aliases: [from]
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ color:
+ type: str
+ description:
+ - Background color for the message.
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ type: str
+ description:
+ - Message format.
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - If true, a notification will be triggered for users in the room.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ api:
+ type: str
+ description:
+ - API URL if using a self-hosted Hipchat server. For Hipchat API version
+ 2, use the default URI with C(/v2) instead of C(/v1).
+ default: 'https://api.hipchat.com/v1'
+
+author:
+- Shirou Wakayama (@shirou)
+- Paul Bourdel (@pb8226)
+'''
+
+EXAMPLES = '''
+- name: Send a message to a Hipchat room
+ community.general.hipchat:
+ room: notif
+ msg: Ansible task finished
+
+- name: Send a message to a Hipchat room using Hipchat API version 2
+ community.general.hipchat:
+ api: https://api.hipchat.com/v2/
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: Ansible task finished
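+
+# Illustrative example only (not part of the upstream documentation), combining
+# the documented msg_format, color and notify options:
+- name: Send a red HTML-formatted message without notifying the room
+ community.general.hipchat:
+ token: API_TOKEN
+ room: notif
+ msg: <b>Ansible task finished</b>
+ msg_format: html
+ color: red
+ notify: no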
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+ '''Send a message to a Hipchat v1 server.'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+ '''Send a message to a Hipchat v2 server.'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
+ try:
+ if api.find('/v2') != -1:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py
new file mode 100644
index 00000000..21dea647
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew.py
@@ -0,0 +1,971 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
+ - homebrew must already be installed on the target system
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages
+options:
+ name:
+ description:
+ - A list of names of packages to install/remove.
+ aliases: [ 'formula', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "A ':' separated list of paths to search for 'brew' executable.
+ Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
+ providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the package.
+ choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ]
+ default: present
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ type: bool
+ default: no
+ aliases: ['update-brew']
+ upgrade_all:
+ description:
+ - Upgrade all homebrew packages.
+ type: bool
+ default: no
+ aliases: ['upgrade']
+ install_options:
+ description:
+ - Option flags to install a package.
+ aliases: ['options']
+ type: list
+ elements: str
+ upgrade_options:
+ description:
+ - Option flags to upgrade.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option
+ (see the list example at the end of the EXAMPLES section).
+'''
+
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- community.general.homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+ update_homebrew: yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: latest
+ update_homebrew: yes
+
+# Update homebrew and upgrade all packages
+- community.general.homebrew:
+ update_homebrew: yes
+ upgrade_all: yes
+
+# Miscellaneous other examples
+- community.general.homebrew:
+ name: foo
+ state: head
+
+- community.general.homebrew:
+ name: foo
+ state: linked
+
+- community.general.homebrew:
+ name: foo
+ state: absent
+
+- community.general.homebrew:
+ name: foo,bar
+ state: absent
+
+- community.general.homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
+
+- name: Use ignored-pinned option while upgrading all
+ community.general.homebrew:
+ upgrade_all: yes
+ upgrade_options: ignored-pinned
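+
+# Illustrative example (not part of the upstream documentation): passing a list
+# directly to 'name' is more efficient than looping over packages one at a time.
+- community.general.homebrew:
+ name: [foo, bar]
+ state: present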
+'''
+
+RETURN = '''
+msg:
+ description: status message describing the result of the run
+ returned: always
+ type: str
+ sample: "Changed: 0, Unchanged: 2"
+unchanged_pkgs:
+ description:
+ - List of package names which are unchanged after module run
+ returned: success
+ type: list
+ sample: ["awscli", "ag"]
+ version_added: '0.2.0'
+changed_pkgs:
+ description:
+ - List of package names which are changed after module run
+ returned: success
+ type: list
+ sample: ['git', 'git-cola']
+ version_added: '0.2.0'
+'''
+
+import os.path
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
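+ '''Return a compiled regex matching any character NOT listed in the annotated
+ character-class string s (text after '#' and blank lines are ignored).'''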
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plusses
+ \- # dashes
+ : # colons (for URLs)
+ @ # at-sign
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, string_types):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+ '''A valid package is either None or a string of alphanumerics, dots, slashes, plusses, dashes, colons, and at-signs.'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, string_types)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None, upgrade_options=None):
+ if not install_options:
+ install_options = list()
+ if not upgrade_options:
+ upgrade_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options,
+ upgrade_options=upgrade_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.changed_pkgs = []
+ self.unchanged_pkgs = []
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
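+ # 'brew info' includes a 'Built from source' / 'Poured from bottle' line
+ # only for formulae that are actually installed, which this check relies on.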
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew would be updated.'
+ raise HomebrewException(self.message)
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew packages would be upgraded.'
+ raise HomebrewException(self.message)
+ cmd = [self.brew_path, 'upgrade'] + self.upgrade_options
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall', '--force']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ upgrade_options=dict(
+ default=None,
+ type='list',
+ elements='str',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('head', ):
+ state = 'head'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state == 'linked':
+ state = 'linked'
+ if state == 'unlinked':
+ state = 'unlinked'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ update_homebrew = p['update_homebrew']
+ if not update_homebrew:
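+ # When no update was requested, stop brew from auto-updating itself before each command.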
+ module.run_command_environ_update.update(
+ dict(HOMEBREW_NO_AUTO_UPDATE="True")
+ )
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ p['upgrade_options'] = p['upgrade_options'] or []
+ upgrade_options = ['--{0}'.format(upgrade_option)
+ for upgrade_option in p['upgrade_options']]
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options,
+ upgrade_options=upgrade_options)
+ (failed, changed, message) = brew.run()
+ changed_pkgs = brew.changed_pkgs
+ unchanged_pkgs = brew.unchanged_pkgs
+
+ if failed:
+ module.fail_json(msg=message)
+ module.exit_json(
+ changed=changed,
+ msg=message,
+ unchanged_pkgs=unchanged_pkgs,
+ changed_pkgs=changed_pkgs
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py
new file mode 100644
index 00000000..feb1ba68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_cask.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright: (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+- "Indrajit Raychaudhuri (@indrajitr)"
+- "Daniel Jaouen (@danieljaouen)"
+- "Enric Lluelles (@enriclluelles)"
+requirements:
+- "python >= 2.6"
+short_description: Install and uninstall homebrew casks
+description:
+- Manages Homebrew casks.
+options:
+ name:
+ description:
+ - Name of cask to install or remove.
+ aliases: [ 'cask', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the cask.
+ choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ]
+ default: present
+ type: str
+ sudo_password:
+ description:
+ - The sudo password to be passed to SUDO_ASKPASS.
+ required: false
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ - Note that C(brew cask update) is a synonym for C(brew update).
+ type: bool
+ default: no
+ aliases: [ 'update-brew' ]
+ install_options:
+ description:
+ - Option flags to install a package.
+ aliases: [ 'options' ]
+ type: list
+ elements: str
+ accept_external_apps:
+ description:
+ - Allow external apps.
+ type: bool
+ default: no
+ upgrade_all:
+ description:
+ - Upgrade all casks.
+ - Mutually exclusive with C(upgraded) state.
+ type: bool
+ default: no
+ aliases: [ 'upgrade' ]
+ greedy:
+ description:
+ - Upgrade casks that auto update.
+ - Passes --greedy to brew cask outdated when checking
+ if an installed cask has a newer version available.
+ type: bool
+ default: no
+'''
+EXAMPLES = '''
+- name: Install cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+
+- name: Remove cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- name: Allow external app
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ accept_external_apps: True
+
+- name: Remove cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
+
+- name: Upgrade all casks
+ community.general.homebrew_cask:
+ upgrade_all: true
+
+- name: Upgrade given cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: upgraded
+ install_options: force
+
+- name: Upgrade cask with greedy option
+ community.general.homebrew_cask:
+ name: 1password
+ state: upgraded
+ greedy: True
+
+- name: Using sudo password for installing cask
+ community.general.homebrew_cask:
+ name: wireshark
+ state: present
+ sudo_password: "{{ ansible_become_pass }}"
+'''
+
+import os
+import re
+import tempfile
+from distutils import version
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
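+ '''Return a compiled regex matching any character NOT listed in the annotated
+ character-class string s (text after '#' and blank lines are ignored).'''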
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \- # dashes
+ @ # at symbol
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, (string_types)):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+ '''A valid cask is either None or a string of alphanumerics, dots, slashes, dashes, and at-signs.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, string_types)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+
+ @property
+ def brew_version(self):
+ try:
+ return self._brew_version
+ except AttributeError:
+ return None
+
+ @brew_version.setter
+ def brew_version(self, brew_version):
+ self._brew_version = brew_version
+
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path=path, casks=None, state=None,
+ sudo_password=None, update_homebrew=False,
+ install_options=None, accept_external_apps=False,
+ upgrade_all=False, greedy=False):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_outdated(self):
+ if not self.valid_cask(self.current_cask):
+ return False
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'outdated', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'outdated']
+
+ cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask]
+
+ rc, out, err = self.module.run_command(cask_is_outdated_command)
+
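+ # 'brew outdated' produces no output when the cask is already up to date,
+ # hence the empty-string check.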
+ return out != ""
+
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, "list", "--cask"]
+ else:
+ base_opts = [self.brew_path, "cask", "list"]
+
+ cmd = base_opts + [self.current_cask]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def _get_brew_version(self):
+ if self.brew_version:
+ return self.brew_version
+
+ cmd = [self.brew_path, '--version']
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+ # get version string from first line of "brew --version" output
+ version = out.split('\n')[0].split(' ')[1]
+ self.brew_version = version
+ return self.brew_version
+
+ def _brew_cask_command_is_deprecated(self):
+ # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/)
+ return version.LooseVersion(self._get_brew_version()) >= version.LooseVersion('2.6.0')
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.upgrade_all:
+ return self._upgrade_all()
+
+ if self.casks:
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'upgraded':
+ return self._upgrade_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ self.failed = True
+ self.message = "You must select a cask to install."
+ raise HomebrewCaskException(self.message)
+
+ # sudo_password fix ---------------------- {{{
+ def _run_command_with_sudo_password(self, cmd):
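+ # Write a throwaway SUDO_ASKPASS helper script that echoes the stored
+ # password, so 'brew' can satisfy sudo prompts non-interactively.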
+ rc, out, err = '', '', ''
+
+ with tempfile.NamedTemporaryFile() as sudo_askpass_file:
+ sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password))
+ os.chmod(sudo_askpass_file.name, 0o700)
+ sudo_askpass_file.file.close()
+
+ rc, out, err = self.module.run_command(
+ cmd,
+ environ_update={'SUDO_ASKPASS': sudo_askpass_file.name}
+ )
+
+ self.module.add_cleanup_file(sudo_askpass_file.name)
+
+ return (rc, out, err)
+ # /sudo_password fix --------------------- }}}
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Casks would be upgraded.'
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ cmd = [self.brew_path, 'upgrade', '--cask']
+ else:
+ cmd = [self.brew_path, 'cask', 'upgrade']
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
+ self.message = 'Homebrew casks already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew casks upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'install', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'install']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_cask(self):
+ command = 'upgrade'
+
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ command = 'install'
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.message = 'Cask is already upgraded: {0}'.format(
+ self.current_cask,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be upgraded: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, command, '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', command]
+
+ opts = base_opts + self.install_options + [self.current_cask]
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask upgraded: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _upgrade_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._upgrade_current_cask()
+
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'uninstall', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'uninstall']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled --------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ sudo_password=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ accept_external_apps=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ greedy=dict(
+ default=False,
+ type='bool',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ sudo_password = p['sudo_password']
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ greedy = p['greedy']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ accept_external_apps = p['accept_external_apps']
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy,
+ )
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py
new file mode 100644
index 00000000..d31da485
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/homebrew_tap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ type: list
+ elements: str
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+ - When this option is provided, I(name) must be a single tap rather
+ than a list of taps.
+ required: false
+ type: str
+ state:
+ description:
+ - State of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+ type: str
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+
+- name: Tap a Homebrew repository, state absent
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- name: Tap a Homebrew repository using url, state present
+ community.general.homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
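+ # Accepts 'user/repo' and 'user/homebrew-repo' style names, e.g. 'homebrew/dupes'.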
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ tap_name = re.sub('homebrew-', '', tap.lower())
+
+ return tap_name in taps
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ tap,
+ url,
+ ])
+ if rc == 0:
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, unchanged, added, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, unchanged, removed, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin', '/opt/homebrew/bin']
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+ # When a tap URL is provided explicitly, we allow adding a
+ # *single* tap only. Validate and proceed to add the single tap.
+ if len(taps) > 1:
+ msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
new file mode 100644
index 00000000..0b96af04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ environment:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ type: str
+ description:
+ - The username of the person doing the deployment
+ repo:
+ type: str
+ description:
+ - URL of the project repository
+ revision:
+ type: str
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+ community.general.honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
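+ # For illustration, the urlencoded payload sent below looks roughly like
+ # (actual values depend on the task parameters):
+ #   api_key=AAAAAA&deploy%5Benvironment%5D=staging&deploy%5Blocal_username%5D=ansible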
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py
new file mode 100644
index 00000000..1e37aee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_boot.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+- "This module boots a system through its HP iLO interface. The boot media
+ can be one of: cdrom, floppy, hdd, network or usb."
+- This module requires the hpilo python module.
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ media:
+ description:
+ - The boot media to boot the system from
+ choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
+ image:
+ description:
+ - The URL of a cdrom, floppy or usb boot media image.
+ protocol://username:password@hostname:port/filename
+ - protocol is either 'http' or 'https'
+ - username:password is optional
+ - port is optional
+ state:
+ description:
+ - The state of the boot media.
+ - "no_boot: Do not boot from the device"
+ - "boot_once: Boot from the device once and then not thereafter"
+ - "boot_always: Boot from the device each time the server is rebooted"
+ - "connect: Connect the virtual media device and set to boot_always"
+ - "disconnect: Disconnects the virtual media device and set to no_boot"
+ - "poweroff: Power off the server"
+ default: boot_once
+ choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
+ force:
+ description:
+ - Whether to force a reboot (even when the system is already booted).
+ - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ default: no
+ type: bool
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- python-hpilo
+notes:
+- To use a USB key image you need to specify floppy as boot media.
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ media: cdrom
+ image: http://some-web-server/iso/boot.iso
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+
+- name: Power off a server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_HOST
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ state: poweroff
+ delegate_to: localhost
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+ image=dict(type='str'),
+ state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+ force=dict(type='bool', default=False),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ )
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ media = module.params['media']
+ image = module.params['image']
+ state = module.params['state']
+ force = module.params['force']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
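+ # For illustration (assuming hpilo.ssl is the standard library ssl module, as the
+ # lookup above relies on): 'TLSv1_2' maps to hpilo.ssl.PROTOCOL_TLSv1_2 and
+ # 'SSLv3' maps to hpilo.ssl.PROTOCOL_SSLv3.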
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+ changed = False
+ status = {}
+ power_status = 'UNKNOWN'
+
+ if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+ # Workaround for: Error communicating with iLO: Problem manipulating EV
+ try:
+ ilo.set_one_time_boot(media)
+ except hpilo.IloError:
+ time.sleep(60)
+ ilo.set_one_time_boot(media)
+
+ # TODO: Verify if image URL exists/works
+ if image:
+ ilo.insert_virtual_media(media, image)
+ changed = True
+
+ if media == 'cdrom':
+ ilo.set_vm_status('cdrom', state, True)
+ status = ilo.get_vm_status()
+ changed = True
+ elif media in ('floppy', 'usb'):
+ ilo.set_vf_status(state, True)
+ status = ilo.get_vf_status()
+ changed = True
+
+ # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+ if state in ('boot_once', 'boot_always') or force:
+
+ power_status = ilo.get_host_power_status()
+
+ if not force and power_status == 'ON':
+ module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host)
+
+ if power_status == 'ON':
+ ilo.warm_boot_server()
+# ilo.cold_boot_server()
+ changed = True
+ else:
+ ilo.press_pwr_btn()
+# ilo.reset_server()
+# ilo.set_host_power(host_power=True)
+ changed = True
+
+ elif state == 'poweroff':
+
+ power_status = ilo.get_host_power_status()
+
+ if not power_status == 'OFF':
+ ilo.hold_pwr_btn()
+# ilo.set_host_power(host_power=False)
+ changed = True
+
+ module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware and network related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ - macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
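+# For illustration: an entry such as {'Port': '1', 'MAC': '00-11-22-33-44-55'} yields
+# ('hw_eth0', {'macaddress': '00:11:22:33:44:55', 'macaddress_dash': '00-11-22-33-44-55'}),
+# while a non-numeric port falls back to the supplied non_numeric key.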
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hpilo_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware and network related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ - macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py
new file mode 100644
index 00000000..451e4b06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hponcfg.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+- This module configures the HP iLO interface using hponcfg.
+options:
+ path:
+ description:
+ - The XML file as accepted by hponcfg.
+ required: true
+ aliases: ['src']
+ minfw:
+ description:
+ - The minimum firmware level needed.
+ required: false
+ executable:
+ description:
+ - Path to the hponcfg executable (defaults to C(hponcfg), resolved via $PATH).
+ default: hponcfg
+ verbose:
+ description:
+ - Run hponcfg in verbose mode (-v).
+ default: no
+ type: bool
+requirements:
+- hponcfg tool
+notes:
+- You need a working hponcfg on the target system.
+'''
+
+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+ ansible.builtin.copy:
+ content: |
+ <ribcl VERSION="2.0">
+ <login USER_LOGIN="user" PASSWORD="password">
+ <rib_info MODE="WRITE">
+ <mod_global_settings>
+ <session_timeout value="0"/>
+ <ssh_status value="Y"/>
+ <ssh_port value="22"/>
+ <serial_cli_status value="3"/>
+ <serial_cli_speed value="5"/>
+ </mod_global_settings>
+ </rib_info>
+ </login>
+ </ribcl>
+ dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+ executable: /opt/hp/tools/hponcfg
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ minfw=dict(type='str'),
+ executable=dict(default='hponcfg', type='str'),
+ verbose=dict(default=False, type='bool'),
+ )
+ )
+
+ # Consider every action a change (not idempotent yet!)
+ changed = True
+
+ src = module.params['src']
+ minfw = module.params['minfw']
+ executable = module.params['executable']
+ verbose = module.params['verbose']
+
+ options = ' -f %s' % src
+
+ if verbose:
+ options += ' -v'
+
+ if minfw:
+ options += ' -m %s' % minfw
+
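+ # For illustration, the assembled command looks roughly like
+ # 'hponcfg -f /tmp/enable-ssh.xml -v -m 2.25' (the -v and -m flags appear only when set).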
+ rc, stdout, stderr = module.run_command('%s %s' % (executable, options))
+
+ if rc != 0:
+ module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py
new file mode 100644
index 00000000..6ff04131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/htpasswd.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+options:
+ path:
+ type: path
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords
+ name:
+ type: str
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove
+ password:
+ type: str
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ type: str
+ required: false
+ default: "apr_md5_crypt"
+ description:
+ - Encryption scheme to be used. As well as the four choices listed
+ here, you can also use any other hash supported by passlib, such as
+ md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
+ do so, the password file will not be compatible with Apache or Nginx.
+ - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext)'
+ state:
+ type: str
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not
+ create:
+ required: false
+ type: bool
+ default: "yes"
+ description:
+ - Used with C(state=present). If specified, the file will be created
+ if it does not already exist. If set to "no", will fail if the
+ file does not exist
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = """
+- name: Add a user to a password file and ensure permissions are set
+ community.general.htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
+- name: Remove a user from a password file
+ community.general.htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+- name: Add a user to a password file suitable for use by libpam-pwdfile
+ community.general.htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
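+# For illustration: a scheme outside this list (e.g. 'md5_crypt') is handled via a
+# passlib CryptContext built in present() below, but the resulting file may not be
+# readable by Apache or Nginx, as noted in the crypt_scheme documentation above.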
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(type='path', required=True, aliases=["dest", "destfile"]),
+ name=dict(type='str', required=True, aliases=["username"]),
+ password=dict(type='str', required=False, default=None, no_log=True),
+ crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ create=dict(type='bool', default=True),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+ # Check the file for blank lines in an effort to avoid the "need more than 1 value to unpack" error.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+ # Only rewrite the file when it actually contains blank lines
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # If check mode, create a temporary file
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+ [f.write(line) for line in lines if line.strip()]
+ finally:
+ f.close()
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
new file mode 100644
index 00000000..3d4ba84b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
@@ -0,0 +1,2135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+ - instance management.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ required: true
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ required: true
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the ECS name. The name consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), and periods (.).
+ type: str
+ required: true
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. As a constraint, the
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ required: true
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ required: true
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ required: true
+ suboptions:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs you should use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ required: true
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements, consists of 8 to
+ 26 characters. The password must contain at least three of the
+ following character types 'uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ required: false
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ required: true
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ required: false
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be
+ assigned.
+ type: str
+ required: false
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this
+ parameter is left blank, the default security group is bound to
+ the ECS by default.
+ type: list
+ elements: str
+ required: false
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ required: false
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ required: false
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ required: false
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with
+ base64. The maximum size of the content to be injected (before
+ encoding) is 32 KB. For Linux ECSs, this parameter does not take
+ effect when adminPass is used.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an ecs instance
+- name: Create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a eip
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ register: eip
+- name: Create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ register: disk
+- name: Create an instance
+ community.general.hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ returned: success
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the ECS name. Value requirements "Consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.)".
+ type: str
+ returned: success
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ returned: success
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID corresponding to the IP address.
+ type: str
+ returned: success
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ returned: success
+ contains:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs you should use co-p1 and uh-l1
+ disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ returned: success
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements consists of 8 to
+ 26 characters. The password must contain at least three of the
+ following character types "uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ returned: success
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ returned: success
+ contains:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ returned: success
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be assigned.
+ type: str
+ returned: success
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this parameter is left
+ blank, the default security group is bound to the ECS by default.
+ type: list
+ returned: success
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ returned: success
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ returned: success
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ returned: success
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with base64. The maximum
+ size of the content to be injected (before encoding) is 32 KB. For
+ Linux ECSs, this parameter does not take effect when adminPass is
+ used.
+ type: str
+ returned: success
+ config_drive:
+ description:
+ - Specifies the configuration driver.
+ type: str
+ returned: success
+ created:
+ description:
+ - Specifies the time when an ECS was created.
+ type: str
+ returned: success
+ disk_config_type:
+ description:
+ - Specifies the disk configuration type. MANUAL means the image
+ space is not expanded. AUTO means the image space of the system disk
+ will be expanded to the same size as the flavor.
+ type: str
+ returned: success
+ host_name:
+ description:
+ - Specifies the host name of the ECS.
+ type: str
+ returned: success
+ image_name:
+ description:
+ - Specifies the image name of the ECS.
+ type: str
+ returned: success
+ power_state:
+ description:
+ - Specifies the power status of the ECS.
+ type: int
+ returned: success
+ server_alias:
+ description:
+ - Specifies the ECS alias.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+ REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+ and DELETED.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ flavor_name=dict(type='str', required=True),
+ image_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nics=dict(
+ type='list', required=True, elements='dict',
+ options=dict(
+ ip_address=dict(type='str', required=True),
+ subnet_id=dict(type='str', required=True)
+ ),
+ ),
+ root_volume=dict(type='dict', required=True, options=dict(
+ volume_type=dict(type='str', required=True),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ )),
+ vpc_id=dict(type='str', required=True),
+ admin_pass=dict(type='str', no_log=True),
+ data_volumes=dict(type='list', elements='dict', options=dict(
+ volume_id=dict(type='str', required=True),
+ device=dict(type='str')
+ )),
+ description=dict(type='str'),
+ eip_id=dict(type='str'),
+ enable_auto_recovery=dict(type='bool'),
+ enterprise_project_id=dict(type='str'),
+ security_groups=dict(type='list', elements='str'),
+ server_metadata=dict(type='dict'),
+ server_tags=dict(type='dict'),
+ ssh_key_name=dict(type='str'),
+ user_data=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "ecs")
+
+ try:
+ _init(config)
+ is_exist = module.params['id']
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params['id']:
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "admin_pass": module.params.get("admin_pass"),
+ "availability_zone": module.params.get("availability_zone"),
+ "data_volumes": module.params.get("data_volumes"),
+ "description": module.params.get("description"),
+ "eip_id": module.params.get("eip_id"),
+ "enable_auto_recovery": module.params.get("enable_auto_recovery"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "flavor_name": module.params.get("flavor_name"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "nics": module.params.get("nics"),
+ "root_volume": module.params.get("root_volume"),
+ "security_groups": module.params.get("security_groups"),
+ "server_metadata": module.params.get("server_metadata"),
+ "server_tags": module.params.get("server_tags"),
+ "ssh_key_name": module.params.get("ssh_key_name"),
+ "user_data": module.params.get("user_data"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait(config, r, client, timeout)
+
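+    # The create job spawns sub jobs; locate the "createSingleServer" one and
+    # read the new server id from its entities.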
+ sub_job_identity = {
+ "job_type": "createSingleServer",
+ }
+ for item in navigate_value(obj, ["entities", "sub_jobs"]):
+ for k, v in sub_job_identity.items():
+ if item[k] != v:
+ break
+ else:
+ obj = item
+ break
+ else:
+ raise Exception("Can't find the sub job")
+ module.params['id'] = navigate_value(obj, ["entities", "server_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ client = config.client(get_region(module), "ecs", "project")
+
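+    # Apply changes in order: drop NICs that were removed, sync auto recovery,
+    # attach new NICs, then detach removed data volumes and attach new ones.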
+ params = build_delete_nics_parameters(expect_state)
+ params1 = build_delete_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_delete_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ params = build_set_auto_recovery_parameters(expect_state)
+ params1 = build_set_auto_recovery_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_set_auto_recovery_request(module, params, client)
+
+ params = build_attach_nics_parameters(expect_state)
+ params1 = build_attach_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_attach_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ multi_invoke_delete_volume(config, expect_state, client, timeout)
+
+ multi_invoke_attach_data_disk(config, expect_state, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_delete_parameters(opts)
+ if params:
+ r = send_delete_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ preprocess_read_response(r)
+ res["read"] = fill_read_resp_body(r)
+
+ r = send_read_auto_recovery_request(module, client)
+ res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
+
+ return res, None
+
+
+def preprocess_read_response(resp):
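+    # Reshape the raw server body: move the boot volume (bootIndex "0") out of
+    # the attached-volumes list into "root_volume", and re-key the addresses by
+    # port id, folding floating IPs into "eip_address".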
+ v = resp.get("os-extended-volumes:volumes_attached")
+ if v and isinstance(v, list):
+ for i in range(len(v)):
+ if v[i].get("bootIndex") == "0":
+ root_volume = v[i]
+
+ if (i + 1) != len(v):
+ v[i] = v[-1]
+
+ v.pop()
+
+ resp["root_volume"] = root_volume
+ break
+
+ v = resp.get("addresses")
+ if v:
+ rv = {}
+ eips = []
+ for val in v.values():
+ for item in val:
+ if item["OS-EXT-IPS:type"] == "floating":
+ eips.append(item)
+ else:
+ rv[item["OS-EXT-IPS:port_id"]] = item
+
+ for item in eips:
+ k = item["OS-EXT-IPS:port_id"]
+ if k in rv:
+ rv[k]["eip_address"] = item.get("addr", "")
+ else:
+ rv[k] = item
+ item["eip_address"] = item.get("addr", "")
+ item["addr"] = ""
+
+ resp["address"] = rv.values()
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ adjust_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "enterprise_project_id=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={offset}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "cloudservers/detail" + query_link
+
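+    # Page through the list API and keep servers whose identity fields match;
+    # stop early once more than one match has been collected.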
+ result = []
+ p = {'offset': 1}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ adjust_list_resp(identity_obj, item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['offset'] += 1
+
+ return result
+
+
+def build_delete_nics_parameters(opts):
+ params = dict()
+
+ v = expand_delete_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_delete_nics_nics(d, array_index):
+ cv = d["current_state"].get("nics")
+ if not cv:
+ return None
+
+ val = cv
+
+ ev = d.get("nics")
+ if ev:
+ m = [item.get("ip_address") for item in ev]
+ val = [item for item in cv if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("port_id")
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_delete_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics/delete")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_set_auto_recovery_parameters(opts):
+ params = dict()
+
+ v = expand_set_auto_recovery_support_auto_recovery(opts, None)
+ if v is not None:
+ params["support_auto_recovery"] = v
+
+ return params
+
+
+def expand_set_auto_recovery_support_auto_recovery(d, array_index):
+ v = navigate_value(d, ["enable_auto_recovery"], None)
+ return None if v is None else str(v).lower()
+
+
+def send_set_auto_recovery_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(set_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_pass"], None)
+ if not is_empty_value(v):
+ params["adminPass"] = v
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = expand_create_extendparam(opts, None)
+ if not is_empty_value(v):
+ params["extendparam"] = v
+
+ v = navigate_value(opts, ["flavor_name"], None)
+ if not is_empty_value(v):
+ params["flavorRef"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = navigate_value(opts, ["ssh_key_name"], None)
+ if not is_empty_value(v):
+ params["key_name"] = v
+
+ v = navigate_value(opts, ["server_metadata"], None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ v = expand_create_root_volume(opts, None)
+ if not is_empty_value(v):
+ params["root_volume"] = v
+
+ v = expand_create_security_groups(opts, None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ v = expand_create_server_tags(opts, None)
+ if not is_empty_value(v):
+ params["server_tags"] = v
+
+ v = navigate_value(opts, ["user_data"], None)
+ if not is_empty_value(v):
+ params["user_data"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpcid"] = v
+
+ if not params:
+ return params
+
+ params = {"server": params}
+
+ return params
+
+
+def expand_create_extendparam(d, array_index):
+ r = dict()
+
+ r["chargingMode"] = 0
+
+ v = navigate_value(d, ["enterprise_project_id"], array_index)
+ if not is_empty_value(v):
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(d, ["enable_auto_recovery"], array_index)
+ if not is_empty_value(v):
+ r["support_auto_recovery"] = v
+
+ return r
+
+
+def expand_create_nics(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ v = navigate_value(
+ d, ["nics"], new_ai)
+
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_ai["nics"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["nics", "ip_address"], new_ai)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["nics", "subnet_id"], new_ai)
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["eip_id"], array_index)
+ if not is_empty_value(v):
+ r["id"] = v
+
+ return r
+
+
+def expand_create_root_volume(d, array_index):
+ r = dict()
+
+ v = expand_create_root_volume_extendparam(d, array_index)
+ if not is_empty_value(v):
+ r["extendparam"] = v
+
+ v = navigate_value(d, ["root_volume", "size"], array_index)
+ if not is_empty_value(v):
+ r["size"] = v
+
+ v = navigate_value(d, ["root_volume", "volume_type"], array_index)
+ if not is_empty_value(v):
+ r["volumetype"] = v
+
+ return r
+
+
+def expand_create_root_volume_extendparam(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
+ if not is_empty_value(v):
+ r["snapshotId"] = v
+
+ return r
+
+
+def expand_create_security_groups(d, array_index):
+ v = d.get("security_groups")
+ if not v:
+ return None
+
+ return [{"id": i} for i in v]
+
+
+def expand_create_server_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [{"key": k, "value": v1} for k, v1 in v.items()]
+
+
+def send_create_request(module, params, client):
+ url = "cloudservers"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_nics_parameters(opts):
+ params = dict()
+
+ v = expand_attach_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_attach_nics_nics(d, array_index):
+ ev = d.get("nics")
+ if not ev:
+ return None
+
+ val = ev
+
+ cv = d["current_state"].get("nics")
+ if cv:
+ m = [item.get("ip_address") for item in cv]
+ val = [item for item in ev if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("ip_address")
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = item.get("subnet_id")
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_attach_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_volume_request(module, params, client, info):
+ path_parameters = {
+ "volume_id": ["volume_id"],
+ }
+ data = dict((key, navigate_value(info, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_volume), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_data_disk_parameters(opts, array_index):
+ params = dict()
+
+ v = expand_attach_data_disk_volume_attachment(opts, array_index)
+ if not is_empty_value(v):
+ params["volumeAttachment"] = v
+
+ return params
+
+
+def expand_attach_data_disk_volume_attachment(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["data_volumes", "device"], array_index)
+ if not is_empty_value(v):
+ r["device"] = v
+
+ v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
+ if not is_empty_value(v):
+ r["volumeId"] = v
+
+ return r
+
+
+def send_attach_data_disk_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/attachvolume")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_data_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_delete_parameters(opts):
+ params = dict()
+
+ params["delete_publicip"] = False
+
+ params["delete_volume"] = False
+
+ v = expand_delete_servers(opts, None)
+ if not is_empty_value(v):
+ params["servers"] = v
+
+ return params
+
+
+def expand_delete_servers(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = expand_delete_servers_id(d, new_ai)
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_delete_servers_id(d, array_index):
+ return d["ansible_module"].params.get("id")
+
+
+def send_delete_request(module, params, client):
+ url = "cloudservers/delete"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "jobs/{job_id}", result)
+
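+    # Poll the job endpoint until it reports SUCCESS; RUNNING and INIT are
+    # treated as still-pending states.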
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+        module.fail_json(msg="module(hwc_ecs_instance): error "
+                             "waiting for the job to be done, error: %s" % str(ex))
+
+
+def multi_invoke_delete_volume(config, opts, client, timeout):
+ module = config.module
+
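+    # Detach every currently attached data volume that is no longer in the
+    # requested list, waiting for each detach job to finish.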
+ opts1 = None
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in expect]
+ opts1 = {
+ "data_volumes": [
+ i for i in current if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ r = send_delete_volume_request(module, None, client, loop_val[i])
+ async_wait(config, r, client, timeout)
+
+
+def multi_invoke_attach_data_disk(config, opts, client, timeout):
+ module = config.module
+
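+    # Attach only the requested data volumes that are not attached yet,
+    # waiting for each attach job to finish.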
+ opts1 = opts
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in current]
+ opts1 = {
+ "data_volumes": [
+ i for i in expect if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
+ r = send_attach_data_disk_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def send_read_request(module, client):
+ url = build_path(module, "cloudservers/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["server"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ v = fill_read_resp_address(body.get("address"))
+ result["address"] = v
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_read_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_os_extended_volumes_volumes_attached(
+ body.get("os-extended-volumes:volumes_attached"))
+ result["os-extended-volumes:volumes_attached"] = v
+
+ v = fill_read_resp_root_volume(body.get("root_volume"))
+ result["root_volume"] = v
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_read_resp_address(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
+
+ val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
+
+ val["addr"] = item.get("addr")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["image_name"] = value.get("image_name")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_os_extended_volumes_volumes_attached(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["bootIndex"] = item.get("bootIndex")
+
+ val["device"] = item.get("device")
+
+ val["id"] = item.get("id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_root_volume(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["device"] = value.get("device")
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def send_read_auto_recovery_request(module, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def fill_read_auto_recovery_resp_body(body):
+ result = dict()
+
+ result["support_auto_recovery"] = body.get("support_auto_recovery")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "config_drive"], array_index)
+ r["config_drive"] = v
+
+ v = navigate_value(response, ["read", "created"], array_index)
+ r["created"] = v
+
+ v = flatten_data_volumes(response, array_index)
+ r["data_volumes"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
+ r["disk_config_type"] = v
+
+ v = flatten_enable_auto_recovery(response, array_index)
+ r["enable_auto_recovery"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "flavor", "id"], array_index)
+ r["flavor_name"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
+ r["host_name"] = v
+
+ v = navigate_value(response, ["read", "image", "id"], array_index)
+ r["image_id"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "image_name"], array_index)
+ r["image_name"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = flatten_nics(response, array_index)
+ r["nics"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-STS:power_state"], array_index)
+ r["power_state"] = v
+
+ v = flatten_root_volume(response, array_index)
+ r["root_volume"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+ r["server_alias"] = v
+
+ v = flatten_server_tags(response, array_index)
+ r["server_tags"] = v
+
+ v = navigate_value(response, ["read", "key_name"], array_index)
+ r["ssh_key_name"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+ r["user_data"] = v
+
+ v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def flatten_data_volumes(d, array_index):
+ v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+ val = dict()
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+ val["volume_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+ v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+ array_index)
+ return v == "true"
+
+
+def flatten_nics(d, array_index):
+ v = navigate_value(d, ["read", "address"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.address"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "address", "addr"], new_ai)
+ val["ip_address"] = v
+
+ v = navigate_value(
+ d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+ val["port_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+ result = dict()
+
+ v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+ result["device"] = v
+
+ v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+ result["volume_id"] = v
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return None
+
+
+def flatten_server_tags(d, array_index):
+ v = navigate_value(d, ["read", "tags"], array_index)
+ if not v:
+ return None
+
+ r = dict()
+ for item in v:
+        # split only on the first '=' so values containing '=' are preserved
+        # and key-only tags do not raise an IndexError
+        key, _, value = item.partition("=")
+        r[key] = value
+ return r
+
+
+def adjust_options(opts, states):
+ adjust_data_volumes(opts, states)
+
+ adjust_nics(opts, states)
+
+
+def adjust_data_volumes(parent_input, parent_cur):
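+    # Reorder the current data_volumes to line up with the input order
+    # (matched by volume_id) so the later dict comparison is stable.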
+ iv = parent_input.get("data_volumes")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("data_volumes")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["volume_id"] != icv["volume_id"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(data_volumes) failed, "
+ "the array number is not equal")
+
+ parent_cur["data_volumes"] = result
+
+
+def adjust_nics(parent_input, parent_cur):
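+    # Same reordering as adjust_data_volumes, but entries are matched by
+    # ip_address.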
+ iv = parent_input.get("nics")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("nics")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["ip_address"] != icv["ip_address"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(nics) failed, "
+ "the array number is not equal")
+
+ parent_cur["nics"] = result
+
+
+def set_unreadable_options(opts, states):
+ states["admin_pass"] = opts.get("admin_pass")
+
+ states["eip_id"] = opts.get("eip_id")
+
+ set_unread_nics(
+ opts.get("nics"), states.get("nics"))
+
+ set_unread_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ states["security_groups"] = opts.get("security_groups")
+
+ states["server_metadata"] = opts.get("server_metadata")
+
+
+def set_unread_nics(inputv, curv):
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ if not (curv and isinstance(curv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ cv["subnet_id"] = iv.get("subnet_id")
+
+
+def set_unread_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ curv["size"] = inputv.get("size")
+
+ curv["snapshot_id"] = inputv.get("snapshot_id")
+
+ curv["volume_type"] = inputv.get("volume_type")
+
+
+def set_readonly_options(opts, states):
+ opts["config_drive"] = states.get("config_drive")
+
+ opts["created"] = states.get("created")
+
+ opts["disk_config_type"] = states.get("disk_config_type")
+
+ opts["host_name"] = states.get("host_name")
+
+ opts["image_name"] = states.get("image_name")
+
+ set_readonly_nics(
+ opts.get("nics"), states.get("nics"))
+
+ opts["power_state"] = states.get("power_state")
+
+ set_readonly_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ opts["server_alias"] = states.get("server_alias")
+
+ opts["status"] = states.get("status")
+
+
+def set_readonly_nics(inputv, curv):
+ if not (curv and isinstance(curv, list)):
+ return
+
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ iv["port_id"] = cv.get("port_id")
+
+
+def set_readonly_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ inputv["device"] = curv.get("device")
+
+ inputv["volume_id"] = curv.get("volume_id")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["servers"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = None
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["OS-EXT-AZ:availability_zone"] = v
+
+ result["OS-EXT-SRV-ATTR:hostname"] = None
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = None
+
+ v = navigate_value(all_opts, ["user_data"], None)
+ result["OS-EXT-SRV-ATTR:user_data"] = v
+
+ result["OS-EXT-STS:power_state"] = None
+
+ result["config_drive"] = None
+
+ result["created"] = None
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ v = expand_list_flavor(all_opts, None)
+ result["flavor"] = v
+
+ result["id"] = None
+
+ v = expand_list_image(all_opts, None)
+ result["image"] = v
+
+ v = navigate_value(all_opts, ["ssh_key_name"], None)
+ result["key_name"] = v
+
+ v = expand_list_metadata(all_opts, None)
+ result["metadata"] = v
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["status"] = None
+
+ v = expand_list_tags(all_opts, None)
+ result["tags"] = v
+
+ return result
+
+
+def expand_list_flavor(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["flavor_name"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_image(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [k + "=" + v1 for k, v1 in v.items()]
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_list_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_list_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def adjust_list_resp(opts, resp):
+ adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+ iv = parent_input.get("tags")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("tags")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ result = []
+ for iiv in iv:
+ if iiv not in cv:
+ break
+
+ result.append(iiv)
+
+ j = cv.index(iiv)
+ cv[j] = cv[-1]
+ cv.pop()
+
+ if cv:
+ result.extend(cv)
+ parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
new file mode 100644
index 00000000..4aec1b94
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
@@ -0,0 +1,1210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+    - Block storage management.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+                    - The timeout for the delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ required: true
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+            - If the specified disk type is not available in the AZ, the
+              disk creation will fail. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ required: true
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ required: false
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ required: false
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation commands are supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ required: false
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ required: false
+ encryption_id:
+ description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ required: false
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ required: false
+ size:
+ description:
+            - Specifies the disk size, in GB. Its value ranges are as follows:
+              system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
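+
+# An illustrative sketch (not part of the original examples): the same disk
+# can be removed again by setting state to absent.
+- name: Delete the disk
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_test"
+    volume_type: "SATA"
+    state: absent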
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ returned: success
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+            - If the specified disk type is not available in the AZ, the
+              disk creation will fail. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ returned: success
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ returned: success
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ returned: success
+ enable_scsi:
+ description:
+ - If this parameter is set to True, the disk device type will be
+ SCSI, which allows ECS OSs to directly access underlying storage
+              media. SCSI reservation commands are supported. If this parameter
+ is set to False, the disk device type will be VBD, which supports
+ only simple SCSI read/write commands.
+ - If parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ returned: success
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ returned: success
+ encryption_id:
+ description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ returned: success
+ size:
+ description:
+            - Specifies the disk size, in GB. Its value ranges are as follows:
+              system disk 1 GB to 1024 GB, data disk 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ returned: success
+ attachments:
+ description:
+ - Specifies the disk attachment information.
+ type: complex
+ returned: success
+ contains:
+ attached_at:
+ description:
+ - Specifies the time when the disk was attached. Time
+ format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ attachment_id:
+ description:
+ - Specifies the ID of the attachment information.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the device name.
+ type: str
+ returned: success
+ server_id:
+ description:
+ - Specifies the ID of the server to which the disk is
+ attached.
+ type: str
+ returned: success
+ backup_policy_id:
+ description:
+ - Specifies the backup policy ID.
+ type: str
+ returned: success
+ created_at:
+ description:
+ - Specifies the time when the disk was created. Time format is 'UTC
+ YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ is_bootable:
+ description:
+ - Specifies whether the disk is bootable.
+ type: bool
+ returned: success
+ is_readonly:
+ description:
+ - Specifies whether the disk is read-only or read/write. True
+ indicates that the disk is read-only. False indicates that the
+ disk is read/write.
+ type: bool
+ returned: success
+ source_volume_id:
+ description:
+ - Specifies the source disk ID. This parameter has a value if the
+ disk is created from a source disk.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the disk status.
+ type: str
+ returned: success
+ tags:
+ description:
+ - Specifies the disk tags.
+ type: dict
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ volume_type=dict(type='str', required=True),
+ backup_id=dict(type='str'),
+ description=dict(type='str'),
+ enable_full_clone=dict(type='bool'),
+ enable_scsi=dict(type='bool'),
+ enable_share=dict(type='bool'),
+ encryption_id=dict(type='str'),
+ enterprise_project_id=dict(type='str'),
+ image_id=dict(type='str'),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "evs")
+
+ try:
+ _init(config)
+ is_exist = module.params.get('id')
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params.get('id'):
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+        raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "backup_id": module.params.get("backup_id"),
+ "description": module.params.get("description"),
+ "enable_full_clone": module.params.get("enable_full_clone"),
+ "enable_scsi": module.params.get("enable_scsi"),
+ "enable_share": module.params.get("enable_share"),
+ "encryption_id": module.params.get("encryption_id"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "size": module.params.get("size"),
+ "snapshot_id": module.params.get("snapshot_id"),
+ "volume_type": module.params.get("volume_type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+
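+    # The create job is polled through a separate volume client whose endpoint
+    # is rewritten from /v2/ to /v1/.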
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ obj = async_wait(config, r, client1, timeout)
+ module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+
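+    # Name/description changes go through the cloudvolumes update API; size
+    # growth goes through the extend action and is waited on as a job.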
+ params = build_update_parameters(expect_state)
+ params1 = build_update_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_update_request(module, params, client)
+
+ params = build_extend_disk_parameters(expect_state)
+ params1 = build_extend_disk_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ client1 = config.client(get_region(module), "evsv2.1", "project")
+ r = send_extend_disk_request(module, params, client1)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client1, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ r = send_delete_request(module, None, client)
+
+ client = config.client(get_region(module), "volume", "project")
+ client.endpoint = client.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return res, None
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enable_share"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "multiattach=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["availability_zone"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "availability_zone=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={start}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ opts = user_input_parameters(module)
+ name = module.params.get("name")
+ query_link = _build_query_link(opts)
+ link = "os-vendor-volumes/detail" + query_link
+
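+    # Page through the volume list and match candidates by name; stop early
+    # once more than one match has been found.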
+ result = []
+ p = {'start': 0}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ if name == item.get("name"):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['start'] += len(r)
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["backup_id"], None)
+ if not is_empty_value(v):
+ params["backup_id"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = expand_create_metadata(opts, None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["enable_share"], None)
+ if not is_empty_value(v):
+ params["multiattach"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["size"], None)
+ if not is_empty_value(v):
+ params["size"] = v
+
+ v = navigate_value(opts, ["snapshot_id"], None)
+ if not is_empty_value(v):
+ params["snapshot_id"] = v
+
+ v = navigate_value(opts, ["volume_type"], None)
+ if not is_empty_value(v):
+ params["volume_type"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def expand_create_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ if not is_empty_value(v):
+ r["__system__cmkid"] = v
+
+ v = expand_create_metadata_system_encrypted(d, array_index)
+ if not is_empty_value(v):
+ r["__system__encrypted"] = v
+
+ v = expand_create_metadata_full_clone(d, array_index)
+ if not is_empty_value(v):
+ r["full_clone"] = v
+
+ v = expand_create_metadata_hw_passthrough(d, array_index)
+ if not is_empty_value(v):
+ r["hw:passthrough"] = v
+
+ return r
+
+
+def expand_create_metadata_system_encrypted(d, array_index):
+ v = navigate_value(d, ["encryption_id"], array_index)
+ return "1" if v else ""
+
+
+def expand_create_metadata_full_clone(d, array_index):
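+    # Linked cloning is requested by setting metadata full_clone to "0"
+    # (see flatten_enable_full_clone for the reverse mapping).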
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ return "0" if v else ""
+
+
+def expand_create_metadata_hw_passthrough(d, array_index):
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ if v is None:
+ return v
+ return "true" if v else "false"
+
+
+def send_create_request(module, params, client):
+ url = "cloudvolumes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if v is not None:
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_extend_disk_parameters(opts):
+ params = dict()
+
+ v = expand_extend_disk_os_extend(opts, None)
+ if not is_empty_value(v):
+ params["os-extend"] = v
+
+ return params
+
+
+def expand_extend_disk_os_extend(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["size"], array_index)
+ if not is_empty_value(v):
+ r["new_size"] = v
+
+ return r
+
+
+def send_extend_disk_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}/action")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(extend_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "job_id": ["job_id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "jobs/{job_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+        module.fail_json(msg="module(hwc_evs_disk): error "
+                             "waiting for the job to be done, error: %s" % str(ex))
+
+
+def send_read_request(module, client):
+ url = build_path(module, "os-vendor-volumes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volume"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_read_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_read_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = flatten_attachments(response, array_index)
+ r["attachments"] = v
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "metadata", "policy"], array_index)
+ r["backup_policy_id"] = v
+
+ v = navigate_value(response, ["read", "created_at"], array_index)
+ r["created_at"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = flatten_enable_full_clone(response, array_index)
+ r["enable_full_clone"] = v
+
+ v = flatten_enable_scsi(response, array_index)
+ r["enable_scsi"] = v
+
+ v = navigate_value(response, ["read", "multiattach"], array_index)
+ r["enable_share"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "__system__cmkid"], array_index)
+ r["encryption_id"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(
+ response, ["read", "volume_image_metadata", "id"], array_index)
+ r["image_id"] = v
+
+ v = flatten_is_bootable(response, array_index)
+ r["is_bootable"] = v
+
+ v = flatten_is_readonly(response, array_index)
+ r["is_readonly"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "size"], array_index)
+ r["size"] = v
+
+ v = navigate_value(response, ["read", "snapshot_id"], array_index)
+ r["snapshot_id"] = v
+
+ v = navigate_value(response, ["read", "source_volid"], array_index)
+ r["source_volume_id"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(response, ["read", "tags"], array_index)
+ r["tags"] = v
+
+ v = navigate_value(response, ["read", "volume_type"], array_index)
+ r["volume_type"] = v
+
+ return r
+
+
+def flatten_attachments(d, array_index):
+ v = navigate_value(d, ["read", "attachments"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.attachments"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
+ val["attached_at"] = v
+
+ v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
+ val["attachment_id"] = v
+
+ v = navigate_value(d, ["read", "attachments", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
+ val["server_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_full_clone(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "full_clone"],
+ array_index)
+ if v is None:
+ return v
+    return v == "0"
+
+
+def flatten_enable_scsi(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
+ array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def flatten_is_bootable(d, array_index):
+ v = navigate_value(d, ["read", "bootable"], array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def flatten_is_readonly(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "readonly"],
+ array_index)
+ if v is None:
+ return v
+    return v in ["true", "True"]
+
+
+def set_unreadable_options(opts, states):
+ states["backup_id"] = opts.get("backup_id")
+
+
+def set_readonly_options(opts, states):
+ opts["attachments"] = states.get("attachments")
+
+ opts["backup_policy_id"] = states.get("backup_policy_id")
+
+ opts["created_at"] = states.get("created_at")
+
+ opts["is_bootable"] = states.get("is_bootable")
+
+ opts["is_readonly"] = states.get("is_readonly")
+
+ opts["source_volume_id"] = states.get("source_volume_id")
+
+ opts["status"] = states.get("status")
+
+ opts["tags"] = states.get("tags")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volumes"], None)
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ r["__system__cmkid"] = v
+
+ r["attached_mode"] = None
+
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ r["full_clone"] = v
+
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ r["hw:passthrough"] = v
+
+ r["policy"] = None
+
+ r["readonly"] = None
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_volume_image_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_list_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_list_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
new file mode 100644
index 00000000..f53369ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_network_vpc
+description:
+    - Represents a VPC resource.
+short_description: Creates a Huawei Cloud VPC
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '15m'
+ delete:
+ description:
+                    - The timeout for the delete operation.
+ type: str
+ default: '15m'
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ required: true
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a vpc
+ community.general.hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
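+
+# An additional illustrative example (it assumes the same credential variables
+# as the task above): removing the same VPC once it is no longer needed.
+- name: Delete the vpc
+  community.general.hwc_network_vpc:
+    identity_endpoint: "{{ identity_endpoint }}"
+    user: "{{ user }}"
+    password: "{{ password }}"
+    domain: "{{ domain }}"
+    project: "{{ project }}"
+    region: "{{ region }}"
+    name: "vpc_1"
+    cidr: "192.168.100.0/24"
+    state: absent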
+'''
+
+RETURN = '''
+ id:
+ description:
+            - The ID of the VPC.
+ type: str
+ returned: success
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ returned: success
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ returned: success
+ status:
+ description:
+            - The status of the VPC.
+ type: str
+ returned: success
+ routes:
+ description:
+            - The route information.
+ type: complex
+ returned: success
+ contains:
+ destination:
+ description:
+                    - The destination network segment of a route.
+ type: str
+ returned: success
+ next_hop:
+ description:
+                    - The next hop of a route. If the route type is peering,
+                      this is the VPC peering connection ID.
+ type: str
+ returned: success
+ enable_shared_snat:
+ description:
+            - Whether the shared SNAT is enabled.
+ type: bool
+ returned: success
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcClientException404, HwcModule,
+ are_different_dicts, is_empty_value,
+ wait_to_finish, get_region,
+ build_path, navigate_value)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(
+ default='present', choices=['present', 'absent'], type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ delete=dict(default='15m', type='str'),
+ ), default=dict()),
+ name=dict(required=True, type='str'),
+ cidr=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+ config = Config(module, 'vpc')
+
+ state = module.params['state']
+
+ if (not module.params.get("id")) and module.params.get("name"):
+ module.params['id'] = get_id_by_name(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "vpc", "project")
+ fetch = fetch_resource(module, client, link)
+ if fetch:
+ fetch = fetch.get('vpc')
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {"cidr": current_state["cidr"]}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config, self_link(module))
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config, self_link(module))
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config, "vpcs")
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.post(link, resource_to_create(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_done = wait_for_operation(config, 'create', r)
+ v = ""
+ try:
+ v = navigate_value(wait_done, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
+ return fetch_resource(module, client, url)
+
+
+def update(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.put(link, resource_to_update(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_operation(config, 'update', r)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_delete(module, client, link)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_id_by_name(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ name = module.params.get("name")
+ link = "vpcs"
+ query_link = "?marker={marker}&limit=10"
+ link += query_link
+ not_format_keys = re.findall("={marker}", link)
+ none_values = re.findall("=None", link)
+
+ if not (not_format_keys or none_values):
+ r = None
+ try:
+ r = client.get(link)
+ except Exception:
+ pass
+ if r is None:
+ return None
+ r = r.get('vpcs', [])
+ ids = [
+ i.get('id') for i in r if i.get('name', '') == name
+ ]
+ if not ids:
+ return None
+ elif len(ids) == 1:
+ return ids[0]
+ else:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+ elif none_values:
+ module.fail_json(
+ msg="Can not find id by name because url includes None.")
+ else:
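+        # Marker-based pagination: each request returns at most 10 VPCs and the
+        # marker is advanced to the last returned id until no results remain.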
+ p = {'marker': ''}
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('vpcs', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == name:
+ ids.add(i.get('id'))
+ if len(ids) >= 2:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+
+ p['marker'] = r[-1].get('id')
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "vpcs/{id}")
+
+
+def resource_to_create(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def resource_to_update(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def _get_editable_properties(module):
+ return {
+ "cidr": module.params.get("cidr"),
+ }
+
+
+def response_to_hash(module, response):
+ """ Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'id': response.get(u'id'),
+ u'name': response.get(u'name'),
+ u'cidr': response.get(u'cidr'),
+ u'status': response.get(u'status'),
+ u'routes': VpcRoutesArray(
+ response.get(u'routes', []), module).from_response(),
+ u'enable_shared_snat': response.get(u'enable_shared_snat')
+ }
+
+
+def wait_for_operation(config, op_type, op_result):
+ module = config.module
+ op_id = ""
+ try:
+ op_id = navigate_value(op_result, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
+ timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+ states = {
+ 'create': {
+            'allowed': ['CREATING', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ },
+ 'update': {
+            'allowed': ['PENDING_UPDATE', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ }
+ }
+
+ return wait_for_completion(url, timeout, states[op_type]['allowed'],
+ states[op_type]['complete'], config)
+
+
+def wait_for_completion(op_uri, timeout, allowed_states,
+ complete_states, config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ def _refresh_status():
+ r = None
+ try:
+ r = fetch_resource(module, client, op_uri)
+ except Exception:
+ return None, ""
+
+ status = ""
+ try:
+ status = navigate_value(r, ['vpc', 'status'])
+ except Exception:
+ return None, ""
+
+ return r, status
+
+ try:
+ return wait_to_finish(complete_states, allowed_states,
+ _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def wait_for_delete(module, client, link):
+
+ def _refresh_status():
+ try:
+ client.get(link)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+ try:
+ return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+class VpcRoutesArray(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return {
+ u'destination': item.get('destination'),
+ u'nexthop': item.get('next_hop')
+ }
+
+ def _response_from_item(self, item):
+ return {
+ u'destination': item.get(u'destination'),
+ u'next_hop': item.get(u'nexthop')
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
new file mode 100644
index 00000000..f7fb4fae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_smn_topic
+description:
+    - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ type: str
+ required: false
+ name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+              to 256 characters. It can contain only upper- or lower-case letters,
+ digits, hyphens (-), and underscores C(_), and must start with a
+ letter or digit.
+ type: str
+ required: true
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+- name: Create a smn topic
+ community.general.hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user_name: "{{ user_name }}"
+ password: "{{ password }}"
+ domain_name: "{{ domain_name }}"
+ project_name: "{{ project_name }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
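+
+# An additional illustrative example (same assumed credential variables as
+# above); display_name is the only property this module updates in place.
+- name: Set a display name on the smn topic
+  community.general.hwc_smn_topic:
+    identity_endpoint: "{{ identity_endpoint }}"
+    user_name: "{{ user_name }}"
+    password: "{{ password }}"
+    domain_name: "{{ domain_name }}"
+    project_name: "{{ project_name }}"
+    region: "{{ region }}"
+    name: "ansible_smn_topic_test"
+    display_name: "Ansible SMN topic"
+    state: present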
+'''
+
+RETURN = '''
+create_time:
+ description:
+ - Time when the topic was created.
+ returned: success
+ type: str
+display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ returned: success
+ type: str
+name:
+ description:
+ - Name of the topic to be created. The topic name is a string of 1
+      to 256 characters. It can contain only upper- or lower-case letters,
+ digits, hyphens (-), and underscores C(_), and must start with a
+ letter or digit.
+ returned: success
+ type: str
+push_policy:
+ description:
+    - Message pushing policy. 0 indicates that failed messages are cached
+      in the queue. 1 indicates that failed messages are discarded.
+ returned: success
+ type: int
+topic_urn:
+ description:
+ - Resource identifier of a topic, which is unique.
+ returned: success
+ type: str
+update_time:
+ description:
+ - Time when the topic was updated.
+ returned: success
+ type: str
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcModule, navigate_value,
+ are_different_dicts, is_empty_value,
+ build_path, get_region)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ display_name=dict(type='str'),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ config = Config(module, "smn")
+
+ state = module.params['state']
+
+ if not module.params.get("id"):
+ module.params['id'] = get_resource_id(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "smn", "project")
+ fetch = fetch_resource(module, client, link)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_resource_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {'display_name': current_state['display_name']}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ r = None
+ try:
+ r = client.post(link, create_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return get_resource(config, r)
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.put(link, update_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_resource(config, result):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ v = ""
+ try:
+ v = navigate_value(result, ['topic_urn'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ d = {'topic_urn': v}
+ url = build_path(module, 'notifications/topics/{topic_urn}', d)
+
+ return fetch_resource(module, client, url)
+
+
+def get_resource_id(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ query_link = "?offset={offset}&limit=10"
+ link += query_link
+
+ p = {'offset': 0}
+ v = module.params.get('name')
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('topics', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == v:
+ ids.add(i.get('topic_urn'))
+ if len(ids) >= 2:
+ module.fail_json(msg="Multiple resources are found")
+
+ p['offset'] += 1
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "notifications/topics/{id}")
+
+
+def create_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ return params
+
+
+def update_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ return params
+
+
+def _get_resource_editable_properties(module):
+ return {
+ "display_name": module.params.get("display_name"),
+ }
+
+
+def response_to_hash(module, response):
+ """Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'create_time': response.get(u'create_time'),
+ u'display_name': response.get(u'display_name'),
+ u'name': response.get(u'name'),
+ u'push_policy': _push_policy_convert_from_response(
+ response.get('push_policy')),
+ u'topic_urn': response.get(u'topic_urn'),
+ u'update_time': response.get(u'update_time')
+ }
+
+
+def _push_policy_convert_from_response(value):
+ return {
+ 0: "the message sending fails and is cached in the queue",
+ 1: "the failed message is discarded",
+ }.get(int(value))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
new file mode 100644
index 00000000..b53395f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
@@ -0,0 +1,877 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_eip
+description:
+    - Elastic IP management.
+short_description: Creates a resource of Vpc/EIP in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '5m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '5m'
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ required: true
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ required: false
+ suboptions:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is a null character
+                      string, the default value bandwidth is used. For IPv6
+                      addresses, the default value is bandwidth outside China
+                      and traffic in China.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                      size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ required: false
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ required: false
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ required: false
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ required: false
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create an eip and bind it to a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ register: port
+- name: Create an eip and bind it to a port
+ community.general.hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+'''
+
+RETURN = '''
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ returned: success
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ returned: success
+ contains:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is a null character
+                      string, the default value bandwidth is used. For IPv6
+                      addresses, the default value is bandwidth outside China
+                      and traffic in China.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+                      details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                      size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
+ included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ returned: success
+ id:
+ description:
+ - Specifies the ID of dedicated bandwidth.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ returned: success
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ returned: success
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ returned: success
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ returned: success
+ create_time:
+ description:
+ - Specifies the time (UTC time) when the EIP was assigned.
+ type: str
+ returned: success
+ ipv6_address:
+ description:
+ - Specifies the obtained IPv6 EIP.
+ type: str
+ returned: success
+ private_ip_address:
+ description:
+ - Specifies the private IP address bound with the EIP. This
+ parameter is returned only when a private IP address is bound
+ with the EIP.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='5m', type='str'),
+ update=dict(default='5m', type='str'),
+ ), default=dict()),
+ type=dict(type='str', required=True),
+ dedicated_bandwidth=dict(type='dict', options=dict(
+ charge_mode=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ size=dict(type='int', required=True)
+ )),
+ enterprise_project_id=dict(type='str'),
+ ip_version=dict(type='int'),
+ ipv4_address=dict(type='str'),
+ port_id=dict(type='str'),
+ shared_bandwidth_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "ip_version": module.params.get("ip_version"),
+ "ipv4_address": module.params.get("ipv4_address"),
+ "port_id": module.params.get("port_id"),
+ "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
+ "type": module.params.get("type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["publicip", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
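+    # If the EIP is still bound to a port, clear port_id and run an update
+    # first so the address is unbound before the delete request is sent.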
+ if module.params["port_id"]:
+ module.params["port_id"] = ""
+ update(config)
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "publicips/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["ip_version"])
+ if v:
+ query_params.append("ip_version=" + str(v))
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "publicips" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_bandwidth(opts, None)
+ if not is_empty_value(v):
+ params["bandwidth"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ return params
+
+
+def expand_create_bandwidth(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ if not (v or sbwid):
+ raise Exception("must input shared_bandwidth_id or "
+ "dedicated_bandwidth")
+
+ if sbwid:
+ return {
+ "id": sbwid,
+ "share_type": "WHOLE"}
+
+ return {
+ "charge_mode": v["charge_mode"],
+ "name": v["name"],
+ "share_type": "PER",
+ "size": v["size"]}
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["ipv4_address"], array_index)
+ if not is_empty_value(v):
+ r["ip_address"] = v
+
+ v = navigate_value(d, ["ip_version"], array_index)
+ if not is_empty_value(v):
+ r["ip_version"] = v
+
+ v = navigate_value(d, ["type"], array_index)
+ if not is_empty_value(v):
+ r["type"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "publicips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "publicip_id": ["publicip", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "publicips/{publicip_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_version"], None)
+ if not is_empty_value(v):
+ params["ip_version"] = v
+
+ v = navigate_value(opts, ["port_id"], None)
+ if v is not None:
+ params["port_id"] = v
+
+ if not params:
+ return params
+
+ params = {"publicip": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "publicips/{id}")
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "publicips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "create_time"], array_index)
+ r["create_time"] = v
+
+ v = r.get("dedicated_bandwidth")
+ v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
+ r["dedicated_bandwidth"] = v
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "ip_version"], array_index)
+ r["ip_version"] = v
+
+ v = navigate_value(response, ["read", "public_ip_address"], array_index)
+ r["ipv4_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "public_ipv6_address"],
+ array_index)
+ r["ipv6_address"] = v
+
+ v = navigate_value(response, ["read", "port_id"], array_index)
+ r["port_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "private_ip_address"],
+ array_index)
+ r["private_ip_address"] = v
+
+ v = r.get("shared_bandwidth_id")
+ v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
+ r["shared_bandwidth_id"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ return r
+
+
+def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+ if not (v and v == "PER"):
+ return current_value
+
+ result = current_value
+ if not result:
+ result = dict()
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+ if v is not None:
+ result["id"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_name"], array_index)
+ if v is not None:
+ result["name"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_size"], array_index)
+ if v is not None:
+ result["size"] = v
+
+ return result if result else current_value
+
+
+def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+
+ v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+
+ return v if (v1 and v1 == "WHOLE") else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_bandwidth_id(all_opts, None)
+ result["bandwidth_id"] = v
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
+ result["bandwidth_name"] = v
+
+ result["bandwidth_share_type"] = None
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
+ result["bandwidth_size"] = v
+
+ result["create_time"] = None
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_version"], None)
+ result["ip_version"] = v
+
+ v = navigate_value(all_opts, ["port_id"], None)
+ result["port_id"] = v
+
+ result["private_ip_address"] = None
+
+ v = navigate_value(all_opts, ["ipv4_address"], None)
+ result["public_ip_address"] = v
+
+ result["public_ipv6_address"] = None
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ return result
+
+
+def expand_list_bandwidth_id(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ return sbwid
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
new file mode 100644
index 00000000..a4d5921b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_peering_connect
+description:
+    - VPC peering management.
+short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ required: true
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ required: true
+ suboptions:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ required: true
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering VPC
+                      belongs.
+ type: str
+ required: false
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ community.general.hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+'''
+
+RETURN = '''
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ returned: success
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ returned: success
+ contains:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ returned: success
+ project_id:
+ description:
+                    - Specifies the ID of the project to which the peering VPC
+                      belongs.
+ type: str
+ returned: success
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ local_vpc_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ peering_vpc=dict(type='dict', required=True, options=dict(
+ vpc_id=dict(type='str', required=True),
+ project_id=dict(type='str')
+ )),
+ description=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "local_vpc_id": module.params.get("local_vpc_id"),
+ "name": module.params.get("name"),
+ "peering_vpc": module.params.get("peering_vpc"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["peering", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
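+    # Poll the deleted peering connection until the GET returns 404,
+    # which signals that deletion has completed.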
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["local_vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/peerings" + query_link
+
+ result = []
+ p = {'marker': ''}
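+    # Page through the list API with marker-based pagination (10 items
+    # per page) and stop once more than one match has been found.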
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_accept_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["accept_vpc_info"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_request_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["request_vpc_info"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def expand_create_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ if not is_empty_value(v):
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def expand_create_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = ""
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/peerings"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "peering_id": ["peering", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
+
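+    # Poll the peering connection until its status moves from
+    # PENDING_ACCEPTANCE to ACTIVE.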
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["peering", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["PENDING_ACCEPTANCE"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peering"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_read_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
+ array_index)
+ r["local_vpc_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = r.get("peering_vpc")
+ v = flatten_peering_vpc(response, array_index, v, exclude_output)
+ r["peering_vpc"] = v
+
+ return r
+
+
+def flatten_peering_vpc(d, array_index, current_value, exclude_output):
+ result = current_value
+ has_init_value = True
+ if not result:
+ result = dict()
+ has_init_value = False
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
+ array_index)
+ result["project_id"] = v
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
+ result["vpc_id"] = v
+
+ if has_init_value:
+ return result
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peerings"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_accept_vpc_info(all_opts, None)
+ result["accept_vpc_info"] = v
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = expand_list_request_vpc_info(all_opts, None)
+ result["request_vpc_info"] = v
+
+ result["status"] = None
+
+ return result
+
+
+def expand_list_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = None
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_list_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
new file mode 100644
index 00000000..cf0718f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
@@ -0,0 +1,1160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_port
+description:
+    - VPC port management.
+short_description: Creates a resource of Vpc/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ required: true
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ required: false
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+                      Configure an independent security group for the port if a
+                      large CIDR block (subnet mask less than 24) is configured
+                      for the allowed_address_pairs parameter.
+ type: str
+ required: false
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ required: false
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ required: false
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ required: false
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ required: false
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ required: false
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ community.general.hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
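+
+# Illustrative only: the create timeout documented above can be raised
+# through the timeouts option (values here are placeholders).
+- name: Create a port with a longer create timeout
+  community.general.hwc_vpc_port:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.34"
+    timeouts:
+      create: "30m"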
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ returned: success
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ returned: success
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+                    - Specifies the IP address. It cannot be set to 0.0.0.0.
+                      Configure an independent security group for the port if a
+                      large CIDR block (subnet mask less than 24) is configured
+                      for the allowed_address_pairs parameter.
+ type: str
+ returned: success
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ returned: success
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended option of DHCP.
+ type: list
+ returned: success
+ contains:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ returned: success
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ returned: success
+ security_groups:
+ description:
+            - Specifies the IDs of the security groups.
+ type: list
+ returned: success
+ mac_address:
+ description:
+ - Specifies the port MAC address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ subnet_id=dict(type='str', required=True),
+ admin_state_up=dict(type='bool'),
+ allowed_address_pairs=dict(
+ type='list', elements='dict',
+ options=dict(
+ ip_address=dict(type='str'),
+ mac_address=dict(type='str')
+ ),
+ ),
+ extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ value=dict(type='str')
+ )),
+ ip_address=dict(type='str'),
+ name=dict(type='str'),
+ security_groups=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "admin_state_up": module.params.get("admin_state_up"),
+ "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+ "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
+ "ip_address": module.params.get("ip_address"),
+ "name": module.params.get("name"),
+ "security_groups": module.params.get("security_groups"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["port", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "ports/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
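+    # Only the first entry of the port's fixed_ips list is mapped back into
+    # the module's ip_address option.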
+ array_index = {
+ "read.fixed_ips": 0,
+ }
+
+ return update_properties(module, res, array_index, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["subnet_id"])
+ if v:
+ query_params.append("network_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ v = navigate_value(opts, ["admin_state_up"])
+ if v:
+ query_params.append("admin_state_up=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "ports" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_state_up"], None)
+ if not is_empty_value(v):
+ params["admin_state_up"] = v
+
+ v = expand_create_allowed_address_pairs(opts, None)
+ if not is_empty_value(v):
+ params["allowed_address_pairs"] = v
+
+ v = expand_create_extra_dhcp_opts(opts, None)
+ if not is_empty_value(v):
+ params["extra_dhcp_opts"] = v
+
+ v = expand_create_fixed_ips(opts, None)
+ if not is_empty_value(v):
+ params["fixed_ips"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["network_id"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_create_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
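+# The port create API expects a fixed_ips list; the module's single
+# ip_address option is wrapped into that list here.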
+def expand_create_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_create_request(module, params, client):
+ url = "ports"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "port_id": ["port", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "ports/{port_id}", data)
+
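+    # Poll the port until it leaves the BUILD state; both ACTIVE and DOWN
+    # count as successfully created.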
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["port", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ ["BUILD"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = expand_update_allowed_address_pairs(opts, None)
+ if v is not None:
+ params["allowed_address_pairs"] = v
+
+ v = expand_update_extra_dhcp_opts(opts, None)
+ if v is not None:
+ params["extra_dhcp_opts"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_update_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_update_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "ports/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["port"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_read_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "admin_state_up"], array_index)
+ r["admin_state_up"] = v
+
+ v = r.get("allowed_address_pairs")
+ v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
+ r["allowed_address_pairs"] = v
+
+ v = r.get("extra_dhcp_opts")
+ v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
+ r["extra_dhcp_opts"] = v
+
+ v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
+ array_index)
+ r["ip_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "mac_address"], array_index)
+ r["mac_address"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "security_groups"], array_index)
+ r["security_groups"] = v
+
+ v = navigate_value(response, ["read", "network_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
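+# Merge the allowed address pairs read from the API into the user-supplied
+# list, updating existing entries by index and appending entries that carry
+# data.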
+def flatten_allowed_address_pairs(d, array_index,
+ current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "allowed_address_pairs"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.allowed_address_pairs"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
+ new_array_index)
+ val["ip_address"] = v
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
+ new_array_index)
+ val["mac_address"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "extra_dhcp_opts"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.extra_dhcp_opts"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
+ new_array_index)
+ val["name"] = v
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
+ new_array_index)
+ val["value"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["ports"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["admin_state_up"], None)
+ result["admin_state_up"] = v
+
+ v = expand_list_allowed_address_pairs(all_opts, None)
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = None
+
+ result["binding_vnic_type"] = None
+
+ result["device_id"] = None
+
+ result["device_owner"] = None
+
+ result["dns_name"] = None
+
+ v = expand_list_extra_dhcp_opts(all_opts, None)
+ result["extra_dhcp_opts"] = v
+
+ v = expand_list_fixed_ips(all_opts, None)
+ result["fixed_ips"] = v
+
+ result["id"] = None
+
+ result["mac_address"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["network_id"] = v
+
+ v = navigate_value(all_opts, ["security_groups"], None)
+ result["security_groups"] = v
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ return result
+
+
+def expand_list_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ transformed["mac_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ transformed["opt_value"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ transformed["ip_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_list_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
new file mode 100644
index 00000000..901755f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_private_ip
+description:
+    - VPC private IP management.
+short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection.
+    - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP with these options exists, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+              assigned. Cannot be changed after the private IP is created.
+ type: str
+ required: true
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+              automatically assigns an IP address. Cannot be changed after the
+              private IP is created.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a private ip
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a private ip
+ community.general.hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ subnet_id=dict(type='str', required=True),
+ ip_address=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
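+            # Private IPs cannot be updated in place, so any difference between
+            # the requested and existing options is treated as an error.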
+ if are_different_dicts(expect, current):
+ raise Exception(
+                    "Cannot change option from (%s) to (%s) of an"
+ " existing resource.(%s)" % (current, expect, module.params.get('id')))
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "ip_address": module.params.get("ip_address"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
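+    # The create API returns a list of private IPs; record the ID of the
+    # first (and only) entry.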
+ module.params['id'] = navigate_value(r, ["privateips", "id"],
+ {"privateips": 0})
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_address"], None)
+ if not is_empty_value(v):
+ params["ip_address"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["subnet_id"] = v
+
+ if not params:
+ return params
+
+ params = {"privateips": [params]}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "privateips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "privateips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "privateips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "ip_address"], array_index)
+ r["ip_address"] = v
+
+ v = navigate_value(response, ["read", "subnet_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_address"], None)
+ result["ip_address"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["subnet_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
new file mode 100644
index 00000000..31829dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_route
+description:
+    - VPC route management.
+short_description: Creates a resource of Vpc/Route in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
+    - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ required: true
+ next_hop:
+ description:
+            - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ required: true
+ vpc_id:
+ description:
+            - Specifies the VPC ID to which the route is added.
+ type: str
+ required: true
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ required: false
+ default: 'peering'
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a peering connect
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ register: connect
+- name: Create a route
+ community.general.hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+'''
+
+RETURN = '''
+ id:
+ description:
+ - UUID of the route.
+ type: str
+ returned: success
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ returned: success
+ next_hop:
+ description:
+            - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+            - Specifies the VPC ID to which the route is added.
+ type: str
+ returned: success
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ destination=dict(type='str', required=True),
+ next_hop=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ type=dict(type='str', default='peering'),
+ id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
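+        # A supplied id is authoritative: the route is read directly and any
+        # differing option is rejected, because routes cannot be updated.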
+ if module.params.get("id"):
+ resource = get_resource_by_id(config)
+ if module.params['state'] == 'present':
+ opts = user_input_parameters(module)
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing route.(%s)" % (resource, opts,
+ config.module.params.get(
+ 'id')))
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "destination": module.params.get("destination"),
+ "next_hop": module.params.get("next_hop"),
+ "type": module.params.get("type"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["route", "id"])
+
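+    # The create response already contains the full route, so it is mapped
+    # straight into the return values without status polling.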
+ result = update_properties(module, {"read": fill_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+
+def get_resource_by_id(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_resp_body(r)
+
+ result = update_properties(module, res, None, exclude_output)
+ return result
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["type"])
+ if v:
+ query_params.append("type=" + str(v))
+
+ v = navigate_value(opts, ["destination"])
+ if v:
+ query_params.append("destination=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/routes" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["destination"], None)
+ if not is_empty_value(v):
+ params["destination"] = v
+
+ v = navigate_value(opts, ["next_hop"], None)
+ if not is_empty_value(v):
+ params["nexthop"] = v
+
+ v = navigate_value(opts, ["type"], None)
+ if not is_empty_value(v):
+ params["type"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"route": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/routes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["route"], None)
+
+
+def fill_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "destination"], array_index)
+ r["destination"] = v
+
+ v = navigate_value(response, ["read", "nexthop"], array_index)
+ r["next_hop"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["routes"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["destination"], None)
+ result["destination"] = v
+
+ v = navigate_value(all_opts, ["id"], None)
+ result["id"] = v
+
+ v = navigate_value(all_opts, ["next_hop"], None)
+ result["nexthop"] = v
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
new file mode 100644
index 00000000..60351815
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+    - VPC security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+    - If the I(id) option is provided, it takes precedence over I(name),
+      I(enterprise_project_id) and I(vpc_id) for security group selection.
+    - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+      group selection. If more than one security group with these options
+      exists, execution is aborted.
+    - No parameter supports updating. If any option is changed, the module
+      will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+              group.
+ type: str
+ required: false
+ default: 0
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ returned: success
+ rules:
+ description:
+            - Specifies the security group rules, which ensure that resources
+              in the security group can communicate with one another.
+ type: complex
+ returned: success
+ contains:
+ description:
+ description:
+ - Provides supplementary information about the security
+ group rule.
+ type: str
+ returned: success
+ direction:
+ description:
+ - Specifies the direction of access control. The value can
+ be egress or ingress.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4
+ or IPv6.
+ type: str
+ returned: success
+ id:
+ description:
+ - Specifies the security group rule ID.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to
+ 65535. If the protocol is not icmp, the value cannot be
+ smaller than the port_range_min value. An empty value
+ indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1
+ to 65535. The value cannot be greater than the
+ port_range_max value. An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp,
+ udp, or others. If the parameter is left blank, the
+ security group supports all protocols.
+ type: str
+ returned: success
+ remote_address_group_id:
+ description:
+ - Specifies the ID of remote IP address group.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control
+ direction is set to egress, the parameter specifies the
+ source IP address. If the access control direction is set
+ to ingress, the parameter specifies the destination IP
+ address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ name=dict(type='str', required=True),
+ enterprise_project_id=dict(type='str'),
+ vpc_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = read_resource(config)
+ if module.params['state'] == 'present':
+ check_resource_option(resource, module)
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ result = resource
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def check_resource_option(resource, module):
+ opts = user_input_parameters(module)
+
+ resource = {
+ "enterprise_project_id": resource.get("enterprise_project_id"),
+ "name": resource.get("name"),
+ "vpc_id": resource.get("vpc_id"),
+ "id": resource.get("id"),
+ }
+
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (resource, opts,
+ module.params.get('id')))
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group", "id"])
+
+ result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-groups" + query_link
+
+ result = []
+ p = {'marker': ''}
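+    # Page through the list API using marker-based pagination; stop as soon
+    # as more than one matching security group has been found or the API
+    # returns no more results.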
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-groups"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-groups/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-groups/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ if not exclude_output:
+ v = r.get("rules")
+ v = flatten_rules(response, array_index, v, exclude_output)
+ r["rules"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def flatten_rules(d, array_index, current_value, exclude_output):
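+    # Flatten the security_group_rules entries of the read response into the
+    # module's 'rules' output, merging each entry with any value already
+    # present in current_value.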
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "security_group_rules"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.security_group_rules"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "description"],
+ new_array_index)
+ val["description"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "direction"],
+ new_array_index)
+ val["direction"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
+ new_array_index)
+ val["ethertype"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "id"],
+ new_array_index)
+ val["id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
+ new_array_index)
+ val["port_range_max"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
+ new_array_index)
+ val["port_range_min"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "protocol"],
+ new_array_index)
+ val["protocol"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
+ new_array_index)
+ val["remote_address_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
+ new_array_index)
+ val["remote_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
+ new_array_index)
+ val["remote_ip_prefix"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["security_group_rules"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
new file mode 100644
index 00000000..f92c8276
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
@@ -0,0 +1,570 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+    - VPC security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+  - If the I(id) option is provided, it takes precedence over the other
+    options for security group rule selection.
+  - I(security_group_id) is used for security group rule selection. If more
+    than one security group rule with this option exists, execution is
+    aborted.
+  - No parameter supports updating. If any option is changed, the module
+    will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ required: true
+ security_group_id:
+ description:
+            - Specifies the security group ID, which uniquely identifies the
+              security group to which the security group rule belongs.
+ type: str
+ required: true
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ required: false
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ required: false
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ required: false
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ required: false
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ required: false
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ required: false
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: Create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ register: sg
+- name: Create a security group rule
+ community.general.hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 22
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
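+
+# Illustrative sketch only: reuses the sg result registered above and allows
+# all egress traffic towards members of the same security group.
+- name: Create an egress rule that references a peer security group
+  community.general.hwc_vpc_security_group_rule:
+    direction: "egress"
+    ethertype: "IPv4"
+    security_group_id: "{{ sg.id }}"
+    remote_group_id: "{{ sg.id }}"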
+'''
+
+RETURN = '''
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ returned: success
+ security_group_id:
+ description:
+            - Specifies the security group ID, which uniquely identifies the
+              security group to which the security group rule belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ direction=dict(type='str', required=True),
+ security_group_id=dict(type='str', required=True),
+ description=dict(type='str'),
+ ethertype=dict(type='str'),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ protocol=dict(type='str'),
+ remote_group_id=dict(type='str'),
+ remote_ip_prefix=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (current, expect, module.params.get('id')))
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "direction": module.params.get("direction"),
+ "ethertype": module.params.get("ethertype"),
+ "port_range_max": module.params.get("port_range_max"),
+ "port_range_min": module.params.get("port_range_min"),
+ "protocol": module.params.get("protocol"),
+ "remote_group_id": module.params.get("remote_group_id"),
+ "remote_ip_prefix": module.params.get("remote_ip_prefix"),
+ "security_group_id": module.params.get("security_group_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["security_group_id"])
+ if v:
+ query_link += "&security_group_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-group-rules" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["direction"], None)
+ if not is_empty_value(v):
+ params["direction"] = v
+
+ v = navigate_value(opts, ["ethertype"], None)
+ if not is_empty_value(v):
+ params["ethertype"] = v
+
+ v = navigate_value(opts, ["port_range_max"], None)
+ if not is_empty_value(v):
+ params["port_range_max"] = v
+
+ v = navigate_value(opts, ["port_range_min"], None)
+ if not is_empty_value(v):
+ params["port_range_min"] = v
+
+ v = navigate_value(opts, ["protocol"], None)
+ if not is_empty_value(v):
+ params["protocol"] = v
+
+ v = navigate_value(opts, ["remote_group_id"], None)
+ if not is_empty_value(v):
+ params["remote_group_id"] = v
+
+ v = navigate_value(opts, ["remote_ip_prefix"], None)
+ if not is_empty_value(v):
+ params["remote_ip_prefix"] = v
+
+ v = navigate_value(opts, ["security_group_id"], None)
+ if not is_empty_value(v):
+ params["security_group_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group_rule": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-group-rules"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rule"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "direction"], array_index)
+ r["direction"] = v
+
+ v = navigate_value(response, ["read", "ethertype"], array_index)
+ r["ethertype"] = v
+
+ v = navigate_value(response, ["read", "port_range_max"], array_index)
+ r["port_range_max"] = v
+
+ v = navigate_value(response, ["read", "port_range_min"], array_index)
+ r["port_range_min"] = v
+
+ v = navigate_value(response, ["read", "protocol"], array_index)
+ r["protocol"] = v
+
+ v = navigate_value(response, ["read", "remote_group_id"], array_index)
+ r["remote_group_id"] = v
+
+ v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
+ r["remote_ip_prefix"] = v
+
+ v = navigate_value(response, ["read", "security_group_id"], array_index)
+ r["security_group_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rules"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["direction"], None)
+ result["direction"] = v
+
+ v = navigate_value(all_opts, ["ethertype"], None)
+ result["ethertype"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["port_range_max"], None)
+ result["port_range_max"] = v
+
+ v = navigate_value(all_opts, ["port_range_min"], None)
+ result["port_range_min"] = v
+
+ v = navigate_value(all_opts, ["protocol"], None)
+ result["protocol"] = v
+
+ result["remote_address_group_id"] = None
+
+ v = navigate_value(all_opts, ["remote_group_id"], None)
+ result["remote_group_id"] = v
+
+ v = navigate_value(all_opts, ["remote_ip_prefix"], None)
+ result["remote_ip_prefix"] = v
+
+ v = navigate_value(all_opts, ["security_group_id"], None)
+ result["security_group_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
new file mode 100644
index 00000000..ccf18050
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
@@ -0,0 +1,734 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_subnet
+description:
+ - subnet management.
+short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '15m'
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs. Cannot
+ be changed after creating the subnet.
+ type: str
+ required: true
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs. Cannot be changed
+ after creating the subnet.
+ type: str
+ required: false
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+              be true (enabled) or false (disabled), and the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ required: false
+ dns_address:
+ description:
+            - Specifies the DNS server addresses for the subnet. The address
+              at the head of the list will be used first.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+- community.general.hwc
+
+'''
+
+EXAMPLES = '''
+# create subnet
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
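+
+# Illustrative sketch only: the availability zone and DNS addresses below are
+# placeholder variables.
+- name: Create subnet with DNS servers in a specific AZ
+  community.general.hwc_vpc_subnet:
+    vpc_id: "{{ vpc.id }}"
+    cidr: "192.168.100.0/26"
+    gateway_ip: "192.168.100.32"
+    name: "ansible_network_subnet_test"
+    availability_zone: "{{ my_availability_zone }}"
+    dns_address:
+      - "{{ primary_dns }}"
+      - "{{ secondary_dns }}"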
+'''
+
+RETURN = '''
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28.
+ type: str
+ returned: success
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs.
+ type: str
+ returned: success
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs.
+ type: str
+ returned: success
+ dhcp_enable:
+ description:
+ - Specifies whether DHCP is enabled for the subnet. The value can
+              be true (enabled) or false (disabled), and the default value is true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ returned: success
+ dns_address:
+ description:
+            - Specifies the DNS server addresses for the subnet. The address
+              at the head of the list will be used first.
+ type: list
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ ), default=dict()),
+ cidr=dict(type='str', required=True),
+ gateway_ip=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ availability_zone=dict(type='str'),
+ dhcp_enable=dict(type='bool'),
+ dns_address=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get('id'):
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "cidr": module.params.get("cidr"),
+ "dhcp_enable": module.params.get("dhcp_enable"),
+ "dns_address": module.params.get("dns_address"),
+ "gateway_ip": module.params.get("gateway_ip"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["subnet", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "subnets/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_link += "&vpc_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "subnets" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["cidr"], None)
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_create_dns_list(opts, None)
+ if not is_empty_value(v):
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["gateway_ip"], None)
+ if not is_empty_value(v):
+ params["gateway_ip"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_primary_dns(opts, None)
+ if not is_empty_value(v):
+ params["primary_dns"] = v
+
+ v = expand_create_secondary_dns(opts, None)
+ if not is_empty_value(v):
+ params["secondary_dns"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
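+# The VPC API expects the first DNS address as primary_dns, the second as
+# secondary_dns, and the full list as dnsList only when more than two
+# addresses are supplied; the helpers below implement that mapping.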
+def expand_create_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v if (v and len(v) > 2) else []
+
+
+def expand_create_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_create_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_create_request(module, params, client):
+ url = "subnets"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
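+    # Poll the newly created subnet until its status becomes ACTIVE or the
+    # create timeout expires.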
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_update_dns_list(opts, None)
+ if v is not None:
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_update_primary_dns(opts, None)
+ if v is not None:
+ params["primary_dns"] = v
+
+ v = expand_update_secondary_dns(opts, None)
+ if v is not None:
+ params["secondary_dns"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_update_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ if v:
+ if len(v) > 2:
+ return v
+ return None
+ return []
+
+
+def expand_update_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_update_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "subnets/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnet"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "cidr"], array_index)
+ r["cidr"] = v
+
+ v = navigate_value(response, ["read", "dhcp_enable"], array_index)
+ r["dhcp_enable"] = v
+
+ v = navigate_value(response, ["read", "dnsList"], array_index)
+ r["dns_address"] = v
+
+ v = navigate_value(response, ["read", "gateway_ip"], array_index)
+ r["gateway_ip"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnets"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["availability_zone"] = v
+
+ v = navigate_value(all_opts, ["cidr"], None)
+ result["cidr"] = v
+
+ v = navigate_value(all_opts, ["dhcp_enable"], None)
+ result["dhcp_enable"] = v
+
+ v = navigate_value(all_opts, ["dns_address"], None)
+ result["dnsList"] = v
+
+ v = navigate_value(all_opts, ["gateway_ip"], None)
+ result["gateway_ip"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["neutron_network_id"] = None
+
+ result["neutron_subnet_id"] = None
+
+ result["primary_dns"] = None
+
+ result["secondary_dns"] = None
+
+ result["status"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
new file mode 100644
index 00000000..29690497
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ domain:
+ description:
+ - Name of the domain to be managed.
+ required: true
+ state:
+ description:
+ - The desired state of the domain.
+ default: "present"
+ choices: [ "present", "absent" ]
+ ldap_id:
+ description:
+            - LDAP ID to add to the domain.
+ required: false
+ size:
+ description:
+ - Size of the domain.
+ required: false
+ hard_capacity:
+ description:
+ - Hard capacity of the domain.
+ required: false
+ soft_capacity:
+ description:
+ - Soft capacity of the domain.
+ required: false
+ max_cgs:
+ description:
+            - Maximum number of cgs allowed in the domain.
+ required: false
+ max_dms:
+ description:
+            - Maximum number of dms allowed in the domain.
+ required: false
+ max_mirrors:
+ description:
+            - Maximum number of mirrors allowed in the domain.
+ required: false
+ max_pools:
+ description:
+            - Maximum number of pools allowed in the domain.
+ required: false
+ max_volumes:
+ description:
+            - Maximum number of volumes allowed in the domain.
+ required: false
+ perf_class:
+ description:
+ - Add the domain to a performance class.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ size: domain_size
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
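+
+# Illustrative sketch only: the limit values are placeholders, in the same
+# style as the examples above.
+- name: Define new domain with explicit pool and volume limits.
+  community.general.ibm_sa_domain:
+    domain: domain_name
+    size: domain_size
+    max_pools: max_pools_count
+    max_volumes: max_volumes_count
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system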
+'''
+RETURN = '''
+msg:
+ description: module return status.
+ returned: as needed
+ type: str
+ sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ domain=dict(required=True),
+ size=dict(),
+ max_dms=dict(),
+ max_cgs=dict(),
+ ldap_id=dict(),
+ max_mirrors=dict(),
+ max_pools=dict(),
+ max_volumes=dict(),
+ perf_class=dict(),
+ hard_capacity=dict(),
+ soft_capacity=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ domain = xcli_client.cmd.domain_list(
+ domain=module.params['domain']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ msg = 'Domain \'{0}\''.format(module.params['domain'])
+ if state == 'present' and not domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_create', xcli_client)
+ msg += " created successfully."
+ elif state == 'absent' and domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_delete', xcli_client)
+ msg += " deleted successfully."
+ else:
+ msg += " state unchanged."
+
+ module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
new file mode 100644
index 00000000..5ce12992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ cluster:
+ description:
+            - The name of the cluster to which the host will be added.
+ required: false
+ domain:
+ description:
+            - The domains the host will be attached to.
+ To include more than one domain,
+ separate domain names with commas.
+ To include all existing domains, use an asterisk ("*").
+ required: false
+ iscsi_chap_name:
+ description:
+            - The host's CHAP name identifier.
+        required: false
+    iscsi_chap_secret:
+        description:
+            - The password of the initiator used to
+              authenticate to the system when CHAP is enabled.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
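+
+# Illustrative sketch only: the cluster and CHAP values are placeholders.
+- name: Define new host in a cluster with CHAP authentication.
+  community.general.ibm_sa_host:
+    host: host_name
+    cluster: cluster_name
+    iscsi_chap_name: chap_name
+    iscsi_chap_secret: chap_secret
+    state: present
+    username: admin
+    password: secret
+    endpoints: hostdev-system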
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ cluster=dict(),
+ domain=dict(),
+ iscsi_chap_name=dict(),
+ iscsi_chap_secret=dict(no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ host = xcli_client.cmd.host_list(
+ host=module.params['host']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_define', xcli_client)
+ elif state == 'absent' and host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
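Like the neighbouring ibm_sa_* modules, ibm_sa_host is idempotent by construction: it looks the object up first (host_list) and only issues the XCLI create or delete command (host_define / host_delete) when the observed state differs from the requested one. A minimal, hypothetical sketch of that presence-check pattern, with plain callables standing in for the pyxcli client:

def ensure_state(state, lookup, create, delete):
    # Generic presence check used throughout these modules:
    # act only when reality differs from the requested state.
    existing = lookup()                       # stand-in for host_list(...)
    if state == 'present' and not existing:
        create()                              # stand-in for host_define
        return True                           # changed
    if state == 'absent' and existing:
        delete()                              # stand-in for host_delete
        return True                           # changed
    return False                              # already in the requested state


# A host that already exists is left untouched (changed == False).
changed = ensure_state(
    'present',
    lookup=lambda: {'name': 'host_name'},     # pretend the array already has it
    create=lambda: None,
    delete=lambda: None,
)
print(changed)  # False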
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
new file mode 100644
index 00000000..981bc553
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host_ports
+short_description: Add host ports on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds ports to or removes them from the hosts
+ on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host ports state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ iscsi_name:
+ description:
+ - iSCSI initiator name.
+ required: false
+ fcaddress:
+ description:
+ - Fibre Channel address.
+ required: false
+ num_of_visible_targets:
+ description:
+ - Number of visible targets.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Add ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Remove ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+ spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ iscsi_name=dict(),
+ fcaddress=dict(),
+ num_of_visible_targets=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ ports = []
+ try:
+ ports = xcli_client.cmd.host_list_ports(
+ host=module.params.get('host')).as_list
+ except Exception:
+ pass
+ state = module.params['state']
+ port_exists = False
+ ports = [port.get('port_name') for port in ports]
+
+ fc_ports = (module.params.get('fcaddress')
+ if module.params.get('fcaddress') else [])
+ iscsi_ports = (module.params.get('iscsi_name')
+ if module.params.get('iscsi_name') else [])
+ for port in ports:
+ if port in iscsi_ports or port in fc_ports:
+ port_exists = True
+ break
+ state_changed = False
+ if state == 'present' and not port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_add_port', xcli_client)
+ if state == 'absent' and port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_remove_port', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
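The port module first collects the port names already defined on the host (host_list_ports) and only calls host_add_port or host_remove_port when the requested identifier is missing or present, respectively. A small sketch of that membership test, assuming the identifiers have already been read from the module parameters:

def port_already_defined(existing_port_names, iscsi_name=None, fcaddress=None):
    # existing_port_names corresponds to the port_name fields returned by
    # host_list_ports; iscsi_name / fcaddress are the requested identifiers.
    requested = {p for p in (iscsi_name, fcaddress) if p}
    return any(port in requested for port in existing_port_names)


existing = ['iqn.1994-05.com.example:host1']
print(port_already_defined(existing, iscsi_name='iqn.1994-05.com.example:host1'))  # True
print(port_already_defined(existing, fcaddress='10000000C9934D01'))                # False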
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
new file mode 100644
index 00000000..812904eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems"
+
+options:
+ pool:
+ description:
+ - Pool name.
+ required: true
+ state:
+ description:
+ - Pool state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Pool size in GB.
+ required: false
+ snapshot_size:
+ description:
+ - Pool snapshot size in GB.
+ required: false
+ domain:
+ description:
+ - Adds the pool to the specified domain.
+ required: false
+ perf_class:
+ description:
+ - Assigns a perf_class to the pool.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ size: 300
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+ snapshot_size=dict(),
+ domain=dict(),
+ perf_class=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ pool = xcli_client.cmd.pool_list(
+ pool=module.params['pool']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_create', xcli_client)
+ if state == 'absent' and pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
new file mode 100644
index 00000000..bf578cee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ pool:
+ description:
+ - Volume pool.
+ required: false
+ state:
+ description:
+ - Volume state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Volume size.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ pool: pool_name
+ size: 17
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete an existing volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ pool=dict(),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ volume = xcli_client.cmd.vol_list(
+ vol=module.params.get('vol')).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_create', xcli_client)
+ elif state == 'absent' and volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
new file mode 100644
index 00000000..f1f5a807
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol_map
+short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module maps volumes to or unmaps them from the hosts on
+ IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ state:
+ default: "present"
+ choices: [ "present", "absent" ]
+ description:
+ - When the state is present, the volume is mapped.
+ When the state is absent, the volume is unmapped.
+
+ cluster:
+ description:
+ - Maps the volume to a cluster.
+ required: false
+ host:
+ description:
+ - Maps the volume to a host.
+ required: false
+ lun:
+ description:
+ - The LUN identifier.
+ required: false
+ override:
+ description:
+ - Overrides the existing volume mapping.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Map volume to host.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Map volume to cluster.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ cluster: cluster_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Unmap volume.
+ community.general.ibm_sa_vol_map:
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ lun=dict(),
+ cluster=dict(),
+ host=dict(),
+ override=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ mapping = False
+ try:
+ mapped_hosts = xcli_client.cmd.vol_mapping_list(
+ vol=module.params.get('vol')).as_list
+ for host in mapped_hosts:
+ if host['host'] == module.params.get("host", ""):
+ mapping = True
+ except Exception:
+ pass
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not mapping:
+ state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+ if state == 'absent' and mapping:
+ state_changed = execute_pyxcli_command(
+ module, 'unmap_vol', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
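The mapping lookup above is wrapped in a broad try/except because vol_mapping_list raises when the volume does not exist yet; in that case the module simply treats the volume as unmapped. A hedged sketch of that defensive lookup, with a plain callable standing in for the XCLI call:

def volume_is_mapped_to(list_mappings, host_name):
    # list_mappings stands in for xcli_client.cmd.vol_mapping_list(...).as_list;
    # any failure (for example a missing volume) is treated as "not mapped".
    try:
        mappings = list_mappings()
    except Exception:
        return False
    return any(entry.get('host') == host_name for entry in mappings)


def missing_volume():
    raise RuntimeError('volume not found')


print(volume_is_mapped_to(lambda: [{'host': 'host_name'}], 'host_name'))  # True
print(volume_is_mapped_to(missing_volume, 'host_name'))                   # False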
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py
new file mode 100644
index 00000000..b59c0e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_feature.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+ - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+options:
+ name:
+ type: str
+ description:
+ - This is the feature name to enable or disable.
+ required: True
+ state:
+ type: str
+ description:
+ - If set to C(present) and feature is disabled, then feature is enabled.
+ - If set to C(present) and feature is already enabled, then nothing is changed.
+ - If set to C(absent) and feature is enabled, then feature is disabled.
+ - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Enable ido-pgsql feature
+ community.general.icinga2_feature:
+ name: ido-pgsql
+ state: present
+
+- name: Disable api feature
+ community.general.icinga2_feature:
+ name: api
+ state: absent
+'''
+
+RETURN = '''
+#
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Icinga2FeatureHelper:
+ def __init__(self, module):
+ self.module = module
+ self._icinga2 = module.get_bin_path('icinga2', True)
+ self.feature_name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ def _exec(self, args):
+ cmd = [self._icinga2, 'feature']
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return rc, out
+
+ def manage(self):
+ rc, out = self._exec(["list"])
+ if rc != 0:
+ self.module.fail_json(msg="Unable to list icinga2 features. "
+ "Ensure icinga2 is installed and present in binary path.")
+
+ # If feature is already in good state, just exit
+ if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
+ (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
+ self.module.exit_json(changed=False)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ feature_enable_str = "enable" if self.state == "present" else "disable"
+
+ rc, out = self._exec([feature_enable_str, self.feature_name])
+
+ change_applied = False
+ if self.state == "present":
+ if rc != 0:
+ self.module.fail_json(msg="Failed to %s feature %s."
+ " icinga2 command returned %s" % (feature_enable_str,
+ self.feature_name,
+ out))
+
+ if re.search("already enabled", out) is None:
+ change_applied = True
+ else:
+ if rc == 0:
+ change_applied = True
+ # RC is not 0 for this already disabled feature, handle it as no change applied
+ elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
+ change_applied = False
+ else:
+ self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
+
+ self.module.exit_json(changed=change_applied)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=["present", "absent"], default="present")
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+ Icinga2FeatureHelper(module).manage()
+
+
+if __name__ == '__main__':
+ main()
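Before calling `icinga2 feature enable/disable`, the helper checks the output of `icinga2 feature list` against the two regular expressions shown in manage() and exits early with changed=False when the feature is already in the requested state. A self-contained illustration of that check against made-up sample output:

import re

SAMPLE_OUTPUT = (
    "Disabled features: compatlog gelf ido-pgsql\n"
    "Enabled features: api checker notification\n"
)


def already_in_state(output, feature, state):
    # Mirrors the module's early-exit test: True means nothing needs changing.
    disabled = re.search("Disabled features:.* %s[ \n]" % feature, output)
    enabled = re.search("Enabled features:.* %s[ \n]" % feature, output)
    return bool((disabled and state == "absent") or (enabled and state == "present"))


print(already_in_state(SAMPLE_OUTPUT, "api", "present"))        # True  -> changed=False
print(already_in_state(SAMPLE_OUTPUT, "ido-pgsql", "present"))  # False -> feature gets enabled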
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py
new file mode 100644
index 00000000..65c95812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/icinga2_host.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is proudly sponsored by CGI (www.cgi.com) and
+# KPN (www.kpn.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_host
+short_description: Manage a host in Icinga2
+description:
+ - "Add or remove a host to Icinga2 through the API."
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
+author: "Jurgen Brand (@t794104)"
+options:
+ url:
+ type: str
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ url_username:
+ type: str
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ url_password:
+ type: str
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon initial request.
+ type: bool
+ default: 'no'
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client
+ authentication. This file can also include the key as well, and if
+ the key is included, C(client_key) is not required.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL
+ client authentication. If C(client_cert) contains both the certificate
+ and key, this option is not required.
+ state:
+ type: str
+ description:
+ - Whether the host should be present or absent.
+ choices: [ "present", "absent" ]
+ default: present
+ name:
+ type: str
+ description:
+ - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
+ required: true
+ zone:
+ type: str
+ description:
+ - The zone from where this host should be polled.
+ template:
+ type: str
+ description:
+ - The template used to define the host.
+ - Template cannot be modified after object creation.
+ check_command:
+ type: str
+ description:
+ - The command used to check if the host is alive.
+ default: "hostalive"
+ display_name:
+ type: str
+ description:
+ - The name used to display the host.
+ - If not specified, it defaults to the value of the I(name) parameter.
+ ip:
+ type: str
+ description:
+ - The IP address of the host.
+ required: true
+ variables:
+ type: dict
+ description:
+ - Dictionary of variables.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Add host to icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: present
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ variables:
+ foo: "bar"
+ delegate_to: 127.0.0.1
+'''
+
+RETURN = '''
+name:
+ description: The name used to create, modify or delete the host
+ type: str
+ returned: always
+data:
+ description: The data structure used for create, modify or delete of the host
+ type: dict
+ returned: always
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+# ===========================================
+# Icinga2 API class
+#
+class icinga2_api:
+ module = None
+
+ def __init__(self, module):
+ self.module = module
+
+ def call_url(self, path, data='', method='GET'):
+ headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': method,
+ }
+ url = self.module.params.get("url") + "/" + path
+ rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy'])
+ body = ''
+ if rsp:
+ body = json.loads(rsp.read())
+ if info['status'] >= 400:
+ body = info['body']
+ return {'code': info['status'], 'data': body}
+
+ def check_connection(self):
+ ret = self.call_url('v1/status')
+ if ret['code'] == 200:
+ return True
+ return False
+
+ def exists(self, hostname):
+ data = {
+ "filter": "match(\"" + hostname + "\", host.name)",
+ }
+ ret = self.call_url(
+ path="v1/objects/hosts",
+ data=self.module.jsonify(data)
+ )
+ if ret['code'] == 200:
+ if len(ret['data']['results']) == 1:
+ return True
+ return False
+
+ def create(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="PUT"
+ )
+ return ret
+
+ def delete(self, hostname):
+ data = {"cascade": 1}
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="DELETE"
+ )
+ return ret
+
+ def modify(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="POST"
+ )
+ return ret
+
+ def diff(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ method="GET"
+ )
+ changed = False
+ ic_data = ret['data']['results'][0]
+ for key in data['attrs']:
+ if key not in ic_data['attrs'].keys():
+ changed = True
+ elif data['attrs'][key] != ic_data['attrs'][key]:
+ changed = True
+ return changed
+
+
+# ===========================================
+# Module execution.
+#
+def main():
+ # use the predefined argument spec for url
+ argument_spec = url_argument_spec()
+ # add our own arguments
+ argument_spec.update(
+ state=dict(default="present", choices=["absent", "present"]),
+ name=dict(required=True, aliases=['host']),
+ zone=dict(),
+ template=dict(default=None),
+ check_command=dict(default="hostalive"),
+ display_name=dict(default=None),
+ ip=dict(required=True),
+ variables=dict(type='dict', default=None),
+ )
+
+ # Define the main module
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ name = module.params["name"]
+ zone = module.params["zone"]
+ template = [name]
+ if module.params["template"]:
+ template.append(module.params["template"])
+ check_command = module.params["check_command"]
+ ip = module.params["ip"]
+ display_name = module.params["display_name"]
+ if not display_name:
+ display_name = name
+ variables = module.params["variables"]
+
+ try:
+ icinga = icinga2_api(module=module)
+ icinga.check_connection()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+ data = {
+ 'attrs': {
+ 'address': ip,
+ 'display_name': display_name,
+ 'check_command': check_command,
+ 'zone': zone,
+ 'vars': {
+ 'made_by': "ansible",
+ },
+ 'templates': template,
+ }
+ }
+
+ if variables:
+ data['attrs']['vars'].update(variables)
+
+ changed = False
+ if icinga.exists(name):
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+ else:
+ try:
+ ret = icinga.delete(name)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception deleting host: " + str(e))
+
+ elif icinga.diff(name, data):
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+
+ # Template attribute is not allowed in modification
+ del data['attrs']['templates']
+
+ ret = icinga.modify(name, data)
+
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+ else:
+ if state == "present":
+ if module.check_mode:
+ changed = True
+ else:
+ try:
+ ret = icinga.create(name, data)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception creating host: " + str(e))
+
+ module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
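For a host that already exists, diff() fetches the current object and compares every key in the desired attrs dictionary against what the API returned; any missing or differing key marks the host as changed and triggers the POST-based modify. A minimal sketch of that comparison over plain dictionaries:

def attrs_differ(desired_attrs, current_attrs):
    # True if any desired attribute is absent or different on the server side.
    for key, value in desired_attrs.items():
        if key not in current_attrs or current_attrs[key] != value:
            return True
    return False


current = {'address': '192.0.2.10', 'check_command': 'hostalive', 'display_name': 'web01'}
desired = {'address': '192.0.2.11', 'check_command': 'hostalive', 'display_name': 'web01'}
print(attrs_differ(desired, current))  # True  -> the module would call modify()
print(attrs_differ(current, current))  # False -> nothing to do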
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py
new file mode 100644
index 00000000..756b6cf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_config.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+- Modify global configuration settings of a FreeIPA Server.
+options:
+ ipadefaultloginshell:
+ description: Default shell for new users.
+ aliases: ["loginshell"]
+ type: str
+ ipadefaultemaildomain:
+ description: Default e-mail domain for new users.
+ aliases: ["emaildomain"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure the default login shell is bash.
+ community.general.ipa_config:
+ ipadefaultloginshell: /bin/bash
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default e-mail domain is ansible.com.
+ community.general.ipa_config:
+ ipadefaultemaildomain: ansible.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+'''
+
+RETURN = r'''
+config:
+ description: Configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def config_show(self):
+ return self._post_json(method='config_show', name=None)
+
+ def config_mod(self, name, item):
+ return self._post_json(method='config_mod', name=name, item=item)
+
+
+def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
+ config = {}
+ if ipadefaultloginshell is not None:
+ config['ipadefaultloginshell'] = ipadefaultloginshell
+ if ipadefaultemaildomain is not None:
+ config['ipadefaultemaildomain'] = ipadefaultemaildomain
+
+ return config
+
+
+def get_config_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_config = get_config_dict(
+ ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
+ ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
+ )
+ ipa_config = client.config_show()
+ diff = get_config_diff(client, ipa_config, module_config)
+
+ changed = False
+ new_config = {}
+ for module_key in diff:
+ if module_config.get(module_key) != ipa_config.get(module_key, None):
+ changed = True
+ new_config.update({module_key: module_config.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.config_mod(name=None, item=new_config)
+
+ return changed, client.config_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
+ ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = ConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
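ensure() only sends what actually differs: it builds the desired configuration from the module parameters, compares it with config_show(), and passes the changed subset to config_mod. A short sketch of that "send only the delta" step, assuming the diff is a list of differing keys as returned by the shared get_diff helper:

def changed_subset(desired, current, differing_keys):
    # Collect only the keys whose desired value differs from the server value.
    delta = {}
    for key in differing_keys:
        if desired.get(key) != current.get(key):
            delta[key] = desired.get(key)
    return delta


desired = {'ipadefaultloginshell': '/bin/bash'}
current = {'ipadefaultloginshell': '/bin/sh', 'ipadefaultemaildomain': 'example.com'}
print(changed_subset(desired, current, ['ipadefaultloginshell']))
# {'ipadefaultloginshell': '/bin/bash'}  -> passed to config_mod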
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py
new file mode 100644
index 00000000..635bf2ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnsrecord.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+- Add, modify and delete an IPA DNS Record using IPA API.
+options:
+ zone_name:
+ description:
+ - The DNS zone name in which the DNS record is managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+ - The type of DNS record name.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
+ - "'SRV' and 'MX' are added in version 2.8."
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ required: true
+ type: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+ - Applies only when adding a new record or changing the value of record_value.
+ required: false
+ type: int
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns record exists with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure an MX record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_value: '1 mailserver.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSRecordIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSRecordIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnsrecord_find(self, zone_name, record_name):
+ if record_name == '@':
+ return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True})
+ else:
+ return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True})
+
+ def dnsrecord_add(self, zone_name=None, record_name=None, details=None):
+ item = dict(idnsname=record_name)
+ if details['record_type'] == 'A':
+ item.update(a_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'AAAA':
+ item.update(aaaa_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'A6':
+ item.update(a6_part_data=details['record_value'])
+ elif details['record_type'] == 'CNAME':
+ item.update(cname_part_hostname=details['record_value'])
+ elif details['record_type'] == 'DNAME':
+ item.update(dname_part_target=details['record_value'])
+ elif details['record_type'] == 'PTR':
+ item.update(ptr_part_hostname=details['record_value'])
+ elif details['record_type'] == 'TXT':
+ item.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV':
+ item.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX':
+ item.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+
+ return self._post_json(method='dnsrecord_add', name=zone_name, item=item)
+
+ def dnsrecord_mod(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+ return self._post_json(method='dnsrecord_mod', name=zone_name, item=item)
+
+ def dnsrecord_del(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ return self._post_json(method='dnsrecord_del', name=zone_name, item=item)
+
+
+def get_dnsrecord_dict(details=None):
+ module_dnsrecord = dict()
+ if details['record_type'] == 'A' and details['record_value']:
+ module_dnsrecord.update(arecord=details['record_value'])
+ elif details['record_type'] == 'AAAA' and details['record_value']:
+ module_dnsrecord.update(aaaarecord=details['record_value'])
+ elif details['record_type'] == 'A6' and details['record_value']:
+ module_dnsrecord.update(a6record=details['record_value'])
+ elif details['record_type'] == 'CNAME' and details['record_value']:
+ module_dnsrecord.update(cnamerecord=details['record_value'])
+ elif details['record_type'] == 'DNAME' and details['record_value']:
+ module_dnsrecord.update(dnamerecord=details['record_value'])
+ elif details['record_type'] == 'PTR' and details['record_value']:
+ module_dnsrecord.update(ptrrecord=details['record_value'])
+ elif details['record_type'] == 'TXT' and details['record_value']:
+ module_dnsrecord.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV' and details['record_value']:
+ module_dnsrecord.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX' and details['record_value']:
+ module_dnsrecord.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ module_dnsrecord.update(dnsttl=details['record_ttl'])
+
+ return module_dnsrecord
+
+
+def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord):
+ details = get_dnsrecord_dict(module_dnsrecord)
+ return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details)
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ record_name = module.params['record_name']
+ record_ttl = module.params.get('record_ttl')
+ state = module.params['state']
+
+ ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name)
+
+ module_dnsrecord = dict(
+ record_type=module.params['record_type'],
+ record_value=module.params['record_value'],
+ record_ttl=to_native(record_ttl, nonstring='passthru'),
+ )
+
+ # ttl is not required to change records
+ if module_dnsrecord['record_ttl'] is None:
+ module_dnsrecord.pop('record_ttl')
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_add(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_mod(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ if ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_del(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+
+ return changed, client.dnsrecord_find(zone_name, record_name)
+
+
+def main():
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ zone_name=dict(type='str', required=True),
+ record_name=dict(type='str', aliases=['name'], required=True),
+ record_type=dict(type='str', default='A', choices=record_types),
+ record_value=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ record_ttl=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = DNSRecordIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
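get_dnsrecord_dict translates the user-facing record_type into the attribute name the IPA API expects (arecord, aaaarecord, cnamerecord, and so on). The same translation can be written as a lookup table; a hedged sketch of that alternative, using only the mappings visible in the module above:

# Attribute names taken from the if/elif chain in get_dnsrecord_dict.
RECORD_ATTR = {
    'A': 'arecord',
    'AAAA': 'aaaarecord',
    'A6': 'a6record',
    'CNAME': 'cnamerecord',
    'DNAME': 'dnamerecord',
    'PTR': 'ptrrecord',
    'TXT': 'txtrecord',
    'SRV': 'srvrecord',
    'MX': 'mxrecord',
}


def dnsrecord_item(record_type, record_value, record_ttl=None):
    # Build the item dict that would be sent to dnsrecord_mod / dnsrecord_del.
    item = {RECORD_ATTR[record_type]: record_value} if record_value else {}
    if record_ttl:
        item['dnsttl'] = record_ttl
    return item


print(dnsrecord_item('AAAA', '::1', record_ttl=300))
# {'aaaarecord': '::1', 'dnsttl': 300}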
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py
new file mode 100644
index 00000000..1536866c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_dnszone.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+ - Add and delete IPA DNS zones using the IPA API.
+options:
+ zone_name:
+ description:
+ - The DNS zone name that needs to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+ description: Apply dynamic update to zone
+ required: false
+ default: "false"
+ choices: ["false", "true"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and is dynamic update
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: true
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+zone:
+ description: DNS zone as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSZoneIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnszone_find(self, zone_name, details=None):
+ items = {'idnsname': zone_name}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_find',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_add(self, zone_name=None, details=None):
+ items = {}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_add',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_del(self, zone_name=None, record_name=None, details=None):
+ return self._post_json(
+ method='dnszone_del', name=zone_name, item={})
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ state = module.params['state']
+ dynamicupdate = module.params['dynamicupdate']
+
+ ipa_dnszone = client.dnszone_find(zone_name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate})
+ else:
+ changed = False
+ else:
+ if ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_del(zone_name=zone_name)
+
+ return changed, client.dnszone_find(zone_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(zone_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = DNSZoneIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, zone = ensure(module, client)
+ module.exit_json(changed=changed, zone=zone)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py
new file mode 100644
index 00000000..84ff443a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_group.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+- Add, modify and delete group within IPA server
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - Description of the group.
+ type: str
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ type: bool
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ aliases: ['gid']
+ type: str
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If an empty list is passed all groups will be removed from this group.
+ - If option is omitted assigned groups will not be checked or changed.
+ - Groups that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ type: bool
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If an empty list is passed all users will be removed from this group.
+ - If option is omitted assigned users will not be checked or changed.
+ - Users that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure group is present
+ community.general.ipa_group:
+ name: oinstall
+ gidnumber: '54321'
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that groups sysops and appops are assigned to ops but no other group
+ community.general.ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+ - name: Ensure that users linus and larry are assigned to the group, but no other user
+ community.general.ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure group is absent
+ community.general.ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class GroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(client, ipa_group, module_group):
+ data = []
+ # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ if 'external' in module_group:
+ if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'):
+ del module_group['external']
+
+ return client.get_diff(ipa_data=ipa_group, module_data=module_group)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ user = module.params['user']
+
+ module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
+ gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(client, ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group) or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user) or changed
+
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ external=dict(type='bool'),
+ gidnumber=dict(type='str', aliases=['gid']),
+ group=dict(type='list', elements='str'),
+ nonposix=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
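get_group_diff has to reconcile an API asymmetry: group_add accepts nonposix, while group_mod only accepts posix, and external must not be sent again for a group that is already external. A small sketch of the translation performed before the diff, mirroring the logic above:

def normalize_for_mod(module_group, ipa_group):
    # Translate add-time flags into mod-time flags, as get_group_diff does.
    module_group = dict(module_group)  # avoid mutating the caller's dict
    if 'nonposix' in module_group:
        # Only non-POSIX groups can be converted, and group_mod wants 'posix'.
        if not module_group['nonposix'] and ipa_group.get('nonposix'):
            module_group['posix'] = True
        del module_group['nonposix']
    if module_group.get('external') and 'ipaexternalgroup' in ipa_group.get('objectclass', []):
        del module_group['external']
    return module_group


print(normalize_for_mod({'nonposix': False}, {'nonposix': True}))
# {'posix': True}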
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py
new file mode 100644
index 00000000..cb49fd53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hbacrule.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description: Description
+ type: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ type: list
+ elements: str
+ hostcategory:
+ description: Host category
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+ - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ type: list
+ elements: str
+ servicecategory:
+ description: Service category
+ choices: ['all']
+ type: str
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ type: list
+ elements: str
+ sourcehost:
+ description:
+ - List of source host names to assign.
+ - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ type: list
+ elements: str
+ sourcehostcategory:
+ description: Source host category
+ choices: ['all']
+ type: str
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+ - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure
+ default: "present"
+ choices: ["absent", "disabled", "enabled","present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description: User category
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user group names to assign.
+ - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+ community.general.ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+ community.general.ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule is absent
+ community.general.ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
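+
+# Illustrative sketch (not part of the upstream examples): per the option
+# documentation above, passing an empty list removes all members of that
+# kind from the rule. The rule name reuses the example above.
+- name: Ensure no hosts or host groups are assigned to the rule
+  community.general.ipa_hbacrule:
+    name: allow_all_developers_access_to_db
+    host: []
+    hostgroup: []
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret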
+'''
+
+RETURN = r'''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule):
+ return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+
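+    # The ipaenabledflag mirrors the requested state: rules requested as
+    # 'present' or 'enabled' are flagged enabled in IPA, otherwise disabled.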
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+ diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ servicecategory=dict(type='str', choices=['all']),
+ servicegroup=dict(type='list', elements='str'),
+ sourcehost=dict(type='list', elements='str'),
+ sourcehostcategory=dict(type='str', choices=['all']),
+ sourcehostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py
new file mode 100644
index 00000000..80892c01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_host.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+options:
+ fqdn:
+ description:
+ - Full qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - A description of this host.
+ type: str
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ type: bool
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ type: str
+ mac_address:
+ description:
+    - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ aliases: ["macaddress"]
+ type: list
+ elements: str
+ ns_host_location:
+ description:
+ - Host location (e.g. "Lab 2")
+ aliases: ["nshostlocation"]
+ type: str
+ ns_hardware_platform:
+ description:
+ - Host hardware platform (e.g. "Lenovo T61")
+ aliases: ["nshardwareplatform"]
+ type: str
+ ns_os_version:
+ description:
+ - Host operating system and version (e.g. "Fedora 9")
+ aliases: ["nsosversion"]
+ type: str
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ aliases: ["usercertificate"]
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ update_dns:
+ description:
+    - If set to C(True) with I(state=absent), then the DNS records of the host managed by FreeIPA DNS are removed as well.
+ - This option has no effect for states other than "absent".
+ type: bool
+ random_password:
+ description: Generate a random password to be used in bulk enrollment.
+ type: bool
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrollment
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: False
+ random_password: True
+
+- name: Ensure host is disabled
+ community.general.ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+ community.general.ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host and its DNS record are absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_dns: True
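+
+# Illustrative sketch (not part of the upstream examples): per the
+# mac_address option documentation, passing an empty list removes all
+# assigned MAC addresses from the host.
+- name: Ensure that all MAC addresses are removed
+  community.general.ipa_host:
+    name: host01.example.com
+    mac_address: []
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret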
+'''
+
+RETURN = r'''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+ description: List of options that differ and would be changed
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_show(self, name):
+ return self._post_json(method='host_show', name=name)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name, update_dns):
+ return self._post_json(method='host_del', name=name, item={'updatedns': update_dns})
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None, random_password=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ if random_password is not None:
+ data['random'] = random_password
+ return data
+
+
+def get_host_diff(client, ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
+ if not module_host.get('random'):
+ non_updateable_keys.append('random')
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_host)
+
+
+def ensure(module, client):
+ name = module.params['fqdn']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'],
+ random_password=module.params.get('random_password'),
+ )
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ # OTP password generated by FreeIPA is visible only for host_add command
+ # so, return directly from here.
+ return changed, client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(client, ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
+ ipa_host_show = client.host_show(name=name)
+ if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+ client.host_disable(name=name)
+ return changed, client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ update_dns = module.params.get('update_dns', False)
+ if not module.check_mode:
+ client.host_del(name=name, update_dns=update_dns)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool'),
+ ip_address=dict(type='str'),
+ ns_host_location=dict(type='str', aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', aliases=['nsosversion']),
+ user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+ mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+ update_dns=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ random_password=dict(type='bool', no_log=False),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py
new file mode 100644
index 00000000..ae1f1a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_hostgroup.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+options:
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+    - List of host-groups that belong to the host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+ community.general.ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+ community.general.ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
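+
+# Illustrative sketch (not part of the upstream examples): per the host and
+# hostgroup option documentation, passing empty lists removes all assigned
+# members from the host-group.
+- name: Ensure host-group databases has no members
+  community.general.ipa_hostgroup:
+    name: databases
+    host: []
+    hostgroup: []
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret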
+'''
+
+RETURN = r'''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup):
+ return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
+ client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup,
+ client.hostgroup_remove_hostgroup) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py
new file mode 100644
index 00000000..589a6d5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_role.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+options:
+ cn:
+ description:
+ - Role name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - A description of this role-group.
+ type: str
+ group:
+ description:
+    - List of group names to assign to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ privilege:
+ description:
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges will be removed.
+ - If option is omitted privileges will not be checked or changed.
+ - If option is passed all assigned privileges that are not passed will be removed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+ community.general.ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role with certain details
+ community.general.ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ privilege:
+ - Group Administrators
+ - User Administrators
+ service:
+ - service01
+
+- name: Ensure role is absent
+ community.general.ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
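+
+# Illustrative sketch (not part of the upstream examples): per the privilege
+# option documentation, passing an empty list removes all privileges granted
+# to the role.
+- name: Ensure role dba has no privileges
+  community.general.ipa_role:
+    name: dba
+    privilege: []
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret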
+'''
+
+RETURN = r'''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class RoleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
+ def role_add_privilege(self, name, item):
+ return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})
+
+ def role_remove_privilege(self, name, item):
+ return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(client, ipa_role, module_role):
+ return client.get_diff(ipa_data=ipa_role, module_data=module_role)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ privilege = module.params['privilege']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(client, ipa_role, module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if privilege is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
+ client.role_add_privilege,
+ client.role_remove_privilege) or changed
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ group=dict(type='list', elements='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ privilege=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py
new file mode 100644
index 00000000..c13f7ab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_service.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_service
+author: Cédric Parent (@cprh)
+short_description: Manage FreeIPA service
+description:
+- Add and delete an IPA service using IPA API.
+options:
+ krbcanonicalname:
+ description:
+ - Principal of the service.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ hosts:
+ description:
+ - Defines the list of 'ManagedBy' hosts.
+ required: false
+ type: list
+ elements: str
+ force:
+ description:
+ - Force principal name even if host is not in DNS.
+ required: false
+ type: bool
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure service is absent
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Change the list of 'ManagedBy' hosts
+  community.general.ipa_service:
+    name: http/host01.example.com
+    hosts:
+ - host01.example.com
+ - host02.example.com
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
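+
+# Illustrative sketch (not part of the upstream examples): per the force
+# option documentation, the principal can be created even if the host is not
+# resolvable in DNS. The host name is a placeholder.
+- name: Ensure service is present even if the host is not in DNS
+  community.general.ipa_service:
+    name: http/host03.example.com
+    force: true
+    state: present
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret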
+'''
+
+RETURN = r'''
+service:
+ description: Service as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ServiceIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+ def service_find(self, name):
+ return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+ def service_add(self, name, service):
+ return self._post_json(method='service_add', name=name, item=service)
+
+ def service_mod(self, name, service):
+ return self._post_json(method='service_mod', name=name, item=service)
+
+ def service_del(self, name):
+ return self._post_json(method='service_del', name=name)
+
+ def service_disable(self, name):
+ return self._post_json(method='service_disable', name=name)
+
+ def service_add_host(self, name, item):
+ return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+ def service_remove_host(self, name, item):
+ return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None):
+ data = {}
+ if force is not None:
+ data['force'] = force
+ if krbcanonicalname is not None:
+ data['krbcanonicalname'] = krbcanonicalname
+ return data
+
+
+def get_service_diff(client, ipa_host, module_service):
+ non_updateable_keys = ['force', 'krbcanonicalname']
+ for key in non_updateable_keys:
+ if key in module_service:
+ del module_service[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_service)
+
+
+def ensure(module, client):
+ name = module.params['krbcanonicalname']
+ state = module.params['state']
+ hosts = module.params['hosts']
+
+ ipa_service = client.service_find(name=name)
+ module_service = get_service_dict(force=module.params['force'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_add(name=name, service=module_service)
+ else:
+ diff = get_service_diff(client, ipa_service, module_service)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_service.get(key)
+ client.service_mod(name=name, service=data)
+ if hosts is not None:
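+            # Reconcile the 'ManagedBy' host list: remove assigned hosts that
+            # are no longer requested, then add requested hosts that are not
+            # yet assigned.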
+ if 'managedby_host' in ipa_service:
+ for host in ipa_service['managedby_host']:
+ if host not in hosts:
+ if not module.check_mode:
+ client.service_remove_host(name=name, item=host)
+ changed = True
+ for host in hosts:
+ if host not in ipa_service['managedby_host']:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+ else:
+ for host in hosts:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+
+ else:
+ if ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_del(name=name)
+
+ return changed, client.service_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ hosts=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = ServiceIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py
new file mode 100644
index 00000000..218951a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_subca.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities.
+description:
+- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authority using the IPA API.
+options:
+ subca_name:
+ description:
+ - The Sub Certificate Authority name which needs to be managed.
+ required: true
+ aliases: ["name"]
+ type: str
+ subca_subject:
+ description:
+    - The Sub Certificate Authority's subject, for example 'CN=SampleSubCA1,O=testrelm.test'.
+ required: true
+ type: str
+ subca_desc:
+ description:
+ - The Sub Certificate Authority's description.
+ type: str
+ state:
+ description:
+ - State to ensure.
+    - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+ required: false
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure IPA Sub CA is present
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ subca_name: AnsibleSubCA1
+ subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+ subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: absent
+ subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+    state: disabled
+ subca_name: AnsibleSubCA1
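+
+# Illustrative sketch (not part of the upstream examples): mirrors the task
+# above using the documented 'enabled' state (requires FreeIPA 4.4.2 or
+# later).
+- name: Ensure that IPA Sub CA is enabled
+  community.general.ipa_subca:
+    ipa_host: spider.example.com
+    ipa_pass: Passw0rd!
+    state: enabled
+    subca_name: AnsibleSubCA1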
+'''
+
+RETURN = r'''
+subca:
+ description: IPA Sub CA record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SubCAIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+ def subca_find(self, subca_name):
+ return self._post_json(method='ca_find', name=subca_name, item=None)
+
+ def subca_add(self, subca_name=None, subject_dn=None, details=None):
+ item = dict(ipacasubjectdn=subject_dn)
+ subca_desc = details.get('description', None)
+ if subca_desc is not None:
+ item.update(description=subca_desc)
+ return self._post_json(method='ca_add', name=subca_name, item=item)
+
+ def subca_mod(self, subca_name=None, diff=None, details=None):
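+        # Apply each changed attribute with its own ca_mod call, using the
+        # IPA 'setattr' form "attribute=value".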
+ item = get_subca_dict(details)
+ for change in diff:
+ update_detail = dict()
+ if item[change] is not None:
+ update_detail.update(setattr="{0}={1}".format(change, item[change]))
+ self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+ def subca_del(self, subca_name=None):
+ return self._post_json(method='ca_del', name=subca_name)
+
+ def subca_disable(self, subca_name=None):
+ return self._post_json(method='ca_disable', name=subca_name)
+
+ def subca_enable(self, subca_name=None):
+ return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
+ module_subca = dict()
+ if details['description'] is not None:
+ module_subca['description'] = details['description']
+ if details['subca_subject'] is not None:
+ module_subca['ipacasubjectdn'] = details['subca_subject']
+ return module_subca
+
+
+def get_subca_diff(client, ipa_subca, module_subca):
+ details = get_subca_dict(module_subca)
+ return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+ subca_name = module.params['subca_name']
+ subca_subject_dn = module.params['subca_subject']
+ subca_desc = module.params['subca_desc']
+
+ state = module.params['state']
+
+ ipa_subca = client.subca_find(subca_name)
+ module_subca = dict(description=subca_desc,
+ subca_subject=subca_subject_dn)
+
+ changed = False
+ if state == 'present':
+ if not ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+ else:
+ diff = get_subca_diff(client, ipa_subca, module_subca)
+ # IPA does not allow to modify Sub CA's subject DN
+ # So skip it for now.
+ if 'ipacasubjectdn' in diff:
+ diff.remove('ipacasubjectdn')
+ del module_subca['subca_subject']
+
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+ elif state == 'absent':
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_del(subca_name=subca_name)
+    elif state == 'disabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. "
+                                 "Please upgrade to version 4.4.2 or greater." % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_disable(subca_name=subca_name)
+    elif state == 'enabled':
+        ipa_version = client.get_ipa_version()
+        if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+            module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. "
+                                 "Please upgrade to version 4.4.2 or greater." % ipa_version)
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_enable(subca_name=subca_name)
+
+ return changed, client.subca_find(subca_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+ subca_subject=dict(type='str', required=True),
+ subca_desc=dict(type='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ client = SubCAIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py
new file mode 100644
index 00000000..aa09e0e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmd.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+options:
+ sudocmd:
+ description:
+ - Sudo command.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - A description of this command.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+ community.general.ipa_sudocmd:
+ name: su
+    description: Allow running su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+ community.general.ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
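+
+# Illustrative sketch (not part of the upstream examples): the module manages
+# one command per task, so several commands can be ensured with a standard
+# Ansible loop. The command paths are placeholders.
+- name: Ensure several sudo commands exist
+  community.general.ipa_sudocmd:
+    name: "{{ item }}"
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret
+  loop:
+    - /usr/bin/systemctl
+    - /usr/sbin/reboot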
+'''
+
+RETURN = r'''
+sudocmd:
+  description: Sudo command as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+ return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='str', required=True, aliases=['name']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
new file mode 100644
index 00000000..96eb6559
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudocmdgroup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete sudo command group within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - Group description.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command group exists
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
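+
+# Illustrative sketch (not part of the upstream examples): per the sudocmd
+# option documentation, passing an empty list removes all commands from the
+# group.
+- name: Ensure sudo command group group01 has no members
+  community.general.ipa_sudocmdgroup:
+    name: group01
+    sudocmd: []
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret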
+'''
+
+RETURN = r'''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup):
+ return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
+ changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+ client.sudocmdgroup_add_member_sudocmd,
+ client.sudocmdgroup_remove_member_sudocmd) or changed
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+ module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py
new file mode 100644
index 00000000..9a0259bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_sudorule.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ type: str
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ description:
+ description:
+ - Description of the sudo rule.
+ type: str
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ type: list
+ elements: str
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ type: list
+ elements: str
+ runasusercategory:
+ description:
+ - RunAs User category the rule applies to.
+ choices: ['all']
+ type: str
+ runasgroupcategory:
+ description:
+ - RunAs Group category the rule applies to.
+ choices: ['all']
+ type: str
+ sudoopt:
+ description:
+ - List of options to add to the sudo rule.
+ type: list
+ elements: str
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
+ community.general.ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow to run every command with sudo without password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ community.general.ipa_sudorule:
+ name: sudo_dev_dbserver
+ description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudorule:
+ description: Sudorule as returned by IPA
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_remove_allow_command(self, name, item):
+ return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
+ runasgroupcategory=None, runasusercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ if runasusercategory is not None:
+ data['ipasudorunasusercategory'] = runasusercategory
+ if runasgroupcategory is not None:
+ data['ipasudorunasgroupcategory'] = runasgroupcategory
+ return data
+
+
+def category_changed(module, client, category_name, ipa_sudorule):
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ cmd = module.params['cmd']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ runasusercategory = module.params['runasusercategory']
+ runasgroupcategory = module.params['runasgroupcategory']
+
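+ # ipaenabledflag controls whether the rule is active: 'present' and 'enabled' keep it
+ # enabled, 'disabled' turns it off, and 'absent' is handled by deletion further below.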
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory,
+ runasusercategory=runasusercategory,
+ runasgroupcategory=runasgroupcategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = client.get_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
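+ # hostcategory='all' and explicit host/hostgroup members are mutually exclusive in IPA,
+ # so any existing members are removed before the new category is applied.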
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if runasusercategory is not None:
+ changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
+
+ if runasgroupcategory is not None:
+ changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+ # client.modify_if_diff cannot be used here as each option must be removed/added individually
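+ # For example (hypothetical values): with existing options ['!authenticate'] and requested
+ # options ['!authenticate', 'logfile=/var/log/sudo.log'], nothing is removed and only
+ # 'logfile=/var/log/sudo.log' is added.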
+ ipa_list = ipa_sudorule.get('ipasudoopt', [])
+ module_list = sudoopt
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_option_ipasudoopt(name, item)
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_option_ipasudoopt(name, item)
+
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cmd=dict(type='list', elements='str'),
+ cmdcategory=dict(type='str', choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ runasusercategory=dict(type='str', choices=['all']),
+ runasgroupcategory=dict(type='str', choices=['all']),
+ sudoopt=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True)
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py
new file mode 100644
index 00000000..fa7b3abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_user.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete user within IPA server.
+options:
+ displayname:
+ description: Display name.
+ type: str
+ update_password:
+ description:
+ - Set password for a user.
+ type: str
+ default: 'always'
+ choices: [ always, on_create ]
+ givenname:
+ description: First name.
+ type: str
+ krbpasswordexpiration:
+ description:
+ - Date at which the user password will expire.
+ - In the format YYYYMMddHHmmss.
+ - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22.
+ type: str
+ loginshell:
+ description: Login shell.
+ type: str
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for a user.
+ - Will not be set for an existing user unless I(update_password=always), which is the default.
+ type: str
+ sn:
+ description: Surname.
+ type: str
+ sshpubkey:
+ description:
+ - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ type: list
+ elements: str
+ title:
+ description: Title.
+ type: str
+ uid:
+ description: uid of the user.
+ required: true
+ aliases: ["name"]
+ type: str
+ uidnumber:
+ description:
+ - Account Settings UID/Posix User ID number.
+ type: str
+ gidnumber:
+ description:
+ - Posix Group ID.
+ type: str
+ homedirectory:
+ description:
+ - Default home directory of the user.
+ type: str
+ version_added: '0.2.0'
+ userauthtype:
+ description:
+ - The authentication type to use for the user.
+ choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ type: str
+ version_added: '1.2.0'
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = r'''
+- name: Ensure pinky is present and always reset password
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ krbpasswordexpiration: 20200119235959
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkey:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ uidnumber: '1001'
+ gidnumber: '100'
+ homedirectory: /home/pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure brain is absent
+ community.general.ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure pinky is present but don't reset password if already exists
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ password: zounds
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_password: on_create
+
+- name: Ensure pinky is present and using one time password authentication
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ userauthtype: otp
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class UserIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+ mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+ title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+ userauthtype=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if krbpasswordexpiration is not None:
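+ # IPA stores the expiration as an LDAP GeneralizedTime value in UTC, hence the trailing 'Z'
+ # (e.g. '20200119235959' becomes '20200119235959Z').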
+ user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+ if gidnumber is not None:
+ user['gidnumber'] = gidnumber
+ if uidnumber is not None:
+ user['uidnumber'] = uidnumber
+ if homedirectory is not None:
+ user['homedirectory'] = homedirectory
+ if userauthtype is not None:
+ user['ipauserauthtype'] = userauthtype
+
+ return user
+
+
+def get_user_diff(client, ipa_user, module_user):
+ """
+ Return the keys whose values differ between both dicts. Unfortunately the IPA
+ API returns everything as a list even if only a single value is possible.
+ Therefore some more complexity is needed.
+ The method checks whether the value of module_user.attr is not a list and
+ wraps it in a list if the same attribute in ipa_user is a list. That way the method
+ should not need to change if the returned API dict changes.
+ :param ipa_user:
+ :param module_user:
+ :return:
+ """
+ # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys itself but instead the fingerprints.
+ # These are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ hash_algo = 'md5'
+ if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
+ hash_algo = 'sha256'
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
+ # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+
+ result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
+
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
+
+def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
+ FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
+ for md5 or
+ SHA256:[base64]
+ for sha256
+ :param ssh_key:
+ :param hash_algo:
+ :return:
+ """
+ parts = ssh_key.strip().split()
+ if len(parts) == 0:
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ if hash_algo == 'md5':
+ fp_plain = hashlib.md5(key).hexdigest()
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ elif hash_algo == 'sha256':
+ fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
+ key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ user_host = parts[2]
+ return "%s %s (%s)" % (key_fp, user_host, key_type)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['uid']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'],
+ gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'),
+ homedirectory=module.params.get('homedirectory'),
+ userauthtype=module.params.get('userauthtype'))
+
+ update_password = module.params.get('update_password')
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ if update_password == 'on_create':
+ module_user.pop('userpassword', None)
+ diff = get_user_diff(client, ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(displayname=dict(type='str'),
+ givenname=dict(type='str'),
+ update_password=dict(type='str', default="always",
+ choices=['always', 'on_create'],
+ no_log=False),
+ krbpasswordexpiration=dict(type='str', no_log=False),
+ loginshell=dict(type='str'),
+ mail=dict(type='list', elements='str'),
+ sn=dict(type='str'),
+ uid=dict(type='str', required=True, aliases=['name']),
+ gidnumber=dict(type='str'),
+ uidnumber=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ sshpubkey=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', elements='str'),
+ title=dict(type='str'),
+ homedirectory=dict(type='str'),
+ userauthtype=dict(type='str',
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
+ # Therefore a small check here replaces list(None) with None. Otherwise get_user_diff() would report sshpubkey
+ # as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py
new file mode 100644
index 00000000..3376b8c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/ipa/ipa_vault.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_vault
+author: Juan Manuel Parrilla (@jparrill)
+short_description: Manage FreeIPA vaults
+description:
+- Add, modify and delete vaults and secret vaults.
+- KRA service should be enabled to use this module.
+options:
+ cn:
+ description:
+ - Vault name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with service.
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with user.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+ - Force replace the existing vault on the IPA server.
+ type: bool
+ default: False
+ choices: ["True", "False"]
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure vault is present
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+
+- name: Ensure vault is present for Admin user
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure vault is absent
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Modify vault if already exists
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ description: "Vault for test"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ replace: True
+
+- name: Get vault info if already exists
+ community.general.ipa_vault:
+ name: vault01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+vault:
+ description: Vault as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VaultIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(VaultIPAClient, self).__init__(module, host, port, protocol)
+
+ def vault_find(self, name):
+ return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name})
+
+ def vault_add_internal(self, name, item):
+ return self._post_json(method='vault_add_internal', name=name, item=item)
+
+ def vault_mod_internal(self, name, item):
+ return self._post_json(method='vault_mod_internal', name=name, item=item)
+
+ def vault_del(self, name):
+ return self._post_json(method='vault_del', name=name)
+
+
+def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None):
+ vault = {}
+
+ if description is not None:
+ vault['description'] = description
+ if vault_type is not None:
+ vault['ipavaulttype'] = vault_type
+ if vault_salt is not None:
+ vault['ipavaultsalt'] = vault_salt
+ if vault_public_key is not None:
+ vault['ipavaultpublickey'] = vault_public_key
+ if service is not None:
+ vault['service'] = service
+ return vault
+
+
+def get_vault_diff(client, ipa_vault, module_vault, module):
+ return client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ user = module.params['username']
+ replace = module.params['replace']
+
+ module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'],
+ vault_salt=module.params['ipavaultsalt'],
+ vault_public_key=module.params['ipavaultpublickey'],
+ service=module.params['service'])
+ ipa_vault = client.vault_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_vault:
+ # New vault
+ changed = True
+ if not module.check_mode:
+ ipa_vault = client.vault_add_internal(name, item=module_vault)
+ else:
+ # Already exists
+ if replace:
+ diff = get_vault_diff(client, ipa_vault, module_vault, module)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_vault.get(key)
+ client.vault_mod_internal(name=name, item=data)
+
+ else:
+ if ipa_vault:
+ changed = True
+ if not module.check_mode:
+ client.vault_del(name)
+
+ return changed, client.vault_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ ipavaulttype=dict(type='str', default='symmetric',
+ choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']),
+ ipavaultsalt=dict(type='str', aliases=['vault_salt']),
+ ipavaultpublickey=dict(type='str', aliases=['vault_public_key']),
+ service=dict(type='str'),
+ replace=dict(type='bool', default=False, choices=[True, False]),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ username=dict(type='list', elements='str', aliases=['user']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['username', 'service']])
+
+ client = VaultIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, vault = ensure(module, client)
+ module.exit_json(changed=changed, vault=vault)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py
new file mode 100644
index 00000000..b27155ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_client.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak clients via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+ - The Keycloak API does not always sanity check inputs, e.g. you can set
+ SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client
+ - On C(present), the client will be created (or updated if it exists already).
+ - On C(absent), the client will be removed if it exists
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client id of client to be worked on. This is usually an alphanumeric name chosen by
+ you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ This is 'clientId' in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+ - Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
+ is required. If you specify both, this takes precedence.
+ type: str
+
+ name:
+ description:
+ - Name of the client (this is not the same as I(client_id))
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak
+ type: str
+
+ root_url:
+ description:
+ - Root URL appended to relative URLs for this client.
+ This is 'rootUrl' in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+ - URL to the admin interface of the client.
+ This is 'adminUrl' in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+ - Default URL to use when the auth server needs to redirect or link back to the client.
+ This is 'baseUrl' in the Keycloak REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either C(client-secret) or
+ C(client-jwt) can be chosen. When using C(client-secret), the module parameter
+ I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ to configure its behavior.
+ This is 'clientAuthenticatorType' in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using I(client_authenticator_type) C(client-secret) (the default), you can
+ specify a secret here (otherwise one will be generated if it does not exist). If
+ changing this secret, the module will not register a change currently (but the
+ changed secret will be saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration
+ service.
+ This is 'registrationAccessToken' in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - list of default roles for this client. If the client roles referenced do not exist
+ yet, they will be created.
+ This is 'defaultRoles' in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client.
+ This is 'redirectUris' in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+
+ web_origins:
+ description:
+ - List of allowed CORS origins.
+ This is 'webOrigins' in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
+ This is 'notBefore' in the Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only.
+ This is 'bearerOnly' in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access.
+ This is 'consentRequired' in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect).
+ This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect).
+ This is 'implicitFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect).
+ This is 'directAccessGrantsEnabled' in the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect).
+ This is 'serviceAccountsEnabled' in the Keycloak REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect).
+ This is 'authorizationServicesEnabled' in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not.
+ This is 'publicClient' in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not.
+ This is 'frontchannelLogout' in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+ - Type of client (either C(openid-connect) or C(saml)).
+ type: str
+ choices: ['openid-connect', 'saml']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client.
+ This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - dict of registered cluster nodes (with C(nodename) as the key and last registration
+ time as the value).
+ This is 'registeredNodes' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+ - Client template to use for this client. If it does not exist this field will silently
+ be dropped.
+ This is 'clientTemplate' in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the I(client_template).
+ This is 'useTemplateConfig' in the Keycloak REST API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the I(client_template).
+ This is 'useTemplateScope' in the Keycloak REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the I(client_template).
+ This is 'useTemplateMappers' in the Keycloak REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required.
+ This is 'surrogateAuthRequired' in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - a data structure defining the authorization settings for this client. For reference,
+ please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
+ This is 'authorizationSettings' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client.
+ This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml), this specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration
+ settings; an example is given in the examples section. While an exhaustive list of
+ permissible options is not available; possible options as of Keycloak 3.4 are listed below. The Keycloak
+ API does not validate whether a given option is appropriate for the protocol used; if specified
+ anyway, Keycloak will simply not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
+ description:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
+ should be included in the login response.
+
+ saml.client.signature:
+ description:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+
+ saml.encrypt:
+ description:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+
+ saml.force.post.binding:
+ description:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+
+ saml.onetimeuse.condition:
+ description:
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key id in the SAML Extensions element.
+
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+
+
+ saml_force_name_id_format:
+ description:
+ - For SAML clients, boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
+
+ saml_name_id_format:
+ description:
+ - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent))
+
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely
+ C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+
+ saml_single_logout_service_url_post:
+ description:
+ - SAML POST binding url for the client's single logout service.
+
+ saml_single_logout_service_url_redirect:
+ description:
+ - SAML redirect binding url for the client's single logout service.
+
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
+ OIDC request object. One of C(any), C(none), C(RS256).
+
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
+ public keys.
+
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by
+ client and signed by its key, base64-encoded.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client (minimal example)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: present
+
+- name: Delete a Keycloak client
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: absent
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ realm: master
+ client_id: test
+ id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+ name: this_is_a_test
+ description: Description of this wonderful client
+ root_url: https://www.example.com/
+ admin_url: https://www.example.com/admin_url
+ base_url: basepath
+ enabled: True
+ client_authenticator_type: client-secret
+ secret: REALLYWELLKEPTSECRET
+ redirect_uris:
+ - https://www.example.com/*
+ - http://localhost:8888/
+ web_origins:
+ - https://www.example.com/*
+ not_before: 1507825725
+ bearer_only: False
+ consent_required: False
+ standard_flow_enabled: True
+ implicit_flow_enabled: False
+ direct_access_grants_enabled: False
+ service_accounts_enabled: False
+ authorization_services_enabled: False
+ public_client: False
+ frontchannel_logout: False
+ protocol: openid-connect
+ full_scope_allowed: false
+ node_re_registration_timeout: -1
+ client_template: test
+ use_template_config: False
+ use_template_scope: false
+ use_template_mappers: no
+ registered_nodes:
+ node01.example.com: 1507828202
+ registration_access_token: eyJWT_TOKEN
+ surrogate_auth_required: false
+ default_roles:
+ - test01
+ - test02
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ consentRequired: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ saml.authnstatement: True
+ saml.client.signature: True
+ saml.force.post.binding: True
+ saml.server.signature: True
+ saml.signature.algorithm: RSA_SHA256
+ saml.signing.certificate: CERTIFICATEHERE
+ saml.signing.private.key: PRIVATEKEYHERE
+ saml_force_name_id_format: False
+ saml_name_id_format: username
+ saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+ user.info.response.signature.alg: RS256
+ request.object.signature.alg: RS256
+ use.jwks.url: true
+ jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+ jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
+
+proposed:
+ description: client representation of proposed changes to client
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+existing:
+ description: client representation of existing client (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+end_state:
+ description: client representation of client after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(clientrep):
+ """ Removes probably sensitive details from a client representation
+
+ :param clientrep: the clientrep dict to be sanitized
+ :return: sanitized clientrep dict
+ """
+ result = clientrep.copy()
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return result
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ root_url=dict(type='str', aliases=['rootUrl']),
+ admin_url=dict(type='str', aliases=['adminUrl']),
+ base_url=dict(type='str', aliases=['baseUrl']),
+ surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+ enabled=dict(type='bool'),
+ client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+ secret=dict(type='str', no_log=True),
+ registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+ default_roles=dict(type='list', aliases=['defaultRoles']),
+ redirect_uris=dict(type='list', aliases=['redirectUris']),
+ web_origins=dict(type='list', aliases=['webOrigins']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ bearer_only=dict(type='bool', aliases=['bearerOnly']),
+ consent_required=dict(type='bool', aliases=['consentRequired']),
+ standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+ implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+ direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+ service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+ authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+ public_client=dict(type='bool', aliases=['publicClient']),
+ frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+ node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+ registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+ client_template=dict(type='str', aliases=['clientTemplate']),
+ use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+ use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+ use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['client_id', 'id']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ cid = module.params.get('id')
+ state = module.params.get('state')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ client_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+ # See whether the client already exists in Keycloak
+ if cid is None:
+ before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+ if before_client is not None:
+ cid = before_client['id']
+ else:
+ before_client = kc.get_client_by_id(cid, realm=realm)
+
+ if before_client is None:
+ before_client = dict()
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for client_param in client_params:
+ new_param_value = module.params.get(client_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
+ if isinstance(new_param_value, list):
+ if client_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if client_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
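+ # For example, a mapper supplied only as {'name': 'family name', 'protocol': 'openid-connect'}
+ # arrives here padded with consentRequired=None, config=None, etc., and is stripped back
+ # down to the keys the user actually set.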
+
+ changeset[camel(client_param)] = new_param_value
+
+ # Whether creating or updating a client, take the before-state and merge the changeset into it
+ updated_client = before_client.copy()
+ updated_client.update(changeset)
+
+ result['proposed'] = sanitize_cr(changeset)
+ result['existing'] = sanitize_cr(before_client)
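+ # Illustration (values invented): an option such as redirect_uris=['https://new.example.com/*']
+ # lands in the changeset as changeset['redirectUris'] = ['https://new.example.com/*'], since
+ # camel() maps snake_case option names to their camelCase API keys; updated_client is then
+ # simply before_client with the changeset keys overwritten.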
+
+ # If the client does not exist yet, before_client is still empty
+ if before_client == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client
+ result['changed'] = True
+ if 'clientId' not in updated_client:
+ module.fail_json(msg='client_id needs to be specified when creating a new client')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(updated_client))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client(updated_client, realm=realm)
+ after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been created.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(updated_client))
+ result['changed'] = (before_client != updated_client)
+
+ module.exit_json(**result)
+
+ kc.update_client(cid, updated_client, realm=realm)
+
+ after_client = kc.get_client_by_id(cid, realm=realm)
+ if before_client == after_client:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(after_client))
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ # Delete existing client
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = sanitize_cr(before_client)
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py
new file mode 100644
index 00000000..d68198d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_clienttemplate.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clienttemplate
+
+short_description: Allows administration of Keycloak client templates via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+ - The Keycloak API does not always enforce that only sensible settings are used -- you can set
+ SAML-specific settings on an OpenID Connect client, for instance, and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client template
+ - On C(present), the client template will be created (or updated if it exists already).
+ - On C(absent), the client template will be removed if it exists
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - Id of client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak
+ type: str
+
+ protocol:
+ description:
+ - Type of client template (either C(openid-connect) or C(saml)).
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client template.
+ This is 'protocolMappers' in the Keycloak REST API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - Either 'openid-connect' or 'saml'; this specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and their parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the "existing" field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various
+ configuration settings, though in the default installation of Keycloak as of 3.4, none
+ are documented or known, so this is usually empty.
+ type: dict
+
+notes:
+- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
+ I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
+ I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
+ they are not available through this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client template (minimal)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+
+- name: Delete Keycloak client template
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ state: absent
+ name: test01
+
+- name: Create or update Keycloak client template (with a protocol mapper)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ full_scope_allowed: false
+ id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
+'''
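As the I(config) documentation above suggests, valid protocol mapper config values are easiest to discover by running the module in check mode against an existing template and inspecting the returned C(existing) field. A hedged sketch of such a task (template name and credentials are placeholders):

    - name: Dump an existing client template in check mode
      community.general.keycloak_clienttemplate:
        auth_client_id: admin-cli
        auth_keycloak_url: https://auth.example.com/auth
        auth_realm: master
        auth_username: USERNAME
        auth_password: PASSWORD
        realm: master
        name: this_is_a_test
      check_mode: true
      register: template_dump
      delegate_to: localhost

    - name: Show the existing representation, including any protocol mapper config
      ansible.builtin.debug:
        var: template_dump.existing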
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
+
+proposed:
+ description: client template representation of proposed changes to client template
+ returned: always
+ type: dict
+ sample: {
+ name: "test01"
+ }
+existing:
+ description: client template representation of existing client template (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+end_state:
+ description: client template representation of client template after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ clientt_params = [x for x in module.params
+ if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
+ 'auth_client_secret', 'auth_username', 'auth_password',
+ 'validate_certs', 'realm'] and module.params.get(x) is not None]
+
+ # See whether the client template already exists in Keycloak
+ if cid is None:
+ before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+ if before_clientt is not None:
+ cid = before_clientt['id']
+ else:
+ before_clientt = kc.get_client_template_by_id(cid, realm=realm)
+
+ if before_clientt is None:
+ before_clientt = dict()
+
+ result['existing'] = before_clientt
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for clientt_param in clientt_params:
+ # lists in the Keycloak API are sorted
+ new_param_value = module.params.get(clientt_param)
+ if isinstance(new_param_value, list):
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ changeset[camel(clientt_param)] = new_param_value
+
+ # Whether creating or updating a client template, take the before-state and merge the changeset into it
+ updated_clientt = before_clientt.copy()
+ updated_clientt.update(changeset)
+
+ result['proposed'] = changeset
+
+ # If the client template does not exist yet, before_clientt is still empty
+ if before_clientt == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client template does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client template
+ result['changed'] = True
+ if 'name' not in updated_clientt:
+ module.fail_json(msg='name needs to be specified when creating a new client template')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_clientt)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client_template(updated_clientt, realm=realm)
+ after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm)
+
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been created.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client template
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client template with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=updated_clientt)
+
+ module.exit_json(**result)
+
+ kc.update_client_template(cid, updated_clientt, realm=realm)
+
+ after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+ if before_clientt == after_clientt:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=after_clientt)
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been updated.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ # Delete existing client template
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = before_clientt
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client_template(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
new file mode 100644
index 00000000..45b5c290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/keycloak/keycloak_group.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+ - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group, where possible provide the group ID to the module. This removes a lookup
+ to the API to translate the name into the group ID.
+
+
+options:
+ state:
+ description:
+ - State of the group.
+ - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the group will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but
+ providing it will reduce the number of API calls required.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+notes:
+ - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+ community.general.keycloak_group:
+ name: my-group-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-group-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+group:
+ description: Group representation of the group after module execution (sample is truncated).
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups will have the same parameters as
+ documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ attributes=dict(type='dict')
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, group='')
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('id')
+ name = module.params.get('name')
+ attributes = module.params.get('attributes')
+
+ before_group = None # current state of the group, for merging.
+
+ # does the group already exist?
+ if gid is None:
+ before_group = kc.get_group_by_name(name, realm=realm)
+ else:
+ before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+ before_group = {} if before_group is None else before_group
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
+ if attributes is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+ group_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+
+ # build a changeset
+ changeset = {}
+ for param in group_params:
+ new_param_value = module.params.get(param)
+ old_value = before_group[param] if param in before_group else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
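+ # Only parameters whose values differ from the current group end up in the changeset,
+ # so re-running the module with identical parameters produces an empty changeset and
+ # is reported as changed=False further below.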
+
+ # prepare the new group
+ updated_group = before_group.copy()
+ updated_group.update(changeset)
+
+ # if before_group is empty, the group doesn't exist.
+ if before_group == {}:
+ if state == 'absent':
+ # nothing to do.
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Group does not exist; doing nothing.'
+ result['group'] = dict()
+ module.exit_json(**result)
+
+ # for 'present', create a new group.
+ result['changed'] = True
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new group')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do it for real!
+ kc.create_group(updated_group, realm=realm)
+ after_group = kc.get_group_by_name(name, realm)
+
+ result['group'] = after_group
+ result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'],
+ id=after_group['id'])
+
+ else:
+ if state == 'present':
+ # no changes
+ if updated_group == before_group:
+ result['changed'] = False
+ result['group'] = updated_group
+ result['msg'] = "No changes required to group {name}.".format(name=before_group['name'])
+ module.exit_json(**result)
+
+ # update the existing group
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_group(updated_group, realm=realm)
+
+ after_group = kc.get_group_by_groupid(updated_group['id'], realm=realm)
+
+ result['group'] = after_group
+ result['msg'] = "Group {id} has been updated".format(id=after_group['id'])
+
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ result['group'] = dict()
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete for real
+ gid = before_group['id']
+ kc.delete_group(groupid=gid, realm=realm)
+
+ result['changed'] = True
+ result['msg'] = "Group {name} has been deleted".format(name=before_group['name'])
+
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
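A minimal sketch of the attribute normalization performed in main() above: any attribute value that is not already a list is wrapped in a single-element list before the group is sent to the API (attribute names and values are invented):

    attributes = {'attrib1': 'value1', 'attrib3': ['with', 'numerous', 'items']}
    for key, val in attributes.items():
        attributes[key] = [val] if not isinstance(val, list) else val
    # attributes == {'attrib1': ['value1'], 'attrib3': ['with', 'numerous', 'items']}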
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_facts.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for can not be found.
+ - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
+# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
+# second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # Haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
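For reference, a hedged sketch of the kind of C(op get item) JSON that _parse_field() above expects; the exact layout depends on the C(op) version (0.5.x era) and all values here are invented:

    item = {
        'overview': {'title': 'My 1Password item'},
        'details': {
            'password': 'top-level password, checked before the fields list',
            'fields': [
                {'name': 'username', 'value': 'someone@example.com'},
            ],
            'sections': [
                {
                    'title': 'Custom section name',
                    'fields': [
                        {'t': 'Custom field name', 'v': 'value of the custom field'},
                    ],
                },
            ],
            # a document item would instead carry a 'documentAttributes' key here
        },
    }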
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/onepassword_info.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+ - A fatal error occurs if any of the items being searched for can not be found.
+ - Recommend using with the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+ - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
+# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
+# second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # Haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
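+ # First-time sign in: 'op signin <subdomain>.1password.com <username> <secret_key> --output=raw',
+ # with the master password supplied on stdin, prints a session token on stdout.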
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s sigin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
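+ # 'op get account' only succeeds when a valid session already exists; if it fails,
+ # fall through to get_token() to establish (or re-establish) a session.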
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py
new file mode 100644
index 00000000..aa477e42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/identity/opendj/opendj_backendprop.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Updates the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
+description:
+ - This module updates settings for OpenDJ with the set-backend-prop command.
+ - It first checks via get-backend-prop whether the configuration actually needs to be changed.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The user name (bind DN) to connect with.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Path to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - Whether the configuration needs to be added or updated.
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+ - name: Add or update OpenDJ backend properties
+ community.general.opendj_backendprop:
+ hostname: localhost
+ port: 4444
+ username: "cn=Directory Manager"
+ password: password
+ backend: userRoot
+ name: index-entry-limit
+ value: 5000
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BackendProp(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
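+ # Build a non-interactive dsconfig call; -n (no prompt), -X (trust all certificates)
+ # and -s (script-friendly output) keep the output machine-parsable for validate_data().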
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
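+ # 'data' is the stdout of the get-backend-prop call above, one whitespace-separated
+ # 'property value' pair per line; return True only if the requested property is
+ # already set to the requested value.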
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'passwordfile']],
+ required_one_of=[['password', 'passwordfile']]
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py
new file mode 100644
index 00000000..fa8ac66c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_firmware.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_firmware
+short_description: Firmware update from a repository on a network share (CIFS, NFS).
+description:
+ - Update the firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
+ available updates.
+ - The network share should contain a valid repository of Dell Update Packages (DUPs) and a catalog file describing the DUPs.
+ - All applicable updates contained in the repository are applied to the system.
+ - This feature is available only with iDRAC Enterprise License.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ share_name:
+ description: CIFS or NFS Network share.
+ type: str
+ required: True
+ share_user:
+ description: Network share user name. Use the format 'user@domain' or 'domain\\user' if the user is
+ part of a domain, otherwise just 'user'. This option is mandatory for CIFS network shares.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ share_mnt:
+ description: Local mount path of the network share with read-write permission for the ansible user.
+ This option is mandatory for network shares.
+ type: str
+ required: True
+ reboot:
+ description: Whether to reboot the system after applying the updates.
+ type: bool
+ default: false
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: true
+ catalog_file_name:
+ required: False
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: 'Catalog.xml'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+'''
+
+EXAMPLES = """
+---
+- name: Update firmware from repository on a Network Share
+ community.general.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.0:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_pwd"
+ share_mnt: "/mnt/share"
+ reboot: True
+ job_wait: True
+ catalog_file_name: "Catalog.xml"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware update status.
+ returned: always
+ sample: "Successfully updated the firmware."
+update_status:
+ type: dict
+ description: Firmware Update job and progress details from the iDRAC.
+ returned: success
+ sample: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+def _validate_catalog_file(catalog_file_name):
+ normalized_file_name = catalog_file_name.lower()
+ if not normalized_file_name:
+ raise ValueError('catalog_file_name should be a non-empty string.')
+ elif not normalized_file_name.endswith("xml"):
+ raise ValueError('catalog_file_name should be an XML file.')
+
+
+def update_firmware(idrac, module):
+ """Update firmware from a network share and return the job details."""
+ msg = {}
+ msg['changed'] = False
+ msg['update_status'] = {}
+
+ try:
+ upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'],
+ mount_point=module.params['share_mnt'],
+ isFolder=False,
+ creds=UserCredentials(
+ module.params['share_user'],
+ module.params['share_password'])
+ )
+
+ idrac.use_redfish = True
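+ # Assumption: 12th/13th generation PowerEdge servers (iDRAC7/8) lack the Redfish
+ # update support this path relies on, so let omsdk fall back to its non-Redfish path there.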
+ if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration:
+ idrac.use_redfish = False
+
+ apply_update = True
+ msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share,
+ apply_update,
+ module.params['reboot'],
+ module.params['job_wait'])
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+
+ if "Status" in msg['update_status']:
+ if msg['update_status']['Status'] == "Success":
+ if module.params['job_wait']:
+ msg['changed'] = True
+ else:
+ module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status'])
+ return msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"required": True, "type": 'str'},
+
+ "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
+ "reboot": {"required": False, "type": 'bool', "default": False},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ },
+
+ supports_check_mode=False)
+
+ try:
+ # Validate the catalog file
+ _validate_catalog_file(module.params['catalog_file_name'])
+ # Connect to iDRAC and update firmware
+ with iDRACConnection(module.params) as idrac:
+ update_status = update_firmware(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
new file mode 100644
index 00000000..ea97ecdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_command
+short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - For use with Dell iDRAC operations that require Redfish OEM extensions
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Create BIOS configuration job (schedule BIOS setting update)
+ community.general.idrac_redfish_command:
+ category: Systems
+ command: CreateBiosConfigJob
+ resource_id: System.Embedded.1
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def create_bios_config_job(self):
+ result = {}
+ key = "Bios"
+ jobs = "Jobs"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uris[0])
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
+ "@odata.id"]
+
+ payload = {"TargetSettingsURI": set_bios_attr_uri}
+ response = self.post_request(
+ self.root_uri + self.manager_uri + "/" + jobs, payload)
+ if response['ret'] is False:
+ return response
+
+ response_output = response['resp'].__dict__
+ job_id = response_output["headers"]["Location"]
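+ # The Location header points at the newly created job resource
+ # (e.g. .../Managers/iDRAC.Embedded.1/Jobs/JID_123456789012, an illustrative value);
+ # keep only the JID_... token.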
+ job_id = re.search("JID_.+", job_id).group()
+ # Currently not passing job_id back to user but patch is coming
+ return {'ret': True, 'msg': "Config job %s created" % job_id}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["CreateBiosConfigJob"],
+ "Accounts": [],
+ "Manager": []
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "CreateBiosConfigJob":
+ # execute only if we find a Managers resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ result = rf_utils.create_bios_config_job()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(changed=True, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
new file mode 100644
index 00000000..485d54cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_config
+short_description: Manages servers through iDRAC using Dell Redfish APIs
+description:
+ - For use with Dell iDRAC operations that require Redfish OEM extensions
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ set or update a configuration attribute.
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Category to execute on iDRAC
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC
+ - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+ I(SetSystemAttributes) are mutually exclusive commands when C(category)
+ is I(Manager)
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC
+ type: str
+ manager_attribute_name:
+ required: false
+ description:
+ - (deprecated) name of iDRAC attribute to update
+ type: str
+ manager_attribute_value:
+ required: false
+ description:
+ - (deprecated) value of iDRAC attribute to update
+ type: str
+ manager_attributes:
+ required: false
+ description:
+ - dictionary of iDRAC attribute name and value pairs to update
+ default: {}
+ type: 'dict'
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to iDRAC controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ NTPConfigGroup.1.NTPEnable: "Enabled"
+ NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+ Time.1.Timezone: "{{ timezone }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable Syslog and set Syslog servers in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SysLog.1.SysLogEnable: "Enabled"
+ SysLog.1.Server1: "{{ syslog_server1 }}"
+ SysLog.1.Server2: "{{ syslog_server2 }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Configure SNMP community string, port, protocol and trap format
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SNMP.1.AgentEnable: "Enabled"
+ SNMP.1.AgentCommunity: "public_community_string"
+ SNMP.1.TrapFormat: "SNMPv1"
+ SNMP.1.SNMPProtocol: "All"
+ SNMP.1.DiscoveryPort: 161
+ SNMP.1.AlertPort: 162
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable CSIOR
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetLifecycleControllerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetSystemAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments
+)
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def set_manager_attributes(self, command):
+
+ result = {}
+ required_arg_spec = {'manager_attributes': {'required': True}}
+
+ try:
+ check_required_arguments(required_arg_spec, self.module.params)
+
+ except TypeError as e:
+ msg = to_native(e)
+ self.module.fail_json(msg=msg)
+
+ key = "Attributes"
+ command_manager_attributes_uri_map = {
+ "SetManagerAttributes": self.manager_uri,
+ "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
+ "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
+ }
+ manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)
+
+ attributes = self.module.params['manager_attributes']
+ manager_attr_name = self.module.params.get('manager_attribute_name')
+ manager_attr_value = self.module.params.get('manager_attribute_value')
+
+ # manager attributes to update
+ if manager_attr_name:
+ attributes.update({manager_attr_name: manager_attr_value})
+
+ attrs_to_patch = {}
+ attrs_skipped = {}
+
+ # Search for key entry and extract URI from it
+ response = self.get_request(self.root_uri + manager_uri + "/" + key)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False,
+ 'msg': "%s: Key %s not found" % (command, key)}
+
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ return {'ret': False,
+ 'msg': "%s: Manager attribute %s not found" % (command, attr_name)}
+
+ # Find out if value is already set to what we want. If yes, exclude
+ # those attributes
+ if data[u'Attributes'][attr_name] == attr_value:
+ attrs_skipped.update({attr_name: attr_value})
+ else:
+ attrs_to_patch.update({attr_name: attr_value})
+
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "Manager attributes already set"}
+
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]
+}
+
+# list of mutually exclusive commands for a category
+CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
+ "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ manager_attribute_name=dict(default=None),
+ manager_attribute_value=dict(default=None),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # check for mutually exclusive commands
+ try:
+ # check_mutually_exclusive accepts a single list or list of lists that
+ # are groups of terms that should be mutually exclusive with one another
+ # and checks that against a dictionary
+ check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
+ dict.fromkeys(command_list, True))
+
+ except TypeError as e:
+ module.fail_json(msg=to_native(e))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
+ result = rf_utils.set_manager_attributes(command)
+
+ if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])):
+ module.deprecate(msg='Arguments `manager_attribute_name` and '
+ '`manager_attribute_value` are deprecated. '
+ 'Use `manager_attributes` instead for passing in '
+ 'the manager attribute name and value pairs',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_facts.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a default of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a default of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py
new file mode 100644
index 00000000..39857fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/idrac_server_config_profile.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_server_config_profile
+short_description: Export or Import iDRAC Server Configuration Profile (SCP).
+description:
+ - Export the Server Configuration Profile (SCP) from the iDRAC or Import from a network share or a local file.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ command:
+ description:
+ - If C(import), will perform SCP import operations.
+ - If C(export), will perform SCP export operations.
+ choices: ['import', 'export']
+ default: 'export'
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ required: True
+ share_name:
+ description: CIFS or NFS Network Share or a local path.
+ type: str
+ required: True
+ share_user:
+ description: Network share user name. Use the format 'user@domain' or 'domain\\user' if the user is
+ part of a domain, otherwise just 'user'. This option is mandatory for CIFS network shares.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ scp_file:
+ description: Server Configuration Profile file name. This option is mandatory for C(import) command.
+ type: str
+ scp_components:
+ description:
+ - If C(ALL), this module will import all components configurations from SCP file.
+ - If C(IDRAC), this module will import iDRAC configuration from SCP file.
+ - If C(BIOS), this module will import BIOS configuration from SCP file.
+ - If C(NIC), this module will import NIC configuration from SCP file.
+ - If C(RAID), this module will import RAID configuration from SCP file.
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: 'ALL'
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), it gracefully shuts down the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), it does not reboot the server.
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+ choices: ['On' ,'Off']
+ default: 'On'
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ choices: ['JSON', 'XML']
+ default: 'XML'
+ export_use:
+ description: Specify the type of server configuration profile (SCP) to be exported.
+ This option is applicable for C(export) command.
+ choices: ['Default', 'Clone', 'Replace']
+ default: 'Default'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Jagadeesh N V(@jagadeeshnv)"
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Import Server Configuration Profile from a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Import Server Configuration Profile from a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Export Server Configuration Profile to a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+
+- name: Export Server Configuration Profile to a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the import or export SCP job.
+ returned: always
+ sample: "Successfully imported the Server Configuration Profile"
+scp_status:
+ type: dict
+ description: SCP operation job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+'''
+
+import os
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+ from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum,
+ ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum)
+except ImportError:
+ pass
+
+
+def run_import_server_config_profile(idrac, module):
+ """Import Server Configuration Profile from a network share."""
+ target = SCPTargetEnum[module.params['scp_components']]
+ job_wait = module.params['job_wait']
+ end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']]
+ shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(
+ share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']),
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']), isFolder=False)
+ import_status = idrac.config_mgr.scp_import(myshare,
+ target=target, shutdown_type=shutdown_type,
+ end_host_power_state=end_host_power_state,
+ job_wait=job_wait)
+ if not import_status or import_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to import scp.', scp_status=import_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return import_status
+
+
+def run_export_server_config_profile(idrac, module):
+ """Export Server Configuration Profile to a network share."""
+ export_format = ExportFormatEnum[module.params['export_format']]
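+ # File name template for the exported SCP; the placeholders (%ip and the
+ # time fields) are expanded by OMSDK when the file is created on the share.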
+ scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower())
+ target = SCPTargetEnum[module.params['scp_components']]
+ export_use = ExportUseEnum[module.params['export_use']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']),
+ isFolder=True)
+ scp_file_name = myshare.new_file(scp_file_name_format)
+ export_status = idrac.config_mgr.scp_export(scp_file_name,
+ target=target,
+ export_format=export_format,
+ export_use=export_use,
+ job_wait=module.params['job_wait'])
+ if not export_status or export_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to export scp.', scp_status=export_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return export_status
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str',
+ "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"required": False,
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
+ "default": 'ALL'},
+
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+ },
+ required_if=[
+ ["command", "import", ["scp_file"]]
+ ],
+ supports_check_mode=False)
+
+ try:
+ changed = False
+ with iDRACConnection(module.params) as idrac:
+ command = module.params['command']
+ if command == 'import':
+ scp_status = run_import_server_config_profile(idrac, module)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ else:
+ scp_status = run_export_server_config_profile(idrac, module)
+ module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command),
+ scp_status=scp_status)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py
new file mode 100644
index 00000000..ca318b4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imc_rest.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+- Provides direct access to the Cisco IMC REST API.
+- Perform any configuration changes and actions that the Cisco IMC supports.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+options:
+ hostname:
+ description:
+ - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+ required: true
+ aliases: [ host, ip ]
+ username:
+ description:
+ - Username used to login to the IMC.
+ default: admin
+ aliases: [ user ]
+ password:
+ description:
+ - The password to use for authentication.
+ default: password
+ path:
+ description:
+ - Absolute path of the file that contains the body
+ of the HTTP request being sent to the Cisco IMC REST API.
+ - Parameter C(path) is mutually exclusive with parameter C(content).
+ aliases: [ 'src', 'config_file' ]
+ content:
+ description:
+ - When used instead of C(path), sets the content of the API requests directly.
+ - This may be convenient for templating simple requests; for anything complex, use the M(ansible.builtin.template) module.
+ - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+ the Cisco IMC output is subsequently merged.
+ - Parameter C(content) is mutually exclusive with parameter C(path).
+ protocol:
+ description:
+ - Connection protocol to use.
+ default: https
+ choices: [ http, https ]
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ - This is the time that every single connection (every fragment) can spend.
+ If this C(timeout) is reached, the module will fail with a
+ C(Connection failure) indicating that C(The read operation timed out).
+ default: 60
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+ - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+notes:
+- The XML fragments don't need an authentication cookie, this is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+ from the previous configuration. As a result, this module will always report a change on subsequent runs.
+ In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
+ parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <!-- Configure Serial-on-LAN -->
+ <configConfMo><inConfig>
+ <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed=="115200" comport="com0"/>
+ </inConfig></configConfMo>
+
+ <!-- Configure Console Redirection -->
+ <configConfMo><inConfig>
+ <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+ vpBaudRate="115200"
+ vpConsoleRedirection="com-0"
+ vpFlowControl="none"
+ vpTerminalType="vt100"
+ vpPuttyKeyPad="LINUX"
+ vpRedirectionAfterPOST="Always Enable"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <!-- Configure PXE boot -->
+ <configConfMo><inConfig>
+ <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+ </inConfig></configConfMo>
+
+ <!-- Power cycle server -->
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Add customer description to server
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <configConfMo><inConfig>
+ <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+ </inConfig></configConfMo>
+
+ <configConfMo><inConfig>
+ <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+aaLogin:
+ description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+ "outPriv": "admin",
+ "outRefreshPeriod": "600",
+ "outSessionId": "114",
+ "outVersion": "2.0(13e)",
+ "response": "yes"
+ }
+configConfMo:
+ description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+elapsed:
+ description: Elapsed time in seconds
+ returned: always
+ type: int
+ sample: 31
+response:
+ description: HTTP response message, including content length
+ returned: always
+ type: str
+ sample: OK (729 bytes)
+status:
+ description: The HTTP response status code
+ returned: always
+ type: int
+ sample: 200
+error:
+ description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+ returned: failed
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "errorCode": "ERR-xml-parse-error",
+ "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+ "invocationResult": "594",
+ "response": "yes"
+ }
+error_code:
+ description: Cisco IMC error code
+ returned: failed
+ type: str
+ sample: ERR-xml-parse-error
+error_text:
+ description: Cisco IMC error message
+ returned: failed
+ type: str
+ sample: |
+ XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+ description: RAW XML input sent to the Cisco IMC, causing the error
+ returned: failed
+ type: str
+ sample: |
+ <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+ description: RAW XML output received from the Cisco IMC, with error details
+ returned: failed
+ type: str
+ sample: >
+ <error cookie=""
+ response="yes"
+ errorCode="ERR-xml-parse-error"
+ invocationResult="594"
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+'''
+
+import atexit
+import datetime
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+ import lxml.etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+ from xmljson import cobra
+ HAS_XMLJSON_COBRA = True
+except ImportError:
+ XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+ HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import zip_longest
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+ ''' Handle IMC returned data '''
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ result = cobra.data(xmloutput)
+
+ # Handle errors
+ if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+ if rawinput:
+ result['input'] = rawinput
+ result['output'] = rawoutput
+ result['error_code'] = xmloutput.get('errorCode')
+ result['error_text'] = xmloutput.get('errorDescr')
+ module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+ return result
+
+
+def logout(module, url, cookie, timeout):
+ ''' Perform a logout, if needed '''
+ data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+ resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
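+# Illustrative example (not executed) of how merge() combines the per-request
+# Cobra dictionaries so repeated fragments accumulate into one result:
+#   merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
+#     -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
+#   merge([1, 2], [None, 3]) -> [1, 3]
+# Values from the later structure win, except where the later value is None.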
+def merge(one, two):
+ ''' Merge two complex nested datastructures into one'''
+ if isinstance(one, dict) and isinstance(two, dict):
+ copy = dict(one)
+ # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+ copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+ return copy
+
+ elif isinstance(one, list) and isinstance(two, list):
+ return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+ return one if two is None else two
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+ username=dict(type='str', default='admin', aliases=['user']),
+ password=dict(type='str', default='password', no_log=True),
+ content=dict(type='str'),
+ path=dict(type='path', aliases=['config_file', 'src']),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ timeout=dict(type='int', default=60),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['content', 'path']],
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if not HAS_XMLJSON_COBRA:
+ module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ content = module.params['content']
+ path = module.params['path']
+
+ protocol = module.params['protocol']
+ timeout = module.params['timeout']
+
+ result = dict(
+ failed=False,
+ changed=False,
+ )
+
+ # Report missing file
+ file_exists = False
+ if path:
+ if os.path.isfile(path):
+ file_exists = True
+ else:
+ module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+ start = datetime.datetime.utcnow()
+
+ # Perform login first
+ url = '%s://%s/nuova' % (protocol, hostname)
+ data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+ resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or auth['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+ result.update(imc_response(module, resp.read()))
+
+ # Store cookie for future requests
+ try:
+ cookie = result['aaaLogin']['attributes']['outCookie']
+ except Exception:
+ module.fail_json(msg='Could not find cookie in output', **result)
+
+ # If we do not log out properly, we quickly run out of sessions
+ atexit.register(logout, module, url, cookie, timeout)
+
+ # Prepare request data
+ if content:
+ rawdata = content
+ elif file_exists:
+ with open(path, 'r') as config_object:
+ rawdata = config_object.read()
+
+ # Wrap the XML documents in a <root> element
+ xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
+
+ # Handle each XML document separately in the same session
+ for xmldoc in list(xmldata):
+ if xmldoc.tag is lxml.etree.Comment:
+ continue
+ # Add cookie to XML
+ xmldoc.set('cookie', cookie)
+ data = lxml.etree.tostring(xmldoc)
+
+ # Perform actual request
+ resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or info['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+ # Merge results with previous results
+ rawoutput = resp.read()
+ result = merge(result, imc_response(module, rawoutput, rawinput=data))
+ result['response'] = info['msg']
+ result['status'] = info['status']
+
+ # Check for any changes
+ # NOTE: Unfortunately the IMC API always reports status as 'modified'
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+ result['changed'] = ('modified' in results)
+
+ # Report success
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py
new file mode 100644
index 00000000..18a67d01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/imgadm.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+ - Manage SmartOS virtual machine images through imgadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ force:
+ required: false
+ type: bool
+ description:
+ - Force a given operation (where supported by imgadm(1M)).
+ pool:
+ required: false
+ default: zones
+ description:
+ - zpool to import to or delete images from.
+ type: str
+ source:
+ required: false
+ description:
+ - URI for the image source.
+ type: str
+ state:
+ required: true
+ choices: [ present, absent, deleted, imported, updated, vacuumed ]
+ description:
+ - State the object operated on should be in. C(imported) is an alias
+ for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+ and C(uuid) to C(*), it will remove all unused images.
+ type: str
+
+ type:
+ required: false
+ choices: [ imgapi, docker, dsapi ]
+ default: imgapi
+ description:
+ - Type for image sources.
+ type: str
+
+ uuid:
+ required: false
+ description:
+ - Image UUID. Can either be a full UUID or C(*) for all images.
+ type: str
+
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: imported
+
+- name: Delete an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: deleted
+
+- name: Update all images
+ community.general.imgadm:
+ uuid: '*'
+ state: updated
+
+- name: Update a single image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: updated
+
+- name: Add a source
+ community.general.imgadm:
+ source: 'https://datasets.project-fifo.net'
+ state: present
+
+- name: Add a Docker source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ type: docker
+ state: present
+
+- name: Remove a source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ state: absent
+'''
+
+RETURN = '''
+source:
+ description: Source that is managed.
+ returned: When not managing an image.
+ type: str
+ sample: https://datasets.project-fifo.net
+uuid:
+ description: UUID for an image operated on.
+ returned: When not managing an image source.
+ type: str
+ sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
+# -E option to return any errors in JSON, the generated JSON does not play well
+# with the JSON parsers of Python. The returned message contains '\n' as part of
+# the stacktrace, which breaks the parsers.
+
+
+class Imgadm(object):
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.cmd = module.get_bin_path('imgadm', required=True)
+ self.changed = False
+ self.uuid = module.params['uuid']
+
+ # Since there are a number of (natural) aliases, prevent having to look
+ # them up every time we operate on `state`.
+ if self.params['state'] in ['present', 'imported', 'updated']:
+ self.present = True
+ else:
+ self.present = False
+
+ # Perform basic UUID validation upfront.
+ if self.uuid and self.uuid != '*':
+ if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+ module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+ # Helper method to massage stderr
+ def errmsg(self, stderr):
+ match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+ if match:
+ return match.groups()[0]
+ else:
+ return 'Unexpected failure'
+
+ def update_images(self):
+ if self.uuid == '*':
+ cmd = '{0} update'.format(self.cmd)
+ else:
+ cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+ # There is no feedback from imgadm(1M) to determine if anything
+ # was actually changed. So treat this as an 'always-changes' operation.
+ # Note that 'imgadm -v' produces unparseable JSON...
+ self.changed = True
+
+ def manage_sources(self):
+ force = self.params['force']
+ source = self.params['source']
+ imgtype = self.params['type']
+
+ cmd = '{0} sources'.format(self.cmd)
+
+ if force:
+ cmd += ' -f'
+
+ if self.present:
+ cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
+
+ # Check the various responses.
+ # Note that trying to add a source with the wrong type is handled
+ # above as it results in a non-zero status.
+
+ regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Added "%s" image source "%s"' % (imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = True
+ else:
+ # Type is ignored by imgadm(1M) here
+ cmd += ' -d %s' % source
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
+
+ regex = 'Do not have image source "%s", no change' % source
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Deleted ".*" image source "%s"' % source
+ if re.match(regex, stdout):
+ self.changed = True
+
+ def manage_images(self):
+ pool = self.params['pool']
+ state = self.params['state']
+
+ if state == 'vacuumed':
+ # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
+ cmd = '{0} vacuum -f'.format(self.cmd)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
+ else:
+ if stdout == '':
+ self.changed = False
+ else:
+ self.changed = True
+ if self.present:
+ cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
+
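+ # Decide 'changed' by matching imgadm(1M)'s output messages below: an
+ # already-installed image leaves it False, a fresh import sets it True.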
+ regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = '.*ActiveImageNotFound.*'
+ if re.match(regex, stderr):
+ self.changed = False
+
+ regex = 'Imported image {0}.*'.format(self.uuid)
+ if re.match(regex, stdout.splitlines()[-1]):
+ self.changed = True
+ else:
+ cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ regex = '.*ImageNotInstalled.*'
+ if re.match(regex, stderr):
+ # Even if the 'rc' was non-zero (3), we handled the situation
+ # in order to determine if there was a change.
+ self.changed = False
+
+ regex = 'Deleted image {0}'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool'),
+ pool=dict(default='zones'),
+ source=dict(),
+ state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
+ type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
+ uuid=dict()
+ ),
+ # This module relies largely on imgadm(1M) to enforce idempotency, which does not
+ # provide a "noop" (or equivalent) mode to do a dry-run.
+ supports_check_mode=False,
+ )
+
+ imgadm = Imgadm(module)
+
+ uuid = module.params['uuid']
+ source = module.params['source']
+ state = module.params['state']
+
+ result = {'state': state}
+
+ # Either manage sources or images.
+ if source:
+ result['source'] = source
+ imgadm.manage_sources()
+ else:
+ result['uuid'] = uuid
+
+ if state == 'updated':
+ imgadm.update_images()
+ else:
+ # Make sure we operate on a single image for the following actions
+ if (uuid == '*') and (state != 'vacuumed'):
+ module.fail_json(msg='Can only specify uuid as "*" when updating image(s)')
+ imgadm.manage_images()
+
+ result['changed'] = imgadm.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py
new file mode 100644
index 00000000..ab41f680
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/infinity.py
@@ -0,0 +1,565 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, <meiliu@fusionlayer.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+ - Manage Infinity IPAM using REST API.
+author:
+ - Meirong Liu (@MeganLiu)
+options:
+ server_ip:
+ description:
+ - IP address of the Infinity server.
+ type: str
+ required: true
+ username:
+ description:
+ - Username to access Infinity.
+ - The user must have REST API privileges.
+ type: str
+ required: true
+ password:
+ description:
+ - Infinity password.
+ type: str
+ required: true
+ action:
+ description:
+ - Action to perform.
+ type: str
+ required: true
+ choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+ network_id:
+ description:
+ - Network ID.
+ type: str
+ default: ''
+ ip_address:
+ description:
+ - IP Address for a reservation or a release.
+ type: str
+ default: ''
+ network_address:
+ description:
+ - Network address in CIDR format (e.g., 192.168.31.0).
+ type: str
+ default: ''
+ network_size:
+ description:
+ - Network bitmask (e.g. 255.255.255.192) or CIDR prefix (e.g., /26).
+ type: str
+ default: ''
+ network_name:
+ description:
+ - The name of a network.
+ type: str
+ default: ''
+ network_location:
+ description:
+ - The parent network id for a given network.
+ type: int
+ default: -1
+ network_type:
+ description:
+ - Network type defined by Infinity.
+ type: str
+ choices: [ lan, shared_lan, supernet ]
+ default: lan
+ network_family:
+ description:
+ - Network family defined by Infinity, e.g. IPv4, IPv6, or dual stack.
+ type: str
+ choices: [ 4, 6, dual ]
+ default: 4
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+ connection: local
+ strategy: debug
+ tasks:
+ - name: Reserve network into Infinity IPAM
+ community.general.infinity:
+ server_ip: 80.75.107.12
+ username: username
+ password: password
+ action: reserve_network
+ network_name: reserve_new_ansible_network
+ network_family: 4
+ network_type: lan
+ network_id: 1201
+ network_size: /28
+ register: infinity
+'''
+
+RETURN = r'''
+network_id:
+ description: id for a given network
+ returned: success
+ type: str
+ sample: '1501'
+ip_info:
+ description: When reserving the next available IP address from a network, the IP address info is returned.
+ returned: success
+ type: str
+ sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+ description: When reserving a LAN network from an Infinity supernet by providing network_size, the information about the reserved network is returned.
+ returned: success
+ type: str
+ sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102,
+ "network_size": null,"description": null,"network_location": "3085",
+ "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "network_type": "lan","network_name": "'reserve_new_ansible_network'"}
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+ """
+ Class for managing REST API calls to the Infinity server.
+ """
+
+ def __init__(self, module, server_ip, username, password):
+ self.module = module
+ self.auth_user = username
+ self.auth_pass = password
+ self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+ def _get_api_call_ansible_handler(
+ self,
+ method='get',
+ resource_url='',
+ stat_codes=None,
+ params=None,
+ payload_data=None):
+ """
+ Perform the HTTPS request to the Infinity REST API using Ansible's open_url helper.
+ """
+ stat_codes = [200] if stat_codes is None else stat_codes
+ request_url = str(self.base_url) + str(resource_url)
+ response = None
+ headers = {'Content-Type': 'application/json'}
+ if not request_url:
+ self.module.exit_json(
+ msg="When sending Rest api call , the resource URL is empty, please check.")
+ if payload_data and not isinstance(payload_data, str):
+ payload_data = json.dumps(payload_data)
+ response_raw = open_url(
+ str(request_url),
+ method=method,
+ timeout=20,
+ headers=headers,
+ url_username=self.auth_user,
+ url_password=self.auth_pass,
+ validate_certs=False,
+ force_basic_auth=True,
+ data=payload_data)
+
+ response = response_raw.read()
+ payload = ''
+ if response_raw.code not in stat_codes:
+ self.module.exit_json(
+ changed=False,
+ meta=" openurl response_raw.code show error and error code is %r" %
+ (response_raw.code))
+ else:
+ if isinstance(response, str) and len(response) > 0:
+ payload = response
+ elif method.lower() == 'delete' and response_raw.code == 204:
+ payload = 'Delete is done.'
+ if isinstance(payload, dict) and "text" in payload:
+ self.module.exit_json(
+ changed=False,
+ meta="when calling rest api, returned data is not json ")
+ raise Exception(payload["text"])
+ return payload
+
+ # ---------------------------------------------------------------------------
+ # get_network()
+ # ---------------------------------------------------------------------------
+ def get_network(self, network_id, network_name, limit=-1):
+ """
+ Search for a network inside Infinity via the REST API.
+ Either network_id or network_name needs to be provided.
+ Returns the details of the network with the given network_id or name.
+ """
+ if network_name is None and network_id is None:
+ self.module.exit_json(
+ msg="You must specify one of the options 'network_name' or 'network_id'.")
+ method = "get"
+ resource_url = ''
+ params = {}
+ response = None
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if network_id is None and network_name:
+ method = "get"
+ resource_url = "search"
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list) and len(
+ response) > 1 and limit == 1:
+ response = response[0]
+ response = json.dumps(response)
+ return response
+
+ # ---------------------------------------------------------------------------
+ # get_network_id()
+ # ---------------------------------------------------------------------------
+ def get_network_id(self, network_name="", network_type='lan'):
+ """
+ Query the network_id from Infinity via the REST API based on the given network_name.
+ """
+ method = 'get'
+ resource_url = 'search'
+ response = None
+ if network_name is None:
+ self.module.exit_json(
+ msg="You must specify the option 'network_name'")
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ network_id = ""
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list):
+ response = response[0]
+ network_id = response['id']
+ return network_id
+
+ # ---------------------------------------------------------------------------
+ # reserve_next_available_ip()
+ # ---------------------------------------------------------------------------
+ def reserve_next_available_ip(self, network_id=""):
+ """
+ Reserve an IP address via the Infinity REST API.
+ network_id: the id of the network to reserve the address from
+ Returns the next available IP address from that network.
+ """
+ method = "post"
+ resource_url = ''
+ response = None
+ ip_info = ''
+ if not network_id:
+ self.module.exit_json(
+ msg="You must specify the option 'network_id'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_ip"
+ response = self._get_api_call_ansible_handler(method, resource_url)
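+ # The reserve_ip endpoint appears to return a JSON list; extract the first
+ # object ("{...}") from the raw text as the reserved address details.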
+ if response and response.find(
+ "[") >= 0 and response.find("]") >= 0:
+ start_pos = response.find("{")
+ end_pos = response.find("}")
+ ip_info = response[start_pos: (end_pos + 1)]
+ return ip_info
+
+ # -------------------------
+ # release_ip()
+ # -------------------------
+ def release_ip(self, network_id="", ip_address=""):
+ """
+ Release an IP address via the Infinity REST API.
+ """
+ method = "get"
+ resource_url = ''
+ response = None
+ if ip_address is None or network_id is None:
+ self.module.exit_json(
+ msg="You must specify those two options: 'network_id' and 'ip_address'.")
+
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg="There is an error in release ip %s from network %s." %
+ (ip_address, network_id))
+
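+ # Walk the address records under the network and look up each one until the
+ # requested IP is found, then delete that record to release the address.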
+ ip_list = json.loads(response)
+ ip_idlist = []
+ for ip_item in ip_list:
+ ip_id = ip_item['id']
+ ip_idlist.append(ip_id)
+ deleted_ip_id = ''
+ for ip_id in ip_idlist:
+ ip_response = ''
+ resource_url = "ip_addresses/" + str(ip_id)
+ ip_response = self._get_api_call_ansible_handler(
+ method,
+ resource_url,
+ stat_codes=[200])
+ if ip_response and json.loads(
+ ip_response)['address'] == str(ip_address):
+ deleted_ip_id = ip_id
+ break
+ if deleted_ip_id:
+ method = 'delete'
+ resource_url = "ip_addresses/" + str(deleted_ip_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release ip, could not find the ip address %r from the given network %r' ." %
+ (ip_address, network_id))
+
+ return response
+
+ # -------------------
+ # delete_network()
+ # -------------------
+ def delete_network(self, network_id="", network_name=""):
+ """
+ delete network from Infinity by using rest api
+ """
+ method = 'delete'
+ resource_url = ''
+ response = None
+ if network_id is None and network_name is None:
+ self.module.exit_json(
+ msg="You must specify one of those options: 'network_id','network_name' .")
+ if network_id is None and network_name:
+ network_id = self.get_network_id(network_name=network_name)
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ return response
+
+ # ---------------------------------------------------------------------------
+ # reserve_network()
+ # ---------------------------------------------------------------------------
+ def reserve_network(self, network_id="",
+ reserved_network_name="", reserved_network_description="",
+ reserved_network_size="", reserved_network_family='4',
+ reserved_network_type='lan', reserved_network_address="",):
+ """
+ Reserves the first available network of specified size from a given supernet
+ <dt>network_name (required)</dt><dd>Name of the network</dd>
+ <dt>description (optional)</dt><dd>Free description</dd>
+ <dt>network_family (required)</dt><dd>Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'</dd>
+ <dt>network_address (optional)</dt><dd>Address of the new network. If not given, the first network available will be created.</dd>
+ <dt>network_size (required)</dt><dd>Size of the new network in /&lt;prefix&gt; notation.</dd>
+ <dt>network_type (required)</dt><dd>Type of network. One of 'supernet', 'lan', 'shared_lan'</dd>
+
+ """
+ method = 'post'
+ resource_url = ''
+ network_info = None
+ if network_id is None or reserved_network_name is None or reserved_network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_network"
+ if not reserved_network_family:
+ reserved_network_family = '4'
+ if not reserved_network_type:
+ reserved_network_type = 'lan'
+ payload_data = {
+ "network_name": reserved_network_name,
+ 'description': reserved_network_description,
+ 'network_size': reserved_network_size,
+ 'network_family': reserved_network_family,
+ 'network_type': reserved_network_type,
+ 'network_location': int(network_id)}
+ if reserved_network_address:
+ payload_data.update({'network_address': reserved_network_address})
+
+ network_info = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+ return network_info
+
+ # ---------------------------------------------------------------------------
+ # release_network()
+ # ---------------------------------------------------------------------------
+ def release_network(
+ self,
+ network_id="",
+ released_network_name="",
+ released_network_type='lan'):
+ """
+ Release the network with name 'released_network_name' from the given supernet network_id
+ """
+ method = 'get'
+ response = None
+ if network_id is None or released_network_name is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ matched_network_id = ""
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg=" there is an error in releasing network %r from network %s." %
+ (network_id, released_network_name))
+ if response:
+ response = json.loads(response)
+ for child_net in response:
+ if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+ matched_network_id = child_net['network']['network_id']
+ break
+ response = None
+ if matched_network_id:
+ method = 'delete'
+ resource_url = "networks/" + str(matched_network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release network , could not find the network %r from the given superent %r' " %
+ (released_network_name, network_id))
+
+ return response
+
+ # ---------------------------------------------------------------------------
+ # add_network()
+ # ---------------------------------------------------------------------------
+ def add_network(
+ self, network_name="", network_address="",
+ network_size="", network_family='4',
+ network_type='lan', network_location=-1):
+ """
+ Add a new LAN network into a given supernet in Fusionlayer Infinity via the REST API, or into the default supernet.
+ Required fields: ['network_name', 'network_family', 'network_type', 'network_address', 'network_size']
+ """
+ method = 'post'
+ resource_url = 'networks'
+ response = None
+ if network_name is None or network_address is None or network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_name', 'network_address' and 'network_size'")
+
+ if not network_family:
+ network_family = '4'
+ if not network_type:
+ network_type = 'lan'
+ if not network_location:
+ network_location = -1
+ payload_data = {
+ "network_name": network_name,
+ 'network_address': network_address,
+ 'network_size': network_size,
+ 'network_family': network_family,
+ 'network_type': network_type,
+ 'network_location': network_location}
+ response = self._get_api_call_ansible_handler(
+ method='post', resource_url=resource_url,
+ stat_codes=[200], payload_data=payload_data)
+ return response
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_ip=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ network_id=dict(type='str'),
+ ip_address=dict(type='str'),
+ network_name=dict(type='str'),
+ network_location=dict(type='int', default=-1),
+ network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+ network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+ network_address=dict(type='str'),
+ network_size=dict(type='str'),
+ action=dict(type='str', required=True, choices=[
+ 'add_network',
+ 'delete_network',
+ 'get_network',
+ 'get_network_id',
+ 'release_ip',
+ 'release_network',
+ 'reserve_network',
+ 'reserve_next_available_ip',
+ ],),
+ ),
+ required_together=(
+ ['username', 'password'],
+ ),
+ )
+ server_ip = module.params["server_ip"]
+ username = module.params["username"]
+ password = module.params["password"]
+ action = module.params["action"]
+ network_id = module.params["network_id"]
+ released_ip = module.params["ip_address"]
+ network_name = module.params["network_name"]
+ network_family = module.params["network_family"]
+ network_type = module.params["network_type"]
+ network_address = module.params["network_address"]
+ network_size = module.params["network_size"]
+ network_location = module.params["network_location"]
+ my_infinity = Infinity(module, server_ip, username, password)
+ result = ''
+ if action == "reserve_next_available_ip":
+ if network_id:
+ result = my_infinity.reserve_next_available_ip(network_id)
+ if not result:
+ result = 'There was an error calling reserve_next_available_ip'
+ module.exit_json(changed=False, meta=result)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_ip":
+ if network_id and released_ip:
+ result = my_infinity.release_ip(
+ network_id=network_id, ip_address=released_ip)
+ module.exit_json(changed=True, meta=result)
+ elif action == "delete_network":
+ result = my_infinity.delete_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "get_network_id":
+ result = my_infinity.get_network_id(
+ network_name=network_name, network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+ elif action == "get_network":
+ result = my_infinity.get_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+ elif action == "reserve_network":
+ result = my_infinity.reserve_network(
+ network_id=network_id,
+ reserved_network_name=network_name,
+ reserved_network_size=network_size,
+ reserved_network_family=network_family,
+ reserved_network_type=network_type,
+ reserved_network_address=network_address)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_network":
+ result = my_infinity.release_network(
+ network_id=network_id,
+ released_network_name=network_name,
+ released_network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "add_network":
+ result = my_infinity.add_network(
+ network_name=network_name,
+ network_location=network_location,
+ network_address=network_address,
+ network_size=network_size,
+ network_family=network_family,
+ network_type=network_type)
+
+ module.exit_json(changed=True, meta=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py
new file mode 100644
index 00000000..7b798c36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_database.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ state:
+ description:
+ - Determines if the database should be created or destroyed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+
+- name: Destroy database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ ssl: yes
+ validate_certs: yes
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ database_name = influxdb.database_name
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py
new file mode 100644
index 00000000..d9cf5007
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_query.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_query
+short_description: Query data points from InfluxDB
+description:
+ - Query data points from InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ query:
+ description:
+ - Query to be executed.
+ required: true
+ type: str
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Query connections
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections"
+ register: connection
+
+- name: Query connections with tags filters
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections where region='zue01' and host='server01'"
+ register: connection
+
+- name: Print results from the query
+ ansible.builtin.debug:
+ var: connection.query_results
+'''
+
+RETURN = r'''
+query_results:
+ description: Result from the query
+ returned: success
+ type: list
+ sample:
+ - mean: 1245.5333333333333
+ time: "1970-01-01T00:00:00Z"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBRead(InfluxDb):
+
+ def read_by_query(self, query):
+ client = self.connect_to_influxdb()
+ try:
+ rs = client.query(query)
+ if rs:
+ return list(rs.get_points())
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ query=dict(type='str', required=True),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influx = AnsibleInfluxDBRead(module)
+ query = module.params.get('query')
+ results = influx.read_by_query(query)
+ module.exit_json(changed=True, query_results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
new file mode 100644
index 00000000..0774915f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Name of the retention policy.
+ required: true
+ type: str
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data.
+ required: true
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ required: true
+ type: int
+ default:
+ description:
+ - Sets the retention policy as default retention policy.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: yes
+ validate_certs: yes
+
+- name: Create 1 day retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+
+- name: Create 1 week retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+
+- name: Create infinite retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ policy_name: test
+ duration: INF
+ replication: 1
+ ssl: no
+ validate_certs: no
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils._text import to_native
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ hostname = module.params['hostname']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+ return retention_policy
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+
+ if not module.check_mode:
+ try:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ duration_regexp = re.compile(r'(\d+)([hdw]{1})|(^INF$){1}')
+ changed = False
+
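+ # Normalise the requested duration into the '<n>h0m0s' form the InfluxDB client
+ # reports (e.g. '1d' -> '24h0m0s', 'INF' -> '0') before comparing it with the
+ # existing policy to decide whether an alter is needed.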
+ duration_lookup = duration_regexp.search(duration)
+
+ if duration_lookup.group(2) == 'h':
+ influxdb_duration_format = '%s0m0s' % duration
+ elif duration_lookup.group(2) == 'd':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24)
+ elif duration_lookup.group(2) == 'w':
+ influxdb_duration_format = '%sh0m0s' % (int(duration_lookup.group(1)) * 24 * 7)
+ elif duration == 'INF':
+ influxdb_duration_format = '0'
+
+ if (not retention_policy['duration'] == influxdb_duration_format or
+ not retention_policy['replicaN'] == int(replication) or
+ not retention_policy['default'] == default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ duration=dict(required=True, type='str'),
+ replication=dict(required=True, type='int'),
+ default=dict(default=False, type='bool')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ retention_policy = find_retention_policy(module, client)
+
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py
new file mode 100644
index 00000000..e17e3753
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
+# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+ - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ user_name:
+ description:
+ - Name of the user.
+ required: True
+ type: str
+ user_password:
+ description:
+ - Password to be set for the user.
+ required: false
+ type: str
+ admin:
+ description:
+ - Whether the user should be in the admin role or not.
+ - Since version 2.8, the role will also be updated.
+ default: no
+ type: bool
+ state:
+ description:
+ - State of the user.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ grants:
+ description:
+ - Privileges to grant to this user.
+ - Takes a list of dicts containing the "database" and "privilege" keys.
+ - If this argument is not provided, the current grants will be left alone.
+ - If an empty list is provided, all grants for the user will be removed.
+ type: list
+ elements: dict
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ admin: yes
+ hostname: "{{ influxdb_hostname }}"
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants:
+ - database: 'collectd'
+ privilege: 'WRITE'
+ - database: 'graphite'
+ privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ state: absent
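+
+# Illustrative addition (not part of the upstream examples): per the 'grants'
+# documentation above, passing an empty list removes all grants from the user.
+# The user name and credential variables are hypothetical placeholders.
+- name: Remove all privileges from user john
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants: []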
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+ user_result = None
+
+ try:
+ users = client.get_list_users()
+ for user in users:
+ if user['user'] == user_name:
+ user_result = user
+ break
+ except (ConnectionError, influx.exceptions.InfluxDBClientError) as e:
+ module.fail_json(msg=to_native(e))
+ return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+ try:
+ client.switch_user(user_name, user_password)
+ client.get_list_users()
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 401:
+ return False
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ finally:
+ # restore previous user
+ client.switch_user(module.params['username'], module.params['password'])
+ return True
+
+
+def set_user_password(module, client, user_name, user_password):
+ if not module.check_mode:
+ try:
+ client.set_user_password(user_name, user_password)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+ if not module.check_mode:
+ try:
+ client.create_user(user_name, user_password, admin)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+ if not module.check_mode:
+ try:
+ client.drop_user(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+ changed = False
+
+ try:
+ current_grants = client.get_list_privileges(user_name)
+ # Fix privileges wording
+ for i, v in enumerate(current_grants):
+ if v['privilege'] == 'ALL PRIVILEGES':
+ v['privilege'] = 'ALL'
+ current_grants[i] = v
+ elif v['privilege'] == 'NO PRIVILEGES':
+ del(current_grants[i])
+
+ # check if the current grants are included in the desired ones
+ for current_grant in current_grants:
+ if current_grant not in grants:
+ if not module.check_mode:
+ client.revoke_privilege(current_grant['privilege'],
+ current_grant['database'],
+ user_name)
+ changed = True
+
+ # check if the desired grants are included in the current ones
+ for grant in grants:
+ if grant not in current_grants:
+ if not module.check_mode:
+ client.grant_privilege(grant['privilege'],
+ grant['database'],
+ user_name)
+ changed = True
+
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ return changed
+
+
+def main():
+ argument_spec = influx.InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+ admin=dict(default=False, type='bool'),
+ grants=dict(type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ user_name = module.params['user_name']
+ user_password = module.params['user_password']
+ admin = module.params['admin']
+ grants = module.params['grants']
+ influxdb = influx.InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ user = find_user(module, client, user_name)
+
+ changed = False
+
+ if state == 'present':
+ if user:
+ if not check_user_password(module, client, user_name, user_password) and user_password is not None:
+ set_user_password(module, client, user_name, user_password)
+ changed = True
+
+ try:
+ if admin and not user['admin']:
+ if not module.check_mode:
+ client.grant_admin_privileges(user_name)
+ changed = True
+ elif not admin and user['admin']:
+ if not module.check_mode:
+ client.revoke_admin_privileges(user_name)
+ changed = True
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=to_native(e))
+
+ else:
+ user_password = user_password or ''
+ create_user(module, client, user_name, user_password, admin)
+ changed = True
+
+ if grants is not None:
+ if set_user_grants(module, client, user_name, grants):
+ changed = True
+
+ module.exit_json(changed=changed)
+
+ if state == 'absent':
+ if user:
+ drop_user(module, client, user_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py
new file mode 100644
index 00000000..0dc063a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/influxdb_write.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_write
+short_description: Write data points into InfluxDB
+description:
+ - Write data points into InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+options:
+ data_points:
+ description:
+ - Data points as dict to write into the database.
+ required: true
+ type: list
+ elements: dict
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+- community.general.influxdb
+
+'''
+
+EXAMPLES = r'''
+- name: Write points into database
+ community.general.influxdb_write:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ data_points:
+ - measurement: connections
+ tags:
+ host: server01
+ region: us-west
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 2000
+ - measurement: connections
+ tags:
+ host: server02
+ region: us-east
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 3000
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBWrite(InfluxDb):
+
+ def write_data_point(self, data_points):
+ client = self.connect_to_influxdb()
+
+ try:
+ client.write_points(data_points)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ data_points=dict(required=True, type='list', elements='dict'),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ influx = AnsibleInfluxDBWrite(module)
+ data_points = module.params.get('data_points')
+ influx.write_data_point(data_points)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py
new file mode 100644
index 00000000..0beaca9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ini_file.py
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ini_file
+short_description: Tweak settings in INI files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual settings in an INI-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+ - Adds missing sections if they don't exist.
+ - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
+ - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ no other modifications need to be applied.
+options:
+ path:
+ description:
+ - Path to the INI-style file; this file is created if required.
+ - Before Ansible 2.3 this option was only usable as I(dest).
+ type: path
+ required: true
+ aliases: [ dest ]
+ section:
+ description:
+ - Section name in INI file. This is added automatically when C(state=present) and
+ a single value is being set.
+ - If left empty or set to C(null), the I(option) will be placed before the first I(section).
+ - Using C(null) is also required if the config format does not support sections.
+ type: str
+ required: true
+ option:
+ description:
+ - If set (required for changing a I(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole I(section).
+ type: str
+ value:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ type: str
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ state:
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ no_extra_spaces:
+ description:
+ - Do not insert spaces before and after '=' symbol.
+ type: bool
+ default: no
+ create:
+ description:
+ - If set to C(no), the module will fail if the file does not already exist.
+ - By default it will create the file if it is missing.
+ type: bool
+ default: yes
+ allow_no_value:
+ description:
+ - Allow option without value and without '=' symbol.
+ type: bool
+ default: no
+notes:
+ - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: '0600'
+ backup: yes
+
+- name: Ensure "temperature=cold is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: yes
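+
+# Illustrative additions (not part of the upstream examples): 'state: absent'
+# removes an existing option, and 'allow_no_value' writes an option without a
+# value, as documented above. Paths and option names are hypothetical.
+- name: Remove "temperature" from section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ state: absent
+
+- name: Add an option with no value to section "[global]"
+ community.general.ini_file:
+ path: /etc/conf
+ section: global
+ option: skip-networking
+ allow_no_value: yes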
+'''
+
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def match_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
+ or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def match_active_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
+
+
+def do_ini(module, filename, section=None, option=None, value=None,
+ state='present', backup=False, no_extra_spaces=False, create=True,
+ allow_no_value=False):
+
+ diff = dict(
+ before='',
+ after='',
+ before_header='%s (content)' % filename,
+ after_header='%s (content)' % filename,
+ )
+
+ if not os.path.exists(filename):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ ini_file = open(filename, 'r')
+ try:
+ ini_lines = ini_file.readlines()
+ finally:
+ ini_file.close()
+
+ if module._diff:
+ diff['before'] = ''.join(ini_lines)
+
+ changed = False
+
+ # ini file could be empty
+ if not ini_lines:
+ ini_lines.append('\n')
+
+ # last line of file may not contain a trailing newline
+ if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
+ ini_lines[-1] += '\n'
+ changed = True
+
+ # append fake section lines to simplify the logic
+ # At top:
+ # Fake section that will not match any other section in the file
+ # Using commit hash as fake section name
+ fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+ # Insert it at the beginning
+ ini_lines.insert(0, '[%s]' % fake_section_name)
+
+ # At bottom:
+ ini_lines.append('[')
+
+ # If no section is defined, fake section is used
+ if not section:
+ section = fake_section_name
+
+ within_section = not section
+ section_start = 0
+ msg = 'OK'
+ if no_extra_spaces:
+ assignment_format = '%s=%s\n'
+ else:
+ assignment_format = '%s = %s\n'
+
+ for index, line in enumerate(ini_lines):
+ if line.startswith('[%s]' % section):
+ within_section = True
+ section_start = index
+ elif line.startswith('['):
+ if within_section:
+ if state == 'present':
+ # insert missing option line at the end of the section
+ for i in range(index, 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
+ if not value and allow_no_value:
+ ini_lines.insert(i, '%s\n' % option)
+ else:
+ ini_lines.insert(i, assignment_format % (option, value))
+ msg = 'option added'
+ changed = True
+ break
+ elif state == 'absent' and not option:
+ # remove the entire section
+ del ini_lines[section_start:index]
+ msg = 'section removed'
+ changed = True
+ break
+ else:
+ if within_section and option:
+ if state == 'present':
+ # change the existing option line
+ if match_opt(option, line):
+ if not value and allow_no_value:
+ newline = '%s\n' % option
+ else:
+ newline = assignment_format % (option, value)
+ option_changed = ini_lines[index] != newline
+ changed = changed or option_changed
+ if option_changed:
+ msg = 'option changed'
+ ini_lines[index] = newline
+ if option_changed:
+ # remove all possible option occurrences from the rest of the section
+ index = index + 1
+ while index < len(ini_lines):
+ line = ini_lines[index]
+ if line.startswith('['):
+ break
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ else:
+ index = index + 1
+ break
+ elif state == 'absent':
+ # delete the existing line
+ if match_active_opt(option, line):
+ del ini_lines[index]
+ changed = True
+ msg = 'option changed'
+ break
+
+ # remove the fake section line
+ del ini_lines[0]
+ del ini_lines[-1:]
+
+ if not within_section and option and state == 'present':
+ ini_lines.append('[%s]\n' % section)
+ if not value and allow_no_value:
+ ini_lines.append('%s\n' % option)
+ else:
+ ini_lines.append(assignment_format % (option, value))
+ changed = True
+ msg = 'section and option added'
+
+ if module._diff:
+ diff['after'] = ''.join(ini_lines)
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if backup:
+ backup_file = module.backup_local(filename)
+
+ try:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'w')
+ f.writelines(ini_lines)
+ f.close()
+ except IOError:
+ module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+ module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc())
+
+ return (changed, backup_file, diff, msg)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest']),
+ section=dict(type='str', required=True),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ no_extra_spaces=dict(type='bool', default=False),
+ allow_no_value=dict(type='bool', default=False),
+ create=dict(type='bool', default=True)
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ section = module.params['section']
+ option = module.params['option']
+ value = module.params['value']
+ state = module.params['state']
+ backup = module.params['backup']
+ no_extra_spaces = module.params['no_extra_spaces']
+ allow_no_value = module.params['allow_no_value']
+ create = module.params['create']
+
+ (changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
+
+ if not module.check_mode and os.path.exists(path):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ results = dict(
+ changed=changed,
+ diff=diff,
+ msg=msg,
+ path=path,
+ )
+ if backup_file is not None:
+ results['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py
new file mode 100644
index 00000000..af7a950a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/installp.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+- Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+ - Manage packages using 'installp' on AIX.
+options:
+ accept_license:
+ description:
+ - Whether to accept the license for the package(s).
+ type: bool
+ default: no
+ name:
+ description:
+ - One or more packages to install or remove.
+ - Use C(all) to install all packages available in the specified C(repository_path).
+ type: list
+ elements: str
+ required: true
+ aliases: [ pkg ]
+ repository_path:
+ description:
+ - Path with AIX packages (required to install).
+ type: path
+ state:
+ description:
+ - Whether the package needs to be present on or absent from the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+- If the package is already installed, even if the package/fileset is new, the module will not install it.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+ community.general.installp:
+ name: foo
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master only
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Remove package bos.sysmgt.nim.master
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ state: absent
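+
+# Illustrative addition (not part of the upstream examples): per the 'name'
+# documentation, C(all) installs every package available in the given
+# repository_path. The path below is hypothetical.
+- name: Install all packages available in the repository
+ community.general.installp:
+ name: all
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present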
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+ """
+ Check if the package or fileset name and the repository path are correct.
+
+ :param module: Ansible module arguments spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package information.
+ """
+
+ if os.path.isdir(repository_path):
+ installp_cmd = module.get_bin_path('installp', True)
+ rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+ if package == 'all':
+ pkg_info = "All packages on dir"
+ return True, pkg_info
+
+ else:
+ pkg_info = {}
+ for line in package_result.splitlines():
+ if re.findall(package, line):
+ pkg_name = line.split()[0].strip()
+ pkg_version = line.split()[1].strip()
+ pkg_info[pkg_name] = pkg_version
+
+ return True, pkg_info
+
+ return False, None
+
+ else:
+ module.fail_json(msg="Repository path %s is not valid." % repository_path)
+
+
+def _check_installed_pkg(module, package, repository_path):
+ """
+ Check the package on AIX.
+ It verifies whether the package is installed and gathers its information.
+
+ :param module: Ansible module parameters spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package data.
+ """
+
+ lslpp_cmd = module.get_bin_path('lslpp', True)
+ rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+ if rc == 1:
+ package_state = ' '.join(err.split()[-2:])
+ if package_state == 'not installed.':
+ return False, None
+ else:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ pkg_data = {}
+ full_pkg_data = lslpp_result.splitlines()
+ for line in full_pkg_data:
+ pkg_name, fileset, level = line.split(':')[0:3]
+ pkg_data[pkg_name] = fileset, level
+
+ return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+ repository_path = None
+ remove_count = 0
+ removed_pkgs = []
+ not_found_pkg = []
+ for package in packages:
+ pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+ if pkg_check:
+ if not module.check_mode:
+ rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+ remove_count += 1
+ removed_pkgs.append(package)
+
+ else:
+ not_found_pkg.append(package)
+
+ if remove_count > 0:
+ if len(not_found_pkg) > 1:
+ not_found_pkg.insert(0, "Package(s) not found: ")
+
+ changed = True
+ msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+ else:
+ changed = False
+ msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+ return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+ installed_pkgs = []
+ not_found_pkgs = []
+ already_installed_pkgs = {}
+
+ accept_license_param = {
+ True: '-Y',
+ False: '',
+ }
+
+ # Validate if package exists on repository path.
+ for package in packages:
+ pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+ # If package exists on repository path, check if package is installed.
+ if pkg_check:
+ pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+ # If package is already installed.
+ if pkg_check_current:
+ # Check if package is a package and not a fileset, get version
+ # and add the package into already installed list
+ if package in pkg_info.keys():
+ already_installed_pkgs[package] = pkg_info[package][1]
+
+ else:
+ # If the package is not a package but a fileset, confirm
+ # and add the fileset/package into already installed list
+ for key in pkg_info.keys():
+ if package in pkg_info[key]:
+ already_installed_pkgs[package] = pkg_info[key][1]
+
+ else:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+ installed_pkgs.append(package)
+
+ else:
+ not_found_pkgs.append(package)
+
+ if len(installed_pkgs) > 0:
+ installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+ else:
+ installed_msg = ''
+
+ if len(not_found_pkgs) > 0:
+ not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+ else:
+ not_found_msg = ''
+
+ if len(already_installed_pkgs) > 0:
+ already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py
new file mode 100644
index 00000000..d1e37573
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/interfaces_file.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment: files
+description:
+ - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be already present in the file.
+ - Read information about interfaces from interfaces-styled files.
+options:
+ dest:
+ type: path
+ description:
+ - Path to the interfaces file
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+ - Name of the interface, required for value changes or option removal
+ address_family:
+ type: str
+ description:
+ - Address family of the interface, useful if the same interface name is used for both inet and inet6
+ option:
+ type: str
+ description:
+ - Name of the option, required for value changes or option removal
+ value:
+ type: str
+ description:
+ - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+ If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+ C(pre-up), C(up), C(post-up) and C(down) options cannot be updated in place; only adding new options, removing existing
+ ones or cleaning the whole option set is supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ state:
+ type: str
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ default: "present"
+ choices: [ "present", "absent" ]
+
+notes:
+ - If an option is defined multiple times, the last one will be updated, but all of them will be deleted when I(state) is C(absent)
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: interfaces dictionary
+ returned: success
+ type: complex
+ contains:
+ ifaces:
+ description: interface dictionary
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: interface address family
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: interface method
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: other options, all values returned as strings
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: list of C(pre-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: list of C(up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: list of C(post-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: list of C(down) scripts
+ returned: success
+ type: list
+ sample:
+ - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+'''
+
+EXAMPLES = '''
+- name: Set eth1 mtu configuration value to 8000
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ value: 8000
+ backup: yes
+ state: present
+ register: eth1_cfg
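+
+# Illustrative addition (not part of the upstream examples): per the 'value'
+# documentation, pre-up/up/post-up/down entries cannot be updated in place,
+# but a specific entry can be removed with state 'absent'. The interface name
+# and route command below are hypothetical.
+- name: Remove a pre-up route from eth1
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: pre-up
+ value: "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ state: absent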
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def lineDict(line):
+ return {'line': line, 'line_type': 'unknown'}
+
+
+def optionDict(line, iface, option, value, address_family):
+ return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
+
+
+def getValueFromLine(s):
+ spaceRe = re.compile(r'\s+')
+ for m in spaceRe.finditer(s):
+ pass
+ valueEnd = m.start()
+ option = s.split()[0]
+ optionStart = s.find(option)
+ optionLen = len(option)
+ valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+ return s[valueStart:valueEnd]
+
+
+def read_interfaces_file(module, filename):
+ f = open(filename, 'r')
+ return read_interfaces_lines(module, f)
+
+
+def read_interfaces_lines(module, line_strings):
+ lines = []
+ ifaces = {}
+ currently_processing = None
+ i = 0
+ for line in line_strings:
+ i += 1
+ words = line.split()
+ if len(words) < 1:
+ lines.append(lineDict(line))
+ continue
+ if words[0][0] == "#":
+ lines.append(lineDict(line))
+ continue
+ if words[0] == "mapping":
+ # currmap = calloc(1, sizeof *currmap);
+ lines.append(lineDict(line))
+ currently_processing = "MAPPING"
+ elif words[0] == "source":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-dir":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-directory":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "iface":
+ currif = {
+ "pre-up": [],
+ "up": [],
+ "down": [],
+ "post-up": []
+ }
+ iface_name = words[1]
+ try:
+ currif['address_family'] = words[2]
+ except IndexError:
+ currif['address_family'] = None
+ address_family = currif['address_family']
+ try:
+ currif['method'] = words[3]
+ except IndexError:
+ currif['method'] = None
+
+ ifaces[iface_name] = currif
+ lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
+ currently_processing = "IFACE"
+ elif words[0] == "auto":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0].startswith("allow-"):
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-auto-down":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-scripts":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ else:
+ if currently_processing == "IFACE":
+ option_name = words[0]
+ # TODO: if option_name in currif.options
+ value = getValueFromLine(line)
+ lines.append(optionDict(line, iface_name, option_name, value, address_family))
+ if option_name in ["pre-up", "up", "down", "post-up"]:
+ currif[option_name].append(value)
+ else:
+ currif[option_name] = value
+ elif currently_processing == "MAPPING":
+ lines.append(lineDict(line))
+ elif currently_processing == "NONE":
+ lines.append(lineDict(line))
+ else:
+ module.fail_json(msg="misplaced option %s in line %d" % (line, i))
+ return None, None
+ return lines, ifaces
+
+
+def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
+ value = str(raw_value)
+ changed = False
+
+ iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+ if address_family is not None:
+ iface_lines = [item for item in iface_lines
+ if "address_family" in item and item["address_family"] == address_family]
+
+ if len(iface_lines) < 1:
+ # interface not found
+ module.fail_json(msg="Error: interface %s not found" % iface)
+ return changed, None
+
+ iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
+ target_options = list(filter(lambda i: i['option'] == option, iface_options))
+
+ if state == "present":
+ if len(target_options) < 1:
+ changed = True
+ # add new option
+ last_line_dict = iface_lines[-1]
+ changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+ else:
+ if option in ["pre-up", "up", "down", "post-up"]:
+ if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+ changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+ else:
+ # if more than one option found edit the last one
+ if target_options[-1]['value'] != value:
+ changed = True
+ target_option = target_options[-1]
+ old_line = target_option['line']
+ old_value = target_option['value']
+ address_family = target_option['address_family']
+ prefix_start = old_line.find(option)
+ optionLen = len(option)
+ old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+ start = old_value_position.start() + prefix_start + optionLen
+ end = old_value_position.end() + prefix_start + optionLen
+ line = old_line[:start] + value + old_line[end:]
+ index = len(lines) - lines[::-1].index(target_option) - 1
+ lines[index] = optionDict(line, iface, option, value, address_family)
+ elif state == "absent":
+ if len(target_options) >= 1:
+ if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+ for target_option in filter(lambda i: i['value'] == value, target_options):
+ changed = True
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ changed = True
+ for target_option in target_options:
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+ return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+ # Changing method of interface is not an addition
+ if option == 'method':
+ changed = False
+ for ln in lines:
+ if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ changed = True
+ ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+ ln['params']['method'] = value
+ return changed, lines
+
+ last_line = last_line_dict['line']
+ prefix_start = last_line.find(last_line.split()[0])
+ suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+ prefix = last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+ # interface has no options, indent
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ f.close()
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py
new file mode 100644
index 00000000..50aec392
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ip_netns.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# (c) 2017, Arie Bregman <abregman@redhat.com>
+#
+# This file is a module for Ansible that manages network namespaces with the ip command
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ ip ]
+description:
+ - Create or delete network namespaces using the ip command.
+options:
+ name:
+ required: false
+ description:
+ - Name of the namespace
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the namespace should exist
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces. """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+ '''Run an ip netns command'''
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command('ip netns list')
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+ return self.name in out
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+ if self.state == 'absent' and self.exists():
+ changed = True
+ elif self.state == 'present' and not self.exists():
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py
new file mode 100644
index 00000000..756b6cf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_config.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+- Modify global configuration settings of a FreeIPA Server.
+options:
+ ipadefaultloginshell:
+ description: Default shell for new users.
+ aliases: ["loginshell"]
+ type: str
+ ipadefaultemaildomain:
+ description: Default e-mail domain for new users.
+ aliases: ["emaildomain"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure the default login shell is bash.
+ community.general.ipa_config:
+ ipadefaultloginshell: /bin/bash
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default e-mail domain is ansible.com.
+ community.general.ipa_config:
+ ipadefaultemaildomain: ansible.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+'''
+
+RETURN = r'''
+config:
+ description: Configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def config_show(self):
+ return self._post_json(method='config_show', name=None)
+
+ def config_mod(self, name, item):
+ return self._post_json(method='config_mod', name=name, item=item)
+
+
+def get_config_dict(ipadefaultloginshell=None, ipadefaultemaildomain=None):
+ config = {}
+ if ipadefaultloginshell is not None:
+ config['ipadefaultloginshell'] = ipadefaultloginshell
+ if ipadefaultemaildomain is not None:
+ config['ipadefaultemaildomain'] = ipadefaultemaildomain
+
+ return config
+
+
+def get_config_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_config = get_config_dict(
+ ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
+ ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
+ )
+ ipa_config = client.config_show()
+ diff = get_config_diff(client, ipa_config, module_config)
+
+ changed = False
+ new_config = {}
+ for module_key in diff:
+ if module_config.get(module_key) != ipa_config.get(module_key, None):
+ changed = True
+ new_config.update({module_key: module_config.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.config_mod(name=None, item=new_config)
+
+ return changed, client.config_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
+ ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = ConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
new file mode 100644
index 00000000..635bf2ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+- Add, modify and delete an IPA DNS Record using IPA API.
+options:
+ zone_name:
+ description:
+ - The DNS zone name to which DNS record needs to be managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+ - The type of DNS record name.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
+ - "'SRV' and 'MX' are added in version 2.8."
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ required: true
+ type: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+ - Applies only when adding a new record or changing the value of record_value.
+ required: false
+ type: int
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns record exists with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure an MX record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_value: '1 mailserver.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSRecordIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSRecordIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnsrecord_find(self, zone_name, record_name):
+ if record_name == '@':
+ return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True})
+ else:
+ return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True})
+
+ def dnsrecord_add(self, zone_name=None, record_name=None, details=None):
+ item = dict(idnsname=record_name)
+ if details['record_type'] == 'A':
+ item.update(a_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'AAAA':
+ item.update(aaaa_part_ip_address=details['record_value'])
+ elif details['record_type'] == 'A6':
+ item.update(a6_part_data=details['record_value'])
+ elif details['record_type'] == 'CNAME':
+ item.update(cname_part_hostname=details['record_value'])
+ elif details['record_type'] == 'DNAME':
+ item.update(dname_part_target=details['record_value'])
+ elif details['record_type'] == 'PTR':
+ item.update(ptr_part_hostname=details['record_value'])
+ elif details['record_type'] == 'TXT':
+ item.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV':
+ item.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX':
+ item.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+
+ return self._post_json(method='dnsrecord_add', name=zone_name, item=item)
+
+ def dnsrecord_mod(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+ return self._post_json(method='dnsrecord_mod', name=zone_name, item=item)
+
+ def dnsrecord_del(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ return self._post_json(method='dnsrecord_del', name=zone_name, item=item)
+
+
+def get_dnsrecord_dict(details=None):
+ module_dnsrecord = dict()
+ if details['record_type'] == 'A' and details['record_value']:
+ module_dnsrecord.update(arecord=details['record_value'])
+ elif details['record_type'] == 'AAAA' and details['record_value']:
+ module_dnsrecord.update(aaaarecord=details['record_value'])
+ elif details['record_type'] == 'A6' and details['record_value']:
+ module_dnsrecord.update(a6record=details['record_value'])
+ elif details['record_type'] == 'CNAME' and details['record_value']:
+ module_dnsrecord.update(cnamerecord=details['record_value'])
+ elif details['record_type'] == 'DNAME' and details['record_value']:
+ module_dnsrecord.update(dnamerecord=details['record_value'])
+ elif details['record_type'] == 'PTR' and details['record_value']:
+ module_dnsrecord.update(ptrrecord=details['record_value'])
+ elif details['record_type'] == 'TXT' and details['record_value']:
+ module_dnsrecord.update(txtrecord=details['record_value'])
+ elif details['record_type'] == 'SRV' and details['record_value']:
+ module_dnsrecord.update(srvrecord=details['record_value'])
+ elif details['record_type'] == 'MX' and details['record_value']:
+ module_dnsrecord.update(mxrecord=details['record_value'])
+
+ if details.get('record_ttl'):
+ module_dnsrecord.update(dnsttl=details['record_ttl'])
+
+ return module_dnsrecord
+
+
+def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord):
+ details = get_dnsrecord_dict(module_dnsrecord)
+ return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details)
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ record_name = module.params['record_name']
+ record_ttl = module.params.get('record_ttl')
+ state = module.params['state']
+
+ ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name)
+
+ module_dnsrecord = dict(
+ record_type=module.params['record_type'],
+ record_value=module.params['record_value'],
+ record_ttl=to_native(record_ttl, nonstring='passthru'),
+ )
+
+ # ttl is not required to change records
+ if module_dnsrecord['record_ttl'] is None:
+ module_dnsrecord.pop('record_ttl')
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_add(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_mod(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ if ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_del(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+
+ return changed, client.dnsrecord_find(zone_name, record_name)
+
+
+def main():
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ zone_name=dict(type='str', required=True),
+ record_name=dict(type='str', aliases=['name'], required=True),
+ record_type=dict(type='str', default='A', choices=record_types),
+ record_value=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ record_ttl=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = DNSRecordIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
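
Note: the long if/elif chain in get_dnsrecord_dict reduces to a fixed mapping from each supported record_type to the IPA attribute that carries the record value. A minimal standalone sketch of that mapping (illustration only, not part of the patch):

    # Illustration only: the record_type -> IPA attribute mapping used on the
    # dnsrecord_mod/dnsrecord_del path of ipa_dnsrecord.
    RECORD_ATTR = {
        'A': 'arecord', 'AAAA': 'aaaarecord', 'A6': 'a6record',
        'CNAME': 'cnamerecord', 'DNAME': 'dnamerecord', 'PTR': 'ptrrecord',
        'TXT': 'txtrecord', 'SRV': 'srvrecord', 'MX': 'mxrecord',
    }

    def record_item(record_type, record_value, record_ttl=None):
        # Build the item dict passed to dnsrecord_mod/dnsrecord_del.
        item = {RECORD_ATTR[record_type]: record_value}
        if record_ttl:
            item['dnsttl'] = record_ttl
        return item

    print(record_item('MX', '1 mailserver.example.com', 300))
    # {'mxrecord': '1 mailserver.example.com', 'dnsttl': 300}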
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
new file mode 100644
index 00000000..1536866c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+- Add and delete an IPA DNS zone using IPA API.
+options:
+ zone_name:
+ description:
+ - The DNS zone name that needs to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+ description: Apply dynamic update to zone
+ required: false
+ default: "false"
+ choices: ["false", "true"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and is dynamic update
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: true
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+zone:
+ description: DNS zone as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class DNSZoneIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnszone_find(self, zone_name, details=None):
+ itens = {'idnsname': zone_name}
+ if details is not None:
+ itens.update(details)
+
+ return self._post_json(
+ method='dnszone_find',
+ name=zone_name,
+ item=itens
+ )
+
+ def dnszone_add(self, zone_name=None, details=None):
+ itens = {}
+ if details is not None:
+ itens.update(details)
+
+ return self._post_json(
+ method='dnszone_add',
+ name=zone_name,
+ item=itens
+ )
+
+ def dnszone_del(self, zone_name=None, record_name=None, details=None):
+ return self._post_json(
+ method='dnszone_del', name=zone_name, item={})
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ state = module.params['state']
+ dynamicupdate = module.params['dynamicupdate']
+
+ ipa_dnszone = client.dnszone_find(zone_name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate})
+ else:
+ changed = False
+ else:
+ if ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_del(zone_name=zone_name)
+
+ return changed, client.dnszone_find(zone_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(zone_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ dynamicupdate=dict(type='str', required=False, default='false', choices=['true', 'false']),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = DNSZoneIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, zone = ensure(module, client)
+ module.exit_json(changed=changed, zone=zone)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
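
Note: ensure() above follows the usual find/add/delete idempotency pattern. A self-contained sketch of the same decision logic against a plain in-memory dict (purely illustrative; 'zones' is a hypothetical stand-in for the IPA server state):

    # Illustration only: the present/absent logic of ipa_dnszone's ensure(),
    # modelled against an in-memory dict instead of the IPA API.
    zones = {}

    def ensure_zone(zone_name, state, dynamicupdate='false', check_mode=False):
        exists = zone_name in zones
        changed = (state == 'present' and not exists) or (state == 'absent' and exists)
        if changed and not check_mode:
            if state == 'present':
                zones[zone_name] = {'idnsallowdynupdate': dynamicupdate}
            else:
                del zones[zone_name]
        return changed, zones.get(zone_name)

    print(ensure_zone('example.com', 'present', dynamicupdate='true'))  # (True, {...})
    print(ensure_zone('example.com', 'present'))                        # (False, {...}) - already there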
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py
new file mode 100644
index 00000000..84ff443a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_group.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+- Add, modify and delete a group within IPA server
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - Description of the group.
+ type: str
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ type: bool
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ aliases: ['gid']
+ type: str
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If an empty list is passed all groups will be removed from this group.
+ - If option is omitted assigned groups will not be checked or changed.
+ - Groups that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ type: bool
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If an empty list is passed all users will be removed from this group.
+ - If option is omitted assigned users will not be checked or changed.
+ - Users that are already assigned but not passed will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure group is present
+ community.general.ipa_group:
+ name: oinstall
+ gidnumber: '54321'
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that groups sysops and appops are assigned to ops but no other group
+ community.general.ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that users linus and larry are assigned to the group, but no other user
+ community.general.ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure group is absent
+ community.general.ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class GroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(client, ipa_group, module_group):
+ data = []
+ # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ if 'external' in module_group:
+ if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'):
+ del module_group['external']
+
+ return client.get_diff(ipa_data=ipa_group, module_data=module_group)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ user = module.params['user']
+
+ module_group = get_group_dict(description=module.params['description'], external=module.params['external'],
+ gid=module.params['gidnumber'], nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(client, ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group) or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user) or changed
+
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ external=dict(type='bool'),
+ gidnumber=dict(type='str', aliases=['gid']),
+ group=dict(type='list', elements='str'),
+ nonposix=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
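
Note: the member handling for 'user' and 'group' relies on modify_if_diff from the shared ipa module_utils, which is not part of this diff. Conceptually it reconciles the current member list with the wanted one; a rough standalone sketch of that reconciliation (an assumption about its behaviour, not the real implementation):

    # Rough sketch (assumption, not the real module_utils code): reconcile current
    # members against the wanted list, the way ipa_group treats 'user' and 'group'.
    def reconcile(current, wanted, add, remove):
        to_add = sorted(set(wanted) - set(current))
        to_remove = sorted(set(current) - set(wanted))
        if to_add:
            add(to_add)
        if to_remove:
            remove(to_remove)
        return bool(to_add or to_remove)  # reported as 'changed'

    changed = reconcile(['linus'], ['linus', 'larry'],
                        add=lambda users: print('add:', users),
                        remove=lambda users: print('remove:', users))
    print(changed)  # True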
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
new file mode 100644
index 00000000..cb49fd53
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+- Add, modify or delete an IPA HBAC rule using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description: Description
+ type: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ type: list
+ elements: str
+ hostcategory:
+ description: Host category
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+ - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ type: list
+ elements: str
+ servicecategory:
+ description: Service category
+ choices: ['all']
+ type: str
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ type: list
+ elements: str
+ sourcehost:
+ description:
+ - List of source host names to assign.
+ - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ type: list
+ elements: str
+ sourcehostcategory:
+ description: Source host category
+ choices: ['all']
+ type: str
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+ - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure
+ default: "present"
+ choices: ["absent", "disabled", "enabled","present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description: User category
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user group names to assign.
+ - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+ community.general.ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+ community.general.ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule is absent
+ community.general.ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule):
+ return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+ diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ servicecategory=dict(type='str', choices=['all']),
+ servicegroup=dict(type='list', elements='str'),
+ sourcehost=dict(type='list', elements='str'),
+ sourcehostcategory=dict(type='str', choices=['all']),
+ sourcehostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
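
Note: one behaviour worth calling out in ensure() above is that state=disabled keeps the rule but flips ipaenabledflag, while state=absent deletes it. A tiny illustration of that mapping (not the module's code):

    # Illustration only: how ipa_hbacrule turns 'state' into an action.
    def plan(state):
        if state == 'absent':
            return 'delete rule'
        flag = 'TRUE' if state in ('present', 'enabled') else 'FALSE'
        return 'keep rule, ipaenabledflag=%s' % flag

    for state in ('present', 'enabled', 'disabled', 'absent'):
        print(state, '->', plan(state))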
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py
new file mode 100644
index 00000000..80892c01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_host.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+- Add, modify and delete an IPA host using IPA API.
+options:
+ fqdn:
+ description:
+ - Full qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - A description of this host.
+ type: str
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ type: bool
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ type: str
+ mac_address:
+ description:
+ - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ aliases: ["macaddress"]
+ type: list
+ elements: str
+ ns_host_location:
+ description:
+ - Host location (e.g. "Lab 2")
+ aliases: ["nshostlocation"]
+ type: str
+ ns_hardware_platform:
+ description:
+ - Host hardware platform (e.g. "Lenovo T61")
+ aliases: ["nshardwareplatform"]
+ type: str
+ ns_os_version:
+ description:
+ - Host operating system and version (e.g. "Fedora 9")
+ aliases: ["nsosversion"]
+ type: str
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ aliases: ["usercertificate"]
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ update_dns:
+ description:
+ - If set C("True") with state as C("absent"), then removes DNS records of the host managed by FreeIPA DNS.
+ - This option has no effect for states other than "absent".
+ type: bool
+ random_password:
+ description: Generate a random password to be used in bulk enrollment.
+ type: bool
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrolment
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: False
+ random_password: True
+
+- name: Ensure host is disabled
+ community.general.ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+ community.general.ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host and its DNS record is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_dns: True
+'''
+
+RETURN = r'''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+ description: List of options that differ and would be changed
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_show(self, name):
+ return self._post_json(method='host_show', name=name)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name, update_dns):
+ return self._post_json(method='host_del', name=name, item={'updatedns': update_dns})
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None, random_password=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ if random_password is not None:
+ data['random'] = random_password
+ return data
+
+
+def get_host_diff(client, ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
+ if not module_host.get('random'):
+ non_updateable_keys.append('random')
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_host)
+
+
+def ensure(module, client):
+ name = module.params['fqdn']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'],
+ random_password=module.params.get('random_password'),
+ )
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ # OTP password generated by FreeIPA is visible only for host_add command
+ # so, return directly from here.
+ return changed, client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(client, ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
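+ # If the host already has a keytab and a new random password was requested, disable (un-enroll) the host first so the one-time password can be issued.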
+ ipa_host_show = client.host_show(name=name)
+ if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+ client.host_disable(name=name)
+ return changed, client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ update_dns = module.params.get('update_dns', False)
+ if not module.check_mode:
+ client.host_del(name=name, update_dns=update_dns)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool'),
+ ip_address=dict(type='str'),
+ ns_host_location=dict(type='str', aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', aliases=['nsosversion']),
+ user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+ mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+ update_dns=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ random_password=dict(type='bool', no_log=False),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
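
Note: get_host_diff strips options that host_mod cannot change before computing the diff; 'random' is only kept when a new random password was actually requested. A compact illustration of that pruning (not the module's code):

    # Illustration only: prune non-updateable options before diffing, mirroring get_host_diff.
    def prune_non_updateable(module_host):
        non_updateable = ['force', 'ip_address']
        if not module_host.get('random'):
            non_updateable.append('random')
        return {k: v for k, v in module_host.items() if k not in non_updateable}

    print(prune_non_updateable({'description': 'web server', 'ip_address': '192.0.2.10', 'force': True}))
    # {'description': 'web server'}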
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
new file mode 100644
index 00000000..ae1f1a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+- Add, modify and delete an IPA host-group using IPA API.
+options:
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host-groups that belong to that host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+ community.general.ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+ community.general.ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class HostGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup):
+ return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []), [item.lower() for item in host],
+ client.hostgroup_add_host, client.hostgroup_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup,
+ client.hostgroup_remove_hostgroup) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
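
Note: ensure() lower-cases the wanted host and hostgroup names before comparing them with the data returned by IPA, presumably because IPA returns these member names in lower case; without that, mixed-case input would always be reported as changed. A one-line illustration:

    # Illustration: case-normalising the wanted list before the diff, as ensure() does.
    wanted, current = ['DB.Example.com'], ['db.example.com']
    print([h.lower() for h in wanted] == current)  # True -> nothing to change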
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py
new file mode 100644
index 00000000..589a6d5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_role.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+options:
+ cn:
+ description:
+ - Role name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - A description of this role-group.
+ type: str
+ group:
+ description:
+ - List of group names assigned to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ privilege:
+ description:
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges will be removed.
+ - If option is omitted privileges will not be checked or changed.
+ - If option is passed all assigned privileges that are not passed will be removed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+ community.general.ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role with certain details
+ community.general.ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ privilege:
+ - Group Administrators
+ - User Administrators
+ service:
+ - service01
+
+- name: Ensure role is absent
+ community.general.ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class RoleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
+ def role_add_privilege(self, name, item):
+ return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})
+
+ def role_remove_privilege(self, name, item):
+ return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(client, ipa_role, module_role):
+ return client.get_diff(ipa_data=ipa_role, module_data=module_role)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ privilege = module.params['privilege']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(client, ipa_role, module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if privilege is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
+ client.role_add_privilege,
+ client.role_remove_privilege) or changed
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ group=dict(type='list', elements='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ privilege=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
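
Note: the per-type helpers in RoleIPAClient (role_add_group, role_add_host, ..., role_remove_user) are thin wrappers that call role_add_member or role_remove_member with a different item key. A generic sketch of that wrapper pattern (illustrative only, using a fake post function):

    # Illustration only: the wrapper pattern behind role_add_group/host/hostgroup/service/user.
    def fake_post(method, name, item):
        print(method, name, item)

    def make_member_adder(key):
        def adder(name, members):
            return fake_post('role_add_member', name, {key: members})
        return adder

    role_add_group = make_member_adder('group')
    role_add_user = make_member_adder('user')
    role_add_group('dba', ['editors'])        # role_add_member dba {'group': ['editors']}
    role_add_user('dba', ['pinky', 'brain'])  # role_add_member dba {'user': ['pinky', 'brain']}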
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py
new file mode 100644
index 00000000..c13f7ab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_service.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_service
+author: Cédric Parent (@cprh)
+short_description: Manage FreeIPA service
+description:
+- Add and delete an IPA service using IPA API.
+options:
+ krbcanonicalname:
+ description:
+ - Principal of the service.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ hosts:
+ description:
+ - Defines the list of 'ManagedBy' hosts.
+ required: false
+ type: list
+ elements: str
+ force:
+ description:
+ - Force principal name even if host is not in DNS.
+ required: false
+ type: bool
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure service is absent
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Change the list of managing hosts
+  community.general.ipa_service:
+    name: http/host01.example.com
+    hosts:
+    - host01.example.com
+    - host02.example.com
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+service:
+ description: Service as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class ServiceIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+ def service_find(self, name):
+ return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+ def service_add(self, name, service):
+ return self._post_json(method='service_add', name=name, item=service)
+
+ def service_mod(self, name, service):
+ return self._post_json(method='service_mod', name=name, item=service)
+
+ def service_del(self, name):
+ return self._post_json(method='service_del', name=name)
+
+ def service_disable(self, name):
+ return self._post_json(method='service_disable', name=name)
+
+ def service_add_host(self, name, item):
+ return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+ def service_remove_host(self, name, item):
+ return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None):
+ data = {}
+ if force is not None:
+ data['force'] = force
+ if krbcanonicalname is not None:
+ data['krbcanonicalname'] = krbcanonicalname
+ return data
+
+
+def get_service_diff(client, ipa_host, module_service):
+ non_updateable_keys = ['force', 'krbcanonicalname']
+ for key in non_updateable_keys:
+ if key in module_service:
+ del module_service[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_service)
+
+
+def ensure(module, client):
+ name = module.params['krbcanonicalname']
+ state = module.params['state']
+ hosts = module.params['hosts']
+
+ ipa_service = client.service_find(name=name)
+ module_service = get_service_dict(force=module.params['force'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_add(name=name, service=module_service)
+ else:
+ diff = get_service_diff(client, ipa_service, module_service)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_service.get(key)
+ client.service_mod(name=name, service=data)
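+        # Reconcile the 'ManagedBy' host list below: hosts present in IPA but not in the task are removed,
+        # and hosts listed in the task but missing in IPA are added.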
+ if hosts is not None:
+ if 'managedby_host' in ipa_service:
+ for host in ipa_service['managedby_host']:
+ if host not in hosts:
+ if not module.check_mode:
+ client.service_remove_host(name=name, item=host)
+ changed = True
+ for host in hosts:
+ if host not in ipa_service['managedby_host']:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+ else:
+ for host in hosts:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+
+ else:
+ if ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_del(name=name)
+
+ return changed, client.service_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ hosts=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = ServiceIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+        changed, service = ensure(module, client)
+        module.exit_json(changed=changed, service=service)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py
new file mode 100644
index 00000000..218951a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_subca.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities.
+description:
+- Add, modify, enable, disable and delete IPA Lightweight Sub Certificate Authorities using IPA API.
+options:
+ subca_name:
+ description:
+ - The Sub Certificate Authority name which needs to be managed.
+ required: true
+ aliases: ["name"]
+ type: str
+ subca_subject:
+ description:
+ - The Sub Certificate Authority's Subject. e.g., 'CN=SampleSubCA1,O=testrelm.test'.
+ required: true
+ type: str
+ subca_desc:
+ description:
+ - The Sub Certificate Authority's description.
+ type: str
+ state:
+ description:
+ - State to ensure.
+    - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+ required: false
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = '''
+- name: Ensure IPA Sub CA is present
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ subca_name: AnsibleSubCA1
+ subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+ subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: absent
+ subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+    state: disabled
+ subca_name: AnsibleSubCA1
+'''
+
+RETURN = r'''
+subca:
+ description: IPA Sub CA record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SubCAIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+ def subca_find(self, subca_name):
+ return self._post_json(method='ca_find', name=subca_name, item=None)
+
+ def subca_add(self, subca_name=None, subject_dn=None, details=None):
+ item = dict(ipacasubjectdn=subject_dn)
+ subca_desc = details.get('description', None)
+ if subca_desc is not None:
+ item.update(description=subca_desc)
+ return self._post_json(method='ca_add', name=subca_name, item=item)
+
+ def subca_mod(self, subca_name=None, diff=None, details=None):
+ item = get_subca_dict(details)
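+        # Each changed attribute is applied with its own ca_mod call, using IPA's generic
+        # setattr="<attribute>=<value>" form.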
+ for change in diff:
+ update_detail = dict()
+ if item[change] is not None:
+ update_detail.update(setattr="{0}={1}".format(change, item[change]))
+ self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+ def subca_del(self, subca_name=None):
+ return self._post_json(method='ca_del', name=subca_name)
+
+ def subca_disable(self, subca_name=None):
+ return self._post_json(method='ca_disable', name=subca_name)
+
+ def subca_enable(self, subca_name=None):
+ return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
+ module_subca = dict()
+ if details['description'] is not None:
+ module_subca['description'] = details['description']
+ if details['subca_subject'] is not None:
+ module_subca['ipacasubjectdn'] = details['subca_subject']
+ return module_subca
+
+
+def get_subca_diff(client, ipa_subca, module_subca):
+ details = get_subca_dict(module_subca)
+ return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+ subca_name = module.params['subca_name']
+ subca_subject_dn = module.params['subca_subject']
+ subca_desc = module.params['subca_desc']
+
+ state = module.params['state']
+
+ ipa_subca = client.subca_find(subca_name)
+ module_subca = dict(description=subca_desc,
+ subca_subject=subca_subject_dn)
+
+ changed = False
+ if state == 'present':
+ if not ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+ else:
+ diff = get_subca_diff(client, ipa_subca, module_subca)
+ # IPA does not allow to modify Sub CA's subject DN
+ # So skip it for now.
+ if 'ipacasubjectdn' in diff:
+ diff.remove('ipacasubjectdn')
+ del module_subca['subca_subject']
+
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+ elif state == 'absent':
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_del(subca_name=subca_name)
+    elif state == 'disabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to "
+ "version greater than 4.4.2")
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_disable(subca_name=subca_name)
+    elif state == 'enabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to "
+ "version greater than 4.4.2")
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_enable(subca_name=subca_name)
+
+ return changed, client.subca_find(subca_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+ subca_subject=dict(type='str', required=True),
+ subca_desc=dict(type='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ client = SubCAIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+        changed, subca = ensure(module, client)
+        module.exit_json(changed=changed, subca=subca)
+ except Exception as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
new file mode 100644
index 00000000..aa09e0e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete sudo command within FreeIPA server using FreeIPA API.
+options:
+ sudocmd:
+ description:
+ - Sudo command.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - A description of this command.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+ community.general.ipa_sudocmd:
+ name: su
+ description: Allow to run su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+ community.general.ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmd:
+  description: Sudo command as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+ return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='str', required=True, aliases=['name']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
new file mode 100644
index 00000000..96eb6559
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete sudo command group within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - Group description.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command group exists
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoCmdGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup):
+ return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
+            changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+                                            client.sudocmdgroup_add_member_sudocmd,
+                                            client.sudocmdgroup_remove_member_sudocmd) or changed
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+        module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
new file mode 100644
index 00000000..9a0259bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ type: str
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ description:
+ description:
+ - Description of the sudo rule.
+ type: str
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ type: list
+ elements: str
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ type: list
+ elements: str
+ runasusercategory:
+ description:
+ - RunAs User category the rule applies to.
+ choices: ['all']
+ type: str
+ runasgroupcategory:
+ description:
+ - RunAs Group category the rule applies to.
+ choices: ['all']
+ type: str
+ sudoopt:
+ description:
+ - List of options to add to the sudo rule.
+ type: list
+ elements: str
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
+ community.general.ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow to run every command with sudo without password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ community.general.ipa_sudorule:
+ name: sudo_dev_dbserver
+    description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudorule:
+ description: Sudorule as returned by IPA
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class SudoRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_remove_allow_command(self, name, item):
+ return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
+ runasgroupcategory=None, runasusercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ if runasusercategory is not None:
+ data['ipasudorunasusercategory'] = runasusercategory
+ if runasgroupcategory is not None:
+ data['ipasudorunasgroupcategory'] = runasgroupcategory
+ return data
+
+
+def category_changed(module, client, category_name, ipa_sudorule):
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
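+# Note: category_changed() above clears an existing '<kind>category: all' setting before explicit
+# members are assigned, since a sudo rule cannot combine 'all' with explicit hosts, users or
+# commands (see the mutually exclusive options declared in main()).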
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ cmd = module.params['cmd']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ runasusercategory = module.params['runasusercategory']
+ runasgroupcategory = module.params['runasgroupcategory']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory,
+ runasusercategory=runasusercategory,
+ runasgroupcategory=runasgroupcategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = client.get_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
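+                        # When 'hostcategory' changes, explicitly assigned hosts and host groups are
+                        # removed first, because a rule cannot combine 'all' with explicit members.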
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if runasusercategory is not None:
+ changed = category_changed(module, client, 'iparunasusercategory', ipa_sudorule) or changed
+
+ if runasgroupcategory is not None:
+ changed = category_changed(module, client, 'iparunasgroupcategory', ipa_sudorule) or changed
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+ # client.modify_if_diff does not work as each option must be removed/added by its own
+ ipa_list = ipa_sudorule.get('ipasudoopt', [])
+ module_list = sudoopt
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_option_ipasudoopt(name, item)
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_option_ipasudoopt(name, item)
+
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cmd=dict(type='list', elements='str'),
+ cmdcategory=dict(type='str', choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ runasusercategory=dict(type='str', choices=['all']),
+ runasgroupcategory=dict(type='str', choices=['all']),
+ sudoopt=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True)
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py
new file mode 100644
index 00000000..fa7b3abb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_user.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete user within IPA server.
+options:
+ displayname:
+ description: Display name.
+ type: str
+ update_password:
+ description:
+ - Set password for a user.
+ type: str
+ default: 'always'
+ choices: [ always, on_create ]
+ givenname:
+ description: First name.
+ type: str
+ krbpasswordexpiration:
+ description:
+ - Date at which the user password will expire.
+ - In the format YYYYMMddHHmmss.
+ - e.g. 20180121182022 will expire on 21 January 2018 at 18:20:22.
+ type: str
+ loginshell:
+ description: Login shell.
+ type: str
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for a user.
+ - Will not be set for an existing user unless I(update_password=always), which is the default.
+ type: str
+ sn:
+ description: Surname.
+ type: str
+ sshpubkey:
+ description:
+    - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ type: list
+ elements: str
+ title:
+ description: Title.
+ type: str
+ uid:
+ description: uid of the user.
+ required: true
+ aliases: ["name"]
+ type: str
+ uidnumber:
+ description:
+ - Account Settings UID/Posix User ID number.
+ type: str
+ gidnumber:
+ description:
+ - Posix Group ID.
+ type: str
+ homedirectory:
+ description:
+ - Default home directory of the user.
+ type: str
+ version_added: '0.2.0'
+ userauthtype:
+ description:
+ - The authentication type to use for the user.
+ choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ type: str
+ version_added: '1.2.0'
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = r'''
+- name: Ensure pinky is present and always reset password
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ krbpasswordexpiration: 20200119235959
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkey:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ uidnumber: '1001'
+ gidnumber: '100'
+ homedirectory: /home/pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure brain is absent
+ community.general.ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure pinky is present but don't reset password if already exists
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ password: zounds
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_password: on_create
+
+- name: Ensure pinky is present and using one time password authentication
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ userauthtype: otp
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class UserIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+ mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+ title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+ userauthtype=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if krbpasswordexpiration is not None:
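+        # The trailing "Z" turns the YYYYMMddHHmmss value into an LDAP generalized time string (UTC),
+        # which is the format IPA expects for krbpasswordexpiration.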
+ user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+ if gidnumber is not None:
+ user['gidnumber'] = gidnumber
+ if uidnumber is not None:
+ user['uidnumber'] = uidnumber
+ if homedirectory is not None:
+ user['homedirectory'] = homedirectory
+ if userauthtype is not None:
+ user['ipauserauthtype'] = userauthtype
+
+ return user
+
+
+def get_user_diff(client, ipa_user, module_user):
+ """
+    Return the keys whose values differ between the two dicts. Unfortunately the IPA
+    API returns everything as a list even if only a single value is possible.
+    Therefore some more complexity is needed.
+    The method checks whether the value of module_user.attr is not a list and, if the same
+    attribute in ipa_user is a list, wraps that element in a list. This way the method should
+    not need to change if the structure of the API response changes.
+ :param ipa_user:
+ :param module_user:
+ :return:
+ """
+    # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys themselves but instead the fingerprints.
+ # These are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ hash_algo = 'md5'
+ if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
+ hash_algo = 'sha256'
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
+        # Remove the ipasshpubkey element as it is not returned from IPA but save its value to be used later on
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+
+ result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
+
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
+
+def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "[fp] [user@host] (ssh-rsa)" where fp is of the format:
+ FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
+ for md5 or
+ SHA256:[base64]
+ for sha256
+ :param ssh_key:
+ :param hash_algo:
+ :return:
+ """
+ parts = ssh_key.strip().split()
+ if len(parts) == 0:
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ if hash_algo == 'md5':
+ fp_plain = hashlib.md5(key).hexdigest()
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ elif hash_algo == 'sha256':
+ fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
+ key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ user_host = parts[2]
+ return "%s %s (%s)" % (key_fp, user_host, key_type)
+
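+# Illustrative sketch only (hypothetical key material): for an input line such as
+#   "ssh-rsa AAAAB3NzaC1yc2E... pinky@acme.com"
+# the helper above returns
+#   "SHA256:<base64 digest> pinky@acme.com (ssh-rsa)"  when hash_algo='sha256', or
+#   "AB:CD:...:EF pinky@acme.com (ssh-rsa)"            when hash_algo='md5',
+# matching the sshpubkeyfp strings the IPA API returns and that get_user_diff() compares against.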
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['uid']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'],
+ gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'),
+ homedirectory=module.params.get('homedirectory'),
+ userauthtype=module.params.get('userauthtype'))
+
+ update_password = module.params.get('update_password')
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ if update_password == 'on_create':
+ module_user.pop('userpassword', None)
+ diff = get_user_diff(client, ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(displayname=dict(type='str'),
+ givenname=dict(type='str'),
+ update_password=dict(type='str', default="always",
+ choices=['always', 'on_create'],
+ no_log=False),
+ krbpasswordexpiration=dict(type='str', no_log=False),
+ loginshell=dict(type='str'),
+ mail=dict(type='list', elements='str'),
+ sn=dict(type='str'),
+ uid=dict(type='str', required=True, aliases=['name']),
+ gidnumber=dict(type='str'),
+ uidnumber=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ sshpubkey=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', elements='str'),
+ title=dict(type='str'),
+ homedirectory=dict(type='str'),
+ userauthtype=dict(type='str',
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+    # If sshpubkey is defined as None then module.params['sshpubkey'] is [None]. IPA itself returns None (not a list).
+    # Therefore a small check is done here to replace list(None) by None. Otherwise get_user_diff() would report
+    # sshpubkey as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py
new file mode 100644
index 00000000..3376b8c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipa_vault.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2018, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_vault
+author: Juan Manuel Parrilla (@jparrill)
+short_description: Manage FreeIPA vaults
+description:
+- Add, modify and delete vaults and secret vaults.
+- KRA service should be enabled to use this module.
+options:
+ cn:
+ description:
+ - Vault name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with service.
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with user.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+    - Force replace the existing vault on the IPA server.
+ type: bool
+ default: False
+ choices: ["True", "False"]
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+- community.general.ipa.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure vault is present
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+
+- name: Ensure vault is present for Admin user
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure vault is absent
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Modify vault if already exists
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ description: "Vault for test"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ replace: True
+
+- name: Get vault info if already exists
+ community.general.ipa_vault:
+ name: vault01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+vault:
+ description: Vault as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class VaultIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(VaultIPAClient, self).__init__(module, host, port, protocol)
+
+ def vault_find(self, name):
+ return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name})
+
+ def vault_add_internal(self, name, item):
+ return self._post_json(method='vault_add_internal', name=name, item=item)
+
+ def vault_mod_internal(self, name, item):
+ return self._post_json(method='vault_mod_internal', name=name, item=item)
+
+ def vault_del(self, name):
+ return self._post_json(method='vault_del', name=name)
+
+
+def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None):
+ vault = {}
+
+ if description is not None:
+ vault['description'] = description
+ if vault_type is not None:
+ vault['ipavaulttype'] = vault_type
+ if vault_salt is not None:
+ vault['ipavaultsalt'] = vault_salt
+ if vault_public_key is not None:
+ vault['ipavaultpublickey'] = vault_public_key
+ if service is not None:
+ vault['service'] = service
+ return vault
+
+
+def get_vault_diff(client, ipa_vault, module_vault, module):
+ return client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ user = module.params['username']
+ replace = module.params['replace']
+
+ module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'],
+ vault_salt=module.params['ipavaultsalt'],
+ vault_public_key=module.params['ipavaultpublickey'],
+ service=module.params['service'])
+ ipa_vault = client.vault_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_vault:
+ # New vault
+ changed = True
+ if not module.check_mode:
+ ipa_vault = client.vault_add_internal(name, item=module_vault)
+ else:
+ # Already exists
+ if replace:
+ diff = get_vault_diff(client, ipa_vault, module_vault, module)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_vault.get(key)
+ client.vault_mod_internal(name=name, item=data)
+
+ else:
+ if ipa_vault:
+ changed = True
+ if not module.check_mode:
+ client.vault_del(name)
+
+ return changed, client.vault_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ ipavaulttype=dict(type='str', default='symmetric',
+ choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']),
+ ipavaultsalt=dict(type='str', aliases=['vault_salt']),
+ ipavaultpublickey=dict(type='str', aliases=['vault_public_key']),
+ service=dict(type='str'),
+ replace=dict(type='bool', default=False, choices=[True, False]),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ username=dict(type='list', elements='str', aliases=['user']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['username', 'service']])
+
+ client = VaultIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, vault = ensure(module, client)
+ module.exit_json(changed=changed, vault=vault)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py
new file mode 100644
index 00000000..dcdc5ef8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipify_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway
+description:
+  - Useful if you are behind NAT and need to know the public IP of your internet gateway.
+author:
+- René Moser (@resmo)
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+      - C(?format=json) will be appended by default.
+ type: str
+ default: https://api.ipify.org/
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ type: int
+ default: 10
+ validate_certs:
+ description:
+ - When set to C(NO), SSL certificates will not be validated.
+ type: bool
+ default: yes
+notes:
+ - Visit https://www.ipify.org to get more information.
+'''
+
+EXAMPLES = r'''
+# Gather IP facts from ipify.org
+- name: Get my public IP
+ community.general.ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: Get my public IP
+ community.general.ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = r'''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: str
+ sample: 1.2.3.4
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class IpifyFacts(object):
+
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
+ (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+ if not response:
+            module.fail_json(msg="No valid response (or no response at all) from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
+ data = json.loads(to_text(response.read()))
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_url=dict(type='str', default='https://api.ipify.org/'),
+ timeout=dict(type='int', default=10),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+
+if __name__ == '__main__':
+ main()
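+
+
+# Illustrative sketch (not part of this module): the module above appends
+# '?format=json' to the API URL and reads the 'ip' field of the JSON body. A
+# minimal equivalent with the Python 3 standard library (assumes outbound
+# network access to api.ipify.org):
+import json
+from urllib.request import urlopen
+
+
+def get_public_ip(api_url='https://api.ipify.org/', timeout=10):
+    # ipify answers {"ip": "x.x.x.x"} when asked for JSON output.
+    with urlopen(api_url + '?format=json', timeout=timeout) as response:
+        data = json.loads(response.read().decode('utf-8'))
+    return data.get('ip')
+
+
+# Example: print(get_public_ip())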
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
new file mode 100644
index 00000000..f4186cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Aleksei Kostiuk <unitoff@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: "Retrieve IP geolocation facts of a host's IP address"
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+author: "Aleksei Kostiuk (@akostyuk)"
+options:
+ timeout:
+ description:
+ - HTTP connection timeout in seconds
+ required: false
+ default: 10
+ type: int
+ http_agent:
+ description:
+ - Set http user agent
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+ type: str
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: Get IP geolocation data
+ community.general.ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: complex
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: str
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: str
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: str
+ sample: "US"
+ region:
+ description: State or province name
+ type: str
+ sample: "California"
+ city:
+ description: City name
+ type: str
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: str
+ sample: "37.3860,-122.0838"
+ org:
+      description: "Organization name"
+ type: str
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: str
+ sample: "94035"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+ def get_geo_data(self):
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ timeout=self.timeout)
+ try:
+            assert info['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg='Could not get {0} page, '
+ 'check for connectivity!'.format(self.url))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipinfo.io response: '
+ '{0} {1}'.format(self.url, content))
+ else:
+ return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+ main()
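+
+
+# Illustrative sketch (not part of this module): the module above fetches
+# https://ipinfo.io/json and returns the decoded JSON document as facts. The
+# same request with the Python 3 standard library (assumes outbound network
+# access to ipinfo.io):
+import json
+from urllib.request import Request, urlopen
+
+
+def get_geo_data(timeout=10, user_agent='ansible-ipinfoio-module/0.0.1'):
+    # ipinfo.io returns fields such as ip, hostname, country, region, city,
+    # loc, org and postal in a single JSON object.
+    request = Request('https://ipinfo.io/json', headers={'User-Agent': user_agent})
+    with urlopen(request, timeout=timeout) as response:
+        return json.loads(response.read().decode('utf-8'))
+
+
+# Example: get_geo_data().get('country') might return 'US'.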
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py
new file mode 100644
index 00000000..6509ca21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_boot.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of the order of boot devices
+description:
+  - Use this module to manage the order of boot devices.
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ bootdev:
+ description:
+ - Set boot device to use on next reboot
+ - "The choices for the device are:
+ - network -- Request network boot
+ - floppy -- Boot from floppy
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request"
+ required: true
+ choices:
+ - network
+ - floppy
+ - hd
+ - safe
+ - optical
+ - setup
+ - default
+ state:
+ description:
+      - Whether to ensure the boot device request is present or absent.
+      - "The choices for the state are:
+            - present -- Ensure the given boot device is requested for the next boot
+            - absent -- Clear the boot device request if it matches I(bootdev)"
+ default: present
+ choices: [ present, absent ]
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ type: bool
+ default: 'no'
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool
+ default: 'no'
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: str
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+- name: Ensure bootdevice is HD
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+- name: Ensure bootdevice is not Network
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ state: absent
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+        # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
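+
+
+# Illustrative sketch (not part of this module): the module above is a thin
+# wrapper around pyghmi's get_bootdev()/set_bootdev() and follows a simple
+# read-compare-write pattern. Direct use of the library (reuses the 'command'
+# import from the top of this file; host and credentials are placeholders):
+ipmi = command.Command(bmc='bmc.example.com', userid='admin', password='secret', port=623)
+current = ipmi.get_bootdev()  # e.g. {'bootdev': 'default', 'persistent': False}
+if current.get('bootdev') != 'hd':
+    # Request booting from the hard drive on the next boot only (persist=False).
+    ipmi.set_bootdev('hd', persist=False, uefiboot=False)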
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py
new file mode 100644
index 00000000..47840154
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipmi_power.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for the machine
+description:
+  - Use this module for power management of the machine.
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ state:
+ description:
+      - Whether to ensure that the machine is in the desired state.
+ - "The choices for state are:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'"
+ choices: ['on', 'off', shutdown, reset, boot]
+ required: true
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: str
+  sample: 'on'
+'''
+
+EXAMPLES = '''
+- name: Ensure machine is powered on
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: on
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
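+
+
+# Illustrative sketch (not part of this module): the module above compares the
+# BMC's reported power state with the requested one and only calls set_power()
+# when they differ. Direct pyghmi usage (reuses the 'command' import from the
+# top of this file; host and credentials are placeholders):
+ipmi = command.Command(bmc='bmc.example.com', userid='admin', password='secret', port=623)
+current = ipmi.get_power()  # e.g. {'powerstate': 'off'}
+if current['powerstate'] != 'on':
+    # wait=300 asks pyghmi to poll until the state change completes or times out.
+    response = ipmi.set_power('on', wait=300)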
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py
new file mode 100644
index 00000000..56475268
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iptables_state.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP
+ packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same
+ as the behaviour of the C(iptables-save) and C(iptables-restore) (or
+ C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
+ module uses internally.
+  - Modifying the state of the firewall remotely may lead to losing access to
+    the host in case of a mistake in the new ruleset. This module embeds a
+    rollback feature to avoid this, by telling the host to restore previous
+    rules if a cookie is still there after a given delay, and all this time
+    telling the controller to try to remove this cookie on the host through a
+    new connection.
+notes:
+  - The rollback feature is not a module option and depends on task's
+    attributes. To enable it, the module must be played asynchronously, i.e.
+    by setting task attributes I(poll) to C(0), and I(async) to a value less
+    than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback
+    will still happen if it is needed, but you will experience a connection
+    timeout instead of the more relevant info returned by the module after
+    its failure.
+ - This module supports I(check_mode).
+options:
+ counters:
+ description:
+ - Save or restore the values of all packet and byte counters.
+ - When C(true), the module is not idempotent.
+ type: bool
+ default: false
+ ip_version:
+ description:
+ - Which version of the IP protocol this module should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ modprobe:
+ description:
+ - Specify the path to the C(modprobe) program internally used by iptables
+ related commands to load kernel modules.
+ - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ executable's path.
+ type: path
+ noflush:
+ description:
+ - For I(state=restored), ignored otherwise.
+ - If C(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If C(true), the
+ previous rules are left untouched (but policies are updated anyway,
+ for all built-in chains).
+ type: bool
+ default: false
+ path:
+ description:
+ - The file the iptables state should be saved to.
+ - The file the iptables state should be restored from.
+ type: path
+ required: yes
+ state:
+ description:
+ - Whether the firewall state should be saved (into a file) or restored
+ (from a file).
+ type: str
+ choices: [ saved, restored ]
+ required: yes
+ table:
+ description:
+ - When I(state=restored), restore only the named table even if the input
+ file contains other tables. Fail if the named table is not declared in
+ the file.
+ - When I(state=saved), restrict output to the specified table. If not
+ specified, output includes all active tables.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent instant failure in case
+ multiple instances of the program are running concurrently.
+ type: int
+requirements: [iptables, ip6tables]
+'''
+
+EXAMPLES = r'''
+# This will apply to all loaded/active IPv4 tables.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ state: saved
+ path: /etc/sysconfig/iptables
+
+# This will apply only to IPv6 filter table.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ ip_version: ipv6
+ table: filter
+ state: saved
+ path: /etc/iptables/rules.v6
+
+# This will load a state from a file, with a rollback in case of access loss
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will load new rules by appending them to the current ones
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ noflush: true
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will only retrieve information
+- name: Get current state of the firewall
+ community.general.iptables_state:
+ state: saved
+ path: /tmp/iptables
+ check_mode: yes
+ changed_when: false
+ register: iptables_state
+
+- name: Show current state of the firewall
+ ansible.builtin.debug:
+ var: iptables_state.initial_state
+'''
+
+RETURN = r'''
+applied:
+ description: Whether or not the wanted state has been successfully restored.
+ type: bool
+ returned: always
+ sample: true
+initial_state:
+ description: The current state of the firewall when module starts.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD ACCEPT [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+restored:
+  description: The state the module tried to restore, whether or not it was finally applied.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT DROP [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+ "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "COMMIT",
+ "# Completed"
+ ]
+saved:
+ description: The iptables state the module saved.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+tables:
+  description: The iptables tables the module is interested in when it starts.
+ type: dict
+ contains:
+ table:
+ description: Policies and rules for all chains of the named table.
+ type: list
+ elements: str
+ sample: |-
+ {
+ "filter": [
+ ":INPUT ACCEPT",
+ ":FORWARD ACCEPT",
+ ":OUTPUT ACCEPT",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+ ],
+ "nat": [
+ ":PREROUTING ACCEPT",
+ ":INPUT ACCEPT",
+ ":OUTPUT ACCEPT",
+ ":POSTROUTING ACCEPT"
+ ]
+ }
+ returned: always
+'''
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+IPTABLES = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+SAVE = dict(
+ ipv4='iptables-save',
+ ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+ ipv4='iptables-restore',
+ ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+ '''
+ Read a file and store its content in a variable as a list.
+ '''
+ with open(b_path, 'r') as f:
+ text = f.read()
+ lines = text.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def write_state(b_path, lines, changed):
+ '''
+ Write given contents to the given path, and return changed status.
+ '''
+ # Populate a temporary file
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in lines:
+ f.write('%s\n' % line)
+
+ # Prepare to copy temporary file to the final destination
+ if not os.path.exists(b_path):
+ b_destdir = os.path.dirname(b_path)
+ destdir = to_native(b_destdir, errors='surrogate_or_strict')
+ if b_destdir and not os.path.exists(b_destdir) and not module.check_mode:
+ try:
+ os.makedirs(b_destdir)
+ except Exception as e:
+ module.fail_json(
+                    msg='Error creating %s: %s' % (destdir, to_native(e)),
+ initial_state=lines)
+ changed = True
+
+ elif not filecmp.cmp(tmpfile, b_path):
+ changed = True
+
+ # Do it
+ if changed and not module.check_mode:
+ try:
+ shutil.copyfile(tmpfile, b_path)
+ except Exception as e:
+ path = to_native(b_path, errors='surrogate_or_strict')
+ module.fail_json(
+                msg='Error saving state into %s: %s' % (path, to_native(e)),
+ initial_state=lines)
+
+ return changed
+
+
+def initialize_from_null_state(initializer, initcommand, table):
+ '''
+ This ensures iptables-state output is suitable for iptables-restore to roll
+ back to it, i.e. iptables-save output is not empty. This also works for the
+ iptables-nft-save alternative.
+ '''
+ if table is None:
+ table = 'filter'
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write('*%s\nCOMMIT\n' % table)
+
+ initializer.append(tmpfile)
+ (rc, out, err) = module.run_command(initializer, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+ return (rc, out, err)
+
+
+def filter_and_format_state(string):
+ '''
+ Remove timestamps to ensure idempotence between runs. Also remove counters
+ by default. And return the result as a list.
+ '''
+ string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string)
+ if not module.params['counters']:
+ string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string)
+ lines = string.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def per_table_state(command, state):
+ '''
+ Convert raw iptables-save output into usable datastructure, for reliable
+ comparisons between initial and final states.
+ '''
+ tables = dict()
+ for t in TABLES:
+ COMMAND = list(command)
+ if '*%s' % t in state.splitlines():
+ COMMAND.extend(['--table', t])
+ (rc, out, err) = module.run_command(COMMAND, check_rc=True)
+ out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out)
+ out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out)
+ table = out.splitlines()
+ while '' in table:
+ table.remove('')
+ tables[t] = table
+ return (tables)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ state=dict(type='str', choices=['saved', 'restored'], required=True),
+ table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ noflush=dict(type='bool', default=False),
+ counters=dict(type='bool', default=False),
+ modprobe=dict(type='path'),
+ ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'),
+ wait=dict(type='int'),
+ _timeout=dict(type='int'),
+ _back=dict(type='path'),
+ ),
+ required_together=[
+ ['_timeout', '_back'],
+ ],
+ supports_check_mode=True,
+ )
+
+ # We'll parse iptables-restore stderr
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
+ path = module.params['path']
+ state = module.params['state']
+ table = module.params['table']
+ noflush = module.params['noflush']
+ counters = module.params['counters']
+ modprobe = module.params['modprobe']
+ ip_version = module.params['ip_version']
+ wait = module.params['wait']
+ _timeout = module.params['_timeout']
+ _back = module.params['_back']
+
+ bin_iptables = module.get_bin_path(IPTABLES[ip_version], True)
+ bin_iptables_save = module.get_bin_path(SAVE[ip_version], True)
+ bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True)
+
+ os.umask(0o077)
+ changed = False
+ COMMANDARGS = []
+ INITCOMMAND = [bin_iptables_save]
+ INITIALIZER = [bin_iptables_restore]
+ TESTCOMMAND = [bin_iptables_restore, '--test']
+
+ if counters:
+ COMMANDARGS.append('--counters')
+
+ if table is not None:
+ COMMANDARGS.extend(['--table', table])
+
+ if wait is not None:
+ TESTCOMMAND.extend(['--wait', '%s' % wait])
+
+ if modprobe is not None:
+ b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict')
+ if not os.path.exists(b_modprobe):
+ module.fail_json(msg="modprobe %s not found" % modprobe)
+ if not os.path.isfile(b_modprobe):
+ module.fail_json(msg="modprobe %s not a file" % modprobe)
+ if not os.access(b_modprobe, os.R_OK):
+ module.fail_json(msg="modprobe %s not readable" % modprobe)
+ if not os.access(b_modprobe, os.X_OK):
+ module.fail_json(msg="modprobe %s not executable" % modprobe)
+ COMMANDARGS.extend(['--modprobe', modprobe])
+ INITIALIZER.extend(['--modprobe', modprobe])
+ INITCOMMAND.extend(['--modprobe', modprobe])
+ TESTCOMMAND.extend(['--modprobe', modprobe])
+
+ SAVECOMMAND = list(COMMANDARGS)
+ SAVECOMMAND.insert(0, bin_iptables_save)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if state == 'restored':
+ if not os.path.exists(b_path):
+ module.fail_json(msg="Source %s not found" % path)
+ if not os.path.isfile(b_path):
+ module.fail_json(msg="Source %s not a file" % path)
+ if not os.access(b_path, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % path)
+ state_to_restore = read_state(b_path)
+ else:
+ cmd = ' '.join(SAVECOMMAND)
+
+ (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True)
+
+    # The issue comes when wanting to restore state from empty iptables-save
+    # output... what happens when, say:
+    # - no table is specified, and iptables-save's output is only the nat table;
+    # - we give a filter ruleset to iptables-restore, which locks ourselves out
+    #   of the host;
+    # then trying to roll the iptables state back to the previous (working)
+    # setup doesn't override the current filter table because no filter table
+    # is stored in the backup! So we have to ensure tables to be restored have
+    # a backup in case of rollback.
+ if table is None:
+ if state == 'restored':
+ for t in TABLES:
+ if '*%s' % t in state_to_restore:
+ if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t)
+ elif len(stdout) == 0:
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter')
+
+ elif state == 'restored' and '*%s' % table not in state_to_restore:
+ module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+ elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table)
+
+ initial_state = filter_and_format_state(stdout)
+ if initial_state is None:
+ module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+ # Depending on the value of 'table', initref_state may differ from
+ # initial_state.
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_before = per_table_state(SAVECOMMAND, stdout)
+ initref_state = filter_and_format_state(stdout)
+
+ if state == 'saved':
+ changed = write_state(b_path, initref_state, changed)
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ saved=initref_state)
+
+ #
+ # All remaining code is for state=restored
+ #
+
+ MAINCOMMAND = list(COMMANDARGS)
+ MAINCOMMAND.insert(0, bin_iptables_restore)
+
+ if wait is not None:
+ MAINCOMMAND.extend(['--wait', '%s' % wait])
+
+ if _back is not None:
+ b_back = to_bytes(_back, errors='surrogate_or_strict')
+ garbage = write_state(b_back, initref_state, changed)
+ BACKCOMMAND = list(MAINCOMMAND)
+ BACKCOMMAND.append(_back)
+
+ if noflush:
+ MAINCOMMAND.append('--noflush')
+
+ MAINCOMMAND.append(path)
+ cmd = ' '.join(MAINCOMMAND)
+
+ TESTCOMMAND = list(MAINCOMMAND)
+ TESTCOMMAND.insert(1, '--test')
+ error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+ # Due to a bug in iptables-nft-restore --test, we have to validate tables
+ # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+ for t in tables_before:
+ testcommand = list(TESTCOMMAND)
+ testcommand.extend(['--table', t])
+ (rc, stdout, stderr) = module.run_command(testcommand)
+
+ if 'Another app is currently holding the xtables lock' in stderr:
+ error_msg = stderr
+
+ if rc != 0:
+ cmd = ' '.join(testcommand)
+ module.fail_json(
+ msg=error_msg,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ if module.check_mode:
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in initial_state:
+ f.write('%s\n' % line)
+
+ if filecmp.cmp(tmpfile, b_path):
+ restored_state = initial_state
+ else:
+ restored_state = state_to_restore
+
+ else:
+        # Give the action plugin enough time to retrieve the async status of
+        # the module in case of bad option type/value and the like.
+ if _back is not None:
+ b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict')
+ while True:
+ if os.path.exists(b_starter):
+ os.remove(b_starter)
+ break
+ else:
+ time.sleep(0.01)
+ continue
+
+ (rc, stdout, stderr) = module.run_command(MAINCOMMAND)
+ if 'Another app is currently holding the xtables lock' in stderr:
+ module.fail_json(
+ msg=stderr,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ restored_state = filter_and_format_state(stdout)
+
+ if restored_state != initref_state and restored_state != initial_state:
+ if module.check_mode:
+ changed = True
+ else:
+ tables_after = per_table_state(SAVECOMMAND, stdout)
+ if tables_after != tables_before:
+ changed = True
+
+ if _back is None or module.check_mode:
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+    # The rollback implementation currently needs:
+    # Here:
+    # * test existence of the backup file, exit with success if it doesn't exist
+    # * otherwise, restore iptables from this file and return failure
+    # Action plugin:
+    # * try to remove the backup file
+    # * wait until the async task is finished and retrieve its final status
+    # * modify it and return the result
+    # Task:
+    # * task attribute 'async' set to the same value as (or lower than) the
+    #   ansible timeout
+    # * task attribute 'poll' equals 0
+    #
+ for x in range(_timeout):
+ if os.path.exists(b_back):
+ time.sleep(1)
+ continue
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # Here we are: for whatever reason, but probably due to the current ruleset,
+ # the action plugin (i.e. on the controller) was unable to remove the backup
+ # cookie, so we restore initial state from it.
+ (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True)
+ os.remove(b_back)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_rollback = per_table_state(SAVECOMMAND, stdout)
+
+ msg = (
+ "Failed to confirm state restored from %s after %ss. "
+ "Firewall has been rolled back to its initial state." % (path, _timeout)
+ )
+
+ module.fail_json(
+ changed=(tables_before != tables_rollback),
+ msg=msg,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=False)
+
+
+if __name__ == '__main__':
+ main()
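+
+
+# Illustrative sketch (not part of this module): idempotence above relies on
+# stripping the trailing " on <date>" of iptables-save comment lines and on
+# zeroing packet/byte counters before comparing states. The same filtering
+# applied to a made-up iptables-save snippet (reuses the 're' import from the
+# top of this file):
+raw = (
+    "# Generated by xtables-save v1.8.2 on Tue Jan  5 10:00:00 2021\n"
+    "*filter\n"
+    ":INPUT ACCEPT [1234:567890]\n"
+    "COMMIT\n"
+    "# Completed on Tue Jan  5 10:00:00 2021\n"
+)
+cleaned = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', raw)
+cleaned = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', cleaned)
+# 'cleaned' is now identical between runs, whatever the timestamps and counters were.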
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
new file mode 100644
index 00000000..355c7034
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ipwcli_dns
+
+short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+
+version_added: '0.2.0'
+
+description:
+ - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
+
+requirements:
+ - ipwcli (installed on Ericsson IPWorks)
+
+notes:
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
+
+options:
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [ NAPTR, SRV, A, AAAA ]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+ - Required for C(type=A) or C(type=AAAA)
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+ - Required for C(type=SRV)
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+ - Required for C(type=SRV)
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: int
+ flags:
+ description:
+ - Sets one of the possible flags of NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
+
+author:
+ - Christian Wollinger (@cwollinger)
+'''
+
+EXAMPLES = '''
+- name: Create A record
+ community.general.ipwcli_dns:
+ dnsname: example.com
+ type: A
+ container: ZoneOne
+ address: 127.0.0.1
+
+- name: Remove SRV record if exists
+ community.general.ipwcli_dns:
+ dnsname: _sip._tcp.test.example.com
+ type: SRV
+ container: ZoneOne
+ ttl: 100
+ state: absent
+ target: example.com
+ port: 5060
+
+- name: Create NAPTR record
+ community.general.ipwcli_dns:
+ dnsname: test.example.com
+ type: NAPTR
+ preference: 10
+ container: ZoneOne
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.com.'
+ flags: S
+'''
+
+RETURN = '''
+record:
+ description: The created record from the input params
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+class ResourceRecord(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.dnsname = module.params['dnsname']
+ self.dnstype = module.params['type']
+ self.container = module.params['container']
+ self.address = module.params['address']
+ self.ttl = module.params['ttl']
+ self.state = module.params['state']
+ self.priority = module.params['priority']
+ self.weight = module.params['weight']
+ self.port = module.params['port']
+ self.target = module.params['target']
+ self.order = module.params['order']
+ self.preference = module.params['preference']
+ self.flags = module.params['flags']
+ self.service = module.params['service']
+ self.replacement = module.params['replacement']
+ self.user = module.params['username']
+ self.password = module.params['password']
+
+ def create_naptrrecord(self):
+ # create NAPTR record with the given params
+ record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"'
+ % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement))
+ return record
+
+ def create_srvrecord(self):
+ # create SRV record with the given params
+ record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s'
+ % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target))
+ return record
+
+ def create_arecord(self):
+ # create A record with the given params
+ if self.dnstype == 'AAAA':
+ record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+ else:
+ record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+
+ return record
+
+ def list_record(self, record):
+ # check if the record exists via list on ipwcli
+ search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=search)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or
+ ('NAPTRRecord %s' % self.dnsname in out and rc == 0)):
+ return True, rc, out, err
+
+ return False, rc, out, err
+
+ def deploy_record(self, record):
+        # create the record via ipwcli and fail if the creation does not succeed
+ stdin = 'create %s' % (record)
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) created.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record creation failed', stderr=out)
+
+ def delete_record(self, record):
+        # delete the record via ipwcli and fail if the deletion does not succeed
+ stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) were updated.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record deletion failed', stderr=out)
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ dnsname=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
+ container=dict(type='str', required=True),
+ address=dict(type='str', required=False),
+ ttl=dict(type='int', required=False, default=3600),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priority=dict(type='int', required=False, default=10),
+ weight=dict(type='int', required=False, default=10),
+ port=dict(type='int', required=False),
+ target=dict(type='str', required=False),
+ order=dict(type='int', required=False),
+ preference=dict(type='int', required=False),
+ flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str', required=False),
+ replacement=dict(type='str', required=False),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ # define result
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ record=''
+ )
+
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ['type', 'A', ['address']],
+ ['type', 'AAAA', ['address']],
+ ['type', 'SRV', ['port', 'target']],
+ ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']],
+ ],
+ supports_check_mode=True
+ )
+
+ user = ResourceRecord(module)
+
+ if user.dnstype == 'NAPTR':
+ record = user.create_naptrrecord()
+ elif user.dnstype == 'SRV':
+ record = user.create_srvrecord()
+ elif user.dnstype == 'A' or user.dnstype == 'AAAA':
+ record = user.create_arecord()
+
+ found, rc, out, err = user.list_record(record)
+
+ if found and user.state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.delete_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ elif not found and user.state == 'present':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.deploy_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ else:
+ result['changed'] = False
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
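+
+
+# Illustrative sketch (not part of this module): list_record() and
+# delete_record() above reuse the "create" command string and turn it into
+# ipwcli's query form by swapping ';' for '&&' and 'set' for 'where'. On a
+# sample A record string:
+record = 'arecord example.com 127.0.0.1 -set ttl=3600;container=ZoneOne'
+query = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
+# -> 'list arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne'
+deletion = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+# -> 'delete arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne'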
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py
new file mode 100644
index 00000000..1c050fc1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/irc.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+ - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+options:
+ server:
+ type: str
+ description:
+ - IRC server name/address
+ default: localhost
+ port:
+ type: int
+ description:
+ - IRC server port number
+ default: 6667
+ nick:
+ type: str
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ default: ansible
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ topic:
+ type: str
+ description:
+ - Set the channel topic
+ color:
+ type: str
+ description:
+      - Text color for the message. ("none" is a valid option in 1.6 or later; in versions prior to 1.6, the default color is black, not "none").
+ Added 11 more colors in version 2.0.
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ aliases: [colour]
+ channel:
+ type: str
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ nick_to:
+ type: list
+ elements: str
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ key:
+ type: str
+ description:
+ - Channel key
+ passwd:
+ type: str
+ description:
+ - Server password
+ timeout:
+ type: int
+ description:
+      - Timeout to use while waiting for successful registration and join
+        messages; this is to prevent an endless loop.
+ default: 30
+ use_ssl:
+ description:
+ - Designates whether TLS/SSL should be used when connecting to the IRC server
+ type: bool
+ default: 'no'
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ type: bool
+ default: 'yes'
+ style:
+ type: str
+ description:
+      - Text style for the message. Note that italic does not work on some clients.
+ choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ default: none
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to an IRC channel from nick ansible
+ community.general.irc:
+ server: irc.example.net
+    channel: '#t1'
+ msg: Hello world
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ nick_to:
+ - nick1
+ - nick2
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+ nick_to = [] if nick_to is None else nick_to
+
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+ try:
+ styletext = stylechoices[style]
+ except Exception:
+ styletext = ""
+
+ try:
+ colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
+ except Exception:
+ colortext = ""
+
+ message = styletext + colortext + msg
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+
+ if passwd:
+ irc.send(to_bytes('PASS %s\r\n' % passwd))
+ irc.send(to_bytes('NICK %s\r\n' % nick))
+ irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+ motd = ''
+ start = time.time()
+ while 1:
+ motd += to_native(irc.recv(1024))
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ time.sleep(0.5)
+
+ if channel:
+ if key:
+ irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+ else:
+ irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+ join = ''
+ start = time.time()
+ while 1:
+ join += to_native(irc.recv(1024))
+ if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ time.sleep(0.5)
+
+ if topic is not None:
+ irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+ time.sleep(1)
+
+ if nick_to:
+ for nick in nick_to:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+ if channel:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+ time.sleep(1)
+ if part:
+ if channel:
+ irc.send(to_bytes('PART %s\r\n' % channel))
+ irc.send(to_bytes('QUIT\r\n'))
+ time.sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list', elements='str'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception as e:
+ module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+
+if __name__ == '__main__':
+ main()
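+
+
+# Illustrative sketch (not part of this module): send_msg() above prefixes the
+# message with mIRC control codes -- \x02/\x1F/\x16/\x1D for style and \x03
+# followed by a two-digit number for color -- before sending the PRIVMSG.
+BOLD = "\x02"
+COLOR = "\x03"
+RED = "04"
+message = BOLD + COLOR + RED + "deployment finished"
+# The wire format is then: 'PRIVMSG #channel :' + message + '\r\n'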
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py
new file mode 100644
index 00000000..bf6359b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_create.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Ansible Project
+# Copyright: (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+ - This module is used to generate ISO file with specified path of files.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+requirements:
+- "pycdlib"
+- "python >= 2.7"
+version_added: '0.2.0'
+
+options:
+ src_files:
+ description:
+ - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
+      - Will fail if a specified file or folder in C(src_files) does not exist on the local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
+ underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
+ names are limited to 255 characters.'
+ type: list
+ required: yes
+ elements: path
+ dest_iso:
+ description:
+ - The absolute path with file name of the new generated ISO file on local machine.
+      - Will create intermediate folders when they do not exist.
+ type: path
+ required: yes
+ interchange_level:
+ description:
+      - The ISO9660 interchange level to use; it dictates the rules for the names of files.
+ - Levels and valid values C(1), C(2), C(3), C(4) are supported.
+ - The default value is level C(1), which is the most conservative, level C(3) is recommended.
+ - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+ - The volume identification string to use on the new generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+ - Whether to make this ISO have the Rock Ridge extensions or not.
+      - Valid values are C(1.09), C(1.10) or C(1.12); the specified Rock Ridge version will be added to the ISO.
+      - If unsure, set C(1.09) to ensure maximum compatibility.
+      - If not specified, the Rock Ridge extension will not be added to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are C(1), C(2), or C(3).
+ - Level C(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+ - If set to C(True), then version 2.60 of the UDF spec is used.
+ - If not specified or set to C(False), then no UDF support is added.
+ type: bool
+ default: False
+'''
+
+EXAMPLES = r'''
+- name: Create an ISO file
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ interchange_level: 3
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ rock_ridge: 1.09
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - ./windows_config/Autounattend.xml
+ dest_iso: ./test.iso
+ interchange_level: 3
+ joliet: 3
+ vol_ident: WIN_AUTOINSTALL
+'''
+
+RETURN = r'''
+source_file:
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
+created_iso:
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
+interchange_level:
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
+vol_ident:
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
+joliet:
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
+rock_ridge:
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
+udf:
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: False
+'''
+
+import os
+import traceback
+
+PYCDLIB_IMP_ERR = None
+try:
+ import pycdlib
+ HAS_PYCDLIB = True
+except ImportError:
+ PYCDLIB_IMP_ERR = traceback.format_exc()
+ HAS_PYCDLIB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot,
+ # followed by a maximum 3 character extension, followed by a semicolon and a version
+ file_name = os.path.basename(file_path)
+ if '.' not in file_name:
+ file_in_iso_path = file_path.upper() + '.;1'
+ else:
+ file_in_iso_path = file_path.upper() + ';1'
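+ # Illustrative example: '/setup.sh' becomes '/SETUP.SH;1', while a name
+ # without a dot such as '/README' becomes '/README.;1'.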
+ if rock_ridge:
+ rr_name = file_name
+ if use_joliet:
+ joliet_path = file_path
+ if use_udf:
+ udf_path = file_path
+ try:
+ iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ iso_dir_path = dir_path.upper()
+ if rock_ridge:
+ rr_name = os.path.basename(dir_path)
+ if use_joliet:
+ joliet_path = iso_dir_path
+ if use_udf:
+ udf_path = iso_dir_path
+ try:
+ iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+ argument_spec = dict(
+ src_files=dict(type='list', required=True, elements='path'),
+ dest_iso=dict(type='path', required=True),
+ interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ vol_ident=dict(type='str'),
+ rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+ joliet=dict(type='int', choices=[1, 2, 3]),
+ udf=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_PYCDLIB:
+ module.fail_json(msg=missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+ src_file_list = module.params.get('src_files')
+ if not src_file_list:
+ module.fail_json(msg='Please specify a source file and/or directory list using the src_files parameter.')
+ for src_file in src_file_list:
+ if not os.path.exists(src_file):
+ module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+ dest_iso = module.params.get('dest_iso')
+ if not dest_iso:
+ module.fail_json(msg='Please specify the absolute path of the newly created ISO file using the dest_iso parameter.')
+
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ # will create intermediate dir for new ISO file
+ try:
+ os.makedirs(dest_iso_dir)
+ except OSError as err:
+ module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+ volume_id = module.params.get('vol_ident')
+ if volume_id is None:
+ volume_id = ''
+ inter_level = module.params.get('interchange_level')
+ rock_ridge = module.params.get('rock_ridge')
+ use_joliet = module.params.get('joliet')
+ use_udf = None
+ if module.params['udf']:
+ use_udf = '2.60'
+
+ result = dict(
+ changed=False,
+ source_file=src_file_list,
+ created_iso=dest_iso,
+ interchange_level=inter_level,
+ vol_ident=volume_id,
+ rock_ridge=rock_ridge,
+ joliet=use_joliet,
+ udf=use_udf
+ )
+ if not module.check_mode:
+ iso_file = pycdlib.PyCdlib()
+ iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+ for src_file in src_file_list:
+ # if a directory is specified, walk it to add its files and subdirectories
+ if os.path.isdir(src_file):
+ dir_list = []
+ file_list = []
+ src_file = src_file.rstrip('/')
+ dir_name = os.path.basename(src_file)
+ add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+
+ # get dir list and file list
+ for path, dirs, files in os.walk(src_file):
+ for filename in files:
+ file_list.append(os.path.join(path, filename))
+ for dir in dirs:
+ dir_list.append(os.path.join(path, dir))
+ for new_dir in dir_list:
+ add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1],
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+ for new_file in file_list:
+ add_file(module, iso_file=iso_file, src_file=new_file,
+ file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+ # if a file is specified, add it directly under the '/' path in the ISO
+ else:
+ add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file),
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+
+ iso_file.write(dest_iso)
+ iso_file.close()
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py
new file mode 100644
index 00000000..0c73ac96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/iso_extract.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# Copyright: (c) 2016, Matt Robinson <git@nerdoftheherd.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Jeroen Hoekx (@jhoekx)
+- Matt Robinson (@ribbons)
+- Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+- This module has two possible ways of operation.
+- If 7zip is installed on the system, this module extracts files from an ISO
+ into a temporary directory and copies files to a given destination,
+ if needed.
+- If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+ mounts the ISO image to a temporary location, and copies files to a given
+ destination, if needed.
+requirements:
+- Either 7z (from I(7zip) or I(p7zip) package)
+- Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+options:
+ image:
+ description:
+ - The ISO image to extract files from.
+ type: path
+ required: yes
+ aliases: [ path, src ]
+ dest:
+ description:
+ - The destination directory to extract files to.
+ type: path
+ required: yes
+ files:
+ description:
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
+ type: list
+ elements: str
+ required: yes
+ force:
+ description:
+ - If C(yes), the remote file will be replaced when its contents differ from the source.
+ - If C(no), the file will only be extracted and copied if the destination does not already exist.
+ - Alias C(thirsty) has been deprecated and will be removed in community.general 3.0.0.
+ type: bool
+ default: yes
+ aliases: [ thirsty ]
+ executable:
+ description:
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ type: path
+ default: '7z'
+notes:
+- Only the file checksum (content) is taken into account when extracting files
+ from the ISO image. If C(force=no), only the presence of the destination file is checked.
+- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
+ requiring root access. This is no longer needed with the introduction of 7zip
+ for extraction.
+'''
+
+EXAMPLES = r'''
+- name: Extract kernel and ramdisk from a LiveCD
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ - isolinux/initrd.cgz
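+
+# A minimal sketch (not part of the original examples): only extract files
+# that are not already present at the destination, using a specific 7z binary.
+- name: Extract the kernel only if it is missing
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ force: no
+ executable: /usr/local/bin/7z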
+'''
+
+RETURN = r'''
+#
+'''
+
+import os.path
+import shutil
+import tempfile
+
+try: # python 3.3+
+ from shlex import quote
+except ImportError: # older python
+ from pipes import quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(type='path', required=True, aliases=['path', 'src']),
+ dest=dict(type='path', required=True),
+ files=dict(type='list', elements='str', required=True),
+ force=dict(type='bool', default=True, aliases=['thirsty']),
+ executable=dict(type='path'), # No default on purpose
+ ),
+ supports_check_mode=True,
+ )
+ image = module.params['image']
+ dest = module.params['dest']
+ files = module.params['files']
+ force = module.params['force']
+ executable = module.params['executable']
+
+ if module.params.get('thirsty'):
+ module.deprecate('The alias "thirsty" has been deprecated and will be removed, use "force" instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = dict(
+ changed=False,
+ dest=dest,
+ image=image,
+ )
+
+ # We want to know if the user provided it or not, so we set default here
+ if executable is None:
+ executable = '7z'
+
+ binary = module.get_bin_path(executable, None)
+
+ # When an executable was provided but the binary was not found, warn the user.
+ if module.params['executable'] is not None and not binary:
+ module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
+
+ if not os.path.exists(dest):
+ module.fail_json(msg="Directory '%s' does not exist" % dest)
+
+ if not os.path.exists(image):
+ module.fail_json(msg="ISO image '%s' does not exist" % image)
+
+ result['files'] = []
+ extract_files = list(files)
+
+ if not force:
+ # Check if we have to process any files based on existence
+ for f in files:
+ dest_file = os.path.join(dest, os.path.basename(f))
+ if os.path.exists(dest_file):
+ result['files'].append(dict(
+ checksum=None,
+ dest=dest_file,
+ src=f,
+ ))
+ extract_files.remove(f)
+
+ if not extract_files:
+ module.exit_json(**result)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ # Use 7zip when we have a binary, otherwise try to mount
+ if binary:
+ cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join([quote(f) for f in extract_files]))
+ else:
+ cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ result.update(dict(
+ cmd=cmd,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ ))
+ shutil.rmtree(tmp_dir)
+
+ if binary:
+ module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
+ else:
+ module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
+
+ try:
+ for f in extract_files:
+ tmp_src = os.path.join(tmp_dir, f)
+ if not os.path.exists(tmp_src):
+ module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
+
+ src_checksum = module.sha1(tmp_src)
+
+ dest_file = os.path.join(dest, os.path.basename(f))
+
+ if os.path.exists(dest_file):
+ dest_checksum = module.sha1(dest_file)
+ else:
+ dest_checksum = None
+
+ result['files'].append(dict(
+ checksum=src_checksum,
+ dest=dest_file,
+ src=f,
+ ))
+
+ if src_checksum != dest_checksum:
+ if not module.check_mode:
+ shutil.copy(tmp_src, dest_file)
+
+ result['changed'] = True
+ finally:
+ if not binary:
+ module.run_command('umount "%s"' % tmp_dir)
+
+ shutil.rmtree(tmp_dir)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py
new file mode 100644
index 00000000..68e2c593
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jabber.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jabber
+short_description: Send a message to jabber user or chat room
+description:
+ - Send a message to a Jabber user or a chat room.
+options:
+ user:
+ type: str
+ description:
+ - User as which to connect.
+ required: true
+ password:
+ type: str
+ description:
+ - Password for the user to connect.
+ required: true
+ to:
+ type: str
+ description:
+ - User ID or name of the room; when using a room, use a slash to indicate your nick.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ host:
+ type: str
+ description:
+ - Host to connect to; overrides the user info.
+ port:
+ type: int
+ description:
+ - Port to connect to; overrides the default.
+ default: 5222
+ encoding:
+ type: str
+ description:
+ - Message encoding.
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to a user
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
+
+- name: Send a message to a room
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
+
+- name: Send a message, specifying the host and port
+ community.general.jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
+'''
+
+import time
+import traceback
+
+HAS_XMPP = True
+XMPP_IMP_ERR = None
+try:
+ import xmpp
+except ImportError:
+ XMPP_IMP_ERR = traceback.format_exc()
+ HAS_XMPP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False, default=5222, type='int'),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
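+ # Illustrative example: to="room@conference.example.net/mynick" yields
+ # to="room@conference.example.net" and nick="mynick".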
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+ xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
+ # some old servers require this, also the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ join = xmpp.Presence(to=module.params['to'])
+ join.setTag('x', namespace='http://jabber.org/protocol/muc')
+ conn.send(join)
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py
new file mode 100644
index 00000000..7333397b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_cert.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, RSD Services S.A
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: java_cert
+short_description: Uses keytool to import/remove key from java keystore (cacerts)
+description:
+ - This is a wrapper module around keytool, which can be used to import/remove
+ certificates from a given java keystore.
+options:
+ cert_url:
+ description:
+ - Basic URL to fetch SSL certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: str
+ cert_port:
+ description:
+ - Port to connect to URL.
+ - This will be used to create server URL:PORT.
+ type: int
+ default: 443
+ cert_path:
+ description:
+ - Local path to load certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: path
+ cert_alias:
+ description:
+ - Imported certificate alias.
+ - The alias is used when checking for the presence of a certificate in the keystore.
+ type: str
+ trust_cacert:
+ description:
+ - Trust imported cert as CAcert.
+ type: bool
+ default: False
+ version_added: '0.2.0'
+ pkcs12_path:
+ description:
+ - Local path to load PKCS12 keystore from.
+ type: path
+ pkcs12_password:
+ description:
+ - Password for importing from PKCS12 keystore.
+ type: str
+ default: ''
+ pkcs12_alias:
+ description:
+ - Alias in the PKCS12 keystore.
+ type: str
+ keystore_path:
+ description:
+ - Path to keystore.
+ type: path
+ keystore_pass:
+ description:
+ - Keystore password.
+ type: str
+ required: true
+ keystore_create:
+ description:
+ - Create keystore if it does not exist.
+ type: bool
+ default: false
+ keystore_type:
+ description:
+ - Keystore type (JCEKS, JKS).
+ type: str
+ executable:
+ description:
+ - Path to the keytool binary; if not provided, keytool is searched for in PATH.
+ type: str
+ default: keytool
+ state:
+ description:
+ - Defines the action: either certificate import or removal.
+ type: str
+ choices: [ absent, present ]
+ default: present
+author:
+- Adam Hamsik (@haad)
+'''
+
+EXAMPLES = r'''
+- name: Import SSL certificate from google.com to a given cacerts keystore
+ community.general.java_cert:
+ cert_url: google.com
+ cert_port: 443
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ state: present
+
+- name: Remove certificate with given alias from a keystore
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ executable: /usr/lib/jvm/jre7/bin/keytool
+ state: absent
+
+- name: Import trusted CA from SSL certificate
+ community.general.java_cert:
+ cert_path: /opt/certs/rootca.crt
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+ cert_alias: LE_RootCA
+ trust_cacert: True
+
+- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ cert_alias: default
+ keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import SSL certificate to JCEKS keystore
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ pkcs12_alias: default
+ pkcs12_password: somepass
+ cert_alias: default
+ keystore_path: /opt/someapp/security/keystore.jceks
+ keystore_type: "JCEKS"
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+'''
+
+RETURN = r'''
+msg:
+ description: Output from stdout of keytool command after execution of given command.
+ returned: success
+ type: str
+ sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
+
+rc:
+ description: Keytool command execution return value.
+ returned: success
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done.
+ returned: success
+ type: str
+ sample: "keytool -importcert -noprompt -keystore"
+'''
+
+import os
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_keystore_type(keystore_type):
+ ''' Check that custom keystore is presented in parameters '''
+ if keystore_type:
+ return " -storetype '%s'" % keystore_type
+ return ''
+
+
+def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Check if certificate with alias is present in keystore
+ located at keystore_path '''
+ test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ (check_rc, _, _) = module.run_command(test_cmd)
+ if check_rc == 0:
+ return True
+ return False
+
+
+def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from URL into keystore located at keystore_path '''
+
+ https_proxy = os.getenv("https_proxy")
+ no_proxy = os.getenv("no_proxy")
+
+ proxy_opts = ''
+ if https_proxy is not None:
+ (proxy_host, proxy_port) = https_proxy.split(':')
+ proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port)
+
+ if no_proxy is not None:
+ # For Java's nonProxyHosts property, items are separated by '|',
+ # and patterns have to start with "*".
+ non_proxy_hosts = no_proxy.replace(',', '|')
+ non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)
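+ # Illustrative example: no_proxy="localhost,.example.com" becomes "localhost|*.example.com".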
+
+ # The property name is http.nonProxyHosts, there is no
+ # separate setting for HTTPS.
+ proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts
+
+ fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port)
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, alias,
+ get_keystore_type(keystore_type))
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Fetch SSL certificate from remote host.
+ (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True)
+
+ # Use remote certificate from remote host and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ data=fetch_out,
+ check_rc=False)
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
+ error=import_err)
+
+
+def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, path, alias,
+ get_keystore_type(keystore_type))
+
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type):
+ ''' Import pkcs12 from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 "
+ "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' "
+ "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass,
+ keystore_pass, path, pkcs12_pass, pkcs12_alias,
+ alias, get_keystore_type(keystore_type))
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Delete certificate identified with alias from keystore on keystore_path '''
+ del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ # Delete SSL certificate from keystore
+ (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)
+
+ diff = {'before': '%s\n' % alias, 'after': None}
+
+ module.exit_json(changed=True, msg=del_out,
+ rc=del_rc, cmd=del_cmd, stdout=del_out,
+ error=del_err, diff=diff)
+
+
+def test_keytool(module, executable):
+ ''' Test if keytool is actually executable or not '''
+ module.run_command("%s" % executable, check_rc=True)
+
+
+def test_keystore(module, keystore_path):
+ ''' Check if we can access keystore as file or not '''
+ if keystore_path is None:
+ keystore_path = ''
+
+ if not os.path.exists(keystore_path) and not os.path.isfile(keystore_path):
+ # Keystore doesn't exist and creating it was not requested, so fail
+ module.fail_json(changed=False, msg="Module requires an existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+ argument_spec = dict(
+ cert_url=dict(type='str'),
+ cert_path=dict(type='path'),
+ pkcs12_path=dict(type='path'),
+ pkcs12_password=dict(type='str', no_log=True),
+ pkcs12_alias=dict(type='str'),
+ cert_alias=dict(type='str'),
+ cert_port=dict(type='int', default=443),
+ keystore_path=dict(type='path'),
+ keystore_pass=dict(type='str', required=True, no_log=True),
+ trust_cacert=dict(type='bool', default=False),
+ keystore_create=dict(type='bool', default=False),
+ keystore_type=dict(type='str'),
+ executable=dict(type='str', default='keytool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']],
+ required_together=[['keystore_path', 'keystore_pass']],
+ mutually_exclusive=[
+ ['cert_url', 'cert_path', 'pkcs12_path']
+ ],
+ supports_check_mode=True,
+ )
+
+ url = module.params.get('cert_url')
+ path = module.params.get('cert_path')
+ port = module.params.get('cert_port')
+
+ pkcs12_path = module.params.get('pkcs12_path')
+ pkcs12_pass = module.params.get('pkcs12_password', '')
+ pkcs12_alias = module.params.get('pkcs12_alias', '1')
+
+ cert_alias = module.params.get('cert_alias') or url
+ trust_cacert = module.params.get('trust_cacert')
+
+ keystore_path = module.params.get('keystore_path')
+ keystore_pass = module.params.get('keystore_pass')
+ keystore_create = module.params.get('keystore_create')
+ keystore_type = module.params.get('keystore_type')
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+
+ if path and not cert_alias:
+ module.fail_json(changed=False,
+ msg="Using local path import from %s requires alias argument."
+ % path)
+
+ test_keytool(module, executable)
+
+ if not keystore_create:
+ test_keystore(module, keystore_path)
+
+ cert_present = check_cert_present(module, executable, keystore_path,
+ keystore_pass, cert_alias, keystore_type)
+
+ if state == 'absent' and cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ elif state == 'present' and not cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if pkcs12_path:
+ import_pkcs12_path(module, executable, pkcs12_path, keystore_path,
+ keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type)
+
+ if path:
+ import_cert_path(module, executable, path, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ if url:
+ import_cert_url(module, executable, url, port, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py
new file mode 100644
index 00000000..db37bdee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/java_keystore.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: java_keystore
+short_description: Create or delete a Java keystore in JKS format.
+description:
+ - Create or delete a Java keystore in JKS format for a given certificate.
+options:
+ name:
+ type: str
+ description:
+ - Name of the certificate.
+ required: true
+ certificate:
+ type: str
+ description:
+ - Certificate that should be used to create the key store.
+ required: true
+ private_key:
+ type: str
+ description:
+ - Private key that should be used to create the key store.
+ required: true
+ private_key_passphrase:
+ description:
+ - Pass phrase for reading the private key, if required.
+ type: str
+ required: false
+ version_added: '0.2.0'
+ password:
+ type: str
+ description:
+ - Password that should be used to secure the key store.
+ required: true
+ dest:
+ type: path
+ description:
+ - Absolute path where the jks should be generated.
+ required: true
+ owner:
+ description:
+ - Name of the user that should own jks file.
+ required: false
+ group:
+ description:
+ - Name of the group that should own jks file.
+ required: false
+ mode:
+ description:
+ - Mode (permissions) the file should have.
+ required: false
+ force:
+ description:
+ - Key store will be created even if it already exists.
+ required: false
+ type: bool
+ default: 'no'
+requirements: [openssl, keytool]
+author: Guillaume Grossetie (@Mogztter)
+extends_documentation_fragment:
+- files
+
+'''
+
+EXAMPLES = '''
+- name: Create a key store for the given certificate (inline)
+ community.general.java_keystore:
+ name: example
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
+ MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
+ -----END CERTIFICATE-----
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
+ GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
+ -----END RSA PRIVATE KEY-----
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a key store for the given certificate (lookup)
+ community.general.java_keystore:
+ name: example
+ certificate: "{{lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
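+
+# A minimal sketch (not part of the original examples): since this module
+# accepts the common file options, ownership and permissions of the generated
+# keystore can be set in the same task. The 'wildfly' owner/group used here
+# are placeholder values.
+- name: Create a key store and restrict access to it
+ community.general.java_keystore:
+ name: example
+ certificate: "{{ lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{ lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
+ owner: wildfly
+ group: wildfly
+ mode: '0600'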
+'''
+
+RETURN = '''
+msg:
+ description: Output from stdout of the keytool/openssl command after execution of the given command, or an error message.
+ returned: changed and failure
+ type: str
+ sample: "Unable to find the current certificate fingerprint in ..."
+
+rc:
+ description: keytool/openssl command execution return value
+ returned: changed and failure
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done
+ returned: changed and failure
+ type: str
+ sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+
+
+def read_certificate_fingerprint(module, openssl_bin, certificate_path):
+ current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"]
+ (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd)
+ if rc != 0:
+ return module.fail_json(msg=current_certificate_fingerprint_out,
+ err=current_certificate_fingerprint_err,
+ rc=rc,
+ cmd=current_certificate_fingerprint_cmd)
+
+ current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out)
+ if not current_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out,
+ rc=rc,
+ cmd=current_certificate_fingerprint_err
+ )
+
+ return current_certificate_match.group(1)
+
+
+def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password):
+ stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass", keystore_password, "-v"]
+ (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd)
+ if rc != 0:
+ if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out:
+ return module.fail_json(msg=stored_certificate_fingerprint_out,
+ err=stored_certificate_fingerprint_err,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd)
+ else:
+ return None
+ else:
+ stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out)
+ if not stored_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd
+ )
+
+ return stored_certificate_match.group(1)
+
+
+def run_commands(module, cmd, data=None, check_rc=True):
+ return module.run_command(cmd, check_rc=check_rc, data=data)
+
+
+def create_file(path, content):
+ with open(path, 'w') as f:
+ f.write(content)
+ return path
+
+
+def create_tmp_certificate(module):
+ return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate'])
+
+
+def create_tmp_private_key(module):
+ return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key'])
+
+
+def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias):
+ certificate_path = create_tmp_certificate(module)
+ try:
+ current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path)
+ stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass)
+ return current_certificate_fingerprint != stored_certificate_fingerprint
+ finally:
+ os.remove(certificate_path)
+
+
+def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ certificate_path = create_tmp_certificate(module)
+ private_key_path = create_tmp_private_key(module)
+ try:
+ if os.path.exists(keystore_path):
+ os.remove(keystore_path)
+
+ keystore_p12_path = "/tmp/keystore.p12"
+ if os.path.exists(keystore_p12_path):
+ os.remove(keystore_p12_path)
+
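+ # Two-step flow: first bundle the certificate and private key into a
+ # temporary PKCS#12 store with openssl, then convert it to JKS with keytool.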
+ export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path,
+ "-inkey", private_key_path, "-out",
+ keystore_p12_path, "-passout", "stdin"]
+
+ # when keypass is provided, add -passin
+ cmd_stdin = ""
+ if keypass:
+ export_p12_cmd.append("-passin")
+ export_p12_cmd.append("stdin")
+ cmd_stdin = "%s\n" % keypass
+
+ cmd_stdin += "%s\n%s" % (password, password)
+ (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd, data=cmd_stdin)
+ if rc != 0:
+ return module.fail_json(msg=export_p12_out,
+ rc=rc,
+ cmd=export_p12_cmd)
+
+ import_keystore_cmd = [keytool_bin, "-importkeystore",
+ "-destkeystore", keystore_path,
+ "-srckeystore", keystore_p12_path,
+ "-srcstoretype", "pkcs12",
+ "-alias", name,
+ "-deststorepass", password,
+ "-srcstorepass", password,
+ "-noprompt"]
+ (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None)
+ if rc == 0:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=True,
+ msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd,
+ stdout_lines=import_keystore_out)
+ else:
+ return module.fail_json(msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd)
+ finally:
+ os.remove(certificate_path)
+ os.remove(private_key_path)
+
+
+def update_jks_perm(module, keystore_path):
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=keystore_path)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = keystore_path
+ file_args = module.load_file_common_arguments(module.params)
+ module.set_fs_attributes_if_different(file_args, False)
+
+
+def process_jks(module):
+ name = module.params['name']
+ password = module.params['password']
+ keypass = module.params['private_key_passphrase']
+ keystore_path = module.params['dest']
+ force = module.params['force']
+ openssl_bin = module.get_bin_path('openssl', True)
+ keytool_bin = module.get_bin_path('keytool', True)
+
+ if os.path.exists(keystore_path):
+ if force:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name):
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if not module.check_mode:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=False)
+ else:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+
+
+class ArgumentSpec(object):
+ def __init__(self):
+ self.supports_check_mode = True
+ self.add_file_common_args = True
+ argument_spec = dict(
+ name=dict(required=True),
+ certificate=dict(required=True, no_log=True),
+ private_key=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ dest=dict(required=True, type='path'),
+ force=dict(required=False, default=False, type='bool'),
+ private_key_passphrase=dict(required=False, no_log=True, type='str')
+ )
+ self.argument_spec = argument_spec
+
+
+def main():
+ spec = ArgumentSpec()
+ module = AnsibleModule(
+ argument_spec=spec.argument_spec,
+ add_file_common_args=spec.add_file_common_args,
+ supports_check_mode=spec.supports_check_mode
+ )
+ process_jks(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py
new file mode 100644
index 00000000..4c077a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jboss.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: jboss
+short_description: Deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem.
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment.
+ type: str
+ src:
+ description:
+ - The remote path of the application ear or war to deploy.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ type: path
+ deploy_path:
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens.
+ type: path
+ state:
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed.
+ type: str
+notes:
+ - The JBoss standalone deployment-scanner has to be enabled in standalone.xml
+ - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner.
+ Duration of waiting time depends on scan-interval parameter from standalone.xml.
+ - Ensure no identically named application is deployed through the JBoss CLI
+seealso:
+- name: WildFly reference
+ description: Complete reference of the WildFly documentation.
+ link: https://docs.wildfly.org
+author:
+ - Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r"""
+- name: Deploy a hello world application to the default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
+- name: Update the hello world application to the non-default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deploy_path: /opt/wildfly/deployment
+ deployment: hello.war
+ state: present
+
+- name: Undeploy the hello world application from the default deploy_path
+ community.general.jboss:
+ deployment: hello.war
+ state: absent
+"""
+
+RETURN = r""" # """
+
+import os
+import shutil
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+
+DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments'
+
+
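+# Illustrative note (not part of the original code): the JBoss deployment
+# scanner signals state through marker files placed next to the artifact,
+# for example hello.war.deployed, hello.war.failed or hello.war.undeployed.
+# The helpers below poll for these markers.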
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path'),
+ deployment=dict(type='str', required=True),
+ deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH),
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ ),
+ required_if=[('state', 'present', ('src',))],
+ supports_check_mode=True
+ )
+
+ result = dict(changed=False)
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ if state == 'absent' and src:
+ module.warn('Parameter src is ignored when state=absent')
+ elif state == 'present' and not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.' % src)
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ # === when check_mode ===
+ if module.check_mode:
+ if state == 'present':
+ if not deployed:
+ result['changed'] = True
+
+ elif deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ result['changed'] = True
+
+ elif state == 'absent' and deployed:
+ result['changed'] = True
+
+ module.exit_json(**result)
+ # =======================
+
+ if state == 'present' and not deployed:
+ if is_failed(deploy_path, deployment):
+ # Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
+
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py
new file mode 100644
index 00000000..0e06b5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ type: str
+ description:
+ - Config in XML format.
+ - Required if job does not yet exist.
+ - Mutually exclusive with C(enabled).
+ - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ type: bool
+ required: false
+ name:
+ type: str
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ type: str
+ description:
+ - Attribute that specifies if the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server, as an alternative to C(password).
+ required: false
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a jenkins job using basic authentication
+ community.general.jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: http://localhost:8080
+ user: admin
+
+- name: Create a jenkins job using the token
+ community.general.jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: False
+ url: http://localhost:8080
+ user: admin
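+
+# A minimal sketch (not part of the original examples): re-enable a job that
+# was previously disabled, authenticating with an API token.
+- name: Enable a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: True
+ url: http://localhost:8080
+ user: admin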
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: str
+ sample: test-job
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: str
+ sample: present
+enabled:
+ description: Whether the jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: Url to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+'''
+
+import traceback
+import xml.etree.ElementTree as ET
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class JenkinsJob:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ self.EXCL_STATE = "excluded state"
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
+
+ def get_job_status(self):
+ try:
+ response = self.server.get_job_info(self.name)
+ if "color" not in response:
+ return self.EXCL_STATE
+ else:
+ return to_native(response['color'])
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception as e:
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep in current state if enabled arg_spec is not given
+ if self.enabled is None:
+ return False
+
+ if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")):
+ return True
+ return False
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif (status != self.EXCL_STATE and self.has_state_changed(status)):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def job_config_to_string(xml_str):
+ return ET.tostring(ET.fromstring(xml_str)).decode('ascii')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ config=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
+ enabled=dict(required=False, type='bool'),
+ token=dict(type='str', required=False, no_log=True),
+ url=dict(type='str', required=False, default="http://localhost:8080"),
+ user=dict(type='str', required=False)
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
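The glob and color filtering in get_jobs() above relies only on the standard library; a minimal sketch of the same filtering, using made-up job entries:

import fnmatch

# Illustrative job list; the module gets this from jenkins_conn.get_all_jobs()
all_jobs = [
    {"fullname": "folder/some-job-1", "color": "blue"},
    {"fullname": "folder/some-job-2", "color": "red"},
    {"fullname": "other-job", "color": "blue"},
]

# Glob filtering, as done when the glob parameter is set
jobs = [j for j in all_jobs if fnmatch.fnmatch(j["fullname"], "folder/some-job-*")]

# Color filtering, as done when the color parameter is set
jobs = [j for j in jobs if j["color"] == "blue"]

print(jobs)  # only folder/some-job-1 survives both filters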
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs that already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+ - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
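A note on the validate_certs handling in get_jenkins_connection() above: it works by swapping the interpreter-wide default HTTPS context for an unverified one, which disables certificate checks for every HTTPS connection the process makes afterwards. A minimal sketch of that pattern:

import ssl

validate_certs = False  # what the module does when validate_certs=false is requested

if not validate_certs and hasattr(ssl, 'SSLContext'):
    # Process-wide opt-out of certificate verification, as in the module above
    ssl._create_default_https_context = ssl._create_unverified_context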
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
new file mode 100644
index 00000000..e2adf7a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ type: str
+ description:
+ - Name of the Jenkins group on the OS.
+ default: jenkins
+ jenkins_home:
+ type: path
+ description:
+ - Home directory of the Jenkins user.
+ default: /var/lib/jenkins
+ mode:
+ type: raw
+ description:
+ - File mode applied on versioned plugins.
+ default: '0644'
+ name:
+ type: str
+ description:
+ - Plugin name.
+ required: yes
+ owner:
+ type: str
+ description:
+ - Name of the Jenkins user on the OS.
+ default: jenkins
+ state:
+ type: str
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, a check for a new version will be performed
+ every time. This is suitable for keeping the plugin up to date.
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ timeout:
+ type: int
+ description:
+ - Server connection timeout in seconds.
+ default: 30
+ updates_expiration:
+ type: int
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ default: 86400
+ updates_url:
+ type: str
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ default: https://updates.jenkins.io
+ url:
+ type: str
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ version:
+ type: str
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ - Quote the version to prevent the value from being interpreted as a float. For
+ example, if C(1.20) were unquoted, it would become C(1.2).
+ with_dependencies:
+ description:
+ - Defines whether to install plugin dependencies.
+ - This option takes effect only if the I(version) is not defined.
+ type: bool
+ default: yes
+
+notes:
+ - Plugin installation should be run under root or the same user which owns
+ the plugin files on the disk. Only if the plugin is not yet installed and
+ no version is specified is the installation performed through the API, which
+ requires only the Web UI credentials.
+ - It is necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin has been installed.
+ - Pinning works only if the plugin is installed and the Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment:
+ - url
+ - files
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ version: "1.15"
+
+- name: Pin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to authenticate
+#
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ url_username: admin
+ url_password: p4ssw0rd
+ url: http://localhost:8888
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: "1.4.9"
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ ansible.builtin.service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ ansible.builtin.uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+from ansible.module_utils.basic import AnsibleModule, to_bytes
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_native
+import base64
+import hashlib
+import json
+import os
+import tempfile
+import time
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+ # Cookie jar for crumb session
+ self.cookies = None
+
+ if self._csrf_enabled():
+ self.cookies = cookiejar.LWPCookieJar()
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ if 'useCrumbs' not in csrf_data:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumbs response.",
+ details=csrf_data)
+
+ return csrf_data['useCrumbs']
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.loads(to_native(r.read()))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=to_native(e))
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception as e:
+ self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumb response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Determine whether the plugin is installed/pinned/enabled
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] in [None, 'latest']:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
+
+ script_data = {
+ 'script': install_script
+ }
+ data = urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ hpi_file = '%s/plugins/%s.hpi' % (
+ self.params['jenkins_home'],
+ self.params['name'])
+
+ if os.path.isfile(hpi_file):
+ os.remove(hpi_file)
+
+ changed = True
+ else:
+ # Check if the Jenkins home directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ checksum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ with open(plugin_file, 'rb') as plugin_fh:
+ plugin_content = plugin_fh.read()
+ checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ checksum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if checksum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Get the downloaded data to compute its checksum
+ data = r.read()
+
+ # Make new checksum
+ checksum_new = hashlib.sha1(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if checksum_old != checksum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ elif self.params['version'] == 'latest':
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ # If the latest version changed, download it
+ if checksum_old != to_bytes(plugin_data['sha1']):
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ update_fd, updates_file = tempfile.mkstemp()
+ os.write(update_fd, r.read())
+
+ try:
+ os.close(update_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=to_native(e))
+
+ # Open the updates file
+ try:
+ f = open(updates_file, encoding='utf-8')
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot open the temporary updates file.",
+ details=to_native(e))
+
+ i = 0
+ for line in f:
+ # Read only the second line
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=to_native(e))
+
+ break
+
+ i += 1
+
+ # Move the updates file to the right place if we could read it
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError as e:
+ self.module.fail_json(
+ msg="Cannot create the temporary directory.",
+ details=to_native(e))
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+ if isinstance(data, (text_type, binary_type)):
+ os.write(tmp_f_fd, data)
+ else:
+ os.write(tmp_f_fd, data.read())
+
+ try:
+ os.close(tmp_f_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=to_native(e))
+
+ # Move the file to the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(type='str', default='jenkins'),
+ jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(type='str', required=True),
+ owner=dict(type='str', default='jenkins'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins.io'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError as e:
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=to_native(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
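The versioned-install path in JenkinsPlugin.install() above decides whether to rewrite the plugin file by comparing SHA-1 checksums of the installed and the downloaded archive. A minimal self-contained sketch of that comparison, with placeholder byte strings standing in for the .jpi/.hpi contents:

import hashlib

def sha1_of(data):
    # Hex digest, as used for both the installed and the downloaded plugin file
    return hashlib.sha1(data).hexdigest()

installed_bytes = b'old plugin archive'   # placeholder for the on-disk .jpi contents
downloaded_bytes = b'new plugin archive'  # placeholder for the freshly downloaded .hpi

if sha1_of(installed_bytes) != sha1_of(downloaded_bytes):
    # Checksums differ, so the module would write the downloaded file into place
    print('plugin changed, would be rewritten')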
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py
new file mode 100644
index 00000000..68f06c27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jenkins_script.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# encoding: utf-8
+
+# (c) 2016, James Hogarth <james.hogarth@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a groovy script in the jenkins instance
+description:
+ - The C(jenkins_script) module takes a script plus a dict of values
+ to use within the script and returns the result of the script being run.
+
+options:
+ script:
+ type: str
+ description:
+ - The groovy script to be executed.
+ This gets passed as a string Template if args is defined.
+ required: true
+ url:
+ type: str
+ description:
+ - The jenkins server to execute the script against. The default is a local
+ jenkins instance that is not being proxied through a webserver.
+ default: http://localhost:8080
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ This should only be set to C(no) when used on personally controlled sites
+ using self-signed certificates, as it avoids verifying the source site.
+ type: bool
+ default: 'yes'
+ user:
+ type: str
+ description:
+ - The username to connect to the jenkins server with.
+ password:
+ type: str
+ description:
+ - The password to connect to the jenkins server with.
+ timeout:
+ type: int
+ description:
+ - The request timeout in seconds.
+ default: 10
+ args:
+ type: dict
+ description:
+ - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+ - Since the script can do anything, this module does not report on changes.
+ Because you know what the script does, it is important to set changed_when
+ so that the Ansible output is clear about any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+ community.general.jenkins_script:
+ script: 'println(Jenkins.instance.pluginManager.plugins)'
+ user: admin
+ password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+ ansible.builtin.set_fact:
+ setmaster_mode: |
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
+
+- name: Use the variable as the script
+ community.general.jenkins_script:
+ script: "{{ setmaster_mode }}"
+ args:
+ jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+ community.general.jenkins_script:
+ script: "println(Jenkins.instance.pluginManager.plugins)"
+ user: admin
+ password: admin
+ url: https://localhost
+ validate_certs: no
+'''
+
+RETURN = '''
+output:
+ description: Result of script
+ returned: success
+ type: str
+ sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_csrf_protection_enabled(module):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/api/json',
+ timeout=module.params['timeout'],
+ method='GET')
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/crumbIssuer/api/json',
+ method='GET',
+ timeout=module.params['timeout'],
+ cookies=cookies)
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ script=dict(required=True, type="str"),
+ url=dict(required=False, type="str", default="http://localhost:8080"),
+ validate_certs=dict(required=False, type="bool", default=True),
+ user=dict(required=False, type="str", default=None),
+ password=dict(required=False, no_log=True, type="str", default=None),
+ timeout=dict(required=False, type="int", default=10),
+ args=dict(required=False, type="dict", default=None)
+ )
+ )
+
+ if module.params['user'] is not None:
+ if module.params['password'] is None:
+ module.fail_json(msg="password required when user provided", output='')
+ module.params['url_username'] = module.params['user']
+ module.params['url_password'] = module.params['password']
+ module.params['force_basic_auth'] = True
+
+ if module.params['args'] is not None:
+ from string import Template
+ try:
+ script_contents = Template(module.params['script']).substitute(module.params['args'])
+ except KeyError as err:
+ module.fail_json(msg="Error with templating variable: %s" % err, output='')
+ else:
+ script_contents = module.params['script']
+
+ headers = {}
+ cookies = None
+ if is_csrf_protection_enabled(module):
+ cookies = cookiejar.LWPCookieJar()
+ crumb = get_crumb(module, cookies)
+ headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+ resp, info = fetch_url(module,
+ module.params['url'] + "/scriptText",
+ data=urlencode({'script': script_contents}),
+ headers=headers,
+ method="POST",
+ timeout=module.params['timeout'],
+ cookies=cookies)
+
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ result = to_native(resp.read())
+
+ if 'Exception:' in result and 'at java.lang.Thread' in result:
+ module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
+
+ module.exit_json(
+ output=result,
+ )
+
+
+if __name__ == '__main__':
+ main()
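The args handling above renders the script through string.Template before posting it to /scriptText; a minimal sketch with a placeholder Groovy snippet:

from string import Template

script = 'instance.setMode(${jenkins_mode})'   # placeholder Groovy snippet
args = {'jenkins_mode': 'Node.Mode.EXCLUSIVE'}

try:
    script_contents = Template(script).substitute(args)
except KeyError as err:
    raise SystemExit('Error with templating variable: %s' % err)

print(script_contents)  # -> instance.setMode(Node.Mode.EXCLUSIVE)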
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py
new file mode 100644
index 00000000..d10be9ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/jira.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: jira
+short_description: create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ type: str
+ required: true
+ description:
+ - Base URI for the JIRA instance.
+
+ operation:
+ type: str
+ required: true
+ aliases: [ command ]
+ choices: [ comment, create, edit, fetch, link, search, transition, update ]
+ description:
+ - The operation to perform.
+
+ username:
+ type: str
+ required: true
+ description:
+ - The username to log-in with.
+
+ password:
+ type: str
+ required: true
+ description:
+ - The password to log-in with.
+
+ project:
+ type: str
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ type: str
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ type: str
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ type: str
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ type: str
+ required: false
+ description:
+ - An existing issue key to operate on.
+ aliases: ['ticket']
+
+ comment:
+ type: str
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ type: str
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ type: str
+ required: false
+ description:
+ - Sets the assignee on create or transition operations. Note that not all transitions allow this.
+
+ linktype:
+ type: str
+ required: false
+ description:
+ - The type of link to create, when the 'link' operation is selected.
+
+ inwardissue:
+ type: str
+ required: false
+ description:
+ - The issue from which the link will be created.
+
+ outwardissue:
+ type: str
+ required: false
+ description:
+ - The issue to which the link will be created.
+
+ fields:
+ type: dict
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+ (possibly after merging with other required data, as when passed to create). See examples for more information,
+ and the JIRA REST API for the structure required for various fields.
+
+ jql:
+ required: false
+ description:
+ - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ type: str
+ version_added: '0.2.0'
+
+ maxresults:
+ required: false
+ description:
+ - Limit the result of I(operation=search). If no value is specified, the default jira limit will be used.
+ - Used when I(operation=search) only, ignored otherwise.
+ type: int
+ version_added: '0.2.0'
+
+ timeout:
+ type: float
+ required: false
+ description:
+ - Set timeout, in seconds, on requests to JIRA API.
+ default: 10
+
+ validate_certs:
+ required: false
+ description:
+ - Require valid SSL certificates (set to `false` if you'd like to use self-signed certificates)
+ default: true
+ type: bool
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author:
+- "Steve Smith (@tarka)"
+- "Per Abildgaard Toft (@pertoft)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
+ args:
+ fields:
+ customfield_13225: "test"
+ customfield_12931: '{"value": "Test"}'
+ register: issue
+
+- name: Comment on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key}}'
+ operation: edit
+ assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: update
+ args:
+ fields:
+ customfield_12931: [ {'set': {'value': 'Virtual'}} ]
+ customfield_13820: [ {'set': {'value':'Manually'}} ]
+ register: cmdb_issue
+ delegate_to: localhost
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
+ register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note: it must be a dict, hence lastViewed: null
+- name: Search for an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: search
+ maxresults: 10
+ jql: project=cmdb AND cf[13225]="test"
+ args:
+ fields:
+ lastViewed: null
+ register: issue
+
+- name: Create a unix account for the reporter
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get a list of valid link types at the /rest/api/2/issueLinkType
+# URL of your JIRA installation.
+- name: Create link from HSP-1 to MKY-1
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relates
+ inwardissue: HSP-1
+ outwardissue: MKY-1
+
+# Transition an issue by target status
+- name: Close the issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Done
+ args:
+ fields:
+ customfield_14321: [ {'set': {'value': 'Value of Select' }} ]
+ comment: [ { 'add': { 'body' : 'Test' } }]
+
+"""
+
+import base64
+import json
+import sys
+import traceback
+
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+
+from ansible.module_utils._text import to_text, to_bytes, to_native
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(url, user, passwd, timeout, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+ auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
+ response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
+ headers={'Content-Type': 'application/json',
+ 'Authorization': "Basic %s" % auth})
+
+ if info['status'] not in (200, 201, 204):
+ error = None
+ try:
+ error = json.loads(info['body'])
+ except Exception:
+ module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc())
+ if error:
+ msg = []
+ for key in ('errorMessages', 'errors'):
+ if error.get(key):
+ msg.append(to_native(error[key]))
+ if msg:
+ module.fail_json(msg=', '.join(msg))
+ module.fail_json(msg=to_native(error))
+ # Fall back to printing the body if it cannot be decoded
+ module.fail_json(msg=to_native(info['body']))
+
+ body = response.read()
+
+ if body:
+ return json.loads(to_text(body, errors='surrogate_or_strict'))
+ return {}
+
+
+def post(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='POST')
+
+
+def put(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='PUT')
+
+
+def get(url, user, passwd, timeout):
+ return request(url, user, passwd, timeout)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': {'key': params['project']},
+ 'summary': params['summary'],
+ 'issuetype': {'name': params['issuetype']}}
+
+ if params['description']:
+ createfields['description'] = params['description']
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def update(restbase, user, passwd, params):
+ data = {
+ "update": params['fields'],
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def search(restbase, user, passwd, params):
+ url = restbase + '/search?jql=' + pathname2url(params['jql'])
+ if params['fields']:
+ fields = params['fields'].keys()
+ url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+ if params['maxresults']:
+ url = url + '&maxResults=' + str(params['maxresults'])
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd, params['timeout'])
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed to find a valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = {'transition': {"id": tid},
+ 'update': params['fields']}
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def link(restbase, user, passwd, params):
+ data = {
+ 'type': {'name': params['linktype']},
+ 'inwardIssue': {'key': params['inwardissue']},
+ 'outwardIssue': {'key': params['outwardissue']},
+ }
+
+ url = restbase + '/issueLink/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(type='str', required=True),
+ operation=dict(type='str', choices=['create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'],
+ aliases=['command'], required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', ),
+ summary=dict(type='str', ),
+ description=dict(type='str', ),
+ issuetype=dict(type='str', ),
+ issue=dict(type='str', aliases=['ticket']),
+ comment=dict(type='str', ),
+ status=dict(type='str', ),
+ assignee=dict(type='str', ),
+ fields=dict(default={}, type='dict'),
+ linktype=dict(type='str', ),
+ inwardissue=dict(type='str', ),
+ outwardissue=dict(type='str', ),
+ jql=dict(type='str', ),
+ maxresults=dict(type='int'),
+ timeout=dict(type='float', default=10),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_if=(
+ ('operation', 'create', ['project', 'issuetype', 'summary']),
+ ('operation', 'comment', ['issue', 'comment']),
+ ('operation', 'fetch', ['issue']),
+ ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+ ('operation', 'search', ['jql']),
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = {'name': module.params['assignee']}
+
+ if not uri.endswith('/'):
+ uri = uri + '/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+ # Lookup the corresponding method for this operation. This is
+ # safe as the AnsibleModule should remove any unknown operations.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ changed, ret = method(restbase, user, passwd, module.params)
+
+ except Exception as e:
+ return module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, meta=ret)
+
+
+if __name__ == '__main__':
+ main()
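The request() helper above injects the Basic auth header itself instead of relying on fetch_url's challenge-response flow, because JIRA may answer anonymously without issuing a challenge. A minimal sketch of how that header is built, with placeholder credentials:

import base64

user, passwd = 'admin', 'secret'  # placeholder credentials

auth = base64.b64encode(('%s:%s' % (user, passwd)).encode('utf-8')).decode('ascii')
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Basic %s' % auth,
}

print(headers['Authorization'])  # sent pre-emptively with every JIRA request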
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py
new file mode 100644
index 00000000..732c4723
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/katello.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: katello
+short_description: Manage Katello Resources
+deprecated:
+ removed_in: '2.0.0' # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Katello resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (for example, product or repository).
+ choices:
+
+ - repository
+ - manifest
+ - repository_set
+ - sync_plan
+ - content_view
+ - lifecycle_environment
+ - activation_key
+ - product
+
+ required: true
+ action:
+ description:
+ - Action to perform on the entity resource.
+ - Possible actions in relation to entities.
+ - "sync (available when entity=product or entity=repository)"
+ - "publish (available when entity=content_view)"
+ - "promote (available when entity=content_view)"
+ choices:
+ - sync
+ - publish
+ - promote
+ required: false
+ params:
+ description:
+ - Parameters associated with the entity resource and action, to set or edit, in dictionary format.
+ - Each choice may only be available with specific entities and actions.
+ - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,...],...)."
+ - The action "None" means no action specified.
+ - Possible Params in relation to entity and action.
+ - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "content ([manifest,None])"
+ - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
+ - "basearch ([repository_set,None])"
+ - "releaserver ([repository_set,None])"
+ - "sync_date ([sync_plan,None])"
+ - "interval ([sync_plan,None])"
+ - "repositories ([content_view,None])"
+ - "from_environment ([content_view,promote])"
+ - "to_environment([content_view,promote])"
+ - "prior ([lifecycle_environment,None])"
+ - "content_view ([activation_key,None])"
+ - "lifecycle_environment ([activation_key,None])"
+ required: true
+ task_timeout:
+ description:
+ - The timeout in seconds to wait for the started Foreman action to finish.
+ - If the timeout is reached and the Foreman action has not completed, the Ansible task fails. However, the Foreman action does not get canceled.
+ default: 1000
+ required: false
+ verify_ssl:
+ description:
+ - Whether to verify the SSL/HTTPS connection to the Foreman server (for example, that it presents a valid certificate).
+ default: false
+ type: bool
+ required: false
+'''
+
+EXAMPLES = '''
+---
+# Simple Example:
+
+- name: Create Product
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: product
+ params:
+ name: Centos 7
+ delegate_to: localhost
+
+# Abstraction Example:
+# katello.yml
+---
+- name: "{{ name }}"
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: "{{ entity }}"
+ params: "{{ params }}"
+ delegate_to: localhost
+
+# tasks.yml
+---
+- include: katello.yml
+ vars:
+ name: Create Dev Environment
+ entity: lifecycle_environment
+ params:
+ name: Dev
+ prior: Library
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create Centos Product
+ entity: product
+ params:
+ name: Centos 7
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create 7.2 Repository
+ entity: repository
+ params:
+ name: Centos 7.2
+ product: Centos 7
+ organization: Default Organization
+ content_type: yum
+ url: http://mirror.centos.org/centos/7/os/x86_64/
+
+- include: katello.yml
+ vars:
+ name: Create Centos 7 View
+ entity: content_view
+ params:
+ name: Centos 7 View
+ organization: Default Organization
+ repositories:
+ - name: Centos 7.2
+ product: Centos 7
+
+- include: katello.yml
+ vars:
+ name: Enable RHEL Product
+ entity: repository_set
+ params:
+ name: Red Hat Enterprise Linux 7 Server (RPMs)
+ product: Red Hat Enterprise Linux Server
+ organization: Default Organization
+ basearch: x86_64
+ releasever: 7
+
+- include: katello.yml
+ vars:
+ name: Promote Contentview Environment with longer timeout
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+
+# Best Practices
+
+# In Foreman, things can be done in parallel.
+# When a conflicting action is already running,
+# the task will fail instantly instead of waiting for the already running action to complete.
+# So you should use a "until success" loop to catch this.
+
+- name: Promote Contentview Environment with increased Timeout
+ community.general.katello:
+ username: ansibleuser
+ password: supersecret
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+ register: task_result
+ until: task_result is success
+ retries: 9
+ delay: 120
+
+'''
+
+RETURN = '''# '''
+
+import datetime
+import os
+import traceback
+
+try:
+ from nailgun import entities, entity_fields, entity_mixins
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module, task_timeout):
+ self._server = server
+ self._entities = entities
+ self._module = module
+ entity_mixins.TASK_TIMEOUT = task_timeout
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No organization found for %s" % name)
+
+ def find_lifecycle_environment(self, name, organization):
+ org = self.find_organization(organization)
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
+
+ def find_product(self, name, organization):
+ org = self.find_organization(organization)
+
+ product = self._entities.Product(self._server, name=name, organization=org)
+ response = product.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Product found for %s" % name)
+
+ def find_repository(self, name, product, organization):
+ product = self.find_product(product, organization)
+
+ repository = self._entities.Repository(self._server, name=name, product=product)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Repository found for %s" % name)
+
+ def find_content_view(self, name, organization):
+ org = self.find_organization(organization)
+
+ content_view = self._entities.ContentView(self._server, name=name, organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+ def manifest(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ # read the manifest relative to the current working directory; a context
+ # manager avoids referencing an undefined variable if open() itself fails
+ with open(os.getcwd() + params['content'], 'r') as manifest_file:
+ content = manifest_file.read()
+
+ manifest = self._entities.Subscription(self._server)
+
+ try:
+ manifest.upload(
+ data={'organization_id': org.id},
+ files={'content': content}
+ )
+ return True
+ except Exception as e:
+
+ if "Import is the same as existing data" in e.message:
+ return False
+ else:
+ self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def product(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ product = self._entities.Product(self._server, **params)
+ response = product.search()
+
+ if len(response) == 1:
+ product.id = response[0].id
+ product.update()
+ else:
+ product.create()
+
+ return True
+
+ def sync_product(self, params):
+ org = self.find_organization(params['organization'])
+ product = self.find_product(params['name'], org.name)
+
+ return product.sync()
+
+ def repository(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ params['product'] = product.id
+ del params['organization']
+
+ repository = self._entities.Repository(self._server, **params)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ repository.id = response[0].id
+ repository.update()
+ else:
+ repository.create()
+
+ return True
+
+ def sync_repository(self, params):
+ org = self.find_organization(params['organization'])
+ repository = self.find_repository(params['name'], params['product'], org.name)
+
+ return repository.sync()
+
+ def repository_set(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ del params['product']
+ del params['organization']
+
+ if not product:
+ return False
+ else:
+ reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
+ reposet = reposet.search()[0]
+
+ formatted_name = [params['name'].replace('(', '').replace(')', '')]
+ formatted_name.append(params['basearch'])
+
+ if 'releasever' in params:
+ formatted_name.append(params['releasever'])
+
+ formatted_name = ' '.join(formatted_name)
+
+ repository = self._entities.Repository(self._server, product=product, name=formatted_name)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ repository = repository.search()
+
+ if len(repository) == 0:
+ if 'releasever' in params:
+ reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
+ else:
+ reposet.enable(data={'basearch': params['basearch']})
+
+ return True
+
+ def sync_plan(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+ params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
+
+ products = params['products']
+ del params['products']
+
+ sync_plan = self._entities.SyncPlan(
+ self._server,
+ name=params['name'],
+ organization=org
+ )
+ response = sync_plan.search()
+
+ sync_plan.sync_date = params['sync_date']
+ sync_plan.interval = params['interval']
+
+ if len(response) == 1:
+ sync_plan.id = response[0].id
+ sync_plan.update()
+ else:
+ response = sync_plan.create()
+ sync_plan.id = response[0].id
+
+ if products:
+ ids = []
+
+ for name in products:
+ product = self.find_product(name, org.name)
+ ids.append(product.id)
+
+ sync_plan.add_products(data={'product_ids': ids})
+
+ return True
+
+ def content_view(self, params):
+ org = self.find_organization(params['organization'])
+
+ content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ content_view.id = response[0].id
+ content_view.update()
+ else:
+ content_view = content_view.create()
+
+ if params['repositories']:
+ repos = []
+
+ for repository in params['repositories']:
+ repository = self.find_repository(repository['name'], repository['product'], org.name)
+ repos.append(repository)
+
+ content_view.repository = repos
+ content_view.update(['repository'])
+
+ def find_content_view_version(self, name, organization, environment):
+ env = self.find_lifecycle_environment(environment, organization)
+ content_view = self.find_content_view(name, organization)
+
+ content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
+ response = content_view_version.search(['content_view'], {'environment_id': env.id})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View version found for %s" % response)
+
+ def publish(self, params):
+ content_view = self.find_content_view(params['name'], params['organization'])
+
+ return content_view.publish()
+
+ def promote(self, params):
+ to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
+ version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+ data = {'environment_id': to_environment.id}
+ return version.promote(data=data)
+
+ def lifecycle_environment(self, params):
+ org = self.find_organization(params['organization'])
+ prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ lifecycle_env.id = response[0].id
+ lifecycle_env.update()
+ else:
+ lifecycle_env.create()
+
+ return True
+
+ def activation_key(self, params):
+ org = self.find_organization(params['organization'])
+
+ activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+ response = activation_key.search()
+
+ if len(response) == 1:
+ activation_key.id = response[0].id
+ activation_key.update()
+ else:
+ activation_key.create()
+
+ if params['content_view']:
+ content_view = self.find_content_view(params['content_view'], params['organization'])
+ lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+ activation_key.content_view = content_view
+ activation_key.environment = lifecycle_environment
+ activation_key.update()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True,
+ choices=['repository', 'manifest', 'repository_set', 'sync_plan',
+ 'content_view', 'lifecycle_environment', 'activation_key', 'product']),
+ action=dict(type='str', choices=['sync', 'publish', 'promote']),
+ verify_ssl=dict(type='bool', default=False),
+ task_timeout=dict(type='int', default=1000),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+ module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ action = module.params['action']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+ task_timeout = module.params['task_timeout']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module, task_timeout)
+
+ # Let's make a connection to the server with username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ result = False
+
+ if entity == 'product':
+ if action == 'sync':
+ result = ng.sync_product(params)
+ else:
+ result = ng.product(params)
+ elif entity == 'repository':
+ if action == 'sync':
+ result = ng.sync_repository(params)
+ else:
+ result = ng.repository(params)
+ elif entity == 'manifest':
+ result = ng.manifest(params)
+ elif entity == 'repository_set':
+ result = ng.repository_set(params)
+ elif entity == 'sync_plan':
+ result = ng.sync_plan(params)
+ elif entity == 'content_view':
+ if action == 'publish':
+ result = ng.publish(params)
+ elif action == 'promote':
+ result = ng.promote(params)
+ else:
+ result = ng.content_view(params)
+ elif entity == 'lifecycle_environment':
+ result = ng.lifecycle_environment(params)
+ elif entity == 'activation_key':
+ result = ng.activation_key(params)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+ module.exit_json(changed=result, result="%s updated" % entity)
+
+
+if __name__ == '__main__':
+ main()
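
Each NailGun helper above follows the same idempotency recipe: search for the entity, update it in place when exactly one match comes back, otherwise create it. The sketch below restates that recipe with a dummy entity class; the class and its methods are illustrative assumptions, not the nailgun API.

class FakeEntity(object):
    """Stand-in for a nailgun entity with search/create/update methods."""

    def __init__(self, name):
        self.name = name
        self.id = None

    def search(self):
        # pretend nothing exists on the server yet
        return []

    def create(self):
        print('created %s' % self.name)

    def update(self):
        print('updated %s (id=%s)' % (self.name, self.id))


def ensure(entity):
    # mirrors e.g. NailGun.product(): one hit -> update by id, none -> create
    matches = entity.search()
    if len(matches) == 1:
        entity.id = matches[0].id
        entity.update()
    else:
        entity.create()
    return True


if __name__ == '__main__':
    ensure(FakeEntity('Centos 7'))
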
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
new file mode 100644
index 00000000..ff6f9c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+- Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+options:
+ name:
+ type: str
+ description:
+ - Name of kernel module to black- or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [ absent, present ]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+'''
+
+EXAMPLES = '''
+- name: Blacklist the nouveau driver module
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: present
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Blacklist(object):
+ def __init__(self, module, filename, checkmode):
+ self.filename = filename
+ self.module = module
+ self.checkmode = checkmode
+
+ def create_file(self):
+ if not self.checkmode and not os.path.exists(self.filename):
+ open(self.filename, 'a').close()
+ return True
+ elif self.checkmode and not os.path.exists(self.filename):
+ self.filename = os.devnull
+ return True
+ else:
+ return False
+
+ def get_pattern(self):
+ return r'^blacklist\s*' + self.module + '$'
+
+ def readlines(self):
+ f = open(self.filename, 'r')
+ lines = f.readlines()
+ f.close()
+ return lines
+
+ def module_listed(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ for line in lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+
+ if re.match(pattern, stripped):
+ return True
+
+ return False
+
+ def remove_module(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ if self.checkmode:
+ f = open(os.devnull, 'w')
+ else:
+ f = open(self.filename, 'w')
+
+ for line in lines:
+ if not re.match(pattern, line.strip()):
+ f.write(line)
+
+ f.close()
+
+ def add_module(self):
+ if self.checkmode:
+ f = open(os.devnull, 'a')
+ else:
+ f = open(self.filename, 'a')
+
+ f.write('blacklist %s\n' % self.module)
+
+ f.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ blacklist_file=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ args = dict(changed=False, failed=False,
+ name=module.params['name'], state=module.params['state'])
+
+ filename = '/etc/modprobe.d/blacklist-ansible.conf'
+
+ if module.params['blacklist_file']:
+ filename = module.params['blacklist_file']
+
+ blacklist = Blacklist(args['name'], filename, module.check_mode)
+
+ if blacklist.create_file():
+ args['changed'] = True
+ else:
+ args['changed'] = False
+
+ if blacklist.module_listed():
+ if args['state'] == 'absent':
+ blacklist.remove_module()
+ args['changed'] = True
+ else:
+ if args['state'] == 'present':
+ blacklist.add_module()
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
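
The kernel_blacklist module decides whether a change is needed by scanning the blacklist file line by line: comment lines are skipped, and a line counts as a match when it is 'blacklist' followed by the module name. Below is a small self-contained check of that matching logic; the sample lines are made up, and re.escape() is added here as a precaution even though the module interpolates the name directly.

import re


def module_listed(lines, name):
    pattern = r'^blacklist\s*' + re.escape(name) + '$'
    for line in lines:
        stripped = line.strip()
        if stripped.startswith('#'):
            continue
        if re.match(pattern, stripped):
            return True
    return False


if __name__ == '__main__':
    sample = ['# blacklist nouveau\n', 'blacklist nouveau\n', 'options snd slots=snd-hda-intel\n']
    print(module_listed(sample, 'nouveau'))  # True
    print(module_listed(sample, 'radeon'))   # False
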
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py
new file mode 100644
index 00000000..b27155ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_client.py
@@ -0,0 +1,879 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak clients via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+ - The Keycloak API does not always sanity check inputs, e.g. you can set
+ SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client
+ - On C(present), the client will be created (or updated if it exists already).
+ - On C(absent), the client will be removed if it exists
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client id of client to be worked on. This is usually an alphanumeric name chosen by
+ you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ This is 'clientId' in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+ - Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
+ is required. If you specify both, this takes precedence.
+ type: str
+
+ name:
+ description:
+ - Name of the client (this is not the same as I(client_id))
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak
+ type: str
+
+ root_url:
+ description:
+ - Root URL appended to relative URLs for this client
+ This is 'rootUrl' in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+ - URL to the admin interface of the client
+ This is 'adminUrl' in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+ - Default URL to use when the auth server needs to redirect or link back to the client
+ This is 'baseUrl' in the Keycloak REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either C(client-secret) or
+ C(client-jwt) can be chosen. When using C(client-secret), the module parameter
+ I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ to configure its behavior.
+ This is 'clientAuthenticatorType' in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using I(client_authenticator_type) C(client-secret) (the default), you can
+ specify a secret here (otherwise one will be generated if it does not exist). If
+ changing this secret, the module will not register a change currently (but the
+ changed secret will be saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration
+ service.
+ This is 'registrationAccessToken' in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - list of default roles for this client. If the client roles referenced do not exist
+ yet, they will be created.
+ This is 'defaultRoles' in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client.
+ This is 'redirectUris' in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+
+ web_origins:
+ description:
+ - List of allowed CORS origins.
+ This is 'webOrigins' in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
+ This is 'notBefore' in the Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only.
+ This is 'bearerOnly' in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access.
+ This is 'consentRequired' in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect).
+ This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect).
+ This is 'implicitFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect).
+ This is 'directAccessGrantsEnabled' in the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect).
+ This is 'serviceAccountsEnabled' in the Keycloak REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect).
+ This is 'authorizationServicesEnabled' in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not.
+ This is 'publicClient' in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not.
+ This is 'frontchannelLogout' in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+ - Type of client (either C(openid-connect) or C(saml)).
+ type: str
+ choices: ['openid-connect', 'saml']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client.
+ This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - dict of registered cluster nodes (with C(nodename) as the key and last registration
+ time as the value).
+ This is 'registeredNodes' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+ - Client template to use for this client. If it does not exist, this field will silently
+ be dropped.
+ This is 'clientTemplate' in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the I(client_template).
+ This is 'useTemplateConfig' in the Keycloak REST API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the I(client_template).
+ This is 'useTemplateScope' in the Keycloak REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the I(client_template).
+ This is 'useTemplateMappers' in the Keycloak REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required.
+ This is 'surrogateAuthRequired' in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - a data structure defining the authorization settings for this client. For reference,
+ please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
+ This is 'authorizationSettings' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client.
+ This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration
+ settings; an example is given in the examples section. While an exhaustive list of
+ permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
+ API does not validate whether a given option is appropriate for the protocol used; if specified
+ anyway, Keycloak will simply not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
+ description:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
+ should be included in the login response.
+
+ saml.client.signature:
+ description:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+
+ saml.encrypt:
+ description:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+
+ saml.force.post.binding:
+ description:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+
+ saml.onetimeuse.condition:
+ description:
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key id in the SAML Extensions element.
+
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+
+
+ saml_force_name_id_format:
+ description:
+ - For SAML clients, Boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
+
+ saml_name_id_format:
+ description:
+ - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent))
+
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely
+ C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+
+ saml_single_logout_service_url_post:
+ description:
+ - SAML POST binding url for the client's single logout service.
+
+ saml_single_logout_service_url_redirect:
+ description:
+ - SAML redirect binding url for the client's single logout service.
+
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
+ OIDC request object. One of C(any), C(none), C(RS256).
+
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
+ public keys.
+
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by
+ client and signed by its key, base64-encoded.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client (minimal example)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: present
+
+- name: Delete a Keycloak client
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: absent
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+ local_action:
+ module: keycloak_client
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ realm: master
+ client_id: test
+ id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+ name: this_is_a_test
+ description: Description of this wonderful client
+ root_url: https://www.example.com/
+ admin_url: https://www.example.com/admin_url
+ base_url: basepath
+ enabled: True
+ client_authenticator_type: client-secret
+ secret: REALLYWELLKEPTSECRET
+ redirect_uris:
+ - https://www.example.com/*
+ - http://localhost:8888/
+ web_origins:
+ - https://www.example.com/*
+ not_before: 1507825725
+ bearer_only: False
+ consent_required: False
+ standard_flow_enabled: True
+ implicit_flow_enabled: False
+ direct_access_grants_enabled: False
+ service_accounts_enabled: False
+ authorization_services_enabled: False
+ public_client: False
+ frontchannel_logout: False
+ protocol: openid-connect
+ full_scope_allowed: false
+ node_re_registration_timeout: -1
+ client_template: test
+ use_template_config: False
+ use_template_scope: false
+ use_template_mappers: no
+ registered_nodes:
+ node01.example.com: 1507828202
+ registration_access_token: eyJWT_TOKEN
+ surrogate_auth_required: false
+ default_roles:
+ - test01
+ - test02
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ consentRequired: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ saml.authnstatement: True
+ saml.client.signature: True
+ saml.force.post.binding: True
+ saml.server.signature: True
+ saml.signature.algorithm: RSA_SHA256
+ saml.signing.certificate: CERTIFICATEHERE
+ saml.signing.private.key: PRIVATEKEYHERE
+ saml_force_name_id_format: False
+ saml_name_id_format: username
+ saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+ user.info.response.signature.alg: RS256
+ request.object.signature.alg: RS256
+ use.jwks.url: true
+ jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+ jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
+
+proposed:
+ description: client representation of proposed changes to client
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+existing:
+ description: client representation of existing client (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+end_state:
+ description: client representation of client after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(clientrep):
+ """ Removes probably sensitive details from a client representation
+
+ :param clientrep: the clientrep dict to be sanitized
+ :return: sanitized clientrep dict
+ """
+ result = clientrep.copy()
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ # copy the nested dict as well, so sanitizing the signing key does not
+ # leak 'no_log' back into the representation that is sent to Keycloak
+ result['attributes'] = result['attributes'].copy()
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return result
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ root_url=dict(type='str', aliases=['rootUrl']),
+ admin_url=dict(type='str', aliases=['adminUrl']),
+ base_url=dict(type='str', aliases=['baseUrl']),
+ surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+ enabled=dict(type='bool'),
+ client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+ secret=dict(type='str', no_log=True),
+ registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+ default_roles=dict(type='list', aliases=['defaultRoles']),
+ redirect_uris=dict(type='list', aliases=['redirectUris']),
+ web_origins=dict(type='list', aliases=['webOrigins']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ bearer_only=dict(type='bool', aliases=['bearerOnly']),
+ consent_required=dict(type='bool', aliases=['consentRequired']),
+ standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+ implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+ direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+ service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+ authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+ public_client=dict(type='bool', aliases=['publicClient']),
+ frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+ node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+ registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+ client_template=dict(type='str', aliases=['clientTemplate']),
+ use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+ use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+ use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['client_id', 'id']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ cid = module.params.get('id')
+ state = module.params.get('state')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ client_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+ # See whether the client already exists in Keycloak
+ if cid is None:
+ before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+ if before_client is not None:
+ cid = before_client['id']
+ else:
+ before_client = kc.get_client_by_id(cid, realm=realm)
+
+ if before_client is None:
+ before_client = dict()
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
+ for client_param in client_params:
+ new_param_value = module.params.get(client_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
+ if isinstance(new_param_value, list):
+ if client_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if client_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
+
+ changeset[camel(client_param)] = new_param_value
+
+ # Whether creating or updating a client, take the before-state and merge the changeset into it
+ updated_client = before_client.copy()
+ updated_client.update(changeset)
+
+ result['proposed'] = sanitize_cr(changeset)
+ result['existing'] = sanitize_cr(before_client)
+
+ # If the client does not exist yet, before_client is still empty
+ if before_client == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client
+ result['changed'] = True
+ if 'clientId' not in updated_client:
+ module.fail_json(msg='client_id needs to be specified when creating a new client')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(updated_client))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client(updated_client, realm=realm)
+ after_client = kc.get_client_by_clientid(updated_client['clientId'], realm=realm)
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been created.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(updated_client))
+ result['changed'] = (before_client != updated_client)
+
+ module.exit_json(**result)
+
+ kc.update_client(cid, updated_client, realm=realm)
+
+ after_client = kc.get_client_by_id(cid, realm=realm)
+ if before_client == after_client:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(after_client))
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been updated.' % updated_client['clientId']
+ module.exit_json(**result)
+ else:
+ # Delete existing client
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = sanitize_cr(before_client)
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
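
keycloak_client builds its changeset by camelCasing every snake_cased option the caller actually set and layering the result over the client representation fetched from Keycloak; whether anything changed then falls out of a plain dict comparison. Here is a hedged sketch of that merge, with a simple camel() stand-in for the shared module_utils helper.

def camel(words):
    # stand-in for the helper imported from module_utils: snake_case -> camelCase
    parts = words.split('_')
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])


def build_changeset(params):
    # keep only the options the caller actually set (None means "not given")
    return dict((camel(key), value) for key, value in params.items() if value is not None)


if __name__ == '__main__':
    before_client = {'clientId': 'test', 'enabled': False}
    changeset = build_changeset({'client_id': 'test', 'enabled': True, 'redirect_uris': None})
    updated_client = before_client.copy()
    updated_client.update(changeset)
    print(changeset)                        # {'clientId': 'test', 'enabled': True}
    print(updated_client != before_client)  # True -> the module would report changed=True
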
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
new file mode 100644
index 00000000..d68198d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clienttemplate
+
+short_description: Allows administration of Keycloak client templates via Keycloak API
+
+
+description:
+ - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+ - The Keycloak API does not always enforce that only sensible settings are used -- you can set
+ SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+options:
+ state:
+ description:
+ - State of the client template
+ - On C(present), the client template will be created (or updated if it exists already).
+ - On C(absent), the client template will be removed if it exists
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - Id of client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak
+ type: str
+
+ protocol:
+ description:
+ - Type of client template (either C(openid-connect) or C(saml)).
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client template.
+ This is 'protocolMappers' in the Keycloak REST API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml); it specifies for which protocol this protocol mapper
+ is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the "existing" field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various
+ configuration settings, though in the default installation of Keycloak as of 3.4, none
+ are documented or known, so this is usually empty.
+ type: dict
+
+notes:
+- The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
+ I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
+ I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
+ they are not available through this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client template (minimal)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+
+- name: Delete Keycloak client template
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ state: absent
+ name: test01
+
+- name: Create or update Keycloak client template (with a protocol mapper)
+ local_action:
+ module: keycloak_clienttemplate
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ protocol_mappers:
+ - config:
+ access.token.claim: True
+ claim.name: "family_name"
+ id.token.claim: True
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: True
+ consentRequired: True
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ full_scope_allowed: false
+ id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
+
+proposed:
+ description: client template representation of proposed changes to client template
+ returned: always
+ type: dict
+ sample: {
+ name: "test01"
+ }
+existing:
+ description: client template representation of existing client template (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+end_state:
+ description: client template representation of client template after module execution (sample is truncated)
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+ )
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+
+ # convert module parameters to client representation parameters (if they belong in there)
+ clientt_params = [x for x in module.params
+ if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
+ 'auth_client_secret', 'auth_username', 'auth_password',
+ 'validate_certs', 'realm'] and module.params.get(x) is not None]
+
+ # See whether the client template already exists in Keycloak
+ if cid is None:
+ before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+ if before_clientt is not None:
+ cid = before_clientt['id']
+ else:
+ before_clientt = kc.get_client_template_by_id(cid, realm=realm)
+
+ if before_clientt is None:
+ before_clientt = dict()
+
+ result['existing'] = before_clientt
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = dict()
+
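+    # Module option names are snake_case; camel() converts them to the camelCase
+    # keys the Keycloak API expects (for example, full_scope_allowed becomes fullScopeAllowed).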
+ for clientt_param in clientt_params:
+ # lists in the Keycloak API are sorted
+ new_param_value = module.params.get(clientt_param)
+ if isinstance(new_param_value, list):
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ changeset[camel(clientt_param)] = new_param_value
+
+    # Whether creating or updating a client template, take the before-state and merge the changeset into it
+ updated_clientt = before_clientt.copy()
+ updated_clientt.update(changeset)
+
+ result['proposed'] = changeset
+
+    # If the client template does not exist yet, before_clientt is still empty
+ if before_clientt == dict():
+ if state == 'absent':
+ # do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Client template does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # create new client template
+ result['changed'] = True
+ if 'name' not in updated_clientt:
+            module.fail_json(msg='name needs to be specified when creating a new client template')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_clientt)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.create_client_template(updated_clientt, realm=realm)
+ after_clientt = kc.get_client_template_by_name(updated_clientt['name'], realm=realm)
+
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been created.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+ if state == 'present':
+ # update existing client template
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client template with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=updated_clientt)
+
+ module.exit_json(**result)
+
+ kc.update_client_template(cid, updated_clientt, realm=realm)
+
+ after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+ if before_clientt == after_clientt:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=after_clientt)
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been updated.' % updated_clientt['name']
+ module.exit_json(**result)
+ else:
+            # Delete the existing client template
+ result['changed'] = True
+ if module._diff:
+ result['diff']['before'] = before_clientt
+ result['diff']['after'] = ''
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ kc.delete_client_template(cid, realm=realm)
+ result['proposed'] = dict()
+ result['end_state'] = dict()
+ result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py
new file mode 100644
index 00000000..45b5c290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/keycloak_group.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+ - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group, where possible provide the group ID to the module. This removes a lookup
+ to the API to translate the name into the group ID.
+
+
+options:
+ state:
+ description:
+ - State of the group.
+ - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the group will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+
+ realm:
+ type: str
+ description:
+      - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but
+ providing it will reduce the number of API calls required.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+notes:
+ - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+- community.general.keycloak
+
+
+author:
+ - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+ community.general.keycloak_group:
+ name: my-group-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-group-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+group:
+ description: Group representation of the group after module execution (sample is truncated).
+ returned: always
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups will have the same parameters as
+ documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ attributes=dict(type='dict')
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name']]))
+
+ result = dict(changed=False, msg='', diff={}, group='')
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(
+ base_url=module.params.get('auth_keycloak_url'),
+ validate_certs=module.params.get('validate_certs'),
+ auth_realm=module.params.get('auth_realm'),
+ client_id=module.params.get('auth_client_id'),
+ auth_username=module.params.get('auth_username'),
+ auth_password=module.params.get('auth_password'),
+ client_secret=module.params.get('auth_client_secret'),
+ )
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('id')
+ name = module.params.get('name')
+ attributes = module.params.get('attributes')
+
+ before_group = None # current state of the group, for merging.
+
+ # does the group already exist?
+ if gid is None:
+ before_group = kc.get_group_by_name(name, realm=realm)
+ else:
+ before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+ before_group = {} if before_group is None else before_group
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
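+    # For example, attributes: {'attrib1': 'value1'} is sent to the API as {'attrib1': ['value1']}.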
+ if attributes is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+ group_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+
+ # build a changeset
+ changeset = {}
+ for param in group_params:
+ new_param_value = module.params.get(param)
+ old_value = before_group[param] if param in before_group else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # prepare the new group
+ updated_group = before_group.copy()
+ updated_group.update(changeset)
+
+    # if before_group is empty, the group doesn't exist.
+ if before_group == {}:
+ if state == 'absent':
+ # nothing to do.
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['msg'] = 'Group does not exist; doing nothing.'
+ result['group'] = dict()
+ module.exit_json(**result)
+
+ # for 'present', create a new group.
+ result['changed'] = True
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new group')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do it for real!
+ kc.create_group(updated_group, realm=realm)
+ after_group = kc.get_group_by_name(name, realm)
+
+ result['group'] = after_group
+ result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'],
+ id=after_group['id'])
+
+ else:
+ if state == 'present':
+ # no changes
+ if updated_group == before_group:
+ result['changed'] = False
+ result['group'] = updated_group
+ result['msg'] = "No changes required to group {name}.".format(name=before_group['name'])
+ module.exit_json(**result)
+
+ # update the existing group
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after=updated_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_group(updated_group, realm=realm)
+
+ after_group = kc.get_group_by_groupid(updated_group['id'], realm=realm)
+
+ result['group'] = after_group
+ result['msg'] = "Group {id} has been updated".format(id=after_group['id'])
+
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ result['group'] = dict()
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete for real
+ gid = before_group['id']
+ kc.delete_group(groupid=gid, realm=realm)
+
+ result['changed'] = True
+ result['msg'] = "Group {name} has been deleted".format(name=before_group['name'])
+
+ module.exit_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py
new file mode 100644
index 00000000..e84d8a6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kibana_plugin.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Thierno IB. BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: True
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+      - For a local file, prefix its absolute path with file://
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h etc."
+ default: 1m
+ plugin_bin:
+ description:
+ - Location of the Kibana binary.
+ default: /opt/kibana/bin/kibana
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana.
+ default: /opt/kibana/installedPlugins/
+ version:
+ description:
+ - Version of the plugin to be installed.
+      - If the plugin exists with a previous version, it will NOT be updated unless C(force) is set to C(yes).
+ force:
+ description:
+      - Delete and re-install the plugin. Can be useful when updating a plugin.
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+
+- name: Install specific version of a plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+- name: Uninstall Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+  description: the command launched during plugin management (install / remove)
+ returned: success
+ type: str
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: str
+url:
+  description: the URL the plugin was installed from
+ returned: success
+ type: str
+timeout:
+ description: the timeout for plugin download
+ returned: success
+ type: str
+stdout:
+ description: the command stdout
+ returned: success
+ type: str
+stderr:
+ description: the command stderr
+ returned: success
+ type: str
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: str
+'''
+
+import os
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+
+def parse_plugin_repo(string):
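+    """Return the bare plugin name from NAME or USERNAME/NAME input, stripping an
+    'elasticsearch-' or 'es-' prefix if present (for example, 'elasticsearch/marvel' -> 'marvel')."""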
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+ # remove elasticsearch- prefix
+ # remove es- prefix
+ for string in ("elasticsearch-", "es-"):
+ if repo.startswith(string):
+ return repo[len(string):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
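+    """Return True if a directory for the plugin already exists below the configured plugin directory."""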
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
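+    """Return the text following 'reason: ' in the plugin tool output, or the raw output if none is found."""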
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
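+    """Install a plugin, using the standalone 'kibana-plugin install' tool on Kibana versions
+    newer than 4.6 and the legacy 'kibana plugin --install' invocation otherwise."""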
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "install"]
+ if url:
+ cmd_args.append(url)
+ else:
+ cmd_args.append(plugin_name)
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
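+    """Remove a plugin, using 'kibana-plugin remove' on Kibana versions newer than 4.6
+    and the legacy 'kibana plugin --remove' invocation otherwise."""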
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "remove", plugin_name]
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ return True, cmd, "check mode", ""
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def get_kibana_version(module, plugin_bin):
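+    """Return the stripped output of running '<plugin_bin> --version', failing the module if the command errors."""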
+ cmd_args = [plugin_bin, '--version']
+ cmd = " ".join(cmd_args)
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to get Kibana version : %s" % err)
+
+ return out.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default="no", type="bool")
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+
+ changed, cmd, out, err = False, '', '', ''
+
+ kibana_version = get_kibana_version(module, plugin_bin)
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py
new file mode 100644
index 00000000..f25d7d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_cdi_upload.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_cdi_upload
+
+short_description: Upload local VM images to CDI Upload Proxy.
+
+
+author: KubeVirt Team (@kubevirt)
+
+
+description:
+ - Use Openshift Python SDK to create UploadTokenRequest objects.
+ - Transfer contents of local files to the CDI Upload Proxy.
+
+options:
+ pvc_name:
+ description:
+ - Use to specify the name of the target PersistentVolumeClaim.
+ required: true
+ pvc_namespace:
+ description:
+ - Use to specify the namespace of the target PersistentVolumeClaim.
+ required: true
+ upload_host:
+ description:
+ - URL containing the host and port on which the CDI Upload Proxy is available.
+ - "More info: U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/upload.md#expose-cdi-uploadproxy-service)"
+ upload_host_validate_certs:
+ description:
+ - Whether or not to verify the CDI Upload Proxy's SSL certificates against your system's CA trust store.
+ default: true
+ type: bool
+ aliases: [ upload_host_verify_ssl ]
+ path:
+ description:
+ - Path of local image file to transfer.
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+ - requests >= 2.0.0
+'''
+
+EXAMPLES = '''
+- name: Upload local image to pvc-vm1
+ community.general.kubevirt_cdi_upload:
+ pvc_namespace: default
+ pvc_name: pvc-vm1
+ upload_host: https://localhost:8443
+ upload_host_validate_certs: false
+ path: /tmp/cirros-0.4.0-x86_64-disk.img
+'''
+
+RETURN = '''# '''
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+
+# 3rd party imports
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+
+SERVICE_ARG_SPEC = {
+ 'pvc_name': {'required': True},
+ 'pvc_namespace': {'required': True},
+ 'upload_host': {'required': True},
+ 'upload_host_validate_certs': {
+ 'type': 'bool',
+ 'default': True,
+ 'aliases': ['upload_host_verify_ssl']
+ },
+ 'path': {'required': True},
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+}
+
+
+class KubeVirtCDIUpload(KubernetesRawModule):
+ def __init__(self, *args, **kwargs):
+ super(KubeVirtCDIUpload, self).__init__(*args, k8s_kind='UploadTokenRequest', **kwargs)
+
+ if not HAS_REQUESTS:
+ self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(SERVICE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ """ Module execution """
+
+ API = 'v1alpha1'
+ KIND = 'UploadTokenRequest'
+
+ self.client = self.get_api_client()
+
+ api_version = 'upload.cdi.kubevirt.io/{0}'.format(API)
+ pvc_name = self.params.get('pvc_name')
+ pvc_namespace = self.params.get('pvc_namespace')
+ upload_host = self.params.get('upload_host')
+ upload_host_verify_ssl = self.params.get('upload_host_validate_certs')
+ path = self.params.get('path')
+
+ definition = defaultdict(defaultdict)
+
+ definition['kind'] = KIND
+ definition['apiVersion'] = api_version
+
+ def_meta = definition['metadata']
+ def_meta['name'] = pvc_name
+ def_meta['namespace'] = pvc_namespace
+
+ def_spec = definition['spec']
+ def_spec['pvcName'] = pvc_name
+
+ # Let's check the file's there before we do anything else
+ imgfile = open(path, 'rb')
+
+ resource = self.find_resource(KIND, api_version, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
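+        # The UploadTokenRequest created above returns a token in its status; it is presented as a
+        # Bearer header while the local image file is streamed to the proxy's upload endpoint.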
+ headers = {'Authorization': "Bearer {0}".format(result['result']['status']['token'])}
+ url = "{0}/{1}/upload".format(upload_host, API)
+ ret = requests.post(url, data=imgfile, headers=headers, verify=upload_host_verify_ssl)
+
+ if ret.status_code != 200:
+ self.fail_request("Something went wrong while uploading data", method='POST', url=url,
+ reason=ret.reason, status_code=ret.status_code)
+
+ self.exit_json(changed=True)
+
+ def fail_request(self, msg, **kwargs):
+ req_info = {}
+ for k, v in kwargs.items():
+ req_info['req_' + k] = v
+ self.fail_json(msg=msg, **req_info)
+
+
+def main():
+ module = KubeVirtCDIUpload()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py
new file mode 100644
index 00000000..7e0776c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_preset.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_preset
+
+short_description: Manage KubeVirt virtual machine presets
+
+description:
+ - Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine presets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine preset.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine preset exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine preset."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: present
+ name: vmi-preset-small
+ namespace: vms
+ memory: 64M
+ selector:
+ matchLabels:
+ kubevirt.io/vmPreset: vmi-preset-small
+
+- name: Remove virtual machine preset 'vmi-preset-small'
+ community.general.kubevirt_preset:
+ state: absent
+ name: vmi-preset-small
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_preset:
+ description:
+ - The virtual machine preset managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC
+)
+
+
+KIND = 'VirtualMachineInstancePreset'
+VMP_ARG_SPEC = {
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMPreset(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VMP_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ # FIXME: Devices must be set, but we don't yet support any
+ # attributes there, remove when we do:
+ definition['spec']['domain']['devices'] = dict()
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of the VM:
+ dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_preset': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMPreset()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py
new file mode 100644
index 00000000..5687c23d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_pvc.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: kubevirt_pvc
+
+short_description: Manage PVCs on Kubernetes
+
+
+author: KubeVirt Team (@kubevirt)
+
+description:
+ - Use Openshift Python SDK to manage PVCs on Kubernetes
+ - Support Containerized Data Importer out of the box
+
+options:
+ resource_definition:
+ description:
+ - "A partial YAML definition of the PVC object being created/updated. Here you can define Kubernetes
+ PVC Resource parameters not covered by this module's parameters."
+ - "NOTE: I(resource_definition) has lower priority than module parameters. If you try to define e.g.
+ I(metadata.namespace) here, that value will be ignored and I(namespace) used instead."
+ aliases:
+ - definition
+ - inline
+ type: dict
+ state:
+ description:
+ - "Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
+ created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
+ C(present), an existing object will be patched, if its attributes differ from those specified using
+ module options and I(resource_definition)."
+ default: present
+ choices:
+ - present
+ - absent
+ force:
+ description:
+ - If set to C(True), and I(state) is C(present), an existing object will be replaced.
+ default: false
+ type: bool
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type.
+ - "This defaults to C(['strategic-merge', 'merge']), which is ideal for using the same parameters
+ on resource kinds that combine Custom Resources and built-in resources."
+ - See U(https://kubernetes.io/docs/tasks/run-application/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment)
+ - If more than one merge_type is given, the merge_types will be tried in order
+ choices:
+ - json
+ - merge
+ - strategic-merge
+ type: list
+ name:
+ description:
+ - Use to specify a PVC object name.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Use to specify a PVC object namespace.
+ required: true
+ type: str
+ annotations:
+ description:
+ - Annotations attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+ type: dict
+ labels:
+ description:
+ - Labels attached to this object.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ selector:
+ description:
+ - A label query over volumes to consider for binding.
+ - U(https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+ type: dict
+ access_modes:
+ description:
+ - Contains the desired access modes the volume should have.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes)"
+ type: list
+ size:
+ description:
+ - How much storage to allocate to the PVC.
+ type: str
+ aliases:
+ - storage
+ storage_class_name:
+ description:
+ - Name of the StorageClass required by the claim.
+ - "More info: U(https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1)"
+ type: str
+ volume_mode:
+ description:
+ - "This defines what type of volume is required by the claim. Value of Filesystem is implied when not
+ included in claim spec. This is an alpha feature of kubernetes and may change in the future."
+ type: str
+ volume_name:
+ description:
+ - This is the binding reference to the PersistentVolume backing this claim.
+ type: str
+ cdi_source:
+ description:
+ - "If data is to be copied onto the PVC using the Containerized Data Importer you can specify the source of
+        the data (along with any additional configuration) as well as its format."
+ - "Valid source types are: blank, http, s3, registry, pvc and upload. The last one requires using the
+ M(community.general.kubevirt_cdi_upload) module to actually perform an upload."
+ - "Source data format is specified using the optional I(content_type). Valid options are C(kubevirt)
+ (default; raw image) and C(archive) (tar.gz)."
+ - "This uses the DataVolume source syntax:
+ U(https://github.com/kubevirt/containerized-data-importer/blob/master/doc/datavolumes.md#https3registry-source)"
+ type: dict
+ wait:
+ description:
+ - "If set, this module will wait for the PVC to become bound and CDI (if enabled) to finish its operation
+ before returning."
+ - "Used only if I(state) set to C(present)."
+ - "Unless used in conjunction with I(cdi_source), this might result in a timeout, as clusters may be configured
+ to not bind PVCs until first usage."
+ default: false
+ type: bool
+ wait_timeout:
+ description:
+ - Specifies how much time in seconds to wait for PVC creation to complete if I(wait) option is enabled.
+ - Default value is reasonably high due to an expectation that CDI might take a while to finish its operation.
+ type: int
+ default: 300
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create a PVC and import data from an external source
+ community.general.kubevirt_pvc:
+ name: pvc1
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ http:
+ url: https://www.source.example/path/of/data/vm.img
+ # If the URL points to a tar.gz containing the disk image, uncomment the line below:
+ #content_type: archive
+
+- name: Create a PVC as a clone from a different PVC
+ community.general.kubevirt_pvc:
+ name: pvc2
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ pvc:
+ namespace: source-ns
+ name: source-pvc
+
+- name: Create a PVC ready for data upload
+ community.general.kubevirt_pvc:
+ name: pvc3
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ upload: yes
+ # You need the kubevirt_cdi_upload module to actually upload something
+
+- name: Create a PVC with a blank raw image
+ community.general.kubevirt_pvc:
+ name: pvc4
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ blank: yes
+
+- name: Create a PVC and fill it with data from a container
+ community.general.kubevirt_pvc:
+ name: pvc5
+ namespace: default
+ size: 100Mi
+ access_modes:
+ - ReadWriteOnce
+ cdi_source:
+ registry:
+ url: "docker://kubevirt/fedora-cloud-registry-disk-demo"
+
+'''
+
+RETURN = '''
+result:
+ description:
+ - The created, patched, or otherwise present object. Will be empty in the case of a deletion.
+ returned: success
+ type: complex
+ contains:
+ api_version:
+ description: The versioned schema of this representation of an object.
+ returned: success
+ type: str
+ kind:
+ description: Represents the REST resource this object represents.
+ returned: success
+ type: str
+ metadata:
+ description: Standard object metadata. Includes name, namespace, annotations, labels, etc.
+ returned: success
+ type: complex
+ spec:
+ description: Specific attributes of the object. Will vary based on the I(api_version) and I(kind).
+ returned: success
+ type: complex
+ status:
+ description: Current status details for the object.
+ returned: success
+ type: complex
+ items:
+ description: Returned only when multiple yaml documents are passed to src or resource_definition
+ returned: when resource_definition or src contains list of objects
+ type: list
+ duration:
+ description: elapsed time of task in seconds
+ returned: when C(wait) is true
+ type: int
+ sample: 48
+'''
+
+
+import copy
+import traceback
+
+from collections import defaultdict
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.module_utils.kubevirt import virtdict, KubeVirtRawModule
+
+
+PVC_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent'
+ ],
+ 'default': 'present'
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'resource_definition': {
+ 'type': 'dict',
+ 'aliases': ['definition', 'inline']
+ },
+ 'labels': {'type': 'dict'},
+ 'annotations': {'type': 'dict'},
+ 'selector': {'type': 'dict'},
+ 'access_modes': {'type': 'list'},
+ 'size': {
+ 'type': 'str',
+ 'aliases': ['storage']
+ },
+ 'storage_class_name': {'type': 'str'},
+ 'volume_mode': {'type': 'str'},
+ 'volume_name': {'type': 'str'},
+ 'cdi_source': {'type': 'dict'},
+ 'wait': {
+ 'type': 'bool',
+ 'default': False
+ },
+ 'wait_timeout': {
+ 'type': 'int',
+ 'default': 300
+ }
+}
+
+
+class CreatePVCFailed(Exception):
+ pass
+
+
+class KubevirtPVC(KubernetesRawModule):
+ def __init__(self):
+ super(KubevirtPVC, self).__init__()
+
+ @property
+ def argspec(self):
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(PVC_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _parse_cdi_source(self, _cdi_src, metadata):
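+        """Translate the cdi_source module parameter into the annotations and labels the
+        Containerized Data Importer expects and attach them to the PVC metadata."""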
+ cdi_src = copy.deepcopy(_cdi_src)
+ annotations = metadata['annotations']
+ labels = metadata['labels']
+
+ valid_content_types = ('kubevirt', 'archive')
+ valid_sources = ('http', 's3', 'pvc', 'upload', 'blank', 'registry')
+
+ if 'content_type' in cdi_src:
+ content_type = cdi_src.pop('content_type')
+ if content_type not in valid_content_types:
+ raise ValueError("cdi_source.content_type must be one of {0}, not: '{1}'".format(
+ valid_content_types, content_type))
+ annotations['cdi.kubevirt.io/storage.contentType'] = content_type
+
+ if len(cdi_src) != 1:
+ raise ValueError("You must specify exactly one valid CDI source, not {0}: {1}".format(len(cdi_src), tuple(cdi_src.keys())))
+
+ src_type = tuple(cdi_src.keys())[0]
+ src_spec = cdi_src[src_type]
+
+ if src_type not in valid_sources:
+ raise ValueError("Got an invalid CDI source type: '{0}', must be one of {1}".format(src_type, valid_sources))
+
+ # True for all cases save one
+ labels['app'] = 'containerized-data-importer'
+
+ if src_type == 'upload':
+ annotations['cdi.kubevirt.io/storage.upload.target'] = ''
+ elif src_type == 'blank':
+ annotations['cdi.kubevirt.io/storage.import.source'] = 'none'
+ elif src_type == 'pvc':
+ if not isinstance(src_spec, dict) or sorted(src_spec.keys()) != ['name', 'namespace']:
+ raise ValueError("CDI Source 'pvc' requires specifying 'name' and 'namespace' (and nothing else)")
+ labels['app'] = 'host-assisted-cloning'
+ annotations['k8s.io/CloneRequest'] = '{0}/{1}'.format(src_spec['namespace'], src_spec['name'])
+ elif src_type in ('http', 's3', 'registry'):
+ if not isinstance(src_spec, dict) or 'url' not in src_spec:
+ raise ValueError("CDI Source '{0}' requires specifying 'url'".format(src_type))
+ unknown_params = set(src_spec.keys()).difference(set(('url', 'secretRef', 'certConfigMap')))
+ if unknown_params:
+ raise ValueError("CDI Source '{0}' does not know recognize params: {1}".format(src_type, tuple(unknown_params)))
+ annotations['cdi.kubevirt.io/storage.import.source'] = src_type
+ annotations['cdi.kubevirt.io/storage.import.endpoint'] = src_spec['url']
+ if 'secretRef' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.secretName'] = src_spec['secretRef']
+ if 'certConfigMap' in src_spec:
+ annotations['cdi.kubevirt.io/storage.import.certConfigMap'] = src_spec['certConfigMap']
+
+ def _wait_for_creation(self, resource, uid):
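+        """Watch the PVC until it is Bound and, when a CDI source is used, until the importer
+        pod reaches the desired phase; raise CreatePVCFailed on failure or timeout."""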
+ return_obj = None
+ desired_cdi_status = 'Succeeded'
+ use_cdi = True if self.params.get('cdi_source') else False
+ if use_cdi and 'upload' in self.params['cdi_source']:
+ desired_cdi_status = 'Running'
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ metadata = entity.metadata
+ if not hasattr(metadata, 'uid') or metadata.uid != uid:
+ continue
+ if entity.status.phase == 'Bound':
+ if use_cdi and hasattr(metadata, 'annotations'):
+ import_status = metadata.annotations.get('cdi.kubevirt.io/storage.pod.phase')
+ if import_status == desired_cdi_status:
+ return_obj = entity
+ break
+ elif import_status == 'Failed':
+ raise CreatePVCFailed("PVC creation incomplete; importing data failed")
+ else:
+ return_obj = entity
+ break
+ elif entity.status.phase == 'Failed':
+ raise CreatePVCFailed("PVC creation failed")
+
+ if not return_obj:
+ raise CreatePVCFailed("PVC creation timed out")
+
+ return self.fix_serialization(return_obj)
+
+ def execute_module(self):
+ KIND = 'PersistentVolumeClaim'
+ API = 'v1'
+
+ definition = virtdict()
+ definition['kind'] = KIND
+ definition['apiVersion'] = API
+
+ metadata = definition['metadata']
+ metadata['name'] = self.params.get('name')
+ metadata['namespace'] = self.params.get('namespace')
+ if self.params.get('annotations'):
+ metadata['annotations'] = self.params.get('annotations')
+ if self.params.get('labels'):
+ metadata['labels'] = self.params.get('labels')
+ if self.params.get('cdi_source'):
+ self._parse_cdi_source(self.params.get('cdi_source'), metadata)
+
+ spec = definition['spec']
+ if self.params.get('access_modes'):
+ spec['accessModes'] = self.params.get('access_modes')
+ if self.params.get('size'):
+ spec['resources']['requests']['storage'] = self.params.get('size')
+ if self.params.get('storage_class_name'):
+ spec['storageClassName'] = self.params.get('storage_class_name')
+ if self.params.get('selector'):
+ spec['selector'] = self.params.get('selector')
+ if self.params.get('volume_mode'):
+ spec['volumeMode'] = self.params.get('volume_mode')
+ if self.params.get('volume_name'):
+ spec['volumeName'] = self.params.get('volume_name')
+
+ # 'resource_definition:' has lower priority than module parameters
+ definition = dict(KubeVirtRawModule.merge_dicts(definition, self.resource_definitions[0]))
+
+ self.client = self.get_api_client()
+ resource = self.find_resource(KIND, API, fail=True)
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+ if self.params.get('wait') and self.params.get('state') == 'present':
+ result['result'] = self._wait_for_creation(resource, result['result']['metadata']['uid'])
+
+ self.exit_json(**result)
+
+
+def main():
+ module = KubevirtPVC()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py
new file mode 100644
index 00000000..d1fdc394
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_rs.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_rs
+
+short_description: Manage KubeVirt virtual machine replica sets
+
+description:
+ - Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Create or delete virtual machine replica sets.
+ default: "present"
+ choices:
+ - present
+ - absent
+ type: str
+ name:
+ description:
+ - Name of the virtual machine replica set.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine replica set exists.
+ required: true
+ type: str
+ selector:
+ description:
+ - "Selector is a label query over a set of virtual machine."
+ required: true
+ type: dict
+ replicas:
+ description:
+ - Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
+      - Replicas defaults to 1 for a newly created replica set.
+ type: int
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: present
+ name: myvmir
+ namespace: vms
+ wait: true
+ replicas: 3
+ memory: 64M
+ labels:
+ myvmi: myvmi
+ selector:
+ matchLabels:
+ myvmi: myvmi
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Remove virtual machine replica set 'myvmir'
+ community.general.kubevirt_rs:
+ state: absent
+ name: myvmir
+ namespace: vms
+ wait: true
+'''
+
+RETURN = '''
+kubevirt_rs:
+ description:
+ - The virtual machine virtual machine replica set managed by the user.
+ - "This dictionary contains all values returned by the KubeVirt API all options
+ are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+import copy
+import traceback
+
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+)
+
+
+KIND = 'VirtualMachineInstanceReplicaSet'
+VMIR_ARG_SPEC = {
+ 'replicas': {'type': 'int'},
+ 'selector': {'type': 'dict'},
+}
+
+
+class KubeVirtVMIRS(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
+ argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
+ return argument_spec
+
+ def wait_for_replicas(self, replicas):
+ """ Wait for ready_replicas to equal the requested number of replicas. """
+ resource = self.find_supported_resource(KIND)
+ return_obj = None
+
+ for event in resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ readyReplicas = status.get('readyReplicas', 0)
+ if readyReplicas == replicas:
+ return_obj = entity
+ break
+
+ if not return_obj:
+ self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas is None:
+ self.fail_json(msg="Failed to fetch the number of ready replicas. Try a higher wait_timeout value.")
+ if replicas and return_obj.status.readyReplicas != replicas:
+ self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
+ "the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
+ return return_obj.to_dict()
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+ selector = self.params.get('selector')
+ replicas = self.params.get('replicas')
+
+ if selector:
+ definition['spec']['selector'] = selector
+
+ if replicas is not None:
+ definition['spec']['replicas'] = replicas
+
+ # defaults for template
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+
+        # Execute the CRUD of the VM:
+ template = definition['spec']['template']
+ dummy, definition = self.construct_vm_definition(KIND, definition, template, defaults)
+ result_crud = self.execute_crud(KIND, definition)
+ changed = result_crud['changed']
+ result = result_crud.pop('result')
+
+ # When creating a new VMIRS object without specifying `replicas`, assume it's '1' to make the
+ # wait logic work correctly
+ if changed and result_crud['method'] == 'create' and replicas is None:
+ replicas = 1
+
+ # Wait for the new number of ready replicas after a CRUD update
+ # Note1: doesn't work correctly when reducing number of replicas due to how VMIRS works (as of kubevirt 1.5.0)
+ # Note2: not the place to wait for the VMIs to get deleted when deleting the VMIRS object; that *might* be
+ # achievable in execute_crud(); keywords: orphanDependents, propagationPolicy, DeleteOptions
+ if self.params.get('wait') and replicas is not None and self.params.get('state') == 'present':
+ result = self.wait_for_replicas(replicas)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_rs': result,
+ 'result': result_crud,
+ })
+
+
+def main():
+ module = KubeVirtVMIRS()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py
new file mode 100644
index 00000000..3054b1a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_template.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_template
+
+short_description: Manage KubeVirt templates
+
+description:
+ - Use Openshift Python SDK to manage the state of KubeVirt templates.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ name:
+ description:
+ - Name of the Template object.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the Template object exists.
+ required: true
+ type: str
+ objects:
+ description:
+ - List of any valid API objects, such as a I(DeploymentConfig), I(Service), etc. The object
+ will be created exactly as defined here, with any parameter values substituted in prior to creation.
+ The definition of these objects can reference parameters defined earlier.
+      - As part of the list the user can also pass the I(VirtualMachine) kind. When passing I(VirtualMachine),
+        the user must use the Ansible structure of the parameters, not the Kubernetes API structure. For more information,
+        please take a look at the M(community.general.kubevirt_vm) module and at the EXAMPLES section, where you can see an example.
+ type: list
+ merge_type:
+ description:
+ - Whether to override the default patch merge approach with a specific type. By default, the strategic
+ merge will typically be used.
+ type: list
+ choices: [ json, merge, strategic-merge ]
+ display_name:
+ description:
+ - "A brief, user-friendly name, which can be employed by user interfaces."
+ type: str
+ description:
+ description:
+ - A description of the template.
+ - "Include enough detail that the user will understand what is being deployed
+ and any caveats they need to know before deploying. It should also provide links to additional information,
+ such as a README file."
+ type: str
+ long_description:
+ description:
+ - "Additional template description. This may be displayed by the service catalog, for example."
+ type: str
+ provider_display_name:
+ description:
+ - "The name of the person or organization providing the template."
+ type: str
+ documentation_url:
+ description:
+ - "A URL referencing further documentation for the template."
+ type: str
+ support_url:
+ description:
+ - "A URL where support can be obtained for the template."
+ type: str
+ editable:
+ description:
+ - "Extension for hinting at which elements should be considered editable.
+ List of jsonpath selectors. The jsonpath root is the objects: element of the template."
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: list
+ default_disk:
+ description:
+ - "The goal of default disk is to define what kind of disk is supported by the OS mainly in
+ terms of bus (ide, scsi, sata, virtio, ...)"
+ - The C(default_disk) parameter defines a configuration overlay that is applied on top of the disks
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_volume:
+ description:
+ - "The goal of default volume is to be able to configure mostly performance parameters like
+ caches if those are exposed by the underlying volume implementation."
+ - The C(default_volume) parameter defines a configuration overlay that is applied on top of the volumes
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_nic:
+ description:
+ - "The goal of default network is similar to I(default_disk) and should be used as a template
+ to ensure OS compatibility and performance."
+ - The C(default_nic) parameter defines a configuration overlay that is applied on top of the NICs
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ default_network:
+ description:
+ - "The goal of default network is similar to I(default_volume) and should be used as a template
+ that specifies performance and connection parameters (an L2 bridge, for example)."
+ - The C(default_network) parameter defines a configuration overlay that is applied on top of the networks
+ during virtual machine creation to provide global compatibility and/or performance defaults.
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: dict
+ icon_class:
+ description:
+ - "An icon to be displayed with your template in the web console. Choose from our existing logo
+ icons when possible. You can also use icons from FontAwesome. Alternatively, provide icons through
+ CSS customizations that can be added to an OpenShift Container Platform cluster that uses your template.
+ Specifying an icon class that does not exist will prevent falling back to the generic icon."
+ type: str
+ parameters:
+ description:
+ - "Parameters allow a value to be supplied by the user or generated when the template is instantiated.
+ Then, that value is substituted wherever the parameter is referenced. References can be defined in any
+ field in the objects list field. This is useful for generating random passwords or allowing the user to
+ supply a host name or other user-specific value that is required to customize the template."
+ - "More information can be found at: U(https://docs.openshift.com/container-platform/3.6/dev_guide/templates.html#writing-parameters)"
+ type: list
+ version:
+ description:
+ - Template structure version.
+ - This parameter can only be used when the KubeVirt add-on is installed on your OpenShift cluster.
+ type: str
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.kubernetes.k8s_state_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Create template 'mytemplate'
+ community.general.kubevirt_template:
+ state: present
+ name: myvmtemplate
+ namespace: templates
+ display_name: Generic cirros template
+ description: Basic cirros template
+ long_description: Verbose description of cirros template
+ provider_display_name: Just Be Cool, Inc.
+ documentation_url: http://theverycoolcompany.com
+ support_url: http://support.theverycoolcompany.com
+ icon_class: icon-linux
+ default_disk:
+ disk:
+ bus: virtio
+ default_nic:
+ model: virtio
+ default_network:
+ resource:
+ resourceName: bridge.network.kubevirt.io/cnvmgmt
+ default_volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ objects:
+ - name: ${NAME}
+ kind: VirtualMachine
+ memory: ${MEMORY_SIZE}
+ state: present
+ namespace: vms
+ parameters:
+ - name: NAME
+ description: VM name
+ generate: expression
+ from: 'vm-[A-Za-z0-9]{8}'
+ - name: MEMORY_SIZE
+ description: Memory size
+ value: 1Gi
+
+- name: Remove template 'myvmtemplate'
+ community.general.kubevirt_template:
+ state: absent
+ name: myvmtemplate
+ namespace: templates
+'''
+
+RETURN = '''
+kubevirt_template:
+ description:
+ - The template dictionary specification returned by the API.
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ API_GROUP,
+ MAX_SUPPORTED_API_VERSION
+)
+
+
+TEMPLATE_ARG_SPEC = {
+ 'name': {'required': True},
+ 'namespace': {'required': True},
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'force': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'merge_type': {
+ 'type': 'list',
+ 'choices': ['json', 'merge', 'strategic-merge']
+ },
+ 'objects': {
+ 'type': 'list',
+ },
+ 'display_name': {
+ 'type': 'str',
+ },
+ 'description': {
+ 'type': 'str',
+ },
+ 'long_description': {
+ 'type': 'str',
+ },
+ 'provider_display_name': {
+ 'type': 'str',
+ },
+ 'documentation_url': {
+ 'type': 'str',
+ },
+ 'support_url': {
+ 'type': 'str',
+ },
+ 'icon_class': {
+ 'type': 'str',
+ },
+ 'version': {
+ 'type': 'str',
+ },
+ 'editable': {
+ 'type': 'list',
+ },
+ 'default_disk': {
+ 'type': 'dict',
+ },
+ 'default_volume': {
+ 'type': 'dict',
+ },
+ 'default_network': {
+ 'type': 'dict',
+ },
+ 'default_nic': {
+ 'type': 'dict',
+ },
+ 'parameters': {
+ 'type': 'list',
+ },
+}
+
+
+class KubeVirtVMTemplate(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(TEMPLATE_ARG_SPEC)
+ return argument_spec
+
+ def execute_module(self):
+ # Parse parameters specific for this module:
+ definition = virtdict()
+
+ # Execute the CRUD of VM template:
+ kind = 'Template'
+ template_api_version = 'template.openshift.io/v1'
+
+ # Fill in template parameters:
+ definition['parameters'] = self.params.get('parameters')
+
+ # Fill in the default Label
+ labels = definition['metadata']['labels']
+ labels['template.cnv.io/type'] = 'vm'
+
+ # Fill in Openshift/Kubevirt template annotations:
+ annotations = definition['metadata']['annotations']
+ if self.params.get('display_name'):
+ annotations['openshift.io/display-name'] = self.params.get('display_name')
+ if self.params.get('description'):
+ annotations['description'] = self.params.get('description')
+ if self.params.get('long_description'):
+ annotations['openshift.io/long-description'] = self.params.get('long_description')
+ if self.params.get('provider_display_name'):
+ annotations['openshift.io/provider-display-name'] = self.params.get('provider_display_name')
+ if self.params.get('documentation_url'):
+ annotations['openshift.io/documentation-url'] = self.params.get('documentation_url')
+ if self.params.get('support_url'):
+ annotations['openshift.io/support-url'] = self.params.get('support_url')
+ if self.params.get('icon_class'):
+ annotations['iconClass'] = self.params.get('icon_class')
+ if self.params.get('version'):
+ annotations['template.cnv.io/version'] = self.params.get('version')
+
+ # TODO: Make it more Ansiblish, so user don't have to specify API JSON path, but rather Ansible params:
+ if self.params.get('editable'):
+ annotations['template.cnv.io/editable'] = self.params.get('editable')
+
+ # Set defaults annotations:
+ if self.params.get('default_disk'):
+ annotations['defaults.template.cnv.io/disk'] = self.params.get('default_disk').get('name')
+ if self.params.get('default_volume'):
+ annotations['defaults.template.cnv.io/volume'] = self.params.get('default_volume').get('name')
+ if self.params.get('default_nic'):
+ annotations['defaults.template.cnv.io/nic'] = self.params.get('default_nic').get('name')
+ if self.params.get('default_network'):
+ annotations['defaults.template.cnv.io/network'] = self.params.get('default_network').get('name')
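+        # Illustrative shape of the resulting metadata (actual values depend on the module parameters):
+        #   metadata:
+        #     labels:
+        #       template.cnv.io/type: vm
+        #     annotations:
+        #       openshift.io/display-name: <display_name>
+        #       defaults.template.cnv.io/disk: <name of default_disk>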
+
+ # Process objects:
+ self.client = self.get_api_client()
+ definition['objects'] = []
+ objects = self.params.get('objects') or []
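+        # Non-VirtualMachine objects are passed through to the template untouched; VirtualMachine entries
+        # are converted below from Ansible-style parameters into full Kubernetes API objects.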
+ for obj in objects:
+ if obj['kind'] != 'VirtualMachine':
+ definition['objects'].append(obj)
+ else:
+ vm_definition = virtdict()
+
+ # Set VM defaults:
+ if self.params.get('default_disk'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['disks'] = [self.params.get('default_disk')]
+ if self.params.get('default_volume'):
+ vm_definition['spec']['template']['spec']['volumes'] = [self.params.get('default_volume')]
+ if self.params.get('default_nic'):
+ vm_definition['spec']['template']['spec']['domain']['devices']['interfaces'] = [self.params.get('default_nic')]
+ if self.params.get('default_network'):
+ vm_definition['spec']['template']['spec']['networks'] = [self.params.get('default_network')]
+
+ # Set kubevirt API version:
+ vm_definition['apiVersion'] = '%s/%s' % (API_GROUP, MAX_SUPPORTED_API_VERSION)
+
+ # Construct k8s vm API object:
+ vm_template = vm_definition['spec']['template']
+ dummy, vm_def = self.construct_vm_template_definition('VirtualMachine', vm_definition, vm_template, obj)
+
+ definition['objects'].append(vm_def)
+
+ # Create template:
+ resource = self.client.resources.get(api_version=template_api_version, kind=kind, name='templates')
+ definition = self.set_defaults(resource, definition)
+ result = self.perform_action(resource, definition)
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': result['changed'],
+ 'kubevirt_template': result.pop('result'),
+ 'result': result,
+ })
+
+
+def main():
+ module = KubeVirtVMTemplate()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py
new file mode 100644
index 00000000..4466bee2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/kubevirt_vm.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kubevirt_vm
+
+short_description: Manage KubeVirt virtual machine
+
+description:
+ - Use Openshift Python SDK to manage the state of KubeVirt virtual machines.
+
+
+author: KubeVirt Team (@kubevirt)
+
+options:
+ state:
+ description:
+ - Set the virtual machine to either I(present), I(absent), I(running) or I(stopped).
+ - "I(present) - Create or update a virtual machine. (And run it if it's ephemeral.)"
+ - "I(absent) - Remove a virtual machine."
+ - "I(running) - Create or update a virtual machine and run it."
+ - "I(stopped) - Stop a virtual machine. (This deletes ephemeral VMs.)"
+ default: "present"
+ choices:
+ - present
+ - absent
+ - running
+ - stopped
+ type: str
+ name:
+ description:
+ - Name of the virtual machine.
+ required: true
+ type: str
+ namespace:
+ description:
+ - Namespace where the virtual machine exists.
+ required: true
+ type: str
+ ephemeral:
+ description:
+ - If C(true), an ephemeral virtual machine will be created. When destroyed, it won't be accessible again.
+ - Works only with C(state) I(present) and I(absent).
+ type: bool
+ default: false
+ datavolumes:
+ description:
+ - "DataVolumes are a way to automate importing virtual machine disks onto pvcs during the virtual machine's
+ launch flow. Without using a DataVolume, users have to prepare a pvc with a disk image before assigning
+ it to a VM or VMI manifest. With a DataVolume, both the pvc creation and the import are automated on behalf of the user."
+ type: list
+ template:
+ description:
+ - "Name of Template to be used in creation of a virtual machine."
+ type: str
+ template_parameters:
+ description:
+ - "New values of parameters from Template."
+ type: dict
+
+extends_documentation_fragment:
+- community.kubernetes.k8s_auth_options
+- community.general.kubevirt_vm_options
+- community.general.kubevirt_common_options
+
+
+requirements:
+ - python >= 2.7
+ - openshift >= 0.8.2
+'''
+
+EXAMPLES = '''
+- name: Start virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+
+- name: Create virtual machine 'myvm' and start it
+ community.general.kubevirt_vm:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64Mi
+ cpu_cores: 1
+ bootloader: efi
+ smbios_uuid: 5d307ca9-b3ef-428c-8861-06e72d69f223
+ cpu_model: Conroe
+ headless: true
+ hugepage_size: 2Mi
+ tablets:
+ - bus: virtio
+ name: tablet1
+ cpu_limit: 3
+ cpu_shares: 2
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Create virtual machine 'myvm' with multus network interface
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: vms
+ memory: 512M
+ interfaces:
+ - name: default
+ bridge: {}
+ network:
+ pod: {}
+ - name: mynet
+ bridge: {}
+ network:
+ multus:
+ networkName: mynetconf
+
+- name: Combine inline definition with Ansible parameters
+ community.general.kubevirt_vm:
+ # Kubernetes specification:
+ definition:
+ metadata:
+ labels:
+ app: galaxy
+ service: web
+ origin: vmware
+
+ # Ansible parameters:
+ state: running
+ name: myvm
+ namespace: vms
+ memory: 64M
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start ephemeral virtual machine 'myvm' and wait to be running
+ community.general.kubevirt_vm:
+ ephemeral: true
+ state: running
+ wait: true
+ wait_timeout: 180
+ name: myvm
+ namespace: vms
+ memory: 64M
+ labels:
+ kubevirt.io/vm: myvm
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/cirros-container-disk-demo:latest
+ path: /custom-disk/cirros.img
+ disk:
+ bus: virtio
+
+- name: Start fedora vm with cloud init
+ community.general.kubevirt_vm:
+ state: running
+ wait: true
+ name: myvm
+ namespace: vms
+ memory: 1024M
+ cloud_init_nocloud:
+ userData: |-
+ #cloud-config
+ password: fedora
+ chpasswd: { expire: False }
+ disks:
+ - name: containerdisk
+ volume:
+ containerDisk:
+ image: kubevirt/fedora-cloud-container-disk-demo:latest
+ path: /disk/fedora.qcow2
+ disk:
+ bus: virtio
+ node_affinity:
+ soft:
+ - weight: 1
+ term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S2
+
+- name: Create virtual machine with datavolume and specify node affinity
+ community.general.kubevirt_vm:
+ name: myvm
+ namespace: default
+ memory: 1024Mi
+ datavolumes:
+ - name: mydv
+ source:
+ http:
+ url: https://url/disk.qcow2
+ pvc:
+ accessModes:
+ - ReadWriteOnce
+ storage: 5Gi
+ node_affinity:
+ hard:
+ - term:
+ match_expressions:
+ - key: security
+ operator: In
+ values:
+ - S1
+
+- name: Remove virtual machine 'myvm'
+ community.general.kubevirt_vm:
+ state: absent
+ name: myvm
+ namespace: vms
+'''
+
+RETURN = '''
+kubevirt_vm:
+ description:
+ - The virtual machine dictionary specification returned by the API.
+ - "This dictionary contains all values returned by the KubeVirt API; all options
+ are described at U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachine)."
+ returned: success
+ type: complex
+ contains: {}
+'''
+
+
+import copy
+import traceback
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
+from ansible_collections.community.general.plugins.module_utils.kubevirt import (
+ virtdict,
+ KubeVirtRawModule,
+ VM_COMMON_ARG_SPEC,
+ VM_SPEC_DEF_ARG_SPEC
+)
+
+VM_ARG_SPEC = {
+ 'ephemeral': {'type': 'bool', 'default': False},
+ 'state': {
+ 'type': 'str',
+ 'choices': [
+ 'present', 'absent', 'running', 'stopped'
+ ],
+ 'default': 'present'
+ },
+ 'datavolumes': {'type': 'list'},
+ 'template': {'type': 'str'},
+ 'template_parameters': {'type': 'dict'},
+}
+
+# Which params (can) modify 'spec:' contents of a VM:
+VM_SPEC_PARAMS = list(VM_SPEC_DEF_ARG_SPEC.keys()) + ['datavolumes', 'template', 'template_parameters']
+
+
+class KubeVirtVM(KubeVirtRawModule):
+
+ @property
+ def argspec(self):
+ """ argspec property builder """
+ argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
+ argument_spec.update(VM_COMMON_ARG_SPEC)
+ argument_spec.update(VM_ARG_SPEC)
+ return argument_spec
+
+ @staticmethod
+ def fix_serialization(obj):
+ if obj and hasattr(obj, 'to_dict'):
+ return obj.to_dict()
+ return obj
+
+ def _wait_for_vmi_running(self):
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ phase = status.get('phase', None)
+ if phase == 'Running':
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to start. Maybe try a higher wait_timeout value?")
+
+ def _wait_for_vm_state(self, new_state):
+ if new_state == 'running':
+ want_created = want_ready = True
+ else:
+ want_created = want_ready = False
+
+ for event in self._kind_resource.watch(namespace=self.namespace, timeout=self.params.get('wait_timeout')):
+ entity = event['object']
+ if entity.metadata.name != self.name:
+ continue
+ status = entity.get('status', {})
+ created = status.get('created', False)
+ ready = status.get('ready', False)
+ if (created, ready) == (want_created, want_ready):
+ return entity
+
+ self.fail("Timeout occurred while waiting for virtual machine to achieve '{0}' state. "
+ "Maybe try a higher wait_timeout value?".format(new_state))
+
+ def manage_vm_state(self, new_state, already_changed):
+        new_running = (new_state == 'running')
+ changed = False
+ k8s_obj = {}
+
+ if not already_changed:
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not k8s_obj:
+ self.fail("VirtualMachine object disappeared during module operation, aborting.")
+ if k8s_obj.spec.get('running', False) == new_running:
+ return False, k8s_obj
+
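+            # Flip only spec.running via a 'merge' patch; the rest of the VM specification is left untouched.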
+ newdef = dict(metadata=dict(name=self.name, namespace=self.namespace), spec=dict(running=new_running))
+ k8s_obj, err = self.patch_resource(self._kind_resource, newdef, k8s_obj,
+ self.name, self.namespace, merge_type='merge')
+ if err:
+ self.fail_json(**err)
+ else:
+ changed = True
+
+ if self.params.get('wait'):
+ k8s_obj = self._wait_for_vm_state(new_state)
+
+ return changed, k8s_obj
+
+ def _process_template_defaults(self, proccess_template, processedtemplate, defaults):
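+        # Roughly: for each defaults.template.cnv.io/* annotation on the template, record the entry of the
+        # processed template whose name matches the annotation value as the module-level default, and keep
+        # the remaining entries of that list in the processed spec.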
+ def set_template_default(default_name, default_name_index, definition_spec):
+ default_value = proccess_template['metadata']['annotations'][default_name]
+ if default_value:
+ values = definition_spec[default_name_index]
+ default_values = [d for d in values if d.get('name') == default_value]
+ defaults[default_name_index] = default_values
+ if definition_spec[default_name_index] is None:
+ definition_spec[default_name_index] = []
+ definition_spec[default_name_index].extend([d for d in values if d.get('name') != default_value])
+
+ devices = processedtemplate['spec']['template']['spec']['domain']['devices']
+ spec = processedtemplate['spec']['template']['spec']
+
+ set_template_default('defaults.template.cnv.io/disk', 'disks', devices)
+ set_template_default('defaults.template.cnv.io/volume', 'volumes', spec)
+ set_template_default('defaults.template.cnv.io/nic', 'interfaces', devices)
+ set_template_default('defaults.template.cnv.io/network', 'networks', spec)
+
+ def construct_definition(self, kind, our_state, ephemeral):
+ definition = virtdict()
+ processedtemplate = {}
+
+ # Construct the API object definition:
+ defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
+ vm_template = self.params.get('template')
+ if vm_template:
+ # Find the template the VM should be created from:
+ template_resource = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='templates')
+ proccess_template = template_resource.get(name=vm_template, namespace=self.params.get('namespace'))
+
+ # Set proper template values taken from module option 'template_parameters':
+ for k, v in self.params.get('template_parameters', {}).items():
+ for parameter in proccess_template.parameters:
+ if parameter.name == k:
+ parameter.value = v
+
+            # Process the template:
+ processedtemplates_res = self.client.resources.get(api_version='template.openshift.io/v1', kind='Template', name='processedtemplates')
+ processedtemplate = processedtemplates_res.create(proccess_template.to_dict()).to_dict()['objects'][0]
+
+ # Process defaults of the template:
+ self._process_template_defaults(proccess_template, processedtemplate, defaults)
+
+ if not ephemeral:
+ definition['spec']['running'] = our_state == 'running'
+ template = definition if ephemeral else definition['spec']['template']
+ template['metadata']['labels']['vm.cnv.io/name'] = self.params.get('name')
+ dummy, definition = self.construct_vm_definition(kind, definition, template, defaults)
+
+ return self.merge_dicts(definition, processedtemplate)
+
+ def execute_module(self):
+ # Parse parameters specific to this module:
+ ephemeral = self.params.get('ephemeral')
+ k8s_state = our_state = self.params.get('state')
+ kind = 'VirtualMachineInstance' if ephemeral else 'VirtualMachine'
+ _used_params = [name for name in self.params if self.params[name] is not None]
+ # Is 'spec:' getting changed?
+        vm_spec_change = bool(set(VM_SPEC_PARAMS).intersection(_used_params))
+ changed = False
+ crud_executed = False
+ method = ''
+
+ # Underlying module_utils/k8s/* code knows only of state == present/absent; let's make sure not to confuse it
+ if ephemeral:
+ # Ephemerals don't actually support running/stopped; we treat those as aliases for present/absent instead
+ if our_state == 'running':
+ self.params['state'] = k8s_state = 'present'
+ elif our_state == 'stopped':
+ self.params['state'] = k8s_state = 'absent'
+ else:
+ if our_state != 'absent':
+ self.params['state'] = k8s_state = 'present'
+
+ # Start with fetching the current object to make sure it exists
+ # If it does, but we end up not performing any operations on it, at least we'll be able to return
+ # its current contents as part of the final json
+ self.client = self.get_api_client()
+ self._kind_resource = self.find_supported_resource(kind)
+ k8s_obj = self.get_resource(self._kind_resource)
+ if not self.check_mode and not vm_spec_change and k8s_state != 'absent' and not k8s_obj:
+ self.fail("It's impossible to create an empty VM or change state of a non-existent VM.")
+
+ # If there are (potential) changes to `spec:` or we want to delete the object, that warrants a full CRUD
+ # Also check_mode always warrants a CRUD, as that'll produce a sane result
+ if vm_spec_change or k8s_state == 'absent' or self.check_mode:
+ definition = self.construct_definition(kind, our_state, ephemeral)
+ result = self.execute_crud(kind, definition)
+ changed = result['changed']
+ k8s_obj = result['result']
+ method = result['method']
+ crud_executed = True
+
+ if ephemeral and self.params.get('wait') and k8s_state == 'present' and not self.check_mode:
+ # Waiting for k8s_state==absent is handled inside execute_crud()
+ k8s_obj = self._wait_for_vmi_running()
+
+ if not ephemeral and our_state in ['running', 'stopped'] and not self.check_mode:
+ # State==present/absent doesn't involve any additional VMI state management and is fully
+ # handled inside execute_crud() (including wait logic)
+ patched, k8s_obj = self.manage_vm_state(our_state, crud_executed)
+ changed = changed or patched
+ if changed:
+ method = method or 'patch'
+
+ # Return from the module:
+ self.exit_json(**{
+ 'changed': changed,
+ 'kubevirt_vm': self.fix_serialization(k8s_obj),
+ 'method': method
+ })
+
+
+def main():
+ module = KubeVirtVM()
+ try:
+ module.execute_module()
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py
new file mode 100644
index 00000000..919d8d7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/launchd.py
@@ -0,0 +1,514 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Martin Migasiewicz <migasiew.nk@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: launchd
+author:
+- Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+- Manage launchd services on target macOS hosts.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - Launchd does not support C(restarted) or C(reloaded) natively.
+ These will trigger a stop/start (restarted) or an unload/load
+ (reloaded).
+ - C(restarted) unloads and loads the service before start to ensure
+ that the latest job definition (plist) is used.
+ - C(reloaded) unloads and loads the service to ensure that the latest
+ job definition (plist) is used. Whether a service is started or
+ stopped depends on the content of the definition file.
+ type: str
+ choices: [ reloaded, restarted, started, stopped, unloaded ]
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled are required.)
+ type: bool
+ force_stop:
+ description:
+ - Whether the service should not be restarted automatically by launchd.
+ - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
+ If this is set to true, stopping a service will cause launchd to start the service again.
+ - Set this option to C(yes) to let this module change the 'KeepAlive' attribute to false.
+ type: bool
+ default: no
+notes:
+- A user must be privileged to manage services using this module.
+requirements:
+- A system managed by launchd
+- The plistlib python library
+'''
+
+EXAMPLES = r'''
+- name: Make sure spotify webhelper is started
+ community.general.launchd:
+ name: com.spotify.webhelper
+ state: started
+
+- name: Deploy custom memcached job definition
+ template:
+ src: org.memcached.plist.j2
+ dest: /Library/LaunchDaemons/org.memcached.plist
+
+- name: Run memcached
+ community.general.launchd:
+ name: org.memcached
+ state: started
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+
+- name: Stop memcached and prevent launchd from restarting it
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+ force_stop: yes
+
+- name: Restart memcached
+ community.general.launchd:
+ name: org.memcached
+ state: restarted
+
+- name: Unload memcached
+ community.general.launchd:
+ name: org.memcached
+ state: unloaded
+'''
+
+RETURN = r'''
+status:
+ description: Metadata about service status
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+'''
+
+import os
+import plistlib
+from abc import ABCMeta, abstractmethod
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class ServiceState:
+ UNKNOWN = 0
+ LOADED = 1
+ STOPPED = 2
+ STARTED = 3
+ UNLOADED = 4
+
+ @staticmethod
+ def to_string(state):
+ strings = {
+ ServiceState.UNKNOWN: 'unknown',
+ ServiceState.LOADED: 'loaded',
+ ServiceState.STOPPED: 'stopped',
+ ServiceState.STARTED: 'started',
+ ServiceState.UNLOADED: 'unloaded'
+ }
+ return strings[state]
+
+
+class Plist:
+ def __init__(self, module, service):
+ self.__changed = False
+ self.__service = service
+
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+
+ # Check if readPlist is available or not
+ self.old_plistlib = hasattr(plistlib, 'readPlist')
+
+ self.__file = self.__find_service_plist(service)
+ if self.__file is None:
+ msg = 'Unable to infer the path of %s service plist file' % service
+ if pid is None and state == ServiceState.UNLOADED:
+ msg += ' and it was not found among active services'
+ module.fail_json(msg=msg)
+ self.__update(module)
+
+ @staticmethod
+ def __find_service_plist(service_name):
+ """Finds the plist file associated with a service"""
+
+ launchd_paths = [
+ os.path.expanduser('~/Library/LaunchAgents'),
+ '/Library/LaunchAgents',
+ '/Library/LaunchDaemons',
+ '/System/Library/LaunchAgents',
+ '/System/Library/LaunchDaemons'
+ ]
+
+ for path in launchd_paths:
+ try:
+ files = os.listdir(path)
+ except OSError:
+ continue
+
+ filename = '%s.plist' % service_name
+ if filename in files:
+ return os.path.join(path, filename)
+ return None
+
+ def __update(self, module):
+ self.__handle_param_enabled(module)
+ self.__handle_param_force_stop(module)
+
+ def __read_plist_file(self, module):
+ service_plist = {}
+ if self.old_plistlib:
+ return plistlib.readPlist(self.__file)
+
+ # readPlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'rb') as plist_fp:
+ service_plist = plistlib.load(plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to read plist file "
+ "%s due to %s" % (self.__file, to_native(e)))
+ return service_plist
+
+ def __write_plist_file(self, module, service_plist=None):
+ if not service_plist:
+ service_plist = {}
+
+ if self.old_plistlib:
+ plistlib.writePlist(service_plist, self.__file)
+ return
+ # writePlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'wb') as plist_fp:
+ plistlib.dump(service_plist, plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to write to plist file "
+ " %s due to %s" % (self.__file, to_native(e)))
+
+ def __handle_param_enabled(self, module):
+ if module.params['enabled'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Enable/disable service startup at boot if requested
+ # Launchctl does not expose functionality to set the RunAtLoad
+ # attribute of a job definition. So we parse and modify the job
+ # definition plist file directly for this purpose.
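+            # A job definition might contain, for example (illustrative plist snippet):
+            #   <key>RunAtLoad</key>
+            #   <true/>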
+ if module.params['enabled'] is not None:
+ enabled = service_plist.get('RunAtLoad', False)
+ if module.params['enabled'] != enabled:
+ service_plist['RunAtLoad'] = module.params['enabled']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def __handle_param_force_stop(self, module):
+ if module.params['force_stop'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Set KeepAlive to false in case force_stop is defined to avoid
+ # that the service gets restarted when stopping was requested.
+ if module.params['force_stop'] is not None:
+ keep_alive = service_plist.get('KeepAlive', False)
+ if module.params['force_stop'] and keep_alive:
+ service_plist['KeepAlive'] = not module.params['force_stop']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def is_changed(self):
+ return self.__changed
+
+ def get_file(self):
+ return self.__file
+
+
+class LaunchCtlTask(object):
+ __metaclass__ = ABCMeta
+ WAITING_TIME = 5 # seconds
+
+ def __init__(self, module, service, plist):
+ self._module = module
+ self._service = service
+ self._plist = plist
+ self._launch = self._module.get_bin_path('launchctl', True)
+
+ def run(self):
+ """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc.
+ and returns the new state and pid.
+ """
+ self.runCommand()
+ return self.get_state()
+
+ @abstractmethod
+ def runCommand(self):
+ pass
+
+ def get_state(self):
+ rc, out, err = self._launchctl("list")
+ if rc != 0:
+ self._module.fail_json(
+ msg='Failed to get status of %s' % (self._launch))
+
+ state = ServiceState.UNLOADED
+ service_pid = "-"
+ status_code = None
+ for line in out.splitlines():
+ if line.strip():
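+                # Each non-empty line of 'launchctl list' output is 'PID<TAB>last-exit-code<TAB>label';
+                # the PID column shows '-' when the job is not currently running.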
+ pid, last_exit_code, label = line.split('\t')
+ if label.strip() == self._service:
+ service_pid = pid
+ status_code = last_exit_code
+
+ # From launchctl man page:
+ # If the number [...] is negative, it represents the
+ # negative of the signal which killed the job. Thus,
+ # "-15" would indicate that the job was terminated with
+ # SIGTERM.
+ if last_exit_code not in ['0', '-2', '-3', '-9', '-15']:
+ # Something strange happened and we have no clue in
+ # which state the service is now. Therefore we mark
+ # the service state as UNKNOWN.
+ state = ServiceState.UNKNOWN
+ elif pid != '-':
+ # PID seems to be an integer so we assume the service
+ # is started.
+ state = ServiceState.STARTED
+ else:
+ # Exit code is 0 and PID is not available so we assume
+ # the service is stopped.
+ state = ServiceState.STOPPED
+ break
+ return (state, service_pid, status_code, err)
+
+ def start(self):
+ rc, out, err = self._launchctl("start")
+ # Unfortunately launchd does not wait until the process really started.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def stop(self):
+ rc, out, err = self._launchctl("stop")
+ # Unfortunately launchd does not wait until the process really stopped.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def restart(self):
+ # TODO: check for rc, out, err
+ self.stop()
+ return self.start()
+
+ def reload(self):
+ # TODO: check for rc, out, err
+ self.unload()
+ return self.load()
+
+ def load(self):
+ return self._launchctl("load")
+
+ def unload(self):
+ return self._launchctl("unload")
+
+ def _launchctl(self, command):
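+        # 'load'/'unload' operate on the plist file path, 'start'/'stop' on the job label,
+        # and any other command (such as 'list') gets no extra argument.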
+ service_or_plist = self._plist.get_file() if command in [
+ 'load', 'unload'] else self._service if command in ['start', 'stop'] else ""
+
+ rc, out, err = self._module.run_command(
+ '%s %s %s' % (self._launch, command, service_or_plist))
+
+ if rc != 0:
+ msg = "Unable to %s '%s' (%s): '%s'" % (
+ command, self._service, self._plist.get_file(), err)
+ self._module.fail_json(msg=msg)
+
+ return (rc, out, err)
+
+
+class LaunchCtlStart(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state in (ServiceState.STOPPED, ServiceState.LOADED):
+ self.reload()
+ self.start()
+ elif state == ServiceState.STARTED:
+ # In case the service is already in started state but the
+ # job definition was changed we need to unload/load the
+ # service and start the service again.
+ if self._plist.is_changed():
+ self.reload()
+ self.start()
+ elif state == ServiceState.UNLOADED:
+ self.load()
+ self.start()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and start the service again.
+ self.reload()
+ self.start()
+
+
+class LaunchCtlStop(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStop, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.STOPPED:
+ # In case the service is stopped and we might later decide
+ # to start it, we need to reload the job definition by
+ # forcing an unload and load first.
+ # Afterwards we need to stop it as it might have been
+ # started again (KeepAlive or RunAtLoad).
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state in (ServiceState.STARTED, ServiceState.LOADED):
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and stop the service gracefully.
+ self.reload()
+ self.stop()
+
+
+class LaunchCtlReload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlReload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.UNLOADED:
+ # launchd throws an error if we do an unload on an already
+ # unloaded service.
+ self.load()
+ else:
+ self.reload()
+
+
+class LaunchCtlUnload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlUnload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+ self.unload()
+
+
+class LaunchCtlRestart(LaunchCtlReload):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlRestart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ super(LaunchCtlRestart, self).runCommand()
+ self.start()
+
+
+class LaunchCtlList(LaunchCtlTask):
+ def __init__(self, module, service):
+ super(LaunchCtlList, self).__init__(module, service, None)
+
+ def runCommand(self):
+ # Do nothing, the list functionality is done by the
+ # base class run method.
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
+ enabled=dict(type='bool'),
+ force_stop=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[
+ ['state', 'enabled'],
+ ],
+ )
+
+ service = module.params['name']
+ action = module.params['state']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': {},
+ }
+
+ # We will tailor the plist file in case one of the options
+ # (enabled, force_stop) was specified.
+ plist = Plist(module, service)
+ result['changed'] = plist.is_changed()
+
+ # Gather information about the service to be controlled.
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+ result['status']['previous_state'] = ServiceState.to_string(state)
+ result['status']['previous_pid'] = pid
+
+ # Map the actions to specific tasks
+ tasks = {
+ 'started': LaunchCtlStart(module, service, plist),
+ 'stopped': LaunchCtlStop(module, service, plist),
+ 'restarted': LaunchCtlRestart(module, service, plist),
+ 'reloaded': LaunchCtlReload(module, service, plist),
+ 'unloaded': LaunchCtlUnload(module, service, plist)
+ }
+
+ status_code = '0'
+ # Run the requested task
+ if not module.check_mode:
+ state, pid, status_code, err = tasks[action].run()
+
+ result['status']['current_state'] = ServiceState.to_string(state)
+ result['status']['current_pid'] = pid
+ result['status']['status_code'] = status_code
+ result['status']['error'] = err
+
+ if (result['status']['current_state'] != result['status']['previous_state'] or
+ result['status']['current_pid'] != result['status']['previous_pid']):
+ result['changed'] = True
+ if module.check_mode:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py
new file mode 100644
index 00000000..3c990205
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/layman.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+short_description: Manage Gentoo overlays
+description:
+ - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+ Please note that Layman must be installed on the managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ type: str
+ list_url:
+ description:
+ - A URL of the alternative overlays list that defines the overlay to install.
+ This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+ C(overlay_defs) is read from Layman's configuration.
+ aliases: [url]
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ default: present
+ choices: [present, absent, updated]
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(no).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+- name: Install the overlay mozilla which is on the central overlays list
+ community.general.layman:
+ name: mozilla
+
+- name: Install the overlay cvut from the specified alternative list
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+
+- name: Update (sync) the overlay cvut or install if not installed yet
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
+
+- name: Update (sync) all of the installed overlays
+ community.general.layman:
+ name: ALL
+ state: updated
+
+- name: Uninstall the overlay cvut
+ community.general.layman:
+ name: cvut
+ state: absent
+'''
+
+import shutil
+import traceback
+
+from os import path
+
+LAYMAN_IMP_ERR = None
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ LAYMAN_IMP_ERR = traceback.format_exc()
+ HAS_LAYMAN_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+USERAGENT = 'ansible-httpget'
+
+
+class ModuleError(Exception):
+ pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
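+    # fetch_url returns a file-like response object and an info dict (with 'status' and 'msg' keys).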
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+ '''Installs the overlay repository. If not on the central overlays list,
+ then :list_url of an alternative list must be provided. The list will be
+ fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the
+ ``overlay_defs`` is read from the Layman's configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+ :returns: True if the overlay was installed, or False if already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+ :returns: True if the overlay was uninstalled, or False if doesn't exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+ if layman.get_errors():
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
+ messages = [str(item[1]) for item in layman.sync_results[2]]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+ module.fail_json(msg=e.message)
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py
new file mode 100644
index 00000000..6f850791
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lbu.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+- Manage the Local Backup Utility of Alpine Linux in run-from-RAM mode.
+
+options:
+ commit:
+ description:
+ - Control whether to commit changed files.
+ type: bool
+ exclude:
+ description:
+ - List of paths to exclude.
+ type: list
+ elements: str
+ include:
+ description:
+ - List of paths to include.
+ type: list
+ elements: str
+
+author:
+- Kaarle Ritvanen (@kunkku)
+'''
+
+EXAMPLES = '''
+# Commit changed files (if any)
+- name: Commit
+ community.general.lbu:
+ commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+ community.general.lbu:
+ commit: true
+ exclude:
+ - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+ community.general.lbu:
+ include:
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+'''
+
+RETURN = '''
+msg:
+ description: Error message
+ type: str
+ returned: on failure
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os.path
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec={
+ 'commit': {'type': 'bool'},
+ 'exclude': {'type': 'list', 'elements': 'str'},
+ 'include': {'type': 'list', 'elements': 'str'}
+ },
+ supports_check_mode=True
+ )
+
+ changed = False
+
+ def run_lbu(*args):
+ code, stdout, stderr = module.run_command(
+ [module.get_bin_path('lbu', required=True)] + list(args)
+ )
+ if code:
+ module.fail_json(changed=changed, msg=stderr)
+ return stdout
+
+ update = False
+ commit = False
+
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ paths = run_lbu(param, '-l').split('\n')
+ for path in module.params[param]:
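+                # Normalize the requested path and strip the leading slash so it can be compared
+                # against the entries reported by 'lbu <include|exclude> -l'.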
+ if os.path.normpath('/' + path)[1:] not in paths:
+ update = True
+
+ if module.params['commit']:
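+        # Any output from 'lbu status' is treated as pending changes that still need a commit.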
+ commit = update or run_lbu('status') > ''
+
+ if module.check_mode:
+ module.exit_json(changed=update or commit)
+
+ if update:
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ run_lbu(param, *module.params[param])
+ changed = True
+
+ if commit:
+ run_lbu('commit')
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py
new file mode 100644
index 00000000..f983b857
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attr.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ldap_attr
+short_description: Add or remove LDAP attribute values
+description:
+ - Add or remove LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. Setting the I(bind_pw) parameter via I(params) was disallowed starting with
+ Ansible-2.7, as it was insecure to set the parameter that way."
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.'
+ alternative: 'Use M(community.general.ldap_attrs) instead. Deprecated in community.general 0.2.0.'
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ name:
+ description:
+ - The name of the attribute to modify.
+ type: str
+ required: true
+ state:
+ description:
+ - The state of the attribute values.
+ - If C(present), all given values will be added if they're missing.
+ - If C(absent), all given values will be removed if present.
+ - If C(exact), the set of values will be forced to exactly those provided and no others.
+ - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed.
+ type: str
+ choices: [ absent, exact, present ]
+ default: present
+ values:
+ description:
+ - The value(s) to add or remove. This can be a string or a list of
+ strings. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ type: raw
+ required: true
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcSuffix
+ values: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcAccess
+ values:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+ by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcDbIndex
+ values: "{{ item }}"
+ with_items:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: "{{ item.key }}"
+ values: "{{ item.value }}"
+ state: exact
+ with_dict:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ args: "{{ ldap_auth }}"
+'''
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttr(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Normalize values
+ if isinstance(self.module.params['values'], list):
+ self.values = list(map(to_bytes, self.module.params['values']))
+ else:
+ self.values = [to_bytes(self.module.params['values'])]
+
+ def add(self):
+ values_to_add = list(filter(self._is_value_absent, self.values))
+
+ if len(values_to_add) > 0:
+ modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def delete(self):
+ values_to_delete = list(filter(self._is_value_present, self.values))
+
+ if len(values_to_delete) > 0:
+ modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def exact(self):
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % self.name, e)
+
+ current = results[0][1].get(self.name, [])
+ modlist = []
+
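+        # python-ldap modlists are lists of (operation, attribute, values)
+        # tuples, for example [(ldap.MOD_REPLACE, 'olcSuffix', [b'dc=example,dc=com'])];
+        # main() later passes the modlist to connection.modify_s(). The set
+        # comparison below happens client-side on byte strings, which is the
+        # caveat the documentation notes mention for I(state=exact).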
+ if frozenset(self.values) != frozenset(current):
+ if len(current) == 0:
+ modlist = [(ldap.MOD_ADD, self.name, self.values)]
+ elif len(self.values) == 0:
+ modlist = [(ldap.MOD_DELETE, self.name, None)]
+ else:
+ modlist = [(ldap.MOD_REPLACE, self.name, self.values)]
+
+ return modlist
+
+ def _is_value_present(self, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, self.name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ name=dict(type='str', required=True),
+ params=dict(type='dict'),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ values=dict(type='raw', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_attr was removed in since it circumvents Ansible's option handling")
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttr(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py
new file mode 100644
index 00000000..ae5cb7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_attrs.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Maciej Delmanowski <drybjed@gmail.com>
+# Copyright: (c) 2017, Alexander Korinek <noles@a3k.net>
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ldap_attrs
+short_description: Add or remove multiple LDAP attribute values
+description:
+ - Add or remove multiple LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+version_added: '0.2.0'
+author:
+ - Jiri Tyr (@jtyr)
+ - Alexander Korinek (@noles)
+ - Maciej Delmanowski (@drybjed)
+requirements:
+ - python-ldap
+options:
+ state:
+ required: false
+ type: str
+ choices: [present, absent, exact]
+ default: present
+ description:
+ - The state of the attribute values. If C(present), all given attribute
+ values will be added if they're missing. If C(absent), all given
+ attribute values will be removed if present. If C(exact), the set of
+ attribute values will be forced to exactly those provided and no others.
+      If I(state=exact) and the attribute's value list is empty, all values for
+      this attribute will be removed.
+ attributes:
+ required: true
+ type: dict
+ description:
+ - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ ordered:
+ required: false
+ type: bool
+ default: 'no'
+ description:
+    - If C(yes), prepend list values with X-ORDERED index numbers in all
+      attributes specified in the current task. This is mostly useful with the
+      I(olcAccess) attribute, to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcSuffix: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+          by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+          by * none
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ ordered: yes
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ state: exact
+
+- name: Remove an attribute with a specific value
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: "An example user account"
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+'''
+
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+import re
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttrs(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.attrs = self.module.params['attributes']
+ self.state = self.module.params['state']
+ self.ordered = self.module.params['ordered']
+
+ def _order_values(self, values):
+ """ Preprend X-ORDERED index numbers to attribute's values. """
+ ordered_values = []
+
+ if isinstance(values, list):
+ for index, value in enumerate(values):
+ cleaned_value = re.sub(r'^\{\d+\}', '', value)
+ ordered_values.append('{' + str(index) + '}' + cleaned_value)
+
+ return ordered_values
+
+ def _normalize_values(self, values):
+ """ Normalize attribute's values. """
+ norm_values = []
+
+ if isinstance(values, list):
+ if self.ordered:
+ norm_values = list(map(to_bytes,
+ self._order_values(list(map(str,
+ values)))))
+ else:
+ norm_values = list(map(to_bytes, values))
+ else:
+ norm_values = [to_bytes(str(values))]
+
+ return norm_values
+
+ def add(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_absent(name, value):
+ modlist.append((ldap.MOD_ADD, name, value))
+
+ return modlist
+
+ def delete(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_present(name, value):
+ modlist.append((ldap.MOD_DELETE, name, value))
+
+ return modlist
+
+ def exact(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % name, e)
+
+ current = results[0][1].get(name, [])
+
+ if frozenset(norm_values) != frozenset(current):
+ if len(current) == 0:
+ modlist.append((ldap.MOD_ADD, name, norm_values))
+ elif len(norm_values) == 0:
+ modlist.append((ldap.MOD_DELETE, name, None))
+ else:
+ modlist.append((ldap.MOD_REPLACE, name, norm_values))
+
+ return modlist
+
+ def _is_value_present(self, name, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, name, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(name, value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(type='dict', required=True),
+ ordered=dict(type='bool', default=False, required=False),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttrs(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py
new file mode 100644
index 00000000..7ee0c3dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_entry.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_entry
+short_description: Add or remove LDAP entries.
+description:
+ - Add or remove LDAP entries. This module only asserts the existence or
+ non-existence of an LDAP entry, not its attributes. To assert the
+ attribute values of an entry, see M(community.general.ldap_attr).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ attributes:
+ description:
+ - If I(state=present), attributes necessary to create an entry. Existing
+ entries are never modified. To assert specific attribute values on an
+ existing entry, use M(community.general.ldap_attr) module instead.
+ type: dict
+ objectClass:
+ description:
+ - If I(state=present), value or list of values to use when creating
+ the entry. It can either be a string or an actual list of
+ strings.
+ type: list
+ elements: str
+ state:
+ description:
+ - The target state of the entry.
+ choices: [present, absent]
+ default: present
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = """
+- name: Make sure we have a parent entry for users
+ community.general.ldap_entry:
+ dn: ou=users,dc=example,dc=com
+ objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP administrator
+ userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+  args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = """
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap.modlist
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.state = self.module.params['state']
+
+ # Add the objectClass into the list of attributes
+ self.module.params['attributes']['objectClass'] = (
+ self.module.params['objectClass'])
+
+ # Load attributes
+ if self.state == 'present':
+ self.attrs = self._load_attrs()
+
+ def _load_attrs(self):
+ """ Turn attribute's value to array. """
+ attrs = {}
+
+ for name, value in self.module.params['attributes'].items():
+ if isinstance(value, list):
+ attrs[name] = list(map(to_bytes, value))
+ else:
+ attrs[name] = [to_bytes(value)]
+
+ return attrs
+
+ def add(self):
+ """ If self.dn does not exist, returns a callable that will add it. """
+ def _add():
+ self.connection.add_s(self.dn, modlist)
+
+ if not self._is_entry_present():
+ modlist = ldap.modlist.addModlist(self.attrs)
+ action = _add
+ else:
+ action = None
+
+ return action
+
+ def delete(self):
+ """ If self.dn exists, returns a callable that will delete it. """
+ def _delete():
+ self.connection.delete_s(self.dn)
+
+ if self._is_entry_present():
+ action = _delete
+ else:
+ action = None
+
+ return action
+
+ def _is_entry_present(self):
+ try:
+ self.connection.search_s(self.dn, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+ else:
+ is_present = True
+
+ return is_present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(default={}, type='dict'),
+ objectClass=dict(type='list', elements='str'),
+ params=dict(type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ required_if=[('state', 'present', ['objectClass'])],
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+ module.fail_json(msg="The `params` option to ldap_attr was removed since it circumvents Ansible's option handling")
+
+ state = module.params['state']
+
+ # Instantiate the LdapEntry object
+ ldap = LdapEntry(module)
+
+ # Get the action function
+ if state == 'present':
+ action = ldap.add()
+ elif state == 'absent':
+ action = ldap.delete()
+
+ # Perform the action
+ if action is not None and not module.check_mode:
+ try:
+ action()
+ except Exception as e:
+ module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=(action is not None))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py
new file mode 100644
index 00000000..8d86ee93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_passwd.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2018, Keller Fuchs <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_passwd
+short_description: Set passwords in LDAP.
+description:
+ - Set a password for an LDAP entry. This module only asserts that
+ a given password is valid for a given entry. To assert the
+ existence of an entry, see M(community.general.ldap_entry).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Keller Fuchs (@KellerFuchs)
+requirements:
+ - python-ldap
+options:
+ passwd:
+ description:
+ - The (plaintext) password to be set for I(dn).
+ type: str
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = """
+- name: Set a password for the admin user
+ community.general.ldap_passwd:
+ dn: cn=admin,dc=example,dc=com
+ passwd: "{{ vault_secret }}"
+
+- name: Setting passwords in bulk
+ community.general.ldap_passwd:
+ dn: "{{ item.key }}"
+ passwd: "{{ item.value }}"
+ with_dict:
+ alice: alice123123
+ bob: "|30b!"
+ admin: "{{ vault_secret }}"
+"""
+
+RETURN = """
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.passwd = self.module.params['passwd']
+
+ def passwd_check(self):
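+        # Open a throwaway connection and attempt a simple bind with the
+        # desired password: a successful bind means the password is already
+        # set (return False), while INVALID_CREDENTIALS means it still needs
+        # to be changed (return True).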
+ try:
+ tmp_con = ldap.initialize(self.server_uri)
+ except ldap.LDAPError as e:
+ self.fail("Cannot initialize LDAP connection", e)
+
+ if self.start_tls:
+ try:
+ tmp_con.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ tmp_con.simple_bind_s(self.dn, self.passwd)
+ except ldap.INVALID_CREDENTIALS:
+ return True
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+ else:
+ return False
+ finally:
+ tmp_con.unbind()
+
+ def passwd_set(self):
+ # Exit early if the password is already valid
+ if not self.passwd_check():
+ return False
+
+ # Change the password (or throw an exception)
+ try:
+ self.connection.passwd_s(self.dn, None, self.passwd)
+ except ldap.LDAPError as e:
+ self.fail("Unable to set password", e)
+
+ # Password successfully changed
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(passwd=dict(no_log=True)),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ ldap = LdapPasswd(module)
+
+ if module.check_mode:
+ module.exit_json(changed=ldap.passwd_check())
+
+ module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py
new file mode 100644
index 00000000..3b1a2833
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ldap_search.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2020, Sebastian Pfahl <eryx@gmx.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in an LDAP server
+description:
+ - Return the results of an LDAP search.
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Sebastian Pfahl (@eryx12o45)
+requirements:
+ - python-ldap
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The LDAP DN to search in.
+ scope:
+ choices: [base, onelevel, subordinate, children]
+ default: base
+ type: str
+ description:
+ - The LDAP scope to use.
+ filter:
+ default: '(objectClass=*)'
+ type: str
+ description:
+ - Used for filtering the LDAP search result.
+ attrs:
+ type: list
+ elements: str
+ description:
+ - A list of attributes for limiting the result. Use an
+ actual list or a comma-separated string.
+ schema:
+ default: false
+ type: bool
+ description:
+ - Set to C(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ register: ldap_groups
+
+- name: Return GIDs for all groups
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ scope: "onelevel"
+ attrs:
+ - "gidNumber"
+ register: ldap_group_gids
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ dn=dict(type='str', required=True),
+ scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+ filter=dict(type='str', default='(objectClass=*)'),
+ attrs=dict(type='list', elements='str'),
+ schema=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if not module.check_mode:
+ try:
+ LdapSearch(module).main()
+ except Exception as exception:
+ module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+ module.exit_json(changed=False)
+
+
+def _extract_entry(dn, attrs):
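+    # Flatten single-valued attributes, e.g.
+    # ('uid=jdoe,...', {'uid': ['jdoe'], 'memberOf': ['g1', 'g2']}) becomes
+    # {'dn': 'uid=jdoe,...', 'uid': 'jdoe', 'memberOf': ['g1', 'g2']}.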
+ extracted = {'dn': dn}
+ for attr, val in list(attrs.items()):
+ if len(val) == 1:
+ extracted[attr] = val[0]
+ else:
+ extracted[attr] = val
+ return extracted
+
+
+class LdapSearch(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ self.dn = self.module.params['dn']
+ self.filterstr = self.module.params['filter']
+ self.attrlist = []
+ self._load_scope()
+ self._load_attrs()
+ self._load_schema()
+
+ def _load_schema(self):
+ self.schema = self.module.boolean(self.module.params['schema'])
+ if self.schema:
+ self.attrsonly = 1
+ else:
+ self.attrsonly = 0
+
+ def _load_scope(self):
+ spec = dict(
+ base=ldap.SCOPE_BASE,
+ onelevel=ldap.SCOPE_ONELEVEL,
+ subordinate=ldap.SCOPE_SUBORDINATE,
+ children=ldap.SCOPE_SUBTREE,
+ )
+ self.scope = spec[self.module.params['scope']]
+
+ def _load_attrs(self):
+ self.attrlist = self.module.params['attrs'] or None
+
+ def main(self):
+ results = self.perform_search()
+ self.module.exit_json(changed=False, results=results)
+
+ def perform_search(self):
+ try:
+ results = self.connection.search_s(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly
+ )
+ if self.schema:
+ return [dict(dn=result[0], attrs=list(result[1].keys())) for result in results]
+ else:
+ return [_extract_entry(result[0], result[1]) for result in results]
+ except ldap.NO_SUCH_OBJECT:
+ self.module.fail_json(msg="Base not found: {0}".format(self.dn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py
new file mode 100644
index 00000000..d0fd406d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/librato_annotation.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: Create an annotation in Librato
+description:
+  - Create an annotation event on the given annotation stream I(name). If the annotation stream does not exist, it will be created automatically.
+author: "Seth Edwards (@Sedward)"
+requirements: []
+options:
+ user:
+ type: str
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account api key
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ type: list
+ description:
+ - See examples
+'''
+
+EXAMPLES = '''
+- name: Create a simple annotation event with a source
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
+
+- name: Create an annotation that includes a link
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
+ links:
+ - rel: example
+ href: http://www.example.com/deploy
+
+- name: Create an annotation with a start_time and end_time
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def post_annotation(module):
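+    # Build the annotation payload and POST it as JSON to the Librato
+    # annotations endpoint; fetch_url() picks up the url_username/url_password
+    # parameters set below and uses them for HTTP basic authentication.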
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+ # Hack send parameters the way fetch_url wants them
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ response_code = str(info['status'])
+ response_body = info['body']
+ if info['status'] != 201:
+ if info['status'] >= 400:
+ module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+ else:
+ module.fail_json(msg="Request Failed. Response code: " + response_code)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(required=False, default=None, type='int'),
+ links=dict(type='list')
+ )
+ )
+
+ post_annotation(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py
new file mode 100644
index 00000000..a35b25b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode.py
@@ -0,0 +1,690 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be C(running).
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ default: present
+ type: str
+ api_key:
+ description:
+ - Linode API key
+ type: str
+ name:
+ description:
+ - Name to give the instance (alphanumeric, dashes, underscore).
+      - To keep things readable in the Linode Web Console, the name is prefixed with C(LinodeID-).
+ required: true
+ type: str
+ displaygroup:
+ description:
+ - Add the instance to a Display Group in Linode Manager.
+ type: str
+ linode_id:
+ description:
+ - Unique ID of a linode server. This value is read-only in the sense that
+ if you specify it on creation of a Linode it will not be used. The
+        Linode API generates these IDs, and we can use the generated value here to
+ reference a Linode more specifically. This is useful for idempotence.
+ aliases: [ lid ]
+ type: int
+ additional_disks:
+ description:
+ - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB.
+ type: list
+ alert_bwin_enabled:
+ description:
+ - Set status of bandwidth in alerts.
+ type: bool
+ alert_bwin_threshold:
+ description:
+ - Set threshold in MB of bandwidth in alerts.
+ type: int
+ alert_bwout_enabled:
+ description:
+ - Set status of bandwidth out alerts.
+ type: bool
+ alert_bwout_threshold:
+ description:
+ - Set threshold in MB of bandwidth out alerts.
+ type: int
+ alert_bwquota_enabled:
+ description:
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ type: bool
+ alert_bwquota_threshold:
+ description:
+ - Set threshold in MB of bandwidth quota alerts.
+ type: int
+ alert_cpu_enabled:
+ description:
+ - Set status of receiving CPU usage alerts.
+ type: bool
+ alert_cpu_threshold:
+ description:
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ type: int
+ alert_diskio_enabled:
+ description:
+ - Set status of receiving disk IO alerts.
+ type: bool
+ alert_diskio_threshold:
+ description:
+ - Set threshold for average IO ops/sec over 2 hour period.
+ type: int
+ backupweeklyday:
+ description:
+ - Integer value for what day of the week to store weekly backups.
+ type: int
+ plan:
+ description:
+ - plan to use for the instance (Linode plan)
+ type: int
+ payment_term:
+ description:
+ - payment term to use for the instance (payment term in months)
+ default: 1
+ choices: [ 1, 12, 24 ]
+ type: int
+ password:
+ description:
+ - root password to apply to a new server (auto generated if missing)
+ type: str
+ private_ip:
+ description:
+ - Add private IPv4 address when Linode is created.
+ - Default is C(false).
+ type: bool
+ ssh_pub_key:
+ description:
+ - SSH public key applied to root user
+ type: str
+ swap:
+ description:
+ - swap size in MB
+ default: 512
+ type: int
+ distribution:
+ description:
+ - distribution to use for the instance (Linode Distribution)
+ type: int
+ datacenter:
+ description:
+ - datacenter to create an instance in (Linode Datacenter)
+ type: int
+ kernel_id:
+ description:
+ - kernel to use for the instance (Linode Kernel)
+ type: int
+ wait:
+ description:
+ - wait for the instance to be in state C(running) before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+ type: int
+ watchdog:
+ description:
+ - Set status of Lassie watchdog.
+ type: bool
+ default: "True"
+requirements:
+ - python >= 2.6
+ - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+ - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API.
+ - C(LINODE_API_KEY) env variable can be used instead.
+ - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
+'''
+
+EXAMPLES = '''
+
+- name: Create a new Linode
+ community.general.linode:
+ name: linode-test1
+ plan: 1
+ datacenter: 7
+ distribution: 129
+ state: present
+ register: linode_creation
+
+- name: Create a server with a private IP Address
+ community.general.linode:
+ module: linode
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Fully configure new server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: yes
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: True
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: True
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: True
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: True
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: True
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
+ - {Label: 'disk1', Size: 2500, Type: 'raw'}
+ - {Label: 'newdisk', Size: 2000}
+ watchdog: True
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Ensure a running server (create if missing)
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: yes
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Delete a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Stop a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
+ delegate_to: localhost
+
+- name: Reboot a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
+ delegate_to: localhost
+'''
+
+import os
+import time
+import traceback
+
+LINODE_IMP_ERR = None
+try:
+ from linode import api as linode_api
+ HAS_LINODE = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def randompass():
+ '''
+    Generate a long random password that complies with Linode requirements
+ '''
+ # Linode API currently requires the following:
+ # It must contain at least two of these four character classes:
+ # lower case letters - upper case letters - numbers - punctuation
+ # we play it safe :)
+ import random
+ import string
+ # as of python 2.4, this reseeds the PRNG from urandom
+ random.seed()
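+    # Draw six characters from each of the four classes, then shuffle the
+    # concatenation so the classes never appear in a predictable order.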
+ lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
+ upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
+ number = ''.join(random.choice(string.digits) for x in range(6))
+ punct = ''.join(random.choice(string.punctuation) for x in range(6))
+ p = lower + upper + number + punct
+ return ''.join(random.sample(p, len(p)))
+
+
+def getInstanceDetails(api, server):
+ '''
+ Return the details of an instance, populating IPs, etc.
+ '''
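+    # Illustrative shape of the returned dict:
+    #   {'id': 123, 'name': 'web1', 'ipv4': '203.0.113.10', 'fqdn': '...',
+    #    'public': [{'ipv4': ..., 'fqdn': ..., 'ip_id': ...}], 'private': []}
+    # The top-level 'ipv4'/'fqdn' keys mirror the first public address found.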
+ instance = {'id': server['LINODEID'],
+ 'name': server['LABEL'],
+ 'public': [],
+ 'private': []}
+
+ # Populate with ips
+ for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+ if ip['ISPUBLIC'] and 'ipv4' not in instance:
+ instance['ipv4'] = ip['IPADDRESS']
+ instance['fqdn'] = ip['RDNS_NAME']
+ if ip['ISPUBLIC']:
+ instance['public'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ else:
+ instance['private'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ return instance
+
+
+def linodeServers(module, api, state, name,
+ displaygroup, plan, additional_disks, distribution,
+ datacenter, kernel_id, linode_id, payment_term, password,
+ private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+ instances = []
+ changed = False
+ new_server = False
+ servers = []
+ disks = []
+ configs = []
+ jobs = []
+
+ # See if we can match an existing server details with the provided linode_id
+ if linode_id:
+ # For the moment we only consider linode_id as criteria for match
+ # Later we can use more (size, name, etc.) and update existing
+ servers = api.linode_list(LinodeId=linode_id)
+ # Attempt to fetch details about disks and configs only if servers are
+ # found with linode_id
+ if servers:
+ disks = api.linode_disk_list(LinodeId=linode_id)
+ configs = api.linode_config_list(LinodeId=linode_id)
+
+ # Act on the state
+ if state in ('active', 'present', 'started'):
+ # TODO: validate all the plan / distribution / datacenter are valid
+
+ # Multi step process/validation:
+ # - need linode_id (entity)
+ # - need disk_id for linode_id - create disk from distrib
+ # - need config_id for linode_id - create config (need kernel)
+
+ # Any create step triggers a job that need to be waited for.
+ if not servers:
+ for arg in (name, plan, distribution, datacenter):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state)) # @TODO use required_if instead
+ # Create linode entity
+ new_server = True
+
+ # Get size of all individually listed disks to subtract from Distribution disk
+ used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+ try:
+ res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+ PaymentTerm=payment_term)
+ linode_id = res['LinodeID']
+ # Update linode Label to match name
+ api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+ # Update Linode with Ansible configuration options
+ api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+ # Save server
+ servers = api.linode_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Add private IP to Linode
+ if private_ip:
+ try:
+ res = api.linode_ip_addprivate(LinodeID=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not disks:
+ for arg in (name, linode_id, distribution):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state))
+ # Create disks (1 from distrib, 1 for SWAP)
+ new_server = True
+ try:
+ if not password:
+ # Password is required on creation, if not provided generate one
+ password = randompass()
+ if not swap:
+ swap = 512
+ # Create data disk
+ size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+ if ssh_pub_key:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password, rootSSHKey=ssh_pub_key,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ else:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ jobs.append(res['JobID'])
+ # Create SWAP disk
+ res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+ Label='%s swap disk (lid: %s)' % (name, linode_id),
+ Size=swap)
+ # Create individually listed disks at specified size
+ if additional_disks:
+ for disk in additional_disks:
+ # If a disk Type is not passed in, default to ext4
+ if disk.get('Type') is None:
+ disk['Type'] = 'ext4'
+ res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+ jobs.append(res['JobID'])
+ except Exception as e:
+ # TODO: destroy linode ?
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ if not configs:
+ for arg in (name, linode_id, distribution):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg, state))
+
+ # Check architecture
+ for distrib in api.avail_distributions():
+ if distrib['DISTRIBUTIONID'] != distribution:
+ continue
+ arch = '32'
+ if distrib['IS64BIT']:
+ arch = '64'
+ break
+
+ # Get latest kernel matching arch if kernel_id is not specified
+ if not kernel_id:
+ for kernel in api.avail_kernels():
+ if not kernel['LABEL'].startswith('Latest %s' % arch):
+ continue
+ kernel_id = kernel['KERNELID']
+ break
+
+ # Get disk list
+ disks_id = []
+ for disk in api.linode_disk_list(LinodeId=linode_id):
+ if disk['TYPE'] == 'ext3':
+ disks_id.insert(0, str(disk['DISKID']))
+ continue
+ disks_id.append(str(disk['DISKID']))
+ # Trick to get the 9 items in the list
+ while len(disks_id) < 9:
+ disks_id.append('')
+ disks_list = ','.join(disks_id)
+
+ # Create config
+ new_server = True
+ try:
+ api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+ Disklist=disks_list, Label='%s config' % name)
+ configs = api.linode_config_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Start / Ensure servers are running
+ for server in servers:
+ # Refresh server state
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # Ensure existing servers are up and running, boot if necessary
+ if server['STATUS'] != 1:
+ res = api.linode_boot(LinodeId=linode_id)
+ jobs.append(res['JobID'])
+ changed = True
+
+ # wait here until the instances are up
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ # refresh the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # status:
+ # -2: Boot failed
+ # 1: Running
+ if server['STATUS'] in (-2, 1):
+ break
+ time.sleep(5)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+ # Get a fresh copy of the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ if server['STATUS'] == -2:
+ module.fail_json(msg='%s (lid: %s) failed to boot' %
+ (server['LABEL'], server['LINODEID']))
+ # From now on we know the task is a success
+ # Build instance report
+ instance = getInstanceDetails(api, server)
+ # depending on wait flag select the status
+ if wait:
+ instance['status'] = 'Running'
+ else:
+ instance['status'] = 'Starting'
+
+ # Return the root password if this is a new box and no SSH key
+ # has been provided
+ if new_server and not ssh_pub_key:
+ instance['password'] = password
+ instances.append(instance)
+
+ elif state in ('stopped'):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for stopped state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ if server['STATUS'] != 2:
+ try:
+ res = api.linode_shutdown(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Stopping'
+ changed = True
+ else:
+ instance['status'] = 'Stopped'
+ instances.append(instance)
+
+ elif state in ('restarted'):
+ if not linode_id:
+ module.fail_json(msg='linode_id is required for restarted state')
+
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ res = api.linode_reboot(LinodeId=server['LINODEID'])
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Restarting'
+ changed = True
+ instances.append(instance)
+
+ elif state in ('absent', 'deleted'):
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+ instance['status'] = 'Deleting'
+ changed = True
+ instances.append(instance)
+
+ # Ease parsing if only 1 instance
+ if len(instances) == 1:
+ module.exit_json(changed=changed, instance=instances[0])
+
+ module.exit_json(changed=changed, instances=instances)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
+ api_key=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ alert_bwin_enabled=dict(type='bool'),
+ alert_bwin_threshold=dict(type='int'),
+ alert_bwout_enabled=dict(type='bool'),
+ alert_bwout_threshold=dict(type='int'),
+ alert_bwquota_enabled=dict(type='bool'),
+ alert_bwquota_threshold=dict(type='int'),
+ alert_cpu_enabled=dict(type='bool'),
+ alert_cpu_threshold=dict(type='int'),
+ alert_diskio_enabled=dict(type='bool'),
+ alert_diskio_threshold=dict(type='int'),
+ backupsenabled=dict(type='int'),
+ backupweeklyday=dict(type='int'),
+ backupwindow=dict(type='int'),
+ displaygroup=dict(type='str', default=''),
+ plan=dict(type='int'),
+ additional_disks=dict(type='list'),
+ distribution=dict(type='int'),
+ datacenter=dict(type='int'),
+ kernel_id=dict(type='int'),
+ linode_id=dict(type='int', aliases=['lid']),
+ payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
+ password=dict(type='str', no_log=True),
+ private_ip=dict(type='bool'),
+ ssh_pub_key=dict(type='str'),
+ swap=dict(type='int', default=512),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ watchdog=dict(type='bool', default=True),
+ ),
+ )
+
+ if not HAS_LINODE:
+ module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
+
+ state = module.params.get('state')
+ api_key = module.params.get('api_key')
+ name = module.params.get('name')
+ alert_bwin_enabled = module.params.get('alert_bwin_enabled')
+ alert_bwin_threshold = module.params.get('alert_bwin_threshold')
+ alert_bwout_enabled = module.params.get('alert_bwout_enabled')
+ alert_bwout_threshold = module.params.get('alert_bwout_threshold')
+ alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
+ alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
+ alert_cpu_enabled = module.params.get('alert_cpu_enabled')
+ alert_cpu_threshold = module.params.get('alert_cpu_threshold')
+ alert_diskio_enabled = module.params.get('alert_diskio_enabled')
+ alert_diskio_threshold = module.params.get('alert_diskio_threshold')
+ backupsenabled = module.params.get('backupsenabled')
+ backupweeklyday = module.params.get('backupweeklyday')
+ backupwindow = module.params.get('backupwindow')
+ displaygroup = module.params.get('displaygroup')
+ plan = module.params.get('plan')
+ additional_disks = module.params.get('additional_disks')
+ distribution = module.params.get('distribution')
+ datacenter = module.params.get('datacenter')
+ kernel_id = module.params.get('kernel_id')
+ linode_id = module.params.get('linode_id')
+ payment_term = module.params.get('payment_term')
+ password = module.params.get('password')
+ private_ip = module.params.get('private_ip')
+ ssh_pub_key = module.params.get('ssh_pub_key')
+ swap = module.params.get('swap')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ watchdog = int(module.params.get('watchdog'))
+
+ kwargs = dict()
+ check_items = dict(
+ alert_bwin_enabled=alert_bwin_enabled,
+ alert_bwin_threshold=alert_bwin_threshold,
+ alert_bwout_enabled=alert_bwout_enabled,
+ alert_bwout_threshold=alert_bwout_threshold,
+ alert_bwquota_enabled=alert_bwquota_enabled,
+ alert_bwquota_threshold=alert_bwquota_threshold,
+ alert_cpu_enabled=alert_cpu_enabled,
+ alert_cpu_threshold=alert_cpu_threshold,
+ alert_diskio_enabled=alert_diskio_enabled,
+ alert_diskio_threshold=alert_diskio_threshold,
+ backupweeklyday=backupweeklyday,
+ backupwindow=backupwindow,
+ )
+
+ for key, value in check_items.items():
+ if value is not None:
+ kwargs[key] = value
+
+ # Setup the api_key
+ if not api_key:
+ try:
+ api_key = os.environ['LINODE_API_KEY']
+ except KeyError as e:
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+ # setup the auth
+ try:
+ api = linode_api.Api(api_key)
+ api.test_echo()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ linodeServers(module, api, state, name,
+ displaygroup, plan,
+ additional_disks, distribution, datacenter, kernel_id, linode_id,
+ payment_term, password, private_ip, ssh_pub_key, swap, wait,
+ wait_timeout, watchdog, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py
new file mode 100644
index 00000000..17a697b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/linode_v4.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode_v4
+short_description: Manage instances on the Linode cloud.
+description: Manage instances on the Linode cloud.
+requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+author:
+ - Luke Murphy (@decentral1se)
+notes:
+ - No Linode resizing is currently implemented. This module will, in time,
+ replace the current Linode module which uses deprecated API bindings on the
+ Linode side.
+options:
+ region:
+ description:
+ - The region of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/regions/).
+ required: false
+ type: str
+ image:
+ description:
+ - The image of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/images/).
+ type: str
+ required: false
+ type:
+ description:
+ - The type of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/linode-types/).
+ type: str
+ required: false
+ label:
+ description:
+ - The instance label. This label is used as the main determiner for
+ idempotence for the module and is therefore mandatory.
+ type: str
+ required: true
+ group:
+ description:
+      - The group that the instance should be marked under. Please note that
+ group labelling is deprecated but still supported. The encouraged
+ method for marking instances is to use tags.
+ type: str
+ required: false
+ tags:
+ description:
+ - The tags that the instance should be marked under. See
+ U(https://www.linode.com/docs/api/tags/).
+ required: false
+ type: list
+ root_pass:
+ description:
+ - The password for the root user. If not specified, one will be
+ generated. This generated password will be available in the task
+ success JSON.
+ required: false
+ type: str
+ authorized_keys:
+ description:
+ - A list of SSH public key parts to deploy for the root user.
+ required: false
+ type: list
+ state:
+ description:
+ - The desired instance state.
+ type: str
+ choices:
+ - present
+ - absent
+ required: true
+ access_token:
+ description:
+ - The Linode API v4 access token. It may also be specified by exposing
+ the C(LINODE_ACCESS_TOKEN) environment variable. See
+ U(https://www.linode.com/docs/api#access-and-authentication).
+ required: true
+ type: str
+ stackscript_id:
+ description:
+ - The numeric ID of the StackScript to use when creating the instance.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: int
+ version_added: 1.3.0
+ stackscript_data:
+ description:
+ - An object containing arguments to any User Defined Fields present in
+ the StackScript used when creating the instance.
+ Only valid when a stackscript_id is provided.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: dict
+ version_added: 1.3.0
+'''
+
+EXAMPLES = """
+- name: Create a new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ root_pass: passw0rd
+ authorized_keys:
+ - "ssh-rsa ..."
+ stackscript_id: 1337
+ stackscript_data:
+ variable: value
+ state: present
+
+- name: Delete that new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ state: absent
+"""
+
+RETURN = """
+instance:
+ description: The instance description in JSON serialized form.
+ returned: Always.
+ type: dict
+ sample: {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+
+LINODE_IMP_ERR = None
+try:
+ from linode_api4 import Instance, LinodeClient
+ HAS_LINODE_DEPENDENCY = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE_DEPENDENCY = False
+
+
+def create_linode(module, client, **kwargs):
+ """Creates a Linode instance and handles return format."""
+ if kwargs['root_pass'] is None:
+ kwargs.pop('root_pass')
+
+ try:
+ response = client.linode.instance_create(**kwargs)
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
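+    # linode_api4 is expected to return either a bare Instance or, typically when
+    # the root password was auto-generated, an (Instance, generated_root_password)
+    # tuple; handle both shapes so the password ends up in the returned JSON.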
+ try:
+ if isinstance(response, tuple):
+ instance, root_pass = response
+ instance_json = instance._raw_json
+ instance_json.update({'root_pass': root_pass})
+ return instance_json
+ else:
+ return response._raw_json
+ except TypeError:
+ module.fail_json(msg='Unable to parse Linode instance creation'
+ ' response. Please raise a bug against this'
+ ' module on https://github.com/ansible/ansible/issues'
+ )
+
+
+def maybe_instance_from_label(module, client):
+ """Try to retrieve an instance based on a label."""
+ try:
+ label = module.params['label']
+ result = client.linode.instances(Instance.label == label)
+ return result[0]
+ except IndexError:
+ return None
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+
+def initialise_module():
+ """Initialise the module parameter specification."""
+ return AnsibleModule(
+ argument_spec=dict(
+ label=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ access_token=dict(
+ type='str',
+ required=True,
+ no_log=True,
+ fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
+ ),
+ authorized_keys=dict(type='list', required=False),
+ group=dict(type='str', required=False),
+ image=dict(type='str', required=False),
+ region=dict(type='str', required=False),
+ root_pass=dict(type='str', required=False, no_log=True),
+ tags=dict(type='list', required=False),
+ type=dict(type='str', required=False),
+ stackscript_id=dict(type='int', required=False),
+ stackscript_data=dict(type='dict', required=False),
+ ),
+ supports_check_mode=False,
+ required_one_of=(
+ ['state', 'label'],
+ ),
+ required_together=(
+ ['region', 'image', 'type'],
+ )
+ )
+
+
+def build_client(module):
+ """Build a LinodeClient."""
+ return LinodeClient(
+ module.params['access_token'],
+ user_agent=get_user_agent('linode_v4_module')
+ )
+
+
+def main():
+ """Module entrypoint."""
+ module = initialise_module()
+
+ if not HAS_LINODE_DEPENDENCY:
+ module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
+
+ client = build_client(module)
+ instance = maybe_instance_from_label(module, client)
+
+ if module.params['state'] == 'present' and instance is not None:
+ module.exit_json(changed=False, instance=instance._raw_json)
+
+ elif module.params['state'] == 'present' and instance is None:
+ instance_json = create_linode(
+ module, client,
+ authorized_keys=module.params['authorized_keys'],
+ group=module.params['group'],
+ image=module.params['image'],
+ label=module.params['label'],
+ region=module.params['region'],
+ root_pass=module.params['root_pass'],
+ tags=module.params['tags'],
+ ltype=module.params['type'],
+ stackscript=module.params['stackscript_id'],
+ stackscript_data=module.params['stackscript_data'],
+ )
+ module.exit_json(changed=True, instance=instance_json)
+
+ elif module.params['state'] == 'absent' and instance is not None:
+ instance.delete()
+ module.exit_json(changed=True, instance=instance._raw_json)
+
+ elif module.params['state'] == 'absent' and instance is None:
+ module.exit_json(changed=False, instance={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
new file mode 100644
index 00000000..27ecca8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Nathan Davison <ndavison85@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: listen_ports_facts
+author:
+ - Nathan Davison (@ndavison)
+description:
+  - Gather facts on processes listening on TCP and UDP ports using the netstat command.
+ - This module currently supports Linux only.
+requirements:
+ - netstat
+short_description: Gather facts on processes listening on TCP and UDP ports.
+'''
+
+EXAMPLES = r'''
+- name: Gather facts on listening ports
+ community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+ ansible.builtin.debug:
+ msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+ vars:
+    tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+ tcp_whitelist:
+ - 22
+ - 25
+ loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+ ansible.builtin.debug:
+ msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of TCP and UDP ports with listening servers
+ returned: always
+ type: complex
+ contains:
+ tcp_listen:
+ description: A list of processes that are listening on a TCP port.
+ returned: if TCP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "mysqld"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 1223
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 3306
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "tcp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "mysql"
+ udp_listen:
+ description: A list of processes that are listening on a UDP port.
+ returned: if UDP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "rsyslogd"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 609
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 514
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "udp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "root"
+'''
+
+import re
+import platform
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def netStatParse(raw):
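+    # Parse `netstat -plunt`-style output. A listening line is assumed to look like:
+    #   tcp   0  0 0.0.0.0:3306  0.0.0.0:*  LISTEN  1223/mysqld
+    #   udp   0  0 0.0.0.0:514   0.0.0.0:*          609/rsyslogd
+    # Column 4 holds the local address:port; the trailing PID/program pair sits in
+    # column 7 for tcp and column 6 for udp (udp lines have no state column).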
+ results = list()
+ for line in raw.splitlines():
+ listening_search = re.search('[^ ]+:[0-9]+', line)
+ if listening_search:
+ splitted = line.split()
+ conns = re.search('([^ ]+):([0-9]+)', splitted[3])
+ pidstr = ''
+ if 'tcp' in splitted[0]:
+ protocol = 'tcp'
+ pidstr = splitted[6]
+ elif 'udp' in splitted[0]:
+ protocol = 'udp'
+ pidstr = splitted[5]
+ pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
+ if conns and pids:
+ address = conns.group(1)
+ port = conns.group(2)
+                if pids.group(2):
+                    pid = pids.group(2)
+                else:
+                    pid = 0
+                if pids.group(3):
+                    name = pids.group(3)
+                else:
+                    name = ''
+ result = {
+ 'pid': int(pid),
+ 'address': address,
+ 'port': int(port),
+ 'protocol': protocol,
+ 'name': name,
+ }
+ if result not in results:
+ results.append(result)
+ else:
+ raise EnvironmentError('Could not get process information for the listening ports.')
+ return results
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ if platform.system() != 'Linux':
+ module.fail_json(msg='This module requires Linux.')
+
+ def getPidSTime(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+ stime = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if 'started' not in line:
+ stime = line
+ return stime
+
+ def getPidUser(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+ user = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if line != 'USER':
+ user = line
+ return user
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': {
+ 'tcp_listen': [],
+ 'udp_listen': [],
+ },
+ }
+
+ try:
+ netstat_cmd = module.get_bin_path('netstat', True)
+
+ # which ports are listening for connections?
+ rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
+ if rc == 0:
+ netstatOut = netStatParse(stdout)
+ for p in netstatOut:
+ p['stime'] = getPidSTime(p['pid'])
+ p['user'] = getPidUser(p['pid'])
+ if p['protocol'] == 'tcp':
+ result['ansible_facts']['tcp_listen'].append(p)
+ elif p['protocol'] == 'udp':
+ result['ansible_facts']['udp_listen'].append(p)
+ except (KeyError, EnvironmentError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py
new file mode 100644
index 00000000..ae86db40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lldp.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ community.general.lldp:
+
+ - name: Print each switch/port
+ ansible.builtin.debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+ with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
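+    # `lldpctl -f keyvalue` prints flat dotted keys, one per line, for example:
+    #   lldp.eth0.chassis.name=switch1.example.com
+    #   lldp.eth0.port.ifname=Gi0/24
+    # (illustrative output; exact keys depend on lldpd and its neighbours).
+    # The loop below rebuilds the dotted paths into a nested dict keyed by interface.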
+ cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue']
+ rc, output, err = module.run_command(cmd)
+ if output:
+ output_dict = {}
+ current_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ value = current_dict[final] + '\n' + entry
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp(module)
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+        module.fail_json(msg="lldpctl command failed. Is lldpd running?")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py
new file mode 100644
index 00000000..9a5b84f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ type: str
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ community.general.locale_gen:
+ name: de_CH.UTF-8
+ state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+    * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+ fd = open(__locales_available, 'r')
+ for line in fd:
+ result = re_compiled.match(line)
+ if result and result.group('locale') == name:
+ return True
+ fd.close()
+ return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [line.replace(existing_line, new_line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
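+    # /etc/locale.gen keeps one "<locale> <charset>" entry per line, commented out
+    # with '#' when disabled, e.g. "# de_CH.UTF-8 UTF-8" vs "de_CH.UTF-8 UTF-8".
+    # Enabling rewrites the line without the leading '#', disabling puts it back.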
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [re.sub(search_string, new_string, line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ try:
+ f = open("/var/lib/locales/supported.d/local", "r")
+ content = f.readlines()
+ finally:
+ f.close()
+ try:
+ f = open("/var/lib/locales/supported.d/local", "w")
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ f.write(line)
+ finally:
+ f.close()
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py
new file mode 100644
index 00000000..8f39fb51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+    - Sends logs to LogEntries in real time.
+options:
+ path:
+ type: str
+ description:
+ - path to a log file
+ required: true
+ state:
+ type: str
+ description:
+ - following state of the log
+ choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - name of the log
+ required: false
+ logtype:
+ type: str
+ description:
+ - type of the log
+ required: false
+ aliases: [type]
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+- name: Stop tracking nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/error.log
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
+ rc, out, err = module.run_command(' '.join(cmd))
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+    module.exit_json(changed=False, msg="log(s) already followed")
+
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+    # Using a for loop in case of error, we can report the log that failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
+
+    module.exit_json(changed=False, msg="log(s) already unfollowed")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py
new file mode 100644
index 00000000..59e0f325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logentries_msg.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries.
+description:
+ - Send a message to logentries
+requirements:
+ - "python >= 2.6"
+options:
+ token:
+ type: str
+ description:
+ - Log token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ api:
+ type: str
+ description:
+ - API endpoint
+ default: data.logentries.com
+ port:
+ type: int
+ description:
+ - API endpoint port
+ default: 80
+author: "Jimmy Tang (@jcftang) <jimmy_tang@rapid7.com>"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+ community.general.logentries_msg:
+    token: 00000000-0000-0000-0000-000000000000
+    msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(module, token, msg, api, port):
+
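+    # The Logentries plain TCP input expects each event as one line prefixed with
+    # the log token, i.e. "<token> <message>\n", which is what gets built here.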
+ message = "{0} {1}\n".format(token, msg)
+
+ api_ip = socket.gethostbyname(api)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((api_ip, port))
+ try:
+ if not module.check_mode:
+            s.send(message.encode('utf-8'))
+ except Exception as e:
+ module.fail_json(msg="failed to send message, msg=%s" % e)
+ s.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=True),
+ api=dict(type='str', default="data.logentries.com"),
+ port=dict(type='int', default=80)),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ msg = module.params["msg"]
+ api = module.params["api"]
+ port = module.params["port"]
+
+ changed = False
+ try:
+ send_msg(module, token, msg, api, port)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py
new file mode 100644
index 00000000..4a45c04a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/logstash_plugin.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+ - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+options:
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: True
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+      - Version of the plugin to install.
+        If the plugin already exists with a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Logstash beats input plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+
+- name: Install specific version of a plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-syslog
+ version: '3.2.0'
+
+- name: Uninstall Logstash plugin
+ community.general.logstash_plugin:
+ state: absent
+ name: logstash-filter-multiline
+
+- name: Install Logstash plugin with alternate heap size
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ environment:
+ LS_JAVA_OPTS: "-Xms256m -Xmx256m"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+
+def is_plugin_present(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "list", plugin_name]
+ rc, out, err = module.run_command(" ".join(cmd_args))
+ return rc == 0
+
+
+def parse_error(string):
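+    # logstash-plugin error output usually ends with "... reason: <details>";
+    # return just that tail so fail_json shows a concise message.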
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ cmd_args.append("--version %s" % version)
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ plugin_bin = module.params["plugin_bin"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(module, plugin_bin, name)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py
new file mode 100644
index 00000000..25f261ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvg.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ elements: str
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ pvresize:
+ description:
+ - If C(yes), resize the physical volume to the maximum available size.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+    - If C(yes), allows removing a volume group that contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: community.general.filesystem
+- module: community.general.lvol
+- module: community.general.parted
+notes:
+ - This module does not modify PE size for already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ community.general.lvg:
+ vg: vg.services
+ state: absent
+
+- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible
+ community.general.lvg:
+ vg: resizableVG
+ pvs: /dev/sda3
+ pvresize: yes
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list', elements='str'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ pvresize=dict(type='bool', default=False),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pvresize = module.boolean(module.params['pvresize'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+                vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if current_devs:
+ if state == 'present' and pvresize:
+ for device in current_devs:
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+ rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
+ dev_size = int(dev_size.replace(" ", ""))
+ rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
+ pv_size = int(pv_size.replace(" ", ""))
+ rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
+ pe_start = int(pe_start.replace(" ", ""))
+ rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
+ vg_extent_size = int(vg_extent_size.replace(" ", ""))
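+                    # Only resize the PV when the underlying device has grown by more
+                    # than one VG extent beyond the area the PV already covers.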
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ rc, _, err = module.run_command([pvresize_cmd, device])
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
+ else:
+ changed = True
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py
new file mode 100644
index 00000000..fa50007e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lvol.py
@@ -0,0 +1,566 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+options:
+ vg:
+ type: str
+ required: true
+ description:
+ - The volume group this logical volume is part of.
+ lv:
+ type: str
+ description:
+ - The name of the logical volume.
+ size:
+ type: str
+ description:
+    - The size of the logical volume, according to lvcreate(8) --size, by
+      default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+      according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE].
+      Float values must begin with a digit.
+      Resizing using percentage values was not supported prior to Ansible 2.1.
+ state:
+ type: str
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ choices: [ absent, present ]
+ default: present
+ active:
+ description:
+ - Whether the volume is active and visible to the host.
+ type: bool
+ default: 'yes'
+ force:
+ description:
+    - Shrink or remove operations on volumes require this switch. Ensures that
+      filesystems never get corrupted or destroyed by mistake.
+ type: bool
+ default: 'no'
+ opts:
+ type: str
+ description:
+ - Free-form options to be passed to the lvcreate command.
+ snapshot:
+ type: str
+ description:
+ - The name of the snapshot volume
+ pvs:
+ type: str
+ description:
+ - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ thinpool:
+ type: str
+ description:
+ - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+ shrink:
+ description:
+ - Shrink if current size is higher than size requested.
+ type: bool
+ default: 'yes'
+ resizefs:
+ description:
+ - Resize the underlying filesystem together with the logical volume.
+ type: bool
+ default: 'no'
+notes:
+ - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+'''
+
+EXAMPLES = '''
+- name: Create a logical volume of 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+- name: Create cache pool logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
+
+- name: Create a logical volume of 512g.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
+
+- name: Create a logical volume with special options
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
+
+- name: Extend the logical volume to 1024m.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+ resizefs: true
+
+- name: Resize the logical volume to % of VG
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: yes
+
+- name: Reduce the logical volume to 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: yes
+
+- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: no
+
+- name: Remove the logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: yes
+
+- name: Create a snapshot volume of the test logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+- name: Deactivate a logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+- name: Create a deactivated logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
+
+- name: Create a thin pool of 512g
+ community.general.lvol:
+ vg: firefly
+ thinpool: testpool
+ size: 512g
+
+- name: Create a thin volume of 128g
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ thinpool: testpool
+ size: 128g
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LVOL_ENV_VARS = dict(
+ # make sure we use the C locale when running lvol-related commands
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+
+def mkversion(major, minor, patch):
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
+
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ lvs.append({
+ 'name': parts[0].replace('[', '').replace(']', ''),
+ 'size': float(parts[1]),
+ 'active': (parts[2][4] == 'a'),
+ 'thinpool': (parts[2][0] == 't'),
+ 'thinvol': (parts[2][0] == 'V'),
+ })
+ return lvs
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': float(parts[1]),
+ 'free': float(parts[2]),
+ 'ext_size': float(parts[3])
+ })
+ return vgs
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str'),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str'),
+ pvs=dict(type='str'),
+ resizefs=dict(type='bool', default=False),
+ thinpool=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['lv', 'thinpool'],
+ ),
+ )
+
+ module.run_command_environ_update = LVOL_ENV_VARS
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ resizefs = module.boolean(module.params['resizefs'])
+ thinpool = module.params['thinpool']
+ size_opt = 'L'
+ size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVCREATE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
+
+ if '%' not in size:
+ # LVCREATE(8) -L --size option unit
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1].lower()
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot:
+ # Check snapshot pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == lv or test_lv['name'] == thinpool:
+ if not test_lv['thinpool'] and not thinpool:
+ break
+ else:
+ module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
+ else:
+ module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
+ check_lv = snapshot
+
+ elif thinpool:
+ if lv:
+ # Check thin volume pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == thinpool:
+ break
+ else:
+ module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
+ check_lv = lv
+ else:
+ check_lv = thinpool
+ else:
+ check_lv = lv
+
+ for test_lv in lvs:
+ if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ # Require size argument except for snapshot of thin volumes
+ if (lv or thinpool) and not size:
+ for test_lv in lvs:
+ if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
+ break
+ else:
+ module.fail_json(msg="No size given.")
+
+ # create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ if size:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
+ elif thinpool and lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
+ elif thinpool and not lv:
+ cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ # Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+
+ # Round down to the next lowest whole physical extent
+ size_requested -= (size_requested % this_vg['ext_size'])
+
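+            # A leading '+' in the size means grow by the requested amount on
+            # top of the current LV size, rather than resizing to an absolute value.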
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
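+                # "Reached maximum COW size" in stdout means a snapshot can no
+                # longer grow; treat it as a failure before checking the return code.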
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ # resize LV based on absolute values
+ tool = None
+ if float(size) > this_lv['size']:
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and float(size) < this_lv['size']:
+ if float(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
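+
+# Illustrative tasks (hypothetical, not part of this module's EXAMPLES),
+# assuming this is the community.general.lvol module; the vg/lv names below
+# are placeholders. They show the two size forms handled in main() above:
+#
+#   - name: Create a 10g logical volume using an explicit unit suffix
+#     community.general.lvol:
+#       vg: data_vg
+#       lv: data_lv
+#       size: 10g
+#
+#   - name: Extend the logical volume into the VG's free space and grow the filesystem
+#     community.general.lvol:
+#       vg: data_vg
+#       lv: data_lv
+#       size: 100%FREE
+#       resizefs: true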
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py
new file mode 100644
index 00000000..c1a3d1c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxc_container.py
@@ -0,0 +1,1760 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lxc_container
+short_description: Manage LXC Containers
+description:
+ - Management of LXC containers.
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+      - Name of the logical volume.
+      - If not specified, it defaults to the container name.
+ type: str
+ vg_name:
+ description:
+ - If backend store is lvm, specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+      - File system size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under PATH.
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: 'no'
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+      - Set the log level for a container where I(container_log) was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+      - Create a snapshot of the container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: 'no'
+ archive:
+ description:
+ - Create an archive of a container.
+ - This will create a tarball of the running container.
+ type: bool
+ default: 'no'
+ archive_path:
+ description:
+      - Path used to save the archived container.
+ - If the path does not exist the archive method will attempt to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+ - If you clone a container using I(clone_name) the newly cloned
+        container is created in a stopped state.
+ - The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+    with a name that already exists in the user's namespace the module will
+ simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+ executed, and then the container "stopped" again. Likewise if the state
+ is "stopped" and the container does not exist it will be first created,
+ "started", the command executed, and then "stopped". If you use a "|"
+ in the variable you can use common script formatting within the variable
+    itself. The "container_command" option will always execute as BASH.
+ When using "container_command" a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+  - If "archive" is C(true) the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+ community.general.lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ community.general.lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ community.general.lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create a filesystem-backed container, configure it, archive it, and start it.
+- name: Create filesystem container
+ community.general.lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ community.general.lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ ansible.builtin.debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ community.general.lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ community.general.lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ community.general.lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ community.general.lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ community.general.lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container-overlayfs"
+ ansible.builtin.debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ community.general.lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ community.general.lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN = r"""
+lxc_container:
+ description: container information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+      description: path to the created container archive
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: bool
+ sample: True
+"""
+
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+import time
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_text, to_bytes
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.tgz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
+
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables-lxc-copy': {
+ 'backing_store': '--backingstorage',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--name',
+ 'clone_name': '--newname'
+ },
+ # lxc-clone is deprecated in favor of lxc-copy
+ 'variables-lxc-clone': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
+
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+ 'clone': '_clone'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that users environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ unset HOSTNAME
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should be backward compatible with Python 2.4+ when executing
+ from within the container.
+
+ :param command: command to run, this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700', 8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params.get('state', None)
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params.get('lxc_path', None)
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type: ``str``
+ :returns: True or False if the container is found.
+        :rtype: ``bool``
+ """
+ if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = BOOLEANS_FALSE.union([None, ''])
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+        :param unsafe_shell: Enable or disable unsafe shell commands.
+        :type unsafe_shell: ``bool``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+        stop the container if it's running, write the new options, and then
+ restart the container upon completion.
+ """
+
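+        # container_config is a list of "key=value" strings, for example the
+        # entries shown in this module's EXAMPLES:
+        #   - "lxc.aa_profile=unconfined"
+        #   - "lxc.cgroup.devices.allow=a *:* rmw"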
+ _container_config = self.module.params.get('container_config')
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+ parsed_options = [i.split('=', 1) for i in _container_config]
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ _, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ # lxc-clone is deprecated in favor of lxc-copy
+ clone_vars = 'variables-lxc-copy'
+ clone_cmd = self.module.get_bin_path('lxc-copy')
+ if not clone_cmd:
+ clone_vars = 'variables-lxc-clone'
+ clone_cmd = self.module.get_bin_path('lxc-clone', True)
+
+ build_command = [
+ clone_cmd,
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone'][clone_vars]
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
+ build_command.append('--snapshot')
+ # Check for backing_store == overlayfs if so force the use of snapshot
+ # If overlay fs is used and snapshot is unset the clone command will
+ # fail with an unsupported type.
+ elif self.module.params.get('backing_store') == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing %s." % os.path.basename(clone_cmd)
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+        container. It would have been nice to simply use the lxc python library;
+        however, at the time this was written the python library, in both py2
+        and py3, didn't support some of the more advanced container creation
+        processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name %s' % self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params.get('container_log') in BOOLEANS_TRUE:
+            # Set the logging path to /var/log/lxc if uid is root, else
+            # set it to the home folder of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile %s' % os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority %s' % self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params.get('template_options', None)
+ if template_options:
+ build_command.append('-- %s' % template_options)
+
+ rc, return_data, err = self._run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+        :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+ If the container is not found the state returned is "absent"
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ return str('absent')
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+        :param timeout: Time before the start operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
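+        # Retry for up to `timeout` seconds; the for/else falls through to
+        # failure() only if the container never reaches the 'running' state.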
+ for _ in xrange(timeout):
+ if self._get_state() != 'running':
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ else:
+ return True
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+        This will store the archive information as self.archive_info
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for _ in xrange(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+                msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+        :param vg_name: Name of the volume group.
+        :type vg_name: ``str``
+        :returns: free size and unit of measurement of the VG
+ :type: ``tuple``
+ """
+
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+        :param lv_name: Name of the logical volume.
+ :type lv_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+                'Snapshot size [ %s ] is greater than the free space [ %s ] on volume group'
+ ' [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
+ old_umask = os.umask(int('0077', 8))
+
+ archive_path = self.module.params.get('archive_path')
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params.get('archive_compression')
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+        # Build the archive file name: <archive_path>/<container_name>.<extension>
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(
+ os.path.expanduser(source_dir)
+ ),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self._run_command(
+ build_command=build_command,
+ unsafe_shell=True
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container rootfs
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+ # This loop is created to support overlayfs archives. This should
+ # squash all of the layers into a single archive.
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir
+ ]
+ rc, stdout, err = self._run_command(
+ build_command,
+ unsafe_shell=True
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+        # Test if the container's rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+ ' up old snapshot of containers before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+            msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=LXC_BACKING_STORE.keys(),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=LXC_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='list',
+ elements='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ required=False
+ ),
+ clone_snapshot=dict(
+ type='bool',
+            default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=LXC_COMPRESSION_MAP.keys(),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=([
+ ('archive', True, ['archive_path'])
+ ]),
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ lv_name = module.params.get('lv_name')
+ if not lv_name:
+ module.params['lv_name'] = module.params.get('name')
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py
new file mode 100644
index 00000000..7bd7b9ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_cmms.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+  - This module returns/displays inventory details of cmms.
+
+options:
+ uuid:
+ description:
+      UUID of the device; this is a string with length greater than 16.
+
+ command_options:
+ description:
+      options to filter cmms information
+ default: cmms
+ choices:
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
+
+ chassis:
+ description:
+      UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: Get cmms data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_chassis_uuid
+
+'''
+
+RETURN = r'''
+result:
+ description: cmms detail from lxca
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import cmms
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+ return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+ """
+    This function merges the argument spec and creates the Ansible module object.
+ :return:
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
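+# Map each supported command_options value to the function implementing it.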
+FUNC_DICT = {
+ 'cmms': _cmms,
+ 'cmms_by_uuid': _cmms_by_uuid,
+ 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
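+# Module-specific arguments; merged with LXCA_COMMON_ARGS in setup_module_object().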
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ uuid=dict(default=None),
+ chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+    This function invokes commands.
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+        error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py
new file mode 100644
index 00000000..febe2fd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxca_nodes.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+ - This module returns/displays inventory details of nodes.
+
+options:
+ uuid:
+ description:
+ uuid of device, this is a string longer than 16 characters.
+
+ command_options:
+ description:
+ options to filter nodes information
+ default: nodes
+ choices:
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
+
+ chassis:
+ description:
+ uuid of chassis, this is a string longer than 16 characters.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all nodes info
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes
+
+# get specific nodes info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_uuid
+
+# get specific nodes info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_chassis_uuid
+
+# get managed nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_managed
+
+# get unmanaged nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_unmanaged
+
+'''
+
+RETURN = r'''
+result:
+ description: node details from LXCA
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple nodes details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import nodes
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _nodes(module, lxca_con):
+ return nodes(lxca_con)
+
+
+def _nodes_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return nodes(lxca_con, module.params['uuid'])
+
+
+def _nodes_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return nodes(lxca_con, chassis=module.params['chassis'])
+
+
+def _nodes_status_managed(module, lxca_con):
+ return nodes(lxca_con, status='managed')
+
+
+def _nodes_status_unmanaged(module, lxca_con):
+ return nodes(lxca_con, status='unmanaged')
+
+
+def setup_module_object():
+ """
+ This function merges the argument spec and creates the Ansible module object.
+ :return: AnsibleModule object
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'nodes': _nodes,
+ 'nodes_by_uuid': _nodes_by_uuid,
+ 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
+ 'nodes_status_managed': _nodes_status_managed,
+ 'nodes_status_unmanaged': _nodes_status_unmanaged,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ uuid=dict(default=None), chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+ This function invokes commands.
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(str(e) for e in exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py
new file mode 100644
index 00000000..119387f9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_container.py
@@ -0,0 +1,710 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" values in the metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+ differ, this module tries to apply the configurations.
+ - Keys starting with 'volatile.' are ignored for this comparison.
+ - Not all config values can be applied to an existing container.
+ You may need to delete and recreate the container.
+ type: dict
+ required: false
+ profiles:
+ description:
+ - Profiles to be used by the container.
+ type: list
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).'
+ - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams)'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+ - For cluster deployments. Will attempt to create a container on a target node.
+ If the container exists elsewhere in the cluster, it will not be replaced or moved.
+ The name must match the node name shown in C(lxc cluster list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+ - This is also used as a timeout for waiting until IPv4 addresses
+ are set on all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: 30
+ type: int
+ wait_for_ipv4_addresses:
+ description:
+ - If this is true, the C(lxd_container) module waits until IPv4 addresses
+ are set on all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: false
+ type: bool
+ force_stop:
+ description:
+ - If this is true, the C(lxd_container) module forcibly stops the container
+ when stopping or restarting it.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command:
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+ - There are two ways to run commands inside a container: using the command
+ module or using the Ansible lxd connection plugin bundled in Ansible >=
+ 2.1. The latter requires Python to be installed in the container, which can
+ be done with the command module.
+ - You can copy a file from the host to the container
+ with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the `lxd` connection plugin.
+ See the example below.
+ - You can copy a file from the created container to the localhost
+ with `command=lxc file pull container_name/dir/filename filename`.
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example of creating an Ubuntu container and installing Python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd # if you get a 404, try setting protocol: simplestreams
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: Check python is installed in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: Install python in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ # Provides current (and older) Ubuntu images with listed fingerprints
+ server: https://cloud-images.ubuntu.com/releases
+ # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+ protocol: simplestreams
+ # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+ fingerprint: e9a8bdfab6dc
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ ansible.builtin.fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+
+# An example for LXD cluster deployments. This example will create two new containers on specific
+# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
+# members that the LXD cluster recognizes, not Ansible inventory names; see 'lxc cluster list'.
+# LXD API calls can be made to any LXD member. In this example, we send API requests to
+# 'node01.example.com', which matches the Ansible inventory name.
+- hosts: node01.example.com
+ tasks:
+ - name: Create LXD container
+ community.general.lxd_container:
+ name: new-container-1
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node01
+
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-container-2
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node02
+'''
+
+RETURN = '''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: dict
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: str
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+import datetime
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# LXD_ANSIBLE_STATES maps module states to the methods invoked
+# when a particular state is requested.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen'
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxc_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+ self.target = self.module.params['target']
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
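+ # Prefer an explicitly configured URL; otherwise fall back to the snap
+ # socket path when it exists on this host, else keep the default socket.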
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_container_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ def _get_container_state_json(self):
+ return self.client.do(
+ 'GET', '/1.0/containers/{0}/state'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _container_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
+ body_json = {'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
+ return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)
+
+ def _create_container(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ if self.target:
+ self.client.do('POST', '/1.0/containers?' + urlencode(dict(target=self.target)), config)
+ else:
+ self.client.do('POST', '/1.0/containers', config)
+ self.actions.append('create')
+
+ def _start_container(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_container(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_container(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_container(self):
+ self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def _freeze_container(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_container(self):
+ self._change_state('unfreeze')
+ self.actions.append('unfreeze')
+
+ def _container_ipv4_addresses(self, ignore_devices=None):
+ ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
+
+ resp_json = self._get_container_state_json()
+ network = resp_json['metadata']['network'] or {}
+ network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+ def _get_addresses(self):
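+ # Poll roughly once per second until every interface reports an IPv4
+ # address or the timeout expires.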
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._container_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses):
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ elif self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_container_configs():
+ self._start_container()
+ self._apply_container_configs()
+ self._stop_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._stop_container()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._restart_container()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_container()
+ if self.old_state != 'stopped':
+ self._stop_container()
+ self._delete_container()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_container()
+ self._start_container()
+ self._freeze_container()
+ else:
+ if self.old_state == 'stopped':
+ self._start_container()
+ if self._needs_to_apply_container_configs():
+ self._apply_container_configs()
+ self._freeze_container()
+
+ def _needs_to_change_container_config(self, key):
+ if key not in self.config:
+ return False
+ if key == 'config':
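+ # Compare only non-volatile config keys; keys present on the server
+ # but not requested in the task are ignored.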
+ old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
+ for k, v in self.config['config'].items():
+ if k not in old_configs:
+ return True
+ if old_configs[k] != v:
+ return True
+ return False
+ else:
+ old_configs = self.old_container_json['metadata'][key]
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_container_configs(self):
+ return (
+ self._needs_to_change_container_config('architecture') or
+ self._needs_to_change_container_config('config') or
+ self._needs_to_change_container_config('ephemeral') or
+ self._needs_to_change_container_config('devices') or
+ self._needs_to_change_container_config('profiles')
+ )
+
+ def _apply_container_configs(self):
+ old_metadata = self.old_container_json['metadata']
+ body_json = {
+ 'architecture': old_metadata['architecture'],
+ 'config': old_metadata['config'],
+ 'devices': old_metadata['devices'],
+ 'profiles': old_metadata['profiles']
+ }
+ if self._needs_to_change_container_config('architecture'):
+ body_json['architecture'] = self.config['architecture']
+ if self._needs_to_change_container_config('config'):
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ if self._needs_to_change_container_config('ephemeral'):
+ body_json['ephemeral'] = self.config['ephemeral']
+ if self._needs_to_change_container_config('devices'):
+ body_json['devices'] = self.config['devices']
+ if self._needs_to_change_container_config('profiles'):
+ body_json['profiles'] = self.config['profiles']
+ self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
+ self.actions.append('apply_container_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_container_json = self._get_container_json()
+ self.old_state = self._container_json_to_module_state(self.old_container_json)
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=LXD_ANSIBLE_STATES.keys(),
+ default='started'
+ ),
+ target=dict(
+ type='str',
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py
new file mode 100644
index 00000000..ccd74d42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/lxd_profile.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" values in the metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+ differ, this module tries to apply the configurations.
+ - Not all config values can be applied to an existing profile.
+ You may need to delete and recreate the profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ type: dict
+ new_name:
+ description:
+ - A new name for the profile.
+ - If this parameter is specified, the profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ type: str
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: str
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: str
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command:
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ community.general.lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of supported states
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ return self.client.do(
+ 'GET', '/1.0/profiles/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the specified state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/profiles', config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _apply_profile_configs(self):
+ config = self.old_profile_json.copy()
+ for k, v in self.config.items():
+ config[k] = v
+ self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='str',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='str',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py
new file mode 100644
index 00000000..a865a8f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/macports.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on opkg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages (ports)
+options:
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+ - Update Macports and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: "no"
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: "no"
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - 'C(variant) is only supported with state: I(installed)/I(present).'
+ aliases: ['variants']
+ type: str
+'''
+EXAMPLES = '''
+- name: Install the foo port
+ community.general.macports:
+ name: foo
+
+- name: Install the universal, x11 variant of the foo port
+ community.general.macports:
+ name: foo
+ variant: +universal+x11
+
+- name: Install a list of ports
+ community.general.macports:
+ name: "{{ ports }}"
+ vars:
+ ports:
+ - foo
+ - foo-tools
+
+- name: Update Macports and the ports tree, then upgrade all outdated ports
+ community.general.macports:
+ selfupdate: yes
+ upgrade: yes
+
+- name: Update Macports and the ports tree, then install the foo port
+ community.general.macports:
+ name: foo
+ selfupdate: yes
+
+- name: Remove the foo port
+ community.general.macports:
+ name: foo
+ state: absent
+
+- name: Activate the foo port
+ community.general.macports:
+ name: foo
+ state: active
+
+- name: Deactivate the foo port
+ community.general.macports:
+ name: foo
+ state: inactive
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def selfupdate(module, port_path):
+ """ Update Macports and the ports tree. """
+
+ rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+
+ if rc == 0:
+ updated = any(
+ re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
+ re.search(r'Installing new Macports release', s.strip())
+ for s in out.split('\n')
+ if s
+ )
+ if updated:
+ changed = True
+ msg = "Macports updated successfully"
+ else:
+ changed = False
+ msg = "Macports already up-to-date"
+
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
+
+
+def upgrade(module, port_path):
+ """ Upgrade outdated ports. """
+
+ rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+
+ # rc is 1 when nothing to upgrade so check stdout first.
+ if out.strip() == "Nothing to upgrade.":
+ changed = False
+ msg = "Ports already upgraded"
+ return (changed, msg)
+ elif rc == 0:
+ changed = True
+ msg = "Outdated ports upgraded successfully"
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
+
+
+def query_port(module, port_path, name, state="present"):
+ """ Returns whether a port is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and out.strip().startswith(name + " "):
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and "(active)" in out:
+ return True
+
+ return False
+
+
+def remove_ports(module, port_path, ports):
+ """ Uninstalls one or more ports if installed. """
+
+ remove_c = 0
+ # Use a for loop so that, if a port fails, we can report which one
+ for port in ports:
+ # Query the port first, to see if we even need to remove
+ if not query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+
+ if query_port(module, port_path, port):
+ module.fail_json(msg="Failed to remove %s: %s" % (port, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="Port(s) already absent")
+
+
+def install_ports(module, port_path, ports, variant):
+ """ Installs one or more ports if not already installed. """
+
+ install_c = 0
+
+ for port in ports:
+ if query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to install %s: %s" % (port, err))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="Port(s) already present")
+
+
+def activate_ports(module, port_path, ports):
+ """ Activate a port if it's inactive. """
+
+ activate_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
+
+ if query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+
+ if not query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to activate %s: %s" % (port, err))
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
+
+ module.exit_json(changed=False, msg="Port(s) already active")
+
+
+def deactivate_ports(module, port_path, ports):
+ """ Deactivate a port if it's active. """
+
+ deactivated_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
+
+ if not query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+
+ if query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
+
+ module.exit_json(changed=False, msg="Port(s) already inactive")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=["port"]),
+ selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ upgrade=dict(default=False, type='bool'),
+ variant=dict(aliases=["variants"], default=None, type='str')
+ )
+ )
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
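+ # selfupdate and upgrade can be used on their own: exit early when no
+ # port names are given and no further action is requested.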
+ if p["selfupdate"]:
+ (changed, msg) = selfupdate(module, port_path)
+ if not (p["name"] or p["upgrade"]):
+ module.exit_json(changed=changed, msg=msg)
+
+ if p["upgrade"]:
+ (changed, msg) = upgrade(module, port_path)
+ if not p["name"]:
+ module.exit_json(changed=changed, msg=msg)
+
+ pkgs = p["name"]
+
+ variant = p["variant"]
+
+ if p["state"] in ["present", "installed"]:
+ install_ports(module, port_path, pkgs, variant)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_ports(module, port_path, pkgs)
+
+ elif p["state"] == "active":
+ activate_ports(module, port_path, pkgs)
+
+ elif p["state"] == "inactive":
+ deactivate_ports(module, port_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py
new file mode 100644
index 00000000..574f8478
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mail.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments
+ there are from time to time processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make them perform their
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+- Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+options:
+ sender:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ type: str
+ default: root
+ aliases: [ from ]
+ to:
+ description:
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ default: root
+ aliases: [ recipients ]
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: yes
+ type: str
+ aliases: [ msg ]
+ body:
+ description:
+ - The body of the email being sent.
+ type: str
+ username:
+ description:
+ - If SMTP requires username.
+ type: str
+ password:
+ description:
+ - If SMTP requires password.
+ type: str
+ host:
+ description:
+ - The mail server.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The mail server port.
+ - This must be a valid integer between 1 and 65534
+ type: int
+ default: 25
+ attach:
+ description:
+ - A list of pathnames of files to attach to the message.
+ - Attached files will have their content-type set to C(application/octet-stream).
+ type: list
+ default: []
+ headers:
+ description:
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as C(header=value) (see example below).
+ type: list
+ default: []
+ charset:
+ description:
+ - The character set of email being sent.
+ type: str
+ default: utf-8
+ subtype:
+ description:
+ - The minor mime type, can be either C(plain) or C(html).
+ - The major type is always C(text).
+ type: str
+ choices: [ html, plain ]
+ default: plain
+ secure:
+ description:
+ - If C(always), the connection will only send email if the connection is encrypted.
+ If the server doesn't accept the encrypted connection it will fail.
+ - If C(try), the connection will attempt to set up a secure SSL/TLS session before trying to send.
+ - If C(never), the connection will not attempt to set up a secure SSL/TLS session before sending.
+ - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection before sending.
+ If it is unable to do so it will fail.
+ type: str
+ choices: [ always, never, starttls, try ]
+ default: try
+ timeout:
+ description:
+ - Sets the timeout in seconds for connection attempts.
+ type: int
+ default: 20
+'''
+
+EXAMPLES = r'''
+- name: Example playbook sending mail to root
+ community.general.mail:
+ subject: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Sending an e-mail using Gmail SMTP servers
+ community.general.mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Send e-mail to a bunch of users, attaching files
+ community.general.mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to:
+ - John Doe <j.d@example.org>
+ - Suzie Something <sue@example.com>
+ cc: Charlie Root <root@localhost>
+ attach:
+ - /etc/group
+ - /tmp/avatar2.png
+ headers:
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
+ charset: us-ascii
+ delegate_to: localhost
+
+- name: Sending an e-mail using the remote machine, not the Ansible controller node
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+
+- name: Sending an e-mail using Legacy SSL to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: always
+
+- name: Sending an e-mail using StartTLS to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+'''
+
+import os
+import smtplib
+import ssl
+import traceback
+from email import encoders
+from email.utils import parseaddr, formataddr, formatdate
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.header import Header
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=25),
+ sender=dict(type='str', default='root', aliases=['from']),
+ to=dict(type='list', default=['root'], aliases=['recipients']),
+ cc=dict(type='list', default=[]),
+ bcc=dict(type='list', default=[]),
+ subject=dict(type='str', required=True, aliases=['msg']),
+ body=dict(type='str'),
+ attach=dict(type='list', default=[]),
+ headers=dict(type='list', default=[]),
+ charset=dict(type='str', default='utf-8'),
+ subtype=dict(type='str', default='plain', choices=['html', 'plain']),
+ secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
+ timeout=dict(type='int', default=20),
+ ),
+ required_together=[['password', 'username']],
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ secure = module.params.get('secure')
+ timeout = module.params.get('timeout')
+
+ code = 0
+ secure_state = False
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
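+ # Unless secure is 'never', first try an implicit SSL connection; if that
+ # fails (and secure is not 'always'), fall back to a plain SMTP connection,
+ # which may be upgraded via STARTTLS further below.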
+ try:
+ if secure != 'never':
+ try:
+ if PY3:
+ smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP_SSL(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+ secure_state = True
+ except ssl.SSLError as e:
+ if secure == 'always':
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ except Exception:
+ pass
+
+ if not secure_state:
+ if PY3:
+ smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ if int(code) > 0:
+ if not secure_state and secure in ('starttls', 'try'):
+ if smtp.has_extn('STARTTLS'):
+ try:
+ smtp.starttls()
+ secure_state = True
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+ else:
+ if secure == 'starttls':
+ module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+ if username and password:
+ if smtp.has_extn('AUTH'):
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+ except smtplib.SMTPException:
+ module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+ else:
+ module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))
+
+ if not secure_state and (username and password):
+ module.warn('Username and password were sent without encryption')
+
+ msg = MIMEMultipart(_charset=charset)
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = Header(subject, charset)
+ msg.preamble = "Multipart message"
+
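+ # Each header entry is a 'Name=Value' string; the legacy syntax allowed several
+ # headers in one entry separated by '|', e.g. 'X-Priority=1|X-Example=demo'
+ # (hypothetical values for illustration).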
+ for header in headers:
+ # NOTE: Backward compatible with old syntax using '|' as delimiter
+ for hdr in [x.strip() for x in header.split('|')]:
+ try:
+ h_key, h_val = hdr.split('=')
+ h_val = to_native(Header(h_val, charset))
+ msg.add_header(h_key, h_val)
+ except Exception:
+ module.warn("Skipping header '%s', unable to parse" % hdr)
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', 'Ansible mail module')
+
+ addr_list = []
+ for addr in [x.strip() for x in blindcopies]:
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+
+ to_list = []
+ for addr in [x.strip() for x in recipients]:
+ to_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['To'] = ", ".join(to_list)
+
+ cc_list = []
+ for addr in [x.strip() for x in copies]:
+ cc_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['Cc'] = ", ".join(cc_list)
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+ # NOTE: Backward compatibility with the old syntax using space as a delimiter is not retained.
+ # This breaks filenames that contain spaces.
+ for filename in attach_files:
+ try:
+ part = MIMEBase('application', 'octet-stream')
+ with open(filename, 'rb') as fp:
+ part.set_payload(fp.read())
+ encoders.encode_base64(part)
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+ msg.attach(part)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" %
+ (filename, to_native(e)), exception=traceback.format_exc())
+
+ composed = msg.as_string()
+
+ try:
+ result = smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+ (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+ smtp.quit()
+
+ if result:
+ for key in result:
+ module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+ module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+ module.exit_json(msg='Mail sent successfully', result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py
new file mode 100644
index 00000000..7314af28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/make.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements:
+- make
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run.
+ - Typically this would be something like C(install), C(test) or C(all).
+ type: str
+ params:
+ description:
+ - Any extra parameters to pass to make.
+ type: dict
+ chdir:
+ description:
+ - Change to this directory before running make.
+ type: path
+ required: true
+ file:
+ description:
+ - Use a custom Makefile.
+ type: path
+ make:
+ description:
+ - Use a specific make binary.
+ type: path
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = r'''
+- name: Build the default target
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+
+- name: Run 'install' target as root
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+- name: Build 'all' target with extra arguments
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+
+- name: Build 'all' target with a custom Makefile
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ file: /some-project/Makefile
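+
+# A minimal sketch, assuming GNU make is installed as gmake at the path below
+# (for example on some BSD systems); adjust the path for your host.
+- name: Build the default target with a specific make binary
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ make: /usr/local/bin/gmake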
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(type='str'),
+ params=dict(type='dict'),
+ chdir=dict(type='path', required=True),
+ file=dict(type='path'),
+ make=dict(type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ make_path = module.params['make']
+ if make_path is None:
+ # Build up the invocation of `make` we are going to use
+ # For non-Linux OSes, prefer gmake (GNU make) over make
+ make_path = module.get_bin_path('gmake', required=False)
+ if not make_path:
+ # Fall back to system make
+ make_path = module.get_bin_path('make', required=True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
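+ # Each entry becomes a KEY=value argument on the make command line,
+ # e.g. params {'NUM_THREADS': 4} is passed as 'NUM_THREADS=4'.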
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ if module.params['file'] is not None:
+ base_command = [make_path, "-f", module.params['file'], make_target]
+ else:
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
+ rc, out, err = run_command(base_command + ['-q'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module,
+ check_rc=True)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ file=module.params['file']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
new file mode 100644
index 00000000..d40a8ca0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alert_profiles
+
+short_description: Configuration of alert profiles for ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert profile should not exist,
+ - present - alert profile should exist,
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The unique alert profile name in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The resource type for the alert profile in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ alerts:
+ type: list
+ description:
+ - List of alert descriptions to assign to this profile.
+ - Required if state is "present"
+ notes:
+ type: str
+ description:
+ - Optional notes for this profile
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert profile to ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 02
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert profile from ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: absent
+ name: Test profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
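+
+# A minimal sketch of an update: re-declaring the profile with state=present and the
+# desired alert list updates the existing profile; the alert names are assumptions.
+- name: Update the alerts assigned to an existing alert profile in ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 03
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False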
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlertProfiles(object):
+ """ Object to execute alert profile management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
+
+ def get_profiles(self):
+ """ Get all alert profiles from ManageIQ
+ """
+ try:
+ response = self.client.get(self.url + '?expand=alert_definitions,resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
+ return response.get('resources') or []
+
+ def get_alerts(self, alert_descriptions):
+ """ Get a list of alert hrefs from a list of alert descriptions
+ """
+ alerts = []
+ for alert_description in alert_descriptions:
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
+ description=alert_description)
+ alerts.append(alert['href'])
+
+ return alerts
+
+ def add_profile(self, profile):
+ """ Add a new alert profile to ManageIQ
+ """
+ # find all alerts to add to the profile
+ # we do this first to fail early if one is missing.
+ alerts = self.get_alerts(profile['alerts'])
+
+ # build the profile dict to send to the server
+
+ profile_dict = dict(name=profile['name'],
+ description=profile['name'],
+ mode=profile['resource_type'])
+ if profile['notes']:
+ profile_dict['set_data'] = dict(notes=profile['notes'])
+
+ # send it to the server
+ try:
+ result = self.client.post(self.url, resource=profile_dict, action="create")
+ except Exception as e:
+ self.module.fail_json(msg="Creating profile failed: {error}".format(error=e))
+
+ # now that it has been created, we can assign the alerts
+ self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+ msg = "Profile {name} created successfully"
+ msg = msg.format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def delete_profile(self, profile):
+ """ Delete an alert profile from ManageIQ
+ """
+ try:
+ self.client.post(profile['href'], action="delete")
+ except Exception as e:
+ self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+ msg = "Successfully deleted profile {name}".format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def get_alert_href(self, alert):
+ """ Get an absolute href for an alert
+ """
+ return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+ def assign_or_unassign(self, profile, resources, action):
+ """ Assign or unassign alerts to profile, and validate the result.
+ """
+ alerts = [dict(href=href) for href in resources]
+
+ subcollection_url = profile['href'] + '/alert_definitions'
+ try:
+ result = self.client.post(subcollection_url, resources=alerts, action=action)
+ if len(result['results']) != len(alerts):
+ msg = "Failed to {action} alerts to profile '{name}': " +\
+ "expected {expected} alerts to be {action}ed, " +\
+ "but only {changed} were {action}ed"
+ msg = msg.format(action=action,
+ name=profile['name'],
+ expected=len(alerts),
+ changed=len(result['results']))
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to {action} alerts to profile '{name}': {error}"
+ msg = msg.format(action=action, name=profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ return result['results']
+
+ def update_profile(self, old_profile, desired_profile):
+ """ Update alert profile in ManageIQ
+ """
+ changed = False
+ # we need to use client.get to query the alert definitions
+ old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+ # figure out which alerts we need to assign / unassign
+ # alerts listed by the user:
+ desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+ # alerts which currently exist in the profile
+ if 'alert_definitions' in old_profile:
+ # we use get_alert_href to have a direct href to the alert
+ existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
+ else:
+ # no alerts in this profile
+ existing_alerts = set()
+
+ to_add = list(desired_alerts - existing_alerts)
+ to_remove = list(existing_alerts - desired_alerts)
+
+ # assign / unassign the alerts, if needed
+
+ if to_remove:
+ self.assign_or_unassign(old_profile, to_remove, "unassign")
+ changed = True
+ if to_add:
+ self.assign_or_unassign(old_profile, to_add, "assign")
+ changed = True
+
+ # update other properties
+ profile_dict = dict()
+
+ if old_profile['mode'] != desired_profile['resource_type']:
+ # mode needs to be updated
+ profile_dict['mode'] = desired_profile['resource_type']
+
+ # check if notes need to be updated
+ old_notes = old_profile.get('set_data', {}).get('notes')
+
+ if desired_profile['notes'] != old_notes:
+ profile_dict['set_data'] = dict(notes=desired_profile['notes'])
+
+ if profile_dict:
+ # if we have any updated values
+ changed = True
+ try:
+ result = self.client.post(old_profile['href'],
+ resource=profile_dict,
+ action="edit")
+ except Exception as e:
+ msg = "Updating profile '{name}' failed: {error}"
+ msg = msg.format(name=old_profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ if changed:
+ msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
+ else:
+ msg = "No update needed for profile {name}".format(name=desired_profile['name'])
+ return dict(changed=changed, msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ alerts=dict(type='list'),
+ notes=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['name', 'resource_type']),
+ ('state', 'absent', ['name'])])
+
+ state = module.params['state']
+ name = module.params['name']
+
+ manageiq = ManageIQ(module)
+ manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
+ name=name)
+
+ # we need to add or update the alert profile
+ if state == "present":
+ if not existing_profile:
+ # a profile with this name doesn't exist yet, let's create it
+ res_args = manageiq_alert_profiles.add_profile(module.params)
+ else:
+ # a profile with this name exists, we might need to update it
+ res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+ # this alert profile should not exist
+ if state == "absent":
+ # if we have an alert profile with this name, delete it
+ if existing_profile:
+ res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+ else:
+ # This alert profile does not exist in ManageIQ, and that's okay
+ msg = "Alert profile '{name}' does not exist in ManageIQ"
+ msg = msg.format(name=name)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
new file mode 100644
index 00000000..4f818a3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert should not exist,
+ - present - alert should exist,
+ required: False
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The unique alert description in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The entity type for the alert in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ expression_type:
+ type: str
+ description:
+ - Expression type.
+ default: hash
+ choices: ["hash", "miq"]
+ expression:
+ type: dict
+ description:
+ - The alert expression for ManageIQ.
+ - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+ - Required if state is "present".
+ enabled:
+ description:
+ - Enable or disable the alert. Required if state is "present".
+ type: bool
+ options:
+ type: dict
+ description:
+ - Additional alert options, such as notification type and frequency
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 01
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: ContainerNode
+ expression:
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Add an alert with a "miq expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 02
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: Vm
+ expression_type: miq
+ expression:
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert from ManageIQ
+ community.general.manageiq_alerts:
+ state: absent
+ description: Test Alert 01
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+ """ Represent a ManageIQ alert. Can be initialized with either the format
+ we receive from the server or the format we get from the user.
+ """
+ def __init__(self, alert):
+ self.description = alert['description']
+ self.db = alert['db']
+ self.enabled = alert['enabled']
+ self.options = alert['options']
+ self.hash_expression = None
+ self.miq_expression = None
+
+ if 'hash_expression' in alert:
+ self.hash_expression = alert['hash_expression']
+ if 'miq_expression' in alert:
+ self.miq_expression = alert['miq_expression']
+ if 'exp' in self.miq_expression:
+ # miq_expression is a field that needs a special case, because
+ # it's returned surrounded by a dict named exp even though we don't
+ # send it with that dict.
+ self.miq_expression = self.miq_expression['exp']
+
+ def __eq__(self, other):
+ """ Compare two ManageIQAlert objects
+ """
+ return self.__dict__ == other.__dict__
+
+
+class ManageIQAlerts(object):
+ """ Object to execute alert management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+ def get_alerts(self):
+ """ Get all alerts from ManageIQ
+ """
+ try:
+ response = self.client.get(self.alerts_url + '?expand=resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+ return response.get('resources', [])
+
+ def validate_hash_expression(self, expression):
+ """ Validate a 'hash expression' alert definition
+ """
+ # hash expressions must have the following fields
+ for key in ['options', 'eval_method', 'mode']:
+ if key not in expression:
+ msg = "Hash expression is missing required field {key}".format(key=key)
+ self.module.fail_json(msg=msg)
+
+ def create_alert_dict(self, params):
+ """ Create a dict representing an alert
+ """
+ if params['expression_type'] == 'hash':
+ # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+ self.validate_hash_expression(params['expression'])
+ expression_type = 'hash_expression'
+ else:
+ # actually miq_expression, but we call it "expression" for backwards-compatibility
+ expression_type = 'expression'
+
+ # build the alert
+ alert = dict(description=params['description'],
+ db=params['resource_type'],
+ options=params['options'],
+ enabled=params['enabled'])
+
+ # add the actual expression.
+ alert.update({expression_type: params['expression']})
+
+ return alert
+
+ def add_alert(self, alert):
+ """ Add a new alert to ManageIQ
+ """
+ try:
+ result = self.client.post(self.alerts_url, action='create', resource=alert)
+
+ msg = "Alert {description} created successfully: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Creating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to create a hash expression
+ msg = msg.format(description=alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def delete_alert(self, alert):
+ """ Delete an alert
+ """
+ try:
+ result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
+ id=alert['id']),
+ action="delete")
+ msg = "Alert {description} deleted: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Deleting alert {description} failed: {error}"
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def update_alert(self, existing_alert, new_alert):
+ """ Update an existing alert with the values from `new_alert`
+ """
+ new_alert_obj = ManageIQAlert(new_alert)
+ if new_alert_obj == ManageIQAlert(existing_alert):
+ # no change needed - alerts are identical
+ return dict(changed=False, msg="No update needed")
+ else:
+ try:
+ url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
+ result = self.client.post(url, action="edit", resource=new_alert)
+
+ # make sure that the update was indeed successful by comparing
+ # the result to the expected result.
+ if new_alert_obj == ManageIQAlert(result):
+ # success!
+ msg = "Alert {description} updated successfully: {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ return dict(changed=True, msg=msg)
+ else:
+ # unexpected result
+ msg = "Updating alert {description} failed, unexpected result {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = "Updating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to update a hash expression
+ msg = msg.format(description=existing_alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=existing_alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ description=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
+ expression=dict(type='dict'),
+ options=dict(type='dict'),
+ enabled=dict(type='bool'),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['description',
+ 'resource_type',
+ 'expression',
+ 'enabled',
+ 'options']),
+ ('state', 'absent', ['description'])])
+
+ state = module.params['state']
+ description = module.params['description']
+
+ manageiq = ManageIQ(module)
+ manageiq_alerts = ManageIQAlerts(manageiq)
+
+ existing_alert = manageiq.find_collection_resource_by("alert_definitions",
+ description=description)
+
+ # we need to add or update the alert
+ if state == "present":
+ alert = manageiq_alerts.create_alert_dict(module.params)
+
+ if not existing_alert:
+ # an alert with this description doesn't exist yet, let's create it
+ res_args = manageiq_alerts.add_alert(alert)
+ else:
+ # an alert with this description exists, we might need to update it
+ res_args = manageiq_alerts.update_alert(existing_alert, alert)
+
+ # this alert should not exist
+ elif state == "absent":
+ # if we have an alert with this description, delete it
+ if existing_alert:
+ res_args = manageiq_alerts.delete_alert(existing_alert)
+ else:
+ # it doesn't exist, and that's okay
+ msg = "Alert '{description}' does not exist in ManageIQ"
+ msg = msg.format(description=description)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py
new file mode 100644
index 00000000..2050eb63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_group.py
@@ -0,0 +1,648 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder <evertmulder@gmail.com> (based on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+- manageiq-client
+
+options:
+ state:
+ type: str
+ description:
+ - absent - group should not exist, present - group should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The group description.
+ required: true
+ default: null
+ role_id:
+ type: int
+ description:
+ - The group role id.
+ required: false
+ default: null
+ role:
+ type: str
+ description:
+ - The group role name.
+ - The C(role_id) has precedence over the C(role) when supplied.
+ required: false
+ default: null
+ tenant_id:
+ type: int
+ description:
+ - The tenant for the group identified by the tenant id.
+ required: false
+ default: null
+ tenant:
+ type: str
+ description:
+ - The tenant for the group identified by the tenant name.
+ - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - Tenant names are case sensitive.
+ required: false
+ default: null
+ managed_filters:
+ description: The tag values per category
+ type: dict
+ required: false
+ default: null
+ managed_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing categories are kept or updated, new categories are added.
+ - In replace mode all categories will be replaced with the supplied C(managed_filters).
+ choices: [ merge, replace ]
+ default: replace
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ type: list
+ elements: str
+ required: false
+ default: null
+ belongsto_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing settings are merged with the supplied C(belongsto_filters).
+ - In replace mode current values are replaced with the supplied C(belongsto_filters).
+ choices: [ merge, replace ]
+ default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: 'my_tenant'
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant_id: 4
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: >-
+ Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant,
+ apply 3 prov_max_cpu and 2 department tags to the group,
+ and limit access to a cluster for the group
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: my_tenant
+ managed_filters:
+ prov_max_cpu:
+ - '1'
+ - '2'
+ - '4'
+ department:
+ - defense
+ - engineering
+ managed_filters_merge_mode: replace
+ belongsto_filters:
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ belongsto_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a group in ManageIQ
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
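+
+# A minimal sketch of merge mode: only the categories listed below are added or
+# updated, other existing tag filters on the group are kept; the tag values are assumptions.
+- name: Add department tags to a group without touching its other tag filters
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ managed_filters:
+ department:
+ - defense
+ managed_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False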
+'''
+
+RETURN = '''
+group:
+ description: The group.
+ returned: success
+ type: complex
+ contains:
+ description:
+ description: The group description
+ returned: success
+ type: str
+ id:
+ description: The group id
+ returned: success
+ type: int
+ group_type:
+ description: The group type, system or user
+ returned: success
+ type: str
+ role:
+ description: The group role name
+ returned: success
+ type: str
+ tenant:
+ description: The group tenant name
+ returned: success
+ type: str
+ managed_filters:
+ description: The tag values per category
+ returned: success
+ type: dict
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ returned: success
+ type: list
+ created_on:
+ description: Group creation date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+ updated_on:
+ description: Group update date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+ """
+ Object to execute group management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group(self, description):
+ """ Search for group object by description.
+ Returns:
+ the group, or None if group was not found.
+ """
+ groups = self.client.collections.groups.find_by(description=description)
+ if len(groups) == 0:
+ return None
+ else:
+ return groups[0]
+
+ def tenant(self, tenant_id, tenant_name):
+ """ Search for tenant entity by name or id
+ Returns:
+ the tenant entity, or None if no id or name was supplied.
+ """
+
+ if tenant_id:
+ tenant = self.client.get_entity('tenants', tenant_id)
+ if not tenant:
+ self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
+ return tenant
+ else:
+ if tenant_name:
+ tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
+ if not tenant_res:
+ self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
+ if len(tenant_res) > 1:
+ self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s'" % tenant_name)
+ tenant = tenant_res[0]
+ return tenant
+ else:
+ # No tenant name or tenant id supplied
+ return None
+
+ def role(self, role_id, role_name):
+ """ Search for a role object by name or id.
+ Returns:
+ the role entity, or None if no id or name was supplied.
+ Fails the module if the requested role was not found.
+ """
+ if role_id:
+ role = self.client.get_entity('roles', role_id)
+ if not role:
+ self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
+ return role
+ else:
+ if role_name:
+ role_res = self.client.collections.roles.find_by(name=role_name)
+ if not role_res:
+ self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
+ if len(role_res) > 1:
+ self.module.fail_json(msg="Multiple roles found in manageiq with name '%s'" % role_name)
+ return role_res[0]
+ else:
+ # No role name or role id supplied
+ return None
+
+ @staticmethod
+ def merge_dict_values(norm_current_values, norm_updated_values):
+ """ Create a merged update object for manageiq group filters.
+
+ The input dicts contain the tag values per category.
+ If the new values contain a category, all tags for that category are replaced.
+ If the new values do not contain a category, its existing tags are kept.
+
+ Returns:
+ a dict of merged values per category, later converted into the nested array used in the update post body
+ """
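+ # Illustrative example with hypothetical tags:
+ # current: {'department': ['/managed/department/defense']}
+ # updated: {'prov_max_cpu': ['/managed/prov_max_cpu/2']}
+ # merged: both categories are kept; a category present in the update fully
+ # replaces the current tags for that category.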
+
+ # If no updated values are supplied, in merge mode, the original values must be returned
+ # otherwise the existing tag filters will be removed.
+ if norm_current_values and (not norm_updated_values):
+ return norm_current_values
+
+ # If no existing tag filters exist, use the user supplied values
+ if (not norm_current_values) and norm_updated_values:
+ return norm_updated_values
+
+ # start with norm_current_values's keys and values
+ res = norm_current_values.copy()
+ # replace res with norm_updated_values's keys and values
+ res.update(norm_updated_values)
+ return res
+
+ def delete_group(self, group):
+ """ Deletes a group from manageiq.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+ try:
+ url = '%s/groups/%s' % (self.api_url, group['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(
+ changed=True,
+ msg="deleted group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+
+ if role or norm_managed_filters or belongsto_filters:
+ group.reload(attributes=['miq_user_role_name', 'entitlement'])
+
+ try:
+ current_role = group['miq_user_role_name']
+ except AttributeError:
+ current_role = None
+
+ changed = False
+ resource = {}
+
+ if description and group['description'] != description:
+ resource['description'] = description
+ changed = True
+
+ if tenant and group['tenant_id'] != tenant['id']:
+ resource['tenant'] = dict(id=tenant['id'])
+ changed = True
+
+ if role and current_role != role['name']:
+ resource['role'] = dict(id=role['id'])
+ changed = True
+
+ if norm_managed_filters or belongsto_filters:
+
+ # Only compare if filters are supplied
+ entitlement = group['entitlement']
+
+ if 'filters' not in entitlement:
+ # No existing filters exist, use supplied filters
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+ changed = True
+ else:
+ current_filters = entitlement['filters']
+ new_filters = self.edit_group_edit_filters(current_filters,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+ if new_filters:
+ resource['filters'] = new_filters
+ changed = True
+
+ if not changed:
+ return dict(
+ changed=False,
+ msg="group %s is not changed." % group['description'])
+
+ # try to update group
+ try:
+ self.client.post(group['href'], action='edit', resource=resource)
+ changed = True
+ except Exception as e:
+ self.module.fail_json(msg="failed to update group %s: %s" % (group['description'], str(e)))
+
+ return dict(
+ changed=changed,
+ msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group filters.
+
+ Returns:
+ None if the filters were not changed.
+ If the filters were changed, the 'filters' part of the post body used to update the group.
+ """
+ filters_updated = False
+ new_filters_resource = {}
+
+ current_belongsto_set = current_filters.get('belongsto', set())
+
+ if belongsto_filters:
+ new_belongsto_set = set(belongsto_filters)
+ else:
+ new_belongsto_set = set()
+
+ if current_belongsto_set == new_belongsto_set:
+ new_filters_resource['belongsto'] = current_filters['belongsto']
+ else:
+ if belongsto_filters_merge_mode == 'merge':
+ current_belongsto_set.update(new_belongsto_set)
+ new_filters_resource['belongsto'] = list(current_belongsto_set)
+ else:
+ new_filters_resource['belongsto'] = list(new_belongsto_set)
+ filters_updated = True
+
+ # Process the managed filter tags.
+ # The user input is a dict whose keys are categories and whose values are arrays of tags.
+ # ManageIQ (the current_managed value) uses an array of arrays, one array per category.
+ # We normalize the user input from a dict of arrays to a dict of sorted arrays,
+ # and normalize the current ManageIQ array of arrays to a dict of sorted arrays, so the two can be compared.
+ norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+ if norm_current_filters == norm_managed_filters:
+ if 'managed' in current_filters:
+ new_filters_resource['managed'] = current_filters['managed']
+ else:
+ if managed_filters_merge_mode == 'merge':
+ merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+ else:
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ filters_updated = True
+
+ if not filters_updated:
+ return None
+
+ return new_filters_resource
+
+ def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+ """ Creates the group in manageiq.
+
+ Returns:
+ the created group id, name, created_on timestamp,
+ updated_on timestamp.
+ """
+ # check for required arguments
+ for key, value in dict(description=description).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/groups' % self.api_url
+
+ resource = {'description': description}
+
+ if role is not None:
+ resource['role'] = dict(id=role['id'])
+
+ if tenant is not None:
+ resource['tenant'] = dict(id=tenant['id'])
+
+ if norm_managed_filters or belongsto_filters:
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created group %s" % description,
+ group_id=result['results'][0]['id']
+ )
+
+ @staticmethod
+ def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+ if not norm_managed_filters:
+ return None
+
+ return list(norm_managed_filters.values())
+
+ @staticmethod
+ def manageiq_filters_to_sorted_dict(current_filters):
+ current_managed_filters = current_filters.get('managed')
+ if not current_managed_filters:
+ return None
+
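+ # Illustrative example with a hypothetical tag category: the ManageIQ entry
+ # [["/managed/department/defense", "/managed/department/engineering"]]
+ # becomes {'department': ['/managed/department/defense', '/managed/department/engineering']},
+ # keyed by the category name taken from the tag path.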
+ res = {}
+ for tag_list in current_managed_filters:
+ tag_list.sort()
+ key = tag_list[0].split('/')[2]
+ res[key] = tag_list
+
+ return res
+
+ @staticmethod
+ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
+ if not managed_filters:
+ return None
+
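+ # Illustrative example with hypothetical tags: {'department': ['defense']}
+ # becomes {'department': ['/managed/department/defense']}.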
+ res = {}
+ for cat_key in managed_filters:
+ cat_array = []
+ if not isinstance(managed_filters[cat_key], list):
+ module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+ for tags in managed_filters[cat_key]:
+ miq_managed_tag = "/managed/" + cat_key + "/" + tags
+ cat_array.append(miq_managed_tag)
+ # Do not add empty categories. ManageIQ will remove all categories that are not supplied
+ if cat_array:
+ cat_array.sort()
+ res[cat_key] = cat_array
+ return res
+
+ @staticmethod
+ def create_result_group(group):
+ """ Creates the ansible result object from a manageiq group entity
+
+ Returns:
+ a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
+ """
+ try:
+ role_name = group['miq_user_role_name']
+ except AttributeError:
+ role_name = None
+
+ managed_filters = None
+ belongsto_filters = None
+ if 'filters' in group['entitlement']:
+ filters = group['entitlement']['filters']
+ belongsto_filters = filters.get('belongsto')
+ group_managed_filters = filters.get('managed')
+ if group_managed_filters:
+ managed_filters = {}
+ for tag_list in group_managed_filters:
+ key = tag_list[0].split('/')[2]
+ tags = []
+ for t in tag_list:
+ tags.append(t.split('/')[3])
+ managed_filters[key] = tags
+
+ return dict(
+ id=group['id'],
+ description=group['description'],
+ role=role_name,
+ tenant=group['tenant']['name'],
+ managed_filters=managed_filters,
+ belongsto_filters=belongsto_filters,
+ group_type=group['group_type'],
+ created_on=group['created_on'],
+ updated_on=group['updated_on'],
+ )
+
+
+def main():
+ argument_spec = dict(
+ description=dict(required=True, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ role_id=dict(required=False, type='int'),
+ role=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='int'),
+ tenant=dict(required=False, type='str'),
+ managed_filters=dict(required=False, type='dict'),
+ managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(required=False, type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ description = module.params['description']
+ state = module.params['state']
+ role_id = module.params['role_id']
+ role_name = module.params['role']
+ tenant_id = module.params['tenant_id']
+ tenant_name = module.params['tenant']
+ managed_filters = module.params['managed_filters']
+ managed_filters_merge_mode = module.params['managed_filters_merge_mode']
+ belongsto_filters = module.params['belongsto_filters']
+ belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
+
+ manageiq = ManageIQ(module)
+ manageiq_group = ManageIQgroup(manageiq)
+
+ group = manageiq_group.group(description)
+
+ # group should not exist
+ if state == "absent":
+ # if we have a group, delete it
+ if group:
+ res_args = manageiq_group.delete_group(group)
+ # if we do not have a group, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="group '%s' does not exist in manageiq" % description)
+
+ # group should exist
+ if state == "present":
+
+ tenant = manageiq_group.tenant(tenant_id, tenant_name)
+ role = manageiq_group.role(role_id, role_name)
+ norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module)
+ # if we have a group, edit it
+ if group:
+ res_args = manageiq_group.edit_group(group, description, role, tenant,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+
+ # if we do not have a group, create it
+ else:
+ res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
+ group = manageiq.client.get_entity('groups', res_args['group_id'])
+
+ group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
+ res_args['group'] = manageiq_group.create_result_group(group)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py
new file mode 100644
index 00000000..600c0bff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_policies.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies
+
+short_description: Management of resource policy_profiles in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - policy_profiles should not exist,
+ - present - policy_profiles should exist,
+ - list - list current policy_profiles and policies.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ policy_profiles:
+ type: list
+ description:
+ - list of dictionaries, each includes the policy_profile 'name' key.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the type of the resource to which the profile should be [un]assigned
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the name of the resource to which the profile should be [un]assigned
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Assign new policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Unassign a policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+manageiq_policies:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ
+ returned: always
+ type: dict
+ sample: '{
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
+ {
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
+ }
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+class ManageIQPolicies(object):
+ """
+ Object to execute policies management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def query_profile_href(self, profile):
+ """ Add or Update the policy_profile href field
+
+ Example:
+ {name: STR, ...} => {name: STR, href: STR}
+ """
+ resource = self.manageiq.find_collection_resource_or_fail(
+ "policy_profiles", **profile)
+ return dict(name=profile['name'], href=resource['href'])
+
+ def query_resource_profiles(self):
+ """ Returns a list of the profile objects assigned to the resource
+ """
+ url = '{resource_url}/policy_profiles?expand=resources'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api profile object to look like:
+ # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
+ profiles = [self.clean_profile_object(profile) for profile in resources]
+
+ return profiles
+
+ def query_profile_policies(self, profile_id):
+ """ Returns a list of the policy objects assigned to the profile
+ """
+ url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
+ try:
+ response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('policies', [])
+
+ # clean the returned rest api policy object to look like:
+ # {name: STR, description: STR, active: BOOL}
+ policies = [self.clean_policy_object(policy) for policy in resources]
+
+ return policies
+
+ def clean_policy_object(self, policy):
+ """ Clean a policy object to have human readable form of:
+ {
+ name: STR,
+ description: STR,
+ active: BOOL
+ }
+ """
+ name = policy.get('name')
+ description = policy.get('description')
+ active = policy.get('active')
+
+ return dict(
+ name=name,
+ description=description,
+ active=active)
+
+ def clean_profile_object(self, profile):
+ """ Clean a profile object to have human readable form of:
+ {
+ profile_name: STR,
+ profile_description: STR,
+ policies: ARR<POLICIES>
+ }
+ """
+ profile_id = profile['id']
+ name = profile.get('name')
+ description = profile.get('description')
+ policies = self.query_profile_policies(profile_id)
+
+ return dict(
+ profile_name=name,
+ profile_description=description,
+ policies=policies)
+
+ def profiles_to_update(self, profiles, action):
+ """ Create a list of policies we need to update in ManageIQ.
+
+ Returns:
+ Whether or not a change took place and a message describing the
+ operation executed.
+ """
+ profiles_to_post = []
+ assigned_profiles = self.query_resource_profiles()
+
+ # make a list of assigned full profile names strings
+ # e.g. ['openscap profile', ...]
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
+
+ for profile in profiles:
+ assigned = profile.get('name') in assigned_profiles_set
+
+ if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+ # add/update the policy profile href field
+ # {name: STR, ...} => {name: STR, href: STR}
+ profile = self.query_profile_href(profile)
+ profiles_to_post.append(profile)
+
+ return profiles_to_post
+
+ def assign_or_unassign_profiles(self, profiles, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of profiles needed to be changed
+ profiles_to_post = self.profiles_to_update(profiles, action)
+ if not profiles_to_post:
+ return dict(
+ changed=False,
+ msg="Profiles {profiles} already {action}ed, nothing to do".format(
+ action=action,
+ profiles=profiles))
+
+ # try to assign or unassign profiles to resource
+ url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=profiles_to_post)
+ except Exception as e:
+ msg = "Failed to {action} profile: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed profiles
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed profiles: {profiles}".format(
+ action=action,
+ profiles=profiles))
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ policy_profiles=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['policy_profiles']),
+ ('state', 'absent', ['policy_profiles'])
+ ],
+ )
+
+ policy_profiles = module.params['policy_profiles']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+ manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+ else:
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py
new file mode 100644
index 00000000..7f55b55b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_provider.py
@@ -0,0 +1,928 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: manageiq_provider
+short_description: Management of providers in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+      - absent - provider should not exist,
+      - present - provider should be present,
+      - refresh - provider will be refreshed.
+ choices: ['absent', 'present', 'refresh']
+ default: 'present'
+ name:
+ type: str
+ description: The provider's name.
+ required: true
+ type:
+ type: str
+ description: The provider's type.
+ choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+ zone:
+ type: str
+ description: The ManageIQ zone name that will manage the provider.
+ default: 'default'
+ provider_region:
+ type: str
+ description: The provider region name to connect to (e.g. AWS region for Amazon).
+ host_default_vnc_port_start:
+ type: str
+ description: The first port in the host VNC range. defaults to None.
+ host_default_vnc_port_end:
+ type: str
+ description: The last port in the host VNC range. defaults to None.
+ subscription:
+ type: str
+ description: Microsoft Azure subscription ID. defaults to None.
+ project:
+ type: str
+ description: Google Compute Engine Project ID. defaults to None.
+ azure_tenant_id:
+ type: str
+ description: Tenant ID. defaults to None.
+ aliases: [ keystone_v3_domain_id ]
+ tenant_mapping_enabled:
+ type: bool
+ default: 'no'
+ description: Whether to enable mapping of existing tenants. defaults to False.
+ api_version:
+ type: str
+ description: The OpenStack Keystone API version. defaults to None.
+ choices: ['v2', 'v3']
+
+ provider:
+    description: Default endpoint connection information, required if state is present.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ metrics:
+ description: Metrics endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+ path:
+ type: str
+ description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+
+ alerts:
+ description: Alerts endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ type: bool
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ default: true
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ ssh_keypair:
+ description: SSH key pair used for SSH connections to all hosts in this provider.
+ suboptions:
+ hostname:
+ type: str
+ description: Director hostname.
+ required: true
+ userid:
+ type: str
+ description: SSH username.
+ auth_key:
+ type: str
+ description: SSH private key.
+ validate_certs:
+ description:
+ - Whether certificates should be verified for connections.
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+'''
+
+EXAMPLES = '''
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+    type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ role: 'hawkular'
+ hostname: 'example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1:80'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'absent'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+ community.general.manageiq_provider:
+ name: 'EngAmazon'
+ type: 'Amazon'
+ state: 'present'
+ provider:
+ hostname: 'amazon.example.com'
+ userid: 'hello'
+ password: 'world'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'RHEV'
+ type: 'oVirt'
+ state: 'present'
+ provider:
+ hostname: 'rhev01.example.com'
+ userid: 'admin@internal'
+ password: 'password'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ hostname: 'metrics.example.com'
+ path: 'ovirt_engine_history'
+ userid: 'user_id_metrics'
+ password: 'password_metrics'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+- name: Create a new VMware provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngVMware'
+ type: 'VMware'
+ state: 'present'
+ provider:
+ hostname: 'vcenter.example.com'
+ host_default_vnc_port_start: 5800
+ host_default_vnc_port_end: 5801
+ userid: 'root'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+- name: Create a new Azure provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngAzure'
+ type: 'Azure'
+ provider_region: 'northeurope'
+ subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
+ azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
+ state: 'present'
+ provider:
+ hostname: 'azure.example.com'
+ userid: 'e272bd74-f661-484f-b223-88dd128a4049'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://cf-6af0.rhpds.opentlc.com'
+ username: 'admin'
+ password: 'password'
+ validate_certs: false
+
+- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
+ community.general.manageiq_provider:
+ name: 'EngDirector'
+ type: 'Director'
+ api_version: 'v3'
+ state: 'present'
+ provider:
+ hostname: 'director.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ ssh_keypair:
+ hostname: director.example.com
+ userid: heat-admin
+ auth_key: 'SecretSSHPrivateKey'
+
+- name: Create a new OpenStack provider in ManageIQ with amqp metrics
+ community.general.manageiq_provider:
+ name: 'EngOpenStack'
+ type: 'OpenStack'
+ api_version: 'v3'
+ state: 'present'
+ provider_region: 'europe'
+ tenant_mapping_enabled: 'False'
+ keystone_v3_domain_id: 'mydomain'
+ provider:
+ hostname: 'openstack.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ role: amqp
+ hostname: 'amqp.example.com'
+ security_protocol: 'non-ssl'
+ port: 5666
+ userid: admin
+ password: password
+
+
+- name: Create a new GCE provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngGoogle'
+ type: 'GCE'
+ provider_region: 'europe-west1'
+ project: 'project1'
+ state: 'present'
+ provider:
+ hostname: 'gce.example.com'
+ auth_key: 'google_json_key'
+ validate_certs: 'false'
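+
+# Illustrative example: the 'refresh' state only triggers an inventory refresh
+# of an already registered provider; the provider name and connection details
+# below are placeholders.
+- name: Refresh an existing provider in ManageIQ
+  community.general.manageiq_provider:
+    name: 'EngLab'
+    state: 'refresh'
+    manageiq_connection:
+      url: 'https://127.0.0.1'
+      username: 'admin'
+      password: 'password'
+      validate_certs: true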
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+def supported_providers():
+ return dict(
+ Openshift=dict(
+ class_name='ManageIQ::Providers::Openshift::ContainerManager',
+ authtype='bearer',
+ default_role='default',
+ metrics_role='prometheus',
+ alerts_role='prometheus_alerts',
+ ),
+ Amazon=dict(
+ class_name='ManageIQ::Providers::Amazon::CloudManager',
+ ),
+ oVirt=dict(
+ class_name='ManageIQ::Providers::Redhat::InfraManager',
+ default_role='default',
+ metrics_role='metrics',
+ ),
+ VMware=dict(
+ class_name='ManageIQ::Providers::Vmware::InfraManager',
+ ),
+ Azure=dict(
+ class_name='ManageIQ::Providers::Azure::CloudManager',
+ ),
+ Director=dict(
+ class_name='ManageIQ::Providers::Openstack::InfraManager',
+ ssh_keypair_role="ssh_keypair"
+ ),
+ OpenStack=dict(
+ class_name='ManageIQ::Providers::Openstack::CloudManager',
+ ),
+ GCE=dict(
+ class_name='ManageIQ::Providers::Google::CloudManager',
+ ),
+ )
+
+
+def endpoint_list_spec():
+ return dict(
+ provider=dict(type='dict', options=endpoint_argument_spec()),
+ metrics=dict(type='dict', options=endpoint_argument_spec()),
+ alerts=dict(type='dict', options=endpoint_argument_spec()),
+ ssh_keypair=dict(type='dict', options=endpoint_argument_spec()),
+ )
+
+
+def endpoint_argument_spec():
+ return dict(
+ role=dict(),
+ hostname=dict(required=True),
+ port=dict(type='int'),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ certificate_authority=dict(),
+ security_protocol=dict(
+ choices=[
+ 'ssl-with-validation',
+ 'ssl-with-validation-custom-ca',
+ 'ssl-without-validation',
+ 'non-ssl',
+ ],
+ ),
+ userid=dict(),
+ password=dict(no_log=True),
+ auth_key=dict(no_log=True),
+ subscription=dict(no_log=True),
+ project=dict(),
+ uid_ems=dict(),
+ path=dict(),
+ )
+
+
+def delete_nulls(h):
+ """ Remove null entries from a hash
+
+ Returns:
+ a hash without nulls
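+
+    Example (illustrative):
+        delete_nulls({'name': 'EngLab', 'port': None}) => {'name': 'EngLab'}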
+ """
+ if isinstance(h, list):
+ return [delete_nulls(i) for i in h]
+ if isinstance(h, dict):
+ return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
+
+ return h
+
+
+class ManageIQProvider(object):
+ """
+ Object to execute provider management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def class_name_to_type(self, class_name):
+ """ Convert class_name to type
+
+ Returns:
+ the type
+ """
+ out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
+ if len(out) == 1:
+ return out[0]
+
+ return None
+
+ def zone_id(self, name):
+ """ Search for zone id by zone name.
+
+ Returns:
+ the zone id, or send a module Fail signal if zone not found.
+ """
+ zone = self.manageiq.find_collection_resource_by('zones', name=name)
+ if not zone: # zone doesn't exist
+ self.module.fail_json(
+ msg="zone %s does not exist in manageiq" % (name))
+
+ return zone['id']
+
+ def provider(self, name):
+ """ Search for provider object by name.
+
+ Returns:
+ the provider, or None if provider not found.
+ """
+ return self.manageiq.find_collection_resource_by('providers', name=name)
+
+ def build_connection_configurations(self, provider_type, endpoints):
+ """ Build "connection_configurations" objects from
+ requested endpoints provided by user
+
+ Returns:
+ the user requested provider endpoints list
+ """
+ connection_configurations = []
+ endpoint_keys = endpoint_list_spec().keys()
+ provider_defaults = supported_providers().get(provider_type, {})
+
+ # get endpoint defaults
+ endpoint = endpoints.get('provider')
+ default_auth_key = endpoint.get('auth_key')
+
+ # build a connection_configuration object for each endpoint
+ for endpoint_key in endpoint_keys:
+ endpoint = endpoints.get(endpoint_key)
+ if endpoint:
+ # get role and authtype
+ role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
+ if role == 'default':
+ authtype = provider_defaults.get('authtype') or role
+ else:
+ authtype = role
+
+ # set a connection_configuration
+ connection_configurations.append({
+ 'endpoint': {
+ 'role': role,
+ 'hostname': endpoint.get('hostname'),
+ 'port': endpoint.get('port'),
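+                        # the API expects verify_ssl as 0/1, so the validate_certs boolean indexes into [0, 1]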
+ 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)],
+ 'security_protocol': endpoint.get('security_protocol'),
+ 'certificate_authority': endpoint.get('certificate_authority'),
+ 'path': endpoint.get('path'),
+ },
+ 'authentication': {
+ 'authtype': authtype,
+ 'userid': endpoint.get('userid'),
+ 'password': endpoint.get('password'),
+ 'auth_key': endpoint.get('auth_key') or default_auth_key,
+ }
+ })
+
+ return connection_configurations
+
+ def delete_provider(self, provider):
+ """ Deletes a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Edit a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ connection_configurations=endpoints,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ )
+
+        # NOTE: we do not check for diffs between the requested and the current
+        #       provider; endpoints are always submitted with passwords or
+        #       auth_keys, since we cannot compare them with the current
+        #       password or auth_key. Every edit request is therefore sent to
+        #       the ManageIQ API without comparing it to the current state.
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to update provider
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the provider %s: %s" % (provider['name'], result))
+
+ def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Creates the provider in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ connection_configurations=endpoints,
+ )
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to create a new provider
+ try:
+ url = '%s/providers' % (self.api_url)
+ result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the provider %s: %s" % (name, result['results']))
+
+ def refresh(self, provider, name):
+ """ Trigger provider refresh.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='refresh')
+ except Exception as e:
+ self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="refreshing provider %s" % name)
+
+
+def main():
+ zone_id = None
+ endpoints = []
+ argument_spec = dict(
+ state=dict(choices=['absent', 'present', 'refresh'], default='present'),
+ name=dict(required=True),
+ zone=dict(default='default'),
+ provider_region=dict(),
+ host_default_vnc_port_start=dict(),
+ host_default_vnc_port_end=dict(),
+ subscription=dict(),
+ project=dict(),
+ azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
+ tenant_mapping_enabled=dict(default=False, type='bool'),
+ api_version=dict(choices=['v2', 'v3']),
+        type=dict(choices=list(supported_providers().keys())),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+ # add the endpoint arguments to the arguments
+ argument_spec.update(endpoint_list_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['provider']),
+ ('state', 'refresh', ['name'])],
+ required_together=[
+ ['host_default_vnc_port_start', 'host_default_vnc_port_end']
+ ],
+ )
+
+ name = module.params['name']
+ zone_name = module.params['zone']
+ provider_type = module.params['type']
+ raw_endpoints = module.params
+ provider_region = module.params['provider_region']
+ host_default_vnc_port_start = module.params['host_default_vnc_port_start']
+ host_default_vnc_port_end = module.params['host_default_vnc_port_end']
+ subscription = module.params['subscription']
+ uid_ems = module.params['azure_tenant_id']
+ project = module.params['project']
+ tenant_mapping_enabled = module.params['tenant_mapping_enabled']
+ api_version = module.params['api_version']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_provider = ManageIQProvider(manageiq)
+
+ provider = manageiq_provider.provider(name)
+
+ # provider should not exist
+ if state == "absent":
+ # if we have a provider, delete it
+ if provider:
+ res_args = manageiq_provider.delete_provider(provider)
+ # if we do not have a provider, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ # provider should exist
+ if state == "present":
+ # get data user did not explicitly give
+ if zone_name:
+ zone_id = manageiq_provider.zone_id(zone_name)
+
+ # if we do not have a provider_type, use the current provider_type
+ if provider and not provider_type:
+ provider_type = manageiq_provider.class_name_to_type(provider['type'])
+
+ # check supported_providers types
+ if not provider_type:
+ manageiq_provider.module.fail_json(
+ msg="missing required argument: provider_type")
+
+ # check supported_providers types
+ if provider_type not in supported_providers().keys():
+ manageiq_provider.module.fail_json(
+ msg="provider_type %s is not supported" % (provider_type))
+
+ # build "connection_configurations" objects from user requested endpoints
+ # "provider" is a required endpoint, if we have it, we have endpoints
+ if raw_endpoints.get("provider"):
+ endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
+
+ # if we have a provider, edit it
+ if provider:
+ res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+ # if we do not have a provider, create it
+ else:
+ res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+
+ # refresh provider (trigger sync)
+ if state == "refresh":
+ if provider:
+ res_args = manageiq_provider.refresh(provider, name)
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py
new file mode 100644
index 00000000..68de2324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tags.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags
+
+short_description: Management of resource tags in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - tags should not exist,
+ - present - tags should exist,
+ - list - list current tags.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ tags:
+ type: list
+ description:
+ - tags - list of dictionaries, each includes 'name' and 'category' keys.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the relevant resource type in manageiq
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the relevant resource name in manageiq
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create new tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Remove tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def query_resource_id(manageiq, resource_type, resource_name):
+ """ Query the resource name in ManageIQ.
+
+ Returns:
+ the resource id if it exists in manageiq, Fail otherwise.
+ """
+ resource = manageiq.find_collection_resource_by(resource_type, name=resource_name)
+ if resource:
+ return resource["id"]
+ else:
+ msg = "{resource_name} {resource_type} does not exist in manageiq".format(
+ resource_name=resource_name, resource_type=resource_type)
+ manageiq.module.fail_json(msg=msg)
+
+
+class ManageIQTags(object):
+ """
+ Object to execute tags management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def full_tag_name(self, tag):
+ """ Returns the full tag name in manageiq
+ """
+ return '/managed/{tag_category}/{tag_name}'.format(
+ tag_category=tag['category'],
+ tag_name=tag['name'])
+
+ def clean_tag_object(self, tag):
+ """ Clean a tag object to have human readable form of:
+ {
+ full_name: STR,
+ name: STR,
+ display_name: STR,
+ category: STR
+ }
+ """
+ full_name = tag.get('name')
+ categorization = tag.get('categorization', {})
+
+ return dict(
+ full_name=full_name,
+ name=categorization.get('name'),
+ display_name=categorization.get('display_name'),
+ category=categorization.get('category', {}).get('name'))
+
+ def query_resource_tags(self):
+ """ Returns a set of the tag objects assigned to the resource
+ """
+ url = '{resource_url}/tags?expand=resources&attributes=categorization'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} tags: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api tag object to look like:
+ # {full_name: STR, name: STR, display_name: STR, category: STR}
+ tags = [self.clean_tag_object(tag) for tag in resources]
+
+ return tags
+
+ def tags_to_update(self, tags, action):
+ """ Create a list of tags we need to update in ManageIQ.
+
+ Returns:
+            the list of tag objects that need to be posted to ManageIQ
+            for the requested action.
+ """
+ tags_to_post = []
+ assigned_tags = self.query_resource_tags()
+
+        # make a set of the assigned full tag name strings
+        # e.g. {'/managed/environment/prod', ...}
+ assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+ for tag in tags:
+ assigned = self.full_tag_name(tag) in assigned_tags_set
+
+ if assigned and action == 'unassign':
+ tags_to_post.append(tag)
+ elif (not assigned) and action == 'assign':
+ tags_to_post.append(tag)
+
+ return tags_to_post
+
+ def assign_or_unassign_tags(self, tags, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of tags needed to be changed
+ tags_to_post = self.tags_to_update(tags, action)
+ if not tags_to_post:
+ return dict(
+ changed=False,
+ msg="Tags already {action}ed, nothing to do".format(action=action))
+
+ # try to assign or unassign tags to resource
+ url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=tags)
+ except Exception as e:
+ msg = "Failed to {action} tag: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed tags
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed tags".format(action=action))
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ tags=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['tags']),
+ ('state', 'absent', ['tags'])
+ ],
+ )
+
+ tags = module.params['tags']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = query_resource_id(manageiq, resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+ else:
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
new file mode 100644
index 00000000..3ec174cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+- manageiq-client
+options:
+ state:
+ type: str
+ description:
+      - absent - tenant should not exist,
+      - present - tenant should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The tenant name.
+ required: true
+ default: null
+ description:
+ type: str
+ description:
+ - The tenant description.
+ required: true
+ default: null
+ parent_id:
+ type: int
+ description:
+ - The id of the parent tenant. If not supplied the root tenant is used.
+      - The C(parent_id) takes precedence over C(parent) when both are supplied.
+ required: false
+ default: null
+ parent:
+ type: str
+ description:
+ - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+ required: false
+ default: null
+ quotas:
+ type: dict
+ description:
+ - The tenant quotas.
+      - All parameters are case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(cpu_allocated) (int): use null to remove the quota.'
+ - ' - C(mem_allocated) (GB): use null to remove the quota.'
+ - ' - C(storage_allocated) (GB): use null to remove the quota.'
+ - ' - C(vms_allocated) (int): use null to remove the quota.'
+ - ' - C(templates_allocated) (int): use null to remove the quota.'
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'My Company'
+ description: 'My company name'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ description: 'Manufacturing department'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ parent_id: 1
+ quotas:
+      cpu_allocated: 100
+      mem_allocated: 50
+      vms_allocated: null
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+
+- name: Delete a tenant in ManageIQ using a token
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+tenant:
+ description: The tenant.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The tenant id
+ returned: success
+ type: int
+ name:
+ description: The tenant name
+ returned: success
+ type: str
+ description:
+ description: The tenant description
+ returned: success
+ type: str
+ parent_id:
+ description: The id of the parent tenant
+ returned: success
+ type: int
+ quotas:
+ description: List of tenant quotas
+ returned: success
+ type: list
+ sample:
+ cpu_allocated: 100
+ mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+ """
+ Object to execute tenant management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def tenant(self, name, parent_id, parent):
+ """ Search for tenant object by name and parent_id or parent
+ or the root tenant if no parent or parent_id is supplied.
+ Returns:
+ the parent tenant, None for the root tenant
+ the tenant or None if tenant was not found.
+ """
+
+ if parent_id:
+ parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+ parent_tenant = parent_tenant_res[0]
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
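+                # ancestry is a '/'-separated chain of ancestor ids; its last element is the direct parent id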
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if int(tenant_parent_id) == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ if parent:
+ parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+ if len(parent_tenant_res) > 1:
+ self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
+
+ parent_tenant = parent_tenant_res[0]
+ parent_id = int(parent_tenant['id'])
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+            # No parent or parent_id supplied; select the root tenant
+ return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+ def compare_tenant(self, tenant, name, description):
+ """ Compare tenant fields with new field values.
+
+ Returns:
+            False if the tenant fields differ from the new values, True otherwise.
+ """
+ found_difference = (
+ (name and tenant['name'] != name) or
+ (description and tenant['description'] != description)
+ )
+
+ return not found_difference
+
+ def delete_tenant(self, tenant):
+ """ Deletes a tenant from manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ try:
+ url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_tenant(self, tenant, name, description):
+ """ Edit a manageiq tenant.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+        # check if we need to update (compare_tenant returns True if no difference was found)
+ if self.compare_tenant(tenant, name, description):
+ return dict(
+ changed=False,
+ msg="tenant %s is not changed." % tenant['name'],
+ tenant=tenant['_data'])
+
+ # try to update tenant
+ try:
+ result = self.client.post(tenant['href'], action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+ def create_tenant(self, name, description, parent_tenant):
+ """ Creates the tenant in manageiq.
+
+ Returns:
+ dict with `msg`, `changed` and `tenant_id`
+ """
+ parent_id = parent_tenant['id']
+ # check for required arguments
+ for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/tenants' % self.api_url
+
+ resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ tenant_id = result['results'][0]['id']
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+ tenant_id=tenant_id)
+
+ def tenant_quota(self, tenant, quota_key):
+ """ Search for tenant quota object by tenant and quota_key.
+ Returns:
+            the matching quota resources for the tenant (an empty list if the quota was not found).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+ return tenant_quotas['resources']
+
+ def tenant_quotas(self, tenant):
+ """ Search for tenant quotas object by tenant.
+ Returns:
+            the quotas for the tenant (an empty list if no tenant quotas were found).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+ return tenant_quotas['resources']
+
+ def update_tenant_quotas(self, tenant, quotas):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+
+ changed = False
+ messages = []
+ for quota_key, quota_value in quotas.items():
+ current_quota_filtered = self.tenant_quota(tenant, quota_key)
+ if current_quota_filtered:
+ current_quota = current_quota_filtered[0]
+ else:
+ current_quota = None
+
+ if quota_value:
+                # convert GB values to bytes for storage and memory quotas
+ if quota_key in ['storage_allocated', 'mem_allocated']:
+ quota_value_int = int(quota_value) * 1024 * 1024 * 1024
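+                    # e.g. a mem_allocated value of 50 (GB) is sent to the API as 53687091200 bytes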
+ else:
+ quota_value_int = int(quota_value)
+ if current_quota:
+ res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+ else:
+ res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+ else:
+ if current_quota:
+ res = self.delete_tenant_quota(tenant, current_quota)
+ else:
+ res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+ if res['changed']:
+ changed = True
+
+ messages.append(res['msg'])
+
+ return dict(
+ changed=changed,
+ msg=', '.join(messages))
+
+ def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+ """ Update the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+
+ if current_quota['value'] == quota_value:
+ return dict(
+ changed=False,
+ msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+ else:
+
+ url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+ resource = {'value': quota_value}
+ try:
+ self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated tenant quota %s" % quota_key)
+
+ def create_tenant_quota(self, tenant, quota_key, quota_value):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ url = '%s/quotas' % (tenant['href'])
+ resource = {'name': quota_key, 'value': quota_value}
+ try:
+ self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant quota %s" % quota_key)
+
+ def delete_tenant_quota(self, tenant, quota):
+ """ deletes the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ try:
+ result = self.client.post(quota['href'], action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def create_tenant_response(self, tenant, parent_tenant):
+ """ Creates the ansible result object from a manageiq tenant entity
+
+ Returns:
+ a dict with the tenant id, name, description, parent id,
+ quota's
+ """
+ tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+ try:
+ ancestry = tenant['ancestry']
+ tenant_parent_id = ancestry.split("/")[-1]
+ except AttributeError:
+ # The root tenant does not return the ancestry attribute
+ tenant_parent_id = None
+
+ return dict(
+ id=tenant['id'],
+ name=tenant['name'],
+ description=tenant['description'],
+ parent_id=tenant_parent_id,
+ quotas=tenant_quotas
+ )
+
+ @staticmethod
+ def create_tenant_quotas_response(tenant_quotas):
+ """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+ Returns:
+ a dict with the applied quotas, name and value
+ """
+
+ if not tenant_quotas:
+ return {}
+
+ result = {}
+ for quota in tenant_quotas:
+ if quota['unit'] == 'bytes':
+ value = float(quota['value']) / (1024 * 1024 * 1024)
+ else:
+ value = quota['value']
+ result[quota['name']] = value
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ description=dict(required=True, type='str'),
+ parent_id=dict(required=False, type='int'),
+ parent=dict(required=False, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ quotas=dict(type='dict', default={})
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ name = module.params['name']
+ description = module.params['description']
+ parent_id = module.params['parent_id']
+ parent = module.params['parent']
+ state = module.params['state']
+ quotas = module.params['quotas']
+
+ manageiq = ManageIQ(module)
+ manageiq_tenant = ManageIQTenant(manageiq)
+
+ parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+ # tenant should not exist
+ if state == "absent":
+ # if we have a tenant, delete it
+ if tenant:
+ res_args = manageiq_tenant.delete_tenant(tenant)
+ # if we do not have a tenant, nothing to do
+ else:
+ if parent_id:
+ msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+ else:
+ msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+ res_args = dict(
+ changed=False,
+ msg=msg)
+
+ # tenant should exist
+ if state == "present":
+ # if we have a tenant, edit it
+ if tenant:
+ res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+ # if we do not have a tenant, create it
+ else:
+ res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+ tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+ # quotas are supplied and we have a tenant
+ if quotas:
+ tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+ if tenant_quotas_res['changed']:
+ res_args['changed'] = True
+ res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+ tenant.reload(expand='resources', attributes=['tenant_quotas'])
+ res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py
new file mode 100644
index 00000000..8905dde2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/manageiq_user.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+#
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - user should not exist, present - user should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ userid:
+ type: str
+ description:
+ - The unique userid in manageiq, often referred to as the username.
+ required: true
+ name:
+ type: str
+ description:
+ - The user's full name.
+ password:
+ type: str
+ description:
+ - The user's password.
+ group:
+ type: str
+ description:
+ - The name of the group to which the user belongs.
+ email:
+ type: str
+ description:
+ - The user's e-mail address.
+ update_password:
+ type: str
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a new user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ using a token
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
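+
+# Illustrative sketch, not part of the original examples: it shows update_password: 'on_create'
+# as documented above, so an existing user's password is left untouched on repeated runs.
+# Connection values simply mirror the examples above.
+- name: Ensure a user exists without overwriting an existing password
+  community.general.manageiq_user:
+    userid: 'jdoe'
+    name: 'Jane Doe'
+    password: 'VerySecret'
+    group: 'EvmGroup-user'
+    update_password: 'on_create'
+    manageiq_connection:
+      url: 'http://127.0.0.1:3000'
+      username: 'admin'
+      password: 'smartvm'
+      validate_certs: False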
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+ """
+ Object to execute user management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group_id(self, description):
+ """ Search for group id by group description.
+
+ Returns:
+ the group id, or fails the module if the group is not found.
+ """
+ group = self.manageiq.find_collection_resource_by('groups', description=description)
+ if not group: # group doesn't exist
+ self.module.fail_json(
+ msg="group %s does not exist in manageiq" % (description))
+
+ return group['id']
+
+ def user(self, userid):
+ """ Search for user object by userid.
+
+ Returns:
+ the user, or None if user not found.
+ """
+ return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+ def compare_user(self, user, name, group_id, password, email):
+ """ Compare user fields with new field values.
+
+ Returns:
+ False if the user fields differ from the new field values, True otherwise.
+ """
+ found_difference = (
+ (name and user['name'] != name) or
+ (password is not None) or
+ (email and user['email'] != email) or
+ (group_id and user['current_group_id'] != group_id)
+ )
+
+ return not found_difference
+
+ def delete_user(self, user):
+ """ Deletes a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/users/%s' % (self.api_url, user['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_user(self, user, name, group, password, email):
+ """ Edit a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ group_id = None
+ url = '%s/users/%s' % (self.api_url, user['id'])
+
+ resource = dict(userid=user['userid'])
+ if group is not None:
+ group_id = self.group_id(group)
+ resource['group'] = dict(id=group_id)
+ if name is not None:
+ resource['name'] = name
+ if email is not None:
+ resource['email'] = email
+
+ # if there is a password param, but 'update_password' is 'on_create'
+ # then discard the password (since we're editing an existing user)
+ if self.module.params['update_password'] == 'on_create':
+ password = None
+ if password is not None:
+ resource['password'] = password
+
+ # check if we need to update (compare_user is True if no difference is found)
+ if self.compare_user(user, name, group_id, password, email):
+ return dict(
+ changed=False,
+ msg="user %s is not changed." % (user['userid']))
+
+ # try to update user
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+ def create_user(self, userid, name, group, password, email):
+ """ Creates the user in manageiq.
+
+ Returns:
+ the created user id, name, created_on timestamp,
+ updated_on timestamp, userid and current_group_id.
+ """
+ # check for required arguments
+ for key, value in dict(name=name, group=group, password=password).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % (key))
+
+ group_id = self.group_id(group)
+ url = '%s/users' % (self.api_url)
+
+ resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+ if email is not None:
+ resource['email'] = email
+
+ # try to create a new user
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+ argument_spec = dict(
+ userid=dict(required=True, type='str'),
+ name=dict(),
+ password=dict(no_log=True),
+ group=dict(),
+ email=dict(),
+ state=dict(choices=['absent', 'present'], default='present'),
+ update_password=dict(choices=['always', 'on_create'],
+ default='always'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ userid = module.params['userid']
+ name = module.params['name']
+ password = module.params['password']
+ group = module.params['group']
+ email = module.params['email']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_user = ManageIQUser(manageiq)
+
+ user = manageiq_user.user(userid)
+
+ # user should not exist
+ if state == "absent":
+ # if we have a user, delete it
+ if user:
+ res_args = manageiq_user.delete_user(user)
+ # if we do not have a user, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="user %s: does not exist in manageiq" % (userid))
+
+ # user should exist
+ if state == "present":
+ # if we have a user, edit it
+ if user:
+ res_args = manageiq_user.edit_user(user, name, group, password, email)
+ # if we do not have a user, create it
+ else:
+ res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py
new file mode 100644
index 00000000..bc3e6dfd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mas.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mas
+short_description: Manage Mac App Store applications with mas-cli
+description:
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+version_added: '0.2.0'
+author:
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
+options:
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+ - The C(absent) value requires root permissions, also see the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: "no"
+ aliases: ["upgrade"]
+requirements:
+ - macOS 10.11+
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Install Keynote
+ community.general.mas:
+ id: 409183694
+ state: present
+
+- name: Install Divvy with command mas installed in /usr/local/bin
+ community.general.mas:
+ id: 413857545
+ state: present
+ environment:
+ PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
+
+- name: Install a list of apps
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+
+- name: Ensure the latest Keynote version is installed
+ community.general.mas:
+ id: 409183694
+ state: latest
+
+- name: Upgrade all installed Mac App Store apps
+ community.general.mas:
+ upgrade_all: yes
+
+- name: Install specific apps and also upgrade all others
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+ upgrade_all: yes
+
+- name: Uninstall Divvy
+ community.general.mas:
+ id: 413857545
+ state: absent
+ become: yes # Uninstallation requires root permissions
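+
+# Illustrative sketch, not part of the original examples: verify the Apple ID sign-in
+# requirement up front with `mas account`, as mentioned under requirements. The task
+# layout and the registered variable name are assumptions, not part of this module.
+- name: Check that an Apple ID is signed in to the Mac App Store
+  ansible.builtin.command: mas account
+  register: mas_account
+  changed_when: false
+  failed_when: "'Not signed in' in mas_account.stdout"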
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import StrictVersion
+import os
+
+
+class Mas(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # Initialize data properties
+ self.mas_path = self.module.get_bin_path('mas')
+ self._checked_signin = False
+ self._installed = None # Populated only if needed
+ self._outdated = None # Populated only if needed
+ self.count_install = 0
+ self.count_upgrade = 0
+ self.count_uninstall = 0
+ self.result = {
+ 'changed': False
+ }
+
+ self.check_mas_tool()
+
+ def app_command(self, command, id):
+ ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
+
+ if not self.module.check_mode:
+ if command != 'uninstall':
+ self.check_signin()
+
+ rc, out, err = self.run([command, str(id)])
+ if rc != 0:
+ self.module.fail_json(
+ msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
+ )
+
+ # No error or dry run
+ self.__dict__['count_' + command] += 1
+
+ def check_mas_tool(self):
+ ''' Verifies that the `mas` tool is available in a recent version '''
+
+ # Is the `mas` tool available at all?
+ if not self.mas_path:
+ self.module.fail_json(msg='Required `mas` tool is not installed')
+
+ # Is the version recent enough?
+ rc, out, err = self.run(['version'])
+ if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
+ self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
+
+ def check_signin(self):
+ ''' Verifies that the user is signed in to the Mac App Store '''
+
+ # Only check this once per execution
+ if self._checked_signin:
+ return
+
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
+
+ self._checked_signin = True
+
+ def exit(self):
+ ''' Exit with the data we have collected over time '''
+
+ msgs = []
+ if self.count_install > 0:
+ msgs.append('Installed {0} app(s)'.format(self.count_install))
+ if self.count_upgrade > 0:
+ msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
+ if self.count_uninstall > 0:
+ msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
+
+ if msgs:
+ self.result['changed'] = True
+ self.result['msg'] = ', '.join(msgs)
+
+ self.module.exit_json(**self.result)
+
+ def get_current_state(self, command):
+ ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
+
+ rc, raw_apps, err = self.run([command])
+ rows = raw_apps.split("\n")
+ if rows[0] == "No installed apps found":
+ rows = []
+ apps = []
+ for r in rows:
+ # Format: "123456789 App Name"
+ r = r.split(' ', 1)
+ if len(r) == 2:
+ apps.append(int(r[0]))
+
+ return apps
+
+ def installed(self):
+ ''' Returns the list of installed apps '''
+
+ # Populate cache if not already done
+ if self._installed is None:
+ self._installed = self.get_current_state('list')
+
+ return self._installed
+
+ def is_installed(self, id):
+ ''' Checks whether the given app is installed '''
+
+ return int(id) in self.installed()
+
+ def is_outdated(self, id):
+ ''' Checks whether the given app is installed, but outdated '''
+
+ return int(id) in self.outdated()
+
+ def outdated(self):
+ ''' Returns the list of installed, but outdated apps '''
+
+ # Populate cache if not already done
+ if self._outdated is None:
+ self._outdated = self.get_current_state('outdated')
+
+ return self._outdated
+
+ def run(self, cmd):
+ ''' Runs a command of the `mas` tool '''
+
+ cmd.insert(0, self.mas_path)
+ return self.module.run_command(cmd, False)
+
+ def upgrade_all(self):
+ ''' Upgrades all installed apps and sets the correct result data '''
+
+ outdated = self.outdated()
+
+ if not self.module.check_mode:
+ self.check_signin()
+
+ rc, out, err = self.run(['upgrade'])
+ if rc != 0:
+ self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
+
+ self.count_upgrade += len(outdated)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='list', elements='int'),
+ state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
+ upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
+ ),
+ supports_check_mode=True
+ )
+ mas = Mas(module)
+
+ if module.params['id']:
+ apps = module.params['id']
+ else:
+ apps = []
+
+ state = module.params['state']
+ upgrade = module.params['upgrade_all']
+
+ # Run operations on the given app IDs
+ for app in sorted(set(apps)):
+ if state == 'present':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+
+ elif state == 'absent':
+ if mas.is_installed(app):
+ # Ensure we are root
+ if os.getuid() != 0:
+ module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
+
+ mas.app_command('uninstall', app)
+
+ elif state == 'latest':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+ elif mas.is_outdated(app):
+ mas.app_command('upgrade', app)
+
+ # Upgrade all apps if requested
+ mas._outdated = None # Clear cache
+ if upgrade and mas.outdated():
+ mas.upgrade_all()
+
+ # Exit with the collected data
+ mas.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py
new file mode 100644
index 00000000..d94ed2b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/matrix.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: "Jan Christian Grünhage (@jcgruenhage)"
+module: matrix
+short_description: Send notifications to Matrix
+description:
+ - This module sends HTML-formatted notifications to Matrix rooms.
+options:
+ msg_plain:
+ type: str
+ description:
+ - Plain-text form of the message to send to Matrix, usually markdown.
+ required: true
+ msg_html:
+ type: str
+ description:
+ - HTML form of the message to send to Matrix.
+ required: true
+ room_id:
+ type: str
+ description:
+ - ID of the room to send the notification to
+ required: true
+ hs_url:
+ type: str
+ description:
+ - URL of the homeserver, where the CS-API is reachable
+ required: true
+ token:
+ type: str
+ description:
+ - Authentication token for the API call. If provided, user_id and password are not required
+ user_id:
+ type: str
+ description:
+ - The user ID to log in with.
+ password:
+ type: str
+ description:
+ - The password to log in with
+requirements:
+ - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ user_id: "ansible_notification_bot"
+ password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+ from matrix_client.client import MatrixClient
+except ImportError:
+ MATRIX_IMP_ERR = traceback.format_exc()
+ matrix_found = False
+else:
+ matrix_found = True
+
+
+def run_module():
+ module_args = dict(
+ msg_plain=dict(type='str', required=True),
+ msg_html=dict(type='str', required=True),
+ room_id=dict(type='str', required=True),
+ hs_url=dict(type='str', required=True),
+ token=dict(type='str', required=False, no_log=True),
+ user_id=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[['password', 'token']],
+ required_one_of=[['password', 'token']],
+ required_together=[['user_id', 'password']],
+ supports_check_mode=True
+ )
+
+ if not matrix_found:
+ module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+ if module.check_mode:
+ return result
+
+ # create a client object
+ client = MatrixClient(module.params['hs_url'])
+ if module.params['token'] is not None:
+ client.api.token = module.params['token']
+ else:
+ client.login(module.params['user_id'], module.params['password'], sync=False)
+
+ # make sure we are in a given room and return a room object for it
+ room = client.join_room(module.params['room_id'])
+ # send an HTML-formatted message
+ room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py
new file mode 100644
index 00000000..579cfa5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mattermost.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Benjamin Jolivot <bjolivot@gmail.com>
+# Inspired by slack module :
+# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# # (c) 2016, René Moser <mail@renemoser.net>
+# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+ - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+options:
+ url:
+ type: str
+ description:
+ - Mattermost URL (for example, http://mattermost.yourcompany.com).
+ required: true
+ api_key:
+ type: str
+ description:
+ - Mattermost webhook API key. Log into your Mattermost site, go to
+ Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+ This will give you the full URL; api_key is the last part, for example
+ http://mattermost.example.com/hooks/C(API_KEY).
+ required: true
+ text:
+ type: str
+ description:
+ - Text to send. Note that the module does not handle escaping characters.
+ required: true
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ username:
+ type: str
+ description:
+ - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin, see the Mattermost documentation).
+ default: Ansible
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: https://www.ansible.com/favicon.ico
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: yes
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost all options
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+ channel: notifications
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
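+
+# Illustrative sketch, not part of the original examples: a self-hosted server with a
+# self-signed certificate, using the validate_certs option documented above.
+- name: Send notification to a Mattermost server with a self-signed certificate
+  community.general.mattermost:
+    url: https://mattermost.internal.example.com
+    api_key: my_api_key
+    text: '{{ inventory_hostname }} completed'
+    validate_certs: no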
+"""
+
+RETURN = '''
+payload:
+ description: Mattermost payload
+ returned: success
+ type: str
+webhook_url:
+ description: URL the webhook is sent to
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str', required=True),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+ # init return dict
+ result = dict(changed=False, msg="OK")
+
+ # define webhook
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+ result['webhook_url'] = webhook_url
+
+ # define payload
+ payload = {}
+ for param in ['text', 'channel', 'username', 'icon_url']:
+ if module.params[param] is not None:
+ payload[param] = module.params[param]
+
+ payload = module.jsonify(payload)
+ result['payload'] = payload
+
+ # http headers
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+ # notes:
+ # Nothing is done in check mode.
+ # The task will pass even if the server is down and/or the token is invalid.
+ # If someone finds a good way to check, please improve this.
+
+ # send request if not in test mode
+ if module.check_mode is False:
+ response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+ # something's wrong
+ if info['status'] != 200:
+ result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+ module.fail_json(**result)
+
+ # Looks good
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py
new file mode 100644
index 00000000..03c3d4d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/maven_artifact.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+ - Can retrieve snapshots or release versions of the artifact, and will resolve the latest available
+ version if none is specified.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - lxml
+ - boto3 if using an S3 repository (s3://...)
+options:
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate
+ - Mutually exclusive with I(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+ - The range types "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
+ - Mutually exclusive with I(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+ - Use file://... if the repository is local, added in version 2.6
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+ - The username to authenticate as to the Maven Repository. Use the AWS access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_key" ]
+ password:
+ type: str
+ description:
+ - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_access_key" ]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon initial request.
+ default: 'no'
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+ - The path where the artifact should be written to
+ - If file mode or ownerships are specified and destination path already exists, they affect the downloaded file
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact
+ default: present
+ choices: [present,absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ default: 10
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+ type: bool
+ default: 'yes'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+ - If C(yes), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+ - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+ is defined.
+ type: bool
+ default: 'no'
+ verify_checksum:
+ type: str
+ description:
+ - If C(never), the md5 checksum will never be downloaded and verified.
+ - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
+ - If C(change), the md5 checksum will be downloaded and verified if the destination already exists,
+ to verify whether the files are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
+ downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
+ if the artifact has not been cached yet, it may fail unexpectedly.
+ If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+ use it to verify integrity after download.
+ - C(always) combines C(download) and C(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+extends_documentation_fragment:
+ - files
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ client_cert: /path/to/cert.pem
+ client_key: /path/to/key.pem
+ dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e. retain the version
+ community.general.maven_artifact:
+ version: latest
+ artifact_id: spring-core
+ group_id: org.springframework
+ dest: /tmp/
+ keep_name: yes
+
+- name: Download the latest version of the JUnit framework artifact from Maven local
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+ repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
+
+- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version_by_spec: "[3.8,4.0)"
+ dest: /tmp/
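+
+# Illustrative sketch, not part of the original examples: force md5 verification both
+# before and after the download, per the verify_checksum option documented above.
+- name: Download JUnit 4.11 and always verify the md5 checksum
+  community.general.maven_artifact:
+    group_id: junit
+    artifact_id: junit
+    version: 4.11
+    verify_checksum: always
+    dest: /tmp/junit-4.11.jar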
+'''
+
+import hashlib
+import os
+import posixpath
+import shutil
+import io
+import tempfile
+import traceback
+import re
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from re import match
+
+LXML_ETREE_IMP_ERR = None
+try:
+ from lxml import etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+BOTO_IMP_ERR = None
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+SEMANTIC_VERSION_IMP_ERR = None
+try:
+ from semantic_version import Version, Spec
+ HAS_SEMANTIC_VERSION = True
+except ImportError:
+ SEMANTIC_VERSION_IMP_ERR = traceback.format_exc()
+ HAS_SEMANTIC_VERSION = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
+ if head == dirname:
+ return None, [head]
+ else:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return head, [tail]
+ new_directory_list.append(tail)
+ return pre_existing_dir, new_directory_list
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+ if new_directory_list:
+ first_sub_dir = new_directory_list.pop(0)
+ if not pre_existing_dir:
+ working_dir = first_sub_dir
+ else:
+ working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.version_by_spec = version_by_spec
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
+ timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version)
+ if timestamp_version_match:
+ base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT")
+ else:
+ base = posixpath.join(base, self.version)
+ return base
+
+ def _generate_filename(self):
+ filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+ if not self.classifier:
+ filename = self.artifact_id + "." + self.extension
+ return filename
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+ if self.classifier:
+ result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ return result
+
+ @staticmethod
+ def parse(input):
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+ v = parts[len(parts) - 1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+ return Artifact(g, a, v, c, t)
+ else:
+ return None
+
+
+class MavenDownloader:
+ def __init__(self, module, base, local=False, headers=None):
+ self.module = module
+ if base.endswith("/"):
+ base = base.rstrip("/")
+ self.base = base
+ self.local = local
+ self.headers = headers
+ self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+ self.latest_version_found = None
+ self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+ def find_version_by_spec(self, artifact):
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+ versions = []
+ for version in original_versions:
+ try:
+ versions.append(Version.coerce(version))
+ except ValueError:
+ # This means that version string is not a valid semantic versioning
+ pass
+
+ parse_versions_syntax = {
+ # example -> (,1.0]
+ r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+ # example -> 1.0
+ r"^(?P<version>[0-9.]*)$": "~={version}",
+ # example -> [1.0]
+ r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+ # example -> [1.2, 1.3]
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+ # example -> [1.2, 1.3)
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+ # example -> [1.5,)
+ r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+ }
+
+ for regex, spec_format in parse_versions_syntax.items():
+ regex_result = match(regex, artifact.version_by_spec)
+ if regex_result:
+ spec = Spec(spec_format.format(**regex_result.groupdict()))
+ selected_version = spec.select(versions)
+
+ if not selected_version:
+ raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+ # Handle repos on maven that don't have a patch number on the first build (e.g. 3.8 instead of 3.8.0)
+ if str(selected_version) not in original_versions:
+ selected_version.patch = None
+
+ return str(selected_version)
+
+ raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+ def find_latest_version_available(self, artifact):
+ if self.latest_version_found:
+ return self.latest_version_found
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ self.latest_version_found = v[0]
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version_by_spec:
+ artifact.version = self.find_version_by_spec(artifact)
+
+ if artifact.version == "latest":
+ artifact.version = self.find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ if self.local:
+ return self._uri_for_artifact(artifact, artifact.version)
+ path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ classifier = snapshotArtifact.xpath("classifier/text()")
+ artifact_classifier = classifier[0] if classifier else ''
+ extension = snapshotArtifact.xpath("extension/text()")
+ artifact_extension = extension[0] if extension else ''
+ if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+ timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+ if timestamp_xmlpath:
+ timestamp = timestamp_xmlpath[0]
+ build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+ raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ # for small files, directly get the full content
+ def _getContent(self, url, failmsg, force=True):
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ with io.open(parsed_url.path, 'rb') as f:
+ return f.read()
+ if force:
+ raise ValueError(failmsg + " because can not find file: " + url)
+ return None
+ response = self._request(url, failmsg, force)
+ if response:
+ return response.read()
+ return None
+
+ # only for HTTP request
+ def _request(self, url, failmsg, force=True):
+ url_to_use = url
+ parsed_url = urlparse(url)
+
+ if parsed_url.scheme == 's3':
+ parsed_url = urlparse(url)
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+ req_timeout = self.module.params.get('timeout')
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.user_agent
+
+ response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
+ if info['status'] == 200:
+ return response
+ if force:
+ raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
+ return None
+
+ def download(self, tmpdir, artifact, verify_download, filename=None):
+ if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+ artifact.classifier, artifact.extension)
+ url = self.find_uri_for_artifact(artifact)
+ tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+ try:
+ # copy to temp file
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ shutil.copy2(parsed_url.path, tempname)
+ else:
+ return "Can not find local file: " + parsed_url.path
+ else:
+ response = self._request(url, "Failed to download artifact " + str(artifact))
+ with os.fdopen(tempfd, 'wb') as f:
+ shutil.copyfileobj(response, f)
+
+ if verify_download:
+ invalid_md5 = self.is_invalid_md5(tempname, url)
+ if invalid_md5:
+ # if verify_change was set, the previous file would be deleted
+ os.remove(tempname)
+ return invalid_md5
+ except Exception as e:
+ os.remove(tempname)
+ raise e
+
+ # all good, now copy temp file to target
+ shutil.move(tempname, artifact.get_filename(filename))
+ return None
+
+ def is_invalid_md5(self, file, remote_url):
+ if os.path.exists(file):
+ local_md5 = self._local_md5(file)
+ if self.local:
+ parsed_url = urlparse(remote_url)
+ remote_md5 = self._local_md5(parsed_url.path)
+ else:
+ try:
+ remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
+ except UnicodeError as e:
+ return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
+ if not remote_md5:
+ return "Cannot find md5 from " + remote_url
+ try:
+ # Check if remote md5 only contains md5 or md5 + filename
+ _remote_md5 = remote_md5.split(None)[0]
+ remote_md5 = _remote_md5
+ # remote_md5 is empty so we continue and keep original md5 string
+ # This should not happen since we check for remote_md5 before
+ except IndexError:
+ pass
+ if local_md5.lower() == remote_md5.lower():
+ return None
+ else:
+ return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5
+
+ return "Path does not exist: " + file
+
+ def _local_md5(self, file):
+ md5 = hashlib.md5()
+ with io.open(file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ md5.update(chunk)
+ return md5.hexdigest()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group_id=dict(required=True),
+ artifact_id=dict(required=True),
+ version=dict(default=None),
+ version_by_spec=dict(default=None),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default='https://repo1.maven.org/maven2'),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ headers=dict(type='dict'),
+ force_basic_auth=dict(default=False, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", required=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ client_cert=dict(type="path", required=False),
+ client_key=dict(type="path", required=False),
+ keep_name=dict(required=False, default=False, type='bool'),
+ verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
+ directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure
+ # if this should really be here.
+ ),
+ add_file_common_args=True,
+ mutually_exclusive=([('version', 'version_by_spec')])
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
+ module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR)
+
+ repository_url = module.params["repository_url"]
+ if not repository_url:
+ repository_url = "https://repo1.maven.org/maven2"
+ try:
+ parsed_url = urlparse(repository_url)
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ local = parsed_url.scheme == "file"
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'),
+ exception=BOTO_IMP_ERR)
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ version_by_spec = module.params["version_by_spec"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ headers = module.params['headers']
+ state = module.params["state"]
+ dest = module.params["dest"]
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ keep_name = module.params["keep_name"]
+ verify_checksum = module.params["verify_checksum"]
+ verify_download = verify_checksum in ['download', 'always']
+ verify_change = verify_checksum in ['change', 'always']
+
+ downloader = MavenDownloader(module, repository_url, local, headers)
+
+ if not version_by_spec and not version:
+ version = "latest"
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ changed = False
+ prev_state = "absent"
+
+ if dest.endswith(os.sep):
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
+ os.makedirs(b_dest)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ version_part = version
+ if version == 'latest':
+ version_part = downloader.find_latest_version_available(artifact)
+ elif version_by_spec:
+ version_part = downloader.find_version_by_spec(artifact)
+
+ filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
+ artifact_id=artifact_id,
+ version_part="-{0}".format(version_part) if keep_name else "",
+ classifier="-{0}".format(classifier) if classifier else "",
+ extension=extension
+ )
+ dest = posixpath.join(dest, filename)
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
+ prev_state = "present"
+
+ if prev_state == "absent":
+ try:
+ download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
+ if download_error is None:
+ changed = True
+ else:
+ module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+ if changed:
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=changed)
+ else:
+ module.exit_json(state=state, dest=dest, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
new file mode 100644
index 00000000..6eefe133
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure.
+notes:
+ - DNS reload requests are a best-effort service provided by Memset; these generally
+ happen every 15 minutes by default, however you can request an immediate reload if
+ later tasks rely on the records being created. An API key generated via the
+ Memset customer control panel is required with the following minimum scope -
+ I(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then I(job.status) is also required.
+description:
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - Boolean value, if set will poll the reload job's status and return
+ when the job has completed (unless the 30 second timeout is reached first).
+ If the timeout is reached then the task will not be marked as failed, but
+ stderr will indicate that the polling failed.
+'''
+
+EXAMPLES = '''
+- name: Submit DNS reload and poll
+ community.general.memset_dns_reload:
+ api_key: 5eb86c9196ab03919abcf03857163741
+ poll: True
+ delegate_to: localhost
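+
+# Illustrative sketch, not part of the original examples: register the result and inspect
+# the job status documented under RETURN. The registered variable name is an assumption.
+- name: Submit DNS reload, poll, and record the result
+  community.general.memset_dns_reload:
+    api_key: 5eb86c9196ab03919abcf03857163741
+    poll: True
+  delegate_to: localhost
+  register: dns_reload
+
+- name: Show the reload job status
+  ansible.builtin.debug:
+    msg: "{{ dns_reload.memset_api.status | default('not polled') }}"
+  delegate_to: localhost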
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Raw response from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ error:
+ description: Whether the job ended in error state.
+ returned: always
+ type: bool
+ sample: true
+ finished:
+ description: Whether the job completed before the result was returned.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description: Job ID.
+ returned: always
+ type: str
+ sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+ status:
+ description: Job status.
+ returned: always
+ type: str
+ sample: "DONE"
+ type:
+ description: Job type.
+ returned: always
+ type: str
+ sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+ '''
+ We poll the `job.status` endpoint every 5 seconds up to a
+ maximum of 6 times. This is a relatively arbitrary choice of
+ timeout, however requests rarely take longer than 15 seconds
+ to complete.
+ '''
+ memset_api, stderr, msg = None, None, None
+ payload['id'] = job_id
+
+ api_method = 'job.status'
+ _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+ counter = 0
+ while not response.json()['finished'] and counter < 6:
+     sleep(5)
+     _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+     counter += 1
+
+ if response.json()['error']:
+     # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+     stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
+ else:
+     memset_api = response.json()
+     msg = None
+
+ return(memset_api, msg, stderr)
+
+
+def reload_dns(args=None):
+ '''
+ DNS reloads are a single API call and therefore there's not much
+ which can go wrong outside of auth errors.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ memset_api, msg, stderr = None, None, None
+
+ api_method = 'dns.reload'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['memset_api'] = response.json()
+ retvals['msg'] = msg
+ return(retvals)
+
+ # set changed to true if the reload request was accepted.
+ has_changed = True
+ memset_api = msg
+ # empty msg var as we don't want to return the API's json response twice.
+ msg = None
+
+ if args['poll']:
+ # hand off to the poll function.
+ job_id = response.json()['id']
+ memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
+
+ # assemble return variables.
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+ for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ poll=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = reload_dns(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The Memstore product name (e.g. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
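+
+# An additional illustrative sketch (the registered variable name is hypothetical):
+# capture the usage data and report the space consumed by the Memstore product.
+- name: Get usage for mstestyaa1 and register the result
+  community.general.memset_memstore_info:
+    name: mstestyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: memstore_usage
+  delegate_to: localhost
+
+- name: Show space used in bytes
+  ansible.builtin.debug:
+    msg: "mstestyaa1 uses {{ memstore_usage.memset_api.bytes }} bytes"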
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val, data in (('msg', msg), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
new file mode 100644
index 00000000..5eea6ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The Memstore product name (e.g. C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
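+
+# An additional illustrative sketch (the registered variable name is hypothetical):
+# capture the usage data and report how many containers the product holds.
+- name: Get usage for mstestyaa1 and register the result
+  community.general.memset_memstore_info:
+    name: mstestyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: memstore_usage
+  delegate_to: localhost
+
+- name: Show container count
+  ansible.builtin.debug:
+    msg: "mstestyaa1 holds {{ memstore_usage.memset_api.containers }} containers"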
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val, data in (('msg', msg), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_memstore_facts', 'community.general.memset_memstore_facts'):
+ module.deprecate("The 'memset_memstore_facts' module has been renamed to 'memset_memstore_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_facts.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The server product name (e.g. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
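+
+# An additional illustrative sketch (the registered variable name is hypothetical):
+# retrieve the server details and surface its primary IP address.
+- name: Get details for testyaa1 and register the result
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server_details
+  delegate_to: localhost
+
+- name: Show primary IP
+  ansible.builtin.debug:
+    msg: "testyaa1 primary IP is {{ server_details.memset_api.primary_ip }}"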
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+ description: The control panel the server has, if any (e.g. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+ description: The type of firewall the server has (e.g. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+ description: The server's monitoring level (e.g. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+ description: Current status of the server (e.g. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+ description: What this server is (e.g. dedicated).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val, data in (('msg', msg), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py
new file mode 100644
index 00000000..d8943c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_server_info.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information.
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+ - The server product name (e.g. C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
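+
+# An additional illustrative sketch (the registered variable name is hypothetical):
+# retrieve the server details and assert that monitoring is enabled.
+- name: Get details for testyaa1 and register the result
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server_details
+  delegate_to: localhost
+
+- name: Ensure monitoring is enabled
+  ansible.builtin.assert:
+    that:
+      - server_details.memset_api.monitor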
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+ description: The control panel the server has, if any (e.g. cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+ description: The type of firewall the server has (e.g. self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+ description: The server's monitoring level (e.g. basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+ description: VAT rate for renewal payments
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+ description: Current status of the server (e.g. live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+ description: What this server is (e.g. dedicated).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val, data in (('msg', msg), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=False
+ )
+ if module._name in ('memset_server_facts', 'community.general.memset_server_facts'):
+ module.deprecate("The 'memset_server_facts' module has been renamed to 'memset_server_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py
new file mode 100644
index 00000000..9ef798bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone
+author: "Simon Weald (@glitchcrab)"
+short_description: Creates and deletes Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+description:
+ - Manage DNS zones in a Memset account.
+options:
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this
+ value has at most 250 characters.
+ type: str
+ aliases: [ nickname ]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+'''
+
+EXAMPLES = '''
+# Create the zone 'test'
+- name: Create zone
+ community.general.memset_zone:
+ name: test
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ ttl: 300
+ delegate_to: localhost
+
+# Force zone deletion
+- name: Force delete zone
+ community.general.memset_zone:
+ name: test
+ state: absent
+ api_key: 5eb86c9196ab03919abcf03857163741
+ force: true
+ delegate_to: localhost
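+
+# An illustrative sketch: the module supports check mode, so a change can be
+# previewed without touching the zone.
+- name: Preview zone creation without applying it
+  community.general.memset_zone:
+    name: test
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+    ttl: 300
+  check_mode: true
+  delegate_to: localhost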
+'''
+
+RETURN = '''
+memset_api:
+ description: Zone info from the Memset API
+ returned: when state == present
+ type: complex
+ contains:
+ domains:
+ description: List of domains in this zone
+ returned: always
+ type: list
+ sample: []
+ id:
+ description: Zone id
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ nickname:
+ description: Zone name
+ returned: always
+ type: str
+ sample: "example.com"
+ records:
+ description: List of DNS records for domains in this zone
+ returned: always
+ type: list
+ sample: []
+ ttl:
+ description: Default TTL for domains in this zone
+ returned: always
+ type: int
+ sample: 300
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ # zone domain length must be less than 250 chars.
+ if len(args['name']) > 250:
+ stderr = 'Zone name must be less than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr, stderr=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+
+ api_method = 'dns.zone_list'
+ has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, counter = check_zone(data=response, name=args['name'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone(args=None, zone_exists=None, payload=None):
+ '''
+ At this point we already know whether the zone exists, so we
+ just need to make the API reflect the desired state.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if not zone_exists:
+ payload['ttl'] = args['ttl']
+ payload['nickname'] = args['name']
+ api_method = 'dns.zone_create'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ else:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ break
+ if zone['ttl'] != args['ttl']:
+ # update the zone if the desired TTL is different.
+ payload['id'] = zone['id']
+ payload['ttl'] = args['ttl']
+ api_method = 'dns.zone_update'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ # populate return var with zone info.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if zone_exists:
+ payload = dict()
+ payload['id'] = zone_id
+ api_method = 'dns.zone_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ memset_api = response.json()
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def delete_zone(args=None, zone_exists=None, payload=None):
+ '''
+ Deletion requires extra sanity checking as the zone cannot be
+ deleted if it contains domains or records. Setting force=true
+ will override this behaviour.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if zone_exists:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ counter = 0
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ counter += 1
+ if counter == 1:
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ zone_id = zone['id']
+ domain_count = len(zone['domains'])
+ record_count = len(zone['records'])
+ if (domain_count > 0 or record_count > 0) and args['force'] is False:
+ # we need to fail out if force was not explicitly set.
+ stderr = 'Zone contains domains or records and force was not used.'
+ has_failed = True
+ has_changed = False
+ module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
+ api_method = 'dns.zone_delete'
+ payload['id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
+ memset_api = msg
+ msg = None
+ else:
+ # zone names are not unique, so we cannot safely delete the requested
+ # zone at this time.
+ has_failed = True
+ has_changed = False
+ msg = 'Unable to delete zone as multiple zones with the same name exist.'
+ else:
+ has_failed, has_changed = False, False
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = _msg
+
+ return(retvals)
+
+ zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if args['state'] == 'present':
+ has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ elif args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+ for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, aliases=['nickname'], type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
new file mode 100644
index 00000000..4aa0eada
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_domain
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete domains in Memset DNS zones.
+notes:
+ - Zone domains can be thought of as a collection of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should
+ be created using C(with_items).
+description:
+ - Manage DNS zone domains in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+'''
+
+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: Create zone domain
+ community.general.memset_zone_domain:
+ domain: test.com
+ zone: testzone
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ delegate_to: localhost
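+
+# An illustrative sketch: the module creates one domain per invocation, so
+# several domains can be added to the same zone by looping (as noted above).
+- name: Create multiple zone domains
+  community.general.memset_zone_domain:
+    domain: "{{ item }}"
+    zone: testzone
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+  delegate_to: localhost
+  with_items:
+    - test1.com
+    - test2.com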
+'''
+
+RETURN = '''
+memset_api:
+ description: Domain info from the Memset API
+ returned: when changed or state == present
+ type: complex
+ contains:
+ domain:
+ description: Domain name
+ returned: always
+ type: str
+ sample: "example.com"
+ id:
+ description: Domain ID
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+ '''
+ # zone domain length must be less than 250 chars
+ if len(args['domain']) > 250:
+ stderr = 'Zone domain must be less than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+ has_changed = False
+
+ api_method = 'dns.zone_domain_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return(retvals)
+
+
+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+ '''
+ At this point we already know whether the containing zone exists,
+ so we just need to create the domain (or exit if it already exists).
+ '''
+ has_changed, has_failed = False, False
+ msg = None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ for zone_domain in response.json():
+ if zone_domain['domain'] == args['domain']:
+ # zone domain already exists, nothing to change.
+ has_changed = False
+ break
+ else:
+ # we need to create the domain
+ api_method = 'dns.zone_domain_create'
+ payload['domain'] = args['domain']
+ payload['zone_id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ return(has_failed, has_changed, msg)
+
+
+def delete_zone_domain(args=None, payload=None):
+ '''
+ Deletion is pretty simple: domains are always unique, so we
+ don't need to do any sanity checking to avoid deleting the
+ wrong thing.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ if domain_exists:
+ api_method = 'dns.zone_domain_delete'
+ payload['domain'] = args['domain']
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = response.json()
+ # unset msg as we don't want to return unnecessary info to the user.
+ msg = None
+
+ return(has_failed, has_changed, memset_api, msg)
+
+
+def create_or_delete_domain(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ # the zone needs to be unique - this isn't a requirement of Memset's API but it
+ # makes sense in the context of this module.
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone'])
+
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ return(retvals)
+
+ if args['state'] == 'present':
+ has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
+
+ if args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ for val, data in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+     if data is not None:
+         retvals[val] = data
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ domain=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(required=True, type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete_domain(args)
+
+ # we would need to populate the return values with the API's response
+ # in several places so it's easier to do it at the end instead.
+ if not retvals['failed']:
+ if args['state'] == 'present' and not module.check_mode:
+ payload = dict()
+ payload['domain'] = args['domain']
+ api_method = 'dns.zone_domain_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ retvals['memset_api'] = response.json()
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py
new file mode 100644
index 00000000..981d2ac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/memset_zone_record.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones.
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records
+ should be created using C(with_items).
+description:
+ - Manage DNS records in a Memset account.
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ ip, data ]
+ priority:
+ description:
+ - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+ type: int
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+ - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+ and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ type: int
+ zone:
+ required: true
+ description:
+ - The name of the zone to add the record to.
+ type: str
+'''
+
+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: Create DNS record
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: A
+ record: www
+ address: 1.2.3.4
+ ttl: 300
+ relative: false
+ delegate_to: localhost
+
+# create an SPF record for domain.com
+- name: Create SPF record for domain.com
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+ delegate_to: localhost
+
+# create multiple DNS records
+- name: Create multiple DNS records
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ zone: "{{ item.zone }}"
+ type: "{{ item.type }}"
+ record: "{{ item.record }}"
+ address: "{{ item.address }}"
+ delegate_to: localhost
+ with_items:
+ - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+ - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
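+
+# An illustrative sketch: create an MX record using the documented priority and
+# relative options (the record target below is a placeholder).
+- name: Create MX record for domain.com
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: MX
+    address: mail
+    priority: 10
+    relative: true
+  delegate_to: localhost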
+'''
+
+RETURN = '''
+memset_api:
+ description: Record info from the Memset API.
+ returned: when state == present
+ type: complex
+ contains:
+ address:
+ description: Record content (may be an IP, string or blank depending on record type).
+ returned: always
+ type: str
+ sample: 1.1.1.1
+ id:
+ description: Record ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ priority:
+ description: Priority for C(MX) and C(SRV) records.
+ returned: always
+ type: int
+ sample: 10
+ record:
+ description: Name of record.
+ returned: always
+ type: str
+ sample: "www"
+ relative:
+ description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+ returned: always
+ type: bool
+ sample: False
+ ttl:
+ description: Record TTL.
+ returned: always
+ type: int
+ sample: 10
+ type:
+ description: Record type.
+ returned: always
+ type: str
+ sample: AAAA
+ zone_id:
+ description: Zone ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ failed_validation = False
+
+ # priority can only be integer 0 > 999
+ if not 0 <= args['priority'] <= 999:
+ failed_validation = True
+ error = 'Priority must be in the range 0 to 999 (inclusive).'
+ # data value must be max 250 chars
+ if len(args['address']) > 250:
+ failed_validation = True
+ error = "Address must be less than 250 characters in length."
+ # record value must be max 250 chars
+ if args['record']:
+ if len(args['record']) > 63:
+ failed_validation = True
+ error = "Record must be less than 63 characters in length."
+ # relative isn't used for all record types
+ if args['relative']:
+ if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+ failed_validation = True
+ error = "Relative is only valid for CNAME, MX, NS and SRV record types."
+ # if any of the above failed then fail early
+ if failed_validation:
+ module.fail_json(failed=True, msg=error)
+
+
+def create_zone_record(args=None, zone_id=None, records=None, payload=None):
+ '''
+ Sanity checking has already occurred prior to this function being
+ called, so we can go ahead and either create or update the record.
+ As defaults are defined for all values in the argument_spec, this
+ may cause some changes to occur as the defaults are enforced (if
+ the user has only configured required variables).
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # assemble the new record.
+ new_record = dict()
+ new_record['zone_id'] = zone_id
+ for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
+ new_record[arg] = args[arg]
+
+ # if we have any matches, update them.
+ if records:
+ for zone_record in records:
+ # record exists, add ID to payload.
+ new_record['id'] = zone_record['id']
+ if zone_record == new_record:
+ # nothing to do; record is already correct so we populate
+ # the return var with the existing record's details.
+ memset_api = zone_record
+ return(has_changed, has_failed, memset_api, msg)
+ else:
+ # merge dicts ensuring we change any updated values
+ payload = zone_record.copy()
+ payload.update(new_record)
+ api_method = 'dns.zone_record_update'
+ if args['check_mode']:
+ has_changed = True
+ # return the new record to the user in the returned var.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+ else:
+ # no record found, so we need to create it
+ api_method = 'dns.zone_record_create'
+ payload = new_record
+ if args['check_mode']:
+ has_changed = True
+ # populate the return var with the new record's details.
+ memset_api = new_record
+ return(has_changed, has_failed, memset_api, msg)
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def delete_zone_record(args=None, records=None, payload=None):
+ '''
+ Matching records can be cleanly deleted without affecting other
+ resource types, so this is pretty simple to achieve.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # if we have any matches, delete them.
+ if records:
+ for zone_record in records:
+ if args['check_mode']:
+ has_changed = True
+ return(has_changed, has_failed, memset_api, msg)
+ payload['id'] = zone_record['id']
+ api_method = 'dns.zone_record_delete'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = zone_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return(has_changed, has_failed, memset_api, msg)
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete functions.
+ Check mode is integrated into the create or delete functions.
+ '''
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+ retvals, payload = dict(), dict()
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = msg
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ return(retvals)
+
+ zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ has_failed = True
+ if counter == 0:
+ stderr = "DNS zone {0} does not exist." . format(args['zone'])
+ elif counter > 1:
+ stderr = "{0} matches multiple zones." . format(args['zone'])
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ retvals['stderr'] = stderr
+ return(retvals)
+
+ # get a list of all records (as we can't limit records by zone)
+ api_method = 'dns.zone_record_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ # find any matching records
+ records = [record for record in response.json() if record['zone_id'] == zone_id
+ and record['record'] == args['record'] and record['type'] == args['type']]
+
+ if args['state'] == 'present':
+ has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+ if args['state'] == 'absent':
+ has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+ # only return keys whose corresponding value is actually set
+ for val in ['msg', 'stderr', 'memset_api']:
+ if eval(val) is not None:
+ retvals[val] = eval(val)
+
+ return(retvals)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ zone=dict(required=True, type='str'),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+ address=dict(required=True, aliases=['ip', 'data'], type='str'),
+ record=dict(required=False, default='', type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(required=False, default=0, type='int'),
+ relative=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # perform some Memset API-specific validation
+ api_validation(args=args)
+
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py
new file mode 100644
index 00000000..1be917dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mksysb.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Kairo Araujo (@kairoaraujo)
+module: mksysb
+short_description: Generates AIX mksysb rootvg backups.
+description:
+ - This module manages a basic AIX mksysb (image) of rootvg.
+options:
+ backup_crypt_files:
+ description:
+ - Backup encrypted files.
+ type: bool
+ default: "yes"
+ backup_dmapi_fs:
+ description:
+ - Back up DMAPI filesystem files.
+ type: bool
+ default: "yes"
+ create_map_files:
+ description:
+ - Creates new MAP files.
+ type: bool
+ default: "no"
+ exclude_files:
+ description:
+ - Excludes files using C(/etc/rootvg.exclude).
+ type: bool
+ default: "no"
+ exclude_wpar_files:
+ description:
+ - Excludes WPAR files.
+ type: bool
+ default: "no"
+ extended_attrs:
+ description:
+ - Backup extended attributes.
+ type: bool
+ default: "yes"
+ name:
+ type: str
+ description:
+ - Backup name
+ required: true
+ new_image_data:
+ description:
+ - Creates a new image.data file.
+ type: bool
+ default: "yes"
+ software_packing:
+ description:
+ - Excludes from software packing the files listed in
+ C(/etc/exclude_packing.rootvg).
+ type: bool
+ default: "no"
+ storage_path:
+ type: str
+ description:
+ - Storage path where the mksysb backup will be stored.
+ required: true
+ use_snapshot:
+ description:
+ - Creates backup using snapshots.
+ type: bool
+ default: "no"
+'''
+
+EXAMPLES = '''
+- name: Running a backup image mksysb
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ exclude_files: yes
+ exclude_wpar_files: yes
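+
+# Illustrative extra example (not part of the original documentation); the
+# values are placeholders and only options documented above are used.
+- name: Running a backup image mksysb using snapshots and map files
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ use_snapshot: yes
+ create_map_files: yes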
+'''
+
+RETURN = '''
+changed:
+ description: Return changed for mksysb actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backup_crypt_files=dict(type='bool', default=True),
+ backup_dmapi_fs=dict(type='bool', default=True),
+ create_map_files=dict(type='bool', default=False),
+ exclude_files=dict(type='bool', default=False),
+ exclude_wpar_files=dict(type='bool', default=False),
+ extended_attrs=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ new_image_data=dict(type='bool', default=True),
+ software_packing=dict(type='bool', default=False),
+ storage_path=dict(type='str', required=True),
+ use_snapshot=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ # Command options.
+ map_file_opt = {
+ True: '-m',
+ False: ''
+ }
+
+ use_snapshot_opt = {
+ True: '-T',
+ False: ''
+ }
+
+ exclude_files_opt = {
+ True: '-e',
+ False: ''
+ }
+
+ exclude_wpar_opt = {
+ True: '-G',
+ False: ''
+ }
+
+ new_image_data_opt = {
+ True: '-i',
+ False: ''
+ }
+
+ soft_packing_opt = {
+ True: '',
+ False: '-p'
+ }
+
+ extend_attr_opt = {
+ True: '',
+ False: '-a'
+ }
+
+ crypt_files_opt = {
+ True: '',
+ False: '-Z'
+ }
+
+ dmapi_fs_opt = {
+ True: '-a',
+ False: ''
+ }
+
+ backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
+ backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
+ create_map_files = map_file_opt[module.params['create_map_files']]
+ exclude_files = exclude_files_opt[module.params['exclude_files']]
+ exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
+ extended_attrs = extend_attr_opt[module.params['extended_attrs']]
+ name = module.params['name']
+ new_image_data = new_image_data_opt[module.params['new_image_data']]
+ software_packing = soft_packing_opt[module.params['software_packing']]
+ storage_path = module.params['storage_path']
+ use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
+
+ # Validate if storage_path is a valid directory.
+ if os.path.isdir(storage_path):
+ if not module.check_mode:
+ # Generates the mksysb image backup.
+ mksysb_cmd = module.get_bin_path('mksysb', True)
+ rc, mksysb_output, err = module.run_command(
+ "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
+ mksysb_cmd, create_map_files, use_snapshot, exclude_files,
+ exclude_wpar_files, software_packing, extended_attrs,
+ backup_crypt_files, backup_dmapi_fs, new_image_data,
+ storage_path, name))
+ if rc == 0:
+ module.exit_json(changed=True, msg=mksysb_output)
+ else:
+ module.fail_json(msg="mksysb failed.", rc=rc, err=err)
+
+ module.exit_json(changed=True)
+
+ else:
+ module.fail_json(msg="Storage path %s is not valid." % storage_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py
new file mode 100644
index 00000000..0ab75235
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/modprobe.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Load or unload kernel modules
+author:
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
+description:
+ - Load or unload kernel modules.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [ absent, present ]
+ default: present
+ params:
+ type: str
+ description:
+ - Module parameters.
+ default: ''
+'''
+
+EXAMPLES = '''
+- name: Add the 802.1q module
+ community.general.modprobe:
+ name: 8021q
+ state: present
+
+- name: Add the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
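+
+# Illustrative extra example (not part of the original documentation),
+# using only options documented above.
+- name: Remove the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: absent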
+'''
+
+import os.path
+import shlex
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ params=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ params = module.params['params']
+ state = module.params['state']
+
+ # FIXME: Adding all parameters as result values is useless
+ result = dict(
+ changed=False,
+ name=name,
+ params=params,
+ state=state,
+ )
+
+ # Check if module is present
+ try:
+ present = False
+ with open('/proc/modules') as modules:
+ module_name = name.replace('-', '_') + ' '
+ for line in modules:
+ if line.startswith(module_name):
+ present = True
+ break
+ if not present:
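+ # A module built into the kernel never appears in /proc/modules, so
+ # also check modules.builtin for the running kernel before concluding
+ # that the module is absent.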
+ command = [module.get_bin_path('uname', True), '-r']
+ rc, uname_kernel_release, err = module.run_command(command)
+ module_file = '/' + name + '.ko'
+ builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(),
+ 'modules.builtin')
+ with open(builtin_path) as builtins:
+ for line in builtins:
+ if line.rstrip().endswith(module_file):
+ present = True
+ break
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
+
+ # Add/remove module as needed
+ if state == 'present':
+ if not present:
+ if not module.check_mode:
+ command = [module.get_bin_path('modprobe', True), name]
+ command.extend(shlex.split(params))
+ rc, out, err = module.run_command(command)
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+ elif state == 'absent':
+ if present:
+ if not module.check_mode:
+ rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py
new file mode 100644
index 00000000..1dfe76d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monit.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit).
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of service.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ type: str
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ default: 300
+ type: int
+author:
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program httpd to be in started state
+ community.general.monit:
+ name: httpd
+ state: started
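+
+# Illustrative extra example (not part of the original documentation),
+# using only options documented above.
+- name: Stop monitoring the httpd program, waiting at most 60 seconds
+ community.general.monit:
+ name: httpd
+ state: unmonitored
+ timeout: 60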
+'''
+
+import time
+import re
+
+from collections import namedtuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import python_2_unicode_compatible
+
+
+STATE_COMMAND_MAP = {
+ 'stopped': 'stop',
+ 'started': 'start',
+ 'monitored': 'monitor',
+ 'unmonitored': 'unmonitor',
+ 'restarted': 'restart'
+}
+
+MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program',
+ 'Network']
+
+
+@python_2_unicode_compatible
+class StatusValue(namedtuple("Status", "value, is_pending")):
+ MISSING = 'missing'
+ OK = 'ok'
+ NOT_MONITORED = 'not_monitored'
+ INITIALIZING = 'initializing'
+ DOES_NOT_EXIST = 'does_not_exist'
+ EXECUTION_FAILED = 'execution_failed'
+ ALL_STATUS = [
+ MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED
+ ]
+
+ def __new__(cls, value, is_pending=False):
+ return super(StatusValue, cls).__new__(cls, value, is_pending)
+
+ def pending(self):
+ return StatusValue(self.value, True)
+
+ def __getattr__(self, item):
+ if item in ('is_%s' % status for status in self.ALL_STATUS):
+ return self.value == getattr(self, item[3:].upper())
+ raise AttributeError(item)
+
+ def __str__(self):
+ return "%s%s" % (self.value, " (pending)" if self.is_pending else "")
+
+
+class Status(object):
+ MISSING = StatusValue(StatusValue.MISSING)
+ OK = StatusValue(StatusValue.OK)
+ RUNNING = StatusValue(StatusValue.OK)
+ NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED)
+ INITIALIZING = StatusValue(StatusValue.INITIALIZING)
+ DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST)
+ EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED)
+
+
+class Monit(object):
+ def __init__(self, module, monit_bin_path, service_name, timeout):
+ self.module = module
+ self.monit_bin_path = monit_bin_path
+ self.process_name = service_name
+ self.timeout = timeout
+
+ self._monit_version = None
+ self._raw_version = None
+ self._status_change_retry_count = 6
+
+ def monit_version(self):
+ if self._monit_version is None:
+ self._raw_version, version = self._get_monit_version()
+ # Use only the major and minor version numbers; even if more are present, these should be enough
+ self._monit_version = version[0], version[1]
+ return self._monit_version
+
+ def _get_monit_version(self):
+ rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True)
+ version_line = out.split('\n')[0]
+ raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
+ return raw_version, tuple(map(int, raw_version.split('.')))
+
+ def exit_fail(self, msg, status=None, **kwargs):
+ kwargs.update({
+ 'msg': msg,
+ 'monit_version': self._raw_version,
+ 'process_status': str(status) if status else None,
+ })
+ self.module.fail_json(**kwargs)
+
+ def exit_success(self, state):
+ self.module.exit_json(changed=True, name=self.process_name, state=state)
+
+ @property
+ def command_args(self):
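+ # Monit 5.18 and later accept the -B (batch mode) flag; older releases
+ # do not, so no extra argument is passed for them.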
+ return "-B" if self.monit_version() > (5, 18) else ""
+
+ def get_status(self, validate=False):
+ """Return the status of the process in monit.
+
+ :@param validate: Force monit to re-check the status of the process
+ """
+ monit_command = "validate" if validate else "status"
+ check_rc = not validate # 'validate' always has rc = 1
+ command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name])
+ rc, out, err = self.module.run_command(command, check_rc=check_rc)
+ return self._parse_status(out, err)
+
+ def _parse_status(self, output, err):
+ escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES])
+ pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name))
+ if not re.search(pattern, output, re.IGNORECASE):
+ return Status.MISSING
+
+ status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE)
+ if not status_val:
+ self.exit_fail("Unable to find process status", stdout=output, stderr=err)
+
+ status_val = status_val[0].strip().upper()
+ if ' | ' in status_val:
+ status_val = status_val.split(' | ')[0]
+ if ' - ' not in status_val:
+ status_val = status_val.replace(' ', '_')
+ return getattr(Status, status_val)
+ else:
+ status_val, substatus = status_val.split(' - ')
+ action, state = substatus.split()
+ if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']:
+ status = Status.OK
+ else:
+ status = Status.NOT_MONITORED
+
+ # status_val was upper-cased above, so compare against the upper-cased token
+ if state == 'PENDING':
+ status = status.pending()
+ return status
+
+ def is_process_present(self):
+ rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True)
+ return bool(re.findall(r'\b%s\b' % self.process_name, out))
+
+ def is_process_running(self):
+ return self.get_status().is_ok
+
+ def run_command(self, command):
+ """Runs a monit command, and returns the new status."""
+ return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True)
+
+ def wait_for_status_change(self, current_status):
+ running_status = self.get_status()
+ if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED:
+ return running_status
+
+ loop_count = 0
+ while running_status.value == current_status.value:
+ if loop_count >= self._status_change_retry_count:
+ self.exit_fail('waited too long for monit to change state', running_status)
+
+ loop_count += 1
+ time.sleep(0.5)
+ validate = loop_count % 2 == 0 # force recheck of status every second try
+ running_status = self.get_status(validate)
+ return running_status
+
+ def wait_for_monit_to_stop_pending(self, current_status=None):
+ """Fails this run if there is no status or it's pending/initializing for timeout"""
+ timeout_time = time.time() + self.timeout
+
+ if not current_status:
+ current_status = self.get_status()
+ waiting_status = [
+ StatusValue.MISSING,
+ StatusValue.INITIALIZING,
+ StatusValue.DOES_NOT_EXIST,
+ ]
+ while current_status.is_pending or (current_status.value in waiting_status):
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for "pending" or "initializing" status to go away', current_status)
+
+ time.sleep(5)
+ current_status = self.get_status(validate=True)
+ return current_status
+
+ def reload(self):
+ rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path)
+ if rc != 0:
+ self.exit_fail('monit reload failed', stdout=out, stderr=err)
+ self.exit_success(state='reloaded')
+
+ def present(self):
+ self.run_command('reload')
+
+ timeout_time = time.time() + self.timeout
+ while not self.is_process_present():
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for process to become "present"')
+
+ time.sleep(5)
+
+ self.exit_success(state='present')
+
+ def change_state(self, state, expected_status, invert_expected=None):
+ current_status = self.get_status()
+ self.run_command(STATE_COMMAND_MAP[state])
+ status = self.wait_for_status_change(current_status)
+ status = self.wait_for_monit_to_stop_pending(status)
+ status_match = status.value == expected_status.value
+ if invert_expected:
+ status_match = not status_match
+ if status_match:
+ self.exit_success(state=state)
+ self.exit_fail('%s process not %s' % (self.process_name, state), status)
+
+ def stop(self):
+ self.change_state('stopped', Status.NOT_MONITORED)
+
+ def unmonitor(self):
+ self.change_state('unmonitored', Status.NOT_MONITORED)
+
+ def restart(self):
+ self.change_state('restarted', Status.OK)
+
+ def start(self):
+ self.change_state('started', Status.OK)
+
+ def monitor(self):
+ self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ monit = Monit(module, module.get_bin_path('monit', True), name, timeout)
+
+ def exit_if_check_mode():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if state == 'reloaded':
+ exit_if_check_mode()
+ monit.reload()
+
+ present = monit.is_process_present()
+
+ if not present and state != 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name)
+
+ if state == 'present':
+ if present:
+ module.exit_json(changed=False, name=name, state=state)
+ exit_if_check_mode()
+ monit.present()
+
+ monit.wait_for_monit_to_stop_pending()
+ running = monit.is_process_running()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ exit_if_check_mode()
+ monit.stop()
+
+ if running and state == 'unmonitored':
+ exit_if_check_mode()
+ monit.unmonitor()
+
+ elif state == 'restarted':
+ exit_if_check_mode()
+ monit.restart()
+
+ elif not running and state == 'started':
+ exit_if_check_mode()
+ monit.start()
+
+ elif not running and state == 'monitored':
+ exit_if_check_mode()
+ monit.monitor()
+
+ module.exit_json(changed=False, name=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py
new file mode 100644
index 00000000..3e7938bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/airbrake_deployment.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify airbrake about app deployments
+description:
+ - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+ - Parameter I(token) has been deprecated since community.general 0.2.0. Please remove it from your tasks.
+options:
+ project_id:
+ description:
+ - Airbrake PROJECT_ID
+ required: false
+ type: str
+ version_added: '0.2.0'
+ project_key:
+ description:
+ - Airbrake PROJECT_KEY.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ type: str
+ user:
+ description:
+ - The username of the person doing the deployment
+ required: false
+ type: str
+ repo:
+ description:
+ - URL of the project repository
+ required: false
+ type: str
+ revision:
+ description:
+ - A hash, number, tag, or other identifier showing what revision from version control was deployed
+ required: false
+ type: str
+ version:
+ description:
+ - A string identifying what version was deployed
+ required: false
+ type: str
+ version_added: '1.0.0'
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://api.airbrake.io/api/v4/projects/"
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ token:
+ description:
+ - This parameter (API token) has been deprecated in community.general 0.2.0. Please remove it from your tasks.
+ required: false
+ type: str
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify airbrake about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: '4.2'
+
+- name: Notify airbrake about an app deployment, using git hash as revision
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+ version: '0.2.0'
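+
+# Illustrative extra example (not part of the original documentation): the
+# Errbit URL is a placeholder for any Airbrake-compliant endpoint.
+- name: Notify an Errbit instance about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: production
+ user: ansible
+ url: 'https://errbit.example.com/api/v4/projects/'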
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=False, no_log=True, type='str'),
+ project_id=dict(required=False, no_log=True, type='str'),
+ project_key=dict(required=False, no_log=True, type='str'),
+ environment=dict(required=True, type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ version=dict(required=False, type='str'),
+ url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ required_together=[('project_id', 'project_key')],
+ mutually_exclusive=[('project_id', 'token')],
+ )
+
+ # Build list of params
+ params = {}
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if module.params["token"]:
+ # v2 API documented at https://airbrake.io/docs/legacy-xml-api/#tracking-deploys
+ if module.params["environment"]:
+ params["deploy[rails_env]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[scm_repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[scm_revision]"] = module.params["revision"]
+
+ # version not supported in v2 API; omit
+
+ module.deprecate("Parameter 'token' is deprecated since community.general 0.2.0. Please remove "
+ "it and use 'project_id' and 'project_key' instead",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ params["api_key"] = module.params["token"]
+
+ # Allow sending to Airbrake compliant v2 APIs
+ if module.params["url"] == 'https://api.airbrake.io/api/v4/projects/':
+ url = 'https://api.airbrake.io/deploys.txt'
+ else:
+ url = module.params["url"]
+
+ # Send the data to airbrake
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+
+ if module.params["project_id"] and module.params["project_key"]:
+ # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+ if module.params["environment"]:
+ params["environment"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["username"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["repository"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["revision"] = module.params["revision"]
+
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+
+ # Build deploy url
+ url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
+ json_body = module.jsonify(params)
+
+ # Build header
+ headers = {'Content-Type': 'application/json'}
+
+ # Notify Airbrake of deploy
+ response, info = fetch_url(module, url, data=json_body,
+ headers=headers, method='POST')
+
+ if info['status'] == 200 or info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py
new file mode 100644
index 00000000..ea693eb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/bigpanda.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+options:
+ component:
+ type: str
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+ - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
+ required: false
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ deployment_message:
+ type: str
+ description:
+ - Message about the deployment.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ aliases: ['message']
+ version_added: '0.2.0'
+ source_system:
+ type: str
+ description:
+ - Source system used in the requests to the API
+ default: ansible
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
+ register: deployment
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
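+
+# Illustrative extra example (not part of the original documentation),
+# using only options documented above.
+- name: Notify BigPanda about a failed deployment with a message
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: failed
+ deployment_message: Deployment aborted by a failing health check
+ delegate_to: localhost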
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ deployment_message=dict(required=False, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default=True, type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+ if body.get('hosts') is None:
+ body['hosts'] = [socket.gethostname()]
+
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+ message = module.params['deployment_message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py
new file mode 100644
index 00000000..27d23168
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/circonus_annotation.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+ - Create an annotation event with a given category, title and description. Optionally, a start time, stop time or duration can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+ - Check mode isn't supported.
+options:
+ api_key:
+ type: str
+ description:
+ - Circonus API key
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start
+ - If not specified, it defaults to I(now).
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end
+ - If not specified, it defaults to I(now) + I(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation
+ default: 0
+'''
+EXAMPLES = '''
+- name: Create a simple annotation event with a source, defaults to start and end time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ duration: 300
+
+- name: Create an annotation with explicit start and stop timestamps
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ start: 1395940006
+ stop: 1395954407
+'''
+
+RETURN = '''
+annotation:
+ description: details about the created annotation
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: annotation identifier
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: creation timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: last modification timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: last modified by
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: category of the created annotation
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: title of the created annotation
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: description of the created annotation
+ returned: success
+ type: str
+ sample: Host is down.
+ start:
+ description: timestamp at which the annotation starts
+ returned: success
+ type: int
+ sample: 1502236928
+ stop:
+ description: timestamp at which the annotation ends
+ returned: success
+ type: int
+ sample: 1502237928
+ rel_metrics:
+ description: Array of metrics related to this annotation, each metrics is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+'''
+import json
+import time
+import traceback
+from distutils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def check_requests_dep(module):
+ """Check if an adequate requests version is available"""
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ else:
+ required_version = '2.0.0' if PY3 else '1.0.0'
+ if LooseVersion(requests.__version__) < LooseVersion(required_version):
+ module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+ annotate_post_endpoint = '/annotation'
+ resp = requests.post(base_url + annotate_post_endpoint,
+ headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+ duration = module.params['duration']
+ if module.params['start'] is not None:
+ start = module.params['start']
+ else:
+ start = int(time.time())
+ if module.params['stop'] is not None:
+ stop = module.params['stop']
+ else:
+ stop = int(time.time()) + duration
+ annotation['start'] = start
+ annotation['stop'] = stop
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
+
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(type='int'),
+ stop=dict(type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(default=0, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+
+ check_requests_dep(module)
+
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as e:
+ module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py
new file mode 100644
index 00000000..a6327dde
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_event.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+- "Allows to post events to Datadog (www.datadoghq.com) service."
+- "Uses http://docs.datadoghq.com/api/#events API."
+author:
+- "Artūras `arturaz` Šlajus (@arturaz)"
+- "Naoya Nakazawa (@n0ts)"
+options:
+ api_key:
+ type: str
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ type: str
+ description: ["Your DataDog app key."]
+ required: true
+ title:
+ type: str
+ description: ["The event title."]
+ required: true
+ text:
+ type: str
+ description: ["The body of the event."]
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+ - Default value is now.
+ priority:
+ type: str
+ description: ["The priority of the event."]
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ tags:
+ type: list
+ description: ["Comma separated list of tags to apply to the event."]
+ alert_type:
+ type: str
+ description: ["Type of alert."]
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description: ["An arbitrary string to use for aggregation."]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Post an event with low priority
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+
+- name: Post an event with several tags
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
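+
+# Illustrative extra example (not part of the original documentation): the
+# aggregation key value is a placeholder.
+- name: Post an error event and aggregate it with related events
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ alert_type: error
+ aggregation_key: my-deploy-run
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN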
+'''
+
+import platform
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(required=False, default=None, type='int'),
+ priority=dict(
+ required=False, default='normal', choices=['normal', 'low']
+ ),
+ host=dict(required=False, default=None),
+ tags=dict(required=False, default=None, type='list'),
+ alert_type=dict(
+ required=False, default='info',
+ choices=['error', 'warning', 'info', 'success']
+ ),
+ aggregation_key=dict(required=False, default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ if module.params['host'] is None:
+ module.params['host'] = platform.node().split('.')[0]
+ msg = api.Event.create(title=module.params['title'],
+ text=module.params['text'],
+ host=module.params['host'],
+ tags=module.params['tags'],
+ priority=module.params['priority'],
+ alert_type=module.params['alert_type'],
+ aggregation_key=module.params['aggregation_key'],
+ source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py
new file mode 100644
index 00000000..f6020c2b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/datadog/datadog_monitor.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+ - Manages monitors within Datadog.
+ - Options as described on https://docs.datadoghq.com/api/.
+author: Sebastian Kornehl (@skornehl)
+requirements: [datadog]
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ type:
+ description:
+ - The type of the monitor.
+ choices: ['metric alert', 'service check', 'event alert', 'process alert', 'log alert']
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+ - Monitor message template variables can be accessed by using double square brackets, i.e. '[[' and ']]'.
+ - C(message) alias is deprecated in community.general 0.2.0, since it is used internally by Ansible Core Engine.
+ type: str
+ aliases: [ 'message' ]
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ default: ""
+ notify_no_data:
+ description:
+ - Whether this monitor will notify when data stops reporting.
+ type: bool
+ default: 'no'
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status.
+ - It will only re-notify if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification format used elsewhere.
+ - Not applicable if I(renotify_interval=None).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users will be notified on changes to this monitor.
+ type: bool
+ default: 'no'
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: 'no'
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+ - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+ - If set, will be used instead of the name to locate the alert.
+ type: str
+ include_tags:
+ description:
+ - Whether notifications from this monitor automatically insert its triggering tags into the title.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+ community.general.datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+ notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Deletes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Mutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Unmutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Use datadoghq.eu platform instead of datadoghq.com
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_host: https://api.datadoghq.eu
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(required=False),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(required=False, choices=['metric alert', 'service check', 'event alert', 'process alert', 'log alert']),
+ name=dict(required=True),
+ query=dict(required=False),
+ notification_message=dict(required=False, no_log=True, default=None, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ silenced=dict(required=False, default=None, type='dict'),
+ notify_no_data=dict(required=False, default=False, type='bool'),
+ no_data_timeframe=dict(required=False, default=None),
+ timeout_h=dict(required=False, default=None),
+ renotify_interval=dict(required=False, default=None),
+ escalation_message=dict(required=False, default=None),
+ notify_audit=dict(required=False, default=False, type='bool'),
+ thresholds=dict(required=False, type='dict', default=None),
+ tags=dict(required=False, type='list', default=None),
+ locked=dict(required=False, default=False, type='bool'),
+ require_full_window=dict(required=False, default=None, type='bool'),
+ new_host_delay=dict(required=False, default=None),
+ evaluation_delay=dict(required=False, default=None),
+ id=dict(required=False),
+ include_tags=dict(required=False, default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ if 'message' in module.params:
+ module.fail_json(msg="'message' is reserved keyword, please change this parameter to 'notification_message'")
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'api_host': module.params['api_host'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ # Check if api_key and app_key is correct or not
+ # if not, then fail here.
+ response = api.Monitor.get_all()
+ if isinstance(response, dict):
+ msg = response.get('errors', None)
+ if msg:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
+
+def _fix_template_vars(message):
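+ # The example notification messages use [[var]] placeholders; convert them to
+ # the {{var}} template variable syntax that the Datadog API expects.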
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
+
+
+def _get_monitor(module):
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == _fix_template_vars(module.params['name']):
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
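+ # Compare two monitor dictionaries while ignoring volatile, server-managed keys
+ # (such as 'modified' and 'overall_state'), so a no-op update reports changed=False.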
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window": module.params['require_full_window'],
+ "new_host_delay": module.params['new_host_delay'],
+ "evaluation_delay": module.params['evaluation_delay'],
+ "include_tags": module.params['include_tags'],
+ }
+
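+ # For service checks, default the thresholds to a single occurrence of each
+ # status when none are supplied.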
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] in ["metric alert", "log alert"] and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py
new file mode 100644
index 00000000..0b96af04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/honeybadger_deployment.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ environment:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ type: str
+ description:
+ - The username of the person doing the deployment.
+ repo:
+ type: str
+ description:
+ - URL of the project repository.
+ revision:
+ type: str
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed.
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+ community.general.honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
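+ # The Honeybadger deploy-tracking endpoint accepts Rails-style form fields such
+ # as deploy[environment]; only include the ones that were actually provided.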
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py
new file mode 100644
index 00000000..b59c0e11
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_feature.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+ - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+options:
+ name:
+ type: str
+ description:
+ - This is the feature name to enable or disable.
+ required: True
+ state:
+ type: str
+ description:
+ - If set to C(present) and feature is disabled, then feature is enabled.
+ - If set to C(present) and feature is already enabled, then nothing is changed.
+ - If set to C(absent) and feature is enabled, then feature is disabled.
+ - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Enable ido-pgsql feature
+ community.general.icinga2_feature:
+ name: ido-pgsql
+ state: present
+
+- name: Disable api feature
+ community.general.icinga2_feature:
+ name: api
+ state: absent
+'''
+
+RETURN = '''
+#
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Icinga2FeatureHelper:
+ def __init__(self, module):
+ self.module = module
+ self._icinga2 = module.get_bin_path('icinga2', True)
+ self.feature_name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ def _exec(self, args):
+ cmd = [self._icinga2, 'feature']
+ rc, out, err = self.module.run_command(cmd + args, check_rc=True)
+ return rc, out
+
+ def manage(self):
+ rc, out = self._exec(["list"])
+ if rc != 0:
+ self.module.fail_json(msg="Unable to list icinga2 features. "
+ "Ensure icinga2 is installed and present in binary path.")
+
+ # If feature is already in good state, just exit
+ if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
+ (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
+ self.module.exit_json(changed=False)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ feature_enable_str = "enable" if self.state == "present" else "disable"
+
+ rc, out = self._exec([feature_enable_str, self.feature_name])
+
+ change_applied = False
+ if self.state == "present":
+ if rc != 0:
+ self.module.fail_json(msg="Failed to %s feature %s."
+ " icinga2 command returned %s" % (feature_enable_str,
+ self.feature_name,
+ out))
+
+ if re.search("already enabled", out) is None:
+ change_applied = True
+ else:
+ if rc == 0:
+ change_applied = True
+ # RC is not 0 for this already disabled feature, handle it as no change applied
+ elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
+ change_applied = False
+ else:
+ self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
+
+ self.module.exit_json(changed=change_applied)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=["present", "absent"], default="present")
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+ Icinga2FeatureHelper(module).manage()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py
new file mode 100644
index 00000000..65c95812
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/icinga2_host.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is proudly sponsored by CGI (www.cgi.com) and
+# KPN (www.kpn.com).
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_host
+short_description: Manage a host in Icinga2
+description:
+ - "Add or remove a host to Icinga2 through the API."
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
+author: "Jurgen Brand (@t794104)"
+options:
+ url:
+ type: str
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ use_proxy:
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ url_username:
+ type: str
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ url_password:
+ type: str
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module, only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon the initial request.
+ type: bool
+ default: 'no'
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client
+ authentication. This file can also include the key as well, and if
+ the key is included, C(client_key) is not required.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL
+ client authentication. If C(client_cert) contains both the certificate
+ and key, this option is not required.
+ state:
+ type: str
+ description:
+ - Apply feature state.
+ choices: [ "present", "absent" ]
+ default: present
+ name:
+ type: str
+ description:
+ - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
+ required: true
+ zone:
+ type: str
+ description:
+ - The zone from where this host should be polled.
+ template:
+ type: str
+ description:
+ - The template used to define the host.
+ - Template cannot be modified after object creation.
+ check_command:
+ type: str
+ description:
+ - The command used to check if the host is alive.
+ default: "hostalive"
+ display_name:
+ type: str
+ description:
+ - The name used to display the host.
+ - If not specified, it defaults to the value of the I(name) parameter.
+ ip:
+ type: str
+ description:
+ - The IP address of the host.
+ required: true
+ variables:
+ type: dict
+ description:
+ - Dictionary of variables.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Add host to icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: present
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ variables:
+ foo: "bar"
+ delegate_to: 127.0.0.1
+'''
+
+RETURN = '''
+name:
+ description: The name used to create, modify or delete the host
+ type: str
+ returned: always
+data:
+ description: The data structure used for create, modify or delete of the host
+ type: dict
+ returned: always
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+# ===========================================
+# Icinga2 API class
+#
+class icinga2_api:
+ module = None
+
+ def __init__(self, module):
+ self.module = module
+
+ def call_url(self, path, data='', method='GET'):
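+ # Every request also carries an X-HTTP-Method-Override header; the Icinga2 API
+ # honours it, which allows a JSON body (for example a filter) to accompany
+ # methods such as GET.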
+ headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': method,
+ }
+ url = self.module.params.get("url") + "/" + path
+ rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy'])
+ body = ''
+ if rsp:
+ body = json.loads(rsp.read())
+ if info['status'] >= 400:
+ body = info['body']
+ return {'code': info['status'], 'data': body}
+
+ def check_connection(self):
+ ret = self.call_url('v1/status')
+ if ret['code'] == 200:
+ return True
+ return False
+
+ def exists(self, hostname):
+ data = {
+ "filter": "match(\"" + hostname + "\", host.name)",
+ }
+ ret = self.call_url(
+ path="v1/objects/hosts",
+ data=self.module.jsonify(data)
+ )
+ if ret['code'] == 200:
+ if len(ret['data']['results']) == 1:
+ return True
+ return False
+
+ def create(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="PUT"
+ )
+ return ret
+
+ def delete(self, hostname):
+ data = {"cascade": 1}
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="DELETE"
+ )
+ return ret
+
+ def modify(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="POST"
+ )
+ return ret
+
+ def diff(self, hostname, data):
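+ # Fetch the current host object and report True if any desired attribute is
+ # missing or differs from what Icinga2 currently has.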
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ method="GET"
+ )
+ changed = False
+ ic_data = ret['data']['results'][0]
+ for key in data['attrs']:
+ if key not in ic_data['attrs'].keys():
+ changed = True
+ elif data['attrs'][key] != ic_data['attrs'][key]:
+ changed = True
+ return changed
+
+
+# ===========================================
+# Module execution.
+#
+def main():
+ # use the predefined argument spec for url
+ argument_spec = url_argument_spec()
+ # add our own arguments
+ argument_spec.update(
+ state=dict(default="present", choices=["absent", "present"]),
+ name=dict(required=True, aliases=['host']),
+ zone=dict(),
+ template=dict(default=None),
+ check_command=dict(default="hostalive"),
+ display_name=dict(default=None),
+ ip=dict(required=True),
+ variables=dict(type='dict', default=None),
+ )
+
+ # Define the main module
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ name = module.params["name"]
+ zone = module.params["zone"]
+ template = [name]
+ if module.params["template"]:
+ template.append(module.params["template"])
+ check_command = module.params["check_command"]
+ ip = module.params["ip"]
+ display_name = module.params["display_name"]
+ if not display_name:
+ display_name = name
+ variables = module.params["variables"]
+
+ try:
+ icinga = icinga2_api(module=module)
+ icinga.check_connection()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+ data = {
+ 'attrs': {
+ 'address': ip,
+ 'display_name': display_name,
+ 'check_command': check_command,
+ 'zone': zone,
+ 'vars': {
+ 'made_by': "ansible",
+ },
+ 'templates': template,
+ }
+ }
+
+ if variables:
+ data['attrs']['vars'].update(variables)
+
+ changed = False
+ if icinga.exists(name):
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+ else:
+ try:
+ ret = icinga.delete(name)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception deleting host: " + str(e))
+
+ elif icinga.diff(name, data):
+ if module.check_mode:
+ module.exit_json(changed=False, name=name, data=data)
+
+ # Template attribute is not allowed in modification
+ del data['attrs']['templates']
+
+ ret = icinga.modify(name, data)
+
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+ else:
+ if state == "present":
+ if module.check_mode:
+ changed = True
+ else:
+ try:
+ ret = icinga.create(name, data)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception creating host: " + str(e))
+
+ module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py
new file mode 100644
index 00000000..d0fd406d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: Create an annotation in Librato
+description:
+ - Create an annotation event on the given annotation stream I(name). If the annotation stream does not exist, it will be created automatically.
+author: "Seth Edwards (@Sedward)"
+requirements: []
+options:
+ user:
+ type: str
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account api key
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ type: list
+ description:
+ - See examples
+'''
+
+EXAMPLES = '''
+- name: Create a simple annotation event with a source
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
+
+- name: Create an annotation that includes a link
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
+ links:
+ - rel: example
+ href: http://www.example.com/deploy
+
+- name: Create an annotation with a start_time and end_time
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def post_annotation(module):
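+ # Build the annotation payload and POST it to the Librato v1 annotations
+ # endpoint for the given stream name.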
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+ # Hack send parameters the way fetch_url wants them
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ response_code = str(info['status'])
+ response_body = info['body']
+ if info['status'] != 201:
+ if info['status'] >= 400:
+ module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+ else:
+ module.fail_json(msg="Request Failed. Response code: " + response_code)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(required=False, default=None, type='int'),
+ links=dict(type='list')
+ )
+ )
+
+ post_annotation(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py
new file mode 100644
index 00000000..8f39fb51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logentries.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl <ivan@app.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in real time.
+options:
+ path:
+ type: str
+ description:
+ - path to a log file
+ required: true
+ state:
+ type: str
+ description:
+ - following state of the log
+ choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - name of the log
+ required: false
+ logtype:
+ type: str
+ description:
+ - type of the log
+ required: false
+ aliases: [type]
+
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+- name: Stop tracking nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/error.log
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
+ rc, out, err = module.run_command(' '.join(cmd))
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+ module.exit_json(changed=False, msg="logs(s) already followed")
+
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+ # Using a for loop so that, in case of error, we can report the log that failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+ module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
+
+ module.exit_json(changed=False, msg="logs(s) already unfollowed")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py
new file mode 100644
index 00000000..4a45c04a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/logstash_plugin.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+ - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+options:
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: True
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+ - Specify the version of the plugin to install.
+ If the plugin exists with a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Logstash beats input plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+
+- name: Install specific version of a plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-syslog
+ version: '3.2.0'
+
+- name: Uninstall Logstash plugin
+ community.general.logstash_plugin:
+ state: absent
+ name: logstash-filter-multiline
+
+- name: Install Logstash plugin with alternate heap size
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ environment:
+ LS_JAVA_OPTS: "-Xms256m -Xmx256m"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+
+def is_plugin_present(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "list", plugin_name]
+ rc, out, err = module.run_command(" ".join(cmd_args))
+ return rc == 0
+
+
+def parse_error(string):
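+ # logstash-plugin failures typically include a "reason: <detail>" fragment;
+ # return the detail portion, or the raw output if no such marker is present.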
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ cmd_args.append("--version %s" % version)
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
+ plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ plugin_bin = module.params["plugin_bin"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(module, plugin_bin, name)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py
new file mode 100644
index 00000000..1dfe76d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/monit.py
@@ -0,0 +1,340 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit).
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of service.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ type: str
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ default: 300
+ type: int
+author:
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program httpd to be in started state
+ community.general.monit:
+ name: httpd
+ state: started
+'''
+
+import time
+import re
+
+from collections import namedtuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import python_2_unicode_compatible
+
+
+STATE_COMMAND_MAP = {
+ 'stopped': 'stop',
+ 'started': 'start',
+ 'monitored': 'monitor',
+ 'unmonitored': 'unmonitor',
+ 'restarted': 'restart'
+}
+
+MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program',
+ 'Network']
+
+
+@python_2_unicode_compatible
+class StatusValue(namedtuple("Status", "value, is_pending")):
+ MISSING = 'missing'
+ OK = 'ok'
+ NOT_MONITORED = 'not_monitored'
+ INITIALIZING = 'initializing'
+ DOES_NOT_EXIST = 'does_not_exist'
+ EXECUTION_FAILED = 'execution_failed'
+ ALL_STATUS = [
+ MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED
+ ]
+
+ def __new__(cls, value, is_pending=False):
+ return super(StatusValue, cls).__new__(cls, value, is_pending)
+
+ def pending(self):
+ return StatusValue(self.value, True)
+
+ def __getattr__(self, item):
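+ # Expose dynamic boolean helpers (is_ok, is_missing, ...) derived from ALL_STATUS.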
+ if item in ('is_%s' % status for status in self.ALL_STATUS):
+ return self.value == getattr(self, item[3:].upper())
+ raise AttributeError(item)
+
+ def __str__(self):
+ return "%s%s" % (self.value, " (pending)" if self.is_pending else "")
+
+
+class Status(object):
+ MISSING = StatusValue(StatusValue.MISSING)
+ OK = StatusValue(StatusValue.OK)
+ RUNNING = StatusValue(StatusValue.OK)
+ NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED)
+ INITIALIZING = StatusValue(StatusValue.INITIALIZING)
+ DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST)
+ EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED)
+
+
+class Monit(object):
+ def __init__(self, module, monit_bin_path, service_name, timeout):
+ self.module = module
+ self.monit_bin_path = monit_bin_path
+ self.process_name = service_name
+ self.timeout = timeout
+
+ self._monit_version = None
+ self._raw_version = None
+ self._status_change_retry_count = 6
+
+ def monit_version(self):
+ if self._monit_version is None:
+ self._raw_version, version = self._get_monit_version()
+ # Use only the major and minor components; even if there are more, these should be enough
+ self._monit_version = version[0], version[1]
+ return self._monit_version
+
+ def _get_monit_version(self):
+ rc, out, err = self.module.run_command('%s -V' % self.monit_bin_path, check_rc=True)
+ version_line = out.split('\n')[0]
+ raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
+ return raw_version, tuple(map(int, raw_version.split('.')))
+
+ def exit_fail(self, msg, status=None, **kwargs):
+ kwargs.update({
+ 'msg': msg,
+ 'monit_version': self._raw_version,
+ 'process_status': str(status) if status else None,
+ })
+ self.module.fail_json(**kwargs)
+
+ def exit_success(self, state):
+ self.module.exit_json(changed=True, name=self.process_name, state=state)
+
+ @property
+ def command_args(self):
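+ # monit releases newer than 5.18 support batch mode (-B), which keeps the
+ # status output plain and easier to parse.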
+ return "-B" if self.monit_version() > (5, 18) else ""
+
+ def get_status(self, validate=False):
+ """Return the status of the process in monit.
+
+ :@param validate: Force monit to re-check the status of the process
+ """
+ monit_command = "validate" if validate else "status"
+ check_rc = False if validate else True # 'validate' always has rc = 1
+ command = ' '.join([self.monit_bin_path, monit_command, self.command_args, self.process_name])
+ rc, out, err = self.module.run_command(command, check_rc=check_rc)
+ return self._parse_status(out, err)
+
+ def _parse_status(self, output, err):
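+ # Locate the service entry in monit's output, then normalise its status line
+ # (which may carry a pending action such as "start pending") into a StatusValue.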
+ escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES])
+ pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name))
+ if not re.search(pattern, output, re.IGNORECASE):
+ return Status.MISSING
+
+ status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE)
+ if not status_val:
+ self.exit_fail("Unable to find process status", stdout=output, stderr=err)
+
+ status_val = status_val[0].strip().upper()
+ if ' | ' in status_val:
+ status_val = status_val.split(' | ')[0]
+ if ' - ' not in status_val:
+ status_val = status_val.replace(' ', '_')
+ return getattr(Status, status_val)
+ else:
+ status_val, substatus = status_val.split(' - ')
+ action, state = substatus.split()
+ if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']:
+ status = Status.OK
+ else:
+ status = Status.NOT_MONITORED
+
+ if state == 'pending':
+ status = status.pending()
+ return status
+
+ def is_process_present(self):
+ rc, out, err = self.module.run_command('%s summary %s' % (self.monit_bin_path, self.command_args), check_rc=True)
+ return bool(re.findall(r'\b%s\b' % self.process_name, out))
+
+ def is_process_running(self):
+ return self.get_status().is_ok
+
+ def run_command(self, command):
+ """Runs a monit command, and returns the new status."""
+ return self.module.run_command('%s %s %s' % (self.monit_bin_path, command, self.process_name), check_rc=True)
+
+ def wait_for_status_change(self, current_status):
+ running_status = self.get_status()
+ if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED:
+ return running_status
+
+ loop_count = 0
+ while running_status.value == current_status.value:
+ if loop_count >= self._status_change_retry_count:
+ self.exit_fail('waited too long for monit to change state', running_status)
+
+ loop_count += 1
+ time.sleep(0.5)
+ validate = loop_count % 2 == 0 # force recheck of status every second try
+ running_status = self.get_status(validate)
+ return running_status
+
+ def wait_for_monit_to_stop_pending(self, current_status=None):
+ """Fails this run if there is no status or it's pending/initializing for timeout"""
+ timeout_time = time.time() + self.timeout
+
+ if not current_status:
+ current_status = self.get_status()
+ waiting_status = [
+ StatusValue.MISSING,
+ StatusValue.INITIALIZING,
+ StatusValue.DOES_NOT_EXIST,
+ ]
+ while current_status.is_pending or (current_status.value in waiting_status):
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status)
+
+ time.sleep(5)
+ current_status = self.get_status(validate=True)
+ return current_status
+
+ def reload(self):
+ rc, out, err = self.module.run_command('%s reload' % self.monit_bin_path)
+ if rc != 0:
+ self.exit_fail('monit reload failed', stdout=out, stderr=err)
+ self.exit_success(state='reloaded')
+
+ def present(self):
+ self.run_command('reload')
+
+ timeout_time = time.time() + self.timeout
+ while not self.is_process_present():
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for process to become "present"')
+
+ time.sleep(5)
+
+ self.exit_success(state='present')
+
+ def change_state(self, state, expected_status, invert_expected=None):
+ current_status = self.get_status()
+ self.run_command(STATE_COMMAND_MAP[state])
+ status = self.wait_for_status_change(current_status)
+ status = self.wait_for_monit_to_stop_pending(status)
+ status_match = status.value == expected_status.value
+ if invert_expected:
+ status_match = not status_match
+ if status_match:
+ self.exit_success(state=state)
+ self.exit_fail('%s process not %s' % (self.process_name, state), status)
+
+ def stop(self):
+ self.change_state('stopped', Status.NOT_MONITORED)
+
+ def unmonitor(self):
+ self.change_state('unmonitored', Status.NOT_MONITORED)
+
+ def restart(self):
+ self.change_state('restarted', Status.OK)
+
+ def start(self):
+ self.change_state('started', Status.OK)
+
+ def monitor(self):
+ self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ monit = Monit(module, module.get_bin_path('monit', True), name, timeout)
+
+ def exit_if_check_mode():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if state == 'reloaded':
+ exit_if_check_mode()
+ monit.reload()
+
+ present = monit.is_process_present()
+
+ if not present and not state == 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name)
+
+ if state == 'present':
+ if present:
+ module.exit_json(changed=False, name=name, state=state)
+ exit_if_check_mode()
+ monit.present()
+
+ monit.wait_for_monit_to_stop_pending()
+ running = monit.is_process_running()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ exit_if_check_mode()
+ monit.stop()
+
+ if running and state == 'unmonitored':
+ exit_if_check_mode()
+ monit.unmonitor()
+
+ elif state == 'restarted':
+ exit_if_check_mode()
+ monit.restart()
+
+ elif not running and state == 'started':
+ exit_if_check_mode()
+ monit.start()
+
+ elif not running and state == 'monitored':
+ exit_if_check_mode()
+ monit.monitor()
+
+ module.exit_json(changed=False, name=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py
new file mode 100644
index 00000000..248fd105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/nagios.py
@@ -0,0 +1,1304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+ - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - The C(nagios) module is not idempotent.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
+ - You can specify multiple services at once by separating them with commas, e.g., C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ e.g., C(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+ To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ type: str
+ host:
+ description:
+ - Host to operate on in Nagios.
+ type: str
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ type: str
+ author:
+ description:
+ - Author to leave downtime comments as.
+ Only usable with the C(downtime) and C(acknowledge) action.
+ type: str
+ default: Ansible
+ comment:
+ description:
+ - Comment for the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Scheduling downtime
+ start:
+ description:
+ - When downtime should start, in time_t format (epoch seconds).
+ version_added: '0.2.0'
+ type: str
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ type: int
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ type: str
+ servicegroup:
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+ B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ type: str
+ command:
+ description:
+ - The raw command to send to nagios, which
+ should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ type: str
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+- name: Set 30 minutes of apache downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+ community.general.nagios:
+ action: downtime
+ start: 1555984800
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime, with a comment describing the reason
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
+
+- name: Schedule downtime for ALL services on HOST
+ community.general.nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule downtime for a few services
+ community.general.nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all services in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all hosts in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Delete all downtime for a given host
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+- name: Delete all downtime for HOST with a particular comment
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
+
+- name: Acknowledge a HOST with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: 'power outage - see casenr 12345'
+
+- name: Acknowledge an active service problem for the httpd service with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: httpd
+ host: '{{ inventory_hostname }}'
+ comment: 'service crashed - see casenr 12345'
+
+- name: Reset a passive service check for snmp trap
+ community.general.nagios:
+ action: forced_check
+ service: snmp
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for the httpd service
+ community.general.nagios:
+ action: forced_check
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for all services of a particular host
+ community.general.nagios:
+ action: forced_check
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for a particular host
+ community.general.nagios:
+ action: forced_check
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Enable SMART disk alerts
+ community.general.nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
+
+- name: Disable httpd and nfs alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
+
+- name: Disable HOST alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Silence ALL alerts
+ community.general.nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
+
+- name: Unsilence all alerts
+ community.general.nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
+
+- name: Shut up nagios
+ community.general.nagios:
+ action: silence_nagios
+
+- name: Annoy me nagios
+ community.general.nagios:
+ action: unsilence_nagios
+
+- name: Command something
+ community.general.nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+######################################################################
+
+def which_cmdfile():
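+ # Auto-detect the Nagios/Icinga external command file by scanning well-known
+ # configuration paths for a command_file directive.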
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+ for path in locations:
+ if os.path.exists(path):
+ with open(path) as cfg_file:
+ for line in cfg_file:
+ if line.startswith('command_file'):
+ return line.split('=')[1].strip()
+
+ return None
+
+######################################################################
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ 'acknowledge',
+ 'forced_check',
+ ]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=ACTION_CHOICES),
+ author=dict(default='Ansible'),
+ comment=dict(default='Scheduling downtime'),
+ host=dict(required=False, default=None),
+ servicegroup=dict(required=False, default=None),
+ start=dict(required=False, default=None),
+ minutes=dict(default=30, type='int'),
+ cmdfile=dict(default=which_cmdfile()),
+ services=dict(default=None, aliases=['service']),
+ command=dict(required=False, default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ action = module.params['action']
+ host = module.params['host']
+ servicegroup = module.params['servicegroup']
+ start = module.params['start']
+ services = module.params['services']
+ cmdfile = module.params['cmdfile']
+ command = module.params['command']
+
+ ##################################################################
+ # Required args per action:
+ # downtime = (minutes, service, host)
+ # delete_downtime = (service, host)
+ # acknowledge = (service, host)
+ # forced_check = (service, host)
+ # servicegroup_*_downtime = (servicegroup, minutes)
+ # (un)silence = (host)
+ # (enable/disable)_alerts = (service, host)
+ # command = (command)
+ #
+ # AnsibleModule validates the argument spec; 'service' and the other
+ # per-action requirements are verified manually below.
+
+ ##################################################################
+ if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+ if not host:
+ module.fail_json(msg='no host specified for action requiring one')
+ ######################################################################
+ if action == 'downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to delete downtime for')
+
+ ######################################################################
+
+ if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+ # Make sure there's an actual servicegroup selected
+ if not servicegroup:
+ module.fail_json(msg='no servicegroup selected to set downtime for')
+
+ ##################################################################
+ if action in ['enable_alerts', 'disable_alerts']:
+ if not services:
+ module.fail_json(msg='a service is required when setting alerts')
+
+ if action in ['command']:
+ if not command:
+ module.fail_json(msg='no command passed for command action')
+ ######################################################################
+ if action == 'acknowledge':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to acknowledge')
+
+ ##################################################################
+ if action == 'forced_check':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to check')
+
+ ##################################################################
+ if not cmdfile:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ##################################################################
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ if kwargs['start'] is not None:
+ self.start = int(kwargs['start'])
+ else:
+ self.start = None
+ self.minutes = kwargs['minutes']
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
+ if not os.path.exists(self.cmdfile):
+ self.module.fail_json(msg='nagios command file does not exist',
+ cmdfile=self.cmdfile)
+ if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode):
+ self.module.fail_json(msg='nagios command file is not a fifo file',
+ cmdfile=self.cmdfile)
+ try:
+ with open(self.cmdfile, 'w') as fp:
+ fp.write(cmd)
+ fp.flush()
+ self.command_results.append(cmd.strip())
+ # Return True so callers that check the result can report success
+ return True
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+ host - Host to schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970;
+ defaults to the entry time (now)
+ svc - Service to schedule downtime for; omit for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of the event to start downtime from. Leave as 0
+ for fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
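+
+ Example of the string this builds (timestamp, host and service
+ names are illustrative):
+ [1611111111] SCHEDULE_SVC_DOWNTIME;web01;httpd;1611111111;1611113811;1;0;2700;Ansible;Scheduling downtime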
+ """
+
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
+ def _fmt_ack_str(self, cmd, host, author=None,
+ comment=None, svc=None, sticky=0, notify=1, persistent=0):
+ """
+ Format an external-command acknowledge string.
+
+ cmd - Nagios command ID
+ host - Host to acknowledge the problem on
+ author - Name to file the acknowledgement as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ svc - Service to acknowledge the problem for; omit for a host problem
+ sticky - if set to 1, the acknowledgement remains until the host or service recovers
+ notify - if set to 1, a notification is sent out to contacts
+ persistent - if set to 1, the comment survives restarts of the Nagios process
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <sticky>;<notify>;<persistent>;<author>;<comment>
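+
+ Example of the string this builds (timestamp, host and service
+ names are illustrative):
+ [1611111111] ACKNOWLEDGE_SVC_PROBLEM;web01;httpd;0;1;0;Ansible;service crashed - see casenr 12345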
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+ ack_arg_str = ";".join(ack_args)
+ ack_str = hdr + ack_arg_str + "\n"
+
+ return ack_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
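+
+ Example of the string this builds when only a comment is given
+ (timestamp and host are illustrative):
+ [1611111111] DEL_DOWNTIME_BY_HOST_NAME;web01;;;Planned maintenance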
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+ """
+ Format an external-command forced host or service check string.
+
+ cmd - Nagios command ID
+ host - Host to check service from
+ svc - Service to check
+ start - check time
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
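+
+ Example of the string this builds (timestamps, host and service
+ names are illustrative; the check time defaults to now + 3 seconds):
+ [1611111111] SCHEDULE_FORCED_SVC_CHECK;web01;httpd;1611111114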
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if start is None:
+ start = entry_time + 3
+
+ if svc is None:
+ chk_args = [str(start)]
+ else:
+ chk_args = [svc, str(start)]
+
+ chk_arg_str = ";".join(chk_args)
+ chk_str = hdr + chk_arg_str + "\n"
+
+ return chk_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+ host - Host to en/disable notifications on. A value is not required
+ for Nagios-wide commands.
+ svc - Service to en/disable notifications for. A value is not required
+ for host notifications.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
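+
+ Example of the string this builds (timestamp, host and service
+ names are illustrative):
+ [1611111111] DISABLE_SVC_NOTIFICATIONS;web01;httpd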
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+ Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def acknowledge_svc_problem(self, host, services=None):
+ """
+ This command is used to acknowledge a particular
+ service problem.
+
+ By acknowledging the current problem, future notifications
+ for the same service state are disabled.
+
+ Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+ self._write_command(ack_cmd_str)
+
+ def acknowledge_host_problem(self, host):
+ """
+ This command is used to acknowledge a particular
+ host problem.
+
+ By acknowledging the current problem, future notifications
+ for the same host state are disabled.
+
+ Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+ <persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+ ack_cmd_str = self._fmt_ack_str(cmd, host)
+ self._write_command(ack_cmd_str)
+
+ def schedule_forced_host_check(self, host):
+ """
+ This command schedules a forced active check for a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_host_svc_check(self, host):
+ """
+ This command schedules a forced active check for all services
+ associated with a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_svc_check(self, host, services=None):
+ """
+ This command schedules a forced active check for a particular
+ service.
+
+ Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+ self._write_command(chk_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+ in nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+ in nagios.
+
+ This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+ This sends an arbitrary command to nagios.
+
+ It prepends the submission time and appends a newline; the command
+ itself must already be properly formatted.
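+
+ Example (illustrative timestamp): nagios_cmd('DISABLE_FAILURE_PREDICTION')
+ writes '[1611111111] DISABLE_FAILURE_PREDICTION' followed by a newline.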
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
+
+ def act(self):
+ """
+ Figure out which action was requested from Ansible and dispatch
+ to the matching method.
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes,
+ start=self.start)
+
+ elif self.action == 'acknowledge':
+ if self.services == 'host':
+ self.acknowledge_host_problem(self.host)
+ else:
+ self.acknowledge_svc_problem(self.host, services=self.services)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == 'forced_check':
+ if self.services == 'host':
+ self.schedule_forced_host_check(self.host)
+ elif self.services == 'all':
+ self.schedule_forced_host_svc_check(self.host)
+ else:
+ self.schedule_forced_svc_check(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+ # unknown action
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" %
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py
new file mode 100644
index 00000000..af953e0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/newrelic_deployment.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
+options:
+ token:
+ type: str
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ type: str
+ description:
+ - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
+ required: false
+ application_id:
+ type: str
+ description:
+ - (one of app_name or application_id is required) The application ID, found in the URL when viewing the application in RPM
+ required: false
+ changelog:
+ type: str
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ type: str
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ type: str
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ type: str
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ type: str
+ description:
+ - Name of the application
+ required: false
+ environment:
+ type: str
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify newrelic about an app deployment
+ community.general.newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=False),
+ user=dict(required=False),
+ appname=dict(required=False),
+ environment=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ if module.params["app_name"]:
+ params["app_name"] = module.params["app_name"]
+ elif module.params["application_id"]:
+ params["application_id"] = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to NewRelic
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urlencode(params)
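+ # Illustrative form-encoded body for the EXAMPLES task above:
+ # app_name=myapp&revision=1.0&user=ansible+deployment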
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py
new file mode 100644
index 00000000..306b596b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+ - This module will let you create PagerDuty maintenance windows
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+ - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ type: str
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ choices: [ "running", "started", "ongoing", "absent" ]
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ user:
+ type: str
+ description:
+ - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+ token:
+ type: str
+ description:
+ - A pagerduty token, generated on the pagerduty site. It is used for authorization.
+ required: true
+ requester_id:
+ type: str
+ description:
+ - ID of user making the request. Only needed when creating a maintenance_window.
+ service:
+ type: list
+ description:
+ - A comma separated list of PagerDuty service IDs.
+ aliases: [ services ]
+ window_id:
+ type: str
+ description:
+ - ID of maintenance window. Only needed when absent a maintenance_window.
+ hours:
+ type: str
+ description:
+ - Length of maintenance window in hours.
+ default: '1'
+ minutes:
+ type: str
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ default: '0'
+ desc:
+ type: str
+ description:
+ - Short description of maintenance window.
+ default: Created by Ansible
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: List ongoing maintenance windows using a token
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
+
+- name: Create a 1 hour maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ token: yourtoken
+ state: running
+ service: FOO123
+
+- name: Create a 5 minute maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
+
+
+- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment"
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
+ register: pd_window
+
+- name: Delete the previous maintenance window
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: absent
+ window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window from a separate playbook than its creation,
+# and if it is the only existing maintenance window
+- name: Check
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: ongoing
+ register: pd_window
+
+- name: Delete
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: absent
+ window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
+'''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class PagerDutyRequest(object):
+ def __init__(self, module, name, user, token):
+ self.module = module
+ self.name = name
+ self.user = user
+ self.token = token
+ self.headers = {
+ 'Content-Type': 'application/json',
+ "Authorization": self._auth_header(),
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ def ongoing(self, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, False
+
+ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
+ if not requester_id:
+ self.module.fail_json(msg="requester_id is required when maintenance window should be created")
+
+ url = 'https://api.pagerduty.com/maintenance_windows'
+
+ headers = dict(self.headers)
+ headers.update({'From': requester_id})
+
+ start, end = self._compute_start_end_time(hours, minutes)
+ services = self._create_services_payload(service)
+
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
+
+ data = json.dumps(request_data)
+ response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 201:
+ self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _create_services_payload(self, service):
+ if (isinstance(service, list)):
+ return [{'id': s, 'type': 'service_reference'} for s in service]
+ else:
+ return [{'id': service, 'type': 'service_reference'}]
+
+ def _compute_start_end_time(self, hours, minutes):
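+ """
+ Return (start, end) as UTC timestamps in the format PagerDuty expects,
+ for example (illustrative): ('2021-01-20T03:31:51Z', '2021-01-20T04:31:51Z')
+ for hours='1', minutes='0'.
+ """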
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return start, end
+
+ def absent(self, window_id, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows/" + window_id
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers, method='DELETE')
+ if info['status'] != 204:
+ self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _auth_header(self):
+ return "Token token=%s" % self.token
+
+ def _read_response(self, response):
+ try:
+ return json.loads(response.read())
+ except Exception:
+ return ""
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=False),
+ user=dict(required=False),
+ token=dict(required=True, no_log=True),
+ service=dict(required=False, type='list', aliases=["services"]),
+ window_id=dict(required=False),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False), # @TODO change to int?
+ minutes=dict(default='0', required=False), # @TODO change to int?
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ service = module.params['service']
+ window_id = module.params['window_id']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ pd = PagerDutyRequest(module, name, user, token)
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = pd.ongoing()
+
+ if state == "absent":
+ (rc, out, changed) = pd.absent(window_id)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py
new file mode 100644
index 00000000..736ada5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_alert.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+ - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events
+author:
+ - "Amanpreet Singh (@ApsOps)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ service_id:
+ type: str
+ description:
+ - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved.
+ required: true
+ service_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services.
+ - This is the "integration key" listed on a "Integrations" tab of PagerDuty service.
+ state:
+ type: str
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ type: str
+ description:
+ - The pagerduty API key (readonly access), generated on the pagerduty site.
+ required: true
+ desc:
+ type: str
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ type: str
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Trigger an incident with just the basic options
+ community.general.pagerduty_alert:
+ name: companyabc
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger an incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+
+- name: Acknowledge an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: acknowledged
+ incident_key: somekey
+ desc: "some text for incident's log"
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: resolved
+ incident_key: somekey
+ desc: "some text for incident's log"
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+
+
+def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
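+ """
+ Query the PagerDuty incidents API for the given service (optionally
+ narrowed by incident_key) and compare the most recent incident's status
+ with the desired state.
+
+ Returns an (incident_or_message, changed) tuple; changed is True when an
+ event still needs to be sent.
+ """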
+ url = 'https://api.pagerduty.com/incidents'
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key,
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ params = {
+ 'service_ids[]': service_id,
+ 'sort_by': 'incident_number:desc',
+ 'time_zone': 'UTC'
+ }
+ if incident_key:
+ params['incident_key'] = incident_key
+
+ url_parts = list(urlparse(url))
+ url_parts[4] = urlencode(params, True)
+
+ url = urlunparse(url_parts)
+
+ response, info = http_call(module, url, method='get', headers=headers)
+
+ if info['status'] != 200:
+ module.fail_json(msg="failed to check current incident status."
+ "Reason: %s" % info['msg'])
+
+ incidents = json.loads(response.read())["incidents"]
+ msg = "No corresponding incident"
+
+ if len(incidents) == 0:
+ if state in ('acknowledged', 'resolved'):
+ return msg, False
+ return msg, True
+ elif state != incidents[0]["status"]:
+ return incidents[0], True
+
+ return incidents[0], False
+
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
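+ """
+ Post a trigger/acknowledge/resolve event to the PagerDuty generic events
+ API (v1 create_event endpoint) and return the decoded JSON response.
+ """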
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ service_id=dict(required=True),
+ service_key=dict(required=False, no_log=True),
+ integration_key=dict(required=False, no_log=True),
+ api_key=dict(required=True, no_log=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_id = module.params['service_id']
+ integration_key = module.params['integration_key']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ if integration_key is None:
+ if service_key is not None:
+ integration_key = service_key
+ module.warn('"service_key" is obsolete parameter and will be removed.'
+ ' Please, use "integration_key" instead')
+ else:
+ module.fail_json(msg="'integration_key' is required parameter")
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py
new file mode 100644
index 00000000..358a6961
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_change.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: pagerduty_change
+short_description: Track a code or infrastructure change as a PagerDuty change event
+version_added: 1.3.0
+description:
+ - This module will let you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event will be created each time it is run.
+author:
+ - Adam Vaughan (@adamvaughan)
+requirements:
+ - PagerDuty integration key
+options:
+ integration_key:
+ description:
+ - The integration key that identifies the service the change was made to.
+ This can be found by adding an integration to a service in PagerDuty.
+ required: true
+ type: str
+ summary:
+ description:
+ - A short description of the change that occurred.
+ required: true
+ type: str
+ source:
+ description:
+ - The source of the change event.
+ default: Ansible
+ type: str
+ user:
+ description:
+ - The name of the user or process that triggered this deployment.
+ type: str
+ repo:
+ description:
+ - The URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+ - An identifier of the revision being deployed, typically a number or SHA from a version control system.
+ required: false
+ type: str
+ environment:
+ description:
+ - The environment name, typically C(production), C(staging), etc.
+ required: false
+ type: str
+ link_url:
+ description:
+ - A URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ link_text:
+ description:
+ - Descriptive text for a URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ url:
+ description:
+ - URL to submit the change event to.
+ required: false
+ default: https://events.pagerduty.com/v2/change/enqueue
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target URL will not be validated.
+ This should only be used on personally controlled sites using self-signed certificates.
+ required: false
+ default: yes
+ type: bool
+notes:
+ - Supports C(check_mode). Note that check mode simply does nothing except returning C(changed=true) in case the I(url) seems to be correct.
+'''
+
+EXAMPLES = '''
+- name: Track the deployment as a PagerDuty change event
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+
+- name: Track the deployment as a PagerDuty change event with more details
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+ source: Ansible Deploy
+ user: ansible
+ repo: github.com/ansible/ansible
+ revision: '4.2'
+ environment: production
+ link_url: https://github.com/ansible-collections/community.general/pull/1269
+ link_text: View changes on GitHub
+'''
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+from datetime import datetime
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ integration_key=dict(required=True, type='str', no_log=True),
+ summary=dict(required=True, type='str'),
+ source=dict(required=False, default='Ansible', type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ environment=dict(required=False, type='str'),
+ link_url=dict(required=False, type='str'),
+ link_text=dict(required=False, type='str'),
+ url=dict(required=False,
+ default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
+
+ url = module.params['url']
+ headers = {'Content-Type': 'application/json'}
+
+ if module.check_mode:
+ _response, info = fetch_url(
+ module, url, headers=headers, method='POST')
+
+ if info['status'] == 400:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status']))
+
+ custom_details = {}
+
+ if module.params['user']:
+ custom_details['user'] = module.params['user']
+
+ if module.params['repo']:
+ custom_details['repo'] = module.params['repo']
+
+ if module.params['revision']:
+ custom_details['revision'] = module.params['revision']
+
+ if module.params['environment']:
+ custom_details['environment'] = module.params['environment']
+
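+    # Build an ISO 8601 UTC timestamp for the change event payload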
+ now = datetime.utcnow()
+ timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ payload = {
+ 'summary': module.params['summary'],
+ 'source': module.params['source'],
+ 'timestamp': timestamp,
+ 'custom_details': custom_details
+ }
+
+ event = {
+ 'routing_key': module.params['integration_key'],
+ 'payload': payload
+ }
+
+ if module.params['link_url']:
+ link = {
+ 'href': module.params['link_url']
+ }
+
+ if module.params['link_text']:
+ link['text'] = module.params['link_text']
+
+ event['links'] = [link]
+
+ _response, info = fetch_url(
+ module, url, data=module.jsonify(event), headers=headers, method='POST')
+
+ if info['status'] == 202:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Creating PagerDuty change event failed with %d' % (info['status']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py
new file mode 100644
index 00000000..4b20a321
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pagerduty_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pagerduty_user
+short_description: Manage a user account on PagerDuty
+description:
+ - This module manages the creation/removal of a user account on PagerDuty.
+version_added: '1.3.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+  - pdpyras Python module, version 4.1.1
+ - PagerDuty API Access
+options:
+ access_token:
+ description:
+ - An API access token to authenticate with the PagerDuty REST API.
+ required: true
+ type: str
+ pd_user:
+ description:
+ - Name of the user in PagerDuty.
+ required: true
+ type: str
+ pd_email:
+ description:
+ - The user's email address.
+ - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ pd_role:
+ description:
+ - The user's role.
+ choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']
+ default: 'responder'
+ type: str
+ state:
+ description:
+ - State of the user.
+ - On C(present), it creates a user if the user doesn't exist.
+ - On C(absent), it removes a user if the account exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ pd_teams:
+ description:
+ - The teams to which the user belongs.
+ - Required if I(state=present).
+ type: list
+ elements: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Create a user account on PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ pd_role: user_pd_role
+    pd_teams:
+      - user_pd_team
+ state: "present"
+
+- name: Remove a user account from PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ state: "absent"
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+from os import path
+
+try:
+ from pdpyras import APISession
+ HAS_PD_PY = True
+except ImportError:
+ HAS_PD_PY = False
+ PD_IMPORT_ERR = traceback.format_exc()
+
+try:
+ from pdpyras import PDClientError
+ HAS_PD_CLIENT_ERR = True
+except ImportError:
+ HAS_PD_CLIENT_ERR = False
+ PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc()
+
+
+class PagerDutyUser(object):
+ def __init__(self, module, session):
+ self._module = module
+ self._apisession = session
+
+ # check if the user exists
+ def does_user_exist(self, pd_email):
+ for user in self._apisession.iter_all('users'):
+ if user['email'] == pd_email:
+ return user['id']
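+        # Implicitly returns None when no user matches pd_email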
+
+ # create a user account on PD
+ def add_pd_user(self, pd_name, pd_email, pd_role):
+ try:
+ user = self._apisession.persist('users', 'email', {
+ "name": pd_name,
+ "email": pd_email,
+ "type": "user",
+ "role": pd_role,
+ })
+ return user
+
+ except PDClientError as e:
+ if e.response.status_code == 400:
+ self._module.fail_json(
+ msg="Failed to add %s due to invalid argument" % (pd_name))
+ if e.response.status_code == 401:
+ self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name))
+ if e.response.status_code == 402:
+ self._module.fail_json(
+ msg="Failed to add %s due to inability to perform the action within the API token" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to add %s due to reaching the limit of making requests" % (pd_name))
+
+ # delete a user account from PD
+ def delete_user(self, pd_user_id, pd_name):
+ try:
+ user_path = path.join('/users/', pd_user_id)
+ self._apisession.rdelete(user_path)
+
+ except PDClientError as e:
+ if e.response.status_code == 404:
+ self._module.fail_json(
+ msg="Failed to remove %s as user was not found" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name))
+ if e.response.status_code == 401:
+                # Include the user's assigned incidents in the error message
+                pd_incidents = self.get_incidents_assigned_to_user(pd_user_id)
+                self._module.fail_json(msg="Failed to remove %s because the user has assigned incidents: %s" % (pd_name, pd_incidents))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name))
+
+ # get incidents assigned to a user
+ def get_incidents_assigned_to_user(self, pd_user_id):
+ incident_info = {}
+ incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]})
+
+ for incident in incidents:
+ incident_info = {
+ 'title': incident['title'],
+ 'key': incident['incident_key'],
+ 'status': incident['status']
+ }
+ return incident_info
+
+ # add a user to a team/teams
+ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role):
+ updated_team = None
+ for team in pd_teams:
+ team_info = self._apisession.find('teams', team, attribute='name')
+ if team_info is not None:
+ try:
+ updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={
+ 'role': pd_role
+ })
+ except PDClientError:
+ updated_team = None
+ return updated_team
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ access_token=dict(type='str', required=True, no_log=True),
+ pd_user=dict(type='str', required=True),
+ pd_email=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ pd_role=dict(type='str', default='responder',
+ choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
+ pd_teams=dict(type='list', elements='str', required=False)),
+ required_if=[['state', 'present', ['pd_teams']], ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PD_PY:
+ module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR)
+
+ if not HAS_PD_CLIENT_ERR:
+ module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR)
+
+ access_token = module.params['access_token']
+ pd_user = module.params['pd_user']
+ pd_email = module.params['pd_email']
+ state = module.params['state']
+ pd_role = module.params['pd_role']
+ pd_teams = module.params['pd_teams']
+
+ if pd_role:
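+        # Map the documented role names to the values the PagerDuty REST API expects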
+ pd_role_gui_value = {
+ 'global_admin': 'admin',
+ 'manager': 'user',
+ 'responder': 'limited_user',
+ 'observer': 'observer',
+ 'stakeholder': 'read_only_user',
+ 'limited_stakeholder': 'read_only_limited_user',
+ 'restricted_access': 'restricted_access'
+ }
+ pd_role = pd_role_gui_value[pd_role]
+
+ # authenticate with PD API
+ try:
+ session = APISession(access_token)
+ except PDClientError as e:
+ module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e)
+
+ user = PagerDutyUser(module, session)
+
+ user_exists = user.does_user_exist(pd_email)
+
+ if user_exists:
+ if state == "absent":
+ # remove user
+ if not module.check_mode:
+ user.delete_user(user_exists, pd_user)
+ module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user)
+ else:
+ module.exit_json(changed=False, result="User %s already exists." % pd_user)
+
+ # in case that the user does not exist
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="User %s was not found." % pd_user)
+
+ else:
+ # add user, adds user with the default notification rule and contact info (email)
+ if not module.check_mode:
+ user.add_pd_user(pd_user, pd_email, pd_role)
+ # get user's id
+ pd_user_id = user.does_user_exist(pd_email)
+ # add a user to the team/s
+ user.add_user_to_teams(pd_user_id, pd_teams, pd_role)
+ module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py
new file mode 100644
index 00000000..23ed2545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/pingdom.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+  - This module will let you pause/unpause Pingdom alerts.
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: [ "running", "paused", "started", "stopped" ]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+- name: Pause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
+
+- name: Unpause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
+'''
+
+import traceback
+
+PINGDOM_IMP_ERR = None
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except Exception:
+ PINGDOM_IMP_ERR = traceback.format_exc()
+ HAS_PINGDOM = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True, no_log=True),
+ key=dict(required=True, no_log=True),
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR)
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py
new file mode 100644
index 00000000..161361b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/rollbar_deployment.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ type: str
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ type: str
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ type: str
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ type: str
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ type: str
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ type: str
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+'''
+
+EXAMPLES = '''
+ - name: Rollbar deployment notification
+ community.general.rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
+
+ - name: Notify rollbar about current git revision deployment by current user
+ community.general.rollbar_deployment:
+ token: "{{ rollbar_access_token }}"
+ environment: production
+ revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}"
+ user: "{{ lookup('env', 'USER') }}"
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
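+    # In check mode, assume the notification would be sent and report a change without calling the API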
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data, method='POST')
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py
new file mode 100644
index 00000000..9ebe2765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_check.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+  - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module;
+    they are simply specified for your convenience.
+options:
+ name:
+ type: str
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so
+        you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ handlers:
+ type: list
+ description:
+ - List of handlers to notify when the check fails
+ default: []
+ subscribers:
+ type: list
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ default: []
+ interval:
+ type: int
+ description:
+ - Check interval in seconds
+ timeout:
+ type: int
+ description:
+ - Timeout for the check
+ - If not specified, it defaults to 10.
+ ttl:
+ type: int
+ description:
+ - Time to live in seconds until the check is considered stale
+ handle:
+ description:
+ - Whether the check should be handled or not
+ - Default is C(false).
+ type: bool
+ subdue_begin:
+ type: str
+ description:
+ - When to disable handling of check failures
+ subdue_end:
+ type: str
+ description:
+ - When to enable handling of check failures
+ dependencies:
+ type: list
+ description:
+      - Other checks this check depends on; if dependencies fail,
+        handling of this check will be disabled.
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ type: bool
+ default: 'no'
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ - Default is C(false).
+ type: bool
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ - Default is C(false).
+ type: bool
+ occurrences:
+ type: int
+ description:
+ - Number of event occurrences before the handler should take action
+ - If not specified, defaults to 1.
+ refresh:
+ type: int
+ description:
+      - Number of seconds handlers should wait before taking a second action
+ aggregate:
+ description:
+      - Classifies the check as an aggregate check,
+        making it available via the aggregate API.
+ - Default is C(false).
+ type: bool
+ low_flap_threshold:
+ type: int
+ description:
+ - The low threshold for flap detection
+ high_flap_threshold:
+ type: int
+ description:
+ - The high threshold for flap detection
+ custom:
+ type: dict
+ description:
+      - A hash/dictionary of custom parameters to merge into the check configuration.
+      - You cannot overwrite other module parameters using this option.
+ default: {}
+ source:
+ type: str
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: Get cpu metrics
+ community.general.sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: yes
+ handlers: relay
+ subscribers: common
+ interval: 60
+
+# Check whether nginx is running
+- name: Check nginx process
+ community.general.sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: Check disk
+ community.general.sensu_check:
+ name: check_disk_capacity
+ state: absent
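+
+# Subdue handling of a check during a nightly maintenance window.
+# Illustrative sketch only: the plugin path and time strings are placeholders,
+# and subdue_begin/subdue_end must be provided together.
+- name: Check disk capacity but subdue alerts overnight
+  community.general.sensu_check:
+    name: check_disk_capacity
+    command: /etc/sensu/plugins/system/check-disk.rb
+    handlers: default
+    interval: 300
+    subdue_begin: '10PM'
+    subdue_end: '6AM'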
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'ttl',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+            # Merge custom parameters into the check definition
+            custom_params = module.params['custom']
+            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+            if overwritten_fields:
+                msg = 'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+ module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
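+    # 'command' is only required when the check is being created or updated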
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py
new file mode 100644
index 00000000..35444f60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_client.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_client
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu client configuration
+description:
+ - Manages Sensu client configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the client should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the client. The name cannot contain special characters or spaces.
+ - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
+ address:
+ type: str
+ description:
+ - An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
+ - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
+ subscriptions:
+ type: list
+ description:
+ - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
+ - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
+ - The subscriptions array items must be strings.
+ safe_mode:
+ description:
+ - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
+ type: bool
+ default: 'no'
+ redact:
+ type: list
+ description:
+ - Client definition attributes to redact (values) when logging and sending client keepalives.
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the Sensu client socket.
+ keepalives:
+ description:
+ - If Sensu should monitor keepalives for this client.
+ type: bool
+ default: 'yes'
+ keepalive:
+ type: dict
+ description:
+ - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc).
+ registration:
+ type: dict
+ description:
+ - The registration definition scope, used to configure Sensu registration event handlers.
+ deregister:
+ description:
+ - If a deregistration event should be created upon Sensu client process stop.
+ - Default is C(false).
+ type: bool
+ deregistration:
+ type: dict
+ description:
+ - The deregistration definition scope, used to configure automated Sensu client de-registration.
+ ec2:
+ type: dict
+ description:
+ - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
+ chef:
+ type: dict
+ description:
+ - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
+ puppet:
+ type: dict
+ description:
+ - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
+ servicenow:
+ type: dict
+ description:
+ - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Minimum possible configuration
+- name: Configure Sensu client
+ community.general.sensu_client:
+ subscriptions:
+ - default
+
+# With customization
+- name: Configure Sensu client
+ community.general.sensu_client:
+ name: "{{ ansible_fqdn }}"
+ address: "{{ ansible_default_ipv4['address'] }}"
+ subscriptions:
+ - default
+ - webserver
+ redact:
+ - password
+ socket:
+ bind: 127.0.0.1
+ port: 3030
+ keepalive:
+ thresholds:
+ warning: 180
+ critical: 300
+ handlers:
+ - email
+ custom:
+ - broadcast: irc
+ occurrences: 3
+ register: client
+ notify:
+ - Restart sensu-client
+
+- name: Secure Sensu client configuration file
+ ansible.builtin.file:
+ path: "{{ client['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+
+- name: Delete the Sensu client configuration
+ community.general.sensu_client:
+ state: "absent"
+'''
+
+RETURN = '''
+config:
+ description: Effective client configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'client', 'subscriptions': ['default']}
+file:
+ description: Path to the client configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/client.json"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=False),
+ address=dict(type='str', required=False),
+ subscriptions=dict(type='list', required=False),
+ safe_mode=dict(type='bool', required=False, default=False),
+ redact=dict(type='list', required=False),
+ socket=dict(type='dict', required=False),
+ keepalives=dict(type='bool', required=False, default=True),
+ keepalive=dict(type='dict', required=False),
+ registration=dict(type='dict', required=False),
+ deregister=dict(type='bool', required=False),
+ deregistration=dict(type='dict', required=False),
+ ec2=dict(type='dict', required=False),
+ chef=dict(type='dict', required=False),
+ puppet=dict(type='dict', required=False),
+ servicenow=dict(type='dict', required=False)
+ ),
+ required_if=[
+ ['state', 'present', ['subscriptions']]
+ ]
+ )
+
+ state = module.params['state']
+ path = "/etc/sensu/conf.d/client.json"
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build client configuration from module arguments
+ config = {'client': {}}
+ args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
+ 'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
+ 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['client'][arg] = module.params[arg]
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Client configuration is already up to date',
+ config=config['client'],
+ file=path)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Client configuration would have been updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+
+ try:
+ with open(path, 'w') as client:
+ client.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Client configuration updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py
new file mode 100644
index 00000000..53152edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_handler.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_handler
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu handler configuration
+description:
+ - Manages Sensu handler configuration
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the handler should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the handler. The name cannot contain special characters or spaces.
+ required: True
+ type:
+ type: str
+ description:
+ - The handler type
+ choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
+ filter:
+ type: str
+ description:
+ - The Sensu event filter (name) to use when filtering events for the handler.
+ filters:
+ type: list
+ description:
+ - An array of Sensu event filters (names) to use when filtering events for the handler.
+ - Each array item must be a string.
+ severities:
+ type: list
+ description:
+ - An array of check result severities the handler will handle.
+ - 'NOTE: event resolution bypasses this filtering.'
+ - "Example: [ 'warning', 'critical', 'unknown' ]."
+ mutator:
+ type: str
+ description:
+ - The Sensu event mutator (name) to use to mutate event data for the handler.
+ timeout:
+ type: int
+ description:
+ - The handler execution duration timeout in seconds (hard stop).
+ - Only used by pipe and tcp handler types.
+ default: 10
+ handle_silenced:
+ description:
+ - If events matching one or more silence entries should be handled.
+ type: bool
+ default: 'no'
+ handle_flapping:
+ description:
+ - If events in the flapping state should be handled.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - The handler command to be executed.
+ - The event data is passed to the process via STDIN.
+ - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the TCP/UDP handler socket.
+ - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
+ pipe:
+ type: dict
+ description:
+ - The pipe definition scope, used to configure the Sensu transport pipe.
+ - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
+ handlers:
+ type: list
+ description:
+ - An array of Sensu event handlers (names) to use for events using the handler set.
+ - Each array item must be a string.
+ - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Configure a handler that sends event data as STDIN (pipe)
+- name: Configure IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ type: "pipe"
+ command: "/usr/local/bin/notify-irc.sh"
+ severities:
+ - "ok"
+ - "critical"
+ - "warning"
+ - "unknown"
+ timeout: 15
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+# Delete a handler
+- name: Delete IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ state: "absent"
+
+# Example of a TCP handler
+- name: Configure TCP Sensu handler
+ community.general.sensu_handler:
+ name: "tcp_handler"
+ type: "tcp"
+ timeout: 30
+ socket:
+ host: "10.0.1.99"
+ port: 4444
+ register: handler
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+- name: Secure Sensu handler configuration file
+ ansible.builtin.file:
+ path: "{{ handler['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+'''
+
+RETURN = '''
+config:
+ description: Effective handler configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+file:
+ description: Path to the handler configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/handlers/irc.json"
+name:
+ description: Name of the handler
+ returned: success
+ type: str
+ sample: "irc"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
+ filter=dict(type='str', required=False),
+ filters=dict(type='list', required=False),
+ severities=dict(type='list', required=False),
+ mutator=dict(type='str', required=False),
+ timeout=dict(type='int', required=False, default=10),
+ handle_silenced=dict(type='bool', required=False, default=False),
+ handle_flapping=dict(type='bool', required=False, default=False),
+ command=dict(type='str', required=False),
+ socket=dict(type='dict', required=False),
+ pipe=dict(type='dict', required=False),
+ handlers=dict(type='list', required=False),
+ ),
+ required_if=[
+ ['state', 'present', ['type']],
+ ['type', 'pipe', ['command']],
+ ['type', 'tcp', ['socket']],
+ ['type', 'udp', ['socket']],
+ ['type', 'transport', ['pipe']],
+ ['type', 'set', ['handlers']]
+ ]
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build handler configuration from module arguments
+ config = {'handlers': {name: {}}}
+ args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+ 'handle_silenced', 'handle_flapping', 'command', 'socket',
+ 'pipe', 'handlers']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['handlers'][name][arg] = module.params[arg]
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Handler configuration is already up to date',
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Handler configuration would have been updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ try:
+ with open(path, 'w') as handler:
+ handler.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Handler configuration updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py
new file mode 100644
index 00000000..12dc5d20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_silence.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Steven Bambling <smbambling@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+  - Create and clear (delete) silence entries via the Sensu API
+ for subscriptions and checks.
+options:
+ check:
+ type: str
+ description:
+ - Specifies the check which the silence entry applies to.
+ creator:
+ type: str
+ description:
+ - Specifies the entity responsible for this entry.
+ expire:
+ type: int
+ description:
+ - If specified, the silence entry will be automatically cleared
+ after this number of seconds.
+ expire_on_resolve:
+ description:
+ - If specified as true, the silence entry will be automatically
+ cleared once the condition it is silencing is resolved.
+ type: bool
+ reason:
+ type: str
+ description:
+ - If specified, this free-form string is used to provide context or
+ rationale for the reason this silence entry was created.
+ state:
+ type: str
+ description:
+ - Specifies to create or clear (delete) a silence entry via the Sensu API
+ default: present
+ choices: ['present', 'absent']
+ subscription:
+ type: str
+ description:
+ - Specifies the subscription which the silence entry applies to.
+ - To create a silence entry for a client prepend C(client:) to client name.
+ Example - C(client:server1.example.dev)
+ required: true
+ url:
+ type: str
+ description:
+ - Specifies the URL of the Sensu monitoring host server.
+ required: false
+    default: http://127.0.0.1:4567
+'''
+
+EXAMPLES = '''
+# Silence ALL checks for a given client
+- name: Silence server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ creator: "{{ ansible_user_id }}"
+ reason: Performing maintenance
+
+# Silence specific check for a client
+- name: Silence CPU_Usage check for server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ check: CPU_Usage
+ creator: "{{ ansible_user_id }}"
+ reason: Investigation alert issue
+
+# Silence multiple clients from a dict
+ silence:
+ server1.example.dev:
+ reason: 'Deployment in progress'
+ server2.example.dev:
+ reason: 'Deployment in progress'
+
+- name: Silence several clients from a dict
+ community.general.sensu_silence:
+ subscription: "client:{{ item.key }}"
+ reason: "{{ item.value.reason }}"
+ creator: "{{ ansible_user_id }}"
+ with_dict: "{{ silence }}"
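+
+# Clear (delete) an existing silence entry once maintenance is finished.
+# Illustrative sketch using only the options documented above.
+- name: Clear the silence entry for server1.example.dev
+  community.general.sensu_silence:
+    subscription: client:server1.example.dev
+    state: absent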
+'''
+
+RETURN = '''
+'''
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def query(module, url, check, subscription):
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='GET',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] == 500:
+ module.fail_json(
+ msg="Failed to query silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
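+    # Return (rc, data, changed); rc stays False (success) because a query never modifies state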
+ return False, json_out, False
+
+
+def clear(module, url, check, subscription):
+ # Test if silence exists before clearing
+ (rc, out, changed) = query(module, url, check, subscription)
+
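+    # Map each silenced subscription to its check so we can tell whether this exact entry already exists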
+ d = dict((i['subscription'], i['check']) for i in out)
+ subscription_exists = subscription in d
+ if check and subscription_exists:
+ exists = (check == d[subscription])
+ else:
+ exists = subscription_exists
+
+ # If check/subscription doesn't exist
+ # exit with changed state of False
+ if not exists:
+ return False, out, changed
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced/clear'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 204:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def create(
+ module, url, check, creator, expire,
+ expire_on_resolve, reason, subscription):
+ (rc, out, changed) = query(module, url, check, subscription)
+ for i in out:
+ if (i['subscription'] == subscription):
+ if (
+ (check is None or check == i['check']) and
+ (
+ creator == '' or
+ creator == i['creator']) and
+ (
+ reason == '' or
+ reason == i['reason']) and
+ (
+ expire is None or expire == i['expire']) and
+ (
+ expire_on_resolve is None or
+ expire_on_resolve == i['expire_on_resolve']
+ )
+ ):
+ return False, out, False
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'creator': creator,
+ 'expire': expire,
+ 'expire_on_resolve': expire_on_resolve,
+ 'reason': reason,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 201:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" %
+ (subscription, info['msg'])
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ check=dict(required=False),
+ creator=dict(required=False),
+ expire=dict(type='int', required=False),
+ expire_on_resolve=dict(type='bool', required=False),
+ reason=dict(required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ subscription=dict(required=True),
+            url=dict(required=False, default='http://127.0.0.1:4567'),
+ ),
+ supports_check_mode=True
+ )
+
+ url = module.params['url']
+ check = module.params['check']
+ creator = module.params['creator']
+ expire = module.params['expire']
+ expire_on_resolve = module.params['expire_on_resolve']
+ reason = module.params['reason']
+ subscription = module.params['subscription']
+ state = module.params['state']
+
+ if state == 'present':
+ (rc, out, changed) = create(
+ module, url, check, creator,
+ expire, expire_on_resolve, reason, subscription
+ )
+
+ if state == 'absent':
+ (rc, out, changed) = clear(module, url, check, subscription)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py
new file mode 100644
index 00000000..6316254d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/sensu/sensu_subscription.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ type: str
+ description:
+ - The name of the channel
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so you
+        can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: Subscribe to nginx checks
+  community.general.sensu_subscription:
+    name: nginx
+
+# Unsubscribe from the common checks channel
+- name: Unsubscribe from common checks
+  community.general.sensu_subscription:
+    name: common
+    state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ config = json.load(open(path))
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ open(path, 'w').write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py
new file mode 100644
index 00000000..77e3b153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/spectrum_device.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Renato Orgito <orgito@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum.
+description:
+ - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+ - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1
+author: "Renato Orgito (@orgito)"
+options:
+ device:
+ type: str
+ aliases: [ host, name ]
+ required: true
+ description:
+ - IP address of the device.
+ - If a hostname is given, it will be resolved to the IP address.
+ community:
+ type: str
+ description:
+ - SNMP community used for device discovery.
+ - Required when C(state=present).
+ required: true
+ landscape:
+ type: str
+ required: true
+ description:
+ - Landscape handle of the SpectroServer to which add or remove the device.
+ state:
+ type: str
+ required: false
+ description:
+ - On C(present) creates the device when it does not exist.
+ - On C(absent) removes the device when it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ url:
+ type: str
+ aliases: [ oneclick_url ]
+ required: true
+ description:
+ - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port]
+ url_username:
+ type: str
+ aliases: [ oneclick_user ]
+ required: true
+ description:
+ - Oneclick user name.
+ url_password:
+ type: str
+ aliases: [ oneclick_password ]
+ required: true
+ description:
+ - Oneclick user password.
+ use_proxy:
+ required: false
+ description:
+ - If C(no), it will not use a proxy, even if one is defined in an environment
+ variable on the target hosts.
+ default: 'yes'
+ type: bool
+ validate_certs:
+ required: false
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+ agentport:
+ type: int
+ required: false
+ description:
+ - UDP port used for SNMP discovery.
+ default: 161
+notes:
+ - The devices will be created inside the I(Universe) container of the specified landscape.
+ - All the operations will be performed only on the specified landscape.
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present
+
+
+- name: Remove device from CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ landscape: '{{ landscape_handle }}'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ use_proxy: no
+ state: absent
+'''
+
+RETURN = '''
+device:
+ description: device data when state = present
+ returned: success
+ type: dict
+ sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
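+ # fetch_url picks up url_username/url_password, use_proxy and validate_certs from
+ # the module parameters, so authentication and TLS handling need no extra code here.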
+ headers = {
+ "Content-Type": "application/xml",
+ "Accept": "application/xml"
+ }
+
+ url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+ response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+ if info['status'] == 401:
+ module.fail_json(msg="failed to authenticate to Oneclick server")
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ return response.read()
+
+
+def post(resource, xml=None):
+ return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+ return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+ try:
+ device_ip = gethostbyname(module.params.get('device'))
+ except gaierror:
+ module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+ return device_ip
+
+
+def get_device(device_ip):
+ """Query OneClick for the device using the IP Address"""
+ resource = '/models'
+ landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+ landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
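+ # Model handles encode the owning landscape in their high-order bits, so the search
+ # below is restricted to the assumed 0x100000-wide handle range of the requested
+ # landscape; the same arithmetic later derives a landscape from a model handle.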
+
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <rs:model-request throttlesize="5"
+ xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ <action-models>
+ <filtered-models>
+ <and>
+ <equals>
+ <model-type>SearchManager</model-type>
+ </equals>
+ <greater-than>
+ <attribute id="0x129fa">
+ <value>{mh_min}</value>
+ </attribute>
+ </greater-than>
+ <less-than>
+ <attribute id="0x129fa">
+ <value>{mh_max}</value>
+ </attribute>
+ </less-than>
+ </and>
+ </filtered-models>
+ <action>FIND_DEV_MODELS_BY_IP</action>
+ <attribute id="AttributeID.NETWORK_ADDRESS">
+ <value>{search_ip}</value>
+ </attribute>
+ </action-models>
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ <rs:requested-attribute id="0x12d7f" /> <!--Network Address-->
+ </rs:model-request>
+ """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+ result = post(resource, xml=xml)
+
+ root = ET.fromstring(result)
+
+ if root.get('total-models') == '0':
+ return None
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+ # get the first device
+ model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+ if model.get('error'):
+ module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+ # get the attributes
+ model_handle = model.get('mh')
+
+ model_address = model.find('./*[@id="0x12d7f"]').text
+
+ # derive the landscape handle from the model handle of the device
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=model_address,
+ landscape=model_landscape)
+
+ return device
+
+
+def add_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device:
+ module.exit_json(changed=False, device=device)
+
+ if module.check_mode:
+ device = dict(
+ model_handle=None,
+ address=device_ip,
+ landscape="0x%x" % int(module.params.get('landscape'), 16))
+ module.exit_json(changed=True, device=device)
+
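+ # Build the discovery request; with illustrative values this ends up like
+ # model?ipaddress=10.10.5.1&commstring=public&landscapeid=0x100000&agentport=161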
+ resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+ resource += '&landscapeid=' + module.params.get('landscape')
+
+ if module.params.get('agentport', None):
+ resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+ result = post(resource)
+ root = ET.fromstring(result)
+
+ if root.get('error') != 'Success':
+ module.fail_json(msg=root.get('error-message'))
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ model = root.find('ca:model', namespace)
+
+ model_handle = model.get('mh')
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=device_ip,
+ landscape=model_landscape,
+ )
+
+ module.exit_json(changed=True, device=device)
+
+
+def remove_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ resource = '/model/' + device['model_handle']
+ result = delete(resource)
+
+ root = ET.fromstring(result)
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ error = root.find('ca:error', namespace).text
+
+ if error != 'Success':
+ error_message = root.find('ca:error-message', namespace).text
+ module.fail_json(msg="%s %s" % (error, error_message))
+
+ module.exit_json(changed=True)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(required=True, aliases=['host', 'name']),
+ landscape=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ?
+ agentport=dict(type='int', default=161),
+ url=dict(required=True, aliases=['oneclick_url']),
+ url_username=dict(required=True, aliases=['oneclick_user']),
+ url_password=dict(required=True, no_log=True, aliases=['oneclick_password']),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ required_if=[('state', 'present', ['community'])],
+ supports_check_mode=True
+ )
+
+ if module.params.get('state') == 'present':
+ add_device()
+ else:
+ remove_device()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py
new file mode 100644
index 00000000..8e2d19a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/stackdriver.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to Stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ type: str
+ description:
+ - API key.
+ required: true
+ event:
+ type: str
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: true
+ revision_id:
+ type: str
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ deployed_by:
+ type: str
+ description:
+ - The person or robot responsible for deploying the code
+ default: "Ansible"
+ deployed_to:
+ type: str
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ repository:
+ type: str
+ description:
+ - The repository (or project) deployed
+ msg:
+ type: str
+ description:
+ - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
+ annotated_by:
+ type: str
+ description:
+ - The person or robot who the annotation should be attributed to.
+ default: "Ansible"
+ level:
+ type: str
+ description:
+ - One of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ default: 'INFO'
+ instance_id:
+ type: str
+ description:
+ - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
+ event_epoch:
+ type: str
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- name: Send a code deploy event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- name: Send an annotation event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
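+ # Both event types POST the same way: a JSON body with the API key passed in the
+ # x-stackdriver-apikey header; anything other than HTTP 200 is treated as a failure.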
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict( # @TODO add types
+ key=dict(required=True, no_log=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(), # @TODO int?
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception as e:
+ module.fail_json(msg="unable to sent deploy event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception as e:
+ module.fail_json(msg="unable to sent annotation event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py
new file mode 100644
index 00000000..0414f6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/statusio_maintenance.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API URL (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set a future maintenance window
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ type: str
+ description:
+ - A descriptive title for the maintenance window
+ default: "A new maintenance window"
+ desc:
+ type: str
+ description:
+ - Message describing the maintenance window
+ default: "Created by Ansible"
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ type: str
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ type: str
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ type: str
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ type: str
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ default: "https://api.status.io"
+ components:
+ type: list
+ description:
+ - The given name of your component (server name)
+ aliases: ['component']
+ containers:
+ type: list
+ description:
+ - The given name of your container (data center)
+ aliases: ['container']
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ type: bool
+ default: 'no'
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ type: bool
+ default: 'no'
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ type: bool
+ default: 'no'
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_id:
+ type: str
+ description:
+ - The maintenance id number when deleting a maintenance window
+ minutes:
+ type: int
+ description:
+ - The length of time, in minutes, that the maintenance will run
+ (starting from playbook runtime)
+ default: 10
+ start_date:
+ type: str
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ start_time:
+ type: str
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+ community.general.statusio_maintenance:
+ title: Router Upgrade from ansible
+ desc: Performing a Router Upgrade
+ components: server1.example.com
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+ community.general.statusio_maintenance:
+ title: Routine maintenance
+ desc: Some security updates
+ components:
+ - server1.example.com
+ - server2.example.com
+ minutes: 60
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+ delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+ community.general.statusio_maintenance:
+ title: Data center downtime
+ desc: Performing an upgrade to our data center
+ components: Primary Data Center
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ start_date: 01/01/2016
+ start_time: 12:00
+ minutes: 1440
+
+- name: Delete a maintenance window
+ community.general.statusio_maintenance:
+ title: Remove a maintenance window
+ maintenance_id: 561f90faf74bc94a4700087b
+ statuspage: statuspage_id
+ api_id: api_id
+ api_key: api_key
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception as e:
+ return 1, None, None, to_native(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
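+ # Map the human-friendly component names onto the component/container ID pairs the
+ # maintenance API needs; matching is case-insensitive and any name that cannot be
+ # resolved is reported back to the caller.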
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
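+ # Returns (rc, [start_date, start_time, end_date, end_time], error) using the
+ # %m/%d/%Y and %H:%M formats consumed by the schedule payload below; when no
+ # start date/time is given, the window starts now (UTC) and runs for the given minutes.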
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
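+ # The JSON payload below passes each date/time field as a single-element list, so
+ # wrap the values up front before collecting the component and container IDs.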
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+ if minutes or start_time and start_date:
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py
new file mode 100644
index 00000000..bb4e60fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/monitoring/uptimerobot.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module will let you start and pause Uptime Robot Monitoring
+author: "Nate Kingsley (@nate-kingsley)"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ type: str
+ description:
+ - Define whether the monitor should be running or paused.
+ required: true
+ choices: [ "started", "paused" ]
+ monitorid:
+ type: str
+ description:
+ - ID of the monitor to check.
+ required: true
+ apikey:
+ type: str
+ description:
+ - Uptime Robot API key.
+ required: true
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+- name: Pause the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
+
+- name: Start the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+API_BASE = "https://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
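+ # Validate the API key and monitor ID against the getMonitors endpoint before any
+ # state change is attempted; main() fails early unless this returns stat == "ok".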
+
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
+
+
+def startMonitor(module, params):
+
+ params['monitorStatus'] = 1
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True, no_log=True),
+ monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py
new file mode 100644
index 00000000..0551ab20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mqtt.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+ - Publish a message on an MQTT topic.
+options:
+ server:
+ type: str
+ description:
+ - MQTT broker address/name
+ default: localhost
+ port:
+ type: int
+ description:
+ - MQTT broker port number
+ default: 1883
+ username:
+ type: str
+ description:
+ - Username to authenticate against the broker.
+ password:
+ type: str
+ description:
+ - Password for C(username) to authenticate against the broker.
+ client_id:
+ type: str
+ description:
+ - MQTT client identifier
+ - If not specified, a value C(hostname + pid) will be used.
+ topic:
+ type: str
+ description:
+ - MQTT topic name
+ required: true
+ payload:
+ type: str
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ qos:
+ type: str
+ description:
+ - QoS (Quality of Service)
+ default: "0"
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+ - Setting this flag causes the broker to retain (i.e. keep) the message so that
+ applications that subsequently subscribe to the topic can receive the last
+ retained message immediately.
+ type: bool
+ default: 'no'
+ ca_cert:
+ type: path
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ aliases: [ ca_certs ]
+ client_cert:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ certfile ]
+ client_key:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ keyfile ]
+ tls_version:
+ description:
+ - Specifies the version of the SSL/TLS protocol to be used.
+ - By default (if the python version supports it) the highest TLS version is
+ detected. If unavailable, TLS v1 is used.
+ type: str
+ choices:
+ - tlsv1.1
+ - tlsv1.2
+requirements: [ mosquitto ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- name: Publish a message on an MQTT topic
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello at {{ ansible_date_time.iso8601 }}'
+ qos: 0
+ retain: False
+ client_id: ans001
+ delegate_to: localhost
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+import os
+import ssl
+import traceback
+import platform
+from distutils.version import LooseVersion
+
+HAS_PAHOMQTT = True
+PAHOMQTT_IMP_ERR = None
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ PAHOMQTT_IMP_ERR = traceback.format_exc()
+ HAS_PAHOMQTT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# Main
+#
+
+def main():
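+ # Map the user-facing tls_version choices onto ssl module constants; each lookup is
+ # guarded because older Python builds may not expose every protocol constant.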
+ tls_map = {}
+
+ try:
+ tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2
+ except AttributeError:
+ pass
+
+ try:
+ tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1
+ except AttributeError:
+ pass
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
+ client_cert=dict(default=None, type='path', aliases=['certfile']),
+ client_key=dict(default=None, type='path', aliases=['keyfile']),
+ tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_cert", None)
+ certfile = module.params.get("client_cert", None)
+ keyfile = module.params.get("client_key", None)
+ tls_version = module.params.get("tls_version", None)
+
+ if client_id is None:
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
+ if payload and payload == 'None':
+ payload = None
+
+ auth = None
+ if username is not None:
+ auth = {'username': username, 'password': password}
+
+ tls = None
+ if ca_certs is not None:
+ if tls_version:
+ tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
+ else:
+ if LooseVersion(platform.python_version()) <= "3.5.2":
+ # Specifying `None` on later versions of python seems sufficient to
+ # instruct python to autonegotiate the SSL/TLS connection. On versions
+ # 3.5.2 and lower though we need to specify the version.
+ #
+ # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
+ # not available until 3.5.3.
+ tls_version = ssl.PROTOCOL_SSLv23
+
+ tls = {
+ 'ca_certs': ca_certs,
+ 'certfile': certfile,
+ 'keyfile': keyfile,
+ 'tls_version': tls_version,
+ }
+
+ try:
+ mqtt.single(
+ topic,
+ payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth,
+ tls=tls
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="unable to publish to MQTT broker %s" % to_native(e),
+ exception=traceback.format_exc()
+ )
+
+ module.exit_json(changed=False, topic=topic)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py
new file mode 100644
index 00000000..e6c5f183
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/mssql_db.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host.
+description:
+ - Add or remove MSSQL databases from a remote host.
+options:
+ name:
+ description:
+ - name of the database to add or remove
+ required: true
+ aliases: [ db ]
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with
+ type: str
+ login_host:
+ description:
+ - Host running the database
+ type: str
+ required: true
+ login_port:
+ description:
+ - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used
+ default: '1433'
+ type: str
+ state:
+ description:
+ - The database state
+ default: present
+ choices: [ "present", "absent", "import" ]
+ type: str
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+ files (C(.sql)) files are supported.
+ type: str
+ autocommit:
+ description:
+ - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+ within a transaction.
+ type: bool
+ default: 'no'
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as pip install pymssql (See M(ansible.builtin.pip).)
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig (@vedit)
+'''
+
+EXAMPLES = '''
+- name: Create a new database with name 'jackdata'
+ community.general.mssql_db:
+ name: jackdata
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file to remote host
+ ansible.builtin.copy:
+ src: dump.sql
+ dest: /tmp
+
+- name: Restore the dump file to database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import traceback
+
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ mssql_found = False
+else:
+ mssql_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+ try:
+ cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+ except Exception:
+ pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ with open(target, 'r') as backup:
+ sqlQuery = "USE [%s]\n" % db
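+ # T-SQL scripts use GO as a batch separator: run the accumulated batch each time a
+ # GO line is reached, then start the next batch scoped to the target database again.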
+ for line in backup:
+ if line is None:
+ break
+ elif line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+ module.fail_json(msg="when supplying login_user arguments login_password must be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py
new file mode 100644
index 00000000..f82bd7ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_aggregate.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_aggregate
+
+short_description: Manage NetApp cDOT aggregates.
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_aggregate) instead.
+
+description:
+- Create or destroy aggregates on NetApp cDOT.
+
+options:
+
+ state:
+ required: true
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+
+ name:
+ required: true
+ description:
+ - The name of the aggregate to manage.
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+
+'''
+
+EXAMPLES = """
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTAggregate(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['disk_count'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.disk_count = p['disk_count']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_aggr(self):
+ """
+ Checks if aggregate exists.
+
+ :return:
+ True if aggregate found
+ False if aggregate is not found
+ :rtype: bool
+ """
+
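+ # Build a ZAPI aggr-get-iter query that filters on the aggregate name; only the
+ # record count is inspected below, so any match means the aggregate exists.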
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(e.code) == "13040":
+ return False
+ else:
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_aggr(self):
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-create', **{'aggregate': self.name,
+ 'disk-count': str(self.disk_count)})
+
+ try:
+ self.server.invoke_successfully(aggr_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.name,
+ 'new-aggregate-name':
+ self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ aggregate_exists = self.get_aggr()
+ rename_aggregate = False
+
+ # check if anything needs to be changed (add/delete/update)
+
+ if aggregate_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ if self.name is not None and not self.name == self.name:
+ rename_aggregate = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ # Aggregate does not exist, but requested state is present.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not aggregate_exists:
+ self.create_aggr()
+
+ else:
+ if rename_aggregate:
+ self.rename_aggregate()
+
+ elif self.state == 'absent':
+ self.delete_aggr()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTAggregate()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py
new file mode 100644
index 00000000..36c5416a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_license.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_license
+
+short_description: Manage NetApp cDOT protocol and feature licenses
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_license) instead.
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+ default: false
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+ default: false
+
+ serial_number:
+ description:
+ - Serial number of the node associated with the license.
+    - This parameter is used primarily when removing a license for a specific service.
+ - If this parameter is not provided, the cluster serial number is used by default.
+
+ licenses:
+ description:
+ - List of licenses to add or remove.
+ - Please note that trying to remove a non-existent license will throw an error.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ licenses:
+ nfs: #################
+ cifs: #################
+ iscsi: #################
+ fcp: #################
+ snaprestore: #################
+ flexclone: #################
+
+- name: Remove licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ licenses:
+ nfs: remove
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLicense(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ serial_number=dict(required=False, type='str', default=None),
+ remove_unused=dict(default=False, type='bool'),
+ remove_expired=dict(default=False, type='bool'),
+            licenses=dict(type='dict', default=dict()),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.serial_number = p['serial_number']
+ self.remove_unused = p['remove_unused']
+ self.remove_expired = p['remove_expired']
+ self.licenses = p['licenses']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_licensing_status(self):
+ """
+ Check licensing status
+
+ :return: package (key) and licensing status (value)
+ :rtype: dict
+ """
+ license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
+ result = None
+ try:
+ result = self.server.invoke_successfully(license_status,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error checking license status: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ return_dictionary = {}
+ license_v2_status = result.get_child_by_name('license-v2-status')
+ if license_v2_status:
+ for license_v2_status_info in license_v2_status.get_children():
+ package = license_v2_status_info.get_child_content('package')
+ status = license_v2_status_info.get_child_content('method')
+ return_dictionary[package] = status
+
+ return return_dictionary
+
+ def remove_licenses(self, remove_list):
+ """
+ Remove requested licenses
+ :param:
+ remove_list : List of packages to remove
+
+ """
+ license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
+ for package in remove_list:
+ license_delete.add_new_child('package', package)
+
+ if self.serial_number is not None:
+ license_delete.add_new_child('serial-number', self.serial_number)
+
+ try:
+ self.server.invoke_successfully(license_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing license %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_unused_licenses(self):
+ """
+ Remove unused licenses
+ """
+ remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
+ try:
+ self.server.invoke_successfully(remove_unused,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing unused licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_expired_licenses(self):
+ """
+ Remove expired licenses
+ """
+ remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
+ try:
+ self.server.invoke_successfully(remove_expired,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing expired licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def update_licenses(self):
+ """
+ Update licenses
+ """
+ # Remove unused and expired licenses, if requested.
+ if self.remove_unused:
+ self.remove_unused_licenses()
+
+ if self.remove_expired:
+ self.remove_expired_licenses()
+
+ # Next, add/remove specific requested licenses.
+ license_add = netapp_utils.zapi.NaElement('license-v2-add')
+ codes = netapp_utils.zapi.NaElement('codes')
+ remove_list = []
+ for key, value in self.licenses.items():
+ str_value = str(value)
+ # Make sure license is not an empty string.
+ if str_value and str_value.strip():
+ if str_value.lower() == 'remove':
+ remove_list.append(str(key).lower())
+ else:
+ codes.add_new_child('license-code-v2', str_value)
+
+ # Remove requested licenses.
+ if len(remove_list) != 0:
+ self.remove_licenses(remove_list)
+
+ # Add requested licenses
+ if len(codes.get_children()) != 0:
+ license_add.add_child_elem(codes)
+ try:
+ self.server.invoke_successfully(license_add,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error adding licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ # Add / Update licenses.
+ license_status = self.get_licensing_status()
+ self.update_licenses()
+ new_license_status = self.get_licensing_status()
+
+ if license_status != new_license_status:
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLicense()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
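
The licenses option of na_cdot_license overloads its values: the literal string remove queues that package for license-v2-delete, while any other non-empty value is treated as a license code to pass to license-v2-add. A small pure-Python illustration of that split, mirroring update_licenses() (the function name is ours, not part of the module):

    def partition_licenses(licenses):
        # Mirror NetAppCDOTLicense.update_licenses(): 'remove' means delete,
        # anything else non-empty is a license code to add.
        remove_list = []
        add_codes = []
        for package, value in licenses.items():
            code = str(value).strip()
            if not code:
                continue
            if code.lower() == 'remove':
                remove_list.append(str(package).lower())
            else:
                add_codes.append(code)
        return remove_list, add_codes

    # The 'Remove licenses' example above (nfs: remove) becomes a removal;
    # a real license code would be queued for license-v2-add instead.
    assert partition_licenses({'nfs': 'remove'}) == (['nfs'], [])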
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py
new file mode 100644
index 00000000..3236dbee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_lun.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_lun
+
+short_description: Manage NetApp cDOT LUNs
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_lun) instead.
+
+description:
+- Create, destroy, or resize LUNs on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+    - Whether the specified LUN should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+    - The name of the LUN to manage.
+ required: true
+
+ flexvol_name:
+ description:
+    - The name of the FlexVol the LUN should exist on.
+ - Required when C(state=present).
+
+ size:
+ description:
+    - The size of the LUN in C(size_unit).
+ - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ force_resize:
+ description:
+    - Forcibly reduce the size. Reducing the size of a LUN requires this flag, as a safeguard against shrinking it accidentally.
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize Lun
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLUN(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.force_resize = p['force_resize']
+ self.force_remove = p['force_remove']
+ self.force_remove_fenced = p['force_remove_fenced']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_lun(self):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+
+ luns = []
+ tag = None
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.vserver)
+ query_details.add_new_child('volume', self.flexvol_name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+
+ tag = result.get_child_content('next-tag')
+
+ if tag is None:
+ break
+
+ # The LUNs have been extracted.
+ # Find the specified lun and extract details.
+ return_value = None
+ for lun in luns:
+ path = lun.get_child_content('path')
+ _rest, _splitter, found_name = path.rpartition('/')
+
+ if found_name == self.name:
+ size = lun.get_child_content('size')
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': path})
+
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value = {
+ 'name': found_name,
+ 'size': size,
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ }
+ else:
+ continue
+
+ return return_value
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **{'path': path,
+ 'size': str(self.size),
+ 'ostype': 'linux'})
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self):
+ """
+ Delete requested LUN
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.force_remove),
+ 'destroy-fenced-lun':
+ str(self.force_remove_fenced)})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self):
+ """
+ Resize requested LUN.
+
+        :return: True if LUN was actually re-sized, False otherwise.
+ :rtype: bool
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.size),
+ 'force': str(self.force_resize)})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def apply(self):
+ property_changed = False
+ multiple_properties_changed = False
+ size_changed = False
+ lun_exists = False
+ lun_detail = self.get_lun()
+
+ if lun_detail:
+ lun_exists = True
+ current_size = lun_detail['size']
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if not int(current_size) == self.size:
+ size_changed = True
+ property_changed = True
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not lun_exists:
+ self.create_lun()
+
+ else:
+ if size_changed:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun()
+ if not size_changed and not \
+ multiple_properties_changed:
+ property_changed = False
+
+ elif self.state == 'absent':
+ self.delete_lun()
+
+ changed = property_changed or size_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLUN()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
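
Both the LUN and the volume module convert the size/size_unit pair to bytes with a power-of-1024 map before calling ZAPI. A standalone sketch of that conversion (the function name is illustrative):

    _SIZE_UNIT_MAP = {'bytes': 1, 'b': 1, 'kb': 1024, 'mb': 1024 ** 2,
                      'gb': 1024 ** 3, 'tb': 1024 ** 4, 'pb': 1024 ** 5,
                      'eb': 1024 ** 6, 'zb': 1024 ** 7, 'yb': 1024 ** 8}

    def size_in_bytes(size, size_unit='gb'):
        # Same table as NetAppCDOTLUN._size_unit_map.
        return size * _SIZE_UNIT_MAP[size_unit]

    # The 'Create LUN' example above (size: 5, size_unit: mb) therefore
    # requests a 5242880-byte LUN from lun-create-by-size.
    assert size_in_bytes(5, 'mb') == 5242880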
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py
new file mode 100644
index 00000000..9f7ce60d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_qtree.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_qtree
+
+short_description: Manage qtrees
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_qtree) instead.
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Qtree should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the Qtree to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the Qtree should exist on. Required when C(state=present).
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+- name: Create QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTQTree(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_qtree(self):
+ """
+ Checks if the qtree exists.
+
+ :return:
+ True if qtree found
+ False if qtree is not found
+ :rtype: bool
+ """
+
+ qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-info', **{'vserver': self.vserver,
+ 'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ qtree_list_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(qtree_list_iter,
+ enable_tunneling=True)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_qtree(self):
+ qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-create', **{'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ try:
+ self.server.invoke_successfully(qtree_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-delete', **{'qtree': path})
+
+ try:
+ self.server.invoke_successfully(qtree_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-rename', **{'qtree': path,
+ 'new-qtree-name': new_path})
+
+ try:
+ self.server.invoke_successfully(qtree_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ qtree_exists = False
+ rename_qtree = False
+ qtree_detail = self.get_qtree()
+
+ if qtree_detail:
+ qtree_exists = True
+
+ if self.state == 'absent':
+ # Qtree exists, but requested state is 'absent'.
+ changed = True
+
+ elif self.state == 'present':
+ if self.name is not None and not self.name == \
+ self.name:
+ changed = True
+ rename_qtree = True
+
+ else:
+ if self.state == 'present':
+ # Qtree does not exist, but requested state is 'present'.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not qtree_exists:
+ self.create_qtree()
+
+ else:
+ if rename_qtree:
+ self.rename_qtree()
+
+ elif self.state == 'absent':
+ self.delete_qtree()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTQTree()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
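
All of these modules implement check mode the same way: apply() first decides whether a change is needed, and only performs the ZAPI calls when check_mode is off, so a dry run still reports changed accurately. A stripped-down sketch of that create/delete flow (names are illustrative and the rename/resize branches are omitted):

    def apply_state(exists, state, check_mode, create, delete):
        # Decide first, act only outside check mode.
        changed = (exists and state == 'absent') or (not exists and state == 'present')
        if changed and not check_mode:
            if state == 'present':
                create()
            else:
                delete()
        return changed

    # A dry run reports the pending change without calling create():
    assert apply_state(False, 'present', True,
                       create=lambda: None, delete=lambda: None) is True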
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py
new file mode 100644
index 00000000..0227a014
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_svm.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_svm
+
+short_description: Manage NetApp cDOT SVMs
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_svm) instead.
+
+description:
+- Create or destroy SVMs on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ required: true
+
+ root_volume:
+ description:
+ - Root volume of the SVM. Required when C(state=present).
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Required when C(state=present).
+
+ root_volume_security_style:
+ description:
+ - Security Style of the root volume.
+ - When specified as part of the vserver-create, this field represents the security style for the Vserver root volume.
+ - When specified as part of vserver-get-iter call, this will return the list of matching Vservers.
+    - Valid options are 'unix' for NFS, 'ntfs' for CIFS, 'mixed' for mixed, and 'unified' for unified.
+    - The 'unified' security style applies only to Infinite Volumes and cannot be used for a Vserver's root volume.
+    - Required when C(state=present).
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ community.general.na_cdot_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTSVM(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['root_volume',
+ 'root_volume_aggregate',
+ 'root_volume_security_style'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.root_volume = p['root_volume']
+ self.root_volume_aggregate = p['root_volume_aggregate']
+ self.root_volume_security_style = p['root_volume_security_style']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_vserver(self):
+ """
+ Checks if vserver exists.
+
+ :return:
+ True if vserver found
+ False if vserver is not found
+ :rtype: bool
+ """
+
+ vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-info', **{'vserver-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+
+ """
+ TODO:
+ Return more relevant parameters about vserver that can
+ be updated by the playbook.
+ """
+ return True
+ else:
+ return False
+
+ def create_vserver(self):
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-create', **{'vserver-name': self.name,
+ 'root-volume': self.root_volume,
+ 'root-volume-aggregate':
+ self.root_volume_aggregate,
+ 'root-volume-security-style':
+ self.root_volume_security_style
+ })
+
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_vserver(self):
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self):
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.name,
+ 'new-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ vserver_exists = self.get_vserver()
+ rename_vserver = False
+ if vserver_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Update properties
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not vserver_exists:
+ self.create_vserver()
+
+ else:
+ if rename_vserver:
+ self.rename_vserver()
+
+ elif self.state == 'absent':
+ self.delete_vserver()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTSVM()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
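
na_cdot_svm leans on AnsibleModule's required_if rule to make root_volume, root_volume_aggregate, and root_volume_security_style mandatory only when state=present; deleting an SVM needs only its name. Each rule is a (parameter, value, requirements) tuple, roughly interpreted like this (a simplified illustration of the semantics, not Ansible's implementation):

    def check_required_if(rules, params):
        missing = []
        for key, value, requirements in rules:
            if params.get(key) == value:
                missing.extend(r for r in requirements if params.get(r) is None)
        return missing

    rules = [('state', 'present',
              ['root_volume', 'root_volume_aggregate', 'root_volume_security_style'])]
    # state=absent does not need the root volume parameters:
    assert check_required_if(rules, {'state': 'absent', 'name': 'ansibleVServer'}) == []
    # state=present without them would be rejected:
    assert len(check_required_if(rules, {'state': 'present', 'name': 'svm1'})) == 3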
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py
new file mode 100644
index 00000000..626e0aa0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user
+
+short_description: useradmin configuration and management
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user) instead.
+
+description:
+- Create or destroy users.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+
+ application:
+ description:
+ - Applications to grant access to.
+ required: true
+ choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
+
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+    - password for console application.
+ - password, domain, nsswitch, cert for http application.
+ - password, domain, nsswitch, cert for ontapi application.
+ - community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - usm and community for snmp application (when creating SNMPv3 users).
+ - password for sp application.
+ - password for rsh application.
+ - password for telnet application.
+ - password, publickey, domain, nsswitch for ssh application.
+ required: true
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
+
+ set_password:
+ description:
+ - Password for the user account.
+    - It is ignored for creating SNMP users, but is required for creating non-SNMP users.
+ - For an existing user, this value will be used as the new password.
+
+ role_name:
+ description:
+    - The name of the role. Required when C(state=present).
+
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ community.general.na_cdot_user:
+ state: present
+ name: SampleUser
+ application: ssh
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ application=dict(required=True, type='str', choices=[
+ 'console', 'http', 'ontapi', 'rsh',
+ 'snmp', 'sp', 'ssh', 'telnet']),
+ authentication_method=dict(required=True, type='str',
+ choices=['community', 'password',
+ 'publickey', 'domain',
+ 'nsswitch', 'usm']),
+ set_password=dict(required=False, type='str', default=None),
+ role_name=dict(required=False, type='str'),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['role_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.application = p['application']
+ self.authentication_method = p['authentication_method']
+ self.set_password = p['set_password']
+ self.role_name = p['role_name']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_user(self):
+ """
+ Checks if the user exists.
+
+ :return:
+ True if user found
+ False if user is not found
+ :rtype: bool
+ """
+
+ security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-account-info', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(security_login_get_iter,
+ enable_tunneling=False)
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16034 denotes a user not being found.
+ if to_native(e.code) == "16034":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def create_user(self):
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-create', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method,
+ 'role-name': self.role_name})
+ if self.set_password is not None:
+ user_create.add_new_child('password', self.set_password)
+
+ try:
+ self.server.invoke_successfully(user_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_user(self):
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_password(self):
+ """
+ Changes the password
+
+ :return:
+ True if password updated
+ False if password is not updated
+ :rtype: bool
+ """
+ self.server.set_vserver(self.vserver)
+ modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify-password', **{
+ 'new-password': str(self.set_password),
+ 'user-name': self.name})
+ try:
+ self.server.invoke_successfully(modify_password,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == '13114':
+ return False
+ else:
+ self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ self.server.set_vserver(None)
+ return True
+
+ def apply(self):
+ property_changed = False
+ password_changed = False
+ user_exists = self.get_user()
+
+ if user_exists:
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if self.set_password is not None:
+ password_changed = self.change_password()
+ else:
+ if self.state == 'present':
+ # Check if anything needs to be updated
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not user_exists:
+ self.create_user()
+
+ # Add ability to update parameters.
+
+ elif self.state == 'absent':
+ self.delete_user()
+
+ changed = property_changed or password_changed
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
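
change_password() is the one call issued in the vserver context: the module points the connection at the vserver with set_vserver(), sends security-login-modify-password, and returns False when ZAPI answers with error 13114, which it takes to mean the password was not updated, so the changed flag stays accurate. A condensed sketch of that flow, assuming a server handle from netapp_utils.setup_ontap_zapi() (written as a standalone function for illustration; in the module this is a method):

    import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
    from ansible.module_utils._text import to_native

    def change_password(server, module, vserver, user_name, new_password):
        server.set_vserver(vserver)
        request = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{'user-name': user_name,
                                                 'new-password': str(new_password)})
        try:
            server.invoke_successfully(request, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            if to_native(e.code) == '13114':
                return False  # treated as "password not updated"
            module.fail_json(msg='Error setting password: %s' % to_native(e))
        finally:
            server.set_vserver(None)  # drop back to the cluster context
        return True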
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py
new file mode 100644
index 00000000..88133200
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_user_role.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user_role
+
+short_description: useradmin configuration and management
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user_role) instead.
+
+description:
+- Create or destroy user roles.
+
+options:
+
+ state:
+ description:
+    - Whether the specified role should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+
+ command_directory_name:
+ description:
+    - The command or command directory to which the role has access.
+ required: true
+
+ access_level:
+ description:
+    - The access level to grant the role on the command or command directory.
+ choices: ['none', 'readonly', 'all']
+ default: 'all'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ community.general.na_cdot_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: DEFAULT
+ access_level: none
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.command_directory_name = p['command_directory_name']
+ self.access_level = p['access_level']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_role(self):
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-create', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name,
+ 'access-level':
+ self.access_level})
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ role_exists = self.get_role()
+
+ if role_exists:
+ if self.state == 'absent':
+ changed = True
+
+ # Check if properties need to be updated
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not role_exists:
+ self.create_role()
+
+ # Update properties
+
+ elif self.state == 'absent':
+ self.delete_role()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUserRole()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
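
The existence checks in these modules distinguish "object not found" from a real failure by inspecting the ZAPI error code: 13040 for aggregates, 16034 for login accounts, and 16031 for roles, as the comments above note. A generic version of that guard, built only on the netapp_lib calls already used here (the helper name and the shared code set are illustrative):

    import traceback
    import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
    from ansible.module_utils._text import to_native

    NOT_FOUND_CODES = {'13040', '16031', '16034'}  # aggregate / role / user lookups

    def invoke_or_none(server, module, request, what):
        # Return the ZAPI result, None when the object does not exist,
        # and fail the module for any other API error.
        try:
            return server.invoke_successfully(request, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            if to_native(e.code) in NOT_FOUND_CODES:
                return None
            module.fail_json(msg='Error getting %s: %s' % (what, to_native(e)),
                             exception=traceback.format_exc())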
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py
new file mode 100644
index 00000000..c10911d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_cdot_volume.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_volume
+
+short_description: Manage NetApp cDOT volumes
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_volume) instead.
+
+description:
+- Create or destroy volumes on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ infinite:
+ description:
+ - Set True if the volume is an Infinite Volume.
+ type: bool
+ default: 'no'
+
+ online:
+ description:
+ - Whether the specified volume is online, or not.
+ type: bool
+ default: 'yes'
+
+ aggregate_name:
+ description:
+    - The name of the aggregate the FlexVol should exist on. Required when C(state=present).
+
+ size:
+ description:
+    - The size of the volume in C(size_unit). Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+
+ junction_path:
+ description:
+    - Junction path at which to mount the volume.
+ required: false
+
+ export_policy:
+ description:
+ - Export policy to set for the specified junction path.
+ required: false
+ default: default
+
+ snapshot_policy:
+ description:
+ - Snapshot policy to set for the specified volume.
+ required: false
+ default: default
+
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ aggregate_name: aggr1
+ size: 20
+ size_unit: mb
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ junction_path: /ansibleVolume
+ export_policy: all_nfs_networks
+ snapshot_policy: daily
+
+ - name: Make FlexVol offline
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
+ is_online=dict(required=False, type='bool', default=True, aliases=['online']),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ aggregate_name=dict(type='str'),
+            vserver=dict(required=True, type='str'),
+ junction_path=dict(required=False, type='str', default=None),
+ export_policy=dict(required=False, type='str', default='default'),
+ snapshot_policy=dict(required=False, type='str', default='default'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['aggregate_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.is_infinite = p['is_infinite']
+ self.is_online = p['is_online']
+ self.size_unit = p['size_unit']
+ self.vserver = p['vserver']
+ self.junction_path = p['junction_path']
+ self.export_policy = p['export_policy']
+ self.snapshot_policy = p['snapshot_policy']
+
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.aggregate_name = p['aggregate_name']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_volume(self):
+ """
+ Return details about the volume
+ :param:
+ name : Name of the volume
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.name)
+ volume_attributes.add_child_elem(volume_id_attributes)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+
+ volume_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(volume_info, True)
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ volume_attributes = result.get_child_by_name(
+ 'attributes-list').get_child_by_name(
+ 'volume-attributes')
+ # Get volume's current size
+ volume_space_attributes = volume_attributes.get_child_by_name(
+ 'volume-space-attributes')
+ current_size = volume_space_attributes.get_child_content('size')
+
+ # Get volume's state (online/offline)
+ volume_state_attributes = volume_attributes.get_child_by_name(
+ 'volume-state-attributes')
+ current_state = volume_state_attributes.get_child_content('state')
+ is_online = None
+ if current_state == "online":
+ is_online = True
+ elif current_state == "offline":
+ is_online = False
+ return_value = {
+ 'name': self.name,
+ 'size': current_size,
+ 'is_online': is_online,
+ }
+
+ return return_value
+
+ def create_volume(self):
+ create_parameters = {'volume': self.name,
+ 'containing-aggr-name': self.aggregate_name,
+ 'size': str(self.size),
+ }
+ if self.junction_path:
+ create_parameters['junction-path'] = str(self.junction_path)
+ if self.export_policy != 'default':
+ create_parameters['export-policy'] = str(self.export_policy)
+ if self.snapshot_policy != 'default':
+ create_parameters['snapshot-policy'] = str(self.snapshot_policy)
+
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-create', **create_parameters)
+
+ try:
+ self.server.invoke_successfully(volume_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_volume(self):
+ if self.is_infinite:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.name})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.name, 'unmount-and-offline':
+ 'true'})
+
+ try:
+ self.server.invoke_successfully(volume_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_volume(self):
+ """
+ Rename the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename-async',
+ **{'volume-name': self.name, 'new-volume-name': str(
+ self.name)})
+ else:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename', **{'volume': self.name, 'new-volume-name': str(
+ self.name)})
+ try:
+ self.server.invoke_successfully(volume_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+        Note: 'is_infinite' needs to be set to True in order to resize an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size-async',
+ **{'volume-name': self.name, 'new-size': str(
+ self.size)})
+ else:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size', **{'volume': self.name, 'new-size': str(
+ self.size)})
+ try:
+ self.server.invoke_successfully(volume_resize,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_volume_state(self):
+ """
+ Change volume's state (offline/online).
+
+ Note: 'is_infinite' needs to be set to True in order to change the
+ state of an Infinite Volume.
+ """
+ state_requested = None
+ if self.is_online:
+ # Requested state is 'online'.
+ state_requested = "online"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online',
+ **{'name': self.name})
+ else:
+ # Requested state is 'offline'.
+ state_requested = "offline"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline',
+ **{'name': self.name})
+ try:
+ self.server.invoke_successfully(volume_change_state,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
+ (self.name, state_requested, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ rename_volume = False
+ resize_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ if str(volume_detail['size']) != str(self.size):
+ resize_volume = True
+ changed = True
+ if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
+ changed = True
+ if self.is_online is False:
+ # Volume is online, but requested state is offline
+ pass
+ else:
+ # Volume is offline but requested state is online
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+
+ else:
+ if resize_volume:
+ self.resize_volume()
+ if volume_detail['is_online'] is not \
+ None and volume_detail['is_online'] != \
+ self.is_online:
+ self.change_volume_state()
+ # Ensure re-naming is the last change made.
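+                        # Note: rename_volume is initialised to False in apply() and is
+                        # never set to True, so this rename branch is effectively
+                        # unreachable in this module as written.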
+ if rename_volume:
+ self.rename_volume()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py
new file mode 100644
index 00000000..0fc61afb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/na_ontap_gather_facts.py
@@ -0,0 +1,610 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_gather_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(netapp.ontap.na_ontap_info) instead.
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+- community.general._netapp.na_ontap
+
+short_description: NetApp information gatherer
+description:
+  - This module allows you to gather various information about ONTAP configuration.
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
+ "net_ifgrp_info",
+ "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
+ "nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
+ "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
+ "security_login_account_info", "storage_failover_info", "volume_info",
+ "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
+ Can specify a list of values to include a larger subset. Values can also be used
+ with an initial C(M(!)) to specify that a specific subset should
+ not be collected.
+    - NVMe subsets are supported with ONTAP 9.4 onwards.
+    - Use "help" to get a list of supported facts for your system.
+ default: "all"
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info (Password Authentication)
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+- ansible.builtin.debug:
+ var: ontap_facts
+- name: Limit Fact Gathering to Aggregate Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+- name: Limit Fact Gathering to Volume and Lun Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+- name: Gather all facts except for volume and lun information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+'''
+
+RETURN = '''
+ontap_facts:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_facts": {
+ "aggregate_info": {...},
+ "cluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_port_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "volume_info": {...},
+ "lun_info": {...},
+ "storage_failover_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "ontap_version": {...},
+ "igroup_info": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...}
+        }
+    }'
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherFacts(object):
+ '''Class with gather facts methods'''
+
+ def __init__(self, module):
+ self.module = module
+ self.netapp_info = dict()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.fact_subsets = {
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'field': 'interface-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'field': ('node', 'port'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'field': 'node-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'field': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'field': 'aggregate-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'field': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'field': ('node', 'key-id'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'field': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'field': 'subsystem',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ }
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
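+        # The returned ONTAPI minor version string is compared against each
+        # subset's 'min_version' in fact_subsets (per the comments above,
+        # '130' corresponds to ONTAP 9.3 and '140' to ONTAP 9.4).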
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+ def call_api(self, call, query=None):
+ '''Main method to run an API call'''
+
+ api_call = netapp_utils.zapi.NaElement(call)
+ result = None
+
+ if query:
+ for key, val in query.items():
+ # Can val be nested?
+ api_call.add_new_child(key, val)
+ try:
+ result = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ return result
+ except netapp_utils.zapi.NaApiError as error:
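+            # For security-key-manager-key-get-iter, return None instead of failing
+            # so the subset is simply skipped (this call can error, for example on
+            # systems where no key manager is configured).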
+ if call in ['security-key-manager-key-get-iter']:
+ return result
+ else:
+ self.module.fail_json(msg="Error calling API %s: %s"
+ % (call, to_native(error)), exception=traceback.format_exc())
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
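+        # net_port_info keys are '<node>:<port>' strings (see the field tuple in
+        # fact_subsets); ifgroup ports are re-queried per node/ifgrp pair below.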
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.fact_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query)
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
+ '''Method to run a generic get-iter call'''
+
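+        # Returns a dict keyed by 'field' (tuple fields are joined with ':'),
+        # a list of records when 'field' is None, or None when nothing matched.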
+ generic_call = self.call_api(call, query)
+
+ if call == 'net-port-ifgrp-get':
+ children = 'attributes'
+ else:
+ children = 'attributes-list'
+
+ if generic_call is None:
+ return None
+
+ if field is None:
+ out = []
+ else:
+ out = {}
+
+ attributes_list = generic_call.get_child_by_name(children)
+
+ if attributes_list is None:
+ return None
+
+ for child in attributes_list.get_children():
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ if isinstance(field, str):
+ unique_key = _finditem(dic, field)
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ elif isinstance(field, tuple):
+ unique_key = ':'.join([_finditem(dic, el) for el in field])
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ else:
+ out.append(convert_keys(json.loads(json.dumps(dic))))
+
+ return out
+
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
+
+ self.netapp_info['ontap_version'] = self.ontapi()
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ for subset in run_subset:
+ call = self.fact_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+        '''Method to determine which subsets to collect'''
+
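+        # Example: gather_subset=['!lun_info'] collects every subset supported by
+        # the remote ONTAPI version except lun_info; 'all' and 'help' are handled
+        # as special values below.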
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.fact_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, key):
+
+ value = __finditem(obj, key)
+ if value is not None:
+ return value
+ raise KeyError(key)
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
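+    # Example: {'net-port-info': {'port-type': 'if_group'}} becomes
+    # {'net_port_info': {'port_type': 'if_group'}}.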
+ out = {}
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ else:
+ return d_param
+ return out
+
+
+def main():
+ '''Execute action'''
+
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_XMLTODICT:
+ module.fail_json(msg="xmltodict missing")
+
+ if not HAS_JSON:
+ module.fail_json(msg="json missing")
+
+ state = module.params['state']
+ gather_subset = module.params['gather_subset']
+ if gather_subset is None:
+ gather_subset = ['all']
+ gf_obj = NetAppONTAPGatherFacts(module)
+ gf_all = gf_obj.get_all(gather_subset)
+ result = {'state': state, 'changed': False}
+ module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py
new file mode 100644
index 00000000..248fd105
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nagios.py
@@ -0,0 +1,1304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications.
+description:
+ - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - The C(nagios) module is not idempotent.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
+  - You can specify multiple services at once by separating them with commas, e.g. C(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ e.g., C(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+    To schedule downtime for all services on a particular host, use the keyword "all", e.g. C(service=all).
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ type: str
+ host:
+ description:
+ - Host to operate on in Nagios.
+ type: str
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ type: str
+ author:
+ description:
+ - Author to leave downtime comments as.
+        Only usable with the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Ansible
+ comment:
+ description:
+      - Comment for the C(downtime) and C(acknowledge) actions.
+ type: str
+ default: Scheduling downtime
+ start:
+ description:
+ - When downtime should start, in time_t format (epoch seconds).
+ version_added: '0.2.0'
+ type: str
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ type: int
+ default: 30
+ services:
+ description:
+ - What to manage downtime/alerts for. Separate multiple services with commas.
+ C(service) is an alias for C(services).
+ B(Required) option when using the C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), and C(disable_alerts) actions.
+ aliases: [ "service" ]
+ type: str
+ servicegroup:
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+        B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ type: str
+ command:
+ description:
+ - The raw command to send to nagios, which
+        should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ type: str
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+- name: Set 30 minutes of apache downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+ community.general.nagios:
+ action: downtime
+ start: 1555984800
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime, with a comment describing the reason
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
+
+- name: Schedule downtime for ALL services on HOST
+ community.general.nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule downtime for a few services
+ community.general.nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all services in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all hosts in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Delete all downtime for a given host
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+- name: Delete all downtime for HOST with a particular comment
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
+
+- name: Acknowledge a HOST problem with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: 'power outage - see casenr 12345'
+
+- name: Acknowledge an active service problem for the httpd service with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: httpd
+ host: '{{ inventory_hostname }}'
+ comment: 'service crashed - see casenr 12345'
+
+- name: Reset a passive service check for snmp trap
+ community.general.nagios:
+ action: forced_check
+ service: snmp
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for the httpd service
+ community.general.nagios:
+ action: forced_check
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for all services of a particular host
+ community.general.nagios:
+ action: forced_check
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for a particular host
+ community.general.nagios:
+ action: forced_check
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Enable SMART disk alerts
+ community.general.nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
+
+- name: Disable httpd and nfs alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
+
+- name: Disable HOST alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Silence ALL alerts
+ community.general.nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
+
+- name: Unsilence all alerts
+ community.general.nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
+
+- name: Shut up nagios
+ community.general.nagios:
+ action: silence_nagios
+
+- name: Annoy me nagios
+ community.general.nagios:
+ action: unsilence_nagios
+
+- name: Command something
+ community.general.nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+######################################################################
+
+def which_cmdfile():
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+ for path in locations:
+ if os.path.exists(path):
+ for line in open(path):
+ if line.startswith('command_file'):
+ return line.split('=')[1].strip()
+
+ return None
+
+######################################################################
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ 'acknowledge',
+ 'forced_check',
+ ]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=ACTION_CHOICES),
+ author=dict(default='Ansible'),
+ comment=dict(default='Scheduling downtime'),
+ host=dict(required=False, default=None),
+ servicegroup=dict(required=False, default=None),
+ start=dict(required=False, default=None),
+ minutes=dict(default=30, type='int'),
+ cmdfile=dict(default=which_cmdfile()),
+ services=dict(default=None, aliases=['service']),
+ command=dict(required=False, default=None),
+ )
+ )
+
+ action = module.params['action']
+ host = module.params['host']
+ servicegroup = module.params['servicegroup']
+ start = module.params['start']
+ services = module.params['services']
+ cmdfile = module.params['cmdfile']
+ command = module.params['command']
+
+ ##################################################################
+ # Required args per action:
+ # downtime = (minutes, service, host)
+ # acknowledge = (service, host)
+ # (un)silence = (host)
+ # (enable/disable)_alerts = (service, host)
+ # command = command
+ #
+ # AnsibleModule will verify most stuff, we need to verify
+ # 'service' manually.
+
+ ##################################################################
+ if action not in ['command', 'silence_nagios', 'unsilence_nagios']:
+ if not host:
+ module.fail_json(msg='no host specified for action requiring one')
+ ######################################################################
+ if action == 'downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+ if action == 'delete_downtime':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to set downtime for')
+
+ ######################################################################
+
+ if action in ['servicegroup_service_downtime', 'servicegroup_host_downtime']:
+ # Make sure there's an actual servicegroup selected
+ if not servicegroup:
+ module.fail_json(msg='no servicegroup selected to set downtime for')
+
+ ##################################################################
+ if action in ['enable_alerts', 'disable_alerts']:
+ if not services:
+ module.fail_json(msg='a service is required when setting alerts')
+
+ if action in ['command']:
+ if not command:
+ module.fail_json(msg='no command passed for command action')
+ ######################################################################
+ if action == 'acknowledge':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to acknowledge')
+
+ ##################################################################
+ if action == 'forced_check':
+ # Make sure there's an actual service selected
+ if not services:
+ module.fail_json(msg='no service selected to check')
+
+ ##################################################################
+ if not cmdfile:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ##################################################################
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+ ##################################################################
+
+
+######################################################################
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ if kwargs['start'] is not None:
+ self.start = int(kwargs['start'])
+ else:
+ self.start = None
+ self.minutes = kwargs['minutes']
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
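+        # The Nagios command file is a named pipe (FIFO); verify both existence
+        # and file type before writing the external command to it.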
+ if not os.path.exists(self.cmdfile):
+ self.module.fail_json(msg='nagios command file does not exist',
+ cmdfile=self.cmdfile)
+ if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode):
+ self.module.fail_json(msg='nagios command file is not a fifo file',
+ cmdfile=self.cmdfile)
+ try:
+ fp = open(self.cmdfile, 'w')
+ fp.write(cmd)
+ fp.flush()
+ fp.close()
+ self.command_results.append(cmd.strip())
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+        host - Host to schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ Default is to use the entry time (now)
+ svc - Service to schedule downtime for, omit when for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of event to start downtime from. Leave as 0 for
+ fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
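+        # Illustrative result (hypothetical host/service/epoch values), for a
+        # 30 minute service downtime with the module defaults:
+        # "[1555984800] SCHEDULE_SVC_DOWNTIME;web01;httpd;1555984800;1555986600;1;0;1800;Ansible;Scheduling downtime\n"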
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
+ def _fmt_ack_str(self, cmd, host, author=None,
+ comment=None, svc=None, sticky=0, notify=1, persistent=0):
+ """
+ Format an external-command acknowledge string.
+
+ cmd - Nagios command ID
+        host - Host the acknowledgement applies to
+        author - Name to file the acknowledgement as
+        comment - Reason for running this command (upgrade, reboot, etc)
+        svc - Service to acknowledge the problem for, omit for a host problem
+ sticky - the acknowledgement will remain until the host returns to an UP state if set to 1
+ notify - a notification will be sent out to contacts
+ persistent - survive across restarts of the Nagios process
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
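+        # Illustrative result (hypothetical host/service values), acknowledging a
+        # service problem with the default sticky/notify/persistent flags:
+        # "[1555984800] ACKNOWLEDGE_SVC_PROBLEM;web01;httpd;0;1;0;Ansible;service crashed - see casenr 12345\n"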
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+ else:
+            # Acknowledge a host problem if no svc specified
+ ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+ ack_arg_str = ";".join(ack_args)
+ ack_str = hdr + ack_arg_str + "\n"
+
+ return ack_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+            [<service_description>];[<start_time>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+ """
+ Format an external-command forced host or service check string.
+
+ cmd - Nagios command ID
+ host - Host to check service from
+ svc - Service to check
+ start - check time
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if start is None:
+ start = entry_time + 3
+
+ if svc is None:
+ chk_args = [str(start)]
+ else:
+ chk_args = [svc, str(start)]
+
+ chk_arg_str = ";".join(chk_args)
+ chk_str = hdr + chk_arg_str + "\n"
+
+ return chk_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+        host - Host to en/disable notifications on. A value is not required
+ for global downtime
+ svc - Service to schedule downtime for. A value is not required
+ for host downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+ Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def acknowledge_svc_problem(self, host, services=None):
+ """
+ This command is used to acknowledge a particular
+ service problem.
+
+ By acknowledging the current problem, future notifications
+        for the same service state are disabled.
+
+ Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+ self._write_command(ack_cmd_str)
+
+ def acknowledge_host_problem(self, host):
+ """
+ This command is used to acknowledge a particular
+ host problem.
+
+ By acknowledging the current problem, future notifications
+        for the same host state are disabled.
+
+ Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+ <persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+ ack_cmd_str = self._fmt_ack_str(cmd, host)
+ self._write_command(ack_cmd_str)
+
+ def schedule_forced_host_check(self, host):
+ """
+ This command schedules a forced active check for a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_host_svc_check(self, host):
+ """
+ This command schedules a forced active check for all services
+ associated with a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_svc_check(self, host, services=None):
+ """
+ This command schedules a forced active check for a particular
+ service.
+
+ Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+ self._write_command(chk_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+        [<service_description>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+ in nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+ in nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+ This sends an arbitrary command to nagios
+
+ It prepends the submitted time and appends a \n
+
+ You just have to provide the properly formatted command
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
+
+ def act(self):
+ """
+ Figure out what you want to do from ansible, and then do the
+ needful (at the earliest).
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes,
+ start=self.start)
+
+ elif self.action == 'acknowledge':
+ if self.services == 'host':
+ self.acknowledge_host_problem(self.host)
+ else:
+ self.acknowledge_svc_problem(self.host, services=self.services)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == 'forced_check':
+ if self.services == 'host':
+ self.schedule_forced_host_check(self.host)
+ elif self.services == 'all':
+ self.schedule_forced_host_svc_check(self.host)
+ else:
+ self.schedule_forced_svc_check(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+ # wtf?
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" %
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py
new file mode 100644
index 00000000..fc62aa70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/cloudflare_dns.py
@@ -0,0 +1,878 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudflare_dns
+author:
+- Michael Gruener (@mgruener)
+requirements:
+ - python >= 2.6
+short_description: Manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)"
+options:
+ api_token:
+ description:
+ - API token.
+ - Required for API token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ version_added: '0.2.0'
+ account_api_key:
+ description:
+ - Account API key.
+ - Required for API key authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)"
+ type: str
+ required: false
+ aliases: [ account_api_token ]
+ account_email:
+ description:
+ - Account email. Required for API key authentication.
+ type: str
+ required: false
+ algorithm:
+ description:
+ - Algorithm number.
+ - Required for C(type=DS) and C(type=SSHFP) when C(state=present).
+ type: int
+ cert_usage:
+ description:
+ - Certificate usage number.
+ - Required for C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ hash_type:
+ description:
+ - Hash type number.
+ - Required for C(type=DS), C(type=SSHFP) and C(type=TLSA) when C(state=present).
+ type: int
+ choices: [ 1, 2 ]
+ key_tag:
+ description:
+ - DNSSEC key tag.
+ - Needed for C(type=DS) when C(state=present).
+ type: int
+ port:
+ description:
+ - Service port.
+ - Required for C(type=SRV) and C(type=TLSA).
+ type: int
+ priority:
+ description:
+ - Record priority.
+ - Required for C(type=MX) and C(type=SRV).
+ default: 1
+ type: int
+ proto:
+ description:
+ - Service protocol. Required for C(type=SRV) and C(type=TLSA).
+ - Common values are TCP and UDP.
+ - Before Ansible 2.6 only TCP and UDP were available.
+ type: str
+ proxied:
+ description:
+ - Proxy through Cloudflare network or just use DNS.
+ type: bool
+ default: no
+ record:
+ description:
+ - Record to add.
+ - Required if C(state=present).
+ - Default is C(@) (i.e. the zone name).
+ type: str
+ default: '@'
+ aliases: [ name ]
+ selector:
+ description:
+ - Selector number.
+ - Required for C(type=TLSA) when C(state=present).
+ choices: [ 0, 1 ]
+ type: int
+ service:
+ description:
+ - Record service.
+ - Required for C(type=SRV).
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with C(state=present).
+ - This will delete all other records with the same record name and type.
+ type: bool
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls.
+ type: int
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ type: int
+ default: 1
+ type:
+ description:
+ - The type of DNS record to create. Required if C(state=present).
+ - C(type=DS), C(type=SSHFP) and C(type=TLSA) added in Ansible 2.7.
+ type: str
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ value:
+ description:
+ - The record value.
+ - Required for C(state=present).
+ type: str
+ aliases: [ content ]
+ weight:
+ description:
+ - Service weight.
+ - Required for C(type=SRV).
+ type: int
+ default: 1
+ zone:
+ description:
+ - The name of the zone to work with (e.g. "example.com").
+ - The zone must already exist.
+ type: str
+ required: true
+ aliases: [ domain ]
+'''
+
+EXAMPLES = r'''
+- name: Create a test.example.net A record to point to 127.0.0.1
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ register: record
+
+- name: Create a record using an API token
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ api_token: dummyapitoken
+
+- name: Create an example.net CNAME record to example.com
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ ttl: 600
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: absent
+
+- name: Create an example.net CNAME record to example.com and proxy through Cloudflare's network
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ proxied: yes
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+# This deletes all other TXT records named "test.example.net"
+- name: Create TXT record "test.example.net" with value "unique value"
+ community.general.cloudflare_dns:
+ domain: example.net
+ record: test
+ type: TXT
+ value: unique value
+ solo: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Create an SRV record _foo._tcp.example.net
+ community.general.cloudflare_dns:
+ domain: example.net
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.example.net
+
+- name: Create an SSHFP record for login.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: login
+ type: SSHFP
+ algorithm: 4
+ hash_type: 2
+ value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
+
+- name: Create a TLSA record _25._tcp.mail.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: mail
+ port: 25
+ proto: tcp
+ type: TLSA
+ cert_usage: 3
+ selector: 1
+ hash_type: 1
+ value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+
+- name: Create a DS record for subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: DS
+ key_tag: 5464
+ algorithm: 8
+ hash_type: 2
+ value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP or TLSA
+ type: dict
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: False
+ meta:
+ description: No documentation available.
+ returned: success
+ type: dict
+ sample: { auto_added: false }
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: False
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+def lowercase_string(param):
+ if not isinstance(param, str):
+ return param
+ return param.lower()
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.api_token = module.params['api_token']
+ self.account_api_key = module.params['account_api_key']
+ self.account_email = module.params['account_email']
+ self.algorithm = module.params['algorithm']
+ self.cert_usage = module.params['cert_usage']
+ self.hash_type = module.params['hash_type']
+ self.key_tag = module.params['key_tag']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = lowercase_string(module.params['proto'])
+ self.proxied = module.params['proxied']
+ self.selector = module.params['selector']
+ self.record = lowercase_string(module.params['record'])
+ self.service = lowercase_string(module.params['service'])
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = lowercase_string(module.params['zone'])
+
+ if self.record == '@':
+ self.record = self.zone
+
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.').lower()
+
+ if (self.type == 'AAAA') and (self.value is not None):
+ self.value = self.value.lower()
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if (self.type == 'TLSA'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.port is not None):
+ self.port = '_' + str(self.port)
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
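+ # For illustration (assumption, not from the original comments): with
+ # zone 'example.com', record 'mail', proto 'tcp' and port 25, a TLSA
+ # lookup ends up targeting the name '_25._tcp.mail.example.com'.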
+
+ if (self.type == 'DS'):
+ if self.record == self.zone:
+ self.module.fail_json(msg="DS records only apply to subdomains.")
+
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
+ if self.api_token:
+ headers = {
+ 'Authorization': 'Bearer ' + self.api_token,
+ 'Content-Type': 'application/json',
+ }
+ else:
+ headers = {
+ 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_key,
+ 'Content-Type': 'application/json',
+ }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
+
+ error_msg = ''
+ if info['status'] == 401:
+ # Unauthorized
+ error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 403:
+ # Forbidden
+ error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+
+ result = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ # Without a valid/parsed JSON response no more error processing can be done
+ if result is None:
+ self.module.fail_json(msg=error_msg)
+
+ if 'success' not in result:
+ error_msg += "; Unexpected error details: {0}".format(result.get('error'))
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
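+ # _cf_api_call wraps _cf_simple_api_call and follows Cloudflare's
+ # pagination metadata ('result_info') so callers get the complete result
+ # list rather than just the first page.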
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
+
+ data = result['result']
+
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+ parameters = ['page={0}'.format(next_page)]
+ # strip "page" parameter from call parameters (if there are any)
+ if '?' in api_call:
+ raw_api_call, query = api_call.split('?', 1)
+ parameters += [param for param in query.split('&') if not param.startswith('page')]
+ else:
+ raw_api_call = api_call
+ while next_page <= pagination['total_pages']:
+ # advance the page parameter and rebuild the query string on every
+ # iteration instead of appending another '?' to the same URL, so that
+ # pages beyond the second are actually fetched
+ parameters[0] = 'page={0}'.format(next_page)
+ result, status = self._cf_simple_api_call(raw_api_call + '?' + '&'.join(parameters), method, payload)
+ data += result['result']
+ next_page += 1
+
+ return data, status
+
+ def _get_zone_id(self, zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self, name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+ # necessary because a value of None is used to override the
+ # user-set module value
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urlencode(query)
+
+ records, status = self._cf_api_call(api_call)
+ return records
+
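+ # delete_dns_records removes the matching record(s); when called with
+ # solo=True it instead deletes every record that shares the name and type
+ # but does NOT match the desired content, which is how 'solo' enforces a
+ # single record of that name/type.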
+ def delete_dns_records(self, **kwargs):
+ params = {}
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
+ if params['type'] == 'SRV':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ elif params['type'] == 'DS':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'SSHFP':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'TLSA':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+
+ for rr in records:
+ if params['solo']:
+ if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ else:
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ return self.changed
+
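+ # ensure_dns_record builds the type-specific API payload, looks up an
+ # existing record with the same attributes and then either updates it in
+ # place or creates it; in check mode the would-be payload is returned
+ # without touching the API.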
+ def ensure_dns_record(self, **kwargs):
+ params = {}
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'][:-len('.' + params['zone'])],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ if params['type'] == 'DS':
+ for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
+ ds_data = {
+ "key_tag": params['key_tag'],
+ "algorithm": params['algorithm'],
+ "digest_type": params['hash_type'],
+ "digest": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': ds_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'SSHFP':
+ for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
+ sshfp_data = {
+ "fingerprint": params['value'],
+ "type": params['hash_type'],
+ "algorithm": params['algorithm'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': sshfp_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'TLSA':
+ for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
+ search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ tlsa_data = {
+ "usage": params['cert_usage'],
+ "selector": params['selector'],
+ "matching_type": params['hash_type'],
+ "certificate": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": search_record,
+ 'data': tlsa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ # in theory this should be impossible as Cloudflare does not allow
+ # the creation of duplicate records, but let's cover it anyway
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
+ if (cur_record['data'] != new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
+ self.changed = True
+ return result, self.changed
+ else:
+ return records, self.changed
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
+ self.changed = True
+ return result, self.changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_token=dict(type='str', required=False, no_log=True),
+ account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str', required=False),
+ algorithm=dict(type='int'),
+ cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ hash_type=dict(type='int', choices=[1, 2]),
+ key_tag=dict(type='int'),
+ port=dict(type='int'),
+ priority=dict(type='int', default=1),
+ proto=dict(type='str'),
+ proxied=dict(type='bool', default=False),
+ record=dict(type='str', default='@', aliases=['name']),
+ selector=dict(type='int', choices=[0, 1]),
+ service=dict(type='str'),
+ solo=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ timeout=dict(type='int', default=30),
+ ttl=dict(type='int', default=1),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ value=dict(type='str', aliases=['content']),
+ weight=dict(type='int', default=1),
+ zone=dict(type='str', required=True, aliases=['domain']),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['record', 'type', 'value']),
+ ('state', 'absent', ['record']),
+ ('type', 'SRV', ['proto', 'service']),
+ ('type', 'TLSA', ['proto', 'port']),
+ ],
+ )
+
+ if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
+ module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
+ if module.params['type'] == 'SRV':
+ if not ((module.params['weight'] is not None and module.params['port'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['weight'] is None and module.params['port'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'SSHFP':
+ if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'TLSA':
+ if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'DS':
+ if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+ # perform add, delete or update (only the TTL can be updated) of one or
+ # more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
+
+ module.exit_json(changed=changed, result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py
new file mode 100644
index 00000000..1c814a9b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsimple.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+notes:
+ - DNSimple API v1 is deprecated. Please install dnsimple-python>=1.0.0 which uses the v2 API.
+options:
+ account_email:
+ description:
+ - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+ type: str
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for more information.
+ type: str
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ type: str
+ record:
+ description:
+ - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
+ type: str
+ record_ids:
+ description:
+ - List of record IDs to ensure either exist or do not exist, depending on I(state).
+ type: list
+ type:
+ description:
+ - The type of DNS record to create.
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ type: str
+ ttl:
+ description:
+ - The TTL to give the new record in seconds.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Record value.
+ - Must be specified when trying to ensure a record exists.
+ type: str
+ priority:
+ description:
+ - Record priority.
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use when C(state) is set to C(present) on a record.
+ type: 'bool'
+ default: no
+requirements:
+ - "dnsimple >= 1.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+ community.general.dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+
+- name: Fetch my.com domain records
+ community.general.dnsimple:
+ domain: my.com
+ state: present
+ delegate_to: localhost
+ register: records
+
+- name: Delete a domain
+ community.general.dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+ community.general.dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
+ register: record
+
+- name: Delete record using record_ids
+ community.general.dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ state: absent
+ delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
+
+- name: Change TTL value for a record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
+
+- name: Delete the record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r"""# """
+
+import os
+import traceback
+from distutils.version import LooseVersion
+
+DNSIMPLE_IMP_ERR = None
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import __version__ as dnsimple_version
+ from dnsimple.dnsimple import DNSimpleException
+ HAS_DNSIMPLE = True
+except ImportError:
+ DNSIMPLE_IMP_ERR = traceback.format_exc()
+ HAS_DNSIMPLE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_email=dict(type='str'),
+ account_api_token=dict(type='str', no_log=True),
+ domain=dict(type='str'),
+ record=dict(type='str'),
+ record_ids=dict(type='list'),
+ type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL']),
+ ttl=dict(type='int', default=3600),
+ value=dict(type='str'),
+ priority=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ solo=dict(type='bool', default=False),
+ ),
+ required_together=[
+ ['record', 'value']
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR)
+
+ if LooseVersion(dnsimple_version) < LooseVersion('1.0.0'):
+ module.fail_json(msg="Current version of dnsimple Python module [%s] uses 'v1' API which is deprecated."
+ " Please upgrade to version 1.0.0 and above to use dnsimple 'v2' API." % dnsimple_version)
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
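+ # Credentials are resolved in this order: explicit module parameters,
+ # then the DNSIMPLE_EMAIL / DNSIMPLE_API_TOKEN environment variables,
+ # then whatever the dnsimple client finds on its own (e.g. a .dnsimple
+ # file, as described in the documentation above).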
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
+
+ try:
+ # Let's figure out what operation we want to do
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+
+ # state is absent
+ else:
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain), params={'name': record})]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ rr = next((r for r in records if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['priority'] != priority:
+ data = {}
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'type': record_type,
+ 'content': value,
+ }
+ if ttl:
+ data['ttl'] = ttl
+ if priority:
+ data['priority'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+
+ # state is absent
+ else:
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+
+ # state is absent
+ else:
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ except DNSimpleException as e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py
new file mode 100644
index 00000000..75135c82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/dnsmadeeasy.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
+description:
+ - >
+ Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+ monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ type: str
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ type: str
+
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+ resolution
+ required: true
+ type: str
+
+ sandbox:
+ description:
+ - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+ type: bool
+ default: 'no'
+
+ record_name:
+ description:
+ - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+ of the state argument.
+ type: str
+
+ record_type:
+ description:
+ - Record type.
+ choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ type: str
+
+ record_value:
+ description:
+ - >
+ Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+ SRV: <priority> <weight> <port> <target name>, TXT: <text value>
+ - >
+ If record_value is not specified, no changes will be made and the record will be returned in 'result'
+ (in other words, this module can be used to fetch a record's current id, type, and ttl)
+ type: str
+
+ record_ttl:
+ description:
+ - Record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ default: 1800
+ type: int
+
+ state:
+ description:
+ - Whether the record should exist or not.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+
+ monitor:
+ description:
+ - If C(yes), add or change the monitor. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ systemDescription:
+ description:
+ - Description used by the monitor.
+ default: ''
+ type: str
+
+ maxEmails:
+ description:
+ - Number of emails sent to the contact list by the monitor.
+ default: 1
+ type: int
+
+ protocol:
+ description:
+ - Protocol used by the monitor.
+ default: 'HTTP'
+ choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+ type: str
+
+ port:
+ description:
+ - Port used by the monitor.
+ default: 80
+ type: int
+
+ sensitivity:
+ description:
+ - Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5, and High = 3.
+ default: 'Medium'
+ choices: ['Low', 'Medium', 'High']
+ type: str
+
+ contactList:
+ description:
+ - Name or id of the contact list that the monitor will notify.
+ - The default C('') means the Account Owner.
+ default: ''
+ type: str
+
+ httpFqdn:
+ description:
+ - The fully qualified domain name used by the monitor.
+ type: str
+
+ httpFile:
+ description:
+ - The file at the Fqdn that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ httpQueryString:
+ description:
+ - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ failover:
+ description:
+ - If C(yes), add or change the failover. This is applicable only for A records.
+ type: bool
+ default: 'no'
+
+ autoFailover:
+ description:
+ - If true, fallback to the primary IP address is manual after a failover.
+ - If false, fallback to the primary IP address is automatic after a failover.
+ type: bool
+ default: 'no'
+
+ ip1:
+ description:
+ - Primary IP address for the failover.
+ - Required if adding or changing the monitor or failover.
+ type: str
+
+ ip2:
+ description:
+ - Secondary IP address for the failover.
+ - Required if adding or changing the failover.
+ type: str
+
+ ip3:
+ description:
+ - Tertiary IP address for the failover.
+ type: str
+
+ ip4:
+ description:
+ - Quaternary IP address for the failover.
+ type: str
+
+ ip5:
+ description:
+ - Quinary IP address for the failover.
+ type: str
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
+ - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+ These values can be registered and used in your playbooks.
+ - Only A records can have a monitor or failover.
+ - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+ - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+ - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
+
+EXAMPLES = '''
+- name: Fetch my.com domain records
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ register: response
+
+- name: Create a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+
+- name: Update the previously created record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
+
+- name: Fetch a specific record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ register: response
+
+- name: Delete a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ record_type: A
+ state: absent
+ record_name: test
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ ip3: 127.0.0.4
+ ip4: 127.0.0.5
+ ip5: 127.0.0.6
+
+- name: Add a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: my contact list
+
+- name: Add a monitor with http options
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: yes
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: 1174 # contact list id
+ httpFqdn: http://my.com
+ httpFile: example
+ httpQueryString: some string
+
+- name: Add a monitor and a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: True
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ monitor: yes
+ protocol: HTTPS
+ port: 443
+ maxEmails: 1
+ systemDescription: monitoring my.com status
+ contactList: emergencycontacts
+
+- name: Remove a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: no
+
+- name: Remove a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: no
+'''
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import json
+import hashlib
+import hmac
+import locale
+from time import strftime, gmtime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six import string_types
+
+
+class DME2(object):
+
+ def __init__(self, apikey, secret, domain, sandbox, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+
+ if sandbox:
+ self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
+ self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
+ else:
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+ self.contactList_map = None # ["contactList_name"] => ID
+
+ # Lookup the domain ID if passed as a domain name vs. ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+ self.monitor_url = 'monitor'
+ self.contactList_url = 'contactList'
+
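+ # Requests are signed the way the DNS Made Easy API expects: the
+ # x-dnsme-requestDate header carries the current GMT time and
+ # x-dnsme-hmac carries an HMAC-SHA1 of that date string keyed with the
+ # account secret.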
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ locale.setlocale(locale.LC_TIME, 'C')
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
+
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, string_types):
+ data = urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
+ try:
+ return json.load(response)
+ except Exception:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ # Note that TXT records are surrounded by quotes in the API response.
+ elif record_type == "TXT":
+ value = '"{0}"'.format(record_value)
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+ def _instMap(self, type):
+ # @TODO cache this call so it's executed only once per ansible execution
+ map = {}
+ results = {}
+
+ # iterate over e.g. self.getDomains() || self.getRecords()
+ for result in getattr(self, 'get' + type.title() + 's')():
+
+ map[result['name']] = result['id']
+ results[result['id']] = result
+
+ # e.g. self.domain_map || self.record_map
+ setattr(self, type + '_map', map)
+ setattr(self, type + 's', results) # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+ # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+ # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+ # @TODO remove record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+ def getMonitor(self, record_id):
+ return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+ def updateMonitor(self, record_id, data):
+ return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+ def prepareMonitor(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def getContactList(self, contact_list_id):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.contactLists.get(contact_list_id, False)
+
+ def getContactlists(self):
+ return self.query(self.contactList_url, 'GET')['data']
+
+ def getContactListByName(self, name):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True, no_log=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ sandbox=dict(default=False, type='bool'),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ monitor=dict(default=False, type='bool'),
+ systemDescription=dict(default=''),
+ maxEmails=dict(default=1, type='int'),
+ protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
+ port=dict(default=80, type='int'),
+ sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
+ contactList=dict(default=None),
+ httpFqdn=dict(required=False),
+ httpFile=dict(required=False),
+ httpQueryString=dict(required=False),
+ failover=dict(default=False, type='bool'),
+ autoFailover=dict(default=False, type='bool'),
+ ip1=dict(required=False),
+ ip2=dict(required=False),
+ ip3=dict(required=False),
+ ip4=dict(required=False),
+ ip5=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_together=[
+ ['record_value', 'record_ttl', 'record_type']
+ ],
+ required_if=[
+ ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
+ ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
+ ]
+ )
+
+ protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
+ sensitivities = dict(Low=8, Medium=5, High=3)
+
+ DME = DME2(module.params["account_key"], module.params[
+ "account_secret"], module.params["domain"], module.params["sandbox"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
+ for i in ["record_value", "record_type", "record_ttl"]:
+ if module.params[i] is not None:
+ new_record[i[len("record_"):]] = module.params[i]
+ # Special handling for mx record
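+ # e.g. a record_value of "10 mail.example.com" is split into mxLevel "10" and value "mail.example.com"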
+ if new_record["type"] == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
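+ # e.g. a record_value of "0 5 5060 sip.example.com" is split into priority, weight, port, and value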
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Fetch existing monitor if the A record indicates it should exist and build the new monitor
+ current_monitor = dict()
+ new_monitor = dict()
+ if current_record and current_record['type'] == 'A':
+ current_monitor = DME.getMonitor(current_record['id'])
+
+ # Build the new monitor
+ for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
+ 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
+ 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
+ if module.params[i] is not None:
+ if i == 'protocol':
+ # The API requires protocol to be a numeric in the range 1-6
+ new_monitor['protocolId'] = protocols[module.params[i]]
+ elif i == 'sensitivity':
+ # The API requires sensitivity to be a numeric of 8, 5, or 3
+ new_monitor[i] = sensitivities[module.params[i]]
+ elif i == 'contactList':
+ # The module accepts either the name or the id of the contact list
+ contact_list_id = module.params[i]
+ if not contact_list_id.isdigit() and contact_list_id != '':
+ contact_list = DME.getContactListByName(contact_list_id)
+ if not contact_list:
+ module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
+ contact_list_id = contact_list.get('id', '')
+ new_monitor['contactListId'] = contact_list_id
+ else:
+ # The module option names match the API field names
+ new_monitor[i] = module.params[i]
+
+ # Compare new record against existing one
+ record_changed = False
+ if current_record:
+ for i in new_record:
+ # Remove leading and trailing quote character from values because TXT records
+ # are surrounded by quotes.
+ if str(current_record[i]).strip('"') != str(new_record[i]):
+ record_changed = True
+ new_record['id'] = str(current_record['id'])
+
+ monitor_changed = False
+ if current_monitor:
+ for i in new_monitor:
+ if str(current_monitor.get(i)) != str(new_monitor[i]):
+ monitor_changed = True
+
+ # Follow keyword-controlled behavior: apply the requested state
+ if state == 'present':
+ # return the record if no value is specified
+ if "value" not in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ # create record and monitor as the record does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ if new_monitor.get('monitor') and record_type == "A":
+ monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
+ module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
+ else:
+ module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
+
+ # update the record
+ updated = False
+ if record_changed:
+ DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
+ updated = True
+ if monitor_changed:
+ DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
+ updated = True
+ if updated:
+ module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ elif state == 'absent':
+ changed = False
+ # delete the record (and the monitor/failover) if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=changed)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py
new file mode 100644
index 00000000..848cc1fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/haproxy.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: haproxy
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
+author:
+- Ravi Bhure (@ravibhure)
+description:
+ - Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
+notes:
+ - Enable, disable and drain commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
+ - Depends on netcat (nc) being available; you need to install the appropriate
+ package for your operating system before this module can be used.
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ - If this parameter is unset, it will be auto-detected.
+ type: str
+ drain:
+ description:
+ - Wait until the server has no active connections or until the timeout
+ determined by wait_interval and wait_retries is reached.
+ - Continue only after the status changes to 'MAINT'.
+ - This overrides the shutdown_sessions option.
+ type: bool
+ default: false
+ host:
+ description:
+ - Name of the backend host to change.
+ type: str
+ required: true
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server.
+ - This can be used to terminate long-running sessions after a server is put
+ into maintenance mode. Overridden by the drain option.
+ type: bool
+ default: no
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ type: path
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ - Note that C(drain) state was added in version 2.4.
+ - It is supported only by HAProxy version 1.5 or later.
+ - When used on versions < 1.5, it will be ignored.
+ type: str
+ required: true
+ choices: [ disabled, drain, enabled ]
+ agent:
+ description:
+ - Disable/enable agent checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: 1.0.0
+ health:
+ description:
+ - Disable/enable health checks (depending on I(state) value).
+ type: bool
+ default: no
+ version_added: "1.0.0"
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist.
+ type: bool
+ default: no
+ wait:
+ description:
+ - Wait until the server reports a status of 'UP' when C(state=enabled),
+ a status of 'MAINT' when C(state=disabled), or a status of 'DRAIN' when C(state=drain).
+ type: bool
+ default: no
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ type: int
+ default: 5
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ type: int
+ default: 25
+ weight:
+ description:
+ - The weight value to set for the server.
+ - If the value ends with the C(%) sign, then the new weight will be
+ relative to the initially configured weight.
+ - Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Disable server in 'www' backend pool
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Disable server in 'www' backend pool, also stop health/agent checks
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ health: yes
+ agent: yes
+
+- name: Disable server without backend pool name (applies to all available backend pools)
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+
+- name: Disable server, provide socket file
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+
+- name: Disable server, provide socket file, wait until status reports in maintenance
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+
+# Place server in drain mode, providing a socket file. Then check the server's
+# status every second, continuing once it changes to maintenance mode and
+# failing if that has not happened after 60 retries.
+- community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: yes
+ drain: yes
+ wait_interval: 1
+ wait_retries: 60
+
+- name: Disable backend server in 'www' backend pool and drop open sessions to it
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: yes
+
+- name: Disable server without backend pool name (applies to all available backend pools) but fail when the backend host is not found
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: yes
+
+- name: Enable server in 'www' backend pool
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Enable server in 'www' backend pool and wait until healthy
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+
+- name: Enable server in 'www' backend pool and wait until healthy, retrying 10 times with 5-second intervals
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: yes
+ wait_retries: 10
+ wait_interval: 5
+
+- name: Enable server in 'www' backend pool and change the server's weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
+
+- name: Set the server in 'www' backend pool to drain mode
+ community.general.haproxy:
+ state: drain
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+'''
+
+import csv
+import socket
+import time
+from string import Template
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_text
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled', 'drain']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+
+class HAProxy(object):
+ """
+ Used for communicating with HAProxy through its local UNIX socket interface.
+ Performs common HAProxy tasks such as enabling and
+ disabling backend servers.
+
+ The complete set of external commands Haproxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.agent = self.module.params['agent']
+ self.health = self.module.params['health']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self._drain = self.module.params['drain']
+ self.command_results = {}
+
+ def execute(self, cmd, timeout=200, capture_output=True):
+ """
+ Executes a HAProxy command by sending a message to HAProxy's local
+ UNIX socket and waiting up to 'timeout' milliseconds for the response.
+ """
+ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.client.connect(self.socket)
+ self.client.sendall(to_bytes('%s\n' % cmd))
+
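+ # Read until the peer stops sending data; the HAProxy stats socket closes the connection once the reply is complete.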
+ result = b''
+ buf = b''
+ buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
+
+ def discover_version(self):
+ """
+ Attempt to extract the haproxy version.
+ Return a tuple containing major and minor version.
+ """
+ data = self.execute('show info', 200, False)
+ lines = data.splitlines()
+ line = [x for x in lines if 'Version:' in x]
+ try:
+ version_values = line[0].partition(':')[2].strip().split('.', 3)
+ version = (int(version_values[0]), int(version_values[1]))
+ except (ValueError, TypeError, IndexError):
+ version = None
+
+ return version
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends).
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found) and state is None:
+ self.module.fail_json(
+ msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ if state is not None:
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a list of dictionaries containing the status and weight for those services.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
+ filter(lambda d: (pxname is None or d['pxname']
+ == pxname) and d['svname'] == svname, r)
+ )
+ )
+ return state or None
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+ Wait for a service to reach the specified status. Try wait_retries times
+ with wait_interval seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+ for i in range(1, self.wait_retries + 1):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
+ if status in state[0]['status']:
+ if not self._drain or (state[0]['scur'] == '0' and 'MAINT' in state[0]['status']):
+ return True
+ else:
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
+ (pxname, svname, status, self.wait_retries))
+
+ def enabled(self, host, backend, weight):
+ """
+ Enabled action: marks the server as UP, re-enables its checks, and
+ reports the current weight for the server (default); it can also
+ set the weight for the HAProxy backend server when one is provided.
+ """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if self.agent:
+ cmd += "; enable agent $pxname/$svname"
+ if self.health:
+ cmd += "; enable health $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+ def disabled(self, host, backend, shutdown_sessions):
+ """
+ Disabled action: marks the server as DOWN for maintenance. In this mode, no further checks will be
+ performed on the server until it leaves maintenance;
+ it also shuts down sessions while disabling the backend host server.
+ """
+ cmd = "get weight $pxname/$svname"
+ if self.agent:
+ cmd += "; disable agent $pxname/$svname"
+ if self.health:
+ cmd += "; disable health $pxname/$svname"
+ cmd += "; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+ def drain(self, host, backend, status='DRAIN'):
+ """
+ Drain action, sets the server to DRAIN mode.
+ In this mode, the server will not accept any new connections
+ other than those that are accepted via persistence.
+ """
+ haproxy_version = self.discover_version()
+
+ # check if haproxy version supports DRAIN state (starting with 1.5)
+ if haproxy_version and (1, 5) <= haproxy_version:
+ cmd = "set server $pxname/$svname state drain"
+ self.execute_for_backends(cmd, backend, host, status)
+
+ def act(self):
+ """
+ Figure out what you want to do from ansible, and then do it.
+ """
+ # Get the state before the run
+ self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
+
+ # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled' and self._drain:
+ self.drain(self.host, self.backend, status='MAINT')
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ elif self.state == 'drain':
+ self.drain(self.host, self.backend)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
+
+ # Report change status
+ self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
+
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=ACTION_CHOICES),
+ host=dict(type='str', required=True),
+ backend=dict(type='str'),
+ weight=dict(type='str'),
+ socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(type='bool', default=False),
+ fail_on_not_found=dict(type='bool', default=False),
+ health=dict(type='bool', default=False),
+ agent=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_retries=dict(type='int', default=WAIT_RETRIES),
+ wait_interval=dict(type='int', default=WAIT_INTERVAL),
+ drain=dict(type='bool', default=False),
+ ),
+ )
+
+ if not module.params['socket']:
+ module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py
new file mode 100644
index 00000000..a57e0ab8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip
+short_description: Manage Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip_info
+ description: Retrieve information on failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+ state:
+ description:
+ - Defines whether the IP will be routed or not.
+ - If set to C(routed), I(value) must be specified.
+ type: str
+ choices:
+ - routed
+ - unrouted
+ default: routed
+ value:
+ description:
+ - The new value for the failover IP address.
+ - Required when setting I(state) to C(routed).
+ type: str
+ timeout:
+ description:
+ - Timeout to use when routing or unrouting the failover IP.
+ - Note that the API call only returns once the failover IP has been
+ successfully routed to the new address, or has been successfully
+ unrouted.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Set value of failover IP 1.2.3.4 to 5.6.7.8
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ value: 5.6.7.8
+
+- name: Set value of failover IP 1.2.3.4 to unrouted
+ community.general.hetzner_failover_ip:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ state: unrouted
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover,
+ set_failover,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ state=dict(type='str', default='routed', choices=['routed', 'unrouted']),
+ value=dict(type='str'),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'routed', ['value']),
+ ),
+ )
+
+ failover_ip = module.params['failover_ip']
+ value = get_failover(module, failover_ip)
+ changed = False
+ before = get_failover_state(value)
+
+ if module.params['state'] == 'routed':
+ new_value = module.params['value']
+ else:
+ new_value = None
+
+ if value != new_value:
+ if module.check_mode:
+ value = new_value
+ changed = True
+ else:
+ value, changed = set_failover(module, failover_ip, new_value, timeout=module.params['timeout'])
+
+ after = get_failover_state(value)
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ **after
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py
new file mode 100644
index 00000000..4d6f9f37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_failover_ip_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_failover_ip_info
+short_description: Retrieve information on Hetzner's failover IPs
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's failover IPs.
+seealso:
+ - name: Failover IP documentation
+ description: Hetzner's documentation on failover IPs.
+ link: https://wiki.hetzner.de/index.php/Failover/en
+ - module: community.general.hetzner_failover_ip
+ description: Manage failover IPs.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ failover_ip:
+ description: The failover IP address.
+ type: str
+ required: yes
+'''
+
+EXAMPLES = r'''
+- name: Get value of failover IP 1.2.3.4
+ community.general.hetzner_failover_ip_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ failover_ip: 1.2.3.4
+ register: result
+
+- name: Print value of failover IP 1.2.3.4 in case it is routed
+ ansible.builtin.debug:
+ msg: "1.2.3.4 routes to {{ result.value }}"
+ when: result.state == 'routed'
+'''
+
+RETURN = r'''
+value:
+ description:
+ - The value of the failover IP.
+ - Will be C(none) if the IP is unrouted.
+ returned: success
+ type: str
+state:
+ description:
+ - Will be C(routed) or C(unrouted).
+ returned: success
+ type: str
+failover_ip:
+ description:
+ - The failover IP.
+ returned: success
+ type: str
+ sample: '1.2.3.4'
+failover_netmask:
+ description:
+ - The netmask for the failover IP.
+ returned: success
+ type: str
+ sample: '255.255.255.255'
+server_ip:
+ description:
+ - The main IP of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: str
+server_number:
+ description:
+ - The number of the server this failover IP is associated to.
+ - This is I(not) the server the failover IP is routed to.
+ returned: success
+ type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ get_failover_record,
+ get_failover_state,
+)
+
+
+def main():
+ argument_spec = dict(
+ failover_ip=dict(type='str', required=True),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ failover = get_failover_record(module, module.params['failover_ip'])
+ result = get_failover_state(failover['active_server_ip'])
+ result['failover_ip'] = failover['ip']
+ result['failover_netmask'] = failover['netmask']
+ result['server_ip'] = failover['server_ip']
+ result['server_number'] = failover['server_number']
+ result['changed'] = False
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py
new file mode 100644
index 00000000..ade9bd95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall.py
@@ -0,0 +1,509 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall
+version_added: '0.2.0'
+short_description: Manage Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Manage Hetzner's dedicated server firewall.
+ - Note that idempotency check for TCP flags simply compares strings and doesn't
+ try to interpret the rules. This might change in the future.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall_info
+ description: Retrieve information on firewall configuration.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ required: yes
+ type: str
+ port:
+ description:
+ - Switch port of firewall.
+ type: str
+ choices: [ main, kvm ]
+ default: main
+ state:
+ description:
+ - Status of the firewall.
+ - Firewall is active if state is C(present), and disabled if state is C(absent).
+ type: str
+ default: present
+ choices: [ present, absent ]
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ suboptions:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ ip_version:
+ description:
+ - Internet protocol version.
+ - Note that currently, only IPv4 is supported by Hetzner.
+ required: yes
+ type: str
+ choices: [ ipv4, ipv6 ]
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ - Flags supported by Hetzner are C(syn), C(fin), C(rst), C(psh) and C(urg).
+ - They can be combined with C(|) (logical or) and C(&) (logical and).
+ - See L(the documentation,https://wiki.hetzner.de/index.php/Robot_Firewall/en#Parameter)
+ for more information.
+ type: str
+ action:
+ description:
+ - Action if rule matches.
+ required: yes
+ type: str
+ choices: [ accept, discard ]
+ update_timeout:
+ description:
+ - Timeout to use when configuring the firewall.
+ - Note that the API call returns before the firewall has been
+ successfully set up.
+ type: int
+ default: 30
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Configure firewall for server with main IP 1.2.3.4
+ community.general.hetzner_firewall:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ state: present
+ whitelist_hos: yes
+ rules:
+ input:
+ - name: Allow everything to ports 20-23 from 4.3.2.1/24
+ ip_version: ipv4
+ src_ip: 4.3.2.1/24
+ dst_port: '20-23'
+ action: accept
+ - name: Allow everything to port 443
+ ip_version: ipv4
+ dst_port: '443'
+ action: accept
+ - name: Drop everything else
+ ip_version: ipv4
+ action: discard
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated and
+ I(wait_for_configured) is set to C(no), or if I(timeout) is set to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.compat import ipaddress as compat_ipaddress
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native, to_text
+
+
+RULE_OPTION_NAMES = [
+ 'name', 'ip_version', 'dst_ip', 'dst_port', 'src_ip', 'src_port',
+ 'protocol', 'tcp_flags', 'action',
+]
+
+RULES = ['input']
+
+
+def restrict_dict(dictionary, fields):
+ result = dict()
+ for k, v in dictionary.items():
+ if k in fields:
+ result[k] = v
+ return result
+
+
+def restrict_firewall_config(config):
+ result = restrict_dict(config, ['port', 'status', 'whitelist_hos'])
+ result['rules'] = dict()
+ for ruleset in RULES:
+ result['rules'][ruleset] = [
+ restrict_dict(rule, RULE_OPTION_NAMES)
+ for rule in config['rules'].get(ruleset) or []
+ ]
+ return result
+
+
+def update(before, after, params, name):
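+ # Copy the current value of 'name' into 'after'; when the corresponding module parameter is set and differs, use it instead and report a change.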
+ bv = before.get(name)
+ after[name] = bv
+ changed = False
+ pv = params[name]
+ if pv is not None:
+ changed = pv != bv
+ if changed:
+ after[name] = pv
+ return changed
+
+
+def normalize_ip(ip, ip_version):
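+ # Normalize an address to compressed CIDR form, e.g. "1.2.3.4" -> "1.2.3.4/32" for IPv4 and "::0001" -> "::1/128" for IPv6.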
+ if ip is None:
+ return ip
+ if '/' in ip:
+ ip, range = ip.split('/')
+ else:
+ ip, range = ip, ''
+ ip_addr = to_native(compat_ipaddress.ip_address(to_text(ip)).compressed)
+ if range == '':
+ range = '32' if ip_version.lower() == 'ipv4' else '128'
+ return ip_addr + '/' + range
+
+
+def update_rules(before, after, params, ruleset):
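+ # Normalize the requested and existing rules, then compare them position by position; a different count or any differing rule marks a change.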
+ before_rules = before['rules'][ruleset]
+ after_rules = after['rules'][ruleset]
+ params_rules = params['rules'][ruleset]
+ changed = len(before_rules) != len(params_rules)
+ for no, rule in enumerate(params_rules):
+ rule['src_ip'] = normalize_ip(rule['src_ip'], rule['ip_version'])
+ rule['dst_ip'] = normalize_ip(rule['dst_ip'], rule['ip_version'])
+ if no < len(before_rules):
+ before_rule = before_rules[no]
+ before_rule['src_ip'] = normalize_ip(before_rule['src_ip'], before_rule['ip_version'])
+ before_rule['dst_ip'] = normalize_ip(before_rule['dst_ip'], before_rule['ip_version'])
+ if before_rule != rule:
+ changed = True
+ after_rules.append(rule)
+ return changed
+
+
+def encode_rule(output, rulename, input):
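+ # Flatten the rules into form-encoded keys such as rules[input][0][action]=accept for the POST request body.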
+ for i, rule in enumerate(input['rules'][rulename]):
+ for k, v in rule.items():
+ if v is not None:
+ output['rules[{0}][{1}][{2}]'.format(rulename, i, k)] = v
+
+
+def create_default_rules_object():
+ rules = dict()
+ for ruleset in RULES:
+ rules[ruleset] = []
+ return rules
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ port=dict(type='str', default='main', choices=['main', 'kvm']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ whitelist_hos=dict(type='bool'),
+ rules=dict(type='dict', options=dict(
+ input=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ ip_version=dict(type='str', required=True, choices=['ipv4', 'ipv6']),
+ dst_ip=dict(type='str'),
+ dst_port=dict(type='str'),
+ src_ip=dict(type='str'),
+ src_port=dict(type='str'),
+ protocol=dict(type='str'),
+ tcp_flags=dict(type='str'),
+ action=dict(type='str', required=True, choices=['accept', 'discard']),
+ )),
+ )),
+ update_timeout=dict(type='int', default=30),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Sanitize input
+ module.params['status'] = 'active' if (module.params['state'] == 'present') else 'disabled'
+ if module.params['rules'] is None:
+ module.params['rules'] = {}
+ if module.params['rules'].get('input') is None:
+ module.params['rules']['input'] = []
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+ if not firewall_configured(result, error):
+ module.fail_json(msg='Firewall configuration cannot be read as it is not configured.')
+
+ full_before = result['firewall']
+ if not full_before.get('rules'):
+ full_before['rules'] = create_default_rules_object()
+ before = restrict_firewall_config(full_before)
+
+ # Build wanted (after) state and compare
+ after = dict(before)
+ changed = False
+ changed |= update(before, after, module.params, 'port')
+ changed |= update(before, after, module.params, 'status')
+ changed |= update(before, after, module.params, 'whitelist_hos')
+ after['rules'] = create_default_rules_object()
+ if module.params['status'] == 'active':
+ for ruleset in RULES:
+ changed |= update_rules(before, after, module.params, ruleset)
+
+ # Update if different
+ construct_result = True
+ construct_status = None
+ if changed and not module.check_mode:
+ # https://robot.your-server.de/doc/webservice/en.html#post-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ data = dict(after)
+ data['whitelist_hos'] = str(data['whitelist_hos']).lower()
+ del data['rules']
+ for ruleset in RULES:
+ encode_rule(data, ruleset, after)
+ result, error = fetch_url_json(
+ module,
+ url,
+ method='POST',
+ timeout=module.params['update_timeout'],
+ data=urlencode(data),
+ headers=headers,
+ )
+ if module.params['wait_for_configured'] and not firewall_configured(result, error):
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ skip_first=True,
+ )
+ except CheckDoneTimeoutException as e:
+ result, error = e.result, e.error
+ module.warn('Timeout while waiting for firewall to be configured.')
+
+ full_after = result['firewall']
+ if not full_after.get('rules'):
+ full_after['rules'] = create_default_rules_object()
+ construct_status = full_after['status']
+ if construct_status != 'in process':
+ # Only use result if configuration is done, so that diff will be ok
+ after = restrict_firewall_config(full_after)
+ construct_result = False
+
+ if construct_result:
+ # Construct result (used for check mode, and configuration still in process)
+ full_after = dict(full_before)
+ for k, v in after.items():
+ if k != 'rules':
+ full_after[k] = after[k]
+ if construct_status is not None:
+ # We want 'in process' here
+ full_after['status'] = construct_status
+ full_after['rules'] = dict()
+ for ruleset in RULES:
+ full_after['rules'][ruleset] = after['rules'][ruleset]
+
+ module.exit_json(
+ changed=changed,
+ diff=dict(
+ before=before,
+ after=after,
+ ),
+ firewall=full_after,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py
new file mode 100644
index 00000000..fde06a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/hetzner_firewall_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hetzner_firewall_info
+version_added: '0.2.0'
+short_description: Retrieve information on Hetzner's dedicated server firewall
+author:
+ - Felix Fontein (@felixfontein)
+description:
+ - Retrieve information on Hetzner's dedicated server firewall.
+seealso:
+ - name: Firewall documentation
+ description: Hetzner's documentation on the stateless firewall for dedicated servers
+ link: https://wiki.hetzner.de/index.php/Robot_Firewall/en
+ - module: community.general.hetzner_firewall
+ description: Configure firewall.
+extends_documentation_fragment:
+- community.general.hetzner
+
+options:
+ server_ip:
+ description: The server's main IP address.
+ type: str
+ required: yes
+ wait_for_configured:
+ description:
+ - Whether to wait until the firewall has been successfully configured before
+ determining what to do, and before returning from the module.
+ - The API returns status C(in process) when the firewall is currently
+ being configured. If this happens, the module will try again until
+ the status changes to C(active) or C(disabled).
+ - Please note that there is a request limit. If you have to do multiple
+ updates, it can be better to disable waiting, and regularly use
+ M(community.general.hetzner_firewall_info) to query status.
+ type: bool
+ default: yes
+ wait_delay:
+ description:
+ - Delay to wait (in seconds) before checking again whether the firewall has
+ been configured.
+ type: int
+ default: 10
+ timeout:
+ description:
+ - Timeout (in seconds) for waiting for firewall to be configured.
+ type: int
+ default: 180
+'''
+
+EXAMPLES = r'''
+- name: Get firewall configuration for server with main IP 1.2.3.4
+ community.general.hetzner_firewall_info:
+ hetzner_user: foo
+ hetzner_password: bar
+ server_ip: 1.2.3.4
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.firewall }}"
+'''
+
+RETURN = r'''
+firewall:
+ description:
+ - The firewall configuration.
+ type: dict
+ returned: success
+ contains:
+ port:
+ description:
+ - Switch port of firewall.
+ - C(main) or C(kvm).
+ type: str
+ sample: main
+ server_ip:
+ description:
+ - Server's main IP address.
+ type: str
+ sample: 1.2.3.4
+ server_number:
+ description:
+ - Hetzner's internal server number.
+ type: int
+ sample: 12345
+ status:
+ description:
+ - Status of the firewall.
+ - C(active) or C(disabled).
+ - Will be C(in process) if the firewall is currently being updated and
+ I(wait_for_configured) is set to C(no), or if I(timeout) is set to a value that is too small.
+ type: str
+ sample: active
+ whitelist_hos:
+ description:
+ - Whether Hetzner services have access.
+ type: bool
+ sample: true
+ rules:
+ description:
+ - Firewall rules.
+ type: dict
+ contains:
+ input:
+ description:
+ - Input firewall rules.
+ type: list
+ elements: dict
+ contains:
+ name:
+ description:
+ - Name of the firewall rule.
+ type: str
+ sample: Allow HTTP access to server
+ ip_version:
+ description:
+ - Internet protocol version.
+ type: str
+ sample: ipv4
+ dst_ip:
+ description:
+ - Destination IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: 1.2.3.4/32
+ dst_port:
+ description:
+ - Destination port or port range.
+ type: str
+ sample: "443"
+ src_ip:
+ description:
+ - Source IP address or subnet address.
+ - CIDR notation.
+ type: str
+ sample: null
+ src_port:
+ description:
+ - Source port or port range.
+ type: str
+ sample: null
+ protocol:
+ description:
+ - Protocol above the IP layer.
+ type: str
+ sample: tcp
+ tcp_flags:
+ description:
+ - TCP flags or logical combination of flags.
+ type: str
+ sample: null
+ action:
+ description:
+ - Action if rule matches.
+ - C(accept) or C(discard).
+ type: str
+ sample: accept
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.hetzner import (
+ HETZNER_DEFAULT_ARGUMENT_SPEC,
+ BASE_URL,
+ fetch_url_json,
+ fetch_url_json_with_retries,
+ CheckDoneTimeoutException,
+)
+
+
+def firewall_configured(result, error):
+ return result['firewall']['status'] != 'in process'
+
+
+def main():
+ argument_spec = dict(
+ server_ip=dict(type='str', required=True),
+ wait_for_configured=dict(type='bool', default=True),
+ wait_delay=dict(type='int', default=10),
+ timeout=dict(type='int', default=180),
+ )
+ argument_spec.update(HETZNER_DEFAULT_ARGUMENT_SPEC)
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ server_ip = module.params['server_ip']
+
+ # https://robot.your-server.de/doc/webservice/en.html#get-firewall-server-ip
+ url = "{0}/firewall/{1}".format(BASE_URL, server_ip)
+ if module.params['wait_for_configured']:
+ try:
+ result, error = fetch_url_json_with_retries(
+ module,
+ url,
+ check_done_callback=firewall_configured,
+ check_done_delay=module.params['wait_delay'],
+ check_done_timeout=module.params['timeout'],
+ )
+ except CheckDoneTimeoutException as dummy:
+ module.fail_json(msg='Timeout while waiting for firewall to be configured.')
+ else:
+ result, error = fetch_url_json(module, url)
+
+ firewall = result['firewall']
+ if not firewall.get('rules'):
+ firewall['rules'] = dict()
+ for ruleset in ['input']:
+ firewall['rules'][ruleset] = []
+
+ module.exit_json(
+ changed=False,
+ firewall=firewall,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py
new file mode 100644
index 00000000..ab41f680
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/infinity/infinity.py
@@ -0,0 +1,565 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, <meiliu@fusionlayer.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+ - Manage Infinity IPAM using REST API.
+author:
+ - Meirong Liu (@MeganLiu)
+options:
+ server_ip:
+ description:
+ - Infinity server_ip with IP address.
+ type: str
+ required: true
+ username:
+ description:
+ - Username to access Infinity.
+ - The user must have REST API privileges.
+ type: str
+ required: true
+ password:
+ description:
+ - Infinity password.
+ type: str
+ required: true
+ action:
+ description:
+ - Action to perform.
+ type: str
+ required: true
+ choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+ network_id:
+ description:
+ - Network ID.
+ type: str
+ default: ''
+ ip_address:
+ description:
+ - IP Address for a reservation or a release.
+ type: str
+ default: ''
+ network_address:
+ description:
+ - Network address in CIDR format (e.g., 192.168.10.0).
+ type: str
+ default: ''
+ network_size:
+ description:
+ - Network bitmask (e.g. 255.255.255.192) or CIDR format (e.g., /26).
+ type: str
+ default: ''
+ network_name:
+ description:
+ - The name of a network.
+ type: str
+ default: ''
+ network_location:
+ description:
+ - The parent network id for a given network.
+ type: int
+ default: -1
+ network_type:
+ description:
+ - Network type defined by Infinity.
+ type: str
+ choices: [ lan, shared_lan, supernet ]
+ default: lan
+ network_family:
+ description:
+ - Network family defined by Infinity, e.g. IPv4, IPv6 or dual stack.
+ type: str
+ choices: [ 4, 6, dual ]
+ default: 4
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+ connection: local
+ strategy: debug
+ tasks:
+ - name: Reserve network into Infinity IPAM
+ community.general.infinity:
+ server_ip: 80.75.107.12
+ username: username
+ password: password
+ action: reserve_network
+ network_name: reserve_new_ansible_network
+ network_family: 4
+ network_type: lan
+ network_id: 1201
+ network_size: /28
+ register: infinity
+'''
+
+RETURN = r'''
+network_id:
+ description: id for a given network
+ returned: success
+ type: str
+ sample: '1501'
+ip_info:
+ description: When reserving the next available IP address from a network, the IP address info is returned.
+ returned: success
+ type: str
+ sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+ description: When reserving a LAN network from an Infinity supernet by providing network_size, information about the reserved network is returned.
+ returned: success
+ type: str
+ sample: {"network_address": "192.168.10.32/28","network_family": "4", "network_id": 3102,
+ "network_size": null,"description": null,"network_location": "3085",
+ "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "network_type": "lan","network_name": "'reserve_new_ansible_network'"}
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+ """
+ Class for managing REST API calls to Infinity.
+ """
+
+ def __init__(self, module, server_ip, username, password):
+ self.module = module
+ self.auth_user = username
+ self.auth_pass = password
+ self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+ def _get_api_call_ansible_handler(
+ self,
+ method='get',
+ resource_url='',
+ stat_codes=None,
+ params=None,
+ payload_data=None):
+ """
+ Perform the HTTPS request against the Infinity REST API using the given HTTP method.
+ """
+ stat_codes = [200] if stat_codes is None else stat_codes
+ request_url = str(self.base_url) + str(resource_url)
+ response = None
+ headers = {'Content-Type': 'application/json'}
+ if not request_url:
+ self.module.exit_json(
+ msg="When sending Rest api call , the resource URL is empty, please check.")
+ if payload_data and not isinstance(payload_data, str):
+ payload_data = json.dumps(payload_data)
+ response_raw = open_url(
+ str(request_url),
+ method=method,
+ timeout=20,
+ headers=headers,
+ url_username=self.auth_user,
+ url_password=self.auth_pass,
+ validate_certs=False,
+ force_basic_auth=True,
+ data=payload_data)
+
+ response = response_raw.read()
+ payload = ''
+ if response_raw.code not in stat_codes:
+ self.module.exit_json(
+ changed=False,
+ meta=" openurl response_raw.code show error and error code is %r" %
+ (response_raw.code))
+ else:
+ if isinstance(response, str) and len(response) > 0:
+ payload = response
+ elif method.lower() == 'delete' and response_raw.code == 204:
+ payload = 'Delete is done.'
+ if isinstance(payload, dict) and "text" in payload:
+ self.module.exit_json(
+ changed=False,
+ meta="when calling rest api, returned data is not json ")
+ raise Exception(payload["text"])
+ return payload
+
+ # ---------------------------------------------------------------------------
+ # get_network()
+ # ---------------------------------------------------------------------------
+ def get_network(self, network_id, network_name, limit=-1):
+ """
+ Search for a network in Infinity using the REST API.
+ Either network_id or network_name needs to be provided;
+ returns the details of the network with the given network_id or name.
+ """
+ if network_name is None and network_id is None:
+ self.module.exit_json(
+ msg="You must specify one of the options 'network_name' or 'network_id'.")
+ method = "get"
+ resource_url = ''
+ params = {}
+ response = None
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if network_id is None and network_name:
+ method = "get"
+ resource_url = "search"
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list) and len(
+ response) > 1 and limit == 1:
+ response = response[0]
+ response = json.dumps(response)
+ return response
+
+ # ---------------------------------------------------------------------------
+ # get_network_id()
+ # ---------------------------------------------------------------------------
+ def get_network_id(self, network_name="", network_type='lan'):
+ """
+ Query the network_id from Infinity via the REST API based on the given network_name.
+ """
+ method = 'get'
+ resource_url = 'search'
+ response = None
+ if network_name is None:
+ self.module.exit_json(
+ msg="You must specify the option 'network_name'")
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ network_id = ""
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list):
+ response = response[0]
+ network_id = response['id']
+ return network_id
+
+ # ---------------------------------------------------------------------------
+ # reserve_next_available_ip()
+ # ---------------------------------------------------------------------------
+ def reserve_next_available_ip(self, network_id=""):
+ """
+ Reserve the next available IP address via the Infinity REST API.
+ network_id: the id of the network to reserve an IP address from.
+ Returns the next available IP address from that network.
+ """
+ method = "post"
+ resource_url = ''
+ response = None
+ ip_info = ''
+ if not network_id:
+ self.module.exit_json(
+ msg="You must specify the option 'network_id'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_ip"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if response and response.find(
+ "[") >= 0 and response.find("]") >= 0:
+ start_pos = response.find("{")
+ end_pos = response.find("}")
+ ip_info = response[start_pos: (end_pos + 1)]
+ return ip_info
+
+ # -------------------------
+ # release_ip()
+ # -------------------------
+ def release_ip(self, network_id="", ip_address=""):
+ """
+ Release an IP address from a network via the Infinity REST API.
+ """
+ method = "get"
+ resource_url = ''
+ response = None
+ if ip_address is None or network_id is None:
+ self.module.exit_json(
+                msg="You must specify both options: 'network_id' and 'ip_address'.")
+
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+                msg="There was an error releasing IP %s from network %s." %
+ (ip_address, network_id))
+
+ ip_list = json.loads(response)
+ ip_idlist = []
+ for ip_item in ip_list:
+ ip_id = ip_item['id']
+ ip_idlist.append(ip_id)
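+        # Look up each child IP record until one matches the address to release.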
+ deleted_ip_id = ''
+ for ip_id in ip_idlist:
+ ip_response = ''
+ resource_url = "ip_addresses/" + str(ip_id)
+ ip_response = self._get_api_call_ansible_handler(
+ method,
+ resource_url,
+ stat_codes=[200])
+ if ip_response and json.loads(
+ ip_response)['address'] == str(ip_address):
+ deleted_ip_id = ip_id
+ break
+ if deleted_ip_id:
+ method = 'delete'
+ resource_url = "ip_addresses/" + str(deleted_ip_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+                msg="Could not find the IP address %r in the given network %r when releasing the IP." %
+ (ip_address, network_id))
+
+ return response
+
+ # -------------------
+ # delete_network()
+ # -------------------
+ def delete_network(self, network_id="", network_name=""):
+ """
+        Delete a network from Infinity via the REST API.
+ """
+ method = 'delete'
+ resource_url = ''
+ response = None
+ if network_id is None and network_name is None:
+ self.module.exit_json(
+                msg="You must specify one of the options 'network_id' or 'network_name'.")
+ if network_id is None and network_name:
+ network_id = self.get_network_id(network_name=network_name)
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ return response
+
+    # ---------------------------------------------------------------------------
+    # reserve_network()
+ # ---------------------------------------------------------------------------
+ def reserve_network(self, network_id="",
+ reserved_network_name="", reserved_network_description="",
+ reserved_network_size="", reserved_network_family='4',
+ reserved_network_type='lan', reserved_network_address="",):
+        """
+        Reserve the first available network of the specified size from a given supernet.
+        network_name (required): Name of the network.
+        description (optional): Free description.
+        network_family (required): Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'.
+        network_address (optional): Address of the new network. If not given, the first available network will be created.
+        network_size (required): Size of the new network in /<prefix> notation.
+        network_type (required): Type of network. One of 'supernet', 'lan', 'shared_lan'.
+        """
+ method = 'post'
+ resource_url = ''
+ network_info = None
+ if network_id is None or reserved_network_name is None or reserved_network_size is None:
+ self.module.exit_json(
+                msg="You must specify the options 'network_id', 'reserved_network_name' and 'reserved_network_size'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_network"
+ if not reserved_network_family:
+ reserved_network_family = '4'
+ if not reserved_network_type:
+ reserved_network_type = 'lan'
+ payload_data = {
+ "network_name": reserved_network_name,
+ 'description': reserved_network_description,
+ 'network_size': reserved_network_size,
+ 'network_family': reserved_network_family,
+ 'network_type': reserved_network_type,
+ 'network_location': int(network_id)}
+ if reserved_network_address:
+ payload_data.update({'network_address': reserved_network_address})
+
+ network_info = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+ return network_info
+
+ # ---------------------------------------------------------------------------
+ # release_network()
+ # ---------------------------------------------------------------------------
+ def release_network(
+ self,
+ network_id="",
+ released_network_name="",
+ released_network_type='lan'):
+ """
+ Release the network with name 'released_network_name' from the given supernet network_id
+ """
+ method = 'get'
+ response = None
+ if network_id is None or released_network_name is None:
+ self.module.exit_json(
+                msg="You must specify the options 'network_id' and 'released_network_name'.")
+ matched_network_id = ""
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+                msg="There was an error releasing network %r from supernet %s." %
+                (released_network_name, network_id))
+ if response:
+ response = json.loads(response)
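+            # Find the child network whose name matches the network to release.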
+ for child_net in response:
+ if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+ matched_network_id = child_net['network']['network_id']
+ break
+ response = None
+ if matched_network_id:
+ method = 'delete'
+ resource_url = "networks/" + str(matched_network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+                msg="Could not find the network %r in the given supernet %r when releasing the network." %
+ (released_network_name, network_id))
+
+ return response
+
+ # ---------------------------------------------------------------------------
+ # add_network()
+ # ---------------------------------------------------------------------------
+ def add_network(
+ self, network_name="", network_address="",
+ network_size="", network_family='4',
+ network_type='lan', network_location=-1):
+ """
+        Add a new LAN network into a given supernet (or the default supernet) in Fusionlayer Infinity via the REST API.
+        Required fields: 'network_name', 'network_family', 'network_type', 'network_address', 'network_size'.
+ """
+ method = 'post'
+ resource_url = 'networks'
+ response = None
+ if network_name is None or network_address is None or network_size is None:
+ self.module.exit_json(
+                msg="You must specify the options 'network_name', 'network_address' and 'network_size'.")
+
+ if not network_family:
+ network_family = '4'
+ if not network_type:
+ network_type = 'lan'
+ if not network_location:
+ network_location = -1
+ payload_data = {
+ "network_name": network_name,
+ 'network_address': network_address,
+ 'network_size': network_size,
+ 'network_family': network_family,
+ 'network_type': network_type,
+ 'network_location': network_location}
+ response = self._get_api_call_ansible_handler(
+ method='post', resource_url=resource_url,
+ stat_codes=[200], payload_data=payload_data)
+ return response
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_ip=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ network_id=dict(type='str'),
+ ip_address=dict(type='str'),
+ network_name=dict(type='str'),
+ network_location=dict(type='int', default=-1),
+ network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+ network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+ network_address=dict(type='str'),
+ network_size=dict(type='str'),
+ action=dict(type='str', required=True, choices=[
+ 'add_network',
+ 'delete_network',
+ 'get_network',
+ 'get_network_id',
+ 'release_ip',
+ 'release_network',
+ 'reserve_network',
+ 'reserve_next_available_ip',
+ ],),
+ ),
+ required_together=(
+ ['username', 'password'],
+ ),
+ )
+ server_ip = module.params["server_ip"]
+ username = module.params["username"]
+ password = module.params["password"]
+ action = module.params["action"]
+ network_id = module.params["network_id"]
+ released_ip = module.params["ip_address"]
+ network_name = module.params["network_name"]
+ network_family = module.params["network_family"]
+ network_type = module.params["network_type"]
+ network_address = module.params["network_address"]
+ network_size = module.params["network_size"]
+ network_location = module.params["network_location"]
+ my_infinity = Infinity(module, server_ip, username, password)
+ result = ''
+ if action == "reserve_next_available_ip":
+ if network_id:
+ result = my_infinity.reserve_next_available_ip(network_id)
+ if not result:
+                result = 'There was an error calling reserve_next_available_ip.'
+ module.exit_json(changed=False, meta=result)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_ip":
+ if network_id and released_ip:
+ result = my_infinity.release_ip(
+ network_id=network_id, ip_address=released_ip)
+ module.exit_json(changed=True, meta=result)
+ elif action == "delete_network":
+ result = my_infinity.delete_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "get_network_id":
+ result = my_infinity.get_network_id(
+ network_name=network_name, network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+ elif action == "get_network":
+ result = my_infinity.get_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+ elif action == "reserve_network":
+ result = my_infinity.reserve_network(
+ network_id=network_id,
+ reserved_network_name=network_name,
+ reserved_network_size=network_size,
+ reserved_network_family=network_family,
+ reserved_network_type=network_type,
+ reserved_network_address=network_address)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_network":
+ result = my_infinity.release_network(
+ network_id=network_id,
+ released_network_name=network_name,
+ released_network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "add_network":
+ result = my_infinity.add_network(
+ network_name=network_name,
+ network_location=network_location,
+ network_address=network_address,
+ network_size=network_size,
+ network_family=network_family,
+ network_type=network_type)
+
+ module.exit_json(changed=True, meta=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
new file mode 100644
index 00000000..50aec392
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ip_netns.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# (c) 2017, Arie Bregman <abregman@redhat.com>
+#
+# This file is a module for Ansible that manages Linux network namespaces
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ ip ]
+description:
+ - Create or delete network namespaces using the ip command.
+options:
+ name:
+ required: false
+ description:
+ - Name of the namespace
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the namespace should exist
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces. """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+        '''Run ip netns command'''
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command('ip netns list')
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+ return self.name in out
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+        # Report a change only when the desired state differs from the current one.
+        if self.state == 'absent' and self.exists():
+            changed = True
+        elif self.state == 'present' and not self.exists():
+            changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
new file mode 100644
index 00000000..dcdc5ef8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipify_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway
+description:
+  - Useful if you are behind NAT and need to know the public IP of your internet gateway.
+author:
+- René Moser (@resmo)
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+ - C(?format=json) will be appended per default.
+ type: str
+ default: https://api.ipify.org/
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ type: int
+ default: 10
+ validate_certs:
+ description:
+      - When set to C(no), SSL certificates will not be validated.
+ type: bool
+ default: yes
+notes:
+ - Visit https://www.ipify.org to get more information.
+'''
+
+EXAMPLES = r'''
+# Gather IP facts from ipify.org
+- name: Get my public IP
+ community.general.ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: Get my public IP
+ community.general.ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = r'''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: str
+ sample: 1.2.3.4
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class IpifyFacts(object):
+
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
+ (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+ if not response:
+            module.fail_json(msg="No valid response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
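+        # The ipify JSON endpoint is expected to return a body like {"ip": "1.2.3.4"}.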
+ data = json.loads(to_text(response.read()))
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_url=dict(type='str', default='https://api.ipify.org/'),
+ timeout=dict(type='int', default=10),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py
new file mode 100644
index 00000000..f4186cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipinfoio_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Aleksei Kostiuk <unitoff@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: "Retrieve IP geolocation facts of a host's IP address"
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+author: "Aleksei Kostiuk (@akostyuk)"
+options:
+ timeout:
+ description:
+ - HTTP connection timeout in seconds
+ required: false
+ default: 10
+ type: int
+ http_agent:
+ description:
+ - Set http user agent
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+ type: str
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: Get IP geolocation data
+ community.general.ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: complex
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: str
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: str
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: str
+ sample: "US"
+ region:
+ description: State or province name
+ type: str
+ sample: "California"
+ city:
+ description: City name
+ type: str
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: str
+ sample: "37.3860,-122.0838"
+ org:
+ description: "organization's name"
+ type: str
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: str
+ sample: "94035"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+ def get_geo_data(self):
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ timeout=self.timeout)
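+        # A 200 response is expected to carry the geolocation JSON
+        # (ip, hostname, city, region, country, loc, org, postal).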
+ try:
+            assert info['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg='Could not get {0} page, '
+ 'check for connectivity!'.format(self.url))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipinfo.io response: '
+ '{0} {1}'.format(self.url, content))
+ else:
+ return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py
new file mode 100644
index 00000000..355c7034
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ipwcli_dns.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ipwcli_dns
+
+short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+
+version_added: '0.2.0'
+
+description:
+ - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
+
+requirements:
+ - ipwcli (installed on Ericsson IPWorks)
+
+notes:
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
+
+options:
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [ NAPTR, SRV, A, AAAA ]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+ - Required for C(type=A) or C(type=AAAA)
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+ - Required for C(type=SRV)
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+ - Required for C(type=SRV)
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: int
+ flags:
+ description:
+ - Sets one of the possible flags of NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+ - Required for C(type=NAPTR)
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
+
+author:
+ - Christian Wollinger (@cwollinger)
+'''
+
+EXAMPLES = '''
+- name: Create A record
+ community.general.ipwcli_dns:
+ dnsname: example.com
+ type: A
+ container: ZoneOne
+ address: 127.0.0.1
+
+- name: Remove SRV record if exists
+ community.general.ipwcli_dns:
+ dnsname: _sip._tcp.test.example.com
+ type: SRV
+ container: ZoneOne
+ ttl: 100
+ state: absent
+ target: example.com
+ port: 5060
+
+- name: Create NAPTR record
+ community.general.ipwcli_dns:
+ dnsname: test.example.com
+ type: NAPTR
+ preference: 10
+ container: ZoneOne
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.com.'
+ flags: S
+'''
+
+RETURN = '''
+record:
+ description: The created record from the input params
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+class ResourceRecord(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.dnsname = module.params['dnsname']
+ self.dnstype = module.params['type']
+ self.container = module.params['container']
+ self.address = module.params['address']
+ self.ttl = module.params['ttl']
+ self.state = module.params['state']
+ self.priority = module.params['priority']
+ self.weight = module.params['weight']
+ self.port = module.params['port']
+ self.target = module.params['target']
+ self.order = module.params['order']
+ self.preference = module.params['preference']
+ self.flags = module.params['flags']
+ self.service = module.params['service']
+ self.replacement = module.params['replacement']
+ self.user = module.params['username']
+ self.password = module.params['password']
+
+ def create_naptrrecord(self):
+ # create NAPTR record with the given params
+ record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"'
+ % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement))
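+        # e.g. (hypothetical values):
+        #   naptrrecord sip.example.com -set ttl=3600;container=ZoneOne;order=10;preference=10;flags="S";service="SIP+D2T";replacement="_sip._tcp.example.com."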
+ return record
+
+ def create_srvrecord(self):
+ # create SRV record with the given params
+ record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s'
+ % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target))
+ return record
+
+ def create_arecord(self):
+ # create A record with the given params
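+        # e.g. (hypothetical values) 'arecord host.example.com 192.0.2.10 -set ttl=3600;container=ZoneOne'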
+ if self.dnstype == 'AAAA':
+ record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+ else:
+ record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+
+ return record
+
+ def list_record(self, record):
+ # check if the record exists via list on ipwcli
+ search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
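+        # e.g. (hypothetical values) 'arecord host.example.com 192.0.2.10 -set ttl=3600;container=ZoneOne'
+        # becomes 'list arecord host.example.com 192.0.2.10 -where ttl=3600&&container=ZoneOne'.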
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=search)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or
+ ('NAPTRRecord %s' % self.dnsname in out and rc == 0)):
+ return True, rc, out, err
+
+ return False, rc, out, err
+
+ def deploy_record(self, record):
+        # create the record on IPWorks via ipwcli
+ stdin = 'create %s' % (record)
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) created.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record creation failed', stderr=out)
+
+ def delete_record(self, record):
+        # delete the record on IPWorks via ipwcli
+ stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [self.module.get_bin_path('ipwcli', True)]
+ cmd.append('-user=%s' % (self.user))
+ cmd.append('-password=%s' % (self.password))
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) were updated.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record deletion failed', stderr=out)
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ dnsname=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
+ container=dict(type='str', required=True),
+ address=dict(type='str', required=False),
+ ttl=dict(type='int', required=False, default=3600),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priority=dict(type='int', required=False, default=10),
+ weight=dict(type='int', required=False, default=10),
+ port=dict(type='int', required=False),
+ target=dict(type='str', required=False),
+ order=dict(type='int', required=False),
+ preference=dict(type='int', required=False),
+ flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str', required=False),
+ replacement=dict(type='str', required=False),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ # define result
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ record=''
+ )
+
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ['type', 'A', ['address']],
+ ['type', 'AAAA', ['address']],
+ ['type', 'SRV', ['port', 'target']],
+ ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']],
+ ],
+ supports_check_mode=True
+ )
+
+ user = ResourceRecord(module)
+
+ if user.dnstype == 'NAPTR':
+ record = user.create_naptrrecord()
+ elif user.dnstype == 'SRV':
+ record = user.create_srvrecord()
+ elif user.dnstype == 'A' or user.dnstype == 'AAAA':
+ record = user.create_arecord()
+
+ found, rc, out, err = user.list_record(record)
+
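+    # Delete only when the record exists and state is absent; create only when it
+    # is missing and state is present; otherwise report no change.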
+ if found and user.state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.delete_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ elif not found and user.state == 'present':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.deploy_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ else:
+ result['changed'] = False
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py
new file mode 100644
index 00000000..f983b857
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attr.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ldap_attr
+short_description: Add or remove LDAP attribute values
+description:
+ - Add or remove LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+ why: 'The current "ldap_attr" module does not support LDAP attribute insertions or deletions with objectClass dependencies.'
+ alternative: 'Use M(community.general.ldap_attrs) instead. Deprecated in community.general 0.2.0.'
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ name:
+ description:
+ - The name of the attribute to modify.
+ type: str
+ required: true
+ state:
+ description:
+ - The state of the attribute values.
+ - If C(present), all given values will be added if they're missing.
+ - If C(absent), all given values will be removed if present.
+ - If C(exact), the set of values will be forced to exactly those provided and no others.
+ - If I(state=exact) and I(value) is an empty list, all values for this attribute will be removed.
+ type: str
+ choices: [ absent, exact, present ]
+ default: present
+ values:
+ description:
+ - The value(s) to add or remove. This can be a string or a list of
+ strings. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ type: raw
+ required: true
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcSuffix
+ values: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcAccess
+ values:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+        by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: olcDbIndex
+ values: "{{ item }}"
+ with_items:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attr:
+ dn: olcDatabase={1}hdb,cn=config
+ name: "{{ item.key }}"
+ values: "{{ item.value }}"
+ state: exact
+ with_dict:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an unneeded attribute
+ community.general.ldap_attr:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ name: shadowExpire
+ values: []
+ state: exact
+ args: "{{ ldap_auth }}"
+'''
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttr(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ # Normalize values
+ if isinstance(self.module.params['values'], list):
+ self.values = list(map(to_bytes, self.module.params['values']))
+ else:
+ self.values = [to_bytes(self.module.params['values'])]
+
+ def add(self):
+ values_to_add = list(filter(self._is_value_absent, self.values))
+
+ if len(values_to_add) > 0:
+ modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def delete(self):
+ values_to_delete = list(filter(self._is_value_present, self.values))
+
+ if len(values_to_delete) > 0:
+ modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
+ else:
+ modlist = []
+
+ return modlist
+
+ def exact(self):
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % self.name, e)
+
+ current = results[0][1].get(self.name, [])
+ modlist = []
+
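+        # Replace the whole value set when it differs; use a plain add when the
+        # attribute is currently empty and a delete when it should become empty.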
+ if frozenset(self.values) != frozenset(current):
+ if len(current) == 0:
+ modlist = [(ldap.MOD_ADD, self.name, self.values)]
+ elif len(self.values) == 0:
+ modlist = [(ldap.MOD_DELETE, self.name, None)]
+ else:
+ modlist = [(ldap.MOD_REPLACE, self.name, self.values)]
+
+ return modlist
+
+ def _is_value_present(self, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, self.name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ name=dict(type='str', required=True),
+ params=dict(type='dict'),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ values=dict(type='raw', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+        module.fail_json(msg="The `params` option to ldap_attr was removed since it circumvents Ansible's option handling")
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttr(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py
new file mode 100644
index 00000000..ae5cb7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_attrs.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Maciej Delmanowski <drybjed@gmail.com>
+# Copyright: (c) 2017, Alexander Korinek <noles@a3k.net>
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ldap_attrs
+short_description: Add or remove multiple LDAP attribute values
+description:
+ - Add or remove multiple LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+version_added: '0.2.0'
+author:
+ - Jiri Tyr (@jtyr)
+ - Alexander Korinek (@noles)
+ - Maciej Delmanowski (@drybjed)
+requirements:
+ - python-ldap
+options:
+ state:
+ required: false
+ type: str
+ choices: [present, absent, exact]
+ default: present
+ description:
+ - The state of the attribute values. If C(present), all given attribute
+ values will be added if they're missing. If C(absent), all given
+ attribute values will be removed if present. If C(exact), the set of
+ attribute values will be forced to exactly those provided and no others.
+ If I(state=exact) and the attribute I(value) is empty, all values for
+ this attribute will be removed.
+ attributes:
+ required: true
+ type: dict
+ description:
+ - The attribute(s) and value(s) to add or remove. The complex argument format is required in order to pass
+ a list of strings (see examples).
+ ordered:
+ required: false
+ type: bool
+ default: 'no'
+ description:
+ - If C(yes), prepend list values with X-ORDERED index numbers in all
+ attributes specified in the current task. This is useful mostly with
+ I(olcAccess) attribute to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcSuffix: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+            by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+            by * none
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ ordered: yes
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ state: exact
+
+- name: Remove an attribute with a specific value
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: "An example user account"
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+'''
+
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+import re
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttrs(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.attrs = self.module.params['attributes']
+ self.state = self.module.params['state']
+ self.ordered = self.module.params['ordered']
+
+ def _order_values(self, values):
+        """ Prepend X-ORDERED index numbers to the attribute's values. """
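+        # e.g. ['value_a', 'value_b'] becomes ['{0}value_a', '{1}value_b'];
+        # any existing '{n}' prefix is stripped first so indexes stay consistent.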
+ ordered_values = []
+
+ if isinstance(values, list):
+ for index, value in enumerate(values):
+ cleaned_value = re.sub(r'^\{\d+\}', '', value)
+ ordered_values.append('{' + str(index) + '}' + cleaned_value)
+
+ return ordered_values
+
+ def _normalize_values(self, values):
+ """ Normalize attribute's values. """
+ norm_values = []
+
+ if isinstance(values, list):
+ if self.ordered:
+ norm_values = list(map(to_bytes,
+ self._order_values(list(map(str,
+ values)))))
+ else:
+ norm_values = list(map(to_bytes, values))
+ else:
+ norm_values = [to_bytes(str(values))]
+
+ return norm_values
+
+ def add(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_absent(name, value):
+ modlist.append((ldap.MOD_ADD, name, value))
+
+ return modlist
+
+ def delete(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_present(name, value):
+ modlist.append((ldap.MOD_DELETE, name, value))
+
+ return modlist
+
+ def exact(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % name, e)
+
+ current = results[0][1].get(name, [])
+
+ if frozenset(norm_values) != frozenset(current):
+ if len(current) == 0:
+ modlist.append((ldap.MOD_ADD, name, norm_values))
+ elif len(norm_values) == 0:
+ modlist.append((ldap.MOD_DELETE, name, None))
+ else:
+ modlist.append((ldap.MOD_REPLACE, name, norm_values))
+
+ return modlist
+
+ def _is_value_present(self, name, value):
+ """ True if the target attribute has the given value. """
+ try:
+ is_present = bool(
+ self.connection.compare_s(self.dn, name, value))
+ except ldap.NO_SUCH_ATTRIBUTE:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, name, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(name, value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(type='dict', required=True),
+ ordered=dict(type='bool', default=False, required=False),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ # Instantiate the LdapAttr object
+ ldap = LdapAttrs(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py
new file mode 100644
index 00000000..7ee0c3dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_entry.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_entry
+short_description: Add or remove LDAP entries.
+description:
+ - Add or remove LDAP entries. This module only asserts the existence or
+ non-existence of an LDAP entry, not its attributes. To assert the
+ attribute values of an entry, see M(community.general.ldap_attr).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - "The I(params) parameter was removed due to circumventing Ansible's parameter
+ handling. The I(params) parameter started disallowing setting the I(bind_pw) parameter in
+ Ansible-2.7 as it was insecure to set the parameter that way."
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+options:
+ attributes:
+ description:
+ - If I(state=present), attributes necessary to create an entry. Existing
+ entries are never modified. To assert specific attribute values on an
+ existing entry, use M(community.general.ldap_attr) module instead.
+ type: dict
+ objectClass:
+ description:
+ - If I(state=present), value or list of values to use when creating
+ the entry. It can either be a string or an actual list of
+ strings.
+ type: list
+ elements: str
+ state:
+ description:
+ - The target state of the entry.
+ choices: [present, absent]
+ default: present
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+
+EXAMPLES = """
+- name: Make sure we have a parent entry for users
+ community.general.ldap_entry:
+ dn: ou=users,dc=example,dc=com
+ objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP administrator
+ userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = """
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap.modlist
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.state = self.module.params['state']
+
+ # Add the objectClass into the list of attributes
+ self.module.params['attributes']['objectClass'] = (
+ self.module.params['objectClass'])
+
+ # Load attributes
+ if self.state == 'present':
+ self.attrs = self._load_attrs()
+
+ def _load_attrs(self):
+        """ Turn each attribute's value into a list of byte strings. """
+ attrs = {}
+
+ for name, value in self.module.params['attributes'].items():
+ if isinstance(value, list):
+ attrs[name] = list(map(to_bytes, value))
+ else:
+ attrs[name] = [to_bytes(value)]
+
+ return attrs
+
+ def add(self):
+ """ If self.dn does not exist, returns a callable that will add it. """
+ def _add():
+ self.connection.add_s(self.dn, modlist)
+
+ if not self._is_entry_present():
+ modlist = ldap.modlist.addModlist(self.attrs)
+ action = _add
+ else:
+ action = None
+
+ return action
+
+ def delete(self):
+ """ If self.dn exists, returns a callable that will delete it. """
+ def _delete():
+ self.connection.delete_s(self.dn)
+
+ if self._is_entry_present():
+ action = _delete
+ else:
+ action = None
+
+ return action
+
+ def _is_entry_present(self):
+ try:
+ self.connection.search_s(self.dn, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+ else:
+ is_present = True
+
+ return is_present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(default={}, type='dict'),
+ objectClass=dict(type='list', elements='str'),
+ params=dict(type='dict'),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ required_if=[('state', 'present', ['objectClass'])],
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if module.params['params']:
+        module.fail_json(msg="The `params` option to ldap_entry was removed since it circumvents Ansible's option handling")
+
+ state = module.params['state']
+
+ # Instantiate the LdapEntry object
+ ldap = LdapEntry(module)
+
+ # Get the action function
+ if state == 'present':
+ action = ldap.add()
+ elif state == 'absent':
+ action = ldap.delete()
+
+ # Perform the action
+ if action is not None and not module.check_mode:
+ try:
+ action()
+ except Exception as e:
+ module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=(action is not None))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py
new file mode 100644
index 00000000..8d86ee93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_passwd.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2018, Keller Fuchs <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_passwd
+short_description: Set passwords in LDAP.
+description:
+ - Set a password for an LDAP entry. This module only asserts that
+ a given password is valid for a given entry. To assert the
+ existence of an entry, see M(community.general.ldap_entry).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Keller Fuchs (@KellerFuchs)
+requirements:
+ - python-ldap
+options:
+ passwd:
+ description:
+ - The (plaintext) password to be set for I(dn).
+ type: str
+extends_documentation_fragment:
+- community.general.ldap.documentation
+
+'''
+
+EXAMPLES = """
+- name: Set a password for the admin user
+ community.general.ldap_passwd:
+ dn: cn=admin,dc=example,dc=com
+ passwd: "{{ vault_secret }}"
+
+- name: Setting passwords in bulk
+ community.general.ldap_passwd:
+ dn: "{{ item.key }}"
+ passwd: "{{ item.value }}"
+ with_dict:
+ alice: alice123123
+ bob: "|30b!"
+ admin: "{{ vault_secret }}"
+"""
+
+RETURN = """
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.passwd = self.module.params['passwd']
+
+ def passwd_check(self):
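+        # A simple bind as self.dn with the candidate password acts as the
+        # check: ldap.INVALID_CREDENTIALS means the stored password differs,
+        # so a change is needed (return True); a successful bind means the
+        # password is already valid (return False).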
+ try:
+ tmp_con = ldap.initialize(self.server_uri)
+ except ldap.LDAPError as e:
+ self.fail("Cannot initialize LDAP connection", e)
+
+ if self.start_tls:
+ try:
+ tmp_con.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ tmp_con.simple_bind_s(self.dn, self.passwd)
+ except ldap.INVALID_CREDENTIALS:
+ return True
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+ else:
+ return False
+ finally:
+ tmp_con.unbind()
+
+ def passwd_set(self):
+ # Exit early if the password is already valid
+ if not self.passwd_check():
+ return False
+
+ # Change the password (or throw an exception)
+ try:
+ self.connection.passwd_s(self.dn, None, self.passwd)
+ except ldap.LDAPError as e:
+ self.fail("Unable to set password", e)
+
+ # Password successfully changed
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(passwd=dict(no_log=True)),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ ldap = LdapPasswd(module)
+
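+    # In check mode, only report whether the password would need to change;
+    # nothing is modified.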
+ if module.check_mode:
+ module.exit_json(changed=ldap.passwd_check())
+
+ module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py
new file mode 100644
index 00000000..3b1a2833
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/ldap/ldap_search.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright: (c) 2020, Sebastian Pfahl <eryx@gmx.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in an LDAP server
+description:
+ - Return the results of an LDAP search.
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Sebastian Pfahl (@eryx12o45)
+requirements:
+ - python-ldap
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The LDAP DN to search in.
+ scope:
+ choices: [base, onelevel, subordinate, children]
+ default: base
+ type: str
+ description:
+ - The LDAP scope to use.
+ filter:
+ default: '(objectClass=*)'
+ type: str
+ description:
+ - Used for filtering the LDAP search result.
+ attrs:
+ type: list
+ elements: str
+ description:
+ - A list of attributes for limiting the result. Use an
+ actual list or a comma-separated string.
+ schema:
+ default: false
+ type: bool
+ description:
+ - Set to C(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ register: ldap_groups
+
+- name: Return GIDs for all groups
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ scope: "onelevel"
+ attrs:
+ - "gidNumber"
+ register: ldap_group_gids
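+
+# The following task is an illustrative sketch (the DN and register name are
+# placeholders, not taken from the module documentation). With schema=true the
+# module returns each entry's attribute names instead of their values.
+- name: Return the attribute schema of a single entry
+  community.general.ldap_search:
+    dn: "cn=admin,dc=example,dc=com"
+    scope: "base"
+    schema: true
+  register: ldap_admin_schema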
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ dn=dict(type='str', required=True),
+ scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+ filter=dict(type='str', default='(objectClass=*)'),
+ attrs=dict(type='list', elements='str'),
+ schema=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ if not module.check_mode:
+ try:
+ LdapSearch(module).main()
+ except Exception as exception:
+ module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+ module.exit_json(changed=False)
+
+
+def _extract_entry(dn, attrs):
+ extracted = {'dn': dn}
+ for attr, val in list(attrs.items()):
+ if len(val) == 1:
+ extracted[attr] = val[0]
+ else:
+ extracted[attr] = val
+ return extracted
+
+
+class LdapSearch(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ self.dn = self.module.params['dn']
+ self.filterstr = self.module.params['filter']
+ self.attrlist = []
+ self._load_scope()
+ self._load_attrs()
+ self._load_schema()
+
+ def _load_schema(self):
+ self.schema = self.module.boolean(self.module.params['schema'])
+ if self.schema:
+ self.attrsonly = 1
+ else:
+ self.attrsonly = 0
+
+ def _load_scope(self):
+ spec = dict(
+ base=ldap.SCOPE_BASE,
+ onelevel=ldap.SCOPE_ONELEVEL,
+ subordinate=ldap.SCOPE_SUBORDINATE,
+ children=ldap.SCOPE_SUBTREE,
+ )
+ self.scope = spec[self.module.params['scope']]
+
+ def _load_attrs(self):
+ self.attrlist = self.module.params['attrs'] or None
+
+ def main(self):
+ results = self.perform_search()
+ self.module.exit_json(changed=False, results=results)
+
+ def perform_search(self):
+ try:
+ results = self.connection.search_s(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly
+ )
+ if self.schema:
+ return [dict(dn=result[0], attrs=list(result[1].keys())) for result in results]
+ else:
+ return [_extract_entry(result[0], result[1]) for result in results]
+ except ldap.NO_SUCH_OBJECT:
+ self.module.fail_json(msg="Base not found: {0}".format(self.dn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py
new file mode 100644
index 00000000..ae86db40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/lldp.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ community.general.lldp:
+
+ - name: Print each switch/port
+ ansible.builtin.debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+ with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
+ cmd = [module.get_bin_path('lldpctl'), '-f', 'keyvalue']
+ rc, output, err = module.run_command(cmd)
+ if output:
+ output_dict = {}
+ current_dict = {}
+ lldp_entries = output.split("\n")
+
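+        # lldpctl -f keyvalue prints one dotted key per line, e.g.
+        #   lldp.eth0.chassis.name=switch1.example.com
+        # (an illustrative value). Each dotted path is unfolded into nested
+        # dictionaries keyed by its components; lines that do not start with
+        # 'lldp' are treated as continuations of the previous value.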
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ value = current_dict[final] + '\n' + entry
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp(module)
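+    # gather_lldp() returns None when lldpctl produced no output, so indexing
+    # it raises TypeError and the failure below points at a stopped lldpd.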
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+        module.fail_json(msg="lldpctl command failed. Is lldpd running?")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py
new file mode 100644
index 00000000..5d63a5b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/netcup_dns.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: netcup_dns
+notes: []
+short_description: manage Netcup DNS records
+description:
+ - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
+options:
+ api_key:
+ description:
+ - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ api_password:
+ description:
+      - API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ customer_id:
+ description:
+ - Netcup customer id
+ required: True
+ type: int
+ domain:
+ description:
+      - Domain name the records should be added to or removed from
+ required: True
+ type: str
+ record:
+ description:
+      - Record to add or delete, supports wildcard (*). Default is C(@) (i.e. the zone name)
+ default: "@"
+ aliases: [ name ]
+ type: str
+ type:
+ description:
+ - Record type
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ required: True
+ type: str
+ value:
+ description:
+ - Record value
+ required: true
+ type: str
+ solo:
+ type: bool
+ default: False
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ priority:
+ description:
+ - Record priority. Required for C(type=MX)
+ required: False
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not
+ required: False
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+requirements:
+ - "nc-dnsapi >= 0.1.3"
+author: "Nicolai Buchwitz (@nbuchwitz)"
+
+'''
+
+EXAMPLES = '''
+- name: Create a record of type A
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+
+- name: Delete that record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ state: absent
+
+- name: Create a wildcard record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "*"
+ type: "A"
+ value: "127.0.1.1"
+
+- name: Set the MX record for example.com
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ type: "MX"
+    value: "mail.example.com"
+    priority: 10
+
+- name: Set a record and ensure that this is the only one
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ name: "demo"
+ domain: "example.com"
+ type: "AAAA"
+ value: "::1"
+ solo: true
+'''
+
+RETURN = '''
+records:
+ description: list containing all records
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: the record name
+ returned: success
+ type: str
+ sample: fancy-hostname
+ type:
+ description: the record type
+      returned: success
+ type: str
+ sample: A
+ value:
+ description: the record destination
+ returned: success
+ type: str
+ sample: 127.0.0.1
+ priority:
+ description: the record priority (only relevant if type=MX)
+ returned: success
+ type: int
+ sample: 0
+ id:
+ description: internal id of the record
+ returned: success
+ type: int
+ sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+NCDNSAPI_IMP_ERR = None
+try:
+ import nc_dnsapi
+ from nc_dnsapi import DNSRecord
+
+ HAS_NCDNSAPI = True
+except ImportError:
+ NCDNSAPI_IMP_ERR = traceback.format_exc()
+ HAS_NCDNSAPI = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_password=dict(required=True, no_log=True),
+ customer_id=dict(required=True, type='int'),
+
+ domain=dict(required=True),
+ record=dict(required=False, default='@', aliases=['name']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ value=dict(required=True),
+ priority=dict(required=False, type='int'),
+ solo=dict(required=False, type='bool', default=False),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NCDNSAPI:
+ module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+ api_key = module.params.get('api_key')
+ api_password = module.params.get('api_password')
+ customer_id = module.params.get('customer_id')
+ domain = module.params.get('domain')
+ record_type = module.params.get('type')
+ record = module.params.get('record')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ solo = module.params.get('solo')
+ state = module.params.get('state')
+
+ if record_type == 'MX' and not priority:
+        module.fail_json(msg="record type MX requires the 'priority' argument")
+
+ has_changed = False
+ all_records = []
+ try:
+ with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
+ all_records = api.dns_records(domain)
+ record = DNSRecord(record, record_type, value, priority=priority)
+
+ # try to get existing record
+ record_exists = False
+ for r in all_records:
+ if r == record:
+ record_exists = True
+ record = r
+
+ break
+
+ if state == 'present':
+ if solo:
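+                    # 'solo': records with the same hostname and type but a
+                    # different destination are considered obsolete and will
+                    # be removed, leaving this record as the only one.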
+ obsolete_records = [r for r in all_records if
+ r.hostname == record.hostname
+ and r.type == record.type
+ and not r.destination == record.destination]
+
+ if obsolete_records:
+ if not module.check_mode:
+ all_records = api.delete_dns_records(domain, obsolete_records)
+
+ has_changed = True
+
+ if not record_exists:
+ if not module.check_mode:
+ all_records = api.add_dns_record(domain, record)
+
+ has_changed = True
+ elif state == 'absent' and record_exists:
+ if not module.check_mode:
+ all_records = api.delete_dns_record(domain, record)
+
+ has_changed = True
+
+ except Exception as ex:
+        module.fail_json(msg=to_native(ex))
+
+ module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
+def record_data(r):
+ return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py
new file mode 100644
index 00000000..660c9bc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_a_record.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_a_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS A records
+description:
+ - Adds and/or removes instances of A record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:a) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ type: str
+ view:
+ description:
+ - Sets the DNS view to associate this A record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ type: str
+ ipv4addr:
+ description:
+      - Configures the IPv4 address for this A record. Users can dynamically
+        allocate an IPv4 address to the A record by passing a dictionary
+        containing I(nios_next_ip) and I(CIDR network range). See example.
+ aliases:
+ - ipv4
+ type: str
+ ttl:
+ description:
+ - Configures the TTL to be associated with this A record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an A record from the system
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an A record name
+ community.general.nios_a_record:
+ name: {new_name: a_new.ansible.com, old_name: a.ansible.com}
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Dynamically add a record to next available ip
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: {nios_next_ip: 192.168.10.0/24}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_A_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
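+        # Fields flagged with ib_req=True are used to look up the existing
+        # object in WAPI (the same flag drives the object filter built in
+        # nios_fixed_address further below).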
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_A_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py
new file mode 100644
index 00000000..b7caecee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_aaaa_record.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_aaaa_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS AAAA records
+description:
+ - Adds and/or removes instances of AAAA record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this AAAA record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for this AAAA record.
+ aliases:
+ - ipv6
+ ttl:
+ description:
+ - Configures the TTL to be associated with this AAAA record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an AAAA record from the system
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an AAAA record name
+ community.general.nios_aaaa_record:
+ name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com}
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_AAAA_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py
new file mode 100644
index 00000000..2863d148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_cname_record.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_cname_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS CNAME records
+description:
+ - Adds and/or removes instances of CNAME record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:cname) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this CNAME record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ canonical:
+ description:
+ - Configures the canonical name for this CNAME record.
+ aliases:
+ - cname
+ ttl:
+ description:
+ - Configures the TTL to be associated with this CNAME record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a CNAME record from the system
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_CNAME_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ canonical=dict(aliases=['cname'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_CNAME_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py
new file mode 100644
index 00000000..1bb8d068
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_dns_view.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_dns_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS views
+description:
+ - Adds and/or removes instances of DNS view objects from
+ Infoblox NIOS servers. This module manages NIOS C(view) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of DNS view object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ aliases:
+ - view
+ network_view:
+ description:
+ - Specifies the name of the network view to assign the configured
+ DNS view to. The network view must already be configured on the
+ target system.
+ default: default
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for dns view
+ community.general.nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the dns view instance
+ community.general.nios_dns_view:
+ name: {new_name: ansible-dns-new, old_name: ansible-dns}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['view'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_DNS_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py
new file mode 100644
index 00000000..a46db04f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_fixed_address.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_fixed_address
+author: "Sumit Jaiswal (@sjaiswal)"
+short_description: Configure Infoblox NIOS DHCP Fixed Address
+description:
+ - A fixed address is a specific IP address that a DHCP server
+ always assigns when a lease request comes from a particular
+ MAC address of the client.
+  - Supports both the IPv4 and IPv6 internet protocols.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+      - Specifies the hostname with which the fixed DHCP IP address is stored
+        for the respective MAC address.
+ required: true
+ ipaddr:
+ description:
+ - IPV4/V6 address of the fixed address.
+ required: true
+ mac:
+ description:
+ - The MAC address of the interface.
+ required: true
+ network:
+ description:
+ - Specifies the network range in which ipaddr exists.
+ required: true
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: false
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a ipv6 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv6_fixed
+ ipaddr: fe80::1/10
+ mac: 08:6d:41:e8:fd:e8
+ network: fe80::/64
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for a ipv4 fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. The use_option field only applies
+ to special options that are displayed separately from other options and
+ have a use flag. This function removes the use_option flag from all
+ other options. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
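+    As an illustrative example (values are placeholders): an input item of
+    {'name': 'domain-name', 'value': 'ansible.com', 'use_option': True} is
+    passed through unchanged because 'domain-name' is a special option, while
+    {'name': 'time-offset', 'value': '3600', 'use_option': True} has its
+    'use_option' key removed before being appended to the result.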
+ '''
+ special_options = ['routers', 'router-templates', 'domain-name-servers',
+ 'domain-name', 'broadcast-address', 'broadcast-address-offset',
+ 'dhcp-lease-time', 'dhcp6.name-servers']
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+        if opt.get('name') not in special_options:
+ del opt['use_option']
+ options.append(opt)
+ return options
+
+
+def validate_ip_addr_type(ip, arg_spec, module):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type
+ '''
+ check_ip = ip.split('/')
+
+ if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv4addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
+ elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv6addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ name=dict(required=True),
+ ipaddr=dict(required=True, ib_req=True),
+ mac=dict(required=True, ib_req=True),
+ network=dict(required=True),
+ network_view=dict(default='default'),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ # to modify argument based on ipaddr type i.e. IPV4/IPV6
+ fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
+
+ wapi = WapiModule(module)
+
+ result = wapi.run(fixed_address_ip_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py
new file mode 100644
index 00000000..efab39de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_host_record.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_host_record
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS host records
+description:
+ - Adds and/or removes instances of host record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:host) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of host record object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this host record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ configure_for_dns:
+ description:
+      - Sets whether DNS is configured for this host record. If the user needs
+        to bypass DNS, set the value to false.
+ type: bool
+ required: false
+ default: true
+ aliases:
+ - dns
+ ipv4addrs:
+ description:
+ - Configures the IPv4 addresses for this host record. This argument
+ accepts a list of values (see suboptions)
+ aliases:
+ - ipv4
+ type: list
+ elements: dict
+ suboptions:
+ ipv4addr:
+ description:
+          - Configures the IPv4 address for the host record. Users can dynamically
+            allocate an IPv4 address to the host record by passing a dictionary
+            containing I(nios_next_ip) and I(CIDR network range). To add or
+            remove an IPv4 address from an existing record, use the I(add) and
+            I(remove) params. See examples.
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+          - Configure the host record over DHCP instead of DNS. If set to
+            true, the MAC address must also be provided.
+ required: false
+ aliases:
+ - dhcp
+ mac:
+ description:
+          - Configures the hardware MAC address for the host record. If DHCP is
+            set to true, the MAC address must be provided.
+ required: false
+ add:
+ description:
+          - Adds the IPv4 address to an existing host record.
+            Note that with I(add) the I(state) must be kept as I(present),
+            as the new IP address is allocated to an existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ remove:
+ description:
+          - Removes the IPv4 address from an existing host record.
+            Note that with I(remove) the I(state) must be changed to I(absent),
+            as the IP address is de-allocated from an existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ ipv6addrs:
+ description:
+ - Configures the IPv6 addresses for the host record. This argument
+ accepts a list of values (see options)
+ aliases:
+ - ipv6
+ type: list
+ elements: dict
+ suboptions:
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for the host record
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+          - Configure the host record over DHCP instead of DNS. If set to
+            true, the MAC address must also be provided.
+ required: false
+  aliases:
+    description:
+      - Configures an optional list of additional aliases to add to the host
+        record. These are equivalent to CNAMEs but held within a host
+        record. Must be in list format.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this host record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ aliases:
+ - cname.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a comment to an existing host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a host record from the system
+ community.general.nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an ipv4 host record
+ community.general.nios_host_record:
+ name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record bypassing DNS
+ community.general.nios_host_record:
+ name: new_host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record over DHCP
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Dynamically add host record to next available ip
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: {nios_next_ip: 192.168.10.0/24}
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add ip to host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.2
+ add: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove ip from host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ remove: true
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
+
+
+def ipaddr(module, key, filtered_keys=None):
+ ''' Transforms the input value into a struct supported by WAPI
+ This function will transform the input from the playbook into a struct
+ that is valid for WAPI in the form of:
+ {
+ ipv4addr: <value>,
+ mac: <value>
+ }
+ This function does not validate the values are properly formatted or in
+ the acceptable range, that is left to WAPI.
+ '''
+ filtered_keys = filtered_keys or list()
+ objects = list()
+ for item in module.params[key]:
+ objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
+ return objects
+
+
+def ipv4addrs(module):
+ return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
+
+
+def ipv6addrs(module):
+ return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4addr_spec = dict(
+ ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
+ mac=dict(required=False, ib_req=True),
+ add=dict(type='bool', required=False),
+ remove=dict(type='bool', required=False)
+ )
+
+ ipv6addr_spec = dict(
+ ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, ib_req=True),
+ mac=dict(required=False, ib_req=True)
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
+ ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
+ configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
+ aliases=dict(type='list'),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_HOST_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py
new file mode 100644
index 00000000..aff8ca93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_member.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_member
+author: "Krishna Vasudevan (@krisvasudevan)"
+short_description: Configure Infoblox NIOS members
+description:
+ - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ host_name:
+ description:
+ - Specifies the host name of the member to either add or remove from
+ the NIOS instance.
+ required: true
+ aliases:
+ - name
+ vip_setting:
+ description:
+ - Configures the network settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of the Grid Member
+ subnet_mask:
+ description:
+ - The subnet mask for the Grid Member
+ gateway:
+ description:
+ - The default gateway for the Grid Member
+ ipv6_setting:
+ description:
+ - Configures the IPv6 settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of the Grid Member
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix for the Grid Member
+ gateway:
+ description:
+ - The gateway address for the Grid Member
+ config_addr_type:
+ description:
+ - Address configuration type (IPV4/IPV6/BOTH)
+ default: IPV4
+ comment:
+ description:
+ - A descriptive comment of the Grid member.
+ extattrs:
+ description:
+ - Extensible attributes associated with the object.
+ enable_ha:
+ description:
+ - If set to True, the member has two physical nodes (HA pair).
+ type: bool
+ router_id:
+ description:
+      - Virtual router identifier. Provide this ID if "enable_ha" is set to "true". This is a unique VRID number (from 1 to 255) for the local subnet.
+ lan2_enabled:
+ description:
+ - When set to "true", the LAN2 port is enabled as an independent port or as a port for failover purposes.
+ type: bool
+ lan2_port_setting:
+ description:
+ - Settings for the Grid member LAN2 port if 'lan2_enabled' is set to "true".
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+          - If set to True, the LAN2 port has its own IP settings.
+ type: bool
+ network_setting:
+ description:
+          - If the 'enabled' field is set to True, this defines IPv4 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of LAN2
+ subnet_mask:
+ description:
+ - The subnet mask of LAN2
+ gateway:
+ description:
+ - The default gateway of LAN2
+ v6_network_setting:
+ description:
+          - If the 'enabled' field is set to True, this defines IPv6 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of LAN2
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of LAN2
+ gateway:
+ description:
+ - The gateway address of LAN2
+ platform:
+ description:
+ - Configures the Hardware Platform.
+ default: INFOBLOX
+ node_info:
+ description:
+      - Configures the node information list with a detailed status report on the operations of the Grid Member.
+ type: list
+ elements: dict
+ suboptions:
+ lan2_physical_setting:
+ description:
+ - Physical port settings for the LAN2 interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+              - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_ha_port_setting:
+ description:
+ - LAN/HA port settings for the node.
+ type: list
+ elements: dict
+ suboptions:
+ ha_ip_address:
+ description:
+ - HA IP address.
+ ha_port_setting:
+ description:
+ - Physical port settings for the HA interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+                  - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_port_setting:
+ description:
+ - Physical port settings for the LAN interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+                  - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ mgmt_ipv6addr:
+ description:
+ - Public IPv6 address for the LAN1 interface.
+ mgmt_lan:
+ description:
+ - Public IPv4 address for the LAN1 interface.
+ mgmt_network_setting:
+ description:
+ - Network settings for the MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of MGMT
+ subnet_mask:
+ description:
+ - The subnet mask of MGMT
+ gateway:
+ description:
+ - The default gateway of MGMT
+ v6_mgmt_network_setting:
+ description:
+ - The network settings for the IPv6 MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of MGMT
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of MGMT
+ gateway:
+ description:
+ - The gateway address of MGMT
+ mgmt_port_setting:
+ description:
+ - Settings for the member MGMT port.
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - Determines if MGMT port settings should be enabled.
+ type: bool
+ security_access_enabled:
+ description:
+ - Determines if security access on the MGMT port is enabled or not.
+ type: bool
+ vpn_enabled:
+ description:
+ - Determines if VPN on the MGMT port is enabled or not.
+ type: bool
+ upgrade_group:
+ description:
+ - The name of the upgrade group to which this Grid member belongs.
+ default: Default
+ use_syslog_proxy_setting:
+ description:
+      - Use flag for external_syslog_server_enable, syslog_servers, syslog_proxy_setting, and syslog_size.
+ type: bool
+ external_syslog_server_enable:
+ description:
+ - Determines if external syslog servers should be enabled
+ type: bool
+ syslog_servers:
+ description:
+ - The list of external syslog servers.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The server address.
+ category_list:
+ description:
+ - The list of all syslog logging categories.
+ connection_type:
+ description:
+          - The connection type for communicating with this server (STCP/TCP/UDP).
+ default: UDP
+ local_interface:
+ description:
+          - The local interface through which the appliance sends syslog messages to the syslog server (ANY/LAN/MGMT).
+ default: ANY
+ message_node_id:
+ description:
+          - Identifies the node in the syslog message (HOSTNAME/IP_HOSTNAME/LAN/MGMT).
+ default: LAN
+ message_source:
+ description:
+ - The source of syslog messages to be sent to the external syslog server.
+ default: ANY
+ only_category_list:
+ description:
+          - If set to True, the appliance forwards only the syslog messages that belong to the categories in category_list.
+ type: bool
+ port:
+ description:
+ - The port this server listens on.
+ default: 514
+ severity:
+ description:
+ - The severity filter. The appliance sends log messages of the specified severity and above to the external syslog server.
+ default: DEBUG
+ pre_provisioning:
+ description:
+ - Pre-provisioning information.
+ type: list
+ elements: dict
+ suboptions:
+ hardware_info:
+ description:
+ - An array of structures that describe the hardware being pre-provisioned.
+ type: list
+ elements: dict
+ suboptions:
+ hwmodel:
+ description:
+ - Hardware model
+ hwtype:
+ description:
+ - Hardware type.
+ licenses:
+ description:
+ - An array of license types.
+ create_token:
+ description:
+ - Flag for initiating a create token request for pre-provisioned members.
+ type: bool
+ default: False
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Add a member to the grid with IPv4 address
+ community.general.nios_member:
+ host_name: member01.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a HA member to the grid
+ community.general.nios_member:
+ host_name: memberha.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ enable_ha: true
+ router_id: 150
+ node_info:
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.70
+ mgmt_lan: 192.168.1.80
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.71
+ mgmt_lan: 192.168.1.81
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the member with pre-provisioning details specified
+ community.general.nios_member:
+ name: member01.localdomain
+ pre_provisioning:
+ - hardware_info:
+ - hwmodel: IB-VM-820
+ hwtype: IB-VNIOS
+ licenses:
+ - dns
+ - dhcp
+ - enterprise
+ - vnios
+ comment: "Updated by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the member
+ community.general.nios_member:
+ name: member01.localdomain
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MEMBER
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4_spec = dict(
+ address=dict(),
+ subnet_mask=dict(),
+ gateway=dict(),
+ )
+
+ ipv6_spec = dict(
+ virtual_ip=dict(),
+ cidr_prefix=dict(type='int'),
+ gateway=dict(),
+ )
+
+ port_spec = dict(
+ auto_port_setting_enabled=dict(type='bool'),
+ duplex=dict(),
+ speed=dict(),
+ )
+
+ lan2_port_spec = dict(
+ enabled=dict(type='bool'),
+ network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ ha_port_spec = dict(
+ ha_ip_address=dict(),
+ ha_port_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_port_setting=dict(type='list', elements='dict', options=port_spec),
+ mgmt_lan=dict(),
+ mgmt_ipv6addr=dict(),
+ )
+
+ node_spec = dict(
+ lan2_physical_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_ha_port_setting=dict(type='list', elements='dict', options=ha_port_spec),
+ mgmt_network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_mgmt_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ mgmt_port_spec = dict(
+ enabled=dict(type='bool'),
+ security_access_enabled=dict(type='bool'),
+ vpn_enabled=dict(type='bool'),
+ )
+
+ syslog_spec = dict(
+ address=dict(),
+ category_list=dict(type='list'),
+ connection_type=dict(default='UDP'),
+ local_interface=dict(default='ANY'),
+ message_node_id=dict(default='LAN'),
+ message_source=dict(default='ANY'),
+ only_category_list=dict(type='bool'),
+ port=dict(type='int', default=514),
+ severity=dict(default='DEBUG'),
+ )
+
+ hw_spec = dict(
+ hwmodel=dict(),
+ hwtype=dict(),
+ )
+
+ pre_prov_spec = dict(
+ hardware_info=dict(type='list', elements='dict', options=hw_spec),
+ licenses=dict(type='list'),
+ )
+
+ ib_spec = dict(
+ host_name=dict(required=True, aliases=['name'], ib_req=True),
+ vip_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ ipv6_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ config_addr_type=dict(default='IPV4'),
+ comment=dict(),
+ enable_ha=dict(type='bool', default=False),
+ router_id=dict(type='int'),
+ lan2_enabled=dict(type='bool', default=False),
+ lan2_port_setting=dict(type='list', elements='dict', options=lan2_port_spec),
+ platform=dict(default='INFOBLOX'),
+ node_info=dict(type='list', elements='dict', options=node_spec),
+ mgmt_port_setting=dict(type='list', elements='dict', options=mgmt_port_spec),
+ upgrade_group=dict(default='Default'),
+ use_syslog_proxy_setting=dict(type='bool'),
+ external_syslog_server_enable=dict(type='bool'),
+ syslog_servers=dict(type='list', elements='dict', options=syslog_spec),
+ pre_provisioning=dict(type='list', elements='dict', options=pre_prov_spec),
+ extattrs=dict(type='dict'),
+ create_token=dict(type='bool', default=False),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MEMBER, ib_spec)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
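+
+# Illustrative note, not part of the module logic: a minimal syslog_servers entry
+# only needs an address; the remaining suboptions fall back to the defaults
+# declared in syslog_spec above, e.g. (address is hypothetical):
+#
+#   syslog_servers:
+#     - address: 10.0.0.5   # connection_type: UDP, port: 514, severity: DEBUG,
+#                           # local_interface: ANY, message_source: ANY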
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py
new file mode 100644
index 00000000..ca1f1f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_mx_record.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_mx_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS MX records
+description:
+ - Adds and/or removes instances of MX record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:mx) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this MX record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ mail_exchanger:
+ description:
+ - Configures the mail exchanger FQDN for this MX record.
+ aliases:
+ - mx
+ preference:
+ description:
+ - Configures the preference (0-65535) for this MX record.
+ ttl:
+ description:
+      - Configures the TTL to be associated with this MX record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an MX record from the system
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MX_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ mail_exchanger=dict(aliases=['mx'], ib_req=True),
+ preference=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MX_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py
new file mode 100644
index 00000000..de57e692
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_naptr_record.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_naptr_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS NAPTR records
+description:
+ - Adds and/or removes instances of NAPTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this NAPTR record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ order:
+ description:
+ - Configures the order (0-65535) for this NAPTR record. This parameter
+ specifies the order in which the NAPTR rules are applied when
+ multiple rules are present.
+ preference:
+ description:
+ - Configures the preference (0-65535) for this NAPTR record. The
+ preference field determines the order NAPTR records are processed
+ when multiple records with the same order parameter are present.
+ replacement:
+ description:
+ - Configures the replacement field for this NAPTR record.
+ For nonterminal NAPTR records, this field specifies the
+ next domain name to look up.
+ services:
+ description:
+ - Configures the services field (128 characters maximum) for this
+ NAPTR record. The services field contains protocol and service
+ identifiers, such as "http+E2U" or "SIPS+D2T".
+ required: false
+ flags:
+ description:
+ - Configures the flags field for this NAPTR record. These control the
+ interpretation of the fields for an NAPTR record object. Supported
+ values for the flags field are "U", "S", "P" and "A".
+ required: false
+ regexp:
+ description:
+ - Configures the regexp field for this NAPTR record. This is the
+ regular expression-based rewriting rule of the NAPTR record. This
+ should be a POSIX compliant regular expression, including the
+ substitution rule and flags. Refer to RFC 2915 for the field syntax
+ details.
+ required: false
+ ttl:
+ description:
+ - Configures the TTL to be associated with this NAPTR record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a NAPTR record from the system
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ order=dict(type='int', ib_req=True),
+ preference=dict(type='int', ib_req=True),
+ replacement=dict(ib_req=True),
+ services=dict(),
+ flags=dict(),
+ regexp=dict(),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:naptr', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py
new file mode 100644
index 00000000..54b8dfb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network object
+description:
+ - Adds and/or removes instances of network objects from
+ Infoblox NIOS servers. This module manages NIOS C(network) objects
+ using the Infoblox WAPI interface over REST.
+  - Supports both IPv4 and IPv6 internet protocols.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ network:
+ description:
+ - Specifies the network to add or remove from the system. The value
+ should use CIDR notation.
+ required: true
+ aliases:
+ - name
+ - cidr
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: true
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure. The standard options are
+ C(router), C(router-templates), C(domain-name-servers), C(domain-name),
+ C(broadcast-address), C(broadcast-address-offset), C(dhcp-lease-time),
+ and C(dhcp6.name-servers).
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+          - The name of the space this DHCP option is associated with.
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ container:
+ description:
+      - If set to true, a network container object is created or removed
+        from the system instead of a network.
+ type: bool
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a network ipv4
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a network ipv6
+ community.general.nios_network:
+ network: fe80::/64
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for a network ipv4
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a network ipv4
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv6 network container
+ community.general.nios_network:
+ network: fe80::/64
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an IPv4 network container
+ community.general.nios_network:
+    network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK, NIOS_IPV6_NETWORK
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK_CONTAINER, NIOS_IPV6_NETWORK_CONTAINER
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+    that condition. It will also verify that at least one of `name` or `num` is
+    set in the structure, but it does not validate the option values themselves;
+    the remainder of the value validation is performed by WAPI.
+ '''
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ options.append(opt)
+ return options
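+
+# Illustrative note (not executed): given module.params['options'] ==
+#   [{'name': 'domain-name', 'num': None, 'value': 'ansible.com',
+#     'use_option': True, 'vendor_class': 'DHCP'}]
+# options() drops the None entry and returns
+#   [{'name': 'domain-name', 'value': 'ansible.com', 'use_option': True,
+#     'vendor_class': 'DHCP'}]
+# while an entry that carries neither 'name' nor 'num' fails the module.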
+
+
+def check_ip_addr_type(obj_filter, ib_spec):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox
+ network/networkcontainer type
+ '''
+
+ ip = obj_filter['network']
+ if 'container' in obj_filter and obj_filter['container']:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ del ib_spec['options'] # removing option argument as for network container it's not supported
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK_CONTAINER, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK_CONTAINER, ib_spec
+ else:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK, ib_spec
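+
+# Illustrative note (not executed): with obj_filter == {'network': '192.168.10.0/24',
+# 'network_view': 'default', 'container': True} the function returns
+# (NIOS_IPV4_NETWORK_CONTAINER, ib_spec) with 'container' and 'options' stripped
+# from ib_spec; for 'fe80::/64' without container it returns (NIOS_IPV6_NETWORK, ib_spec).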
+
+
+def check_vendor_specific_dhcp_option(module, ib_spec):
+ '''This function will check if the argument dhcp option belongs to vendor-specific and if yes then will remove
+ use_options flag which is not supported with vendor-specific dhcp options.
+ '''
+ for key, value in iteritems(ib_spec):
+ if isinstance(module.params[key], list):
+ temp_dict = module.params[key][0]
+ if 'num' in temp_dict:
+ if temp_dict['num'] in (43, 124, 125):
+ del module.params[key][0]['use_option']
+ return ib_spec
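+
+# Illustrative note (not executed): for module.params['options'] ==
+# [{'num': 43, 'value': '01:02', 'use_option': True}] the 'use_option' key of the
+# first entry is deleted, since DHCP options 43, 124 and 125 are vendor-specific
+# and do not support the use_option flag.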
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ network=dict(required=True, aliases=['name', 'cidr'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ container=dict(type='bool', ib_req=True)
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+    # build the object filter from the ib_req-flagged arguments (network, network_view, container)
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ network_type, ib_spec = check_ip_addr_type(obj_filter, ib_spec)
+
+ wapi = WapiModule(module)
+ # to check for vendor specific dhcp option
+ ib_spec = check_vendor_specific_dhcp_option(module, ib_spec)
+
+ result = wapi.run(network_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py
new file mode 100644
index 00000000..d13052b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_network_view.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network views
+description:
+ - Adds and/or removes instances of network view objects from
+ Infoblox NIOS servers. This module manages NIOS C(networkview) objects
+ using the Infoblox WAPI interface over REST.
+  - Updates instances of network view objects on Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+      - Specifies the name of the network view to add or remove from
+        the system. The name can also be updated by passing a dict
+        containing I(new_name) and I(old_name). See the examples.
+ required: true
+ aliases:
+ - network_view
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new network view
+ community.general.nios_network_view:
+ name: ansible
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for network view
+ community.general.nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the network view
+ community.general.nios_network_view:
+ name: ansible
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an existing network view
+ community.general.nios_network_view:
+ name: {new_name: ansible-new, old_name: ansible}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NETWORK_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['network_view'], ib_req=True),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NETWORK_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
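+
+# Illustrative note, not part of the module logic: renaming a view works by passing
+# a dict for name, as shown in the examples above, e.g.
+#   name: {new_name: ansible-new, old_name: ansible}
+# where old_name identifies the existing network view object.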
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py
new file mode 100644
index 00000000..bf2afd3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_nsgroup.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_nsgroup
+short_description: Configure Infoblox DNS Nameserver Groups
+extends_documentation_fragment:
+- community.general.nios
+
+author:
+ - Erich Birngruber (@ebirn)
+ - Sumit Jaiswal (@sjaiswal)
+description:
+  - Adds and/or removes nameserver groups from Infoblox NIOS servers.
+    This module manages NIOS C(nsgroup) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+options:
+ name:
+ description:
+ - Specifies the name of the NIOS nameserver group to be managed.
+ required: true
+ grid_primary:
+ description:
+ - This host is to be used as primary server in this nameserver group. It must be a grid member.
+        This option is required when setting I(use_external_primary) to C(false).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ grid_secondaries:
+ description:
+ - Configures the list of grid member hosts that act as secondary nameservers.
+        This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False)
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ is_grid_default:
+ description:
+ - If set to C(True) this nsgroup will become the default nameserver group for new zones.
+ type: bool
+ required: false
+ default: false
+ use_external_primary:
+ description:
+ - This flag controls whether the group is using an external primary nameserver.
+ Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
+ type: bool
+ required: false
+ default: false
+ external_primaries:
+ description:
+ - Configures a list of external nameservers (non-members of the grid).
+        This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+          - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ required: false
+ external_secondaries:
+ description:
+      - Allows providing a list of external secondary nameservers that are not members of the grid.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+          - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ choices: [present, absent]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Create simple infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Create infoblox nameserver group with external primaries
+ community.general.nios_nsgroup:
+ name: my-example-group
+ use_external_primary: true
+ comment: "this is my example nameserver group"
+ external_primaries: "{{ ext_nameservers }}"
+ grid_secondaries:
+ - name: infoblox-test.example.com
+ lead: True
+ preferred_primaries: "{{ ext_nameservers }}"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP
+
+
+# from infoblox documentation
+# Fields List
+# Field Type Req R/O Base Search
+# comment String N N Y : = ~
+# extattrs Extattr N N N ext
+# external_primaries [struct] N N N N/A
+# external_secondaries [struct] N N N N/A
+# grid_primary [struct] N N N N/A
+# grid_secondaries [struct] N N N N/A
+# is_grid_default Bool N N N N/A
+# is_multimaster Bool N Y N N/A
+# name String Y N Y : = ~
+# use_external_primary Bool N N N N/A
+
+
+def main():
+ '''entrypoint for module execution.'''
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ # cleanup tsig fields
+ def clean_tsig(ext):
+ if 'tsig_key' in ext and not ext['tsig_key']:
+ del ext['tsig_key']
+ if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
+ del ext['tsig_key_name']
+ if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
+ del ext['tsig_key_alg']
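+
+    # Illustrative note (not executed, values hypothetical): clean_tsig({'address': '10.0.0.5',
+    # 'name': 'ns1', 'tsig_key': '', 'tsig_key_name': '', 'tsig_key_alg': 'HMAC-MD5'})
+    # strips the unused TSIG fields in place, leaving {'address': '10.0.0.5', 'name': 'ns1'}.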
+
+ def clean_grid_member(member):
+ if member['preferred_primaries']:
+ for ext in member['preferred_primaries']:
+ clean_tsig(ext)
+ if member['enable_preferred_primaries'] is False:
+ del member['enable_preferred_primaries']
+ del member['preferred_primaries']
+ if member['lead'] is False:
+ del member['lead']
+ if member['grid_replicate'] is False:
+ del member['grid_replicate']
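+
+    # Illustrative note (not executed): clean_grid_member() additionally drops the keys
+    # that are still at their False/empty defaults (enable_preferred_primaries,
+    # preferred_primaries, lead, grid_replicate), so only explicitly enabled settings
+    # remain in the member dict handed to the WAPI layer.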
+
+ def ext_primaries_transform(module):
+ if module.params['external_primaries']:
+ for ext in module.params['external_primaries']:
+ clean_tsig(ext)
+ return module.params['external_primaries']
+
+ def ext_secondaries_transform(module):
+ if module.params['external_secondaries']:
+ for ext in module.params['external_secondaries']:
+ clean_tsig(ext)
+ return module.params['external_secondaries']
+
+ def grid_primary_preferred_transform(module):
+ for member in module.params['grid_primary']:
+ clean_grid_member(member)
+ return module.params['grid_primary']
+
+ def grid_secondaries_preferred_primaries_transform(module):
+ for member in module.params['grid_secondaries']:
+ clean_grid_member(member)
+ return module.params['grid_secondaries']
+
+ extserver_spec = dict(
+ address=dict(required=True, ib_req=True),
+ name=dict(required=True, ib_req=True),
+ stealth=dict(type='bool', default=False),
+ tsig_key=dict(no_log=True),
+ tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
+ tsig_key_name=dict(required=True)
+ )
+
+ memberserver_spec = dict(
+ name=dict(required=True, ib_req=True),
+ enable_preferred_primaries=dict(type='bool', default=False),
+ grid_replicate=dict(type='bool', default=False),
+ lead=dict(type='bool', default=False),
+ preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
+ stealth=dict(type='bool', default=False),
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_primary_preferred_transform),
+ grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_secondaries_preferred_primaries_transform),
+ external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
+ external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
+ transform=ext_secondaries_transform),
+ is_grid_default=dict(type='bool', default=False),
+ use_external_primary=dict(type='bool', default=False),
+ extattrs=dict(),
+ comment=dict(),
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NSGROUP, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py
new file mode 100644
index 00000000..96fb175b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_ptr_record.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_ptr_record
+author: "Trebuchet Clement (@clementtrebuchet)"
+short_description: Configure Infoblox NIOS PTR records
+description:
+ - Adds and/or removes instances of PTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - The name of the DNS PTR record in FQDN format to add or remove from
+ the system.
+        The field is required only for a PTR object in a Forward Mapping Zone.
+ required: false
+ view:
+ description:
+      - Sets the DNS view to associate this PTR record with. The DNS
+ view must already be configured on the system
+ required: false
+ aliases:
+ - dns_view
+ ipv4addr:
+ description:
+ - The IPv4 Address of the record. Mutually exclusive with the ipv6addr.
+ aliases:
+ - ipv4
+ ipv6addr:
+ description:
+ - The IPv6 Address of the record. Mutually exclusive with the ipv4addr.
+ aliases:
+ - ipv6
+ ptrdname:
+ description:
+ - The domain name of the DNS PTR record in FQDN format.
+ ttl:
+ description:
+ - Time To Live (TTL) value for the record.
+ A 32-bit unsigned integer that represents the duration, in seconds, that the record is valid (cached).
+ Zero indicates that the record should not be cached.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance. Maximum 256 characters.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Create a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_PTR_RECORD
+
+
+def main():
+ # Module entry point
+ ib_spec = dict(
+ name=dict(required=False),
+ view=dict(aliases=['dns_view'], ib_req=True),
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+ ptrdname=dict(ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ mutually_exclusive = [('ipv4addr', 'ipv6addr')]
+ required_one_of = [
+ ['ipv4addr', 'ipv6addr']
+ ]
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ required_one_of=required_one_of)
+
+ if module.params['ipv4addr']:
+ del ib_spec['ipv6addr']
+ elif module.params['ipv6addr']:
+ del ib_spec['ipv4addr']
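+
+    # Illustrative note: whichever address family was not supplied is removed from
+    # ib_spec above, so the request built by WapiModule only carries the address
+    # argument (ipv4addr or ipv6addr) that was actually provided.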
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_PTR_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py
new file mode 100644
index 00000000..c519c191
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_srv_record.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_srv_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS SRV records
+description:
+ - Adds and/or removes instances of SRV record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:srv) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this SRV record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ port:
+ description:
+ - Configures the port (0-65535) of this SRV record.
+ priority:
+ description:
+ - Configures the priority (0-65535) for this SRV record.
+ target:
+ description:
+ - Configures the target FQDN for this SRV record.
+ weight:
+ description:
+ - Configures the weight (0-65535) for this SRV record.
+ ttl:
+ description:
+      - Configures the TTL to be associated with this SRV record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an SRV record from the system
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ port=dict(type='int', ib_req=True),
+ priority=dict(type='int', ib_req=True),
+ target=dict(ib_req=True),
+ weight=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_SRV_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py
new file mode 100644
index 00000000..0dcdbadb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_txt_record.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS txt records
+description:
+ - Adds and/or removes instances of txt record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this TXT record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this TXT record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text Record Exists
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text Record does not exist
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
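+ # Illustrative sketch only (hypothetical record name and text value): quoting
+ # the text, as described in the option documentation above, preserves embedded
+ # spaces in the stored TXT record.
+ - name: Ensure a text record whose value contains spaces
+   community.general.nios_txt_record:
+     name: spf.txt.record.com
+     text: '"v=spf1 include:mail.example.com ~all"'
+     state: present
+     view: External
+     provider:
+       host: "{{ inventory_hostname_short }}"
+       username: admin
+       password: admin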
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+ text=dict(ib_req=True),
+ ttl=dict(type='int'),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:txt', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py
new file mode 100644
index 00000000..8a7607fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nios/nios_zone.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+ specified DNS view must already exist on the running NIOS instance
+ prior to configuring zones.
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ required: true
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ required: true
+ ns_group:
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration
+ type: bool
+ zone_format:
+ description:
+ - Create an authoritative Reverse-Mapping Zone, which is an area of network
+ space for which one or more name servers (primary and secondary) have the
+ responsibility to respond to address-to-name queries. It supports
+ reverse-mapping zones for both IPv4 and IPv6 addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a zone on the system using grid primary and secondaries
+ community.general.nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a zone on the system using a name server group
+ community.general.nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV6 zone format
+ community.general.nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment and ext attributes for an existing zone
+ community.general.nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns zone
+ community.general.nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the reverse mapping dns zone from the system with IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+
+ ib_spec = dict(
+ fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
+ zone_format=dict(default='FORWARD', ib_req=False),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ grid_primary=dict(type='list', elements='dict', options=grid_spec),
+ grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
+ ns_group=dict(),
+ restart_if_needed=dict(type='bool'),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['ns_group', 'grid_primary'],
+ ['ns_group', 'grid_secondaries']
+ ])
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_ZONE, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
new file mode 100644
index 00000000..60626294
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nmcli.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- nmcli
+description:
+ - 'Manage the network devices. Create, modify and manage various connection and device types, e.g. ethernet, teams, bonds, vlans etc.'
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+options:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated
+ type: bool
+ default: yes
+ conn_name:
+ description:
+ - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+ type: str
+ required: true
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection will only be applicable to this interface name.
+ - A special value of C('*') can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+ - This parameter defaults to C(conn_name) when left unset.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type C(generic) is added in Ansible 2.5.
+ type: str
+ choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+ mode:
+ description:
+ - This is the type of device or network connection that you wish to create for a bond, team or bridge.
+ type: str
+ choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+ default: balance-rr
+ master:
+ description:
+ - Master (ifname, connection UUID, or conn_name) of the bridge, team or bond master connection profile.
+ type: str
+ ip4:
+ description:
+ - The IPv4 address to this interface.
+ - Use the format C(192.0.2.24/24).
+ type: str
+ gw4:
+ description:
+ - The IPv4 gateway for this interface.
+ - Use the format C(192.0.2.1).
+ type: str
+ dns4:
+ description:
+ - A list of up to 3 dns servers.
+ - IPv4 format e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ ip6:
+ description:
+ - The IPv6 address to this interface.
+ - Use the format C(abbe::cafe).
+ type: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format C(2001:db8::1).
+ type: str
+ dns6:
+ description:
+ - A list of up to 3 dns servers.
+ - IPv6 format e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ mtu:
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+ - This parameter defaults to C(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
+ - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to C(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: yes
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - This is only used with bridge - MAC address of the bridge.
+ - Note this requires a recent kernel feature, originally introduced in the 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
+ type: bool
+ default: yes
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with IPIP/SIT - parent device of this IPIP/SIT tunnel, can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT local IP address.
+ type: str
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# ```
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### groups_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+ - name: Try nmcli add teams-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+###### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: yes
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+# - 0 Success - indicates the operation succeeded
+# - 1 Unknown or unspecified error
+# - 2 Invalid user input, wrong nmcli invocation
+# - 3 Timeout expired (see --wait option)
+# - 4 Connection activation failed
+# - 5 Connection deactivation failed
+# - 6 Disconnecting device failed
+# - 7 Connection deletion failed
+# - 8 NetworkManager is not running
+# - 9 nmcli and NetworkManager versions mismatch
+# - 10 Connection, device, or access point does not exist.
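+#
+# A minimal sketch (hypothetical connection name) of registering the module
+# result so a failing nmcli call can be inspected instead of aborting the play:
+#
+# - name: Ensure the bridge connection exists, continue on failure
+#   community.general.nmcli:
+#     type: bridge
+#     conn_name: br0
+#     state: present
+#   register: nmcli_result
+#   ignore_errors: yes
+#
+# - name: Show the nmcli module output when the previous task failed
+#   ansible.builtin.debug:
+#     var: nmcli_result
+#   when: nmcli_result is failed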
+'''
+
+RETURN = r"""#
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+import re
+
+
+class NmcliModuleError(Exception):
+ pass
+
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+ A subclass may wish to override the following action methods:-
+ - create_connection()
+ - delete_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.dns4 = module.params['dns4']
+ self.dns4_search = module.params['dns4_search']
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.dns6 = module.params['dns6']
+ self.dns6_search = module.params['dns6_search']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.primary = module.params['primary']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ self.hairpin = module.params['hairpin']
+ self.path_cost = module.params['path_cost']
+ self.mac = module.params['mac']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
+ self.vxlan_id = module.params['vxlan_id']
+ self.vxlan_local = module.params['vxlan_local']
+ self.vxlan_remote = module.params['vxlan_remote']
+ self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+ self.ip_tunnel_local = module.params['ip_tunnel_local']
+ self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+ self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+ self.dhcp_client_id = module.params['dhcp_client_id']
+
+ if self.ip4:
+ self.ipv4_method = 'manual'
+ else:
+ # supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv4_method = None
+
+ if self.ip6:
+ self.ipv6_method = 'manual'
+ else:
+ # supported values for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv6_method = None
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ if isinstance(cmd, list):
+ cmd = [to_text(item) for item in cmd]
+ else:
+ cmd = to_text(cmd)
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def connection_options(self, detect_change=False):
+ # Options common to multiple connection types.
+ options = {
+ 'connection.autoconnect': self.autoconnect,
+ }
+
+ # IP address options.
+ if self.ip_conn_type:
+ options.update({
+ 'ipv4.addresses': self.ip4,
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv6.addresses': self.ip6,
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.method': self.ipv6_method,
+ })
+
+ # Layer 2 options.
+ if self.mac_conn_type:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ elif self.type == 'bridge-slave':
+ options.update({
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.tunnel_conn_type:
+ options.update({
+ 'ip-tunnel.local': self.ip_tunnel_local,
+ 'ip-tunnel.mode': self.type,
+ 'ip-tunnel.parent': self.ip_tunnel_dev,
+ 'ip-tunnel.remote': self.ip_tunnel_remote,
+ })
+ elif self.type == 'vlan':
+ options.update({
+ 'vlan.id': self.vlanid,
+ 'vlan.parent': self.vlandev,
+ })
+ elif self.type == 'vxlan':
+ options.update({
+ 'vxlan.id': self.vxlan_id,
+ 'vxlan.local': self.vxlan_local,
+ 'vxlan.remote': self.vxlan_remote,
+ })
+
+ # Convert settings values based on the situation.
+ for setting, value in options.items():
+ setting_type = self.settings_type(setting)
+ convert_func = None
+ if setting_type is bool:
+ # Convert all bool options to yes/no.
+ convert_func = self.bool_to_string
+ if detect_change:
+ if setting in ('vlan.id', 'vxlan.id'):
+ # Convert VLAN/VXLAN IDs to text when detecting changes.
+ convert_func = to_text
+ elif setting == self.mtu_setting:
+ # MTU is 'auto' by default when detecting changes.
+ convert_func = self.mtu_to_string
+ elif setting_type is list:
+ # Convert lists to strings for nmcli create/modify commands.
+ convert_func = self.list_to_string
+
+ if callable(convert_func):
+ options[setting] = convert_func(options[setting])
+
+ return options
+
+ @property
+ def ip_conn_type(self):
+ return self.type in (
+ 'bond',
+ 'bridge',
+ 'ethernet',
+ 'generic',
+ 'team',
+ 'vlan',
+ )
+
+ @property
+ def mac_conn_type(self):
+ return self.type == 'bridge'
+
+ @property
+ def mac_setting(self):
+ if self.type == 'bridge':
+ return 'bridge.mac-address'
+ else:
+ return '802-3-ethernet.cloned-mac-address'
+
+ @property
+ def mtu_conn_type(self):
+ return self.type in (
+ 'ethernet',
+ 'team-slave',
+ )
+
+ @property
+ def mtu_setting(self):
+ return '802-3-ethernet.mtu'
+
+ @staticmethod
+ def mtu_to_string(mtu):
+ if not mtu:
+ return 'auto'
+ else:
+ return to_text(mtu)
+
+ @property
+ def slave_conn_type(self):
+ return self.type in (
+ 'bond-slave',
+ 'bridge-slave',
+ 'team-slave',
+ )
+
+ @property
+ def tunnel_conn_type(self):
+ return self.type in (
+ 'ipip',
+ 'sit',
+ )
+
+ @staticmethod
+ def bool_to_string(boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ @staticmethod
+ def list_to_string(lst):
+ return ",".join(lst or [""])
+
+ @staticmethod
+ def settings_type(setting):
+ if setting in ('bridge.stp',
+ 'bridge-port.hairpin-mode',
+ 'connection.autoconnect'):
+ return bool
+ elif setting in ('ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv6.dns',
+ 'ipv6.dns-search'):
+ return list
+ return str
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items():
+ if value is not None:
+ cmd.extend([key, value])
+
+ return self.execute_command(cmd)
+
+ def create_connection(self):
+ status = self.connection_update('create')
+ if self.create_connection_up:
+ status = self.up_connection()
+ return status
+
+ @property
+ def create_connection_up(self):
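+ # Decides whether create_connection() should also bring the connection up:
+ # bond/ethernet connections with MTU or DNS options set, and team connections
+ # with DNS options set, are activated right after creation.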
+ if self.type in ('bond', 'ethernet'):
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ elif self.type == 'team':
+ if (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ return False
+
+ def remove_connection(self):
+ # self.down_connection()
+ cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ return self.connection_update('modify')
+
+ def show_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'show', self.conn_name]
+
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$')
+
+ conn_info = dict()
+ for line in out.splitlines():
+ pair = line.split(':', 1)
+ key = pair[0].strip()
+ key_type = self.settings_type(key)
+ if key and len(pair) > 1:
+ raw_value = pair[1].lstrip()
+ if raw_value == '--':
+ conn_info[key] = None
+ elif key == 'bond.options':
+ # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
+ opts = raw_value.split(',')
+ for opt in opts:
+ alias_pair = opt.split('=', 1)
+ if len(alias_pair) > 1:
+ alias_key = alias_pair[0]
+ alias_value = alias_pair[1]
+ conn_info[alias_key] = alias_value
+ elif key_type == list:
+ conn_info[key] = [s.strip() for s in raw_value.split(',')]
+ else:
+ m_enum = p_enum_value.match(raw_value)
+ if m_enum is not None:
+ value = m_enum.group(1)
+ else:
+ value = raw_value
+ conn_info[key] = value
+
+ return conn_info
+
+ def _compare_conn_params(self, conn_info, options):
+ # See nmcli(1) for details
+ param_alias = {
+ 'type': 'connection.type',
+ 'con-name': 'connection.id',
+ 'autoconnect': 'connection.autoconnect',
+ 'ifname': 'connection.interface-name',
+ 'mac': self.mac_setting,
+ 'master': 'connection.master',
+ 'slave-type': 'connection.slave-type',
+ }
+
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
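+ # Options the task did not set (or set to an empty value) are skipped, so
+ # change detection only compares explicitly requested parameters against
+ # the live connection.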
+ if not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ elif key in param_alias:
+ real_key = param_alias[key]
+ if real_key in conn_info:
+ current_value = conn_info[real_key]
+ else:
+ # alias parameter does not exist
+ current_value = None
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if sorted(current_value) != sorted(value):
+ changed = True
+ else:
+ if current_value != to_text(value):
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+ # Parsing argument file
+ module = AnsibleModule(
+ argument_spec=dict(
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
+ ip4=dict(type='str'),
+ gw4=dict(type='str'),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='str'),
+ gw6=dict(type='str'),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool', default=True),
+ path_cost=dict(type='int', default=100),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ nmcli = Nmcli(module)
+
+ (rc, out, err) = (None, '', '')
+ result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="Please specify a name for the connection")
+ # team-slave checks
+ if nmcli.type == 'team-slave':
+ if nmcli.master is None:
+ nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
+ if nmcli.ifname is None:
+ nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
+
+ try:
+ if nmcli.state == 'absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state == 'present':
+ if nmcli.connection_exists():
+ changed, diff = nmcli.is_connection_changed()
+ if module._diff:
+ result['diff'] = diff
+
+ if changed:
+ # modify connection (note: this function is check mode aware)
+ # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+ result['Exists'] = 'Connections do exist so we are modifying them'
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.modify_connection()
+ else:
+ result['Exists'] = 'Connections already exist and no changes made'
+ if module.check_mode:
+ module.exit_json(changed=False, **result)
+ if not nmcli.connection_exists():
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+ except NmcliModuleError as e:
+ module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py
new file mode 100644
index 00000000..9d4a5186
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/nsupdate.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+
+# (c) 2016, Marcin Skarbek <github@skarbek.name>
+# (c) 2016, Andreas Olsson <andreas@arrakis.se>
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+#
+# This module was ported from https://github.com/mskarbek/ansible-nsupdate
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nsupdate
+
+short_description: Manage DNS records.
+description:
+ - Create, update and remove DNS records using DDNS updates
+requirements:
+ - dnspython
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ port:
+ description:
+ - Use this TCP port when connecting to C(server).
+ default: 53
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS C(server)
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ key_algorithm:
+ description:
+ - Specify key algorithm used by C(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
+ 'hmac-sha512']
+ default: 'hmac-md5'
+ zone:
+ description:
+ - DNS record will be modified on this C(zone).
+ - When omitted DNS will be queried to attempt finding the correct zone.
+ - Starting with Ansible 2.7 this parameter is optional.
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ value:
+ description:
+ - Sets the record value.
+ protocol:
+ description:
+ - Sets the transport protocol (TCP or UDP). TCP is the recommended and more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "puppet"
+ type: "CNAME"
+ state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ value: "ansible.example.org."
+ state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: Whether the module has modified the record
+ returned: success
+ type: bool
+record:
+ description: DNS record
+ returned: success
+ type: str
+ sample: 'ansible'
+ttl:
+ description: DNS record TTL
+ returned: success
+ type: int
+ sample: 86400
+type:
+ description: DNS record type
+ returned: success
+ type: str
+ sample: 'CNAME'
+value:
+ description: DNS record value(s)
+ returned: success
+ type: list
+ sample: '192.168.1.1'
+zone:
+ description: DNS record zone
+ returned: success
+ type: str
+ sample: 'example.org.'
+dns_rc:
+ description: dnspython return code
+ returned: always
+ type: int
+ sample: 4
+dns_rc_str:
+ description: dnspython return code (string representation)
+ returned: always
+ type: str
+ sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+ import dns.update
+ import dns.query
+ import dns.tsigkeyring
+ import dns.message
+ import dns.resolver
+
+ HAVE_DNSPYTHON = True
+except ImportError:
+ DNSPYTHON_IMP_ERR = traceback.format_exc()
+ HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class RecordManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['key_name']:
+ try:
+ self.keyring = dns.tsigkeyring.from_text({
+ module.params['key_name']: module.params['key_secret']
+ })
+ except TypeError:
+ module.fail_json(msg='Missing key_secret')
+ except binascii_error as e:
+ module.fail_json(msg='TSIG key error: %s' % to_native(e))
+ else:
+ self.keyring = None
+
+ if module.params['key_algorithm'] == 'hmac-md5':
+ self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+ else:
+ self.algorithm = module.params['key_algorithm']
+
+ if module.params['zone'] is None:
+ if module.params['record'][-1] != '.':
+ self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+ self.zone = self.lookup_zone()
+ else:
+ self.zone = module.params['zone']
+
+ if self.zone[-1] != '.':
+ self.zone += '.'
+
+ if module.params['record'][-1] != '.':
+ self.fqdn = module.params['record'] + '.' + self.zone
+ else:
+ self.fqdn = module.params['record']
+
+ if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None:
+ self.value = list(map(self.txt_helper, self.module.params['value']))
+ else:
+ self.value = self.module.params['value']
+
+ self.dns_rc = 0
+
+ def txt_helper(self, entry):
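+ # TXT record data is sent as a quoted string; wrap the entry in double
+ # quotes unless the caller already quoted it.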
+ if entry[0] == '"' and entry[-1] == '"':
+ return entry
+ return '"{text}"'.format(text=entry)
+
+ def lookup_zone(self):
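+ # Discover the enclosing zone by walking up the record name one label at a
+ # time: query the configured server for the SOA of each candidate and return
+ # the candidate once the server's authority section names it directly.
+ # Refusals, timeouts, or running out of parent labels abort the module.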
+ name = dns.name.from_text(self.module.params['record'])
+ while True:
+ query = dns.message.make_query(name, dns.rdatatype.SOA)
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
+ self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
+ self.module.params['server'], self.module.params['record']))
+ try:
+ zone = lookup.authority[0].name
+ if zone == name:
+ return zone.to_text()
+ except IndexError:
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
+
+ def __do_update(self, update):
+ response = None
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ return response
+
+ def create_or_update_record(self):
+ result = {'changed': False, 'failed': False}
+
+ exists = self.record_exists()
+ if exists in [0, 2]:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ if exists == 0:
+ self.dns_rc = self.create_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
+
+ elif exists == 2:
+ self.dns_rc = self.modify_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ else:
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ return result
+
+ def create_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ response = self.__do_update(update)
+ return dns.message.Message.rcode(response)
+
+ def modify_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+
+ return dns.message.Message.rcode(response)
+
+ def remove_record(self):
+ result = {'changed': False, 'failed': False}
+
+ if self.record_exists() == 0:
+ return result
+
+ # In check mode the record exists, so report a change without applying it.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
+ else:
+ result['changed'] = True
+
+ return result
+
+ def record_exists(self):
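+ # Probe the server with prerequisite-only dynamic updates and return:
+ #   0 - no RRset with this name and type exists
+ #   1 - the RRset exists and already matches the requested values and TTL
+ #       (for state=absent, existence of the name/type is enough)
+ #   2 - the RRset exists but its values or TTL differ and must be modified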
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ try:
+ update.present(self.module.params['record'], self.module.params['type'])
+ except dns.rdatatype.UnknownRdatatype as e:
+ self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.module.params['state'] == 'absent':
+ return 1
+ for entry in self.value:
+ try:
+ update.present(self.module.params['record'], self.module.params['type'], entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.ttl_changed():
+ return 2
+ else:
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+ def ttl_changed(self):
+ query = dns.message.make_query(self.fqdn, self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ if lookup.rcode() != dns.rcode.NOERROR:
+ self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
+
+ current_ttl = lookup.answer[0].ttl
+ return current_ttl != self.module.params['ttl']
+
+
+def main():
+ tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
+ 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ server=dict(required=True, type='str'),
+ port=dict(required=False, default=53, type='int'),
+ key_name=dict(required=False, type='str'),
+ key_secret=dict(required=False, type='str', no_log=True),
+ key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(required=False, default=None, type='str'),
+ record=dict(required=True, type='str'),
+ type=dict(required=False, default='A', type='str'),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False, default=None, type='list'),
+ protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DNSPYTHON:
+ module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)
+
+ if len(module.params["record"]) == 0:
+ module.fail_json(msg='record cannot be empty.')
+
+ record = RecordManager(module)
+ result = {}
+ if module.params["state"] == 'absent':
+ result = record.remove_record()
+ elif module.params["state"] == 'present':
+ result = record.create_or_update_record()
+
+ result['dns_rc'] = record.dns_rc
+ result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ result['record'] = dict(zone=record.zone,
+ record=module.params['record'],
+ type=module.params['type'],
+ ttl=module.params['ttl'],
+ value=record.value)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py
new file mode 100644
index 00000000..4e6738cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/omapi_host.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# copyright: (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: omapi_host
+short_description: Set up OMAPI hosts
+description: Manage OMAPI hosts on compatible DHCPd servers.
+requirements:
+ - pypureomapi
+author:
+- Loic Blot (@nerzhul)
+options:
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if state=present).
+ type: str
+ aliases: [ name ]
+ host:
+ description:
+      - Sets the OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+      - Sets the TSIG key name for authenticating against the OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+      - Sets the TSIG key content for authenticating against the OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+      - Attach a list of OMAPI DHCP statements to the host lease (without the ending semicolon).
+ type: list
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: no
+
+'''
+EXAMPLES = r'''
+- name: Add a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.98.4.55
+ macaddr: 44:dd:ab:dd:11:44
+ name: server01
+ ip: 192.168.88.99
+ ddns: yes
+ statements:
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
+ state: present
+
+- name: Remove a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.1.1.1
+ macaddr: 00:66:ab:dd:11:44
+ state: absent
+'''
+
+RETURN = r'''
+lease:
+ description: dictionary containing host information
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+      description: IP address, if there is one.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: hardware type, generally '1'
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: hostname
+ returned: success
+ type: str
+ sample: 'mydesktop'
+'''
+
+import binascii
+import socket
+import struct
+import traceback
+
+PUREOMAPI_IMP_ERR = None
+try:
+ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
+ from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
+ from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
+ pureomapi_found = True
+except ImportError:
+ PUREOMAPI_IMP_ERR = traceback.format_exc()
+ pureomapi_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+
+
+class OmapiHostManager:
+ def __init__(self, module):
+ self.module = module
+ self.omapi = None
+ self.connect()
+
+ def connect(self):
+ try:
+ self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
+ self.module.params['key'])
+ except binascii.Error:
+ self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
+ except OmapiError as e:
+ self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
+ "are valid. Exception was: %s" % to_native(e))
+ except socket.error as e:
+ self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+ def get_host(self, macaddr):
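+        # Look up an existing host object by MAC address; OMAPI replies with an UPDATE opcode when the object exists.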
+ msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+ msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+ msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ return None
+ return response
+
+ @staticmethod
+ def unpack_facts(obj):
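+        # Convert packed OMAPI byte values (MAC, IP address, hardware type) into readable Python values.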
+ result = dict(obj)
+        if to_bytes('hardware-address') in result:
+            result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
+
+        if to_bytes('ip-address') in result:
+            result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
+
+        if to_bytes('hardware-type') in result:
+            result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])[0]
+
+ return result
+
+ def setup_host(self):
+ if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+ self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")
+
+ msg = None
+ host_response = self.get_host(self.module.params['macaddr'])
+        # If the host was not found by MAC address, build a create message
+ if host_response is None:
+ msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+ msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
+ msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
+ msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
+ if self.module.params['ip'] is not None:
+ msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
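+            # Build the DHCP statements string: an optional ddns-hostname entry plus any user-supplied statements, each terminated with '; '.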
+ stmt_join = ""
+ if self.module.params['ddns']:
+ stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+ try:
+ if len(self.module.params['statements']) > 0:
+ stmt_join += "; ".join(self.module.params['statements'])
+ stmt_join += "; "
+ except TypeError as e:
+ self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+ if len(stmt_join) > 0:
+ msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+ # Forge update message
+ else:
+ response_obj = self.unpack_facts(host_response.obj)
+ fields_to_update = {}
+
+ if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+ # Name cannot be changed
+ if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+ self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+ "Please delete host and add new." %
+ (response_obj['name'], self.module.params['hostname']))
+
+ """
+ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
+ response_obj['statements'] != self.module.params['statements']:
+ with open('/tmp/omapi', 'w') as fb:
+ for (k,v) in iteritems(response_obj):
+ fb.writelines('statements: %s %s\n' % (k, v))
+ """
+ if len(fields_to_update) == 0:
+ self.module.exit_json(changed=False, lease=response_obj)
+ else:
+ msg = OmapiMessage.update(host_response.handle)
+ msg.update_object(fields_to_update)
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_STATUS:
+ self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True)
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+ def remove_host(self):
+ try:
+ self.omapi.del_host(self.module.params['macaddr'])
+ self.module.exit_json(changed=True)
+ except OmapiErrorNotFound:
+ self.module.exit_json()
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ host=dict(type='str', default="localhost"),
+ port=dict(type='int', default=7911),
+ key_name=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=True),
+ macaddr=dict(type='str', required=True),
+ hostname=dict(type='str', aliases=['name']),
+ ip=dict(type='str'),
+ ddns=dict(type='bool', default=False),
+ statements=dict(type='list', default=[]),
+ ),
+ supports_check_mode=False,
+ )
+
+ if not pureomapi_found:
+ module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)
+
+ if module.params['key'] is None or len(module.params["key"]) == 0:
+ module.fail_json(msg="'key' parameter cannot be empty.")
+
+ if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
+ module.fail_json(msg="'key_name' parameter cannot be empty.")
+
+ host_manager = OmapiHostManager(module)
+ try:
+ if module.params['state'] == 'present':
+ host_manager.setup_host()
+ elif module.params['state'] == 'absent':
+ host_manager.remove_host()
+ except ValueError as e:
+ module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py
new file mode 100644
index 00000000..661db460
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/net_tools/snmp_facts.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+  - Retrieve facts for a device using SNMP; the facts will be
+    inserted into the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+      - Set to the target SNMP server (normally C({{ inventory_hostname }})).
+ type: str
+ required: true
+ version:
+ description:
+ - SNMP Version to use, C(v2), C(v2c) or C(v3).
+ type: str
+ required: true
+ choices: [ v2, v2c, v3 ]
+ community:
+ description:
+ - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ type: str
+ level:
+ description:
+ - Authentication level.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ authNoPriv, authPriv ]
+ username:
+ description:
+ - Username for SNMPv3.
+ - Required if I(version) is C(v3).
+ type: str
+ integrity:
+ description:
+ - Hashing algorithm.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ md5, sha ]
+ authkey:
+ description:
+ - Authentication key.
+      - Required if I(version) is C(v3).
+ type: str
+ privacy:
+ description:
+ - Encryption algorithm.
+ - Required if I(level) is C(authPriv).
+ type: str
+ choices: [ aes, des ]
+ privkey:
+ description:
+ - Encryption key.
+ - Required if I(level) is C(authPriv).
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v2c
+ community: public
+  delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+ description: A textual description of the entity.
+ returned: success
+ type: str
+ sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+ description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+ returned: success
+ type: str
+ sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+ description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+ returned: success
+ type: int
+ sample: 42388
+ansible_syscontact:
+ description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success
+ type: str
+ sample: Me <me@example.org>
+ansible_sysname:
+ description: An administratively-assigned name for this managed node.
+ returned: success
+ type: str
+ sample: ubuntu-user
+ansible_syslocation:
+ description: The physical location of this node (e.g., `telephone closet, 3rd floor').
+ returned: success
+ type: str
+ sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+ description: List of all IPv4 addresses.
+ returned: success
+ type: list
+ sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+ description: Dictionary of each network interface and its metadata.
+ returned: success
+ type: dict
+ sample: {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
+ }
+'''
+
+import binascii
+import traceback
+from collections import defaultdict
+
+PYSNMP_IMP_ERR = None
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pysnmp.proto.rfc1905 import EndOfMibView
+ HAS_PYSNMP = True
+except Exception:
+ PYSNMP_IMP_ERR = traceback.format_exc()
+ HAS_PYSNMP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_text
+
+
+class DefineOid(object):
+
+ def __init__(self, dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
+def decode_hex(hexstring):
+
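+    # pysnmp may render octet strings as '0x...' hex; decode those back to text, leaving short values untouched.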
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return to_text(binascii.unhexlify(hexstring[2:]))
+ return hexstring
+
+
+def decode_mac(hexstring):
+
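+    # A MAC arrives as a 14-character '0x...' hex string; strip the prefix and return the bare hex digits.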
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ return hexstring
+
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options:
+ return adminstatus_options[int_adminstatus]
+ return ""
+
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options:
+ return operstatus_options[int_operstatus]
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(type='str'),
+ username=dict(type='str'),
+ level=dict(type='str', choices=['authNoPriv', 'authPriv']),
+ integrity=dict(type='str', choices=['md5', 'sha']),
+ privacy=dict(type='str', choices=['aes', 'des']),
+ authkey=dict(type='str', no_log=True),
+ privkey=dict(type='str', no_log=True),
+ ),
+ required_together=(
+ ['username', 'level', 'integrity', 'authkey'],
+ ['privacy', 'privkey'],
+ ),
+ supports_check_mode=False,
+ )
+
+ m_args = module.params
+
+ if not HAS_PYSNMP:
+ module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR)
+
+ cmdGen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] in ("v2", "v2c"):
+ if m_args['community'] is None:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] in ("v2", "v2c"):
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ def Tree():
+ return defaultdict(Tree)
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
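+    # Walk every returned row; the OID suffix identifies the interface index (IF-MIB) or the IP address (IP-MIB) the value belongs to.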
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ if isinstance(val, EndOfMibView):
+ continue
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+ if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+ interface_to_ipv4 = {}
+ for ipv4_network in ipv4_networks:
+ current_interface = ipv4_networks[ipv4_network]['interface']
+ current_network = {
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
+        if current_interface not in interface_to_ipv4:
+            interface_to_ipv4[current_interface] = []
+        interface_to_ipv4[current_interface].append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py
new file mode 100644
index 00000000..5d63a5b3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/netcup_dns.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: netcup_dns
+notes: []
+short_description: Manage Netcup DNS records
+description:
+ - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)"
+options:
+ api_key:
+ description:
+ - API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ api_password:
+ description:
+      - API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))
+ required: True
+ type: str
+ customer_id:
+ description:
+      - Netcup customer ID.
+ required: True
+ type: int
+ domain:
+ description:
+      - Domain name the records should be added to / removed from.
+ required: True
+ type: str
+ record:
+ description:
+      - Record to add or delete, supports wildcard (*). Default is C(@) (i.e. the zone name).
+ default: "@"
+ aliases: [ name ]
+ type: str
+ type:
+ description:
+ - Record type
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ required: True
+ type: str
+ value:
+ description:
+ - Record value
+ required: true
+ type: str
+ solo:
+ type: bool
+ default: False
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with C(state=present)
+ - This will delete all other records with the same record name and type.
+ priority:
+ description:
+ - Record priority. Required for C(type=MX)
+ required: False
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not
+ required: False
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+requirements:
+ - "nc-dnsapi >= 0.1.3"
+author: "Nicolai Buchwitz (@nbuchwitz)"
+
+'''
+
+EXAMPLES = '''
+- name: Create a record of type A
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+
+- name: Delete that record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ state: absent
+
+- name: Create a wildcard record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "*"
+ type: "A"
+ value: "127.0.1.1"
+
+- name: Set the MX record for example.com
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ type: "MX"
+ value: "mail.example.com"
+
+- name: Set a record and ensure that this is the only one
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ name: "demo"
+ domain: "example.com"
+ type: "AAAA"
+ value: "::1"
+ solo: true
+'''
+
+RETURN = '''
+records:
+ description: list containing all records
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: the record name
+ returned: success
+ type: str
+ sample: fancy-hostname
+ type:
+ description: the record type
+      returned: success
+ type: str
+ sample: A
+ value:
+ description: the record destination
+ returned: success
+ type: str
+ sample: 127.0.0.1
+ priority:
+ description: the record priority (only relevant if type=MX)
+ returned: success
+ type: int
+ sample: 0
+ id:
+ description: internal id of the record
+ returned: success
+ type: int
+ sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+NCDNSAPI_IMP_ERR = None
+try:
+ import nc_dnsapi
+ from nc_dnsapi import DNSRecord
+
+ HAS_NCDNSAPI = True
+except ImportError:
+ NCDNSAPI_IMP_ERR = traceback.format_exc()
+ HAS_NCDNSAPI = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_password=dict(required=True, no_log=True),
+ customer_id=dict(required=True, type='int'),
+
+ domain=dict(required=True),
+ record=dict(required=False, default='@', aliases=['name']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ value=dict(required=True),
+ priority=dict(required=False, type='int'),
+ solo=dict(required=False, type='bool', default=False),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NCDNSAPI:
+ module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+ api_key = module.params.get('api_key')
+ api_password = module.params.get('api_password')
+ customer_id = module.params.get('customer_id')
+ domain = module.params.get('domain')
+ record_type = module.params.get('type')
+ record = module.params.get('record')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ solo = module.params.get('solo')
+ state = module.params.get('state')
+
+ if record_type == 'MX' and not priority:
+ module.fail_json(msg="record type MX required the 'priority' argument")
+
+ has_changed = False
+ all_records = []
+ try:
+ with nc_dnsapi.Client(customer_id, api_key, api_password) as api:
+ all_records = api.dns_records(domain)
+ record = DNSRecord(record, record_type, value, priority=priority)
+
+ # try to get existing record
+ record_exists = False
+ for r in all_records:
+ if r == record:
+ record_exists = True
+ record = r
+
+ break
+
+ if state == 'present':
+ if solo:
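+                    # "solo" means this should be the only record with this name and type: collect any others for deletion.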
+ obsolete_records = [r for r in all_records if
+ r.hostname == record.hostname
+ and r.type == record.type
+ and not r.destination == record.destination]
+
+ if obsolete_records:
+ if not module.check_mode:
+ all_records = api.delete_dns_records(domain, obsolete_records)
+
+ has_changed = True
+
+ if not record_exists:
+ if not module.check_mode:
+ all_records = api.add_dns_record(domain, record)
+
+ has_changed = True
+ elif state == 'absent' and record_exists:
+ if not module.check_mode:
+ all_records = api.delete_dns_record(domain, record)
+
+ has_changed = True
+
+ except Exception as ex:
+        module.fail_json(msg=to_native(ex))
+
+ module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
+def record_data(r):
+ return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
new file mode 100644
index 00000000..af953e0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+author: "Matt Coddington (@mcodd)"
+short_description: Notify newrelic about app deployments
+description:
+ - Notify newrelic about app deployments (see https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/deployment-notifications#api)
+options:
+ token:
+ type: str
+ description:
+ - API token, to place in the x-api-key header.
+ required: true
+ app_name:
+ type: str
+ description:
+      - (one of app_name or application_id is required) The value of app_name in the newrelic.yml file used by the application
+ required: false
+ application_id:
+ type: str
+ description:
+      - (one of app_name or application_id is required) The application ID, found in the URL when viewing the application in RPM
+ required: false
+ changelog:
+ type: str
+ description:
+ - A list of changes for this deployment
+ required: false
+ description:
+ type: str
+ description:
+ - Text annotation for the deployment - notes for you
+ required: false
+ revision:
+ type: str
+ description:
+ - A revision number (e.g., git commit SHA)
+ required: false
+ user:
+ type: str
+ description:
+ - The name of the user/process that triggered this deployment
+ required: false
+ appname:
+ type: str
+ description:
+ - Name of the application
+ required: false
+ environment:
+ type: str
+ description:
+ - The environment for this deployment
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify newrelic about an app deployment
+ community.general.newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=False),
+ user=dict(required=False),
+ appname=dict(required=False),
+ environment=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ if module.params["app_name"]:
+ params["app_name"] = module.params["app_name"]
+ elif module.params["application_id"]:
+ params["application_id"] = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ for item in ["changelog", "description", "revision", "user", "appname", "environment"]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to NewRelic
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urlencode(params)
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
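+    # Treat an HTTP 200 or 201 response from the deployments endpoint as a successful notification.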
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py
new file mode 100644
index 00000000..e6135cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nexmo.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: nexmo
+short_description: Send an SMS via Nexmo
+description:
+  - Send an SMS message via Nexmo
+author: "Matt Martz (@sivel)"
+options:
+ api_key:
+ type: str
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ type: int
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ type: list
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ type: str
+ description:
+      - Message text to send. Messages longer than 160 characters will be
+        split into multiple messages.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ community.general.nexmo:
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
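+    # Send the message to each destination number separately and collect the per-number responses; any non-zero message status marks that number as failed.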
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except Exception:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py
new file mode 100644
index 00000000..3a68f8da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_facts.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.nginx_status_info) instead.
+short_description: Retrieve nginx status facts.
+description:
+  - Gathers facts from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+  - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = '''
+# Gather status facts from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+
+# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+ timeout: 20
+'''
+
+RETURN = '''
+---
+nginx_status_facts.active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+nginx_status_facts.accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+nginx_status_facts.reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+nginx_status_facts.writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+nginx_status_facts.waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+nginx_status_facts.data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusFacts(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'nginx_status_facts': {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['nginx_status_facts']['data'] = data
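+        # Parse the plain-text stub_status output into its individual counters.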
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['nginx_status_facts']['active_connections'] = int(match.group(1))
+ result['nginx_status_facts']['accepts'] = int(match.group(2))
+ result['nginx_status_facts']['handled'] = int(match.group(3))
+ result['nginx_status_facts']['requests'] = int(match.group(4))
+ result['nginx_status_facts']['reading'] = int(match.group(5))
+ result['nginx_status_facts']['writing'] = int(match.group(6))
+ result['nginx_status_facts']['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_facts = NginxStatusFacts().run()
+ result = dict(changed=False, ansible_facts=nginx_status_facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py
new file mode 100644
index 00000000..a13a57a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nginx_status_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status.
+description:
+  - Gathers information from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = r'''
+# Gather status info from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ register: result
+
+# Gather status info from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ timeout: 20
+ register: result
+'''
+
+RETURN = r'''
+---
+active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusInfo(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['data'] = data
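+        # Parse the plain-text stub_status output into its individual counters.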
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['active_connections'] = int(match.group(1))
+ result['accepts'] = int(match.group(2))
+ result['handled'] = int(match.group(3))
+ result['requests'] = int(match.group(4))
+ result['reading'] = int(match.group(5))
+ result['writing'] = int(match.group(6))
+ result['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_info = NginxStatusInfo().run()
+ module.exit_json(changed=False, **nginx_status_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py
new file mode 100644
index 00000000..7db7c5ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nictagadm.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Bruce Smith <Bruce.Smith.IT@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nictagadm
+short_description: Manage nic tags on SmartOS systems
+description:
+ - Create or delete nic tags on SmartOS systems.
+author:
+- Bruce Smith (@SmithX10)
+options:
+ name:
+ description:
+ - Name of the nic tag.
+ required: true
+ type: str
+ mac:
+ description:
+ - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
+ - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ type: str
+ etherstub:
+ description:
+ - Specifies that the nic tag will be attached to a created I(etherstub).
+ - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ type: bool
+ default: no
+ mtu:
+ description:
+ - Specifies the size of the I(mtu) of the desired nic tag.
+ - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ type: int
+ force:
+ description:
+    - When I(state) is C(absent), this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ type: bool
+ default: no
+ state:
+ description:
+ - Create or delete a SmartOS nic tag.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
+ community.general.nictagadm:
+ name: storage0
+ mac: 00:1b:21:a3:f5:4d
+ mtu: 9000
+ state: present
+
+- name: Remove 'storage0' nic tag
+ community.general.nictagadm:
+ name: storage0
+ state: absent
+'''
+
+RETURN = r'''
+name:
+ description: nic tag name
+ returned: always
+ type: str
+ sample: storage0
+mac:
+ description: MAC Address that the nic tag was attached to.
+ returned: always
+ type: str
+ sample: 00:1b:21:a3:f5:4d
+etherstub:
+  description: specifies if the nic tag was attached to a newly created etherstub.
+ returned: always
+ type: bool
+ sample: False
+mtu:
+ description: specifies which MTU size was passed during the nictagadm add command. mtu and etherstub are mutually exclusive.
+ returned: always
+ type: int
+ sample: 1500
+force:
+ description: Shows if -f was used during the deletion of a nic tag
+ returned: always
+ type: bool
+ sample: False
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+
+
+class NicTag(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.mac = module.params['mac']
+ self.etherstub = module.params['etherstub']
+ self.mtu = module.params['mtu']
+ self.force = module.params['force']
+ self.state = module.params['state']
+
+ self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
+
+ def is_valid_mac(self):
+ return is_mac(self.mac.lower())
+
+ def nictag_exists(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('exists')
+ cmd.append(self.name)
+
+ (rc, dummy, dummy) = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def add_nictag(self):
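+        # Build the 'nictagadm -v add' command: pass '-l' when creating an etherstub, otherwise add optional '-p mtu=...' / '-p mac=...' properties.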
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('add')
+
+ if self.etherstub:
+ cmd.append('-l')
+
+ if self.mtu:
+ cmd.append('-p')
+ cmd.append('mtu=' + str(self.mtu))
+
+ if self.mac:
+ cmd.append('-p')
+ cmd.append('mac=' + str(self.mac))
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def delete_nictag(self):
+ cmd = [self.nictagadm_bin]
+
+ cmd.append('-v')
+ cmd.append('delete')
+
+ if self.force:
+ cmd.append('-f')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ mac=dict(type='str'),
+ etherstub=dict(type='bool', default=False),
+ mtu=dict(type='int'),
+ force=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ mutually_exclusive=[
+ ['etherstub', 'mac'],
+ ['etherstub', 'mtu'],
+ ],
+ required_if=[
+ ['etherstub', False, ['name', 'mac']],
+ ['state', 'absent', ['name', 'force']],
+ ],
+ supports_check_mode=True
+ )
+
+ nictag = NicTag(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ changed=False,
+ etherstub=nictag.etherstub,
+ force=nictag.force,
+ name=nictag.name,
+ mac=nictag.mac,
+ mtu=nictag.mtu,
+ state=nictag.state,
+ )
+
+    if nictag.mac and not nictag.is_valid_mac():
+ module.fail_json(msg='Invalid MAC Address Value',
+ name=nictag.name,
+ mac=nictag.mac,
+ etherstub=nictag.etherstub)
+
+ if nictag.state == 'absent':
+ if nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.delete_nictag()
+ if rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+ elif nictag.state == 'present':
+ if not nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.add_nictag()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+
+ if rc is not None:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py
new file mode 100644
index 00000000..660c9bc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_a_record.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_a_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS A records
+description:
+ - Adds and/or removes instances of A record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:a) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ type: str
+ view:
+ description:
+ - Sets the DNS view to associate this A record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ type: str
+ ipv4addr:
+ description:
+      - Configures the IPv4 address for this A record. Users can dynamically
+        allocate an IPv4 address to the A record by passing a dictionary containing
+        I(nios_next_ip) and a I(CIDR network range). See the example.
+ aliases:
+ - ipv4
+ type: str
+ ttl:
+ description:
+ - Configures the TTL to be associated with this A record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing A record
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an A record from the system
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an A record name
+ community.general.nios_a_record:
+ name: {new_name: a_new.ansible.com, old_name: a.ansible.com}
+ ipv4: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Dynamically add a record to next available ip
+ community.general.nios_a_record:
+ name: a.ansible.com
+ ipv4: {nios_next_ip: 192.168.10.0/24}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_A_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_A_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
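
Editor's note: nios_a_record, like the other NIOS record modules below, only builds an argument spec and hands it to WapiModule.run(). Fields tagged ib_req=True double as the lookup filter used to find an existing WAPI object (nios_fixed_address further down spells this out explicitly with its obj_filter line). The snippet below is a standalone sketch of that filter construction under that assumption; build_object_filter and the sample dicts are illustrative only, not part of the collection.

def build_object_filter(ib_spec, params):
    # keep only the fields marked ib_req=True that the playbook actually set
    return dict((key, params[key]) for key, attrs in ib_spec.items()
                if attrs.get('ib_req') and params.get(key) is not None)

ib_spec = {'name': {'ib_req': True}, 'view': {'ib_req': True},
           'ipv4addr': {'ib_req': True}, 'ttl': {}, 'comment': {}}
params = {'name': 'a.ansible.com', 'view': 'default',
          'ipv4addr': '192.168.10.1', 'ttl': None, 'comment': None}

print(build_object_filter(ib_spec, params))
# -> {'name': 'a.ansible.com', 'view': 'default', 'ipv4addr': '192.168.10.1'}
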
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py
new file mode 100644
index 00000000..b7caecee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_aaaa_record.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_aaaa_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS AAAA records
+description:
+ - Adds and/or removes instances of AAAA record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:aaaa) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this AAAA record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for this AAAA record.
+ aliases:
+ - ipv6
+ ttl:
+ description:
+ - Configures the TTL to be associated with this AAAA record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing AAAA record
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an AAAA record from the system
+ community.general.nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Update an AAAA record name
+ community.general.nios_aaaa_record:
+ name: {new_name: aaaa_new.ansible.com, old_name: aaaa.ansible.com}
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_AAAA_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_AAAA_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py
new file mode 100644
index 00000000..2863d148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_cname_record.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_cname_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS CNAME records
+description:
+ - Adds and/or removes instances of CNAME record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:cname) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this CNAME record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ canonical:
+ description:
+ - Configures the canonical name for this CNAME record.
+ aliases:
+ - cname
+ ttl:
+ description:
+ - Configures the TTL to be associated with this CNAME record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing CNAME record
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a CNAME record from the system
+ community.general.nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_CNAME_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ canonical=dict(aliases=['cname'], ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_CNAME_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py
new file mode 100644
index 00000000..1bb8d068
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_dns_view.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_dns_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS views
+description:
+ - Adds and/or removes instances of DNS view objects from
+ Infoblox NIOS servers. This module manages NIOS C(view) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of DNS view object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ aliases:
+ - view
+ network_view:
+ description:
+ - Specifies the name of the network view to assign the configured
+ DNS view to. The network view must already be configured on the
+ target system.
+ default: default
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for dns view
+ community.general.nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns view instance
+ community.general.nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the dns view instance
+ community.general.nios_dns_view:
+ name: {new_name: ansible-dns-new, old_name: ansible-dns}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['view'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_DNS_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py
new file mode 100644
index 00000000..a46db04f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_fixed_address.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_fixed_address
+author: "Sumit Jaiswal (@sjaiswal)"
+short_description: Configure Infoblox NIOS DHCP Fixed Address
+description:
+ - A fixed address is a specific IP address that a DHCP server
+ always assigns when a lease request comes from a particular
+ MAC address of the client.
+ - Supports both IPV4 and IPV6 internet protocols
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+      - Specifies the hostname under which the fixed DHCP IP address is stored
+        for the respective MAC address.
+ required: true
+ ipaddr:
+ description:
+ - IPV4/V6 address of the fixed address.
+ required: true
+ mac:
+ description:
+ - The MAC address of the interface.
+ required: true
+ network:
+ description:
+ - Specifies the network range in which ipaddr exists.
+ required: true
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: false
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an ipv6 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv6_fixed
+ ipaddr: fe80::1/10
+ mac: 08:6d:41:e8:fd:e8
+ network: fe80::/64
+ network_view: default
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set dhcp options for an ipv4 fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an ipv4 dhcp fixed address
+ community.general.nios_fixed_address:
+ name: ipv4_fixed
+ ipaddr: 192.168.10.1
+ mac: 08:6d:41:e8:fd:e8
+ network: 192.168.10.0/24
+ network_view: default
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_FIXED_ADDRESS, NIOS_IPV6_FIXED_ADDRESS
+
+
+def validate_ip_address(address):
+ try:
+ socket.inet_aton(address)
+ except socket.error:
+ return False
+ return address.count(".") == 3
+
+
+def validate_ip_v6_address(address):
+ try:
+ socket.inet_pton(socket.AF_INET6, address)
+ except socket.error:
+ return False
+ return True
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. The use_option field only applies
+ to special options that are displayed separately from other options and
+ have a use flag. This function removes the use_option flag from all
+ other options. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
+ '''
+ special_options = ['routers', 'router-templates', 'domain-name-servers',
+ 'domain-name', 'broadcast-address', 'broadcast-address-offset',
+ 'dhcp-lease-time', 'dhcp6.name-servers']
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ if opt['name'] not in special_options:
+ del opt['use_option']
+ options.append(opt)
+ return options
+
+
+def validate_ip_addr_type(ip, arg_spec, module):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox network type
+ '''
+ check_ip = ip.split('/')
+
+ if validate_ip_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv4addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv4addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV4_FIXED_ADDRESS, arg_spec, module
+ elif validate_ip_v6_address(check_ip[0]) and 'ipaddr' in arg_spec:
+ arg_spec['ipv6addr'] = arg_spec.pop('ipaddr')
+ module.params['ipv6addr'] = module.params.pop('ipaddr')
+ return NIOS_IPV6_FIXED_ADDRESS, arg_spec, module
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ name=dict(required=True),
+ ipaddr=dict(required=True, ib_req=True),
+ mac=dict(required=True, ib_req=True),
+ network=dict(required=True),
+ network_view=dict(default='default'),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ # to modify argument based on ipaddr type i.e. IPV4/IPV6
+ fixed_address_ip_type, ib_spec, module = validate_ip_addr_type(obj_filter['ipaddr'], ib_spec, module)
+
+ wapi = WapiModule(module)
+
+ result = wapi.run(fixed_address_ip_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
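
Editor's note: the extra wrinkle in nios_fixed_address is the IPv4/IPv6 dispatch above. The standalone snippet below exercises the same socket-based checks (inet_aton plus a dot-count guard for IPv4, inet_pton with AF_INET6 for IPv6) on the two addresses from the EXAMPLES block; it is an illustration, not module code.

import socket

def is_ipv4(address):
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return address.count('.') == 3

def is_ipv6(address):
    try:
        socket.inet_pton(socket.AF_INET6, address)
    except socket.error:
        return False
    return True

for ipaddr in ('192.168.10.1', 'fe80::1/10'):
    host = ipaddr.split('/')[0]            # strip any CIDR suffix first
    key = 'ipv4addr' if is_ipv4(host) else 'ipv6addr' if is_ipv6(host) else 'unknown'
    print(ipaddr, '->', key)
# 192.168.10.1 -> ipv4addr
# fe80::1/10 -> ipv6addr
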
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py
new file mode 100644
index 00000000..efab39de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_host_record.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_host_record
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS host records
+description:
+ - Adds and/or removes instances of host record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:host) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of host record object from Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system. User can also update the hostname as it is possible
+ to pass a dict containing I(new_name), I(old_name). See examples.
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this host record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ configure_for_dns:
+ description:
+      - Sets the DNS to a particular parent. If the user needs to bypass DNS,
+        this value can be set to false.
+ type: bool
+ required: false
+ default: true
+ aliases:
+ - dns
+ ipv4addrs:
+ description:
+ - Configures the IPv4 addresses for this host record. This argument
+ accepts a list of values (see suboptions)
+ aliases:
+ - ipv4
+ type: list
+ elements: dict
+ suboptions:
+ ipv4addr:
+ description:
+          - Configures the IPv4 address for the host record. Users can dynamically
+            allocate an IPv4 address to the host record by passing a dictionary
+            containing I(nios_next_ip) and a I(CIDR network range). To add or
+            remove an IPv4 address from an existing record, the I(add)/I(remove)
+            params need to be used. See examples.
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+          - Configure the host_record over DHCP instead of DNS. If set to
+            true, the MAC address must also be provided.
+ required: false
+ aliases:
+ - dhcp
+ mac:
+ description:
+          - Configures the hardware MAC address for the host record. If DHCP is
+            set to true, the MAC address must be provided.
+ required: false
+ add:
+ description:
+          - Adds the IPv4 address to an existing host record.
+            Note that with I(add) the I(state) must be kept as I(present),
+            as the new IP address is allocated to the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ remove:
+ description:
+          - Removes the IPv4 address from an existing host record.
+            Note that with I(remove) the I(state) must be changed to I(absent),
+            as the IP address is de-allocated from the existing host record. See examples.
+ type: bool
+ required: false
+ version_added: '0.2.0'
+ ipv6addrs:
+ description:
+      - Configures the IPv6 addresses for the host record. This argument
+        accepts a list of values (see suboptions)
+ aliases:
+ - ipv6
+ type: list
+ elements: dict
+ suboptions:
+ ipv6addr:
+ description:
+ - Configures the IPv6 address for the host record
+ required: true
+ aliases:
+ - address
+ configure_for_dhcp:
+ description:
+          - Configure the host_record over DHCP instead of DNS. If set to
+            true, the MAC address must also be provided.
+ required: false
+ aliases:
+ description:
+ - Configures an optional list of additional aliases to add to the host
+ record. These are equivalent to CNAMEs but held within a host
+ record. Must be in list format.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this host record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an ipv4 host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ aliases:
+ - cname.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a comment to an existing host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove a host record from the system
+ community.general.nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an ipv4 host record
+ community.general.nios_host_record:
+ name: {new_name: host-new.ansible.com, old_name: host.ansible.com}
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record bypassing DNS
+ community.general.nios_host_record:
+ name: new_host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Create an ipv4 host record over DHCP
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Dynamically add host record to next available ip
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: {nios_next_ip: 192.168.10.0/24}
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add ip to host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.2
+ add: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove ip from host record
+ community.general.nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ remove: true
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_HOST_RECORD
+
+
+def ipaddr(module, key, filtered_keys=None):
+ ''' Transforms the input value into a struct supported by WAPI
+ This function will transform the input from the playbook into a struct
+ that is valid for WAPI in the form of:
+ {
+ ipv4addr: <value>,
+ mac: <value>
+ }
+ This function does not validate the values are properly formatted or in
+ the acceptable range, that is left to WAPI.
+ '''
+ filtered_keys = filtered_keys or list()
+ objects = list()
+ for item in module.params[key]:
+ objects.append(dict([(k, v) for k, v in iteritems(item) if v is not None and k not in filtered_keys]))
+ return objects
+
+
+def ipv4addrs(module):
+ return ipaddr(module, 'ipv4addrs', filtered_keys=['address', 'dhcp'])
+
+
+def ipv6addrs(module):
+ return ipaddr(module, 'ipv6addrs', filtered_keys=['address', 'dhcp'])
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4addr_spec = dict(
+ ipv4addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, aliases=['dhcp'], ib_req=True),
+ mac=dict(required=False, ib_req=True),
+ add=dict(type='bool', required=False),
+ remove=dict(type='bool', required=False)
+ )
+
+ ipv6addr_spec = dict(
+ ipv6addr=dict(required=True, aliases=['address'], ib_req=True),
+ configure_for_dhcp=dict(type='bool', required=False, ib_req=True),
+ mac=dict(required=False, ib_req=True)
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ ipv4addrs=dict(type='list', aliases=['ipv4'], elements='dict', options=ipv4addr_spec, transform=ipv4addrs),
+ ipv6addrs=dict(type='list', aliases=['ipv6'], elements='dict', options=ipv6addr_spec, transform=ipv6addrs),
+ configure_for_dns=dict(type='bool', default=True, required=False, aliases=['dns'], ib_req=True),
+ aliases=dict(type='list'),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_HOST_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
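
Editor's note: the ipaddr()/ipv4addrs()/ipv6addrs() helpers above reshape the playbook list into the struct WAPI expects by dropping unset values and the playbook-only keys. The snippet below is a self-contained sketch of that transform; the input data and names are made up for illustration.

def transform(items, filtered_keys=('address', 'dhcp')):
    # drop None values and the alias keys that only exist on the playbook side
    return [dict((k, v) for k, v in item.items()
                 if v is not None and k not in filtered_keys)
            for item in items]

playbook_ipv4addrs = [{'ipv4addr': '192.168.10.1', 'configure_for_dhcp': True,
                       'mac': '00-80-C8-E3-4C-BD', 'add': None, 'remove': None}]

print(transform(playbook_ipv4addrs))
# -> [{'ipv4addr': '192.168.10.1', 'configure_for_dhcp': True, 'mac': '00-80-C8-E3-4C-BD'}]
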
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py
new file mode 100644
index 00000000..aff8ca93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_member.py
@@ -0,0 +1,519 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_member
+author: "Krishna Vasudevan (@krisvasudevan)"
+short_description: Configure Infoblox NIOS members
+description:
+ - Adds and/or removes Infoblox NIOS servers. This module manages NIOS C(member) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ host_name:
+ description:
+ - Specifies the host name of the member to either add or remove from
+ the NIOS instance.
+ required: true
+ aliases:
+ - name
+ vip_setting:
+ description:
+ - Configures the network settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of the Grid Member
+ subnet_mask:
+ description:
+ - The subnet mask for the Grid Member
+ gateway:
+ description:
+ - The default gateway for the Grid Member
+ ipv6_setting:
+ description:
+ - Configures the IPv6 settings for the grid member.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of the Grid Member
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix for the Grid Member
+ gateway:
+ description:
+ - The gateway address for the Grid Member
+ config_addr_type:
+ description:
+ - Address configuration type (IPV4/IPV6/BOTH)
+ default: IPV4
+ comment:
+ description:
+ - A descriptive comment of the Grid member.
+ extattrs:
+ description:
+ - Extensible attributes associated with the object.
+ enable_ha:
+ description:
+ - If set to True, the member has two physical nodes (HA pair).
+ type: bool
+ router_id:
+ description:
+      - Virtual router identifier. Provide this ID if "enable_ha" is set to "true". This is a unique VRID number (from 1 to 255) for the local subnet.
+ lan2_enabled:
+ description:
+ - When set to "true", the LAN2 port is enabled as an independent port or as a port for failover purposes.
+ type: bool
+ lan2_port_setting:
+ description:
+ - Settings for the Grid member LAN2 port if 'lan2_enabled' is set to "true".
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - If set to True, then it has its own IP settings.
+ type: bool
+ network_setting:
+ description:
+          - If the 'enabled' field is set to True, this defines IPv4 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of LAN2
+ subnet_mask:
+ description:
+ - The subnet mask of LAN2
+ gateway:
+ description:
+ - The default gateway of LAN2
+ v6_network_setting:
+ description:
+          - If the 'enabled' field is set to True, this defines IPv6 network settings for LAN2.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of LAN2
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of LAN2
+ gateway:
+ description:
+ - The gateway address of LAN2
+ platform:
+ description:
+ - Configures the Hardware Platform.
+ default: INFOBLOX
+ node_info:
+ description:
+ - Configures the node information list with detailed status report on the operations of the Grid Member.
+ type: list
+ elements: dict
+ suboptions:
+ lan2_physical_setting:
+ description:
+ - Physical port settings for the LAN2 interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+              - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_ha_port_setting:
+ description:
+ - LAN/HA port settings for the node.
+ type: list
+ elements: dict
+ suboptions:
+ ha_ip_address:
+ description:
+ - HA IP address.
+ ha_port_setting:
+ description:
+ - Physical port settings for the HA interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+                  - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ lan_port_setting:
+ description:
+ - Physical port settings for the LAN interface.
+ type: list
+ elements: dict
+ suboptions:
+ auto_port_setting_enabled:
+ description:
+                  - Enable or disable the auto port setting.
+ type: bool
+ duplex:
+ description:
+ - The port duplex; if speed is 1000, duplex must be FULL.
+ speed:
+ description:
+ - The port speed; if speed is 1000, duplex is FULL.
+ mgmt_ipv6addr:
+ description:
+ - Public IPv6 address for the LAN1 interface.
+ mgmt_lan:
+ description:
+ - Public IPv4 address for the LAN1 interface.
+ mgmt_network_setting:
+ description:
+ - Network settings for the MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The IPv4 Address of MGMT
+ subnet_mask:
+ description:
+ - The subnet mask of MGMT
+ gateway:
+ description:
+ - The default gateway of MGMT
+ v6_mgmt_network_setting:
+ description:
+ - The network settings for the IPv6 MGMT port of the node.
+ type: list
+ elements: dict
+ suboptions:
+ virtual_ip:
+ description:
+ - The IPv6 Address of MGMT
+ cidr_prefix:
+ description:
+ - The IPv6 CIDR prefix of MGMT
+ gateway:
+ description:
+ - The gateway address of MGMT
+ mgmt_port_setting:
+ description:
+ - Settings for the member MGMT port.
+ type: list
+ elements: dict
+ suboptions:
+ enabled:
+ description:
+ - Determines if MGMT port settings should be enabled.
+ type: bool
+ security_access_enabled:
+ description:
+ - Determines if security access on the MGMT port is enabled or not.
+ type: bool
+ vpn_enabled:
+ description:
+ - Determines if VPN on the MGMT port is enabled or not.
+ type: bool
+ upgrade_group:
+ description:
+ - The name of the upgrade group to which this Grid member belongs.
+ default: Default
+ use_syslog_proxy_setting:
+ description:
+      - Use flag for external_syslog_server_enable, syslog_servers, syslog_proxy_setting, syslog_size
+ type: bool
+ external_syslog_server_enable:
+ description:
+ - Determines if external syslog servers should be enabled
+ type: bool
+ syslog_servers:
+ description:
+ - The list of external syslog servers.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - The server address.
+ category_list:
+ description:
+ - The list of all syslog logging categories.
+ connection_type:
+ description:
+          - The connection type for communicating with this server (STCP/TCP/UDP).
+ default: UDP
+ local_interface:
+ description:
+          - The local interface through which the appliance sends syslog messages to the syslog server (ANY/LAN/MGMT).
+ default: ANY
+ message_node_id:
+ description:
+ - Identify the node in the syslog message. (HOSTNAME/IP_HOSTNAME/LAN/MGMT)
+ default: LAN
+ message_source:
+ description:
+ - The source of syslog messages to be sent to the external syslog server.
+ default: ANY
+ only_category_list:
+ description:
+ - The list of selected syslog logging categories. The appliance forwards syslog messages that belong to the selected categories.
+ type: bool
+ port:
+ description:
+ - The port this server listens on.
+ default: 514
+ severity:
+ description:
+ - The severity filter. The appliance sends log messages of the specified severity and above to the external syslog server.
+ default: DEBUG
+ pre_provisioning:
+ description:
+ - Pre-provisioning information.
+ type: list
+ elements: dict
+ suboptions:
+ hardware_info:
+ description:
+ - An array of structures that describe the hardware being pre-provisioned.
+ type: list
+ elements: dict
+ suboptions:
+ hwmodel:
+ description:
+ - Hardware model
+ hwtype:
+ description:
+ - Hardware type.
+ licenses:
+ description:
+ - An array of license types.
+ create_token:
+ description:
+ - Flag for initiating a create token request for pre-provisioned members.
+ type: bool
+ default: False
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Add a member to the grid with IPv4 address
+ community.general.nios_member:
+ host_name: member01.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Add a HA member to the grid
+ community.general.nios_member:
+ host_name: memberha.localdomain
+ vip_setting:
+ - address: 192.168.1.100
+ subnet_mask: 255.255.255.0
+ gateway: 192.168.1.1
+ config_addr_type: IPV4
+ platform: VNIOS
+ enable_ha: true
+ router_id: 150
+ node_info:
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.70
+ mgmt_lan: 192.168.1.80
+ - lan_ha_port_setting:
+ - ha_ip_address: 192.168.1.71
+ mgmt_lan: 192.168.1.81
+ comment: "Created by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the member with pre-provisioning details specified
+ community.general.nios_member:
+ name: member01.localdomain
+ pre_provisioning:
+ - hardware_info:
+ - hwmodel: IB-VM-820
+ hwtype: IB-VNIOS
+ licenses:
+ - dns
+ - dhcp
+ - enterprise
+ - vnios
+ comment: "Updated by Ansible"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the member
+ community.general.nios_member:
+ name: member01.localdomain
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MEMBER
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ipv4_spec = dict(
+ address=dict(),
+ subnet_mask=dict(),
+ gateway=dict(),
+ )
+
+ ipv6_spec = dict(
+ virtual_ip=dict(),
+ cidr_prefix=dict(type='int'),
+ gateway=dict(),
+ )
+
+ port_spec = dict(
+ auto_port_setting_enabled=dict(type='bool'),
+ duplex=dict(),
+ speed=dict(),
+ )
+
+ lan2_port_spec = dict(
+ enabled=dict(type='bool'),
+ network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ ha_port_spec = dict(
+ ha_ip_address=dict(),
+ ha_port_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_port_setting=dict(type='list', elements='dict', options=port_spec),
+ mgmt_lan=dict(),
+ mgmt_ipv6addr=dict(),
+ )
+
+ node_spec = dict(
+ lan2_physical_setting=dict(type='list', elements='dict', options=port_spec),
+ lan_ha_port_setting=dict(type='list', elements='dict', options=ha_port_spec),
+ mgmt_network_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ v6_mgmt_network_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ )
+
+ mgmt_port_spec = dict(
+ enabled=dict(type='bool'),
+ security_access_enabled=dict(type='bool'),
+ vpn_enabled=dict(type='bool'),
+ )
+
+ syslog_spec = dict(
+ address=dict(),
+ category_list=dict(type='list'),
+ connection_type=dict(default='UDP'),
+ local_interface=dict(default='ANY'),
+ message_node_id=dict(default='LAN'),
+ message_source=dict(default='ANY'),
+ only_category_list=dict(type='bool'),
+ port=dict(type='int', default=514),
+ severity=dict(default='DEBUG'),
+ )
+
+ hw_spec = dict(
+ hwmodel=dict(),
+ hwtype=dict(),
+ )
+
+ pre_prov_spec = dict(
+ hardware_info=dict(type='list', elements='dict', options=hw_spec),
+ licenses=dict(type='list'),
+ )
+
+ ib_spec = dict(
+ host_name=dict(required=True, aliases=['name'], ib_req=True),
+ vip_setting=dict(type='list', elements='dict', options=ipv4_spec),
+ ipv6_setting=dict(type='list', elements='dict', options=ipv6_spec),
+ config_addr_type=dict(default='IPV4'),
+ comment=dict(),
+ enable_ha=dict(type='bool', default=False),
+ router_id=dict(type='int'),
+ lan2_enabled=dict(type='bool', default=False),
+ lan2_port_setting=dict(type='list', elements='dict', options=lan2_port_spec),
+ platform=dict(default='INFOBLOX'),
+ node_info=dict(type='list', elements='dict', options=node_spec),
+ mgmt_port_setting=dict(type='list', elements='dict', options=mgmt_port_spec),
+ upgrade_group=dict(default='Default'),
+ use_syslog_proxy_setting=dict(type='bool'),
+ external_syslog_server_enable=dict(type='bool'),
+ syslog_servers=dict(type='list', elements='dict', options=syslog_spec),
+ pre_provisioning=dict(type='list', elements='dict', options=pre_prov_spec),
+ extattrs=dict(type='dict'),
+ create_token=dict(type='bool', default=False),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MEMBER, ib_spec)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py
new file mode 100644
index 00000000..ca1f1f81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_mx_record.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_mx_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS MX records
+description:
+ - Adds and/or removes instances of MX record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:mx) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this MX record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ mail_exchanger:
+ description:
+ - Configures the mail exchanger FQDN for this MX record.
+ aliases:
+ - mx
+ preference:
+ description:
+ - Configures the preference (0-65535) for this MX record.
+ ttl:
+ description:
+      - Configures the TTL to be associated with this MX record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing MX record
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an MX record from the system
+ community.general.nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_MX_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ mail_exchanger=dict(aliases=['mx'], ib_req=True),
+ preference=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_MX_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py
new file mode 100644
index 00000000..de57e692
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_naptr_record.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_naptr_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS NAPTR records
+description:
+ - Adds and/or removes instances of NAPTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:naptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+  - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+      - Sets the DNS view to associate this NAPTR record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ order:
+ description:
+ - Configures the order (0-65535) for this NAPTR record. This parameter
+ specifies the order in which the NAPTR rules are applied when
+ multiple rules are present.
+ preference:
+ description:
+ - Configures the preference (0-65535) for this NAPTR record. The
+ preference field determines the order NAPTR records are processed
+ when multiple records with the same order parameter are present.
+ replacement:
+ description:
+ - Configures the replacement field for this NAPTR record.
+ For nonterminal NAPTR records, this field specifies the
+ next domain name to look up.
+ services:
+ description:
+ - Configures the services field (128 characters maximum) for this
+ NAPTR record. The services field contains protocol and service
+ identifiers, such as "http+E2U" or "SIPS+D2T".
+ required: false
+ flags:
+ description:
+ - Configures the flags field for this NAPTR record. These control the
+ interpretation of the fields for an NAPTR record object. Supported
+ values for the flags field are "U", "S", "P" and "A".
+ required: false
+ regexp:
+ description:
+ - Configures the regexp field for this NAPTR record. This is the
+ regular expression-based rewriting rule of the NAPTR record. This
+ should be a POSIX compliant regular expression, including the
+ substitution rule and flags. Refer to RFC 2915 for the field syntax
+ details.
+ required: false
+ ttl:
+ description:
+ - Configures the TTL to be associated with this NAPTR record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing NAPTR record
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove a NAPTR record from the system
+ community.general.nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ order=dict(type='int', ib_req=True),
+ preference=dict(type='int', ib_req=True),
+ replacement=dict(ib_req=True),
+ services=dict(),
+ flags=dict(),
+ regexp=dict(),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:naptr', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py
new file mode 100644
index 00000000..54b8dfb1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network object
+description:
+ - Adds and/or removes instances of network objects from
+ Infoblox NIOS servers. This module manages NIOS C(network) objects
+ using the Infoblox WAPI interface over REST.
+ - Supports both IPv4 and IPv6 internet protocols.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ network:
+ description:
+ - Specifies the network to add or remove from the system. The value
+ should use CIDR notation.
+ required: true
+ aliases:
+ - name
+ - cidr
+ network_view:
+ description:
+ - Configures the name of the network view to associate with this
+ configured instance.
+ required: true
+ default: default
+ options:
+ description:
+ - Configures the set of DHCP options to be included as part of
+ the configured network instance. This argument accepts a list
+ of values (see suboptions). When configuring suboptions at
+ least one of C(name) or C(num) must be specified.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the DHCP option to configure. The standard options are
+ C(router), C(router-templates), C(domain-name-servers), C(domain-name),
+ C(broadcast-address), C(broadcast-address-offset), C(dhcp-lease-time),
+ and C(dhcp6.name-servers).
+ num:
+ description:
+ - The number of the DHCP option to configure
+ value:
+ description:
+ - The value of the DHCP option specified by C(name)
+ required: true
+ use_option:
+ description:
+ - Only applies to a subset of options (see NIOS API documentation)
+ type: bool
+ default: 'yes'
+ vendor_class:
+ description:
+ - The name of the space this DHCP option is associated to
+ default: DHCP
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ container:
+ description:
+ - If set to true, a network container will be added to or removed
+ from the system instead of a regular network.
+ type: bool
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv6 network
+ community.general.nios_network:
+ network: fe80::/64
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Set DHCP options for an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an IPv4 network
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure an IPv6 network container
+ community.general.nios_network:
+ network: fe80::/64
+ container: true
+ comment: test network container
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove an IPv4 network container
+ community.general.nios_network:
+ network: 192.168.10.0/24
+ container: true
+ comment: test network container
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import validate_ip_address, validate_ip_v6_address
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK, NIOS_IPV6_NETWORK
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_IPV4_NETWORK_CONTAINER, NIOS_IPV6_NETWORK_CONTAINER
+
+
+def options(module):
+ ''' Transforms the module argument into a valid WAPI struct
+ This function will transform the options argument into a structure that
+ is a valid WAPI structure in the format of:
+ {
+ name: <value>,
+ num: <value>,
+ value: <value>,
+ use_option: <value>,
+ vendor_class: <value>
+ }
+ It will remove any options that are set to None since WAPI will error on
+ that condition. It will also verify that either `name` or `num` is
+ set in the structure but does not validate the values are equal.
+ The remainder of the value validation is performed by WAPI
+ '''
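+ # For example, an options item of
+ # {'name': 'domain-name', 'num': None, 'value': 'ansible.com', 'use_option': True, 'vendor_class': 'DHCP'}
+ # becomes
+ # {'name': 'domain-name', 'value': 'ansible.com', 'use_option': True, 'vendor_class': 'DHCP'}
+ # once the None entries have been stripped.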
+ options = list()
+ for item in module.params['options']:
+ opt = dict([(k, v) for k, v in iteritems(item) if v is not None])
+ if 'name' not in opt and 'num' not in opt:
+ module.fail_json(msg='one of `name` or `num` is required for option value')
+ options.append(opt)
+ return options
+
+
+def check_ip_addr_type(obj_filter, ib_spec):
+ '''This function will check if the argument ip is type v4/v6 and return appropriate infoblox
+ network/networkcontainer type
+ '''
+
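+ # For example, '192.168.10.0/24' maps to NIOS_IPV4_NETWORK and 'fe80::/64' to
+ # NIOS_IPV6_NETWORK; when container is true the corresponding
+ # *_NETWORK_CONTAINER type is returned instead.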
+ ip = obj_filter['network']
+ if 'container' in obj_filter and obj_filter['container']:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ del ib_spec['options'] # removing option argument as for network container it's not supported
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK_CONTAINER, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK_CONTAINER, ib_spec
+ else:
+ check_ip = ip.split('/')
+ del ib_spec['container'] # removing the container key from post arguments
+ if validate_ip_address(check_ip[0]):
+ return NIOS_IPV4_NETWORK, ib_spec
+ elif validate_ip_v6_address(check_ip[0]):
+ return NIOS_IPV6_NETWORK, ib_spec
+
+
+def check_vendor_specific_dhcp_option(module, ib_spec):
+ '''This function will check if the argument dhcp option belongs to vendor-specific and if yes then will remove
+ use_options flag which is not supported with vendor-specific dhcp options.
+ '''
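+ # For example, if the first configured DHCP option uses num 43, 124 or 125
+ # (the vendor-specific options), its 'use_option' flag is dropped before the
+ # request is built.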
+ for key, value in iteritems(ib_spec):
+ if isinstance(module.params[key], list):
+ temp_dict = module.params[key][0]
+ if 'num' in temp_dict:
+ if temp_dict['num'] in (43, 124, 125):
+ del module.params[key][0]['use_option']
+ return ib_spec
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ option_spec = dict(
+ # one of name or num is required; enforced by the function options()
+ name=dict(),
+ num=dict(type='int'),
+
+ value=dict(required=True),
+
+ use_option=dict(type='bool', default=True),
+ vendor_class=dict(default='DHCP')
+ )
+
+ ib_spec = dict(
+ network=dict(required=True, aliases=['name', 'cidr'], ib_req=True),
+ network_view=dict(default='default', ib_req=True),
+
+ options=dict(type='list', elements='dict', options=option_spec, transform=options),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ container=dict(type='bool', ib_req=True)
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ # to get the argument ipaddr
+ obj_filter = dict([(k, module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
+ network_type, ib_spec = check_ip_addr_type(obj_filter, ib_spec)
+
+ wapi = WapiModule(module)
+ # to check for vendor specific dhcp option
+ ib_spec = check_vendor_specific_dhcp_option(module, ib_spec)
+
+ result = wapi.run(network_type, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py
new file mode 100644
index 00000000..d13052b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_network_view.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_network_view
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS network views
+description:
+ - Adds and/or removes instances of network view objects from
+ Infoblox NIOS servers. This module manages NIOS C(networkview) objects
+ using the Infoblox WAPI interface over REST.
+ - Updates instances of network view objects on Infoblox NIOS servers.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the name of the network view to add or remove from
+ the system. The name can also be updated by passing a dict
+ containing I(new_name) and I(old_name). See examples.
+ required: true
+ aliases:
+ - network_view
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a new network view
+ community.general.nios_network_view:
+ name: ansible
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment for network view
+ community.general.nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the network view
+ community.general.nios_network_view:
+ name: ansible
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update an existing network view
+ community.general.nios_network_view:
+ name: {new_name: ansible-new, old_name: ansible}
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NETWORK_VIEW
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ ib_spec = dict(
+ name=dict(required=True, aliases=['network_view'], ib_req=True),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NETWORK_VIEW, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
new file mode 100644
index 00000000..bf2afd3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_nsgroup
+short_description: Configure Infoblox DNS nameserver groups
+extends_documentation_fragment:
+- community.general.nios
+
+author:
+ - Erich Birngruber (@ebirn)
+ - Sumit Jaiswal (@sjaiswal)
+description:
+ - Adds and/or removes nameserver groups from Infoblox NIOS servers.
+ This module manages NIOS C(nsgroup) objects using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+options:
+ name:
+ description:
+ - Specifies the name of the NIOS nameserver group to be managed.
+ required: true
+ grid_primary:
+ description:
+ - This host is to be used as primary server in this nameserver group. It must be a grid member.
+ This option is required when setting I(use_external_primary) to C(false).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non-lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements as in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ grid_secondaries:
+ description:
+ - Configures the list of grid member hosts that act as secondary nameservers.
+ This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Provide the name of the grid member to identify the host.
+ required: true
+ enable_preferred_primaries:
+ description:
+ - This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
+ default: false
+ type: bool
+ grid_replicate:
+ description:
+ - Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False)
+ type: bool
+ default: false
+ lead:
+ description:
+ - This flag controls if the grid lead secondary nameserver performs zone transfers to non-lead secondaries.
+ type: bool
+ default: false
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ preferred_primaries:
+ description:
+ - Provide a list of elements as in I(external_primaries) to set the precedence of preferred primary nameservers.
+ type: list
+ elements: dict
+ is_grid_default:
+ description:
+ - If set to C(True) this nsgroup will become the default nameserver group for new zones.
+ type: bool
+ required: false
+ default: false
+ use_external_primary:
+ description:
+ - This flag controls whether the group is using an external primary nameserver.
+ Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
+ type: bool
+ required: false
+ default: false
+ external_primaries:
+ description:
+ - Configures a list of external nameservers (non-members of the grid).
+ This option is required when setting I(use_external_primary) to C(true).
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+ - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ required: false
+ external_secondaries:
+ description:
+ - Allows providing a list of external secondary nameservers that are not members of the grid.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - Configures the IP address of the external nameserver
+ required: true
+ name:
+ description:
+ - Set a label for the external nameserver
+ required: true
+ stealth:
+ description:
+ - Configure the external nameserver as stealth server (without NS record) in the zones.
+ type: bool
+ default: false
+ tsig_key_name:
+ description:
+ - Sets a label for the I(tsig_key) value
+ tsig_key_alg:
+ description:
+ - Provides the algorithm used for the I(tsig_key) in use.
+ choices: ['HMAC-MD5', 'HMAC-SHA256']
+ default: 'HMAC-MD5'
+ tsig_key:
+ description:
+ - Set a DNS TSIG key for the nameserver to secure zone transfers (AXFRs).
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ required: false
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ required: false
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ choices: [present, absent]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Create simple infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Create infoblox nameserver group with external primaries
+ community.general.nios_nsgroup:
+ name: my-example-group
+ use_external_primary: true
+ comment: "this is my example nameserver group"
+ external_primaries: "{{ ext_nameservers }}"
+ grid_secondaries:
+ - name: infoblox-test.example.com
+ lead: True
+ preferred_primaries: "{{ ext_nameservers }}"
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
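+# The ext_nameservers variable above is assumed to be a list shaped like the
+# external_primaries suboptions, for example:
+# ext_nameservers:
+#   - name: ns1.example.com
+#     address: 192.0.2.10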
+
+- name: Delete infoblox nameserver group
+ community.general.nios_nsgroup:
+ name: my-simple-group
+ comment: "this is a simple nameserver group"
+ grid_primary:
+ - name: infoblox-test.example.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP
+
+
+# from infoblox documentation
+# Fields List
+# Field Type Req R/O Base Search
+# comment String N N Y : = ~
+# extattrs Extattr N N N ext
+# external_primaries [struct] N N N N/A
+# external_secondaries [struct] N N N N/A
+# grid_primary [struct] N N N N/A
+# grid_secondaries [struct] N N N N/A
+# is_grid_default Bool N N N N/A
+# is_multimaster Bool N Y N N/A
+# name String Y N Y : = ~
+# use_external_primary Bool N N N N/A
+
+
+def main():
+ '''entrypoint for module execution.'''
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ # cleanup tsig fields
+ def clean_tsig(ext):
+ if 'tsig_key' in ext and not ext['tsig_key']:
+ del ext['tsig_key']
+ if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
+ del ext['tsig_key_name']
+ if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
+ del ext['tsig_key_alg']
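+ # e.g. when an entry's tsig_key is left unset it is dropped here, and the
+ # accompanying tsig_key_alg is dropped with it, so only meaningful TSIG
+ # values reach WAPI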
+
+ def clean_grid_member(member):
+ if member['preferred_primaries']:
+ for ext in member['preferred_primaries']:
+ clean_tsig(ext)
+ if member['enable_preferred_primaries'] is False:
+ del member['enable_preferred_primaries']
+ del member['preferred_primaries']
+ if member['lead'] is False:
+ del member['lead']
+ if member['grid_replicate'] is False:
+ del member['grid_replicate']
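+ # e.g. a grid member left at its defaults, such as
+ # {'name': 'infoblox-test.example.com', 'enable_preferred_primaries': False,
+ # 'grid_replicate': False, 'lead': False, 'preferred_primaries': [], 'stealth': False},
+ # is trimmed down to {'name': 'infoblox-test.example.com', 'stealth': False}
+ # before being sent to WAPI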
+
+ def ext_primaries_transform(module):
+ if module.params['external_primaries']:
+ for ext in module.params['external_primaries']:
+ clean_tsig(ext)
+ return module.params['external_primaries']
+
+ def ext_secondaries_transform(module):
+ if module.params['external_secondaries']:
+ for ext in module.params['external_secondaries']:
+ clean_tsig(ext)
+ return module.params['external_secondaries']
+
+ def grid_primary_preferred_transform(module):
+ for member in module.params['grid_primary']:
+ clean_grid_member(member)
+ return module.params['grid_primary']
+
+ def grid_secondaries_preferred_primaries_transform(module):
+ for member in module.params['grid_secondaries']:
+ clean_grid_member(member)
+ return module.params['grid_secondaries']
+
+ extserver_spec = dict(
+ address=dict(required=True, ib_req=True),
+ name=dict(required=True, ib_req=True),
+ stealth=dict(type='bool', default=False),
+ tsig_key=dict(no_log=True),
+ tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
+ tsig_key_name=dict(required=True)
+ )
+
+ memberserver_spec = dict(
+ name=dict(required=True, ib_req=True),
+ enable_preferred_primaries=dict(type='bool', default=False),
+ grid_replicate=dict(type='bool', default=False),
+ lead=dict(type='bool', default=False),
+ preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
+ stealth=dict(type='bool', default=False),
+ )
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_primary_preferred_transform),
+ grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
+ transform=grid_secondaries_preferred_primaries_transform),
+ external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
+ external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
+ transform=ext_secondaries_transform),
+ is_grid_default=dict(type='bool', default=False),
+ use_external_primary=dict(type='bool', default=False),
+ extattrs=dict(),
+ comment=dict(),
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_NSGROUP, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py
new file mode 100644
index 00000000..96fb175b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_ptr_record.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_ptr_record
+author: "Trebuchet Clement (@clementtrebuchet)"
+short_description: Configure Infoblox NIOS PTR records
+description:
+ - Adds and/or removes instances of PTR record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:ptr) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - The name of the DNS PTR record in FQDN format to add or remove from
+ the system.
+ This field is required only for a PTR object in a forward-mapping zone.
+ required: false
+ view:
+ description:
+ - Sets the DNS view to associate this PTR record with. The DNS
+ view must already be configured on the system
+ required: false
+ aliases:
+ - dns_view
+ ipv4addr:
+ description:
+ - The IPv4 address of the record. Mutually exclusive with I(ipv6addr).
+ aliases:
+ - ipv4
+ ipv6addr:
+ description:
+ - The IPv6 address of the record. Mutually exclusive with I(ipv4addr).
+ aliases:
+ - ipv6
+ ptrdname:
+ description:
+ - The domain name of the DNS PTR record in FQDN format.
+ ttl:
+ description:
+ - Time To Live (TTL) value for the record.
+ A 32-bit unsigned integer that represents the duration, in seconds, that the record is valid (cached).
+ Zero indicates that the record should not be cached.
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance. Maximum 256 characters.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Create a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Delete a PTR Record
+ community.general.nios_ptr_record:
+ ipv4: 192.168.10.1
+ ptrdname: host.ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_PTR_RECORD
+
+
+def main():
+ # Module entry point
+ ib_spec = dict(
+ name=dict(required=False),
+ view=dict(aliases=['dns_view'], ib_req=True),
+ ipv4addr=dict(aliases=['ipv4'], ib_req=True),
+ ipv6addr=dict(aliases=['ipv6'], ib_req=True),
+ ptrdname=dict(ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ mutually_exclusive = [('ipv4addr', 'ipv6addr')]
+ required_one_of = [
+ ['ipv4addr', 'ipv6addr']
+ ]
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ required_one_of=required_one_of)
+
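+ # ipv4addr and ipv6addr are mutually exclusive, so drop the unused address
+ # family from ib_spec before handing the spec to WAPI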
+ if module.params['ipv4addr']:
+ del ib_spec['ipv6addr']
+ elif module.params['ipv6addr']:
+ del ib_spec['ipv4addr']
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_PTR_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py
new file mode 100644
index 00000000..c519c191
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_srv_record.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_srv_record
+author: "Blair Rampling (@brampling)"
+short_description: Configure Infoblox NIOS SRV records
+description:
+ - Adds and/or removes instances of SRV record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:srv) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this SRV record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ port:
+ description:
+ - Configures the port (0-65535) of this SRV record.
+ priority:
+ description:
+ - Configures the priority (0-65535) for this SRV record.
+ target:
+ description:
+ - Configures the target FQDN for this SRV record.
+ weight:
+ description:
+ - Configures the weight (0-65535) for this SRV record.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this SRV record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure an SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Add a comment to an existing SRV record
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+
+- name: Remove an SRV record from the system
+ community.general.nios_srv_record:
+ name: _sip._tcp.service.ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_SRV_RECORD
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ port=dict(type='int', ib_req=True),
+ priority=dict(type='int', ib_req=True),
+ target=dict(ib_req=True),
+ weight=dict(type='int', ib_req=True),
+
+ ttl=dict(type='int'),
+
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_SRV_RECORD, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py
new file mode 100644
index 00000000..0dcdbadb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_txt_record.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_txt_record
+author: "Corey Wanless (@coreywan)"
+short_description: Configure Infoblox NIOS TXT records
+description:
+ - Adds and/or removes instances of TXT record objects from
+ Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox_client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ name:
+ description:
+ - Specifies the fully qualified hostname to add or remove from
+ the system
+ required: true
+ view:
+ description:
+ - Sets the DNS view to associate this TXT record with. The DNS
+ view must already be configured on the system
+ default: default
+ aliases:
+ - dns_view
+ text:
+ description:
+ - Text associated with the record. It can contain up to 255 bytes
+ per substring, up to a total of 512 bytes. To enter leading,
+ trailing, or embedded spaces in the text, add quotes around the
+ text to preserve the spaces.
+ ttl:
+ description:
+ - Configures the TTL to be associated with this TXT record
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+ - name: Ensure a text Record Exists
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: present
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+
+ - name: Ensure a text Record does not exist
+ community.general.nios_txt_record:
+ name: fqdn.txt.record.com
+ text: mytext
+ state: absent
+ view: External
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+
+ ib_spec = dict(
+ name=dict(required=True, ib_req=True),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+ text=dict(ib_req=True),
+ ttl=dict(type='int'),
+ extattrs=dict(type='dict'),
+ comment=dict(),
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ wapi = WapiModule(module)
+ result = wapi.run('record:txt', ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py
new file mode 100644
index 00000000..8a7607fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nios_zone.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nios_zone
+author: "Peter Sprygada (@privateip)"
+short_description: Configure Infoblox NIOS DNS zones
+description:
+ - Adds and/or removes instances of DNS zone objects from
+ Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
+ using the Infoblox WAPI interface over REST.
+requirements:
+ - infoblox-client
+extends_documentation_fragment:
+- community.general.nios
+
+options:
+ fqdn:
+ description:
+ - Specifies the qualified domain name to either add or remove from
+ the NIOS instance based on the configured C(state) value.
+ required: true
+ aliases:
+ - name
+ view:
+ description:
+ - Configures the DNS view name for the configured resource. The
+ specified DNS zone must already exist on the running NIOS instance
+ prior to configuring zones.
+ default: default
+ aliases:
+ - dns_view
+ grid_primary:
+ description:
+ - Configures the grid primary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid primary server
+ required: true
+ grid_secondaries:
+ description:
+ - Configures the grid secondary servers for this zone.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the grid secondary server
+ required: true
+ ns_group:
+ description:
+ - Configures the name server group for this zone. Name server group is
+ mutually exclusive with grid primary and grid secondaries.
+ restart_if_needed:
+ description:
+ - If set to true, causes the NIOS DNS service to restart and load the
+ new zone configuration
+ type: bool
+ zone_format:
+ description:
+ - Create an authoritative reverse-mapping zone, which is an area of network
+ space for which one or more name servers (primary and secondary) have the
+ responsibility to respond to address-to-name queries. Reverse-mapping
+ zones are supported for both IPv4 and IPv6 addresses.
+ default: FORWARD
+ extattrs:
+ description:
+ - Allows for the configuration of Extensible Attributes on the
+ instance of the object. This argument accepts a set of key / value
+ pairs for configuration.
+ comment:
+ description:
+ - Configures a text string comment to be associated with the instance
+ of this object. The provided text string will be configured on the
+ object instance.
+ state:
+ description:
+ - Configures the intended state of the instance of the object on
+ the NIOS server. When this value is set to C(present), the object
+ is configured on the device and when this value is set to C(absent)
+ the value is removed (if necessary) from the device.
+ default: present
+ choices:
+ - present
+ - absent
+'''
+
+EXAMPLES = '''
+- name: Configure a zone on the system using grid primary and secondaries
+ community.general.nios_zone:
+ name: ansible.com
+ grid_primary:
+ - name: gridprimary.grid.com
+ grid_secondaries:
+ - name: gridsecondary1.grid.com
+ - name: gridsecondary2.grid.com
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a zone on the system using a name server group
+ community.general.nios_zone:
+ name: ansible.com
+ ns_group: examplensg
+ restart_if_needed: true
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Configure a reverse mapping zone on the system using IPV6 zone format
+ community.general.nios_zone:
+ name: 100::1/128
+ zone_format: IPV6
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Update the comment and ext attributes for an existing zone
+ community.general.nios_zone:
+ name: ansible.com
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the dns zone
+ community.general.nios_zone:
+ name: ansible.com
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+- name: Remove the reverse mapping dns zone from the system with IPV4 zone format
+ community.general.nios_zone:
+ name: 10.10.10.0/24
+ zone_format: IPV4
+ state: absent
+ provider:
+ host: "{{ inventory_hostname_short }}"
+ username: admin
+ password: admin
+ connection: local
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_ZONE
+
+
+def main():
+ ''' Main entry point for module execution
+ '''
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+
+ ib_spec = dict(
+ fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
+ zone_format=dict(default='FORWARD', ib_req=False),
+ view=dict(default='default', aliases=['dns_view'], ib_req=True),
+
+ grid_primary=dict(type='list', elements='dict', options=grid_spec),
+ grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
+ ns_group=dict(),
+ restart_if_needed=dict(type='bool'),
+
+ extattrs=dict(type='dict'),
+ comment=dict()
+ )
+
+ argument_spec = dict(
+ provider=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+ argument_spec.update(ib_spec)
+ argument_spec.update(WapiModule.provider_spec)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['ns_group', 'grid_primary'],
+ ['ns_group', 'grid_secondaries']
+ ])
+
+ wapi = WapiModule(module)
+ result = wapi.run(NIOS_ZONE, ib_spec)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py
new file mode 100644
index 00000000..60626294
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nmcli.py
@@ -0,0 +1,1115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+- Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+- nmcli
+description:
+ - 'Manage the network devices. Create, modify and manage various connection and device types, e.g., ethernet, team, bond, vlan etc.'
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager'
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+options:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+ - Whether the connection profile can be automatically activated
+ type: bool
+ default: yes
+ conn_name:
+ description:
+ - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+ type: str
+ required: true
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection will only be applicable to this interface name.
+ - A special value of C('*') can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge and vlan.
+ - This parameter defaults to C(conn_name) when left unset.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type C(generic) is added in Ansible 2.5.
+ type: str
+ choices: [ bond, bond-slave, bridge, bridge-slave, ethernet, generic, ipip, sit, team, team-slave, vlan, vxlan ]
+ mode:
+ description:
+ - This is the mode of the connection to create for a bond, team or bridge.
+ type: str
+ choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+ default: balance-rr
+ master:
+ description:
+ - Master (ifname, connection UUID or conn_name) of the bridge, team or bond master connection profile.
+ type: str
+ ip4:
+ description:
+ - The IPv4 address to this interface.
+ - Use the format C(192.0.2.24/24).
+ type: str
+ gw4:
+ description:
+ - The IPv4 gateway for this interface.
+ - Use the format C(192.0.2.1).
+ type: str
+ dns4:
+ description:
+ - A list of up to 3 dns servers.
+ - IPv4 format e.g. to add two IPv4 DNS server addresses, use C(192.0.2.53 198.51.100.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ ip6:
+ description:
+ - The IPv6 address to this interface.
+ - Use the format C(abbe::cafe).
+ type: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format C(2001:db8::1).
+ type: str
+ dns6:
+ description:
+ - A list of up to 3 dns servers.
+ - IPv6 format e.g. to add two IPv6 DNS server addresses, use C(2001:4860:4860::8888 2001:4860:4860::8844).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ mtu:
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+ - Can be used when modifying Team, VLAN, Ethernet (Future plans to implement wifi, pppoe, infiniband)
+ - This parameter defaults to C(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
+ - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to C(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: yes
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - This is only used with bridge - MAC address of the bridge.
+ - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
+ type: bool
+ default: yes
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with IPIP/SIT - parent device of this IPIP/SIT tunnel, can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with IPIP/SIT - IPIP/SIT local IP address.
+ type: str
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### groups_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+  - name: Try nmcli add team-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+##### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+  - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: yes
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+      type: ethernet
+      conn_name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting e.g. MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
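+
+  # The following task is only a sketch, not part of the module's documented
+  # examples; the device name and VLAN ID are placeholders. It shows the
+  # vlanid/vlandev options documented above.
+  - name: Add a VLAN interface on top of an existing device
+    community.general.nmcli:
+      type: vlan
+      conn_name: eth0.100
+      vlandev: eth0
+      vlanid: 100
+      state: present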
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+# - 0 Success - indicates the operation succeeded
+# - 1 Unknown or unspecified error
+# - 2 Invalid user input, wrong nmcli invocation
+# - 3 Timeout expired (see --wait option)
+# - 4 Connection activation failed
+# - 5 Connection deactivation failed
+# - 6 Disconnecting device failed
+# - 7 Connection deletion failed
+# - 8 NetworkManager is not running
+# - 9 nmcli and NetworkManager versions mismatch
+# - 10 Connection, device, or access point does not exist.
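+#
+# As a sketch only (not one of the documented examples above), a failing task
+# can be inspected by registering its result; on failure the module reports
+# the nmcli error text in 'msg' and the exit code in 'rc':
+#
+#  - name: Bring up a connection and keep going on failure
+#    community.general.nmcli:
+#      conn_name: my-eth1
+#      type: ethernet
+#      state: present
+#    register: nmcli_result
+#    ignore_errors: true
+#
+#  - name: Show the nmcli error, if any
+#    ansible.builtin.debug:
+#      var: nmcli_result.msg
+#    when: nmcli_result is failed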
+'''
+
+RETURN = r"""#
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+import re
+
+
+class NmcliModuleError(Exception):
+ pass
+
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+    A subclass may wish to override the following action methods:
+ - create_connection()
+ - delete_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.dns4 = module.params['dns4']
+ self.dns4_search = module.params['dns4_search']
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.dns6 = module.params['dns6']
+ self.dns6_search = module.params['dns6_search']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.primary = module.params['primary']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ self.hairpin = module.params['hairpin']
+ self.path_cost = module.params['path_cost']
+ self.mac = module.params['mac']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
+ self.vxlan_id = module.params['vxlan_id']
+ self.vxlan_local = module.params['vxlan_local']
+ self.vxlan_remote = module.params['vxlan_remote']
+ self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+ self.ip_tunnel_local = module.params['ip_tunnel_local']
+ self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+ self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+ self.dhcp_client_id = module.params['dhcp_client_id']
+
+ if self.ip4:
+ self.ipv4_method = 'manual'
+ else:
+ # supported values for 'ipv4.method': [auto, link-local, manual, shared, disabled]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv4_method = None
+
+ if self.ip6:
+ self.ipv6_method = 'manual'
+ else:
+ # supported values for 'ipv6.method': [ignore, auto, dhcp, link-local, manual, shared]
+ # TODO: add a new module parameter to specify a non 'manual' value
+ self.ipv6_method = None
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ if isinstance(cmd, list):
+ cmd = [to_text(item) for item in cmd]
+ else:
+ cmd = to_text(cmd)
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def connection_options(self, detect_change=False):
+ # Options common to multiple connection types.
+ options = {
+ 'connection.autoconnect': self.autoconnect,
+ }
+
+ # IP address options.
+ if self.ip_conn_type:
+ options.update({
+ 'ipv4.addresses': self.ip4,
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv6.addresses': self.ip6,
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.method': self.ipv6_method,
+ })
+
+ # Layer 2 options.
+ if self.mac_conn_type:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.priority': self.priority,
+ 'bridge.stp': self.stp,
+ })
+ elif self.type == 'bridge-slave':
+ options.update({
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.tunnel_conn_type:
+ options.update({
+ 'ip-tunnel.local': self.ip_tunnel_local,
+ 'ip-tunnel.mode': self.type,
+ 'ip-tunnel.parent': self.ip_tunnel_dev,
+ 'ip-tunnel.remote': self.ip_tunnel_remote,
+ })
+ elif self.type == 'vlan':
+ options.update({
+ 'vlan.id': self.vlanid,
+ 'vlan.parent': self.vlandev,
+ })
+ elif self.type == 'vxlan':
+ options.update({
+ 'vxlan.id': self.vxlan_id,
+ 'vxlan.local': self.vxlan_local,
+ 'vxlan.remote': self.vxlan_remote,
+ })
+
+ # Convert settings values based on the situation.
+ for setting, value in options.items():
+ setting_type = self.settings_type(setting)
+ convert_func = None
+ if setting_type is bool:
+ # Convert all bool options to yes/no.
+ convert_func = self.bool_to_string
+ if detect_change:
+ if setting in ('vlan.id', 'vxlan.id'):
+ # Convert VLAN/VXLAN IDs to text when detecting changes.
+ convert_func = to_text
+ elif setting == self.mtu_setting:
+ # MTU is 'auto' by default when detecting changes.
+ convert_func = self.mtu_to_string
+ elif setting_type is list:
+ # Convert lists to strings for nmcli create/modify commands.
+ convert_func = self.list_to_string
+
+ if callable(convert_func):
+ options[setting] = convert_func(options[setting])
+
+ return options
+
+ @property
+ def ip_conn_type(self):
+ return self.type in (
+ 'bond',
+ 'bridge',
+ 'ethernet',
+ 'generic',
+ 'team',
+ 'vlan',
+ )
+
+ @property
+ def mac_conn_type(self):
+ return self.type == 'bridge'
+
+ @property
+ def mac_setting(self):
+ if self.type == 'bridge':
+ return 'bridge.mac-address'
+ else:
+ return '802-3-ethernet.cloned-mac-address'
+
+ @property
+ def mtu_conn_type(self):
+ return self.type in (
+ 'ethernet',
+ 'team-slave',
+ )
+
+ @property
+ def mtu_setting(self):
+ return '802-3-ethernet.mtu'
+
+ @staticmethod
+ def mtu_to_string(mtu):
+ if not mtu:
+ return 'auto'
+ else:
+ return to_text(mtu)
+
+ @property
+ def slave_conn_type(self):
+ return self.type in (
+ 'bond-slave',
+ 'bridge-slave',
+ 'team-slave',
+ )
+
+ @property
+ def tunnel_conn_type(self):
+ return self.type in (
+ 'ipip',
+ 'sit',
+ )
+
+ @staticmethod
+ def bool_to_string(boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ @staticmethod
+ def list_to_string(lst):
+ return ",".join(lst or [""])
+
+ @staticmethod
+ def settings_type(setting):
+ if setting in ('bridge.stp',
+ 'bridge-port.hairpin-mode',
+ 'connection.autoconnect'):
+ return bool
+ elif setting in ('ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv6.dns',
+ 'ipv6.dns-search'):
+ return list
+ return str
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items():
+ if value is not None:
+ cmd.extend([key, value])
+
+ return self.execute_command(cmd)
+
+ def create_connection(self):
+ status = self.connection_update('create')
+ if self.create_connection_up:
+ status = self.up_connection()
+ return status
+
+ @property
+ def create_connection_up(self):
+ if self.type in ('bond', 'ethernet'):
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ elif self.type == 'team':
+ if (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ return False
+
+ def remove_connection(self):
+ # self.down_connection()
+ cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ return self.connection_update('modify')
+
+ def show_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'show', self.conn_name]
+
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$')
+
+ conn_info = dict()
+ for line in out.splitlines():
+ pair = line.split(':', 1)
+ key = pair[0].strip()
+ key_type = self.settings_type(key)
+ if key and len(pair) > 1:
+ raw_value = pair[1].lstrip()
+ if raw_value == '--':
+ conn_info[key] = None
+ elif key == 'bond.options':
+ # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
+ opts = raw_value.split(',')
+ for opt in opts:
+ alias_pair = opt.split('=', 1)
+ if len(alias_pair) > 1:
+ alias_key = alias_pair[0]
+ alias_value = alias_pair[1]
+ conn_info[alias_key] = alias_value
+ elif key_type == list:
+ conn_info[key] = [s.strip() for s in raw_value.split(',')]
+ else:
+ m_enum = p_enum_value.match(raw_value)
+ if m_enum is not None:
+ value = m_enum.group(1)
+ else:
+ value = raw_value
+ conn_info[key] = value
+
+ return conn_info
+
+ def _compare_conn_params(self, conn_info, options):
+ # See nmcli(1) for details
+ param_alias = {
+ 'type': 'connection.type',
+ 'con-name': 'connection.id',
+ 'autoconnect': 'connection.autoconnect',
+ 'ifname': 'connection.interface-name',
+ 'mac': self.mac_setting,
+ 'master': 'connection.master',
+ 'slave-type': 'connection.slave-type',
+ }
+
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ if not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ elif key in param_alias:
+ real_key = param_alias[key]
+ if real_key in conn_info:
+ current_value = conn_info[real_key]
+ else:
+ # alias parameter does not exist
+ current_value = None
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if sorted(current_value) != sorted(value):
+ changed = True
+ else:
+ if current_value != to_text(value):
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+ # Parsing argument file
+ module = AnsibleModule(
+ argument_spec=dict(
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=['bond', 'bond-slave', 'bridge', 'bridge-slave', 'ethernet', 'generic', 'ipip', 'sit', 'team', 'team-slave', 'vlan', 'vxlan']),
+ ip4=dict(type='str'),
+ gw4=dict(type='str'),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='str'),
+ gw6=dict(type='str'),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool', default=True),
+ path_cost=dict(type='int', default=100),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ nmcli = Nmcli(module)
+
+ (rc, out, err) = (None, '', '')
+ result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="Please specify a name for the connection")
+ # team-slave checks
+ if nmcli.type == 'team-slave':
+ if nmcli.master is None:
+ nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
+ if nmcli.ifname is None:
+ nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
+
+ try:
+ if nmcli.state == 'absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state == 'present':
+ if nmcli.connection_exists():
+ changed, diff = nmcli.is_connection_changed()
+ if module._diff:
+ result['diff'] = diff
+
+ if changed:
+ # modify connection (note: this function is check mode aware)
+ # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+ result['Exists'] = 'Connections do exist so we are modifying them'
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.modify_connection()
+ else:
+ result['Exists'] = 'Connections already exist and no changes made'
+ if module.check_mode:
+ module.exit_json(changed=False, **result)
+ if not nmcli.connection_exists():
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+ except NmcliModuleError as e:
+ module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py
new file mode 100644
index 00000000..6c285797
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+ - Launch a Nomad job.
+ - Stop a Nomad job.
+    - Force start a Nomad job.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+        - Name of the job to delete, stop or start when no job source is given.
+ - Either this or I(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+        - Force the job to start.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or I(name) must be specified.
+ type: str
+ content_format:
+ description:
+ - Type of content of Nomad job.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Create job
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
+- name: Stop job
+ community.general.nomad_job:
+ host: localhost
+ state: absent
+ name: api
+
+- name: Force job to start
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ name: api
+ timeout: 120
+ force_start: true
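+
+# The task below is a sketch rather than one of the module's documented
+# examples: because the module supports check mode and computes a plan diff,
+# a check-mode run previews the change and returns the plan instead of
+# registering the job.
+- name: Preview a job deployment without applying it
+  community.general.nomad_job:
+    host: localhost
+    state: present
+    content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+  check_mode: true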
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ content_format=dict(choices=['hcl', 'json'], default='hcl'),
+ content=dict(type='str', default=None),
+ force_start=dict(type='bool', default=False),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ["name", "content"]
+ ],
+ required_one_of=[
+ ['name', 'content']
+ ]
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ if module.params.get('state') == "present":
+
+ if module.params.get('name') and not module.params.get('force_start'):
+            module.fail_json(msg='To start a job by name, force_start is needed')
+
+ changed = False
+ if module.params.get('content'):
+
+ if module.params.get('content_format') == 'json':
+
+ job_json = module.params.get('content')
+ try:
+ job_json = json.loads(job_json)
+ except ValueError as e:
+ module.fail_json(msg=to_native(e))
+ job = dict()
+ job['job'] = job_json
+ try:
+ job_id = job_json.get('ID')
+ if job_id is None:
+ module.fail_json(msg="Cannot retrieve job with ID None")
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('content_format') == 'hcl':
+
+ try:
+ job_hcl = module.params.get('content')
+ job_json = nomad_client.jobs.parse(job_hcl)
+ job = dict()
+ job['job'] = job_json
+ except nomad.api.exceptions.BadRequestNomadException as err:
+ msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
+ module.fail_json(msg=to_native(msg))
+ try:
+ job_id = job_json.get('ID')
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+ if not plan['Diff'].get('Type') == "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('force_start'):
+
+ try:
+ job = dict()
+ if module.params.get('name'):
+ job_name = module.params.get('name')
+ else:
+ job_name = job_json['Name']
+ job_json = nomad_client.job.get_job(job_name)
+ if job_json['Status'] == 'running':
+ result = job_json
+ else:
+ job_json['Status'] = 'running'
+ job_json['Stop'] = False
+ job['job'] = job_json
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = nomad_client.validate.validate_job(job)
+ if not result.status_code == 200:
+ module.fail_json(msg=to_native(result.text))
+ result = json.loads(result.text)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
+ try:
+            if module.params.get('name') is not None:
+ job_name = module.params.get('name')
+ else:
+ if module.params.get('content_format') == 'hcl':
+ job_json = nomad_client.jobs.parse(module.params.get('content'))
+ job_name = job_json['Name']
+ if module.params.get('content_format') == 'json':
+                    job_json = json.loads(module.params.get('content'))
+ job_name = job_json['Name']
+ job = nomad_client.job.get_job(job_name)
+ if job['Status'] == 'dead':
+ changed = False
+ result = job
+ else:
+ if not module.check_mode:
+ result = nomad_client.job.deregister_job(job_name)
+ else:
+ result = job
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py
new file mode 100644
index 00000000..9e935328
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nomad_job_info.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job_info
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Get Nomad Jobs info
+description:
+ - Get info for one Nomad job.
+ - List Nomad jobs.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+options:
+ name:
+ description:
+        - Name of the job to get info for.
+ - If not specified, lists all jobs.
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Get info for job awx
+  community.general.nomad_job_info:
+ host: localhost
+ name: awx
+ register: result
+
+- name: List Nomad jobs
+  community.general.nomad_job_info:
+ host: localhost
+ register: result
+
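+# The task below is a sketch, not one of the documented examples: the module
+# returns the job list under the 'result' key, so the registered variable can
+# be looped over directly.
+- name: Show the status of every returned job
+  ansible.builtin.debug:
+    msg: "{{ item.ID }} is {{ item.Status }}"
+  loop: "{{ result.result }}"
+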
+'''
+
+RETURN = '''
+result:
+    description: List of dictionaries containing job info.
+ returned: success
+ type: list
+ sample: [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
+ "Affinities": null,
+ "Constraints": null,
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
+ },
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
+ ]
+
+'''
+
+
+import os
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path', default=None),
+ client_key=dict(type='path', default=None),
+ namespace=dict(type='str', default=None),
+ name=dict(type='str', default=None),
+ token=dict(type='str', default=None, no_log=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ changed = False
+ nomad_jobs = list()
+ try:
+ job_list = nomad_client.jobs.get_jobs()
+ for job in job_list:
+ nomad_jobs.append(nomad_client.job.get_job(job.get('ID')))
+ result = nomad_jobs
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('name'):
+ filter = list()
+ try:
+ for job in result:
+ if job.get('ID') == module.params.get('name'):
+ filter.append(job)
+ result = filter
+ if not filter:
+ module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+
+ run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py
new file mode 100644
index 00000000..0f7de471
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nosh.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Thomas Caravia <taca@kadisius.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nosh
+author:
+ - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [ started, stopped, reset, restarted, reloaded ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ C(restarted) will always bounce the service.
+ C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is
+ enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file
+ preference or running state. Mutually exclusive with I(preset). Will take
+ effect prior to I(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+            - Enable or disable the service according to local preferences in C(*.preset) files.
+ Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
+ effect prior to I(state=reset).
+ user:
+ required: false
+ default: 'no'
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than
+ the system-wide service manager.
+requirements:
+ - A system with an active nosh service manager, see Notes for further information.
+notes:
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+'''
+
+EXAMPLES = '''
+- name: Start dnscache if not running
+ community.general.nosh: name=dnscache state=started
+
+- name: Stop mpd, if running
+ community.general.nosh: name=mpd state=stopped
+
+- name: Restart unbound or start it if not already running
+ community.general.nosh:
+ name: unbound
+ state: restarted
+
+- name: Reload fail2ban or start it if not already running
+ community.general.nosh:
+ name: fail2ban
+ state: reloaded
+
+- name: Disable nsd
+ community.general.nosh: name=nsd enabled=no
+
+- name: For package installers, set nginx running state according to local enable settings, preset and reset
+ community.general.nosh: name=nginx preset=True state=reset
+
+- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is
+ community.general.nosh: name=reboot state=started
+
+- name: Using conditionals with the module facts
+ tasks:
+ - name: Obtain information on tinydns service
+ community.general.nosh: name=tinydns
+ register: result
+
+ - name: Fail if service not loaded
+ ansible.builtin.fail: msg="The {{ result.name }} service is not loaded"
+ when: not result.status
+
+ - name: Fail if service is running
+ ansible.builtin.fail: msg="The {{ result.name }} service is running"
+ when: result.status and result.status['DaemontoolsEncoreState'] == "running"
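+
+# The task below is a sketch (not one of the module's documented examples),
+# showing the user option described above: it talks to the calling user's
+# service manager instead of the system-wide one.
+- name: Restart a user-level service
+  community.general.nosh:
+    name: mpd
+    state: restarted
+    user: yes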
+'''
+
+RETURN = '''
+name:
+ description: name used to find the service
+ returned: success
+ type: str
+ sample: "sshd"
+service_path:
+ description: resolved path for the service
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
+enabled:
+ description: whether the service is enabled at system bootstrap
+ returned: success
+ type: bool
+ sample: True
+preset:
+ description: whether the enabled status reflects the one set in the relevant C(*.preset) file
+ returned: success
+ type: bool
+ sample: 'False'
+state:
+ description: service process run state, C(None) if the service is not loaded and will not be started
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
+status:
+ description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: True
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+user:
+ description: whether the user-level service manager is called
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def run_sys_ctl(module, args):
+ sys_ctl = [module.get_bin_path('system-control', required=True)]
+ if module.params['user']:
+ sys_ctl = sys_ctl + ['--user']
+ return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+ (rc, out, err) = run_sys_ctl(module, ['find', service])
+ # fail if service not found
+ if rc != 0:
+ fail_if_missing(module, False, service, msg='host')
+ else:
+ return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+ return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+ return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+ return rc == 0
+
+
+def get_service_status(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+    # will fail if the service is not loaded
+ if err is not None and err:
+ module.fail_json(msg=err)
+ else:
+ json_out = json.loads(to_native(out).strip())
+ status = json_out[service_path] # descend past service path header
+ return status
+
+
+def service_is_running(service_status):
+ return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+ """Enable or disable a service as needed.
+
+ - 'preset' will set the enabled state according to available preset file settings.
+ - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+ These options are set to "mutually exclusive" but the explicit 'enabled' option will
+ have priority if the check is bypassed.
+ """
+
+ # computed prior in control flow
+ preset = result['preset']
+ enabled = result['enabled']
+
+ # preset, effect only if option set to true (no reverse preset)
+ if module.params['preset']:
+ action = 'preset'
+
+ # run preset if needed
+ if preset != module.params['preset']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['preset'] = not preset
+ result['enabled'] = not enabled
+
+ # enabled/disabled state
+ if module.params['enabled'] is not None:
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['enabled'] = not enabled
+ result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+ """Set service running state as needed.
+
+ Takes into account the fact that a service may not be loaded (no supervise directory) in
+ which case it is 'stopped' as far as the service manager is concerned. No status information
+ can be obtained and the service can only be 'started'.
+ """
+ # default to desired state, no action
+ result['state'] = module.params['state']
+ state = module.params['state']
+ action = None
+
+ # computed prior in control flow, possibly modified by handle_enabled()
+ enabled = result['enabled']
+
+ # service not loaded -> not started by manager, no status information
+ if not service_is_loaded(module, service_path):
+ if state in ['started', 'restarted', 'reloaded']:
+ action = 'start'
+ result['state'] = 'started'
+ elif state == 'reset':
+ if enabled:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ result['state'] = None
+ else:
+ result['state'] = None
+
+ # service is loaded
+ else:
+ # get status information
+ result['status'] = get_service_status(module, service_path)
+ running = service_is_running(result['status'])
+
+ if state == 'started':
+ if not running:
+ action = 'start'
+ elif state == 'stopped':
+ if running:
+ action = 'stop'
+ # reset = start/stop according to enabled status
+ elif state == 'reset':
+ if enabled is not running:
+ if running:
+ action = 'stop'
+ result['state'] = 'stopped'
+ else:
+ action = 'start'
+ result['state'] = 'started'
+ # start if not running, 'service' module constraint
+ elif state == 'restarted':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'condrestart'
+ # start if not running, 'service' module constraint
+ elif state == 'reloaded':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'hangup'
+
+ # change state as needed
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ preset=dict(type='bool'),
+ user=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['enabled', 'preset']],
+ )
+
+ service = module.params['name']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': None,
+ }
+
+ # check service can be found (or fail) and get path
+ service_path = get_service_path(module, service)
+
+ # get preliminary service facts
+ result['service_path'] = service_path
+ result['user'] = module.params['user']
+ result['enabled'] = service_is_enabled(module, service_path)
+ result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
+
+ # set enabled state, service need not be loaded
+ if module.params['enabled'] is not None or module.params['preset']:
+ handle_enabled(module, result, service_path)
+
+ # set service running state
+ if module.params['state'] is not None:
+ handle_state(module, result, service_path)
+
+ # get final service status if possible
+ if service_is_loaded(module, service_path):
+ result['status'] = get_service_status(module, service_path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py
new file mode 100644
index 00000000..4c907ea6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/bearychat.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: bearychat
+short_description: Send BearyChat notifications
+description:
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
+ via the Incoming Robot integration.
+author: "Jiangge Zhang (@tonyseek)"
+options:
+ url:
+ type: str
+ description:
+ - BearyChat WebHook URL. This authenticates you to the bearychat
+ service. It looks like
+ C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ required: true
+ text:
+ type: str
+ description:
+ - Message to send.
+ markdown:
+ description:
+ - If C(yes), text will be parsed as markdown.
+ default: 'yes'
+ type: bool
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the
+ default channel selected by the I(url).
+ attachments:
+ type: list
+ elements: dict
+ description:
+            - Define a list of attachments. For more information, see
+              U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
+'''
+
+EXAMPLES = """
+- name: Send notification message via BearyChat
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via BearyChat all options
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+ markdown: no
+ channel: "#ansible"
+ attachments:
+ - title: "Ansible on {{ inventory_hostname }}"
+ text: "May the Force be with you."
+ color: "#ffffff"
+ images:
+ - http://example.com/index.png
+"""
+
+RETURN = """
+msg:
+ description: execution result
+ returned: success
+ type: str
+ sample: "OK"
+"""
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def build_payload_for_bearychat(module, text, markdown, channel, attachments):
+ payload = {}
+ if text is not None:
+ payload['text'] = text
+ if markdown is not None:
+ payload['markdown'] = markdown
+ if channel is not None:
+ payload['channel'] = channel
+ if attachments is not None:
+ payload.setdefault('attachments', []).extend(
+ build_payload_for_bearychat_attachment(
+ module, item.get('title'), item.get('text'), item.get('color'),
+ item.get('images'))
+ for item in attachments)
+ payload = 'payload=%s' % module.jsonify(payload)
+ return payload
+
+
+def build_payload_for_bearychat_attachment(module, title, text, color, images):
+ attachment = {}
+ if title is not None:
+ attachment['title'] = title
+ if text is not None:
+ attachment['text'] = text
+ if color is not None:
+ attachment['color'] = color
+ if images is not None:
+ target_images = attachment.setdefault('images', [])
+ if not isinstance(images, (list, tuple)):
+ images = [images]
+ for image in images:
+ if isinstance(image, dict) and 'url' in image:
+ image = {'url': image['url']}
+ elif hasattr(image, 'startswith') and image.startswith('http'):
+ image = {'url': image}
+ else:
+ module.fail_json(
+ msg="BearyChat doesn't have support for this kind of "
+ "attachment image")
+ target_images.append(image)
+ return attachment
+
+
+def do_notify_bearychat(module, url, payload):
+ response, info = fetch_url(module, url, data=payload)
+ if info['status'] != 200:
+ url_info = urlparse(url)
+ obscured_incoming_webhook = urlunparse(
+ (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+ module.fail_json(
+ msg=" failed to send %s to %s: %s" % (
+ payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+ module = AnsibleModule(argument_spec={
+ 'url': dict(type='str', required=True, no_log=True),
+ 'text': dict(type='str'),
+ 'markdown': dict(default=True, type='bool'),
+ 'channel': dict(type='str'),
+ 'attachments': dict(type='list', elements='dict'),
+ })
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ url = module.params['url']
+ text = module.params['text']
+ markdown = module.params['markdown']
+ channel = module.params['channel']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_bearychat(
+ module, text, markdown, channel, attachments)
+ do_notify_bearychat(module, url, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py
new file mode 100644
index 00000000..c6848238
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/campfire.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+options:
+ subscription:
+ type: str
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ notify:
+ type: str
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
+
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
+'''
+
+try:
+ from html import escape as html_escape
+except ImportError:
+ # Python 2 and Python < 3.2: html.escape is unavailable, fall back to cgi.escape
+ import cgi
+
+ def html_escape(text, quote=True):
+ return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+ response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+ main()
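
A minimal sketch of the two XML bodies the module posts (the optional sound first, then the message), with illustrative values and using html.escape as the module does on Python 3.2+:

from html import escape as html_escape

subscription, room = 'foo', '123'
target_url = 'https://%s.campfirenow.com/room/%s/speak.xml' % (subscription, room)
sound_body = '<message><type>SoundMessage</type><body>%s</body></message>' % html_escape('loggins')
text_body = '<message><body>%s</body></message>' % html_escape('Task completed.')
# Both bodies are POSTed to target_url with HTTP basic auth: username is the API token, password is the literal 'X'.
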
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py
new file mode 100644
index 00000000..13833620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/catapult.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Jonathan Mainguy <jon@soh.re>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# basis of code taken from the ansible twillio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send an SMS / MMS using the Catapult Bandwidth API
+description:
+ - Allows notifications to be sent using SMS / MMS via the Catapult Bandwidth API.
+options:
+ src:
+ type: str
+ description:
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ required: true
+ dest:
+ type: list
+ elements: str
+ description:
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true
+ msg:
+ type: str
+ description:
+ - The contents of the text message (must be 2048 characters or less).
+ required: true
+ media:
+ type: str
+ description:
+ - For MMS messages, a media url to the location of the media to be sent with the message.
+ user_id:
+ type: str
+ description:
+ - User ID from the API account page.
+ required: true
+ api_token:
+ type: str
+ description:
+ - API token from the API account page.
+ required: true
+ api_secret:
+ type: str
+ description:
+ - API secret from the API account page.
+ required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+ - Will return changed even if the media url is wrong.
+ - Will return changed if the destination number is invalid.
+
+'''
+
+EXAMPLES = '''
+- name: Send a mms to multiple users
+ community.general.catapult:
+ src: "+15035555555"
+ dest:
+ - "+12525089000"
+ - "+12018994225"
+ media: "http://example.com/foobar.jpg"
+ msg: "Task is complete"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+- name: Send a sms to a single user
+ community.general.catapult:
+ src: "+15035555555"
+ dest: "+12018994225"
+ msg: "Consider yourself notified"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+'''
+
+RETURN = '''
+changed:
+ description: Whether the API accepted the message.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+ """
+ Send the message
+ """
+ AGENT = "Ansible"
+ URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+ data = {'from': src, 'to': dest, 'text': msg}
+ if media:
+ data['media'] = media
+
+ headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = api_token.replace('\n', '')
+ module.params['url_password'] = api_secret.replace('\n', '')
+
+ return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(required=True),
+ dest=dict(required=True, type='list', elements='str'),
+ msg=dict(required=True),
+ user_id=dict(required=True),
+ api_token=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ media=dict(default=None, required=False),
+ ),
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ msg = module.params['msg']
+ media = module.params['media']
+ user_id = module.params['user_id']
+ api_token = module.params['api_token']
+ api_secret = module.params['api_secret']
+
+ for number in dest:
+ rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+ if info["status"] != 201:
+ body = json.loads(info["body"])
+ fail_msg = body["message"]
+ module.fail_json(msg=fail_msg)
+
+ changed = True
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
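
For reference, a rough sketch of the JSON body send() posts for each destination number; the values are illustrative:

import json

data = {'from': '+15035555555', 'to': '+12018994225', 'text': 'Task is complete'}
data['media'] = 'http://example.com/foobar.jpg'  # only set for MMS
body = json.dumps(data)
# POSTed to https://api.catapult.inetwork.com/v1/users/<user_id>/messages with
# basic auth (api_token / api_secret); anything other than a 201 is reported as a failure.
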
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_spark.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token, required to authenticate to the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id}}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
new file mode 100644
index 00000000..4015c185
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/cisco_webex.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Rusell (@drew-russell)
+notes:
+ - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+options:
+
+ recipient_type:
+ description:
+ - The request parameter you would like to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: yes
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: yes
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+ - Your personal access token, required to authenticate to the Webex Teams API.
+ required: yes
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: yes
+ type: str
+ aliases: ['message']
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+ community.general.cisco_webex:
+ recipient_type: toPersonId
+ recipient_id: "{{ person_id}}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True, aliases=['message'],
+ deprecated_aliases=[dict(name='message', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
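
The payload keys are taken from the module parameters themselves, which is why the same request code serves rooms, person IDs, and e-mail addresses; a minimal sketch with illustrative values:

import json

params = {'recipient_type': 'roomId', 'recipient_id': 'ROOM123',
          'msg_type': 'markdown', 'msg': '**hello**'}
payload = json.dumps({params['recipient_type']: params['recipient_id'],
                      params['msg_type']: params['msg']})
# -> '{"roomId": "ROOM123", "markdown": "**hello**"}', POSTed to https://webexapis.com/v1/messages
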
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py
new file mode 100644
index 00000000..a1842c5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/flowdock.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+ - Send a message to a flowdock team inbox or chat using the push API (see U(https://www.flowdock.com/api/team-inbox) and U(https://www.flowdock.com/api/chat)).
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ type:
+ type: str
+ description:
+ - Whether to post to 'inbox' or 'chat'
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ type: str
+ description:
+ - Content of the message
+ required: true
+ tags:
+ type: str
+ description:
+ - tags of the message, separated by commas
+ required: false
+ external_user_name:
+ type: str
+ description:
+ - (chat only - required) Name of the "user" sending the message
+ required: false
+ from_address:
+ type: str
+ description:
+ - (inbox only - required) Email address of the message sender
+ required: false
+ source:
+ type: str
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API
+ required: false
+ subject:
+ type: str
+ description:
+ - (inbox only - required) Subject line of the message
+ required: false
+ from_name:
+ type: str
+ description:
+ - (inbox only) Name of the message sender
+ required: false
+ reply_to:
+ type: str
+ description:
+ - (inbox only) Email address for replies
+ required: false
+ project:
+ type: str
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization
+ required: false
+ link:
+ type: str
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox", "chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+ module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in ['from_address', 'source', 'subject']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in ['from_name', 'reply_to', 'project', 'link']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+
+if __name__ == '__main__':
+ main()
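
The push API distinguishes the two message types only by endpoint and by which parameters are mandatory; a minimal sketch of that rule, with illustrative values:

msg_type, token = 'inbox', 'AAAAAA'
url = ('https://api.flowdock.com/v1/messages/team_inbox/%s' if msg_type == 'inbox'
       else 'https://api.flowdock.com/v1/messages/chat/%s') % token
required = {'inbox': ['from_address', 'source', 'subject'],
            'chat': ['external_user_name']}[msg_type]  # enforced above via fail_json
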
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py
new file mode 100644
index 00000000..c1816e63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/grove.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: grove
+short_description: Sends a notification to a grove.io channel
+description:
+ - The C(grove) module sends a message for a service to a Grove.io
+ channel.
+options:
+ channel_token:
+ type: str
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ type: str
+ description:
+ - Name of the service (displayed as the "user" in the message)
+ required: false
+ default: ansible
+ message:
+ type: str
+ description:
+ - Message content
+ required: true
+ url:
+ type: str
+ description:
+ - Service URL for the web client
+ required: false
+ icon_url:
+ type: str
+ description:
+ - Icon for the service
+ required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- name: Sends a notification to a grove.io channel
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message: 'deployed {{ target }}'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
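
A minimal sketch of the form-encoded notice do_notify_grove() posts, with illustrative values; urllib.parse.urlencode stands in for the six.moves import used above:

from urllib.parse import urlencode

data = urlencode({'service': 'my-app', 'message': 'deployed web01',
                  'url': 'https://ci.example.com/build/42',
                  'icon_url': 'https://example.com/icon.png'})
# POSTed to https://grove.io/api/notice/<channel_token>/
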
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py
new file mode 100644
index 00000000..06c9fca4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/hipchat.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hipchat
+short_description: Send a message to Hipchat.
+description:
+ - Send a message to a Hipchat room, with options to control the formatting.
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - ID or name of the room.
+ required: true
+ msg_from:
+ type: str
+ description:
+ - Name the message will appear to be sent from. Max length is 15
+ characters - above this it will be truncated.
+ default: Ansible
+ aliases: [from]
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ color:
+ type: str
+ description:
+ - Background color for the message.
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ type: str
+ description:
+ - Message format.
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - If true, a notification will be triggered for users in the room.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ api:
+ type: str
+ description:
+ - API url if using a self-hosted hipchat server. For Hipchat API version
+ 2 use the default URI with C(/v2) instead of C(/v1).
+ default: 'https://api.hipchat.com/v1'
+
+author:
+- Shirou Wakayama (@shirou)
+- Paul Bourdel (@pb8226)
+'''
+
+EXAMPLES = '''
+- name: Send a message to a Hipchat room
+ community.general.hipchat:
+ room: notif
+ msg: Ansible task finished
+
+- name: Send a message to a Hipchat room using Hipchat API version 2
+ community.general.hipchat:
+ api: https://api.hipchat.com/v2/
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: Ansible task finished
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+ '''sending message to hipchat v1 server'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+ '''sending message to hipchat v2 server'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
+ try:
+ if api.find('/v2') != -1:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
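
A rough sketch of the endpoint selection performed in main(), with illustrative values; urllib.request.pathname2url stands in for the six.moves import used above:

from urllib.request import pathname2url

api, room, token = 'https://api.hipchat.com/v2', 'notify', 'OAUTH2_TOKEN'
if api.find('/v2') != -1:
    # v2: JSON body, 'Authorization: Bearer <token>' header, 200 or 204 expected
    url = (api + '/room/{id_or_name}/notification').replace('{id_or_name}', pathname2url(room))
else:
    # v1: form-encoded body, auth token passed as a query parameter, 200 expected
    url = api + '/rooms/message' + '?auth_token=%s' % token
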
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py
new file mode 100644
index 00000000..1c050fc1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/irc.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+ - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+options:
+ server:
+ type: str
+ description:
+ - IRC server name/address
+ default: localhost
+ port:
+ type: int
+ description:
+ - IRC server port number
+ default: 6667
+ nick:
+ type: str
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ default: ansible
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ topic:
+ type: str
+ description:
+ - Set the channel topic
+ color:
+ type: str
+ description:
+ - Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior, the default color is black, not "none").
+ Added 11 more colors in version 2.0.
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ aliases: [colour]
+ channel:
+ type: str
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ nick_to:
+ type: list
+ elements: str
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ key:
+ type: str
+ description:
+ - Channel key
+ passwd:
+ type: str
+ description:
+ - Server password
+ timeout:
+ type: int
+ description:
+ - Timeout to use while waiting for successful registration and join
+ messages; this is to prevent an endless loop
+ default: 30
+ use_ssl:
+ description:
+ - Designates whether TLS/SSL should be used when connecting to the IRC server
+ type: bool
+ default: 'no'
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ type: bool
+ default: 'yes'
+ style:
+ type: str
+ description:
+ - Text style for the message. Note italic does not work on some clients
+ choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ default: none
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to an IRC channel from nick ansible
+ community.general.irc:
+ server: irc.example.net
+ channel: '#t1'
+ msg: Hello world
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+ channel: '#t1'
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+ channel: '#t1'
+ nick_to:
+ - nick1
+ - nick2
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils._text import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+ nick_to = [] if nick_to is None else nick_to
+
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+ try:
+ styletext = stylechoices[style]
+ except Exception:
+ styletext = ""
+
+ try:
+ colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
+ except Exception:
+ colortext = ""
+
+ message = styletext + colortext + msg
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+
+ if passwd:
+ irc.send(to_bytes('PASS %s\r\n' % passwd))
+ irc.send(to_bytes('NICK %s\r\n' % nick))
+ irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+ motd = ''
+ start = time.time()
+ while 1:
+ motd += to_native(irc.recv(1024))
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ time.sleep(0.5)
+
+ if channel:
+ if key:
+ irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+ else:
+ irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+ join = ''
+ start = time.time()
+ while 1:
+ join += to_native(irc.recv(1024))
+ if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ time.sleep(0.5)
+
+ if topic is not None:
+ irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+ time.sleep(1)
+
+ if nick_to:
+ for nick in nick_to:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+ if channel:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+ time.sleep(1)
+ if part:
+ if channel:
+ irc.send(to_bytes('PART %s\r\n' % channel))
+ irc.send(to_bytes('QUIT\r\n'))
+ time.sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list', elements='str'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception as e:
+ module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+
+if __name__ == '__main__':
+ main()
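
A minimal sketch of how send_msg() decorates the text before it is written to the socket, with illustrative values:

colornumbers = {'red': "04"}
stylechoices = {'bold': "\x02"}
# optional style control code, then optional mIRC color code ("\x03" + number), then the text
message = stylechoices['bold'] + "\x03" + colornumbers['red'] + 'Hello world'
# delivered as: PRIVMSG #t1 :<message>\r\n
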
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py
new file mode 100644
index 00000000..68e2c593
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/jabber.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jabber
+short_description: Send a message to jabber user or chat room
+description:
+ - Send a message to jabber
+options:
+ user:
+ type: str
+ description:
+ - User as which to connect
+ required: true
+ password:
+ type: str
+ description:
+ - password for user to connect
+ required: true
+ to:
+ type: str
+ description:
+ - User ID or name of the room; when using a room, use a slash to indicate your nick.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ host:
+ type: str
+ description:
+ - host to connect, overrides user info
+ port:
+ type: int
+ description:
+ - port to connect to, overrides default
+ default: 5222
+ encoding:
+ type: str
+ description:
+ - message encoding
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to a user
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
+
+- name: Send a message to a room
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
+
+- name: Send a message, specifying the host and port
+ community.general.jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
+'''
+
+import time
+import traceback
+
+HAS_XMPP = True
+XMPP_IMP_ERR = None
+try:
+ import xmpp
+except ImportError:
+ XMPP_IMP_ERR = traceback.format_exc()
+ HAS_XMPP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False, default=5222, type='int'),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+ xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
+ # some old servers require this, also the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ join = xmpp.Presence(to=module.params['to'])
+ join.setTag('x', namespace='http://jabber.org/protocol/muc')
+ conn.send(join)
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+ main()
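
The room-versus-chat decision hinges entirely on a '/' in the to parameter; a minimal sketch of that split, factored into a hypothetical helper for illustration:

def split_target(to):
    try:
        target, nick = to.split('/', 1)  # e.g. 'mychaps@conference.example.net/ansiblebot' -> groupchat
    except ValueError:
        target, nick = to, None          # e.g. 'friend@example.net' -> one-to-one chat
    return target, nick
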
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
new file mode 100644
index 00000000..59e0f325
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/logentries_msg.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries.
+description:
+ - Send a message to logentries
+requirements:
+ - "python >= 2.6"
+options:
+ token:
+ type: str
+ description:
+ - Log token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ api:
+ type: str
+ description:
+ - API endpoint
+ default: data.logentries.com
+ port:
+ type: int
+ description:
+ - API endpoint port
+ default: 80
+author: "Jimmy Tang (@jcftang) <jimmy_tang@rapid7.com>"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+ community.general.logentries_msg:
+ token: 00000000-0000-0000-0000-000000000000
+ msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def send_msg(module, token, msg, api, port):
+
+ message = "{0} {1}\n".format(token, msg)
+
+ api_ip = socket.gethostbyname(api)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((api_ip, port))
+ try:
+ if not module.check_mode:
+ s.send(to_bytes(message))  # sockets require bytes on Python 3
+ except Exception as e:
+ module.fail_json(msg="failed to send message, msg=%s" % e)
+ s.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=True),
+ api=dict(type='str', default="data.logentries.com"),
+ port=dict(type='int', default=80)),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ msg = module.params["msg"]
+ api = module.params["api"]
+ port = module.params["port"]
+
+ changed = False
+ try:
+ send_msg(module, token, msg, api, port)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
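
A minimal sketch of the line the module writes over the TCP connection, with an illustrative token:

token = '00000000-0000-0000-0000-000000000000'
line = '{0} {1}\n'.format(token, 'web01 provisioned')
# written to a plain socket connected to data.logentries.com:80
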
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py
new file mode 100644
index 00000000..574f8478
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mail.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments
+ there are from time to time processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make them perform their
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+- Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+options:
+ sender:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ type: str
+ default: root
+ aliases: [ from ]
+ to:
+ description:
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ default: root
+ aliases: [ recipients ]
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: yes
+ type: str
+ aliases: [ msg ]
+ body:
+ description:
+ - The body of the email being sent.
+ type: str
+ username:
+ description:
+ - If SMTP requires username.
+ type: str
+ password:
+ description:
+ - If SMTP requires password.
+ type: str
+ host:
+ description:
+ - The mail server.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The mail server port.
+ - This must be a valid integer between 1 and 65534
+ type: int
+ default: 25
+ attach:
+ description:
+ - A list of pathnames of files to attach to the message.
+ - Attached files will have their content-type set to C(application/octet-stream).
+ type: list
+ default: []
+ headers:
+ description:
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as C(header=value) (see example below).
+ type: list
+ default: []
+ charset:
+ description:
+ - The character set of email being sent.
+ type: str
+ default: utf-8
+ subtype:
+ description:
+ - The minor mime type, can be either C(plain) or C(html).
+ - The major type is always C(text).
+ type: str
+ choices: [ html, plain ]
+ default: plain
+ secure:
+ description:
+ - If C(always), the connection will only send email if the connection is encrypted.
+ If the server doesn't accept the encrypted connection, it will fail.
+ - If C(try), the connection will attempt to set up a secure SSL/TLS session before trying to send.
+ - If C(never), the connection will not attempt to set up a secure SSL/TLS session before sending.
+ - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection before sending.
+ If it is unable to do so, it will fail.
+ type: str
+ choices: [ always, never, starttls, try ]
+ default: try
+ timeout:
+ description:
+ - Sets the timeout in seconds for connection attempts.
+ type: int
+ default: 20
+'''
+
+EXAMPLES = r'''
+- name: Example playbook sending mail to root
+ community.general.mail:
+ subject: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Sending an e-mail using Gmail SMTP servers
+ community.general.mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Send e-mail to a bunch of users, attaching files
+ community.general.mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to:
+ - John Doe <j.d@example.org>
+ - Suzie Something <sue@example.com>
+ cc: Charlie Root <root@localhost>
+ attach:
+ - /etc/group
+ - /tmp/avatar2.png
+ headers:
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
+ charset: us-ascii
+ delegate_to: localhost
+
+- name: Sending an e-mail using the remote machine, not the Ansible controller node
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+
+- name: Sending an e-mail using Legacy SSL to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: always
+
+- name: Sending an e-mail using StartTLS to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+'''
+
+import os
+import smtplib
+import ssl
+import traceback
+from email import encoders
+from email.utils import parseaddr, formataddr, formatdate
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.header import Header
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=25),
+ sender=dict(type='str', default='root', aliases=['from']),
+ to=dict(type='list', default=['root'], aliases=['recipients']),
+ cc=dict(type='list', default=[]),
+ bcc=dict(type='list', default=[]),
+ subject=dict(type='str', required=True, aliases=['msg']),
+ body=dict(type='str'),
+ attach=dict(type='list', default=[]),
+ headers=dict(type='list', default=[]),
+ charset=dict(type='str', default='utf-8'),
+ subtype=dict(type='str', default='plain', choices=['html', 'plain']),
+ secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
+ timeout=dict(type='int', default=20),
+ ),
+ required_together=[['password', 'username']],
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ secure = module.params.get('secure')
+ timeout = module.params.get('timeout')
+
+ code = 0
+ secure_state = False
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
+ try:
+ if secure != 'never':
+ try:
+ if PY3:
+ smtp = smtplib.SMTP_SSL(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP_SSL(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+ secure_state = True
+ except ssl.SSLError as e:
+ if secure == 'always':
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ except Exception:
+ pass
+
+ if not secure_state:
+ if PY3:
+ smtp = smtplib.SMTP(host=host, port=port, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP(timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ if int(code) > 0:
+ if not secure_state and secure in ('starttls', 'try'):
+ if smtp.has_extn('STARTTLS'):
+ try:
+ smtp.starttls()
+ secure_state = True
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+ else:
+ if secure == 'starttls':
+ module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+ if username and password:
+ if smtp.has_extn('AUTH'):
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+ except smtplib.SMTPException:
+ module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+ else:
+ module.fail_json(rc=1, msg="No authentication support on the server at %s:%s" % (host, port))
+
+ if not secure_state and (username and password):
+ module.warn('Username and password were sent without encryption')
+
+ msg = MIMEMultipart(_charset=charset)
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = Header(subject, charset)
+ msg.preamble = "Multipart message"
+
+ for header in headers:
+ # NOTE: Backward compatible with old syntax using '|' as delimiter
+ for hdr in [x.strip() for x in header.split('|')]:
+ try:
+ h_key, h_val = hdr.split('=')
+ h_val = to_native(Header(h_val, charset))
+ msg.add_header(h_key, h_val)
+ except Exception:
+ module.warn("Skipping header '%s', unable to parse" % hdr)
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', 'Ansible mail module')
+
+ addr_list = []
+ for addr in [x.strip() for x in blindcopies]:
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+
+ to_list = []
+ for addr in [x.strip() for x in recipients]:
+ to_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['To'] = ", ".join(to_list)
+
+ cc_list = []
+ for addr in [x.strip() for x in copies]:
+ cc_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['Cc'] = ", ".join(cc_list)
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+ # NOTE: Backward compatibility with the old syntax using space as delimiter is not retained
+ # This breaks filenames with spaces in them :-(
+ for filename in attach_files:
+ try:
+ part = MIMEBase('application', 'octet-stream')
+ with open(filename, 'rb') as fp:
+ part.set_payload(fp.read())
+ encoders.encode_base64(part)
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+ msg.attach(part)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
+ (filename, to_native(e)), exception=traceback.format_exc())
+
+ composed = msg.as_string()
+
+ try:
+ result = smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+ (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+ smtp.quit()
+
+ if result:
+ for key in result:
+ module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+ module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+ module.exit_json(msg='Mail sent successfully', result=result)
+
+
+if __name__ == '__main__':
+ main()
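For reference, a minimal standalone sketch of the opportunistic STARTTLS path the module above follows when secure is 'try' or 'starttls'. This is not the module itself; host, port, and addresses are placeholders taken from the examples:

    import smtplib
    from email.mime.text import MIMEText

    msg = MIMEText("System has been successfully provisioned.", _subtype="plain", _charset="utf-8")
    msg["Subject"] = "Ansible-report"
    msg["From"] = "root@localhost"
    msg["To"] = "john.smith@example.com"

    smtp = smtplib.SMTP("localhost", 25, timeout=20)
    smtp.ehlo()
    if smtp.has_extn("STARTTLS"):      # upgrade only when the server offers it
        smtp.starttls()
        smtp.ehlo()                    # re-identify on the encrypted channel
    smtp.sendmail("root@localhost", ["john.smith@example.com"], msg.as_string())
    smtp.quit()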
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py
new file mode 100644
index 00000000..d94ed2b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/matrix.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: "Jan Christian Grünhage (@jcgruenhage)"
+module: matrix
+short_description: Send notifications to Matrix
+description:
+ - This module sends HTML-formatted notifications to Matrix rooms.
+options:
+ msg_plain:
+ type: str
+ description:
+ - Plain text form of the message to send to Matrix, usually Markdown.
+ required: true
+ msg_html:
+ type: str
+ description:
+ - HTML form of the message to send to Matrix.
+ required: true
+ room_id:
+ type: str
+ description:
+ - ID of the room to send the notification to.
+ required: true
+ hs_url:
+ type: str
+ description:
+ - URL of the homeserver where the client-server API (CS-API) is reachable.
+ required: true
+ token:
+ type: str
+ description:
+ - Authentication token for the API call. If provided, I(user_id) and I(password) are not required.
+ user_id:
+ type: str
+ description:
+ - The user ID of the user to log in as.
+ password:
+ type: str
+ description:
+ - The password to log in with.
+requirements:
+ - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ user_id: "ansible_notification_bot"
+ password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+ from matrix_client.client import MatrixClient
+except ImportError:
+ MATRIX_IMP_ERR = traceback.format_exc()
+ matrix_found = False
+else:
+ matrix_found = True
+
+
+def run_module():
+ module_args = dict(
+ msg_plain=dict(type='str', required=True),
+ msg_html=dict(type='str', required=True),
+ room_id=dict(type='str', required=True),
+ hs_url=dict(type='str', required=True),
+ token=dict(type='str', required=False, no_log=True),
+ user_id=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[['password', 'token']],
+ required_one_of=[['password', 'token']],
+ required_together=[['user_id', 'password']],
+ supports_check_mode=True
+ )
+
+ if not matrix_found:
+ module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+ if module.check_mode:
+ return result
+
+ # create a client object
+ client = MatrixClient(module.params['hs_url'])
+ if module.params['token'] is not None:
+ client.api.token = module.params['token']
+ else:
+ client.login(module.params['user_id'], module.params['password'], sync=False)
+
+ # make sure we are in a given room and return a room object for it
+ room = client.join_room(module.params['room_id'])
+ # send an html formatted messages
+ room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
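For reference, a minimal sketch of the matrix-client calls the module above wraps. The homeserver, credentials, and room ID are placeholders from the examples:

    from matrix_client.client import MatrixClient

    client = MatrixClient("https://matrix.org")
    # with a token, the module sets client.api.token instead of logging in
    client.login("ansible_notification_bot", "secret-password", sync=False)
    room = client.join_room("!12345678:server.tld")
    room.send_html("<b>hello world</b>", "**hello world**")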
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py
new file mode 100644
index 00000000..579cfa5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mattermost.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Benjamin Jolivot <bjolivot@gmail.com>
+# Inspired by slack module :
+# # (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# # (c) 2016, René Moser <mail@renemoser.net>
+# # (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# # (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+ - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+options:
+ url:
+ type: str
+ description:
+ - Mattermost URL (for example, http://mattermost.yourcompany.com).
+ required: true
+ api_key:
+ type: str
+ description:
+ - Mattermost webhook API key. Log into your Mattermost site and go to
+ Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+ This will give you the full URL; I(api_key) is the last part:
+ http://mattermost.example.com/hooks/C(API_KEY)
+ required: true
+ text:
+ type: str
+ description:
+ - Text to send. Note that the module does not handle escaping characters.
+ required: true
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ username:
+ type: str
+ description:
+ - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin, see the Mattermost documentation).
+ default: Ansible
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: https://www.ansible.com/favicon.ico
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: yes
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost all options
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+ channel: notifications
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+"""
+
+RETURN = '''
+payload:
+ description: Mattermost payload
+ returned: success
+ type: str
+webhook_url:
+ description: URL the webhook is sent to
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str', required=True),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+ # init return dict
+ result = dict(changed=False, msg="OK")
+
+ # define webhook
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+ result['webhook_url'] = webhook_url
+
+ # define payload
+ payload = {}
+ for param in ['text', 'channel', 'username', 'icon_url']:
+ if module.params[param] is not None:
+ payload[param] = module.params[param]
+
+ payload = module.jsonify(payload)
+ result['payload'] = payload
+
+ # http headers
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+ # notes:
+ # Nothing is done in check mode: the task will pass even if your server is
+ # down and/or your token is invalid.
+ # If someone finds a good way to check...
+
+ # send request if not in test mode
+ if module.check_mode is False:
+ response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+ # something's wrong
+ if info['status'] != 200:
+ # some problem
+ result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+ module.fail_json(**result)
+
+ # Looks good
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
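For reference, a minimal sketch of the webhook request the module above builds. It uses plain urllib instead of Ansible's fetch_url, and the URL and API key are placeholders:

    import json
    from urllib.request import Request, urlopen

    webhook_url = "http://mattermost.example.com/hooks/my_api_key"   # "{url}/hooks/{api_key}"
    payload = json.dumps({"text": "host01 completed", "username": "Ansible"}).encode("utf-8")
    req = Request(webhook_url, data=payload,
                  headers={"Content-Type": "application/json", "Accept": "application/json"})
    urlopen(req)   # any status other than 200 means the hook rejected the message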
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py
new file mode 100644
index 00000000..0551ab20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/mqtt.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+ - Publish a message on an MQTT topic.
+options:
+ server:
+ type: str
+ description:
+ - MQTT broker address/name
+ default: localhost
+ port:
+ type: int
+ description:
+ - MQTT broker port number
+ default: 1883
+ username:
+ type: str
+ description:
+ - Username to authenticate against the broker.
+ password:
+ type: str
+ description:
+ - Password for C(username) to authenticate against the broker.
+ client_id:
+ type: str
+ description:
+ - MQTT client identifier
+ - If not specified, a value C(hostname + pid) will be used.
+ topic:
+ type: str
+ description:
+ - MQTT topic name
+ required: true
+ payload:
+ type: str
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ qos:
+ type: str
+ description:
+ - QoS (Quality of Service)
+ default: "0"
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+ - Setting this flag causes the broker to retain (i.e. keep) the message so that
+ applications that subsequently subscribe to the topic can receive the last
+ retained message immediately.
+ type: bool
+ default: 'no'
+ ca_cert:
+ type: path
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ aliases: [ ca_certs ]
+ client_cert:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ certfile ]
+ client_key:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ keyfile ]
+ tls_version:
+ description:
+ - Specifies the version of the SSL/TLS protocol to be used.
+ - By default (if the python version supports it) the highest TLS version is
+ detected. If unavailable, TLS v1 is used.
+ type: str
+ choices:
+ - tlsv1.1
+ - tlsv1.2
+requirements: [ mosquitto ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- name: Publish a message on an MQTT topic
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello at {{ ansible_date_time.iso8601 }}'
+ qos: 0
+ retain: False
+ client_id: ans001
+ delegate_to: localhost
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+import os
+import ssl
+import traceback
+import platform
+from distutils.version import LooseVersion
+
+HAS_PAHOMQTT = True
+PAHOMQTT_IMP_ERR = None
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ PAHOMQTT_IMP_ERR = traceback.format_exc()
+ HAS_PAHOMQTT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ tls_map = {}
+
+ try:
+ tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2
+ except AttributeError:
+ pass
+
+ try:
+ tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1
+ except AttributeError:
+ pass
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
+ client_cert=dict(default=None, type='path', aliases=['certfile']),
+ client_key=dict(default=None, type='path', aliases=['keyfile']),
+ tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_cert", None)
+ certfile = module.params.get("client_cert", None)
+ keyfile = module.params.get("client_key", None)
+ tls_version = module.params.get("tls_version", None)
+
+ if client_id is None:
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
+ if payload and payload == 'None':
+ payload = None
+
+ auth = None
+ if username is not None:
+ auth = {'username': username, 'password': password}
+
+ tls = None
+ if ca_certs is not None:
+ if tls_version:
+ tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
+ else:
+ if LooseVersion(platform.python_version()) <= "3.5.2":
+ # Specifying `None` on later versions of python seems sufficient to
+ # instruct python to autonegotiate the SSL/TLS connection. On versions
+ # 3.5.2 and lower though we need to specify the version.
+ #
+ # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
+ # not available until 3.5.3.
+ tls_version = ssl.PROTOCOL_SSLv23
+
+ tls = {
+ 'ca_certs': ca_certs,
+ 'certfile': certfile,
+ 'keyfile': keyfile,
+ 'tls_version': tls_version,
+ }
+
+ try:
+ mqtt.single(
+ topic,
+ payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth,
+ tls=tls
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="unable to publish to MQTT broker %s" % to_native(e),
+ exception=traceback.format_exc()
+ )
+
+ module.exit_json(changed=False, topic=topic)
+
+
+if __name__ == '__main__':
+ main()
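For reference, a minimal sketch of the paho-mqtt publish call the module above ends up making. Broker, topic, credentials, and TLS settings are placeholders:

    import paho.mqtt.publish as mqtt

    mqtt.single(
        "service/ansible/host01",                       # topic
        "Hello at 2021-01-01T00:00:00Z",                # payload; None clears a retained message
        qos=0,
        retain=False,
        client_id="ans001",
        hostname="localhost",
        port=1883,
        auth={"username": "user", "password": "pass"},  # omit for anonymous brokers
        tls=None,                                       # or a dict with ca_certs/certfile/keyfile/tls_version
    )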
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py
new file mode 100644
index 00000000..e6135cc2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/nexmo.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: nexmo
+short_description: Send an SMS via Nexmo
+description:
+ - Send an SMS message via Nexmo.
+author: "Matt Martz (@sivel)"
+options:
+ api_key:
+ type: str
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ type: str
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ type: int
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ type: list
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ type: str
+ description:
+ - Message text to send. Messages longer than 160 characters will be
+ split into multiple messages
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ community.general.nexmo:
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except Exception:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+ msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+if __name__ == '__main__':
+ main()
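For reference, a minimal sketch of the request/response handling the module above performs, using plain urllib rather than fetch_url. The credentials and numbers are the placeholder values from the examples:

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    params = {
        "api_key": "640c8a53",
        "api_secret": "0ce239a6",
        "from": 12345678901,
        "to": 10987654321,
        "text": "host01 completed",
    }
    response = urlopen("https://rest.nexmo.com/sms/json?%s" % urlencode(params))
    for message in json.load(response)["messages"]:
        print(message["status"])       # "0" means that message part was accepted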
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py
new file mode 100644
index 00000000..2574a750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/office_365_connector_card.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+ - Creates Connector Card messages through Office 365 Connectors U(https://dev.outlook.com/Connectors).
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+ - This module is not idempotent; therefore, if the same task is run twice,
+ two Connector Cards will be created.
+options:
+ webhook:
+ type: str
+ description:
+ - The webhook URL is given to you when you create a new Connector.
+ required: true
+ summary:
+ type: str
+ description:
+ - A string used for summarizing card content.
+ - This will be shown as the message subject.
+ - This is required if the text parameter isn't populated.
+ color:
+ type: str
+ description:
+ - Accent color used for branding or indicating status in the card.
+ title:
+ type: str
+ description:
+ - A title for the Connector message. Shown at the top of the message.
+ text:
+ type: str
+ description:
+ - The main text of the card.
+ - This will be rendered below the sender information and optional title,
+ and above any sections or actions present.
+ actions:
+ type: list
+ description:
+ - This array of objects will power the action links
+ found at the bottom of the card.
+ sections:
+ type: list
+ description:
+ - Contains a list of sections to display in the card.
+ - For more information see U(https://dev.outlook.com/Connectors/reference).
+'''
+
+EXAMPLES = """
+- name: Create a simple Connector Card
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ text: 'Hello, World!'
+
+- name: Create a Connector Card with the full format
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ summary: This is the summary property
+ title: This is the **card's title** property
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ color: E81123
+ sections:
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
+ actions:
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its
+ **startGroup** property to true.
+"""
+
+RETURN = """
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
+OFFICE_365_CARD_TYPE = "MessageCard"
+OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
+OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
+
+
+def build_actions(actions):
+ action_items = []
+
+ for action in actions:
+ action_item = snake_dict_to_camel_dict(action)
+ action_items.append(action_item)
+
+ return action_items
+
+
+def build_sections(sections):
+ sections_created = []
+
+ for section in sections:
+ sections_created.append(build_section(section))
+
+ return sections_created
+
+
+def build_section(section):
+ section_payload = dict()
+
+ if 'title' in section:
+ section_payload['title'] = section['title']
+
+ if 'start_group' in section:
+ section_payload['startGroup'] = section['start_group']
+
+ if 'activity_image' in section:
+ section_payload['activityImage'] = section['activity_image']
+
+ if 'activity_title' in section:
+ section_payload['activityTitle'] = section['activity_title']
+
+ if 'activity_subtitle' in section:
+ section_payload['activitySubtitle'] = section['activity_subtitle']
+
+ if 'activity_text' in section:
+ section_payload['activityText'] = section['activity_text']
+
+ if 'hero_image' in section:
+ section_payload['heroImage'] = section['hero_image']
+
+ if 'text' in section:
+ section_payload['text'] = section['text']
+
+ if 'facts' in section:
+ section_payload['facts'] = section['facts']
+
+ if 'images' in section:
+ section_payload['images'] = section['images']
+
+ if 'actions' in section:
+ section_payload['potentialAction'] = build_actions(section['actions'])
+
+ return section_payload
+
+
+def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
+ payload = dict()
+ payload['@context'] = OFFICE_365_CARD_CONTEXT
+ payload['@type'] = OFFICE_365_CARD_TYPE
+
+ if summary is not None:
+ payload['summary'] = summary
+
+ if color is not None:
+ payload['themeColor'] = color
+
+ if title is not None:
+ payload['title'] = title
+
+ if text is not None:
+ payload['text'] = text
+
+ if actions:
+ payload['potentialAction'] = build_actions(actions)
+
+ if sections:
+ payload['sections'] = build_sections(sections)
+
+ payload = module.jsonify(payload)
+ return payload
+
+
+def do_notify_connector_card_webhook(module, webhook, payload):
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ response, info = fetch_url(
+ module=module,
+ url=webhook,
+ headers=headers,
+ method='POST',
+ data=payload
+ )
+
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ elif info['status'] == 400 and module.check_mode:
+ if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
+ else:
+ module.fail_json(
+ msg="failed to send %s as a connector card to Incoming Webhook: %s"
+ % (payload, info['msg'])
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook=dict(required=True, no_log=True),
+ summary=dict(type='str'),
+ color=dict(type='str'),
+ title=dict(type='str'),
+ text=dict(type='str'),
+ actions=dict(type='list'),
+ sections=dict(type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ webhook = module.params['webhook']
+ summary = module.params['summary']
+ color = module.params['color']
+ title = module.params['title']
+ text = module.params['text']
+ actions = module.params['actions']
+ sections = module.params['sections']
+
+ payload = build_payload_for_connector_card(
+ module,
+ summary,
+ color,
+ title,
+ text,
+ actions,
+ sections)
+
+ if module.check_mode:
+ # In check mode, send an empty payload to validate connection
+ check_mode_payload = build_payload_for_connector_card(module)
+ do_notify_connector_card_webhook(module, webhook, check_mode_payload)
+
+ do_notify_connector_card_webhook(module, webhook, payload)
+
+
+if __name__ == '__main__':
+ main()
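For reference, a minimal sketch of the MessageCard payload the module above posts to the Incoming Webhook, using plain urllib instead of fetch_url. The webhook URL and card fields are placeholders:

    import json
    from urllib.request import Request, urlopen

    payload = {
        "@context": "http://schema.org/extensions",   # constants used by the module
        "@type": "MessageCard",
        "summary": "Deployment finished",
        "themeColor": "E81123",
        "title": "Ansible",
        "text": "Hello, World!",
    }
    req = Request(
        "https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID",  # placeholder webhook
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    urlopen(req)   # HTTP 200 means the card was accepted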
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py
new file mode 100644
index 00000000..ab27fd5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushbullet.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+options:
+ api_key:
+ type: str
+ description:
+ - Pushbullet API token.
+ required: true
+ channel:
+ type: str
+ description:
+ - The channel TAG you wish to broadcast a push notification to,
+ as seen on the "My Channels" > "Edit your channel" page at
+ Pushbullet.
+ device:
+ type: str
+ description:
+ - The device NAME you wish to send a push notification to,
+ as seen on the Pushbullet main page.
+ push_type:
+ type: str
+ description:
+ - The type of item you wish to push (a note or a link).
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ type: str
+ description:
+ - Title of the notification.
+ required: true
+ body:
+ type: str
+ description:
+ - Body of the notification, for example details of the fault you are alerting about.
+
+notes:
+ - Requires the pushbullet.py Python package on the remote host.
+ You can install it via pip with C(pip install pushbullet.py).
+ See U(https://github.com/randomchars/pushbullet.py)
+'''
+
+EXAMPLES = '''
+- name: Sends a push notification to a device
+ community.general.pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+- name: Sends a link to a device
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ device: Chrome
+ push_type: link
+ title: Ansible Documentation
+ body: https://docs.ansible.com/
+
+- name: Sends a push notification to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: "Broadcasting a message to the #my-awesome-channel folks"
+
+- name: Sends a push notification with title and body to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: ALERT! Signup service is down
+ body: Error rate on signup service is over 90% for more than 2 minutes
+'''
+
+import traceback
+
+PUSHBULLET_IMP_ERR = None
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ PUSHBULLET_IMP_ERR = traceback.format_exc()
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
+ ),
+ mutually_exclusive=(
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
+
+ if not pushbullet_found:
+ module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+
+if __name__ == '__main__':
+ main()
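For reference, a minimal sketch of the pushbullet.py calls the module above wraps. The API key and device name are placeholders from the examples:

    from pushbullet import PushBullet

    pb = PushBullet("ABC123abc123ABC123abc123ABC123ab")   # raises InvalidKeyError on a bad key
    devices = dict((d.nickname, d) for d in pb.devices)   # same nickname lookup the module does
    target = devices["Chrome"]
    target.push_note("You may see this on Google Chrome", "Body text goes here")
    # for push_type=link the module calls target.push_link(title, url, body) instead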
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py
new file mode 100644
index 00000000..7f73592a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/pushover.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# Copyright (c) 2019, Bernd Arnold <wopfel@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pushover
+short_description: Send notifications via U(https://pushover.net)
+description:
+ - Send notifications via Pushover to a subscriber list of devices and email
+ addresses. Requires the Pushover app on devices.
+notes:
+ - You will require a pushover.net account to use this module. However, no account
+ is required to receive messages.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ type: str
+ description:
+ - Pushover-issued token identifying your Pushover app.
+ required: true
+ user_key:
+ type: str
+ description:
+ - Pushover-issued authentication key for your user.
+ required: true
+ title:
+ type: str
+ description:
+ - Message title.
+ required: false
+ pri:
+ type: str
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+ default: '0'
+ choices: [ '-2', '-1', '0', '1', '2' ]
+ device:
+ type: str
+ description:
+ - A device the message should be sent to. Multiple devices can be specified, separated by a comma.
+ required: false
+ version_added: 1.2.0
+
+author:
+ - "Jim Richardson (@weaselkeeper)"
+ - "Bernd Arnold (@wopfel)"
+'''
+
+EXAMPLES = '''
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} is acting strange ...'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ title: 'Alert!'
+ msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic'
+ pri: 1
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} has been lost somewhere'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ device: admins-iPhone
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg, title, device):
+ ''' Do, whatever it is, we do. '''
+
+ url = '%s/1/messages.json' % (self.base_uri)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+
+ if title is not None:
+ options = dict(options,
+ title=title)
+
+ if device is not None:
+ options = dict(options,
+ device=device)
+
+ data = urlencode(options)
+
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ title=dict(type='str'),
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ device=dict(type='str'),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+ except Exception:
+ module.fail_json(msg='Unable to send msg via pushover')
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+ main()
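For reference, a minimal sketch of the form-encoded POST the Pushover class above sends. The token, user key, and message are placeholders from the examples:

    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    data = urlencode({
        "token": "wxfdksl",                    # app_token
        "user": "baa5fe97f2c5ab3ca8f0bb59",    # user_key
        "message": "host01 is acting strange ...",
        "priority": "0",
    }).encode("ascii")
    req = Request("https://api.pushover.net/1/messages.json", data=data,
                  headers={"Content-type": "application/x-www-form-urlencoded"})
    urlopen(req)   # a non-200 status makes the module raise and fail the task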
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py
new file mode 100644
index 00000000..13a93dd8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/rocketchat.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+ - The domain for your environment without protocol (for example,
+ C(example.com) or C(chat.example.com)).
+ required: true
+ token:
+ type: str
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ type: str
+ description:
+ - Specify the protocol used to send notification messages before the webhook URL (that is, http or https).
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ type: str
+ description:
+ - Message to be sent.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+ specified during the creation of the webhook.
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. The representation for the available emojis can be
+ obtained from Rocket Chat (for example C(:thumbsup:)). If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors. Use the default of C(normal) to not send a custom color bar at the start of the message.
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments.
+'''
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Rocket Chat all options
+ community.general.rocketchat:
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+ delegate_to: localhost
+
+- name: Use the attachments API
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: 'load average: 0,74, 0,66, 0,63'
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+ delegate_to: localhost
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: bool
+ sample: false
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ if attachments is not None:
+ for attachment in attachments:
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+ payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False),
+ channel=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
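For reference, a minimal sketch of the webhook POST the module above builds (note the form-style "payload=" prefix in front of the JSON). It uses plain urllib instead of fetch_url; the domain and token are placeholders from the examples:

    import json
    from urllib.request import Request, urlopen

    webhook = "https://chat.example.com/hooks/thetoken/generatedby/rocketchat"  # protocol://domain/hooks/token
    body = "payload=" + json.dumps({"text": "host01 completed", "channel": "#ansible"})
    urlopen(Request(webhook, data=body.encode("utf-8")))   # HTTP 200 means the message was posted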
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py
new file mode 100644
index 00000000..1c66adf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/say.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Michael DeHaan <michael@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak.
+description:
+ - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+ - If you like this module, you may also be interested in the osx_say callback plugin.
+ - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+options:
+ msg:
+ type: str
+ description:
+ What to say
+ required: true
+ voice:
+ type: str
+ description:
+ What voice to use
+ required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+ community.general.say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
+'''
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+ cmd = [executable, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+ possibles = ('say', 'espeak', 'espeak-ng')
+
+ if platform.system() != 'Darwin':
+ # A 'say' binary may still be available, but it might be the GNUstep tool, which doesn't support the 'voice' parameter
+ voice = None
+
+ for possible in possibles:
+ executable = module.get_bin_path(possible)
+ if executable:
+ break
+ else:
+ module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+ if module.check_mode:
+ module.exit_json(msg=msg, changed=False)
+
+ say(module, executable, msg, voice)
+
+ module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+ main()
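For reference, a minimal sketch of the command line the module above builds and runs, using subprocess instead of AnsibleModule.run_command. The voice and message are placeholders from the example:

    import subprocess

    def speak(msg, voice=None, executable="say"):   # 'espeak' or 'espeak-ng' on Linux hosts
        cmd = [executable, msg]
        if voice:
            cmd.extend(("-v", voice))
        subprocess.check_call(cmd)                  # same argument list the module passes to run_command

    speak("host01 is all done", voice="Zarvox")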
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py
new file mode 100644
index 00000000..67132771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/sendgrid.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
+requirements:
+ - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
+options:
+ username:
+ type: str
+ description:
+ - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ password:
+ type: str
+ description:
+ - Password that corresponds to the username.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ from_address:
+ type: str
+ description:
+ - The address in the "from" field for the email.
+ required: true
+ to_addresses:
+ type: list
+ description:
+ - A list with one or more recipient email addresses.
+ required: true
+ subject:
+ type: str
+ description:
+ - The desired subject for the email.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Sendgrid API key to use instead of username/password.
+ cc:
+ type: list
+ description:
+ - A list of email addresses to cc.
+ bcc:
+ type: list
+ description:
+ - A list of email addresses to bcc.
+ attachments:
+ type: list
+ description:
+ - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
+ from_name:
+ type: str
+ description:
+      - The name you want to appear in the from field, e.g. 'John Doe'.
+ html_body:
+ description:
+ - Whether the body is html content that should be rendered.
+ type: bool
+ default: 'no'
+ headers:
+ type: dict
+ description:
+ - A dict to pass on as headers.
+ body:
+ type: str
+ description:
+ - The e-mail body content.
+ required: yes
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = r'''
+- name: Send an email to a single recipient that the deployment was successful
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+- name: Send an email to more than one recipient that the build failed
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
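+
+# A minimal sketch using an API key instead of username/password; this path
+# assumes the sendgrid Python library (1.6.22 or lower) is installed, and
+# sendgrid_api_key is a placeholder variable.
+- name: Send an email using an API key
+  community.general.sendgrid:
+    api_key: "{{ sendgrid_api_key }}"
+    from_address: "ansible@mycompany.com"
+    to_addresses:
+      - "ops@mycompany.com"
+    subject: "Deployment success."
+    body: "The most recent Ansible deployment was successful."
+  delegate_to: localhost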
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+SENDGRID_IMP_ERR = None
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ SENDGRID_IMP_ERR = traceback.format_exc()
+ HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
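+    # Two code paths: without the sendgrid library, fall back to the raw
+    # SendGrid v2 HTTP API (username/password only); with the library, use
+    # SendGridClient, which also supports api_key, cc, bcc, attachments, etc.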
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
+ encoded_data = urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ recipient = to_bytes(recipient, errors='surrogate_or_strict')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+ # Remove this check when adding Sendgrid API v3 support
+ if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
+ module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.")
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+        message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
+# =======================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together=[['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+ if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ reason = 'when using any of the following arguments: ' \
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments'
+ module.fail_json(msg=missing_required_lib('sendgrid', reason=reason),
+ exception=SENDGRID_IMP_ERR)
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py
new file mode 100644
index 00000000..946fc9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/slack.py
@@ -0,0 +1,487 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Lee Goolsbee <lgoolsbee@atlassian.com>
+# (c) 2020, Michal Middleton <mm.404@icloud.com>
+# (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# (c) 2016, René Moser <mail@renemoser.net>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+  - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration.
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+      - Slack (sub)domain for your environment without protocol (for example
+        C(example.slack.com)). In 1.8 and beyond, this is deprecated and may
+        be ignored. See the token documentation for more information.
+ token:
+ type: str
+ description:
+ - Slack integration token. This authenticates you to the slack service.
+ Make sure to use the correct type of token, depending on what method you use.
+ - "Webhook token:
+ Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+ 1.8 and above, ansible adapts to the new slack API where tokens look
+ like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+ are in the new format then slack will ignore any value of domain. If
+ the token is in the old format the domain is required. Ansible has no
+ control of when slack will get rid of the old API. When slack does
+ that the old format will stop working. ** Please keep in mind the tokens
+ are not the API tokens but are the webhook tokens. In slack these are
+ found in the webhook URL which are obtained under the apps and integrations.
+ The incoming webhooks can be added in that area. In some cases this may
+ be locked by your Slack admin and you must request access. It is there
+ that the incoming webhooks can be added. The key is on the end of the
+ URL given to you in that section."
+ - "WebAPI token:
+ Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+ or C(xoxa-), eg. C(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive thread_id.
+ See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+ required: true
+ msg:
+ type: str
+ description:
+ - Message to send. Note that the module does not handle escaping characters.
+ Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+ See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ thread_id:
+ description:
+      - Optional. Timestamp of parent message to thread this message. See U(https://api.slack.com/docs/message-threading).
+ type: str
+ message_id:
+ description:
+ - Optional. Message ID to edit, instead of posting a new message.
+ Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+ type: str
+ version_added: 1.2.0
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+      - URL for the message sender's icon (default C(https://www.ansible.com/favicon.ico)).
+ default: https://www.ansible.com/favicon.ico
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+ (if I(icon_emoji) is set, I(icon_url) will not be used)
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ type: str
+ description:
+ - Setting for the message parser at Slack
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
+ - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
+ - Specifying value in hex is supported since Ansible 2.8.
+ default: 'normal'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/docs/attachments).
+ blocks:
+ description:
+ - Define a list of blocks. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/block-kit).
+ type: list
+ elements: dict
+ version_added: 1.0.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Slack all options
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ thread_id: '1539917263.000100'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ parse: 'none'
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+
+- name: Insert a color bar in front of the message with valid hex color value
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: 'This message uses color in hex value'
+ color: '#00aacc'
+ username: ''
+ icon_url: ''
+
+- name: Use the attachments API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+
+- name: Use the blocks API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ blocks:
+ - type: section
+ text:
+ type: mrkdwn
+ text: |-
+ *System load*
+ Display my system load on host A and B
+ - type: context
+ elements:
+ - type: mrkdwn
+ text: |-
+ *System A*
+ load average: 0,74, 0,66, 0,63
+ - type: mrkdwn
+ text: |-
+ *System B*
+ load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: We sent this message using <https://www.ansible.com|Ansible>!
+
+- name: Send a message with angle brackets and ampersands
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
+
+- name: Initial Threaded Slack message
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ msg: 'Starting a thread with my initial post.'
+ register: slack_response
+- name: Add more info to thread
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ thread_id: "{{ slack_response['ts'] }}"
+ color: good
+ msg: 'And this is my threaded response!'
+
+- name: Send a message to be edited later on
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: '#ansible'
+ msg: Deploying something...
+ register: slack_response
+- name: Edit message
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: "{{ slack_response.channel }}"
+ msg: Deployment complete!
+ message_id: "{{ slack_response.ts }}"
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+
+# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
+# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
+escape_table = {
+ '"': "\"",
+ "'": "\'",
+}
+
+
+def is_valid_hex_color(color_choice):
+ if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
+ return True
+ return False
+
+
+def escape_quotes(text):
+ '''Backslash any quotes within text.'''
+ return "".join(escape_table.get(c, c) for c in text)
+
+
+def recursive_escape_quotes(obj, keys):
+ '''Recursively escape quotes inside supplied keys inside block kit objects'''
+ if isinstance(obj, dict):
+ escaped = {}
+ for k, v in obj.items():
+ if isinstance(v, str) and k in keys:
+ escaped[k] = escape_quotes(v)
+ else:
+ escaped[k] = recursive_escape_quotes(v, keys)
+ elif isinstance(obj, list):
+ escaped = [recursive_escape_quotes(v, keys) for v in obj]
+ else:
+ escaped = obj
+ return escaped
+
+
+def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=escape_quotes(text))
+ elif text is not None:
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])])
+ if channel is not None:
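+        # Channel names already prefixed with '#', '@' or a raw ID starting
+        # with 'C0' are passed through; anything else gets '#' prepended.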
+ if channel.startswith(('#', '@', 'C0')):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if thread_id is not None:
+ payload['thread_ts'] = thread_id
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+ if message_id is not None:
+ payload['ts'] = message_id
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ if attachments is not None:
+ attachment_keys_to_escape = [
+ 'title',
+ 'text',
+ 'author_name',
+ 'pretext',
+ 'fallback',
+ ]
+ for attachment in attachments:
+ for key in attachment_keys_to_escape:
+ if key in attachment:
+ attachment[key] = escape_quotes(attachment[key])
+
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+
+ payload['attachments'].append(attachment)
+
+ if blocks is not None:
+ block_keys_to_escape = [
+ 'text',
+ 'alt_text'
+ ]
+ payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape)
+
+ return payload
+
+
+def get_slack_message(module, domain, token, channel, ts):
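+    # Fetch the single existing message matching the given timestamp via
+    # conversations.history, so changes can be detected before editing.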
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + token
+ }
+ qs = urlencode({
+ 'channel': channel,
+ 'ts': ts,
+ 'limit': 1,
+ 'inclusive': 'true',
+ })
+ url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to get slack message")
+ data = module.from_json(response.read())
+ if len(data['messages']) < 1:
+ module.fail_json(msg="no messages matching ts: %s" % ts)
+ if len(data['messages']) > 1:
+ module.fail_json(msg="more than 1 message matching ts: %s" % ts)
+ return data['messages'][0]
+
+
+def do_notify_slack(module, domain, token, payload):
+ use_webapi = False
+ if token.count('/') >= 2:
+ # New style webhook token
+ slack_uri = SLACK_INCOMING_WEBHOOK % (token)
+ elif re.match(r'^xox[abp]-\S+$', token):
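+        # WebAPI token (xoxa-/xoxb-/xoxp-): edit via chat.update when the
+        # payload carries a 'ts', otherwise post via chat.postMessage.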
+ slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ use_webapi = True
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form "
+ "XXXX/YYYY/ZZZZ in your playbook")
+ slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+ if use_webapi:
+ headers['Authorization'] = 'Bearer ' + token
+
+ data = module.jsonify(payload)
+ response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data)
+
+ if info['status'] != 200:
+ if use_webapi:
+ obscured_incoming_webhook = slack_uri
+ else:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
+
+ # each API requires different handling
+ if use_webapi:
+ return module.from_json(response.read())
+ else:
+ return {'webhook': 'ok'}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=False, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ thread_id=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal'),
+ attachments=dict(type='list', required=False, default=None),
+ blocks=dict(type='list', elements='dict'),
+ message_id=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ thread_id = module.params['thread_id']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+ blocks = module.params['blocks']
+ message_id = module.params['message_id']
+
+ color_choices = ['normal', 'good', 'warning', 'danger']
+ if color not in color_choices and not is_valid_hex_color(color):
+ module.fail_json(msg="Color value specified should be either one of %r "
+ "or any valid hex value with length 3 or 6." % color_choices)
+
+ changed = True
+
+ # if updating an existing message, we can check if there's anything to update
+ if message_id is not None:
+ changed = False
+ msg = get_slack_message(module, domain, token, channel, message_id)
+ for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
+ if msg.get(key) != module.params.get(key):
+ changed = True
+ break
+ # if check mode is active, we shouldn't do anything regardless.
+ # if changed=False, we don't need to do anything, so don't do it.
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+ elif module.check_mode:
+ module.exit_json(changed=changed)
+
+ payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id)
+ slack_response = do_notify_slack(module, domain, token, payload)
+
+ if 'ok' in slack_response:
+ # Evaluate WebAPI response
+ if slack_response['ok']:
+ # return payload as a string for backwards compatibility
+ payload_json = module.jsonify(payload)
+ module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+ api=slack_response, payload=payload_json)
+ else:
+ module.fail_json(msg="Slack API error", error=slack_response['error'])
+ else:
+ # Exit with plain OK from WebHook, since we don't have more information
+ # If we get 200 from webhook, the only answer is OK
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py
new file mode 100644
index 00000000..7f4f899f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/syslogger.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syslogger
+short_description: Log messages in the syslog
+description:
+ - Uses syslog to add log entries to the host.
+options:
+ msg:
+ type: str
+ description:
+ - This is the message to place in syslog.
+ required: True
+ priority:
+ type: str
+ description:
+ - Set the log priority.
+ choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
+ default: "info"
+ facility:
+ type: str
+ description:
+ - Set the log facility.
+ choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
+ "uucp", "cron", "syslog", "local0", "local1", "local2",
+ "local3", "local4", "local5", "local6", "local7" ]
+ default: "daemon"
+ log_pid:
+ description:
+ - Log the PID in brackets.
+ type: bool
+ default: False
+ ident:
+ description:
+      - Specify the name of the application sending the log to syslog.
+ type: str
+ default: 'ansible_syslogger'
+ version_added: '0.2.0'
+author:
+ - Tim Rightnour (@garbled1)
+'''
+
+EXAMPLES = r'''
+- name: Simple Usage
+ community.general.syslogger:
+ msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+ community.general.syslogger:
+ msg: "Hello from Ansible"
+ priority: "err"
+ facility: "user"
+ log_pid: true
+
+- name: Specify the name of application which is sending log message
+ community.general.syslogger:
+ ident: "MyApp"
+ msg: "I want to believe"
+ priority: "alert"
+'''
+
+RETURN = r'''
+ident:
+  description: Name of the application sending the message to syslog
+ returned: always
+ type: str
+ sample: "ansible_syslogger"
+ version_added: '0.2.0'
+priority:
+ description: Priority level
+ returned: always
+ type: str
+ sample: "daemon"
+facility:
+ description: Syslog facility
+ returned: always
+ type: str
+ sample: "info"
+log_pid:
+ description: Log PID status
+ returned: always
+ type: bool
+ sample: True
+msg:
+ description: Message sent to syslog
+ returned: always
+ type: str
+ sample: "Hello from Ansible"
+'''
+
+import syslog
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_facility(facility):
+ return {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'syslog': syslog.LOG_SYSLOG,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+ }.get(facility, syslog.LOG_DAEMON)
+
+
+def get_priority(priority):
+ return {
+ 'emerg': syslog.LOG_EMERG,
+ 'alert': syslog.LOG_ALERT,
+ 'crit': syslog.LOG_CRIT,
+ 'err': syslog.LOG_ERR,
+ 'warning': syslog.LOG_WARNING,
+ 'notice': syslog.LOG_NOTICE,
+ 'info': syslog.LOG_INFO,
+ 'debug': syslog.LOG_DEBUG
+ }.get(priority, syslog.LOG_INFO)
+
+
+def main():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ ident=dict(type='str', default='ansible_syslogger'),
+ msg=dict(type='str', required=True),
+ priority=dict(type='str', required=False,
+ choices=["emerg", "alert", "crit", "err", "warning",
+ "notice", "info", "debug"],
+ default='info'),
+ facility=dict(type='str', required=False,
+ choices=["kern", "user", "mail", "daemon", "auth",
+ "lpr", "news", "uucp", "cron", "syslog",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"],
+ default='daemon'),
+ log_pid=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ )
+
+ result = dict(
+ changed=False,
+ ident=module.params['ident'],
+ priority=module.params['priority'],
+ facility=module.params['facility'],
+ log_pid=module.params['log_pid'],
+ msg=module.params['msg']
+ )
+
+ # do the logging
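+    # Note: logging is not idempotent; every successful call writes a new
+    # entry to syslog and reports changed=True.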
+ try:
+ syslog.openlog(module.params['ident'],
+ syslog.LOG_PID if module.params['log_pid'] else 0,
+ get_facility(module.params['facility']))
+ syslog.syslog(get_priority(module.params['priority']),
+ module.params['msg'])
+ syslog.closelog()
+ result['changed'] = True
+
+ except Exception as exc:
+ module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py
new file mode 100644
index 00000000..c1ef841c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/telegram.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Module for sending notifications via Telegram
+
+description:
+    - Send notifications via a Telegram bot, to a verified group or user.
+notes:
+    - You will need a Telegram account and a Telegram bot to use this module.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ msg_format:
+ type: str
+ description:
+      - Message format. Formatting options C(markdown) and C(html) are described in
+        the Telegram API docs (U(https://core.telegram.org/bots/api#formatting-options)).
+        If C(plain) is set, the message will not be formatted.
+ default: plain
+ choices: [ "plain", "markdown", "html" ]
+ token:
+ type: str
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ chat_id:
+ type: str
+ description:
+ - Telegram group or user chat_id
+ required: true
+
+'''
+
+EXAMPLES = """
+
+- name: Send a message to chat in playbook
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ chat_id: 000000
+ msg: Ansible task finished
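+
+# A minimal additional sketch using markdown formatting; the token and
+# chat_id values are placeholders.
+- name: Send a markdown-formatted message
+  community.general.telegram:
+    token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+    chat_id: 000000
+    msg_format: markdown
+    msg: '*Ansible* task finished'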
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: str
+ sample: "Ansible task finished"
+telegram_error:
+  description: Error message returned by the Telegram API
+ returned: failure
+ type: str
+ sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ chat_id=dict(type='str', required=True, no_log=True),
+ msg_format=dict(type='str', required=False, default='plain',
+ choices=['plain', 'markdown', 'html']),
+ msg=dict(type='str', required=True)),
+ supports_check_mode=True
+ )
+
+ token = quote(module.params.get('token'))
+ chat_id = quote(module.params.get('chat_id'))
+ msg_format = quote(module.params.get('msg_format'))
+ msg = quote(module.params.get('msg'))
+
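+    # Build the Bot API sendMessage URL; parse_mode is only appended for
+    # markdown/html so that 'plain' messages are sent unformatted.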
+ url = 'https://api.telegram.org/bot' + token + \
+ '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+ if msg_format in ('markdown', 'html'):
+ url += '&parse_mode=' + msg_format
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ body = json.loads(info['body'])
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']),
+ telegram_error=body['description'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py
new file mode 100644
index 00000000..5ec995f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/twilio.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+ - This module is non-idempotent because it sends a text message through the
+   external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ type: str
+ description:
+      User's Twilio account SID, found on the account page.
+ required: true
+ auth_token:
+ type: str
+ description: user's Twilio authentication token
+ required: true
+ msg:
+ type: str
+ description:
+ the body of the text message
+ required: true
+ to_numbers:
+ type: list
+ description:
+ one or more phone numbers to send the text message to,
+ format +15551112222
+ required: true
+ aliases: [ to_number ]
+ from_number:
+ type: str
+ description:
+ the Twilio number to send the text message from, format +15551112222
+ required: true
+ media_url:
+ type: str
+ description:
+ a URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: All servers with webserver role are now configured.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: This server configuration is now complete.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15553258899
+ to_numbers:
+ - +15551113232
+ - +12025551235
+ - +19735559010
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: Deployment complete!
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ media_url: https://demo.twilio.com/logo.png
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_numbers=dict(required=True, aliases=['to_number'], type='list'),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_numbers = module.params['to_numbers']
+ media_url = module.params['media_url']
+
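+    # One API call per recipient; a failure for any number aborts the task.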
+ for number in to_numbers:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = module.from_json(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py
new file mode 100644
index 00000000..6f8e4e8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/notification/typetalk.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: typetalk
+short_description: Send a message to Typetalk
+description:
+  - Send a message to Typetalk using the Typetalk API.
+options:
+ client_id:
+ type: str
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ type: str
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ type: int
+ description:
+ - topic id to post message
+ required: true
+ msg:
+ type: str
+ description:
+ - message body
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to typetalk
+ community.general.typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
+ data = urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
+def get_access_token(module, client_id, client_secret):
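+    # OAuth2 client-credentials grant scoped to topic.post; the returned
+    # access token is later sent as a Bearer token when posting the message.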
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
+ res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.com/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError as e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+ module.fail_json(msg='fail to send message with response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py
new file mode 100644
index 00000000..3ef81eaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/npm.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm).
+author: "Chris Hoffman (@chrishoffman)"
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ type: str
+ required: false
+ path:
+ description:
+ - The base path where to install the node.js libraries.
+ type: path
+ required: false
+ version:
+ description:
+ - The version to be installed.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: no
+ type: bool
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: no
+ unsafe_perm:
+ description:
+ - Use the C(--unsafe-perm) flag when installing.
+ type: bool
+ default: no
+ ci:
+ description:
+ - Install packages based on package-lock file, same as running C(npm ci).
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies.
+ required: false
+ type: bool
+ default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ type: str
+ state:
+ description:
+ - The state of the node.js library.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - npm installed in bin path (recommended /usr/local/bin)
+'''
+
+EXAMPLES = r'''
+- name: Install "coffee-script" node.js package.
+ community.general.npm:
+ name: coffee-script
+ path: /app/location
+
+- name: Install "coffee-script" node.js package on version 1.6.1.
+ community.general.npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
+
+- name: Install "coffee-script" node.js package globally.
+ community.general.npm:
+ name: coffee-script
+ global: yes
+
+- name: Remove the globally installed package "coffee-script".
+ community.general.npm:
+ name: coffee-script
+ global: yes
+ state: absent
+
+- name: Install "coffee-script" node.js package from custom registry.
+ community.general.npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.npm:
+ path: /app/location
+
+- name: Update packages based on package.json to their latest version.
+ community.general.npm:
+ path: /app/location
+ state: latest
+
+- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
+ community.general.npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
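+
+# A minimal sketch of a clean install from package-lock.json (equivalent to
+# running 'npm ci'); /app/location is a placeholder path.
+- name: Install packages based on package-lock.json.
+  community.general.npm:
+    path: /app/location
+    ci: yes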
+'''
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.unsafe_perm = kwargs['unsafe_perm']
+ self.state = kwargs['state']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version'] and self.state != 'absent':
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
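+        # In check mode only commands explicitly marked safe are executed
+        # (run_in_check_mode=True, e.g. the read-only 'list' and 'outdated').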
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production and ('install' in cmd or 'update' in cmd):
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.unsafe_perm:
+ cmd.append('--unsafe-perm')
+ if self.name and add_package_name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json', '--long']
+
+ installed = list()
+ missing = list()
+ data = {}
+ try:
+ data = json.loads(self._exec(cmd, True, False, False) or '{}')
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
+ missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if self.name and self.name not in installed:
+ missing.append(self.name)
+        else:
+            # Named dependency not installed
+            missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def ci_install(self):
+ return self._exec(['ci'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, type='str'),
+ path=dict(default=None, type='path'),
+ version=dict(default=None, type='str'),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None, type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ unsafe_perm=dict(default=False, type='bool'),
+ ci=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+ unsafe_perm = module.params['unsafe_perm']
+ ci = module.params['ci']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts,
+ unsafe_perm=unsafe_perm, state=state)
+
+ changed = False
+ if ci:
+ npm.ci_install()
+ changed = True
+ elif state == 'present':
+ installed, missing = npm.list()
+ if missing:
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if missing:
+ changed = True
+ npm.install()
+ if outdated:
+ changed = True
+ npm.update()
+ else: # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py
new file mode 100644
index 00000000..9d4a5186
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/nsupdate.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+
+# (c) 2016, Marcin Skarbek <github@skarbek.name>
+# (c) 2016, Andreas Olsson <andreas@arrakis.se>
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+#
+# This module was ported from https://github.com/mskarbek/ansible-nsupdate
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nsupdate
+
+short_description: Manage DNS records.
+description:
+ - Create, update and remove DNS records using DDNS updates
+requirements:
+ - dnspython
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ port:
+ description:
+ - Use this TCP port when connecting to C(server).
+ default: 53
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS C(server)
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ key_algorithm:
+ description:
+ - Specify key algorithm used by C(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
+ 'hmac-sha512']
+ default: 'hmac-md5'
+ zone:
+ description:
+ - DNS record will be modified on this C(zone).
+ - When omitted DNS will be queried to attempt finding the correct zone.
+ - Starting with Ansible 2.7 this parameter is optional.
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ value:
+ description:
+ - Sets the record value.
+ protocol:
+ description:
+      - Sets the transport protocol (TCP or UDP). TCP is the recommended and more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1"
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3"
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "puppet"
+ type: "CNAME"
+ state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ value: "ansible.example.org."
+ state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ state: absent
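+
+# A minimal sketch of a TXT record; the module wraps TXT values in quotes
+# automatically if they are not already quoted.
+- name: Add or modify a TXT record for ansible.example.org
+  community.general.nsupdate:
+    key_name: "nsupdate"
+    key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+    server: "10.1.1.1"
+    zone: "example.org"
+    record: "ansible"
+    type: "TXT"
+    value: "some text"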
+'''
+
+RETURN = '''
+changed:
+  description: If the module has modified the record
+  returned: success
+  type: bool
+record:
+ description: DNS record
+ returned: success
+ type: str
+ sample: 'ansible'
+ttl:
+ description: DNS record TTL
+ returned: success
+ type: int
+ sample: 86400
+type:
+ description: DNS record type
+ returned: success
+ type: str
+ sample: 'CNAME'
+value:
+ description: DNS record value(s)
+ returned: success
+ type: list
+ sample: '192.168.1.1'
+zone:
+ description: DNS record zone
+ returned: success
+ type: str
+ sample: 'example.org.'
+dns_rc:
+ description: dnspython return code
+ returned: always
+ type: int
+ sample: 4
+dns_rc_str:
+ description: dnspython return code (string representation)
+ returned: always
+ type: str
+ sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+ import dns.update
+ import dns.query
+ import dns.tsigkeyring
+ import dns.message
+ import dns.resolver
+
+ HAVE_DNSPYTHON = True
+except ImportError:
+ DNSPYTHON_IMP_ERR = traceback.format_exc()
+ HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class RecordManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['key_name']:
+ try:
+ self.keyring = dns.tsigkeyring.from_text({
+ module.params['key_name']: module.params['key_secret']
+ })
+ except TypeError:
+ module.fail_json(msg='Missing key_secret')
+ except binascii_error as e:
+ module.fail_json(msg='TSIG key error: %s' % to_native(e))
+ else:
+ self.keyring = None
+
+ if module.params['key_algorithm'] == 'hmac-md5':
+ self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+ else:
+ self.algorithm = module.params['key_algorithm']
+
+ if module.params['zone'] is None:
+ if module.params['record'][-1] != '.':
+ self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+ self.zone = self.lookup_zone()
+ else:
+ self.zone = module.params['zone']
+
+ if self.zone[-1] != '.':
+ self.zone += '.'
+
+ if module.params['record'][-1] != '.':
+ self.fqdn = module.params['record'] + '.' + self.zone
+ else:
+ self.fqdn = module.params['record']
+
+ if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None:
+ self.value = list(map(self.txt_helper, self.module.params['value']))
+ else:
+ self.value = self.module.params['value']
+
+ self.dns_rc = 0
+
+ def txt_helper(self, entry):
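+        # Wrap TXT record values in double quotes unless they are already quoted.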
+ if entry[0] == '"' and entry[-1] == '"':
+ return entry
+ return '"{text}"'.format(text=entry)
+
+ def lookup_zone(self):
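+        # Find the enclosing zone for the record by issuing SOA queries,
+        # walking up the name one label at a time until an authoritative answer is found.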
+ name = dns.name.from_text(self.module.params['record'])
+ while True:
+ query = dns.message.make_query(name, dns.rdatatype.SOA)
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
+ self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
+ self.module.params['server'], self.module.params['record']))
+ try:
+ zone = lookup.authority[0].name
+ if zone == name:
+ return zone.to_text()
+ except IndexError:
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
+
+ def __do_update(self, update):
+ response = None
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ return response
+
+ def create_or_update_record(self):
+ result = {'changed': False, 'failed': False}
+
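+        # record_exists() returns 0 (record missing), 1 (already as requested) or 2 (exists but needs updating).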
+ exists = self.record_exists()
+ if exists in [0, 2]:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ if exists == 0:
+ self.dns_rc = self.create_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
+
+ elif exists == 2:
+ self.dns_rc = self.modify_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ else:
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ return result
+
+ def create_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ response = self.__do_update(update)
+ return dns.message.Message.rcode(response)
+
+ def modify_record(self):
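+        # Replace the existing RRset: delete all records of this type for the name, then re-add the requested values.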
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+
+ return dns.message.Message.rcode(response)
+
+ def remove_record(self):
+ result = {'changed': False, 'failed': False}
+
+ if self.record_exists() == 0:
+ return result
+
+        # Record exists; in check mode, report the change without applying it.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
+ else:
+ result['changed'] = True
+
+ return result
+
+ def record_exists(self):
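+        # Return 0 if the record does not exist, 1 if it exists as requested
+        # (or simply exists when state=absent), and 2 if it exists but must be modified.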
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ try:
+ update.present(self.module.params['record'], self.module.params['type'])
+ except dns.rdatatype.UnknownRdatatype as e:
+ self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
+
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.module.params['state'] == 'absent':
+ return 1
+ for entry in self.value:
+ try:
+ update.present(self.module.params['record'], self.module.params['type'], entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+ self.dns_rc = dns.message.Message.rcode(response)
+ if self.dns_rc == 0:
+ if self.ttl_changed():
+ return 2
+ else:
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+ def ttl_changed(self):
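+        # Query the current record from the server and compare its TTL with the requested ttl parameter.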
+ query = dns.message.make_query(self.fqdn, self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ if lookup.rcode() != dns.rcode.NOERROR:
+ self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
+
+ current_ttl = lookup.answer[0].ttl
+ return current_ttl != self.module.params['ttl']
+
+
+def main():
+ tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
+ 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ server=dict(required=True, type='str'),
+ port=dict(required=False, default=53, type='int'),
+ key_name=dict(required=False, type='str'),
+ key_secret=dict(required=False, type='str', no_log=True),
+ key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(required=False, default=None, type='str'),
+ record=dict(required=True, type='str'),
+ type=dict(required=False, default='A', type='str'),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False, default=None, type='list'),
+ protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DNSPYTHON:
+ module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)
+
+ if len(module.params["record"]) == 0:
+ module.fail_json(msg='record cannot be empty.')
+
+ record = RecordManager(module)
+ result = {}
+ if module.params["state"] == 'absent':
+ result = record.remove_record()
+ elif module.params["state"] == 'present':
+ result = record.create_or_update_record()
+
+ result['dns_rc'] = record.dns_rc
+ result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ result['record'] = dict(zone=record.zone,
+ record=module.params['record'],
+ type=module.params['type'],
+ ttl=module.params['ttl'],
+ value=record.value)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py
new file mode 100644
index 00000000..06dc4af0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oci_vcn.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCNs) in OCI
+description:
+    - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
+ The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
+ U(https://github.com/oracle/oci-ansible-modules/releases).
+options:
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
+ This option is mutually exclusive with I(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it's changeable.
+ type: str
+ aliases: [ 'name' ]
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
+ form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
+ bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
+ to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
+ with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
+ with I(state=present). This option is mutually exclusive with I(compartment_id).
+ type: str
+ aliases: [ 'id' ]
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+- community.general.oracle
+- community.general.oracle_creatable_resource
+- community.general.oracle_wait_options
+- community.general.oracle_tags
+
+'''
+
+EXAMPLES = """
+- name: Create a VCN
+ community.general.oci_vcn:
+ cidr_block: '10.0.0.0/16'
+ compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+ display_name: my_vcn
+ dns_label: ansiblevcn
+
+- name: Update the specified VCN's display name
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ display_name: ansible_vcn
+
+- name: Delete the specified VCN
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ state: absent
+"""
+
+RETURN = """
+vcn:
+ description: Information about the VCN
+ returned: On successful create and update operation
+ type: dict
+ sample: {
+ "cidr_block": "10.0.0.0/16",
+ compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+ from oci.core.virtual_network_client import VirtualNetworkClient
+ from oci.core.models import CreateVcnDetails
+ from oci.core.models import UpdateVcnDetails
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+ result = oci_utils.delete_and_wait(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ delete_fn=virtual_network_client.delete_vcn,
+ kwargs_delete={"vcn_id": module.params["vcn_id"]},
+ module=module,
+ )
+ return result
+
+
+def update_vcn(virtual_network_client, module):
+ result = oci_utils.check_and_update_resource(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ update_fn=virtual_network_client.update_vcn,
+ primitive_params_update=["vcn_id"],
+ kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+ module=module,
+ update_attributes=UpdateVcnDetails().attribute_map.keys(),
+ )
+ return result
+
+
+def create_vcn(virtual_network_client, module):
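+    # Copy matching module parameters onto CreateVcnDetails, then create the VCN and wait for the operation to complete.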
+ create_vcn_details = CreateVcnDetails()
+ for attribute in create_vcn_details.attribute_map.keys():
+ if attribute in module.params:
+ setattr(create_vcn_details, attribute, module.params[attribute])
+
+ result = oci_utils.create_and_wait(
+ resource_type="vcn",
+ create_fn=virtual_network_client.create_vcn,
+ kwargs_create={"create_vcn_details": create_vcn_details},
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ get_param="vcn_id",
+ module=module,
+ )
+ return result
+
+
+def main():
+ module_args = oci_utils.get_taggable_arg_spec(
+ supports_create=True, supports_wait=True
+ )
+ module_args.update(
+ dict(
+ cidr_block=dict(type="str", required=False),
+ compartment_id=dict(type="str", required=False),
+ display_name=dict(type="str", required=False, aliases=["name"]),
+ dns_label=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["absent", "present"],
+ ),
+ vcn_id=dict(type="str", required=False, aliases=["id"]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ mutually_exclusive=[["compartment_id", "vcn_id"]],
+ )
+
+ if not HAS_OCI_PY_SDK:
+ module.fail_json(msg=missing_required_lib("oci"))
+
+ virtual_network_client = oci_utils.create_service_client(
+ module, VirtualNetworkClient
+ )
+
+ exclude_attributes = {"display_name": True, "dns_label": True}
+ state = module.params["state"]
+ vcn_id = module.params["vcn_id"]
+
+ if state == "absent":
+ if vcn_id is not None:
+ result = delete_vcn(virtual_network_client, module)
+ else:
+ module.fail_json(
+ msg="Specify vcn_id with state as 'absent' to delete a VCN."
+ )
+
+ else:
+ if vcn_id is not None:
+ result = update_vcn(virtual_network_client, module)
+ else:
+ result = oci_utils.check_and_create_resource(
+ resource_type="vcn",
+ create_fn=create_vcn,
+ kwargs_create={
+ "virtual_network_client": virtual_network_client,
+ "module": module,
+ },
+ list_fn=virtual_network_client.list_vcns,
+ kwargs_list={"compartment_id": module.params["compartment_id"]},
+ module=module,
+ model=CreateVcnDetails(),
+ exclude_attributes=exclude_attributes,
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py
new file mode 100644
index 00000000..313a7f70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/odbc.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Westcott <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: odbc
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "1.0.0"
+short_description: Execute SQL via ODBC
+description:
+ - Read/Write info via ODBC drivers.
+options:
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: yes
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: yes
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is C(true) to support legacy module behavior.
+ type: bool
+ default: yes
+ version_added: 1.3.0
+requirements:
+ - "python >= 2.6"
+ - "pyodbc"
+
+notes:
+ - "Like the command module, this module always returns changed = yes whether or not the query would change the database."
+ - "To alter this behavior you can use C(changed_when): [yes or no]."
+ - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
+'''
+
+EXAMPLES = '''
+- name: Select data from the test db
+ community.general.odbc:
+ dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+ query: "Select * from table_a where column1 = ?"
+ params:
+ - "value1"
+ commit: false
+ changed_when: no
+'''
+
+RETURN = '''
+results:
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
+description:
+ description: "List of dicts about the columns selected from the cursors, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
+row_count:
+ description: "The number of rows selected or modified according to the cursor defaults to -1. See notes."
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+HAS_PYODBC = None
+try:
+ import pyodbc
+ HAS_PYODBC = True
+except ImportError as e:
+ HAS_PYODBC = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dsn=dict(type='str', required=True, no_log=True),
+ query=dict(type='str', required=True),
+ params=dict(type='list', elements='str'),
+ commit=dict(type='bool', default=True),
+ ),
+ )
+
+ dsn = module.params.get('dsn')
+ query = module.params.get('query')
+ params = module.params.get('params')
+ commit = module.params.get('commit')
+
+ if not HAS_PYODBC:
+ module.fail_json(msg=missing_required_lib('pyodbc'))
+
+ # Try to make a connection with the DSN
+ connection = None
+ try:
+ connection = pyodbc.connect(dsn)
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
+
+ result = dict(
+ changed=True,
+ description=[],
+ row_count=-1,
+ results=[],
+ )
+
+ try:
+ cursor = connection.cursor()
+
+ if params:
+ cursor.execute(query, params)
+ else:
+ cursor.execute(query)
+ if commit:
+ cursor.commit()
+ try:
+            # Get the rows out into a 2D array
+ for row in cursor.fetchall():
+ new_row = []
+ for column in row:
+ new_row.append("{0}".format(column))
+ result['results'].append(new_row)
+
+ # Return additional information from the cursor
+ for row_description in cursor.description:
+ description = {}
+ description['name'] = row_description[0]
+ description['type'] = row_description[1].__name__
+ description['display_size'] = row_description[2]
+ description['internal_size'] = row_description[3]
+ description['precision'] = row_description[4]
+ description['scale'] = row_description[5]
+ description['nullable'] = row_description[6]
+ result['description'].append(description)
+
+ result['row_count'] = cursor.rowcount
+ except pyodbc.ProgrammingError as pe:
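+            # fetchall() raises ProgrammingError for statements that return no
+            # result set (e.g. INSERT or DDL); keep the default empty results.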
+ pass
+ except Exception as e:
+ module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
+
+ cursor.close()
+ except Exception as e:
+ module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
+ finally:
+ connection.close()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
new file mode 100644
index 00000000..2574a750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+  - Creates Connector Card messages through
+    Office 365 Connectors U(https://dev.outlook.com/Connectors).
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+  - This module is not idempotent; if the same task is run twice,
+    two Connector Cards will be created.
+options:
+ webhook:
+ type: str
+ description:
+ - The webhook URL is given to you when you create a new Connector.
+ required: true
+ summary:
+ type: str
+ description:
+ - A string used for summarizing card content.
+ - This will be shown as the message subject.
+ - This is required if the text parameter isn't populated.
+ color:
+ type: str
+ description:
+ - Accent color used for branding or indicating status in the card.
+ title:
+ type: str
+ description:
+ - A title for the Connector message. Shown at the top of the message.
+ text:
+ type: str
+ description:
+ - The main text of the card.
+      - This will be rendered below the sender information and optional title,
+        and above any sections or actions present.
+ actions:
+ type: list
+ description:
+      - This array of objects will power the action links
+        found at the bottom of the card.
+ sections:
+ type: list
+ description:
+ - Contains a list of sections to display in the card.
+      - For more information see U(https://dev.outlook.com/Connectors/reference).
+'''
+
+EXAMPLES = """
+- name: Create a simple Connector Card
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ text: 'Hello, World!'
+
+- name: Create a Connector Card with the full format
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ summary: This is the summary property
+ title: This is the **card's title** property
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ color: E81123
+ sections:
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
+ actions:
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its
+ **startGroup** property to true.
+"""
+
+RETURN = """
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
+OFFICE_365_CARD_TYPE = "MessageCard"
+OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
+OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
+
+
+def build_actions(actions):
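+    # Convert each action's snake_case keys to the camelCase names expected in the Connector Card payload.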
+ action_items = []
+
+ for action in actions:
+ action_item = snake_dict_to_camel_dict(action)
+ action_items.append(action_item)
+
+ return action_items
+
+
+def build_sections(sections):
+ sections_created = []
+
+ for section in sections:
+ sections_created.append(build_section(section))
+
+ return sections_created
+
+
+def build_section(section):
+ section_payload = dict()
+
+ if 'title' in section:
+ section_payload['title'] = section['title']
+
+ if 'start_group' in section:
+ section_payload['startGroup'] = section['start_group']
+
+ if 'activity_image' in section:
+ section_payload['activityImage'] = section['activity_image']
+
+ if 'activity_title' in section:
+ section_payload['activityTitle'] = section['activity_title']
+
+ if 'activity_subtitle' in section:
+ section_payload['activitySubtitle'] = section['activity_subtitle']
+
+ if 'activity_text' in section:
+ section_payload['activityText'] = section['activity_text']
+
+ if 'hero_image' in section:
+ section_payload['heroImage'] = section['hero_image']
+
+ if 'text' in section:
+ section_payload['text'] = section['text']
+
+ if 'facts' in section:
+ section_payload['facts'] = section['facts']
+
+ if 'images' in section:
+ section_payload['images'] = section['images']
+
+ if 'actions' in section:
+ section_payload['potentialAction'] = build_actions(section['actions'])
+
+ return section_payload
+
+
+def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
+ payload = dict()
+ payload['@context'] = OFFICE_365_CARD_CONTEXT
+ payload['@type'] = OFFICE_365_CARD_TYPE
+
+ if summary is not None:
+ payload['summary'] = summary
+
+ if color is not None:
+ payload['themeColor'] = color
+
+ if title is not None:
+ payload['title'] = title
+
+ if text is not None:
+ payload['text'] = text
+
+ if actions:
+ payload['potentialAction'] = build_actions(actions)
+
+ if sections:
+ payload['sections'] = build_sections(sections)
+
+ payload = module.jsonify(payload)
+ return payload
+
+
+def do_notify_connector_card_webhook(module, webhook, payload):
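+    # POST the JSON payload to the webhook; HTTP 200 means it was accepted.
+    # In check mode an intentionally empty payload is sent, so a 400 response whose body
+    # matches OFFICE_365_CARD_EMPTY_PAYLOAD_MSG still confirms the webhook is reachable.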
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ response, info = fetch_url(
+ module=module,
+ url=webhook,
+ headers=headers,
+ method='POST',
+ data=payload
+ )
+
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ elif info['status'] == 400 and module.check_mode:
+ if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
+ else:
+ module.fail_json(
+ msg="failed to send %s as a connector card to Incoming Webhook: %s"
+ % (payload, info['msg'])
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook=dict(required=True, no_log=True),
+ summary=dict(type='str'),
+ color=dict(type='str'),
+ title=dict(type='str'),
+ text=dict(type='str'),
+ actions=dict(type='list'),
+ sections=dict(type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ webhook = module.params['webhook']
+ summary = module.params['summary']
+ color = module.params['color']
+ title = module.params['title']
+ text = module.params['text']
+ actions = module.params['actions']
+ sections = module.params['sections']
+
+ payload = build_payload_for_connector_card(
+ module,
+ summary,
+ color,
+ title,
+ text,
+ actions,
+ sections)
+
+ if module.check_mode:
+ # In check mode, send an empty payload to validate connection
+ check_mode_payload = build_payload_for_connector_card(module)
+ do_notify_connector_card_webhook(module, webhook, check_mode_payload)
+
+ do_notify_connector_card_webhook(module, webhook, payload)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py
new file mode 100644
index 00000000..64092fd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ohai.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
+ (U(https://docs.chef.io/ohai.html)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve ohai data from all web servers and store it in one file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py
new file mode 100644
index 00000000..4e6738cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/omapi_host.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# copyright: (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: omapi_host
+short_description: Set up OMAPI hosts.
+description: Manage OMAPI hosts on compatible DHCPd servers
+requirements:
+ - pypureomapi
+author:
+- Loic Blot (@nerzhul)
+options:
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if state=present).
+ type: str
+ aliases: [ name ]
+ host:
+ description:
+ - Sets OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+ - Sets the TSIG key name for authenticating against OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+ - Sets the TSIG key content for authenticating against OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+ - Attach a list of OMAPI DHCP statements with host lease (without ending semicolon).
+ type: list
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: no
+
+'''
+EXAMPLES = r'''
+- name: Add a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.98.4.55
+ macaddr: 44:dd:ab:dd:11:44
+ name: server01
+ ip: 192.168.88.99
+ ddns: yes
+ statements:
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
+ state: present
+
+- name: Remove a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.1.1.1
+ macaddr: 00:66:ab:dd:11:44
+ state: absent
+'''
+
+RETURN = r'''
+lease:
+ description: dictionary containing host information
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+            description: IP address, if one is assigned.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: hardware type, generally '1'
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: hostname
+ returned: success
+ type: str
+ sample: 'mydesktop'
+'''
+
+import binascii
+import socket
+import struct
+import traceback
+
+PUREOMAPI_IMP_ERR = None
+try:
+ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
+ from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
+ from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
+ pureomapi_found = True
+except ImportError:
+ PUREOMAPI_IMP_ERR = traceback.format_exc()
+ pureomapi_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes, to_native
+
+
+class OmapiHostManager:
+ def __init__(self, module):
+ self.module = module
+ self.omapi = None
+ self.connect()
+
+ def connect(self):
+ try:
+ self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
+ self.module.params['key'])
+ except binascii.Error:
+ self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
+ except OmapiError as e:
+ self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
+ "are valid. Exception was: %s" % to_native(e))
+ except socket.error as e:
+ self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+ def get_host(self, macaddr):
+ msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+ msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+ msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ return None
+ return response
+
+ @staticmethod
+ def unpack_facts(obj):
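+        # Convert raw OMAPI byte values into readable Python values (MAC and IP strings, unpacked hardware type).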
+ result = dict(obj)
+ if 'hardware-address' in result:
+ result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
+
+ if 'ip-address' in result:
+ result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
+
+ if 'hardware-type' in result:
+ result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])
+
+ return result
+
+ def setup_host(self):
+ if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+ self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")
+
+ msg = None
+ host_response = self.get_host(self.module.params['macaddr'])
+ # If host was not found using macaddr, add create message
+ if host_response is None:
+ msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+ msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
+ msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
+ msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
+ if self.module.params['ip'] is not None:
+ msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
+ stmt_join = ""
+ if self.module.params['ddns']:
+ stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+ try:
+ if len(self.module.params['statements']) > 0:
+ stmt_join += "; ".join(self.module.params['statements'])
+ stmt_join += "; "
+ except TypeError as e:
+ self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+ if len(stmt_join) > 0:
+ msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+ # Forge update message
+ else:
+ response_obj = self.unpack_facts(host_response.obj)
+ fields_to_update = {}
+
+ if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+ # Name cannot be changed
+ if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+ self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+ "Please delete host and add new." %
+ (response_obj['name'], self.module.params['hostname']))
+
+ """
+ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
+ response_obj['statements'] != self.module.params['statements']:
+ with open('/tmp/omapi', 'w') as fb:
+ for (k,v) in iteritems(response_obj):
+ fb.writelines('statements: %s %s\n' % (k, v))
+ """
+ if len(fields_to_update) == 0:
+ self.module.exit_json(changed=False, lease=response_obj)
+ else:
+ msg = OmapiMessage.update(host_response.handle)
+ msg.update_object(fields_to_update)
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_STATUS:
+ self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True)
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+ def remove_host(self):
+ try:
+ self.omapi.del_host(self.module.params['macaddr'])
+ self.module.exit_json(changed=True)
+ except OmapiErrorNotFound:
+ self.module.exit_json()
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ host=dict(type='str', default="localhost"),
+ port=dict(type='int', default=7911),
+ key_name=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=True),
+ macaddr=dict(type='str', required=True),
+ hostname=dict(type='str', aliases=['name']),
+ ip=dict(type='str'),
+ ddns=dict(type='bool', default=False),
+ statements=dict(type='list', default=[]),
+ ),
+ supports_check_mode=False,
+ )
+
+ if not pureomapi_found:
+ module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)
+
+ if module.params['key'] is None or len(module.params["key"]) == 0:
+ module.fail_json(msg="'key' parameter cannot be empty.")
+
+ if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
+ module.fail_json(msg="'key_name' parameter cannot be empty.")
+
+ host_manager = OmapiHostManager(module)
+ try:
+ if module.params['state'] == 'present':
+ host_manager.setup_host()
+ elif module.params['state'] == 'absent':
+ host_manager.remove_host()
+ except ValueError as e:
+ module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py
new file mode 100644
index 00000000..68fbb1e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ome_device_info.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.2
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_device_info
+short_description: Retrieves information about devices.
+description:
+   - This module retrieves the list of all devices along with the exhaustive inventory of each
+     device.
+options:
+ hostname:
+ description:
+ - Target IP Address or hostname.
+ type: str
+ required: True
+ username:
+ description:
+ - Target username.
+ type: str
+ required: True
+ password:
+ description:
+ - Target user password.
+ type: str
+ required: True
+ port:
+ description:
+ - Target HTTPS port.
+ type: int
+ default: 443
+ fact_subset:
+ description:
+ - C(basic_inventory) returns the list of the devices.
+ - C(detailed_inventory) returns the inventory details of specified devices.
+ - C(subsystem_health) returns the health status of specified devices.
+ type: str
+ choices: [basic_inventory, detailed_inventory, subsystem_health ]
+ default: basic_inventory
+ system_query_options:
+ description:
+            - I(system_query_options) is applicable for the chosen I(fact_subset). Either I(device_id) or I(device_service_tag)
+              is mandatory for C(detailed_inventory) and C(subsystem_health), or both can be provided.
+ type: dict
+ suboptions:
+ device_id:
+ description:
+                    - A list of unique device identifiers, applicable
+                      for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ device_service_tag:
+ description:
+                    - A list of service tags, applicable for C(detailed_inventory)
+                      and C(subsystem_health).
+ type: list
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 2.7.5"
+author: "Sajna Shetty(@Sajna-Shetty)"
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+    description: Overall device information status.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: Returns the information collected from the Device.
+ returned: success
+ sample: {
+ "value": [
+ {
+ "Actions": null,
+ "AssetTag": null,
+ "ChassisServiceTag": null,
+ "ConnectionState": true,
+ "DeviceManagement": [
+ {
+ "DnsName": "dnsname.host.com",
+ "InstrumentationName": "MX-12345",
+ "MacAddress": "11:10:11:10:11:10",
+ "ManagementId": 12345,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 12345,
+ "ManagementProfileId": 12345,
+ "ManagementURL": "https://192.168.0.1:443",
+ "Status": 1000,
+ "StatusDateTime": "2019-01-21 06:30:08.501"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "192.168.0.1"
+ }
+ ],
+ "DeviceName": "MX-0003I",
+ "DeviceServiceTag": "MXL1234",
+ "DeviceSubscription": null,
+ "LastInventoryTime": "2019-01-21 06:30:08.501",
+ "LastStatusTime": "2019-01-21 06:30:02.492",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX7000",
+ "PowerState": 17,
+ "SlotConfiguration": {},
+ "Status": 4000,
+ "SystemId": 2031,
+ "Type": 2000
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICES_INVENTORY_DETAILS = "detailed_inventory"
+DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
+DEVICES_INVENTORY_TYPE = "inventory_type"
+DEVICE_LIST = "basic_inventory"
+DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
+device_fact_error_report = {}
+
+DEVICE_RESOURCE_COLLECTION = {
+ DEVICE_LIST: {"resource": "DeviceService/Devices"},
+ DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
+ DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
+ DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
+}
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj):
+ """
+    Get device ids from device service tags.
+    :arg service_tags: list of device service tags
+    :arg rest_obj: RestOME class object in case of request with session.
+    :returns: dict mapping device_id to service_tag, e.g. {1345: "MXL1245"}
+ """
+ try:
+ path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]
+ resp = rest_obj.invoke_request('GET', path)
+ if resp.success:
+ devices_list = resp.json_data["value"]
+ service_tag_dict = {}
+ for item in devices_list:
+ if item["DeviceServiceTag"] in service_tags:
+ service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+ available_service_tags = service_tag_dict.values()
+ not_available_service_tag = list(set(service_tags) - set(available_service_tags))
+ device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag))
+ else:
+ raise ValueError(resp.json_data)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return service_tag_dict
+
+
+def is_int(val):
+ """check when device_id numeric represented value is int"""
+ try:
+ int(val)
+ return True
+ except ValueError:
+ return False
+
+
+def _check_duplicate_device_id(device_id_list, service_tag_dict):
+ """If service_tag is duplicate of device_id, then updates the message as Duplicate report
+ :arg1: device_id_list : list of device_id
+ :arg2: service_tag_id_dict: dictionary of device_id to service tag map"""
+ if device_id_list:
+ device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
+ common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys()))
+ for device_id in common_val:
+ device_fact_error_report.update(
+ {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
+ del service_tag_dict[device_id]
+
+
+def _get_device_identifier_map(module_params, rest_obj):
+ """
+ Builds the identifiers mapping
+    :returns: the dict of device_id to service_tag map
+       eg: {"device_id": {1234: None}, "device_service_tag": {1345: "MXL1234"}}"""
+ system_query_options_param = module_params.get("system_query_options")
+ device_id_service_tag_dict = {}
+ if system_query_options_param is not None:
+ device_id_list = system_query_options_param.get("device_id")
+ device_service_tag_list = system_query_options_param.get("device_service_tag")
+ if device_id_list:
+ device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list)))
+ device_id_service_tag_dict["device_id"] = device_id_dict
+ if device_service_tag_list:
+ service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list,
+ rest_obj)
+
+ _check_duplicate_device_id(device_id_list, service_tag_dict)
+ device_id_service_tag_dict["device_service_tag"] = service_tag_dict
+ return device_id_service_tag_dict
+
+
+def _get_query_parameters(module_params):
+ """
+    Builds the query parameter
+    :returns: dictionary in the query format expected by the API,
+    eg : {"$filter": "Type eq 2000"}
+ """
+ system_query_options_param = module_params.get("system_query_options")
+ query_parameter = None
+ if system_query_options_param:
+ filter_by_val = system_query_options_param.get("filter")
+ if filter_by_val:
+ query_parameter = {"$filter": filter_by_val}
+ return query_parameter
+
+
+def _get_resource_parameters(module_params, rest_obj):
+ """
+ Identifies the resource path by different states
+ :returns: dictionary containing identifier with respective resource path
+ eg:{"device_id":{1234:""DeviceService/Devices(1234)/InventoryDetails"},
+ "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}}
+ """
+ fact_subset = module_params["fact_subset"]
+ path_dict = {}
+ if fact_subset != DEVICE_LIST:
+ inventory_type = None
+ device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj)
+ if fact_subset == DEVICES_INVENTORY_DETAILS:
+ system_query_options = module_params.get("system_query_options")
+ inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE)
+ path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
+ for identifier_type, identifier_dict in device_id_service_tag_dict.items():
+ path_dict[identifier_type] = {}
+ for device_id, service_tag in identifier_dict.items():
+ key_identifier = service_tag if identifier_type == "device_service_tag" else device_id
+ path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id,
+ InventoryType=inventory_type)
+ path_dict[identifier_type].update({key_identifier: path})
+ else:
+ path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]})
+ return path_dict
+
+
+def _check_mutually_inclusive_arguments(val, module_params, required_args):
+ """"
+ Throws error if arguments detailed_inventory, subsystem_health
+ not exists with qualifier device_id or device_service_tag"""
+ system_query_options_param = module_params.get("system_query_options")
+ if system_query_options_param is None or (system_query_options_param is not None and not any(
+ system_query_options_param.get(qualifier) for qualifier in required_args)):
+ raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
+
+
+def _validate_inputs(module_params):
+ """validates input parameters"""
+ fact_subset = module_params["fact_subset"]
+ if fact_subset != "basic_inventory":
+ _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"])
+
+
+def main():
+ system_query_options = {"type": 'dict', "required": False, "options": {
+ "device_id": {"type": 'list'},
+ "device_service_tag": {"type": 'list'},
+ "inventory_type": {"type": 'str'},
+ "filter": {"type": 'str', "required": False},
+ }}
+
+ module = AnsibleModule(
+ argument_spec={
+ "hostname": {"required": True, "type": 'str'},
+ "username": {"required": True, "type": 'str'},
+ "password": {"required": True, "type": 'str', "no_log": True},
+ "port": {"required": False, "default": 443, "type": 'int'},
+ "fact_subset": {"required": False, "default": "basic_inventory",
+ "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
+ "system_query_options": system_query_options,
+ },
+ required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
+ ['fact_subset', 'subsystem_health', ['system_query_options']], ],
+ supports_check_mode=False)
+
+ try:
+ _validate_inputs(module.params)
+ with RestOME(module.params, req_session=True) as rest_obj:
+ device_facts = _get_resource_parameters(module.params, rest_obj)
+ resp_status = []
+ if device_facts.get("basic_inventory"):
+ query_param = _get_query_parameters(module.params)
+ resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
+ device_facts = resp.json_data
+ resp_status.append(resp.status_code)
+ else:
+ for identifier_type, path_dict_map in device_facts.items():
+ for identifier, path in path_dict_map.items():
+ try:
+ resp = rest_obj.invoke_request('GET', path)
+ data = resp.json_data
+ resp_status.append(resp.status_code)
+ except HTTPError as err:
+ data = str(err)
+ path_dict_map[identifier] = data
+ if any(device_fact_error_report):
+ if "device_service_tag" in device_facts:
+ device_facts["device_service_tag"].update(device_fact_error_report)
+ else:
+ device_facts["device_service_tag"] = device_fact_error_report
+ if 200 in resp_status:
+ module.exit_json(device_info=device_facts)
+ else:
+ module.fail_json(msg="Failed to fetch the device information")
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py
new file mode 100644
index 00000000..efe1ce22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_host.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula Hosts"
+
+options:
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If C(absent) the host will be deleted from the cluster.
+ - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
+ - If C(enabled) the host is fully operational.
+ - If C(disabled) the host is disabled, e.g. to perform maintenance operations.
+ - If C(offline) the host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+ - The name of the information manager; these values are taken from the oned.conf with the tag name IM_MAD (name).
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+ - The name of the virtual machine manager MAD; these values are taken from the oned.conf with the tag name VM_MAD (name).
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
+
+extends_documentation_fragment:
+- community.general.opennebula
+
+
+author:
+ - Rafael del Valle (@rvalle)
+'''
+
+EXAMPLES = '''
+- name: Create a new host in OpenNebula
+ community.general.one_host:
+ name: host1
+ cluster_id: 1
+ api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+ community.general.one_host:
+ name: host2
+ cluster_name: default
+ template:
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
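+
+# Illustrative examples (not from the original module documentation); the host
+# name below is a placeholder.
+- name: Disable a host for maintenance
+ community.general.one_host:
+ name: host2
+ state: disabled
+
+- name: Remove a host from the cluster
+ community.general.one_host:
+ name: host2
+ state: absent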
+'''
+
+# TODO: pending setting guidelines on returned values
+RETURN = '''
+'''
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+ from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+ pass # handled at module utils
+
+
+# Pseudo definitions...
+
+HOST_ABSENT = -99 # the host is absent (special case defined by this module)
+
+
+class HostModule(OpenNebulaModule):
+
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
+ im_mad_name=dict(type='str', default="kvm"),
+ vmm_mad_name=dict(type='str', default="kvm"),
+ cluster_id=dict(type='int', default=0),
+ cluster_name=dict(type='str'),
+ labels=dict(type='list'),
+ template=dict(type='dict', aliases=['attributes']),
+ )
+
+ mutually_exclusive = [
+ ['cluster_id', 'cluster_name']
+ ]
+
+ OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
+
+ def allocate_host(self):
+ """
+ Creates a host entry in OpenNebula
+ Returns: True on success, fails otherwise.
+
+ """
+ if not self.one.host.allocate(self.get_parameter('name'),
+ self.get_parameter('vmm_mad_name'),
+ self.get_parameter('im_mad_name'),
+ self.get_parameter('cluster_id')):
+ self.fail(msg="could not allocate host")
+ else:
+ self.result['changed'] = True
+ return True
+
+ def wait_for_host_state(self, host, target_states):
+ """
+ Utility method that waits for a host state.
+ Args:
+ host:
+ target_states:
+
+ """
+ return self.wait_for_state('host',
+ lambda: self.one.host.info(host.ID).STATE,
+ lambda s: HOST_STATES(s).name, target_states,
+ invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
+
+ def run(self, one, module, result):
+
+ # Get the list of hosts
+ host_name = self.get_parameter("name")
+ host = self.get_host_by_name(host_name)
+
+ # manage host state
+ desired_state = self.get_parameter('state')
+ if bool(host):
+ current_state = host.STATE
+ current_state_name = HOST_STATES(host.STATE).name
+ else:
+ current_state = HOST_ABSENT
+ current_state_name = "ABSENT"
+
+ # apply properties
+ if desired_state == 'present':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
+ self.fail(msg="invalid host state %s" % current_state_name)
+
+ elif desired_state == 'enabled':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.ENABLED):
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not enable host")
+ elif current_state in [HOST_STATES.MONITORED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
+
+ elif desired_state == 'disabled':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be put in disabled state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.DISABLED):
+ self.wait_for_host_state(host, [HOST_STATES.DISABLED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not disable host")
+ elif current_state in [HOST_STATES.DISABLED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
+
+ elif desired_state == 'offline':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be placed in offline state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
+ if one.host.status(host.ID, HOST_STATUS.OFFLINE):
+ self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not set host offline")
+ elif current_state in [HOST_STATES.OFFLINE]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
+
+ elif desired_state == 'absent':
+ if current_state != HOST_ABSENT:
+ if one.host.delete(host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="could not delete host from cluster")
+
+ # if we reach this point we can assume that the host was taken to the desired state
+
+ if desired_state != "absent":
+ # manipulate or modify the template
+ desired_template_changes = self.get_parameter('template')
+
+ if desired_template_changes is None:
+ desired_template_changes = dict()
+
+ # complete the template with specific ansible parameters
+ if self.is_parameter('labels'):
+ desired_template_changes['LABELS'] = self.get_parameter('labels')
+
+ if self.requires_template_update(host.TEMPLATE, desired_template_changes):
+ # setup the root element so that pyone will generate XML instead of attribute vector
+ desired_template_changes = {"TEMPLATE": desired_template_changes}
+ if one.host.update(host.ID, desired_template_changes, 1): # merge the template
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host template")
+
+ # the cluster
+ if host.CLUSTER_ID != self.get_parameter('cluster_id'):
+ if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host cluster")
+
+ # return
+ self.exit()
+
+
+def main():
+ HostModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py
new file mode 100644
index 00000000..867bab62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image
+short_description: Manages OpenNebula images
+description:
+ - Manages OpenNebula images
+requirements:
+ - python-oca
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ id:
+ description:
+ - A C(id) of the image you would like to manage.
+ type: int
+ name:
+ description:
+ - A C(name) of the image you would like to manage.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the image
+ - C(absent) - delete the image
+ - C(cloned) - clone the image
+ - C(renamed) - rename the image to the C(new_name)
+ choices: ["present", "absent", "cloned", "renamed"]
+ default: present
+ type: str
+ enabled:
+ description:
+ - Whether the image should be enabled or disabled.
+ type: bool
+ new_name:
+ description:
+ - A name that will be assigned to the existing or new image.
+ - In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the IMAGE by id
+ community.general.one_image:
+ id: 45
+ register: result
+
+- name: Print the IMAGE properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Rename existing IMAGE
+ community.general.one_image:
+ id: 34
+ state: renamed
+ new_name: bar-image
+
+- name: Disable the IMAGE by id
+ community.general.one_image:
+ id: 37
+ enabled: no
+
+- name: Enable the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ enabled: yes
+
+- name: Clone the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ state: cloned
+ new_name: bar-image-clone
+ register: result
+
+- name: Delete the IMAGE by id
+ community.general.one_image:
+ id: '{{ result.id }}'
+ state: absent
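+
+# Illustrative example (not from the original documentation): when new_name is
+# omitted, the clone is named "Copy of <original name>"; the id is a placeholder.
+- name: Clone the IMAGE by id, keeping the default clone name
+ community.general.one_image:
+ id: 27
+ state: cloned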
+'''
+
+RETURN = '''
+id:
+ description: image id
+ type: int
+ returned: success
+ sample: 153
+name:
+ description: image name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: image's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: image's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: image's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: image's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of image instance
+ type: str
+ returned: success
+ sample: READY
+used:
+ description: is image in use
+ type: bool
+ returned: success
+ sample: true
+running_vms:
+ description: count of running vms that use this image
+ type: int
+ returned: success
+ sample: 7
+'''
+
+try:
+ import oca
+ HAS_OCA = True
+except ImportError:
+ HAS_OCA = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_image(module, client, predicate):
+ pool = oca.ImagePool(client)
+ # Filter -2 means fetch all images user can Use
+ pool.info(filter=-2)
+
+ for image in pool:
+ if predicate(image):
+ return image
+
+ return None
+
+
+def get_image_by_name(module, client, image_name):
+ return get_image(module, client, lambda image: (image.name == image_name))
+
+
+def get_image_by_id(module, client, image_id):
+ return get_image(module, client, lambda image: (image.id == image_id))
+
+
+def get_image_instance(module, client, requested_id, requested_name):
+ if requested_id:
+ return get_image_by_id(module, client, requested_id)
+ else:
+ return get_image_by_name(module, client, requested_name)
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ image.info()
+
+ info = {
+ 'id': image.id,
+ 'name': image.name,
+ 'state': IMAGE_STATES[image.state],
+ 'running_vms': image.running_vms,
+ 'used': bool(image.running_vms),
+ 'user_name': image.uname,
+ 'user_id': image.uid,
+ 'group_name': image.gname,
+ 'group_id': image.gid,
+ }
+
+ return info
+
+
+def wait_for_state(module, image, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ image.info()
+ state = image.state
+
+ if state_predicate(state):
+ return image
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_ready(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
+
+
+def wait_for_delete(module, image, wait_timeout=60):
+ return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
+
+
+def enable_image(module, client, image, enable):
+ image.info()
+ changed = False
+
+ state = image.state
+
+ if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+ if enable:
+ module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+ else:
+ module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
+ if ((enable and state != IMAGE_STATES.index('READY')) or
+ (not enable and state != IMAGE_STATES.index('DISABLED'))):
+ changed = True
+
+ if changed and not module.check_mode:
+ client.call('image.enable', image.id, enable)
+
+ result = get_image_info(image)
+ result['changed'] = changed
+
+ return result
+
+
+def clone_image(module, client, image, new_name):
+ if new_name is None:
+ new_name = "Copy of " + image.name
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ result = get_image_info(tmp_image)
+ result['changed'] = False
+ return result
+
+ if image.state == IMAGE_STATES.index('DISABLED'):
+ module.fail_json(msg="Cannot clone DISABLED image")
+
+ if not module.check_mode:
+ new_id = client.call('image.clone', image.id, new_name)
+ image = get_image_by_id(module, client, new_id)
+ wait_for_ready(module, image)
+
+ result = get_image_info(image)
+ result['changed'] = True
+
+ return result
+
+
+def rename_image(module, client, image, new_name):
+ if new_name is None:
+ module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
+
+ if new_name == image.name:
+ result = get_image_info(image)
+ result['changed'] = False
+ return result
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
+
+ if not module.check_mode:
+ client.call('image.rename', image.id, new_name)
+
+ result = get_image_info(image)
+ result['changed'] = True
+ return result
+
+
+def delete_image(module, client, image):
+
+ if not image:
+ return {'changed': False}
+
+ if image.running_vms > 0:
+ module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
+
+ if not module.check_mode:
+ client.call('image.delete', image.id)
+ wait_for_delete(module, image)
+
+ return {'changed': True}
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "id": {"required": False, "type": "int"},
+ "name": {"required": False, "type": "str"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'cloned', 'renamed'],
+ "type": "str"
+ },
+ "enabled": {"required": False, "type": "bool"},
+ "new_name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['id', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_OCA:
+ module.fail_json(msg='This module requires python-oca to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ state = params.get('state')
+ enabled = params.get('enabled')
+ new_name = params.get('new_name')
+ client = oca.Client(auth.username + ':' + auth.password, auth.url)
+
+ result = {}
+
+ if not id and state == 'renamed':
+ module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
+
+ image = get_image_instance(module, client, id, name)
+ if not image and state != 'absent':
+ if id:
+ module.fail_json(msg="There is no image with id=" + str(id))
+ else:
+ module.fail_json(msg="There is no image with name=" + name)
+
+ if state == 'absent':
+ result = delete_image(module, client, image)
+ else:
+ result = get_image_info(image)
+ changed = False
+ result['changed'] = False
+
+ if enabled is not None:
+ result = enable_image(module, client, image, enabled)
+ changed = changed or result['changed']
+ if state == "cloned":
+ result = clone_image(module, client, image, new_name)
+ elif state == "renamed":
+ result = rename_image(module, client, image, new_name)
+
+ changed = changed or result['changed']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_facts.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+ - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as regex pattern
+ - which restricts the list of images (whose facts will be returned) to those whose names match the specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ var: result
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if an exact name was given (no pattern) and no image matched, fail
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py
new file mode 100644
index 00000000..0d2bd070
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_image_info.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2018, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+ - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) will be used as regex pattern
+ - which restricts the list of images (whose facts will be returned) to those whose names match the specified regex.
+ - Also, if the C(name) begins with '~*' case-insensitive matching will be performed.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ var: result
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of images info
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='There is no IMAGE(s) with id(s)=' + ', '.join('{id}'.format(id=str(image_id)) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # if the specific name is indicated
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+ if module._name in ('one_image_facts', 'community.general.one_image_facts'):
+ module.deprecate("The 'one_image_facts' module has been renamed to 'one_image_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {'images': []}
+ images = []
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ for image in images:
+ result['images'].append(get_image_info(image))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py
new file mode 100644
index 00000000..68f8398f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_service.py
@@ -0,0 +1,768 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+ - Manage OpenNebula services
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula OneFlow API server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the ONEFLOW_URL environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ type: str
+ template_name:
+ description:
+ - Name of service template to use to create a new instance of a service
+ type: str
+ template_id:
+ description:
+ - ID of a service template to use to create a new instance of a service
+ type: int
+ service_id:
+ description:
+ - ID of a service instance that you would like to manage
+ type: int
+ service_name:
+ description:
+ - Name of a service instance that you would like to manage
+ type: str
+ unique:
+ description:
+ - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when
+ - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below.
+ type: bool
+ default: no
+ state:
+ description:
+ - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name).
+ - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name).
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ mode:
+ description:
+ - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the service
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the service
+ type: int
+ wait:
+ description:
+ - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING
+ type: bool
+ default: no
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ custom_attrs:
+ description:
+ - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ default: {}
+ type: dict
+ role:
+ description:
+ - Name of the role whose cardinality should be changed
+ type: str
+ cardinality:
+ description:
+ - Number of VMs for the specified role
+ type: int
+ force:
+ description:
+ - Force the new cardinality even if it is outside the limits
+ type: bool
+ default: no
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+ community.general.one_service:
+ template_id: 90
+ register: result
+
+- name: Print service properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+ community.general.one_service:
+ template_name: 'app1_template'
+ service_name: 'app1'
+ group_id: 1
+ mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+ community.general.one_service:
+ template_id: 90
+ custom_attrs:
+ public_network_id: 21
+ private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+ community.general.one_service:
+ template_id: 53
+ service_name: 'foo'
+ unique: yes
+
+- name: Delete a service by ID
+ community.general.one_service:
+ service_id: 153
+ state: absent
+
+- name: Get service info
+ community.general.one_service:
+ service_id: 153
+ register: service_info
+
+- name: Change service owner, group and mode
+ community.general.one_service:
+ service_name: 'app2'
+ owner_id: 34
+ group_id: 113
+ mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+ community.general.one_service:
+ template_id: 43
+ service_name: 'foo1'
+
+- name: Wait service to become RUNNING
+ community.general.one_service:
+ service_id: 112
+ wait: yes
+
+- name: Change role cardinality
+ community.general.one_service:
+ service_id: 153
+ role: bar
+ cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 7
+ wait: yes
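+
+# Illustrative example (not from the original documentation): force allows the new
+# cardinality even if it falls outside the limits defined for the role.
+- name: Force role cardinality beyond the configured limits
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 20
+ force: yes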
+'''
+
+RETURN = '''
+service_id:
+ description: service id
+ type: int
+ returned: success
+ sample: 153
+service_name:
+ description: service name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: service's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: service's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: service's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: service's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of service instance
+ type: str
+ returned: success
+ sample: RUNNING
+mode:
+ description: service's mode
+ type: int
+ returned: success
+ sample: 660
+roles:
+ description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
+ type: list
+ returned: success
+ sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
+ {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
+'''
+
+import os
+import sys
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+ "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
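+# Example mapping (illustrative): STATES.index("RUNNING") == 2 and STATES[2] == "RUNNING";
+# the helpers below convert between the numeric state reported by the OneFlow API
+# and these symbolic names via index() and subscripting.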
+
+def get_all_templates(module, auth):
+ try:
+ all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+ all_templates_dict = get_all_templates(module, auth)
+
+ found = 0
+ found_template = None
+ template_name = ''
+
+ if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+ for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(template):
+ found = found + 1
+ found_template = template
+ template_name = template["NAME"]
+
+ if found <= 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg="There is no template with unique name: " + template_name)
+ else:
+ return found_template
+
+
+def get_all_services(module, auth):
+ try:
+ response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+ all_services_dict = get_all_services(module, auth)
+
+ found = 0
+ found_service = None
+ service_name = ''
+
+ if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+ for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(service):
+ found = found + 1
+ found_service = service
+ service_name = service["NAME"]
+
+ # fail if there are multiple services with the same name
+ if found > 1:
+ module.fail_json(msg="There are multiple services with a name: '" +
+ service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+ elif found <= 0:
+ return None
+ else:
+ return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+ return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+ return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+ result = {
+ "service_id": int(service["ID"]),
+ "service_name": service["NAME"],
+ "group_id": int(service["GID"]),
+ "group_name": service["GNAME"],
+ "owner_id": int(service["UID"]),
+ "owner_name": service["UNAME"],
+ "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+ }
+
+ roles_status = service["TEMPLATE"]["BODY"]["roles"]
+ roles = []
+ for role in roles_status:
+ nodes_ids = []
+ if "nodes" in role:
+ for node in role["nodes"]:
+ nodes_ids.append(node["deploy_id"])
+ roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+ result["roles"] = roles
+ result["mode"] = int(parse_service_permissions(service))
+
+ return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+ # make sure that the values in custom_attrs dict are strings
+ custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
+
+ data = {
+ "action": {
+ "perform": "instantiate",
+ "params": {
+ "merge_template": {
+ "custom_attrs_values": custom_attrs_with_str,
+ "name": service_name
+ }
+ }
+ }
+ }
+
+ try:
+ response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+ data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ service_result = module.from_json(response.read())["DOCUMENT"]
+
+ return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
+
+ status_result = module.from_json(status_result.read())
+ service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
+
+ if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
+ return status_result["DOCUMENT"]
+ elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
+ log_message = ''
+ for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
+ if log_info["severity"] == "E":
+ log_message = log_message + log_info["message"]
+ break
+
+ module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired")
+
+
+def change_service_permissions(module, auth, service_id, permissions):
+
+ data = {
+ "action": {
+ "perform": "chmod",
+ "params": {"octet": permissions}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_owner(module, auth, service_id, owner_id):
+ data = {
+ "action": {
+ "perform": "chown",
+ "params": {"owner_id": owner_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_group(module, auth, service_id, group_id):
+
+ data = {
+ "action": {
+ "perform": "chgrp",
+ "params": {"group_id": group_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_role_cardinality(module, auth, service_id, role, cardinality, force):
+
+ data = {
+ "cardinality": cardinality,
+ "force": force
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if status_result.getcode() != 204:
+ module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
+
+
+def check_change_service_owner(module, service, owner_id):
+ old_owner_id = int(service["UID"])
+
+ return old_owner_id != owner_id
+
+
+def check_change_service_group(module, service, group_id):
+ old_group_id = int(service["GID"])
+
+ return old_group_id != group_id
+
+
+def parse_service_permissions(service):
+ perm_dict = service["PERMISSIONS"]
+ '''
+ This is the structure of the 'PERMISSIONS' dictionary:
+
+ "PERMISSIONS": {
+ "OWNER_U": "1",
+ "OWNER_M": "1",
+ "OWNER_A": "0",
+ "GROUP_U": "0",
+ "GROUP_M": "0",
+ "GROUP_A": "0",
+ "OTHER_U": "0",
+ "OTHER_M": "0",
+ "OTHER_A": "0"
+ }
+ '''
+
+ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+ group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+ other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
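+# Worked example (illustrative): for a service whose PERMISSIONS are
+# OWNER_U=1, OWNER_M=1, OWNER_A=0, GROUP_U=1, GROUP_M=0, GROUP_A=0 and all
+# OTHER_* flags 0, parse_service_permissions() returns "640"
+# (owner 1*4 + 1*2 + 0 = 6, group 1*4 = 4, other 0).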
+
+def check_change_service_permissions(module, service, permissions):
+ old_permissions = parse_service_permissions(service)
+
+ return old_permissions != permissions
+
+
+def check_change_role_cardinality(module, service, role_name, cardinality):
+ roles_list = service["TEMPLATE"]["BODY"]["roles"]
+
+ for role in roles_list:
+ if role["name"] == role_name:
+ return int(role["cardinality"]) != cardinality
+
+ module.fail_json(msg="There is no role with name: " + role_name)
+
+
+def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
+ if not service_name:
+ service_name = ''
+ changed = False
+ service = None
+
+ if unique:
+ service = get_service_by_name(module, auth, service_name)
+
+ if not service:
+ if not module.check_mode:
+ service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
+ changed = True
+
+ # in check mode, when there would be changes, the service does not exist yet, so we cannot fetch its info
+ if module.check_mode and changed:
+ return {"changed": True}
+
+ result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
+ wait_timeout=wait_timeout, permissions=permissions, service=service)
+
+ if result["changed"]:
+ changed = True
+
+ result["changed"] = changed
+
+ return result
+
+
+def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
+ role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
+
+ changed = False
+
+ if not service:
+ service = get_service_by_id(module, auth, service_id)
+ else:
+ service_id = service["ID"]
+
+ if not service:
+ module.fail_json(msg="There is no service with id: " + str(service_id))
+
+ if owner_id:
+ if check_change_service_owner(module, service, owner_id):
+ if not module.check_mode:
+ change_service_owner(module, auth, service_id, owner_id)
+ changed = True
+ if group_id:
+ if check_change_service_group(module, service, group_id):
+ if not module.check_mode:
+ change_service_group(module, auth, service_id, group_id)
+ changed = True
+ if permissions:
+ if check_change_service_permissions(module, service, permissions):
+ if not module.check_mode:
+ change_service_permissions(module, auth, service_id, permissions)
+ changed = True
+
+ if role:
+ if check_change_role_cardinality(module, service, role, cardinality):
+ if not module.check_mode:
+ change_role_cardinality(module, auth, service_id, role, cardinality, force)
+ changed = True
+
+ if wait and not module.check_mode:
+ service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
+
+ # if something has changed, fetch service info again
+ if changed:
+ service = get_service_by_id(module, auth, service_id)
+
+ service_info = get_service_info(module, auth, service)
+ service_info["changed"] = changed
+
+ return service_info
+
+
+def delete_service(module, auth, service_id):
+ service = get_service_by_id(module, auth, service_id)
+ if not service:
+ return {"changed": False}
+
+ service_info = get_service_info(module, auth, service)
+
+ service_info["changed"] = True
+
+ if module.check_mode:
+ return service_info
+
+ try:
+ result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
+
+ return service_info
+
+
+def get_template_by_name(module, auth, template_name):
+ return get_template(module, auth, lambda template: (template["NAME"] == template_name))
+
+
+def get_template_by_id(module, auth, template_id):
+ return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
+
+
+def get_template_id(module, auth, requested_id, requested_name):
+ template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
+
+ if template:
+ return template["ID"]
+
+ return None
+
+
+def get_service_id_by_name(module, auth, service_name):
+ service = get_service_by_name(module, auth, service_name)
+
+ if service:
+ return service["ID"]
+
+ return None
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONEFLOW_URL')
+
+ if not username:
+ username = os.environ.get('ONEFLOW_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONEFLOW_PASSWORD')
+
+ if not(url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'user', 'password'))
+
+ return auth_params(url=url, user=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "service_name": {"required": False, "type": "str"},
+ "service_id": {"required": False, "type": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "unique": {"default": False, "type": "bool"},
+ "wait": {"default": False, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "custom_attrs": {"default": {}, "type": "dict"},
+ "role": {"required": False, "type": "str"},
+ "cardinality": {"required": False, "type": "int"},
+ "force": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'service_id'],
+ ['service_id', 'service_name'],
+ ['template_id', 'template_name', 'role'],
+ ['template_id', 'template_name', 'cardinality'],
+ ['service_id', 'custom_attrs']
+ ],
+ required_together=[['role', 'cardinality']],
+ supports_check_mode=True)
+
+ auth = get_connection_info(module)
+ params = module.params
+ service_name = params.get('service_name')
+ service_id = params.get('service_id')
+
+ requested_template_id = params.get('template_id')
+ requested_template_name = params.get('template_name')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ unique = params.get('unique')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ custom_attrs = params.get('custom_attrs')
+ role = params.get('role')
+ cardinality = params.get('cardinality')
+ force = params.get('force')
+
+ template_id = None
+
+ if requested_template_id or requested_template_name:
+ template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+ if not template_id:
+ if requested_template_id:
+ module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ if unique and not service_name:
+ module.fail_json(msg="You cannot use unique without passing service_name!")
+
+ if template_id and state == 'absent':
+ module.fail_json(msg="State absent is not valid for template")
+
+ if template_id and state == 'present': # Instantiate a service
+ result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+ group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+ else:
+ if not (service_id or service_name):
+ module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
+ if custom_attrs:
+ module.fail_json(msg="You can only set custom_attrs when instantiate service!")
+
+ if not service_id:
+ service_id = get_service_id_by_name(module, auth, service_name)
+ # The task should fail when we try to manage a non-existent service identified by its name
+ if not service_id and state == 'present':
+ module.fail_json(msg="There is no service with name: " + service_name)
+
+ if state == 'absent':
+ result = delete_service(module, auth, service_id)
+ else:
+ result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py
new file mode 100644
index 00000000..286514bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/one_vm.py
@@ -0,0 +1,1599 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+(c) 2017, Milan Ilic <milani@nordeus.com>
+(c) 2019, Jan Meerkamp <meerkamp@dvv.de>
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+ - Manages OpenNebula instances
+requirements:
+ - pyone
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server.
+ - If not set then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into the OpenNebula RPC server.
+ - If not set then the value of the C(ONE_PASSWORD) environment variable is used.
+ - If both I(api_username) and I(api_password) are not set, the module will try to authenticate with the ONE auth file. The default path is "~/.one/one_auth".
+ - Set the environment variable C(ONE_AUTH) to override this path.
+ type: str
+ template_name:
+ description:
+ - Name of VM template to use to create a new instance
+ type: str
+ template_id:
+ description:
+ - ID of a VM template to use to create a new instance
+ type: int
+ vm_start_on_hold:
+ description:
+ - Set to true to put the VM on hold while creating
+ default: False
+ type: bool
+ instance_ids:
+ description:
+ - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ aliases: ['ids']
+ type: list
+ state:
+ description:
+ - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+ - C(running) - run instances
+ - C(poweredoff) - power-off instances
+ - C(rebooted) - reboot instances
+ - C(absent) - terminate instances
+ choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+ default: present
+ type: str
+ hard:
+ description:
+ - Reboot, power-off or terminate instances C(hard)
+ default: no
+ type: bool
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Keep in mind that waiting for the instance to be in the running state does not mean you can already SSH into the machine, only that the boot process has started on that instance. See the 'wait_for' example for details.
+ default: yes
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ attributes:
+ description:
+ - A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes.
+ - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+ - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
+ - C(#) character(s) can be appended to the C(NAME) and the module will automatically add indexes to the names of VMs.
+ - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+ - When used with C(count_attributes) and C(exact_count) the module will match the base name without the index part.
+ default: {}
+ type: dict
+ labels:
+ description:
+ - A list of labels to associate with new instances, or for setting C(state) of instances with these labels.
+ default: []
+ type: list
+ count_attributes:
+ description:
+ - A dictionary of key/value attributes that can only be used with C(exact_count) to determine how many nodes matching a specific attributes criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ type: dict
+ count_labels:
+ description:
+ - A list of labels that can only be used with C(exact_count) to determine how many nodes matching a specific labels criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ type: list
+ count:
+ description:
+ - Number of instances to launch
+ default: 1
+ type: int
+ exact_count:
+ description:
+ - Indicates how many instances that match the C(count_attributes) and C(count_labels) parameters should be deployed. Instances are either created or terminated based on this value.
+ - NOTE':' Instances with the lowest IDs will be terminated first.
+ type: int
+ mode:
+ description:
+ - Set permission mode of the instance in octal format, e.g. C(600) to give the owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the instance
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the instance
+ type: int
+ memory:
+ description:
+ - The size of the memory for new instances (in MB, GB, ...)
+ type: str
+ disk_size:
+ description:
+ - The size of the disk created for new instances (in MB, GB, TB,...).
+ - NOTE':' If the template has multiple disks, the order of the sizes is matched against the order specified in C(template_id)/C(template_name).
+ type: list
+ cpu:
+ description:
+ - Percentage of CPU divided by 100 required for the new instance. Half a processor is written 0.5.
+ type: float
+ vcpu:
+ description:
+ - Number of CPUs (cores) new VM will have.
+ type: int
+ networks:
+ description:
+ - A list of dictionaries with network parameters. See examples for more details.
+ default: []
+ type: list
+ disk_saveas:
+ description:
+ - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify C(name) of the new image.
+ - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+ - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) and the VM has to be in the C(poweredoff) state.
+ - Also this operation will fail if an image with specified C(name) already exists.
+ type: dict
+ persistent:
+ description:
+ - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+ default: NO
+ type: bool
+ version_added: '0.2.0'
+ datastore_id:
+ description:
+ - ID of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: int
+ datastore_name:
+ description:
+ - Name of the datastore to use to create a new instance
+ version_added: '0.2.0'
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+ community.general.one_vm:
+ template_id: 90
+ register: result
+
+- name: Print VM properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Deploy a new VM on hold
+ community.general.one_vm:
+ template_name: 'app1_template'
+ vm_start_on_hold: true
+
+- name: Deploy a new VM and set its name to 'foo'
+ community.general.one_vm:
+ template_name: 'app1_template'
+ attributes:
+ name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+ community.general.one_vm:
+ template_id: 90
+ group_id: 16
+ mode: 660
+
+- name: Deploy a new VM as persistent
+ community.general.one_vm:
+ template_id: 90
+ persistent: yes
+
+- name: Change VM's permissions to 640
+ community.general.one_vm:
+ instance_ids: 5
+ mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+ community.general.one_vm:
+ template_id: 15
+ disk_size: 35.2 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 2
+ networks:
+ - NETWORK_ID: 27
+ - NETWORK: "default-network"
+ NETWORK_UNAME: "app-user"
+ SECURITY_GROUPS: "120,124"
+ - NETWORK_ID: 27
+ SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+ community.general.one_vm:
+ template_id: 42
+ disk_size:
+ - 35.2 GB
+ - 50 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 1
+ networks:
+ - NETWORK_ID: 27
+
+- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: foo
+ bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ foo1: app1
+ foo2: app2
+ exact_count: 2
+ count_attributes:
+ foo1: app1
+ foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: app
+ bar: bar2
+ exact_count: 4
+ count_attributes:
+ bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-##
+ foo: bar
+ labels:
+ - app1
+ - app2
+ count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-###
+ app: app1
+ count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+ community.general.one_vm:
+ attributes:
+ name: fooapp-#
+ state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete the oldest instances, so only 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 1
+ count_attributes:
+ name: fooapp-#
+
+- name: Deploy a new instance with a network
+ community.general.one_vm:
+ template_id: 53
+ networks:
+ - NETWORK_ID: 27
+ register: vm
+
+- name: Wait for SSH to come up
+ ansible.builtin.wait_for_connection:
+ delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+ community.general.one_vm:
+ instance_ids:
+ - 153
+ - 160
+ state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+ community.general.one_vm:
+ labels:
+ - foo
+ - app1
+ state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+ community.general.one_vm:
+ attributes:
+ name: foo
+ app: bar
+ register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ - foo2
+ count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ exact_count: 1
+ count_labels:
+ - foo1
+
+- name: Terminate all instances that have attribute foo
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 0
+ count_attributes:
+ foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ state: poweredoff
+ disk_saveas:
+ name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ disk_saveas:
+ name: bar-image
+ disk_id: 1
+'''
+
+RETURN = '''
+instances_ids:
+ description: a list of IDs of instances whose state was changed or which were fetched with the C(instance_ids) option.
+ type: list
+ returned: success
+ sample: [ 1234, 1235 ]
+instances:
+ description: a list of info dictionaries for instances whose state was changed or which were fetched with the C(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: str
+ sample: 20480 MB
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+tagged_instances:
+ description:
+ - A list of info dictionaries for instances matched by the attributes and/or labels specified with the C(count_attributes) and C(count_labels) options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's user id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's user name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: list
+ sample: [
+ "20480 MB",
+ "10240 MB"
+ ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_template(module, client, predicate):
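+ # Returns the single template matching the predicate, or None when there is no
+ # match; fails the module when more than one template matches.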
+
+ pool = client.templatepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all templates the user can use
+ found = 0
+ found_template = None
+ template_name = ''
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ found = found + 1
+ found_template = template
+ template_name = template.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one template with name: ' + template_name)
+ return found_template
+
+
+def get_template_by_name(module, client, template_name):
+ return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+ return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+ template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+ if template:
+ return template.ID
+ else:
+ return None
+
+
+def get_datastore(module, client, predicate):
+ pool = client.datastorepool.info()
+ found = 0
+ found_datastore = None
+ datastore_name = ''
+
+ for datastore in pool.DATASTORE:
+ if predicate(datastore):
+ found = found + 1
+ found_datastore = datastore
+ datastore_name = datastore.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+ return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+ return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+ return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
+ if datastore:
+ return datastore.ID
+ else:
+ return None
+
+
+def get_vm_by_id(client, vm_id):
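+ # Fetches a VM by its ID; returns None when the lookup fails.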
+ try:
+ vm = client.vm.info(int(vm_id))
+ except BaseException:
+ return None
+ return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
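+ # Resolves instance IDs to VM objects; fails when a VM does not exist unless
+ # state is 'absent', in which case None is kept in the returned list.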
+ vms = []
+
+ for vm_id in ids:
+ vm = get_vm_by_id(client, vm_id)
+ if vm is None and state != 'absent':
+ module.fail_json(msg='There is no VM with id=' + str(vm_id))
+ vms.append(vm)
+
+ return vms
+
+
+def get_vm_info(client, vm):
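+ # Builds the facts dictionary returned for a VM: state, networks, disk sizes,
+ # memory, ownership, permissions, uptime, labels and user attributes.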
+
+ vm = client.vm.info(vm.ID)
+
+ networks_info = []
+
+ disk_size = []
+ if 'DISK' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['DISK'], list):
+ for disk in vm.TEMPLATE['DISK']:
+ disk_size.append(disk['SIZE'] + ' MB')
+ else:
+ disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+ if 'NIC' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['NIC'], list):
+ for nic in vm.TEMPLATE['NIC']:
+ networks_info.append({'ip': nic['IP'], 'mac': nic['MAC'], 'name': nic['NETWORK'], 'security_groups': nic['SECURITY_GROUPS']})
+ else:
+ networks_info.append(
+ {'ip': vm.TEMPLATE['NIC']['IP'], 'mac': vm.TEMPLATE['NIC']['MAC'],
+ 'name': vm.TEMPLATE['NIC']['NETWORK'], 'security_groups': vm.TEMPLATE['NIC']['SECURITY_GROUPS']})
+ import time
+
+ current_time = time.localtime()
+ vm_start_time = time.localtime(vm.STIME)
+
+ vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+ vm_uptime /= (60 * 60)
+
+ permissions_str = parse_vm_permissions(client, vm)
+
+ # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+ vm_lcm_state = None
+ if vm.STATE == VM_STATES.index('ACTIVE'):
+ vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+ vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ info = {
+ 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+ 'vm_id': vm.ID,
+ 'vm_name': vm.NAME,
+ 'state': VM_STATES[vm.STATE],
+ 'lcm_state': vm_lcm_state,
+ 'owner_name': vm.UNAME,
+ 'owner_id': vm.UID,
+ 'networks': networks_info,
+ 'disk_size': disk_size,
+ 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+ 'vcpu': vm.TEMPLATE['VCPU'],
+ 'cpu': vm.TEMPLATE['CPU'],
+ 'group_name': vm.GNAME,
+ 'group_id': vm.GID,
+ 'uptime_h': int(vm_uptime),
+ 'attributes': vm_attributes,
+ 'mode': permissions_str,
+ 'labels': vm_labels
+ }
+
+ return info
+
+
+def parse_vm_permissions(client, vm):
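+ # Converts the VM's permission flags into an octal string, e.g. owner with
+ # use+manage rights and no group/other rights yields '600'.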
+ vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+ owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+ group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+ other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
+
+def set_vm_permissions(module, client, vms, permissions):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ old_permissions = parse_vm_permissions(client, vm)
+ changed = changed or old_permissions != permissions
+
+ if not module.check_mode and old_permissions != permissions:
+ permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
+ mode_bits = [int(d) for d in permissions_str]
+ try:
+ client.vm.chmod(
+ vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ if owner_id is None:
+ owner_id = vm.UID
+ if group_id is None:
+ group_id = vm.GID
+
+ changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+ if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+ try:
+ client.vm.chown(vm.ID, owner_id, group_id)
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def get_size_in_MB(module, size_str):
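+ # Parses a human-readable size such as '2 GB' or '512 MB' and returns the value
+ # in megabytes using binary prefixes, e.g. '2 GB' -> 2048.0.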
+
+ SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ s = size_str
+ init = size_str
+ num = ""
+ while s and (s[0:1].isdigit() or s[0:1] == '.'):
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ symbol = s.strip()
+
+ if symbol not in SYMBOLS:
+ module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+ prefix = {'B': 1}
+
+ for i, s in enumerate(SYMBOLS[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+
+ size_in_bytes = int(num * prefix[symbol])
+ size_in_MB = size_in_bytes / (1024 * 1024)
+
+ return size_in_MB
+
+
+def create_disk_str(module, client, template_id, disk_size_list):
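+ # Builds the DISK = [...] template string(s): every attribute of each disk in the
+ # source template is copied and SIZE is overridden with the requested size in MB.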
+
+ if not disk_size_list:
+ return ''
+
+ template = client.template.info(template_id)
+ if isinstance(template.TEMPLATE['DISK'], list):
+ # check if the number of disks is correct
+ if len(template.TEMPLATE['DISK']) != len(disk_size_list):
+ module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
+ result = ''
+ index = 0
+ for DISKS in template.TEMPLATE['DISK']:
+ disk = {}
+ diskresult = ''
+ # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in DISKS.items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
+ result += diskresult
+ index += 1
+ else:
+ if len(disk_size_list) > 1:
+ module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
+ disk = {}
+ # Get all info about the existing disk, e.g. IMAGE_ID, ...
+ for key, value in template.TEMPLATE['DISK'].items():
+ disk[key] = value
+ # copy disk attributes if it is not the size attribute
+ result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
+ # Set the Disk Size
+ result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
+
+ return result
+
+
+def create_attributes_str(attributes_dict, labels_list):
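+ # Renders labels and custom attributes in OpenNebula template syntax, e.g.
+ # LABELS="foo,bar" followed by KEY="value" lines.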
+
+ attributes_str = ''
+
+ if labels_list:
+ attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
+ if attributes_dict:
+ attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
+
+ return attributes_str
+
+
+def create_nics_str(network_attrs_list):
+ nics_str = ''
+
+ for network in network_attrs_list:
+ # Packing key-value dict in string with format key="value", key="value"
+ network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
+ nics_str = nics_str + 'NIC = [' + network_str + ']\n'
+
+ return nics_str
+
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
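+ # Instantiates a single VM from the template, passing the extra template string
+ # built from attributes, labels, NICs and disk sizes, and returns its info.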
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ disk_str = create_disk_str(module, client, template_id, disk_size)
+ vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
+ try:
+ vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ vm = get_vm_by_id(client, vm_id)
+
+ return get_vm_info(client, vm)
+
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
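+ # Returns the lowest unused zero-padded index, e.g. ['000', '001'] -> '002'.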
+ counter = 0
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ while cnt_str in vm_filled_indexes_list:
+ counter = counter + 1
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ return cnt_str
+
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
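+ # Splits the VM's USER_TEMPLATE into a list of labels (from the LABELS key) and
+ # a dict with the remaining user attributes.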
+ vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+ attrs_dict = {}
+ labels_list = []
+
+ for key, value in vm_USER_TEMPLATE.items():
+ if key != 'LABELS':
+ attrs_dict[key] = value
+ else:
+ if key is not None:
+ labels_list = value.split(',')
+
+ return labels_list, attrs_dict
+
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
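+ # Returns all VMs whose name, attributes and labels match the given filters;
+ # a NAME ending with '#' signs matches the indexed names created by this module.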
+ pool = client.vmpool.info(-2, -1, -1, -1).VM
+ vm_list = []
+ name = ''
+ if attributes_dict:
+ name = attributes_dict.pop('NAME', '')
+
+ if name != '':
+ base_name = name[:len(name) - name.count('#')]
+ # Check whether the name has an indexed format
+ with_hash = name.endswith('#')
+
+ for vm in pool:
+ if vm.NAME.startswith(base_name):
+ if with_hash and vm.NAME[len(base_name):].isdigit():
+ # If the name has an indexed format and only digits follow the base_name, it is a match
+ vm_list.append(vm)
+ elif not with_hash and vm.NAME == name:
+ # If the name is not indexed it has to be same
+ vm_list.append(vm)
+ pool = vm_list
+
+ import copy
+
+ vm_list = copy.copy(pool)
+
+ for vm in pool:
+ remove_list = []
+ vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ if attributes_dict and len(attributes_dict) > 0:
+ for key, val in attributes_dict.items():
+ if key in vm_attributes_dict:
+ if val and vm_attributes_dict[key] != val:
+ remove_list.append(vm)
+ break
+ else:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ remove_list = []
+ if labels_list and len(labels_list) > 0:
+ for label in labels_list:
+ if label not in vm_labels_list:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ return vm_list
+
+
+def create_count_of_vms(
+ module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
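+ # Deploys `count` VMs from the template; when NAME ends with '#' signs, the next
+ # free indexes are appended to the base name. Optionally waits for the VMs to
+ # reach RUNNING (or HOLD when vm_start_on_hold is set).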
+ new_vms_list = []
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ if module.check_mode:
+ return True, [], []
+
+ # Create list of used indexes
+ vm_filled_indexes_list = None
+ num_sign_cnt = vm_name.count('#')
+ if vm_name != '' and num_sign_cnt > 0:
+ vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
+ base_name = vm_name[:len(vm_name) - num_sign_cnt]
+ vm_name = base_name
+ # Make list which contains used indexes in format ['000', '001',...]
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
+
+ while count > 0:
+ new_vm_name = vm_name
+ # Create indexed name
+ if vm_filled_indexes_list is not None:
+ next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
+ vm_filled_indexes_list.append(next_index)
+ new_vm_name += next_index
+ # Update NAME value in the attributes in case there is index
+ attributes_dict['NAME'] = new_vm_name
+ new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
+ new_vm_id = new_vm_dict.get('vm_id')
+ new_vm = get_vm_by_id(client, new_vm_id)
+ new_vms_list.append(new_vm)
+ count -= 1
+
+ if vm_start_on_hold:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_hold(module, client, vm, wait_timeout)
+ else:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_running(module, client, vm, wait_timeout)
+
+ return True, new_vms_list, []
+
+
+def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
+ labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
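+ # Converges the number of VMs matching count_attributes/count_labels towards
+ # exact_count by deploying new VMs or terminating surplus ones.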
+
+ vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
+
+ vm_count_diff = exact_count - len(vm_list)
+ changed = vm_count_diff != 0
+
+ new_vms_list = []
+ instances_list = []
+ tagged_instances_list = vm_list
+
+ if module.check_mode:
+ return changed, instances_list, tagged_instances_list
+
+ if vm_count_diff > 0:
+ # Add more VMs
+ changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+ labels_list, disk_size, network_attrs_list, wait, wait_timeout,
+ vm_start_on_hold, vm_persistent)
+
+ tagged_instances_list += instances_list
+ elif vm_count_diff < 0:
+ # Delete surplus VMs
+ old_vms_list = []
+
+ while vm_count_diff < 0:
+ old_vm = vm_list.pop(0)
+ old_vms_list.append(old_vm)
+ terminate_vm(module, client, old_vm, hard)
+ vm_count_diff += 1
+
+ if wait:
+ for vm in old_vms_list:
+ wait_for_done(module, client, vm, wait_timeout)
+
+ instances_list = old_vms_list
+ # store only the remaining instances
+ old_vms_set = set(old_vms_list)
+ tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
+
+ return changed, instances_list, tagged_instances_list
+
+
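+# OpenNebula reports VM states as numeric codes; these lists map a code to its
+# name via the list index.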
+VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
+LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
+ 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
+ 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
+ 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
+ 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
+
+
+def wait_for_state(module, client, vm, wait_timeout, state_predicate):
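+ # Polls the VM once per second until the predicate on (STATE, LCM_STATE) holds,
+ # failing on an unexpected state or when wait_timeout expires.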
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ vm = client.vm.info(vm.ID)
+ state = vm.STATE
+ lcm_state = vm.LCM_STATE
+
+ if state_predicate(state, lcm_state):
+ return vm
+ elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
+ VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
+ module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_running(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state,
+ lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
+
+
+def wait_for_done(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
+
+
+def wait_for_hold(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
+
+
+def wait_for_poweroff(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
+
+
+def terminate_vm(module, client, vm, hard=False):
+ changed = False
+
+ if not vm:
+ return changed
+
+ changed = True
+
+ if not module.check_mode:
+ if hard:
+ client.vm.action('terminate-hard', vm.ID)
+ else:
+ client.vm.action('terminate', vm.ID)
+
+ return changed
+
+
+def terminate_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = terminate_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def poweroff_vm(module, client, vm, hard):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ if not hard:
+ client.vm.action('poweroff', vm.ID)
+ else:
+ client.vm.action('poweroff-hard', vm.ID)
+
+ return changed
+
+
+def poweroff_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = poweroff_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def reboot_vms(module, client, vms, wait_timeout, hard):
+
+ if not module.check_mode:
+ # Firstly, power-off all instances
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ poweroff_vm(module, client, vm, hard)
+
+ # Wait for all to be power-off
+ for vm in vms:
+ wait_for_poweroff(module, client, vm, wait_timeout)
+
+ for vm in vms:
+ resume_vm(module, client, vm)
+
+ return True
+
+
+def resume_vm(module, client, vm):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+ module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+ "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
+ if lcm_state not in [LCM_STATES.index('RUNNING')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('resume', vm.ID)
+
+ return changed
+
+
+def resume_vms(module, client, vms):
+ changed = False
+
+ for vm in vms:
+ changed = resume_vm(module, client, vm) or changed
+
+ return changed
+
+
+def check_name_attribute(module, attributes):
+ if attributes.get("NAME"):
+ import re
+ if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+ module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+ "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+ "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+ for key in attributes.keys():
+ if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+ module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+ # Check the format of the name attribute
+ check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
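+ # Saves one disk of the VM as a new image; the VM has to be in the POWEROFF
+ # state, and the call waits until the save-as operation has finished.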
+ if not disk_saveas.get('name'):
+ module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+ image_name = disk_saveas.get('name')
+ disk_id = disk_saveas.get('disk_id', 0)
+
+ if not module.check_mode:
+ if vm.STATE != VM_STATES.index('POWEROFF'):
+ module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
+ try:
+ client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
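+ # Resolves the API URL and credentials from the module parameters, the ONE_*
+ # environment variables, or the one_auth file, and returns them as a namedtuple.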
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not username:
+ if not password:
+ authfile = os.environ.get('ONE_AUTH')
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username = authstring.split(":")[0]
+ password = authstring.split(":")[1]
+ except (OSError, IOError):
+ module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+ except Exception:
+ module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
+ if not url:
+ module.fail_json(msg="Opennebula API url (api_url) is not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "instance_ids": {"required": False, "aliases": ['ids'], "type": "list"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "vm_start_on_hold": {"default": False, "type": "bool"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "wait": {"default": True, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "hard": {"default": False, "type": "bool"},
+ "memory": {"required": False, "type": "str"},
+ "cpu": {"required": False, "type": "float"},
+ "vcpu": {"required": False, "type": "int"},
+ "disk_size": {"required": False, "type": "list"},
+ "datastore_name": {"required": False, "type": "str"},
+ "datastore_id": {"required": False, "type": "int"},
+ "networks": {"default": [], "type": "list"},
+ "count": {"default": 1, "type": "int"},
+ "exact_count": {"required": False, "type": "int"},
+ "attributes": {"default": {}, "type": "dict"},
+ "count_attributes": {"required": False, "type": "dict"},
+ "labels": {"default": [], "type": "list"},
+ "count_labels": {"required": False, "type": "list"},
+ "disk_saveas": {"type": "dict"},
+ "persistent": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'instance_ids'],
+ ['template_id', 'template_name', 'disk_saveas'],
+ ['instance_ids', 'count_attributes', 'count'],
+ ['instance_ids', 'count_labels', 'count'],
+ ['instance_ids', 'exact_count'],
+ ['instance_ids', 'attributes'],
+ ['instance_ids', 'labels'],
+ ['disk_saveas', 'attributes'],
+ ['disk_saveas', 'labels'],
+ ['exact_count', 'count'],
+ ['count', 'hard'],
+ ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
+ ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
+ ['instance_ids', 'networks'],
+ ['persistent', 'disk_size']
+ ],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ instance_ids = params.get('instance_ids')
+ requested_template_name = params.get('template_name')
+ requested_template_id = params.get('template_id')
+ put_vm_on_hold = params.get('vm_start_on_hold')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ hard = params.get('hard')
+ memory = params.get('memory')
+ cpu = params.get('cpu')
+ vcpu = params.get('vcpu')
+ disk_size = params.get('disk_size')
+ requested_datastore_id = params.get('datastore_id')
+ requested_datastore_name = params.get('datastore_name')
+ networks = params.get('networks')
+ count = params.get('count')
+ exact_count = params.get('exact_count')
+ attributes = params.get('attributes')
+ count_attributes = params.get('count_attributes')
+ labels = params.get('labels')
+ count_labels = params.get('count_labels')
+ disk_saveas = params.get('disk_saveas')
+ persistent = params.get('persistent')
+
+ if not (auth.username and auth.password):
+ module.warn("Credentials missing")
+ else:
+ one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if attributes:
+ attributes = dict((key.upper(), value) for key, value in attributes.items())
+ check_attributes(module, attributes)
+
+ if count_attributes:
+ count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+ if not attributes:
+ import copy
+ module.warn('When you pass `count_attributes` without the `attributes` option when deploying, the `attributes` option implicitly takes the same values.')
+ attributes = copy.copy(count_attributes)
+ check_attributes(module, count_attributes)
+
+ if count_labels and not labels:
+ module.warn('When you pass `count_labels` without the `labels` option when deploying, the `labels` option implicitly takes the same values.')
+ labels = count_labels
+
+ # Fetch template
+ template_id = None
+ if requested_template_id is not None or requested_template_name:
+ template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+ if template_id is None:
+ if requested_template_id is not None:
+ module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ # Fetch datastore
+ datastore_id = None
+ if requested_datastore_id or requested_datastore_name:
+ datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+ if datastore_id is None:
+ if requested_datastore_id:
+ module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+ elif requested_datastore_name:
+ module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+ else:
+ attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+ if exact_count and template_id is None:
+ module.fail_json(msg='Option `exact_count` needs template_id or template_name')
+
+ if exact_count is not None and not (count_attributes or count_labels):
+ module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+ if (count_attributes or count_labels) and exact_count is None:
+ module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+ if template_id is not None and state != 'present':
+ module.fail_json(msg="Only state 'present' is valid for the template")
+
+ if memory:
+ attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+ if cpu:
+ attributes['CPU'] = str(cpu)
+ if vcpu:
+ attributes['VCPU'] = str(vcpu)
+
+ if exact_count is not None and state != 'present':
+ module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+ if exact_count is not None and exact_count < 0:
+ module.fail_json(msg='`exact_count` cannot be less than 0')
+ if count <= 0:
+ module.fail_json(msg='`count` has to be greater than 0')
+
+ if permissions is not None:
+ import re
+ if re.match("^[0-7]{3}$", permissions) is None:
+ module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
+ if exact_count is not None:
+ # Deploy an exact count of VMs
+ changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+ count_attributes, labels, count_labels, disk_size,
+ networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
+ vms = tagged_instances_list
+ elif template_id is not None and state == 'present':
+ # Deploy count VMs
+ changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+ attributes, labels, disk_size, networks, wait, wait_timeout,
+ put_vm_on_hold, persistent)
+ # instances_list - new instances
+ # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+ vms = instances_list
+ else:
+ # Fetch data of instances, or change their state
+ if not (instance_ids or attributes or labels):
+ module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
+
+ if memory or cpu or vcpu or disk_size or networks:
+ module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
+
+ if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+ module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+ vms = []
+ tagged = False
+ changed = False
+
+ if instance_ids:
+ vms = get_vms_by_ids(module, one_client, state, instance_ids)
+ else:
+ tagged = True
+ vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+ if len(vms) == 0 and state != 'absent' and state != 'present':
+ module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+ if len(vms) == 0 and state == 'present' and not tagged:
+ module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+ if tagged and state == 'absent':
+ module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+ if state == 'absent':
+ changed = terminate_vms(module, one_client, vms, hard)
+ elif state == 'rebooted':
+ changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+ elif state == 'poweredoff':
+ changed = poweroff_vms(module, one_client, vms, hard)
+ elif state == 'running':
+ changed = resume_vms(module, one_client, vms)
+
+ instances_list = vms
+ tagged_instances_list = []
+
+ if permissions is not None:
+ changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+ if owner_id is not None or group_id is not None:
+ changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+ if wait and not module.check_mode and state != 'present':
+ wait_for = {
+ 'absent': wait_for_done,
+ 'rebooted': wait_for_running,
+ 'poweredoff': wait_for_poweroff,
+ 'running': wait_for_running
+ }
+ for vm in vms:
+ if vm is not None:
+ wait_for[state](module, one_client, vm, wait_timeout)
+
+ if disk_saveas is not None:
+ if len(vms) == 0:
+ module.fail_json(msg="There is no VM whose disk will be saved.")
+ disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+ changed = True
+
+ # instances - a list of info for instances whose state was changed or which were fetched with the C(instance_ids) option
+ instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+ instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+ # tagged_instances - a list of info for instances matched by the attributes and/or labels specified with C(count_attributes) and C(count_labels)
+ tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+ result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
new file mode 100644
index 00000000..90694861
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy.
+description:
+ - Create, remove, reconfigure, update firewall policies.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a firewall policy state to create, remove, or update.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ firewall_policy:
+ description:
+ - The identifier (id or name) of the firewall policy used with update state.
+ type: str
+ rules:
+ description:
+ - A list of rules that will be set for the firewall policy.
+ Each rule must contain protocol parameter, in addition to three optional parameters
+ (port_from, port_to, and source)
+ type: list
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a firewall policy.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing firewall policy.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ type: list
+ required: false
+ description:
+ description:
+ - Firewall policy description. maxLength=256
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible-firewall-policy
+ description: Testing creation of firewall policies with ansible
+ rules:
+ -
+ protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible-firewall-policy
+
+- name: Update a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: update
+ firewall_policy: ansible-firewall-policy
+ name: ansible-firewall-policy-updated
+ description: Testing creation of firewall policies with ansible - updated
+
+- name: Add server to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ add_server_ips:
+ - server_identifier (id or name)
+ - server_identifier #2 (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ description: Adding rules to an existing firewall policy
+ add_rules:
+ -
+ protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+firewall_policy:
+ description: Information about the firewall policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_firewall_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
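+ """
+ In check mode, exit immediately and report the would-be changed status; otherwise do nothing.
+ """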
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
+ """
+ Assigns servers to a firewall policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in server_ids:
+ server = get_server(oneandone_conn, _server_id, True)
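+ # Attach the server via the id of its first assigned IP (the server is assumed to have at least one IP).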
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.attach_server_firewall_policy(
+ firewall_id=firewall_id,
+ server_ips=attach_servers)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
+ """
+ Unassigns a server/IP from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ firewall_server = oneandone_conn.get_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ if firewall_server:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
+ """
+ Adds new rules to a firewall policy.
+ """
+ try:
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ if module.check_mode:
+ firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
+ if (firewall_rules and firewall_policy_id):
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.add_firewall_policy_rule(
+ firewall_id=firewall_id,
+ firewall_policy_rules=firewall_rules
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
+ """
+ Removes a rule from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_firewall_policy_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_firewall_policy(module, oneandone_conn):
+ """
+ Updates a firewall policy based on input arguments.
+ Firewall rules and server ips can be added/removed to/from
+ firewall policy. Firewall policy name and description can be
+ updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ firewall_policy_id = module.params.get('firewall_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
+ if firewall_policy is None:
+ _check_mode(module, False)
+
+ if name or description:
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.modify_firewall(
+ firewall_id=firewall_policy['id'],
+ name=name,
+ description=description)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_server_ips))
+
+ firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
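+ # In check mode, accumulate whether any of the removals would change anything, then exit via _check_mode below.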
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+
+ _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ if add_rules:
+ firewall_policy = _add_firewall_rules(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_rules)
+ _check_mode(module, firewall_policy)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+
+ _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def create_firewall_policy(module, oneandone_conn):
+ """
+ Create a new firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ firewall_policy_obj = oneandone.client.FirewallPolicy(
+ name=name,
+ description=description
+ )
+
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.create_firewall_policy(
+ firewall_policy=firewall_policy_obj,
+ firewall_policy_rules=firewall_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.firewall_policy,
+ firewall_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
+ changed = True if firewall_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_firewall_policy(module, oneandone_conn):
+ """
+ Removes a firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ fp_id = module.params.get('name')
+ firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
+ if module.check_mode:
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
+
+ changed = True if firewall_policy else False
+
+ return (changed, {
+ 'id': firewall_policy['id'],
+ 'name': firewall_policy['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ description=dict(type='str'),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a firewall policy.")
+ try:
+ (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'update':
+ if not module.params.get('firewall_policy'):
+ module.fail_json(
+ msg="'firewall_policy' parameter is required to update a firewall policy.")
+ try:
+ (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ for param in ('name', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new firewall policies." % param)
+ try:
+ (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, firewall_policy=firewall_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
new file mode 100644
index 00000000..62551560
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_load_balancer
+short_description: Configure 1&1 load balancer.
+description:
+ - Create, remove, update load balancers.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a load balancer state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ load_balancer:
+ description:
+ - The identifier (id or name) of the load balancer used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ health_check_test:
+ description:
+ - Type of the health check. At the moment, HTTP is not allowed.
+ type: str
+ choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ health_check_interval:
+ description:
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ type: str
+ health_check_path:
+ description:
+ - URL to call for checking. Required for HTTP health check. maxLength=1000
+ type: str
+ required: false
+ health_check_parse:
+ description:
+ - Regular expression to check. Required for HTTP health check. maxLength=64
+ type: str
+ required: false
+ persistence:
+ description:
+ - Whether persistence is enabled.
+ type: bool
+ persistence_time:
+ description:
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ type: str
+ method:
+ description:
+ - Balancing procedure.
+ type: str
+ choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ datacenter:
+ description:
+ - ID or country code of the datacenter where the load balancer will be created.
+ - If not specified, it defaults to I(US).
+ type: str
+ choices: [ "US", "ES", "DE", "GB" ]
+ required: false
+ rules:
+ description:
+ - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
+ port_balancer, and port_server parameters, in addition to the optional source parameter.
+ type: list
+ description:
+ description:
+ - Description of the load balancer. maxLength=256
+ type: str
+ required: false
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a load balancer.
+ Used in combination with update state.
+ type: list
+ required: false
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
+ type: list
+ required: false
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing load balancer.
+ Its syntax is the same as that of the rules parameter. Used in combination with update state.
+ type: list
+ required: false
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
+ type: list
+ required: false
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ description: Testing creation of load balancer with ansible
+ health_check_test: TCP
+ health_check_interval: 40
+ persistence: true
+ persistence_time: 1200
+ method: ROUND_ROBIN
+ datacenter: US
+ rules:
+ -
+ protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ wait: true
+ wait_timeout: 500
+ state: absent
+
+- name: Update a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer
+ name: ansible load balancer updated
+ description: Testing the update of a load balancer with ansible
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add server to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding server to a load balancer with ansible
+ add_server_ips:
+ - server identifier (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing server from a load balancer with ansible
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ add_rules:
+ -
+ protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+load_balancer:
+ description: Information about the load balancer that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_load_balancer,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
+METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
+
+
+def _check_mode(module, result):
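+ """
+ In check mode, exit immediately and report the would-be changed status; otherwise do nothing.
+ """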
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
+ """
+ Assigns servers to a load balancer.
+ """
+ try:
+ attach_servers = []
+
+ for server_id in server_ids:
+ server = get_server(oneandone_conn, server_id, True)
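+ # Attach the server via the id of its first assigned IP (the server is assumed to have at least one IP).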
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.attach_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ips=attach_servers)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
+ """
+ Unassigns a server/IP from a load balancer.
+ """
+ try:
+ if module.check_mode:
+ lb_server = oneandone_conn.get_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ if lb_server:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
+ """
+ Adds new rules to a load_balancer.
+ """
+ try:
+ load_balancer_rules = []
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ if module.check_mode:
+ lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
+ if (load_balancer_rules and lb_id):
+ return True
+ return False
+
+ load_balancer = oneandone_conn.add_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
+ """
+ Removes a rule from a load_balancer.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id
+ )
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_load_balancer(module, oneandone_conn):
+ """
+ Updates a load_balancer based on input arguments.
+ Load balancer rules and server ips can be added/removed to/from
+ load balancer. Load balancer name, description, health_check_test,
+ health_check_interval, persistence, persistence_time, and method
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ load_balancer_id = module.params.get('load_balancer')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
+ if load_balancer is None:
+ _check_mode(module, False)
+
+ if (name or description or health_check_test or health_check_interval or health_check_path or
+ health_check_parse or persistence or persistence_time or method):
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.modify_load_balancer(
+ load_balancer_id=load_balancer['id'],
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_server_ips))
+
+ load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+
+ _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ if add_rules:
+ load_balancer = _add_load_balancer_rules(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_rules)
+ _check_mode(module, load_balancer)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+
+ _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ try:
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_load_balancer(module, oneandone_conn):
+ """
+ Create a new load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ datacenter = module.params.get('datacenter')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ load_balancer_rules = []
+
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ _check_mode(module, True)
+ load_balancer_obj = oneandone.client.LoadBalancer(
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method,
+ datacenter_id=datacenter_id
+ )
+
+ load_balancer = oneandone_conn.create_load_balancer(
+ load_balancer=load_balancer_obj,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.load_balancer,
+ load_balancer['id'],
+ wait_timeout,
+ wait_interval)
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
+ changed = True if load_balancer else False
+
+ _check_mode(module, False)
+
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_load_balancer(module, oneandone_conn):
+ """
+ Removes a load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ lb_id = module.params.get('name')
+ load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
+ if module.check_mode:
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
+
+ changed = True if load_balancer else False
+
+ return (changed, {
+ 'id': load_balancer['id'],
+ 'name': load_balancer['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ load_balancer=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ health_check_test=dict(
+ choices=HEALTH_CHECK_TESTS),
+ health_check_interval=dict(type='str'),
+ health_check_path=dict(type='str'),
+ health_check_parse=dict(type='str'),
+ persistence=dict(type='bool'),
+ persistence_time=dict(type='str'),
+ method=dict(
+ choices=METHODS),
+ datacenter=dict(
+ choices=DATACENTERS),
+ rules=dict(type='list', default=[]),
+ add_server_ips=dict(type='list', default=[]),
+ remove_server_ips=dict(type='list', default=[]),
+ add_rules=dict(type='list', default=[]),
+ remove_rules=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a load balancer.")
+ try:
+ (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('load_balancer'):
+ module.fail_json(
+ msg="'load_balancer' parameter is required for updating a load balancer.")
+ try:
+ (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
+ 'persistence_time', 'method', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new load balancers." % param)
+ try:
+ (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, load_balancer=load_balancer)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
new file mode 100644
index 00000000..79fed9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_monitoring_policy
+short_description: Configure 1&1 monitoring policy.
+description:
+ - Create, remove, update monitoring policies
+ (and add/remove ports, processes, and servers).
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a monitoring policy's state to create, remove, update.
+ type: str
+ required: false
+ default: present
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ type: str
+ monitoring_policy:
+ description:
+ - The identifier (id or name) of the monitoring policy used with update state.
+ type: str
+ agent:
+ description:
+ - Set to true to use the agent.
+ type: str
+ email:
+ description:
+ - User's email. maxLength=128
+ type: str
+ description:
+ description:
+ - Monitoring policy description. maxLength=256
+ type: str
+ required: false
+ thresholds:
+ description:
+ - Monitoring policy thresholds. Each of the suboptions has warning and critical,
+ which both have alert and value suboptions. Warning is used to set the limits for
+ warning alerts, critical is used to set the limits for critical alerts. alert enables the alert,
+ and value sets the threshold at which the alert is raised.
+ type: list
+ suboptions:
+ cpu:
+ description:
+ - Consumption limits of CPU.
+ required: true
+ ram:
+ description:
+ - Consumption limits of RAM.
+ required: true
+ disk:
+ description:
+ - Consumption limits of hard disk.
+ required: true
+ internal_ping:
+ description:
+ - Response limits of internal ping.
+ required: true
+ transfer:
+ description:
+ - Consumption limits for transfer.
+ required: true
+ ports:
+ description:
+ - Array of ports that will be monitored.
+ type: list
+ suboptions:
+ protocol:
+ description:
+ - Internet protocol.
+ choices: [ "TCP", "UDP" ]
+ required: true
+ port:
+ description:
+ - Port number. minimum=1, maximum=65535
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ required: true
+ email_notification:
+ description:
+ - Set to true to send e-mail notifications.
+ required: true
+ processes:
+ description:
+ - Array of processes that will be monitored.
+ type: list
+ suboptions:
+ process:
+ description:
+ - Name of the process. maxLength=50
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RUNNING", "NOT_RUNNING" ]
+ required: true
+ add_ports:
+ description:
+ - Ports to add to the monitoring policy.
+ type: list
+ required: false
+ add_processes:
+ description:
+ - Processes to add to the monitoring policy.
+ type: list
+ required: false
+ add_servers:
+ description:
+ - Servers to add to the monitoring policy.
+ type: list
+ required: false
+ remove_ports:
+ description:
+ - Ports to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_processes:
+ description:
+ - Processes to remove from the monitoring policy.
+ type: list
+ required: false
+ remove_servers:
+ description:
+ - Servers to remove from the monitoring policy.
+ type: list
+ required: false
+ update_ports:
+ description:
+ - Ports to be updated on the monitoring policy.
+ type: list
+ required: false
+ update_processes:
+ description:
+ - Processes to be updated on the monitoring policy.
+ type: list
+ required: false
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible monitoring policy
+ description: Testing creation of a monitoring policy with ansible
+ email: your@emailaddress.com
+ agent: true
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ -
+ ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
+ ports:
+ -
+ protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
+ processes:
+ -
+ process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+
+- name: Destroy a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible monitoring policy
+
+- name: Update a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy
+ name: ansible monitoring policy updated
+ description: Testing creation of a monitoring policy with ansible updated
+ email: another@emailaddress.com
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
+ wait: true
+ state: update
+
+- name: Add a port to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_ports:
+ -
+ protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing ports of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_ports:
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a port from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_ports:
+ - port_id
+ state: update
+
+- name: Add a process to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_processes:
+ -
+ process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing processes of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_processes:
+ -
+ id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ -
+ id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a process from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_processes:
+ - process_id
+ wait: true
+ state: update
+
+- name: Add server to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_servers:
+ - server id or name
+ wait: true
+ state: update
+
+- name: Remove server from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_servers:
+ - server01
+ wait: true
+ state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+ description: Information about the monitoring policy that was processed
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_monitoring_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
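+ """
+ In check mode, exit immediately and report the would-be changed status; otherwise do nothing.
+ """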
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+ """
+ Adds new ports to a monitoring policy.
+ """
+ try:
+ monitoring_policy_ports = []
+
+ for _port in ports:
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=_port['protocol'],
+ port=_port['port'],
+ alert_if=_port['alert_if'],
+ email_notification=_port['email_notification']
+ )
+ monitoring_policy_ports.append(monitoring_policy_port)
+
+ if module.check_mode:
+ if monitoring_policy_ports:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_port(
+ monitoring_policy_id=monitoring_policy_id,
+ ports=monitoring_policy_ports)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+ """
+ Removes a port from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if monitoring_policy:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+ """
+ Modifies a monitoring policy port.
+ """
+ try:
+ if module.check_mode:
+ cm_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if cm_port:
+ return True
+ return False
+
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=port['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id,
+ port=monitoring_policy_port)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+ """
+ Adds new processes to a monitoring policy.
+ """
+ try:
+ monitoring_policy_processes = []
+
+ for _process in processes:
+ monitoring_policy_process = oneandone.client.Process(
+ process=_process['process'],
+ alert_if=_process['alert_if'],
+ email_notification=_process['email_notification']
+ )
+ monitoring_policy_processes.append(monitoring_policy_process)
+
+ if module.check_mode:
+ mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+ if (monitoring_policy_processes and mp_id):
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_process(
+ monitoring_policy_id=monitoring_policy_id,
+ processes=monitoring_policy_processes)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+ """
+ Removes a process from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id
+ )
+ if process:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
+ """
+ Modifies a monitoring policy process.
+ """
+ try:
+ if module.check_mode:
+ cm_process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ if cm_process:
+ return True
+ return False
+
+ monitoring_policy_process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=process['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id,
+ process=monitoring_policy_process)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
+ """
+ Attaches servers to a monitoring policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in servers:
+ server_id = get_server(oneandone_conn, _server_id)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server_id
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ servers=attach_servers)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
+ """
+ Detaches a server from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ mp_server = oneandone_conn.get_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ if mp_server:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_monitoring_policy(module, oneandone_conn):
+ """
+ Updates a monitoring_policy based on input arguments.
+ Monitoring policy ports, processes and servers can be added/removed to/from
+ a monitoring policy. Monitoring policy name, description, email,
+ thresholds for cpu, ram, disk, transfer and internal_ping
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ monitoring_policy_id = module.params.get('monitoring_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ thresholds = module.params.get('thresholds')
+ add_ports = module.params.get('add_ports')
+ update_ports = module.params.get('update_ports')
+ remove_ports = module.params.get('remove_ports')
+ add_processes = module.params.get('add_processes')
+ update_processes = module.params.get('update_processes')
+ remove_processes = module.params.get('remove_processes')
+ add_servers = module.params.get('add_servers')
+ remove_servers = module.params.get('remove_servers')
+
+ changed = False
+
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+ if monitoring_policy is None:
+ _check_mode(module, False)
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(
+ name=name,
+ description=description,
+ email=email
+ )
+
+ _thresholds = None
+
+ if thresholds:
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+ for treshold in thresholds:
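+ # Each thresholds entry is a dict keyed by the entity name (cpu, ram, disk, internal_ping, or transfer); use its first key.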
+ key = list(treshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=treshold[key]['warning']['value'],
+ warning_alert=str(treshold[key]['warning']['alert']).lower(),
+ critical_value=treshold[key]['critical']['value'],
+ critical_alert=str(treshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ if name or description or email or thresholds:
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.modify_monitoring_policy(
+ monitoring_policy_id=monitoring_policy['id'],
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds)
+ changed = True
+
+ if add_ports:
+ if module.check_mode:
+ _check_mode(module, _add_ports(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_ports))
+
+ monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+ changed = True
+
+ if update_ports:
+ chk_changed = False
+ for update_port in update_ports:
+ if module.check_mode:
+ chk_changed |= _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+
+ _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_ports:
+ chk_changed = False
+ for port_id in remove_ports:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+
+ _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_processes:
+ monitoring_policy = _add_processes(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_processes)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if update_processes:
+ chk_changed = False
+ for update_process in update_processes:
+ if module.check_mode:
+ chk_changed |= _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+
+ _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_processes:
+ chk_changed = False
+ for process_id in remove_processes:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+
+ _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_servers:
+ monitoring_policy = _attach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_servers)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if remove_servers:
+ chk_changed = False
+ for _server_id in remove_servers:
+ server_id = get_server(oneandone_conn, _server_id)
+
+ if module.check_mode:
+ chk_changed |= _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+
+ _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_monitoring_policy(module, oneandone_conn):
+ """
+ Creates a new monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ agent = module.params.get('agent')
+ thresholds = module.params.get('thresholds')
+ ports = module.params.get('ports')
+ processes = module.params.get('processes')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(name,
+ description,
+ email,
+ agent, )
+
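+ # Normalize the agent flag to a lowercase string, matching how the other boolean-like fields are serialized below.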
+ _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+ for treshold in thresholds:
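+ # Each thresholds entry is a dict keyed by the entity name (cpu, ram, disk, internal_ping, or transfer); use its first key.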
+ key = list(treshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=treshold[key]['warning']['value'],
+ warning_alert=str(treshold[key]['warning']['alert']).lower(),
+ critical_value=treshold[key]['critical']['value'],
+ critical_alert=str(treshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ _ports = []
+ for port in ports:
+ _port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=str(port['email_notification']).lower())
+ _ports.append(_port)
+
+ _processes = []
+ for process in processes:
+ _process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=str(process['email_notification']).lower())
+ _processes.append(_process)
+
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.create_monitoring_policy(
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds,
+ ports=_ports,
+ processes=_processes
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.monitoring_policy,
+ monitoring_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if monitoring_policy else False
+
+ _check_mode(module, False)
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_monitoring_policy(module, oneandone_conn):
+ """
+ Removes a monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ mp_id = module.params.get('name')
+ monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
+ if module.check_mode:
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
+
+ changed = True if monitoring_policy else False
+
+ return (changed, {
+ 'id': monitoring_policy['id'],
+ 'name': monitoring_policy['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ agent=dict(type='str'),
+ email=dict(type='str'),
+ description=dict(type='str'),
+ thresholds=dict(type='list', default=[]),
+ ports=dict(type='list', default=[]),
+ processes=dict(type='list', default=[]),
+ add_ports=dict(type='list', default=[]),
+ update_ports=dict(type='list', default=[]),
+ remove_ports=dict(type='list', default=[]),
+ add_processes=dict(type='list', default=[]),
+ update_processes=dict(type='list', default=[]),
+ remove_processes=dict(type='list', default=[]),
+ add_servers=dict(type='list', default=[]),
+ remove_servers=dict(type='list', default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('monitoring_policy'):
+ module.fail_json(
+ msg="'monitoring_policy' parameter is required to update a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for a new monitoring policy." % param)
+ try:
+ (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
new file mode 100644
index 00000000..7eae6ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
@@ -0,0 +1,454 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_private_network
+short_description: Configure 1&1 private networking.
+description:
+  - Create, remove, reconfigure, or update a private network.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a network's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ private_network:
+ description:
+ - The identifier (id or name) of the network used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Private network name used with present state. Used as identifier (id or name) when used with absent state.
+ type: str
+ description:
+ description:
+ - Set a description for the network.
+ type: str
+ datacenter:
+ description:
+ - The identifier of the datacenter where the private network will be created
+ type: str
+ choices: [US, ES, DE, GB]
+ network_address:
+ description:
+      - Set a private network space, for example 192.168.1.0
+ type: str
+ subnet_mask:
+ description:
+      - Set the netmask for the private network, for example 255.255.255.0
+ type: str
+ add_members:
+ description:
+ - List of server identifiers (name or id) to be added to the private network.
+ type: list
+ remove_members:
+ description:
+ - List of server identifiers (name or id) to be removed from the private network.
+ type: list
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ name: backup_network
+ description: Testing creation of a private network with ansible
+ network_address: 70.35.193.100
+ subnet_mask: 255.0.0.0
+ datacenter: US
+
+- name: Destroy a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: backup_network
+
+- name: Modify the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ network_address: 192.168.2.0
+ subnet_mask: 255.255.255.0
+
+- name: Add members to the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ add_members:
+ - server identifier (id or name)
+
+- name: Remove members from the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ remove_members:
+ - server identifier (id or name)
+'''
+
+RETURN = '''
+private_network:
+ description: Information about the private network.
+ type: dict
+ sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_private_network,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_servers(module, oneandone_conn, name, members):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id and members:
+ return True
+ return False
+
+ network = oneandone_conn.attach_private_network_servers(
+ private_network_id=private_network_id,
+ server_ids=members)
+
+ return network
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id:
+ network_member = oneandone_conn.get_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+ if network_member:
+ return True
+ return False
+
+ network = oneandone_conn.remove_private_network_server(
+            private_network_id=private_network_id,
+ server_id=member_id)
+
+ return network
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+ """
+ Create new private network
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any network was added.
+ """
+ name = module.params.get('name')
+ description = module.params.get('description')
+ network_address = module.params.get('network_address')
+ subnet_mask = module.params.get('subnet_mask')
+ datacenter = module.params.get('datacenter')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ network = oneandone_conn.create_private_network(
+ private_network=oneandone.client.PrivateNetwork(
+ name=name,
+ description=description,
+ network_address=network_address,
+ subnet_mask=subnet_mask,
+ datacenter_id=datacenter_id
+ ))
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.private_network,
+ network['id'],
+ wait_timeout,
+ wait_interval)
+ network = get_private_network(oneandone_conn,
+ network['id'],
+ True)
+
+ changed = True if network else False
+
+ _check_mode(module, False)
+
+ return (changed, network)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+ """
+ Modifies a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ _private_network_id = module.params.get('private_network')
+ _name = module.params.get('name')
+ _description = module.params.get('description')
+ _network_address = module.params.get('network_address')
+ _subnet_mask = module.params.get('subnet_mask')
+ _add_members = module.params.get('add_members')
+ _remove_members = module.params.get('remove_members')
+
+ changed = False
+
+ private_network = get_private_network(oneandone_conn,
+ _private_network_id,
+ True)
+ if private_network is None:
+ _check_mode(module, False)
+
+ if _name or _description or _network_address or _subnet_mask:
+ _check_mode(module, True)
+ private_network = oneandone_conn.modify_private_network(
+ private_network_id=private_network['id'],
+ name=_name,
+ description=_description,
+ network_address=_network_address,
+ subnet_mask=_subnet_mask)
+ changed = True
+
+ if _add_members:
+ instances = []
+
+ for member in _add_members:
+ instance_id = get_server(oneandone_conn, member)
+ instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+ instances.extend([instance_obj])
+ private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+ _check_mode(module, private_network)
+ changed = True
+
+ if _remove_members:
+ chk_changed = False
+ for member in _remove_members:
+ instance = get_server(oneandone_conn, member, True)
+
+ if module.check_mode:
+ chk_changed |= _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ _check_mode(module, instance and chk_changed)
+
+ _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ private_network = get_private_network(oneandone_conn,
+ private_network['id'],
+ True)
+ changed = True
+
+ return (changed, private_network)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+ """
+ Removes a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+ """
+ try:
+ pn_id = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ private_network_id = get_private_network(oneandone_conn, pn_id)
+ if module.check_mode:
+ if private_network_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ private_network = oneandone_conn.delete_private_network(private_network_id)
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.private_network,
+ private_network['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = True if private_network else False
+
+ return (changed, {
+ 'id': private_network['id'],
+ 'name': private_network['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ private_network=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ network_address=dict(type='str'),
+ subnet_mask=dict(type='str'),
+ add_members=dict(type='list', default=[]),
+ remove_members=dict(type='list', default=[]),
+ datacenter=dict(
+ choices=DATACENTERS),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a network.")
+ try:
+ (changed, private_network) = remove_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('private_network'):
+ module.fail_json(
+ msg="'private_network' parameter is required for updating a network.")
+ try:
+ (changed, private_network) = update_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for new networks.")
+ try:
+ (changed, private_network) = create_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, private_network=private_network)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
new file mode 100644
index 00000000..edefbc93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs.
+description:
+ - Create, update, and remove public IPs.
+ This module has a dependency on 1and1 >= 1.0
+options:
+ state:
+ description:
+ - Define a public ip state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ reverse_dns:
+ description:
+      - Reverse DNS name. Maximum length is 256 characters.
+ type: str
+ required: false
+ datacenter:
+ description:
+ - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ type: str
+ choices: [US, ES, DE, GB]
+ default: US
+ required: false
+ type:
+ description:
+ - Type of IP. Currently, only IPV4 is available.
+ type: str
+ choices: ["IPV4", "IPV6"]
+ default: 'IPV4'
+ required: false
+ public_ip_id:
+ description:
+ - The ID of the public IP used with update and delete states.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ reverse_dns: example.com
+ datacenter: US
+ type: IPV4
+
+- name: Update a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ reverse_dns: secondexample.com
+ state: update
+
+- name: Delete a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ state: absent
+'''
+
+RETURN = '''
+public_ip:
+ description: Information about the public ip that was processed
+ type: dict
+ sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_public_ip,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def create_public_ip(module, oneandone_conn):
+ """
+ Create new public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was added.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ datacenter = module.params.get('datacenter')
+ ip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.create_public_ip(
+ reverse_dns=reverse_dns,
+ ip_type=ip_type,
+ datacenter_id=datacenter_id)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_public_ip(module, oneandone_conn):
+ """
+ Update a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was changed.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ public_ip_id = module.params.get('public_ip_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.modify_public_ip(
+ ip_id=public_ip['id'],
+ reverse_dns=reverse_dns)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+ changed = True if public_ip else False
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_public_ip(module, oneandone_conn):
+ """
+ Delete a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any public IP was deleted.
+ """
+ public_ip_id = module.params.get('public_ip_id')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ deleted_public_ip = oneandone_conn.delete_public_ip(
+ ip_id=public_ip['id'])
+
+ changed = True if deleted_public_ip else False
+
+ return (changed, {
+ 'id': public_ip['id']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ public_ip_id=dict(type='str'),
+ reverse_dns=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ type=dict(
+ choices=TYPES,
+ default='IPV4'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to delete a public ip.")
+ try:
+ (changed, public_ip) = delete_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to update a public ip.")
+ try:
+ (changed, public_ip) = update_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ try:
+ (changed, public_ip) = create_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, public_ip=public_ip)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py
new file mode 100644
index 00000000..1e6caab5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneandone_server.py
@@ -0,0 +1,705 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_server
+short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
+description:
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
+ When the server is created it can optionally wait for it to be 'running' before returning.
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start or stop it.
+ type: str
+ default: present
+ choices: [ "present", "absent", "running", "stopped" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the
+ ONEANDONE_AUTH_TOKEN environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: [ "US", "ES", "DE", "GB" ]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server.
+ It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server.
+ It is required only for 'present' state, and it is mutually exclusive with
+ vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ type: str
+ vcore:
+ description:
+ - The total number of processors.
+ It must be provided with cores_per_processor, ram, and hdds parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor.
+ It must be provided with vcore, ram, and hdds parameters.
+ type: int
+ ram:
+ description:
+      - The amount of RAM.
+        It must be provided with vcore, cores_per_processor, and hdds parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested "size" and "is_main" properties.
+ It must be provided with vcore, cores_per_processor, and ram parameters.
+ type: list
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+      - Server identifier (ID or hostname). It is required for all states except 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: [ "cloud", "baremetal", "k8s_node" ]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning.
+ Also used for delete operation (set to 'false' if you don't want to wait
+ for each individual server to be deleted before moving on with
+ other tasks.)
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the wait_for methods
+ type: int
+ default: 5
+ auto_increment:
+ description:
+ - When creating multiple servers at once, whether to differentiate
+ hostnames by appending a count after them or substituting the count
+ where there is a %02d or %03d in the hostname string.
+ type: bool
+ default: 'yes'
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+
+'''
+
+EXAMPLES = '''
+- name: Create three servers and enumerate their names
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ fixed_instance_size: XL
+ datacenter: US
+ appliance: C5A349786169F140BCBC335675014C08
+ auto_increment: true
+ count: 3
+
+- name: Create three servers, passing in an ssh_key
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ vcore: 2
+ cores_per_processor: 4
+ ram: 8.0
+ hdds:
+ - size: 50
+ is_main: false
+ datacenter: ES
+ appliance: C5A349786169F140BCBC335675014C08
+ count: 3
+ wait: yes
+ wait_timeout: 600
+ wait_interval: 10
+ ssh_key: SSH_PUBLIC_KEY
+
+- name: Removing server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: absent
+ server: 'node01'
+
+- name: Starting server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: running
+ server: 'node01'
+
+- name: Stopping server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: stopped
+ server: 'node01'
+'''
+
+RETURN = '''
+servers:
+ description: Information about each server that was processed
+ type: list
+ sample: '[{"hostname": "my-server", "id": "server-id"}]'
+ returned: always
+'''
+
+import os
+import time
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_fixed_instance_size,
+ get_appliance,
+ get_private_network,
+ get_monitoring_policy,
+ get_firewall_policy,
+ get_load_balancer,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+ONEANDONE_SERVER_STATES = (
+ 'DEPLOYING',
+ 'POWERED_OFF',
+ 'POWERED_ON',
+ 'POWERING_ON',
+ 'POWERING_OFF',
+)
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _create_server(module, oneandone_conn, hostname, description,
+ fixed_instance_size_id, vcore, cores_per_processor, ram,
+ hdds, datacenter_id, appliance_id, ssh_key,
+ private_network_id, firewall_policy_id, load_balancer_id,
+ monitoring_policy_id, server_type, wait, wait_timeout,
+ wait_interval):
+
+ try:
+ existing_server = get_server(oneandone_conn, hostname)
+
+ if existing_server:
+ if module.check_mode:
+ return False
+ return None
+
+ if module.check_mode:
+ return True
+
+ server = oneandone_conn.create_server(
+ oneandone.client.Server(
+ name=hostname,
+ description=description,
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ appliance_id=appliance_id,
+ datacenter_id=datacenter_id,
+ rsa_key=ssh_key,
+ private_network_id=private_network_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ monitoring_policy_id=monitoring_policy_id,
+ server_type=server_type,), hdds)
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+
+ return server
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _insert_network_data(server):
+ for addr_data in server['ips']:
+ if addr_data['type'] == 'IPV6':
+ server['public_ipv6'] = addr_data['ip']
+ elif addr_data['type'] == 'IPV4':
+ server['public_ipv4'] = addr_data['ip']
+ return server
+
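
A quick illustration of what `_insert_network_data` adds, using a made-up server record whose 'ips' entries follow the shape the loop above reads (the addresses are documentation examples):

```python
server = {
    'id': 'example-server-id',
    'ips': [
        {'type': 'IPV4', 'ip': '203.0.113.10'},
        {'type': 'IPV6', 'ip': '2001:db8::10'},
    ],
}
server = _insert_network_data(server)
# server['public_ipv4'] == '203.0.113.10'
# server['public_ipv6'] == '2001:db8::10'
```
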
+
+def create_server(module, oneandone_conn):
+ """
+ Create new server
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ any server was added, and a 'servers' attribute with the list of the
+ created servers' hostname, id and ip addresses.
+ """
+ hostname = module.params.get('hostname')
+ description = module.params.get('description')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ fixed_instance_size = module.params.get('fixed_instance_size')
+ vcore = module.params.get('vcore')
+ cores_per_processor = module.params.get('cores_per_processor')
+ ram = module.params.get('ram')
+ hdds = module.params.get('hdds')
+ datacenter = module.params.get('datacenter')
+ appliance = module.params.get('appliance')
+ ssh_key = module.params.get('ssh_key')
+ private_network = module.params.get('private_network')
+ monitoring_policy = module.params.get('monitoring_policy')
+ firewall_policy = module.params.get('firewall_policy')
+ load_balancer = module.params.get('load_balancer')
+ server_type = module.params.get('server_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ fixed_instance_size_id = None
+ if fixed_instance_size:
+ fixed_instance_size_id = get_fixed_instance_size(
+ oneandone_conn,
+ fixed_instance_size)
+ if fixed_instance_size_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='fixed_instance_size %s not found.' % fixed_instance_size)
+
+ appliance_id = get_appliance(oneandone_conn, appliance)
+ if appliance_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='appliance %s not found.' % appliance)
+
+ private_network_id = None
+ if private_network:
+ private_network_id = get_private_network(
+ oneandone_conn,
+ private_network)
+ if private_network_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='private network %s not found.' % private_network)
+
+ monitoring_policy_id = None
+ if monitoring_policy:
+ monitoring_policy_id = get_monitoring_policy(
+ oneandone_conn,
+ monitoring_policy)
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='monitoring policy %s not found.' % monitoring_policy)
+
+ firewall_policy_id = None
+ if firewall_policy:
+ firewall_policy_id = get_firewall_policy(
+ oneandone_conn,
+ firewall_policy)
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='firewall policy %s not found.' % firewall_policy)
+
+ load_balancer_id = None
+ if load_balancer:
+ load_balancer_id = get_load_balancer(
+ oneandone_conn,
+ load_balancer)
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='load balancer %s not found.' % load_balancer)
+
+ if auto_increment:
+ hostnames = _auto_increment_hostname(count, hostname)
+ descriptions = _auto_increment_description(count, description)
+ else:
+ hostnames = [hostname] * count
+ descriptions = [description] * count
+
+ hdd_objs = []
+ if hdds:
+ for hdd in hdds:
+ hdd_objs.append(oneandone.client.Hdd(
+ size=hdd['size'],
+ is_main=hdd['is_main']
+ ))
+
+ servers = []
+ for index, name in enumerate(hostnames):
+ server = _create_server(
+ module=module,
+ oneandone_conn=oneandone_conn,
+ hostname=name,
+ description=descriptions[index],
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ hdds=hdd_objs,
+ datacenter_id=datacenter_id,
+ appliance_id=appliance_id,
+ ssh_key=ssh_key,
+ private_network_id=private_network_id,
+ monitoring_policy_id=monitoring_policy_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ server_type=server_type,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ wait_interval=wait_interval)
+ if server:
+ servers.append(server)
+
+ changed = False
+
+ if servers:
+ for server in servers:
+ if server:
+ _check_mode(module, True)
+ _check_mode(module, False)
+ servers = [_insert_network_data(_server) for _server in servers]
+ changed = True
+
+ _check_mode(module, False)
+
+ return (changed, servers)
+
+
+def remove_server(module, oneandone_conn):
+ """
+ Removes a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary containing a 'changed' attribute indicating whether
+ the server was removed, and a 'removed_server' attribute with
+ the removed server's hostname and id.
+ """
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+ removed_server = None
+
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ _check_mode(module, True)
+ try:
+ oneandone_conn.delete_server(server_id=server['id'])
+ if wait:
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ changed = True
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to terminate the server: %s" % str(ex))
+
+ removed_server = {
+ 'id': server['id'],
+ 'hostname': server['name']
+ }
+ _check_mode(module, False)
+
+ return (changed, removed_server)
+
+
+def startstop_server(module, oneandone_conn):
+ """
+ Starts or Stops a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+ Returns a dictionary with a 'changed' attribute indicating whether
+ anything has changed for the server as a result of this function
+ being run, and a 'server' attribute with basic information for
+ the server.
+ """
+ state = module.params.get('state')
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+
+ # Resolve server
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ # Attempt to change the server state, only if it's not already there
+ # or on its way.
+ try:
+ if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_OFF',
+ method='SOFTWARE')
+ elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_ON',
+ method='SOFTWARE')
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to set server %s to state %s: %s" % (
+ server_id, state, str(ex)))
+
+ _check_mode(module, False)
+
+ # Make sure the server has reached the desired state
+ if wait:
+ operation_completed = False
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+ server_state = server['status']['state']
+ if state == 'stopped' and server_state == 'POWERED_OFF':
+ operation_completed = True
+ break
+ if state == 'running' and server_state == 'POWERED_ON':
+ operation_completed = True
+ break
+ if not operation_completed:
+ module.fail_json(
+ msg="Timeout waiting for server %s to get to state %s" % (
+ server_id, state))
+
+ changed = True
+ server = _insert_network_data(server)
+
+ _check_mode(module, False)
+
+ return (changed, server)
+
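
The wait block above is a plain poll-until-deadline loop. A standalone sketch of the same pattern (the helper name and the `check` callable are illustrative, not part of the module) looks roughly like this:

```python
import time

def wait_for_state(check, timeout=600, interval=5):
    """Poll check() every `interval` seconds until it returns True or `timeout` seconds pass."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    return False

# e.g. wait_for_state(lambda: get_current_state() == 'POWERED_OFF')
# where get_current_state() stands in for a hypothetical status lookup.
```
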
+
+def _auto_increment_hostname(count, hostname):
+ """
+ Allow a custom incremental count in the hostname when defined with the
+    string formatting (%) operator. Otherwise, increment using name-1,
+    name-2, name-3, and so forth.
+ """
+ if '%' not in hostname:
+ hostname = "%s-%%01d" % hostname
+
+ return [
+ hostname % i
+ for i in xrange(1, count + 1)
+ ]
+
+
+def _auto_increment_description(count, description):
+ """
+ Allow the incremental count in the description when defined with the
+ string formatting (%) operator. Otherwise, repeat the same description.
+ """
+ if '%' in description:
+ return [
+ description % i
+ for i in xrange(1, count + 1)
+ ]
+ else:
+ return [description] * count
+
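
Taken on their own, the two helpers above behave as sketched below (the hostnames and descriptions are illustrative), assuming both functions are in scope:

```python
_auto_increment_hostname(3, 'node%02d')
# -> ['node01', 'node02', 'node03']

_auto_increment_hostname(2, 'web')
# -> ['web-1', 'web-2']  (a '-%01d' suffix is appended when no '%' placeholder is given)

_auto_increment_description(2, 'batch %d of backup workers')
# -> ['batch 1 of backup workers', 'batch 2 of backup workers']
```
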
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
+ no_log=True),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ hostname=dict(type='str'),
+ description=dict(type='str'),
+ appliance=dict(type='str'),
+ fixed_instance_size=dict(type='str'),
+ vcore=dict(type='int'),
+ cores_per_processor=dict(type='int'),
+ ram=dict(type='float'),
+ hdds=dict(type='list'),
+ count=dict(type='int', default=1),
+ ssh_key=dict(type='raw'),
+ auto_increment=dict(type='bool', default=True),
+ server=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ private_network=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ load_balancer=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
+ ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
+ required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='1and1 required for this module')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for deleting a server.")
+ try:
+ (changed, servers) = remove_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for starting/stopping a server.")
+ try:
+ (changed, servers) = startstop_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('hostname',
+ 'appliance',
+ 'datacenter'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new server." % param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_facts.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+  - A fatal error occurs if any of the items being searched for cannot be found.
+  - It is recommended to use the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+      - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
+# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
+# second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+                # Haven't found it yet, so now let's see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
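
The parsing above assumes that `op get item` returns JSON roughly shaped like the abbreviated sketch below (illustrative only; real output contains many more keys). Top-level fields use `name`/`value`, fields inside sections use the short `t`/`v` keys, and a `documentAttributes` key under `details` marks a document item:

```python
example_item = {
    'overview': {'title': 'My 1Password item'},
    'details': {
        'password': 'top-level password value',      # found by the direct details lookup
        'fields': [
            {'name': 'username', 'value': 'alice'},  # matched case-insensitively by name
        ],
        'sections': [
            {'title': 'Custom section name',
             'fields': [{'t': 'Custom field name', 'v': 'sectioned value'}]},
        ],
    },
}
```
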
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
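
To make the normalisation concrete: a plain string term and a dictionary term both leave `parse_search_terms` with the same set of keys (the item names here are just examples):

```python
terms = [
    'My 1Password item',
    {'name': 'My Other 1Password item', 'field': 'username'},
]
# After parse_search_terms(terms), each entry carries all four keys:
# {'name': 'My 1Password item',       'field': 'password', 'section': None, 'vault': None}
# {'name': 'My Other 1Password item', 'field': 'username', 'section': None, 'vault': None}
```
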
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+            module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+                module.fail_json(msg="1Password CLI tool '%s' not installed in PATH on the control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
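+    # 'module' is created as a global so the OnePasswordInfo helpers above can call module.fail_json() directly.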
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py
new file mode 100644
index 00000000..6a5c3d92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/onepassword_info.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Ryan Conway (@rylon)
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+ - When this module is called with the deprecated C(onepassword_facts) name, potentially sensitive data
+ from 1Password is returned as Ansible facts. Facts are subject to caching if enabled, which means this
+ data could be stored in clear text on disk or in a database.
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+  - A fatal error occurs if any of the items being searched for cannot be found.
+  - It is recommended to use the C(no_log) option to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+options:
+ search_terms:
+ type: list
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+          - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: True
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+          - If this is not specified, the most recently signed-in subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: True
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ default: {}
+ required: False
+ cli_path:
+ type: path
+ description: Used to specify the exact path to the C(op) command line interface
+ required: False
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: True # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms to retrieve two items, one of which we fetch two
+# fields. In the first 'password' is fetched, as a field name is not specified (default behaviour) and in the
+# second, 'Custom field name' is fetched, as that is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
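+
+# A sketch of signing in automatically via 'auto_login'. The subdomain, username and
+# secret key below are placeholders; keep the real credentials in an Ansible Vault.
+- name: Get a password, signing in to 1Password automatically
+  community.general.onepassword_info:
+    search_terms: My 1Password item
+    auto_login:
+      subdomain: example
+      username: user@example.com
+      secret_key: A3-XXXXXX-XXXXXX-XXXXX-XXXXX-XXXXX-XXXXX
+      master_password: "{{ vaulted_master_password }}"
+  delegate_to: localhost
+  register: my_1password_item
+  no_log: true                    # Don't want to log the secrets to the console!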
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+ description: Dictionary of each 1password item matching the given search terms, shows what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.config_file_path = '~/.op/config'
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+ if ('documentAttributes' in data['details']):
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+ if (field_name in data['details']):
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+ # Not found it yet, so now lets see if there are any sections defined
+ # and search through those for the field. If a section was given, we skip
+ # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+            module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
+                                 "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+ # If the config file exists, assume an initial signin has taken place and try basic sign in
+ if os.path.isfile(self.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
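+        # 'op get account' only succeeds when a session is already active; if it is not, fall back to obtaining a session token.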
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+                module.fail_json(msg="1Password CLI tool '%s' not installed in PATH on the control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, we have to append this result dictionary
+ # to the existing one. This is only applicable when there is a single item
+ # in 1Password which has two different fields, and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
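+    # 'module' is created as a global so the OnePasswordInfo helpers above can call module.fail_json() directly.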
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list')
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ if module._name in ('onepassword_facts', 'community.general.onepassword_facts'):
+ module.deprecate("The 'onepassword_facts' module has been renamed to 'onepassword_info'. "
+ "When called with the new name it no longer returns 'ansible_facts'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ module.exit_json(changed=False, ansible_facts=results)
+ else:
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_facts.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
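+            # Visual content is only available per data center, so it is fetched for the first matching one (if any).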
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
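+            # Visual content is only available per data center, so it is fetched for the first matching one (if any).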
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
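+        # 'utilization' may be requested as a bare option or as a dict of parameters; parameters that are not supplied are passed as empty strings.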
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
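+        # 'utilization' may be requested as a bare option or as a dict of parameters; parameters that are not supplied are passed as empty strings.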
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
new file mode 100644
index 00000000..a81e144a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network
+short_description: Manage OneView Ethernet Network resources
+description:
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ - C(default_bandwidth_reset) will reset the network connection template to the default.
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - List with Ethernet Network properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Ethernet Network is present using the default configuration
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ vlanId: '201'
+ delegate_to: localhost
+
+- name: Update the Ethernet Network changing bandwidth and purpose
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ newName: 'Renamed Ethernet Network'
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is absent
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: 'New Ethernet Network'
+ delegate_to: localhost
+
+- name: Create Ethernet networks in bulk
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ vlanIdRange: '1-10,15,17'
+ purpose: General
+ namePrefix: TestNetwork
+ smartLink: false
+ privateNetwork: false
+ bandwidth:
+ maximumBandwidth: 10000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Reset to the default network connection template
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: default_bandwidth_reset
+ data:
+ name: 'Test Ethernet Network'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ethernet_network:
+ description: Has the facts about the Ethernet Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+
+ethernet_network_bulk:
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When 'vlanIdRange' attribute is in data argument. Can be null.
+ type: dict
+
+ethernet_network_connection_template:
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On state 'default_bandwidth_reset'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class EthernetNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'Ethernet Network created successfully.'
+ MSG_UPDATED = 'Ethernet Network updated successfully.'
+ MSG_DELETED = 'Ethernet Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
+ MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
+
+ MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
+ MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
+ MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
+ MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
+
+ RESOURCE_FACT_NAME = 'ethernet_network'
+
+ def __init__(self):
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
+ data=dict(type='dict', required=True),
+ )
+
+ super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+
+ changed, msg, ansible_facts, resource = False, '', {}, None
+
+ if self.data.get('name'):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ if self.data.get('vlanIdRange'):
+ return self._bulk_present()
+ else:
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+ elif self.state == 'default_bandwidth_reset':
+ changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
+
+ def _present(self, resource):
+
+ bandwidth = self.data.pop('bandwidth', None)
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if bandwidth:
+ if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
+ result['changed'] = True
+ result['msg'] = self.MSG_UPDATED
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
+
+ return result
+
+ def _bulk_present(self):
+ vlan_id_range = self.data['vlanIdRange']
+ result = dict(ansible_facts={})
+ ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ if not ethernet_networks:
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_BULK_CREATED
+
+ else:
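+            # Some of the requested networks already exist: work out which VLAN IDs are still missing and create only those.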
+ vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
+ for net in ethernet_networks[:]:
+ vlan_ids.remove(net['vlanId'])
+
+ if len(vlan_ids) == 0:
+ result['msg'] = self.MSG_BULK_ALREADY_EXIST
+ result['changed'] = False
+ else:
+ if len(vlan_ids) == 1:
+ self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
+ else:
+ self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
+
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_MISSING_BULK_CREATED
+ result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ return result
+
+ def _update_connection_template(self, ethernet_network, bandwidth):
+
+ if 'connectionTemplateUri' not in ethernet_network:
+ return False, None
+
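+        # Merge the requested bandwidth into the existing connection template and push an update only when it differs, keeping the result idempotent.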
+ connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
+
+ merged_data = connection_template.copy()
+ merged_data.update({'bandwidth': bandwidth})
+
+ if not self.compare(connection_template, merged_data):
+ connection_template = self.oneview_client.connection_templates.update(merged_data)
+ return True, connection_template
+ else:
+ return False, None
+
+ def _default_bandwidth_reset(self, resource):
+
+ if not resource:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
+
+ default_connection_template = self.oneview_client.connection_templates.get_default()
+
+ changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
+
+ return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
+ ethernet_network_connection_template=connection_template)
+
+
+def main():
+ EthernetNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
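+        # get_associated_profiles() returns URIs; resolve each one into the full server profile resource.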
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
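+ # The same class backs both the deprecated *_facts alias and the *_info module; the
+ # name the module was invoked under decides whether results are wrapped in ansible_facts.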
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
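+ # When a name is given, look the Ethernet Network up by name and, if options were
+ # requested and a match was found, gather the related resources; otherwise return
+ # every network matching the optional facts_params (pagination, sort, filter).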
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
new file mode 100644
index 00000000..45fa035c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network
+short_description: Manage OneView Fibre Channel Network resources
+description:
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+requirements:
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - Dictionary with the Fibre Channel Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Fibre Channel Network is present using the default configuration
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+
+- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ fabricType: 'DirectAttach'
+
+- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+
+- name: Ensure that the Fibre Channel Network is absent
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: absent
+ data:
+ name: 'New FC Network'
+'''
+
+RETURN = '''
+fc_network:
+ description: Has the facts about the managed OneView FC Network.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FC Network created successfully.'
+ MSG_UPDATED = 'FC Network updated successfully.'
+ MSG_DELETED = 'FC Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FC Network is already present.'
+ MSG_ALREADY_ABSENT = 'FC Network is already absent.'
+ RESOURCE_FACT_NAME = 'fc_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent']))
+
+ super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fc_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self._present(resource)
+ else:
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
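+ # scopeUris is applied through a separate scope-assignment request, so it is removed
+ # from the data before the regular present/compare step and re-applied afterwards.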
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fc_network', scope_uris)
+ return result
+
+
+def main():
+ FcNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
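+ # Look the network up by name when one is provided, otherwise list all FC networks
+ # honouring the optional pagination/sort/filter params.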
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
new file mode 100644
index 00000000..79d8ae21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network
+short_description: Manage OneView FCoE Network resources
+description:
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+requirements:
+ - "python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with FCoE Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that FCoE Network is present using the default configuration
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: Test FCoE Network
+ vlanId: 201
+ delegate_to: localhost
+
+- name: Update the FCOE network scopes
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: New FCoE Network
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+ delegate_to: localhost
+
+- name: Ensure that FCoE Network is absent
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: New FCoE Network
+ delegate_to: localhost
+'''
+
+RETURN = '''
+fcoe_network:
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FCoE Network created successfully.'
+ MSG_UPDATED = 'FCoE Network updated successfully.'
+ MSG_DELETED = 'FCoE Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
+ MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
+ RESOURCE_FACT_NAME = 'fcoe_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent']))
+
+ super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fcoe_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
+ return result
+
+
+def main():
+ FcoeNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about an FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about an FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
new file mode 100644
index 00000000..8ca49e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group
+short_description: Manage OneView Logical Interconnect Group resources
+description:
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ C(absent) will remove the resource from OneView, if it exists.
+ C(present) will ensure data properties are compliant with OneView.
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+ - Dictionary with the Logical Interconnect Group properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Logical Interconnect Group is present
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ uplinkSets: []
+ enclosureType: C7000
+ interconnectMapTemplate:
+ interconnectMapEntryTemplates:
+ - logicalDownlinkUri: ~
+ logicalLocation:
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
+ permittedInterconnectTypeName: HP VC Flex-10/10D Module
+ # Alternatively you can inform permittedInterconnectTypeUri
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group has the specified scopes
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ scopeUris:
+ - /rest/scopes/00SC123456
+ - /rest/scopes/01SC123456
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is present with name 'Test'
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: New Logical Interconnect Group
+ newName: Test
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is absent
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: New Logical Interconnect Group
+ delegate_to: localhost
+'''
+
+RETURN = '''
+logical_interconnect_group:
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class LogicalInterconnectGroupModule(OneViewModuleBase):
+ MSG_CREATED = 'Logical Interconnect Group created successfully.'
+ MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
+ MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
+ MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
+ MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'
+
+ RESOURCE_FACT_NAME = 'logical_interconnect_group'
+
+ def __init__(self):
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict')
+ )
+
+ super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.logical_interconnect_groups
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+
+ self.__replace_name_by_uris(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris)
+
+ return result
+
+ def __replace_name_by_uris(self, data):
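+ # Swap any permittedInterconnectTypeName in the interconnect map entries for the
+ # matching permittedInterconnectTypeUri, resolving the name through the OneView client.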
+ map_template = data.get('interconnectMapTemplate')
+
+ if map_template:
+ map_entry_templates = map_template.get('interconnectMapEntryTemplates')
+ if map_entry_templates:
+ for value in map_entry_templates:
+ permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None)
+ if permitted_interconnect_type_name:
+ value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name(
+ permitted_interconnect_type_name).get('uri')
+
+ def __get_interconnect_type_by_name(self, name):
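+ # get_by returns a list of matches; use the first one, or fail when the
+ # interconnect type does not exist.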
+ i_type = self.oneview_client.interconnect_types.get_by('name', name)
+ if i_type:
+ return i_type[0]
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+
+def main():
+ LogicalInterconnectGroupModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py
new file mode 100644
index 00000000..cc70d5e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set
+short_description: Manage HPE OneView Network Set resources
+description:
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with the Network Set properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Create a Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ networkUris:
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ delegate_to: localhost
+
+- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ newName: OneViewSDK Test Network Set - Renamed
+ networkUris:
+ - Test Ethernet Network_1
+ delegate_to: localhost
+
+- name: Delete the Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: OneViewSDK Test Network Set - Renamed
+ delegate_to: localhost
+
+- name: Update the Network set with two scopes
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ scopeUris:
+ - /rest/scopes/01SC123456
+ - /rest/scopes/02SC123456
+ delegate_to: localhost
+'''
+
+RETURN = '''
+network_set:
+ description: Has the facts about the Network Set.
+ returned: On state 'present', but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class NetworkSetModule(OneViewModuleBase):
+ MSG_CREATED = 'Network Set created successfully.'
+ MSG_UPDATED = 'Network Set updated successfully.'
+ MSG_DELETED = 'Network Set deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Network Set is already present.'
+ MSG_ALREADY_ABSENT = 'Network Set is already absent.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
+ RESOURCE_FACT_NAME = 'network_set'
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict'))
+
+ def __init__(self):
+ super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.network_sets
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ self._replace_network_name_by_uri(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris)
+ return result
+
+ def _get_ethernet_network_by_name(self, name):
+ result = self.oneview_client.ethernet_networks.get_by('name', name)
+ return result[0] if result else None
+
+ def _get_network_uri(self, network_name_or_uri):
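+ # networkUris entries may be full /rest/ethernet-networks URIs or Ethernet Network
+ # names; names are resolved to URIs and unknown names raise a resource-not-found error.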
+ if network_name_or_uri.startswith('/rest/ethernet-networks'):
+ return network_name_or_uri
+ else:
+ enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
+ if enet_network:
+ return enet_network['uri']
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
+
+ def _replace_network_name_by_uri(self, data):
+ if 'networkUris' in data:
+ data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']]
+
+
+def main():
+ NetworkSetModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
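+ # The withoutEthernet option uses a dedicated endpoint; a name, when given, is passed
+ # as a OneView filter expression instead of a get_by lookup.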
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
new file mode 100644
index 00000000..57e93475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the SAN Manager resource.
+ - C(present) ensures data properties are compliant with OneView.
+ - C(absent) removes the resource from OneView, if it exists.
+ - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+ - Dictionary with SAN Manager properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ providerDisplayName: Brocade Network Advisor
+ connectionInfo:
+ - name: Host
+ value: 172.18.15.1
+ - name: Port
+ value: 5989
+ - name: Username
+ value: username
+ - name: Password
+ value: password
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Ensure a Device Manager for the Cisco SAN Provider is present
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.20.1
+ providerDisplayName: Cisco
+ connectionInfo:
+ - name: Host
+ value: 172.18.20.1
+ - name: SnmpPort
+ value: 161
+ - name: SnmpUserName
+ value: admin
+ - name: SnmpAuthLevel
+ value: authnopriv
+ - name: SnmpAuthProtocol
+ value: sha
+ - name: SnmpAuthString
+ value: password
+ delegate_to: localhost
+
+- name: Sets the SAN Manager connection information
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: connection_information_set
+ data:
+ connectionInfo:
+ - name: Host
+ value: '172.18.15.1'
+ - name: Port
+ value: '5989'
+ - name: Username
+ value: 'username'
+ - name: Password
+ value: 'password'
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Refreshes the SAN Manager
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.15.1
+ refreshState: RefreshPending
+ delegate_to: localhost
+
+- name: Delete the SAN Manager recently created
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: '172.18.15.1'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+san_manager:
+ description: Has the OneView facts about the SAN Manager.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
+
+
+class SanManagerModule(OneViewModuleBase):
+ MSG_CREATED = 'SAN Manager created successfully.'
+ MSG_UPDATED = 'SAN Manager updated successfully.'
+ MSG_DELETED = 'SAN Manager deleted successfully.'
+ MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
+ MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
+ MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
+ data=dict(type='dict', required=True)
+ )
+
+ def __init__(self):
+ super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
+ self.resource_client = self.oneview_client.san_managers
+
+ def execute_module(self):
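+        # The resource name is resolved from the 'Host' entry of connectionInfo when
+        # one is given, otherwise from data['name']; if neither is present an error
+        # is raised before any API call is made.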
+ if self.data.get('connectionInfo'):
+ for connection_hash in self.data.get('connectionInfo'):
+ if connection_hash.get('name') == 'Host':
+ resource_name = connection_hash.get('value')
+ elif self.data.get('name'):
+ resource_name = self.data.get('name')
+ else:
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+ msg += 'If "connectionInfo" is provided, its "Host" value is used as the resource "name".'
+ raise OneViewModuleValueError(msg)
+
+ resource = self.resource_client.get_by_name(resource_name)
+
+ if self.state == 'present':
+ changed, msg, san_manager = self._present(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ elif self.state == 'absent':
+ return self.resource_absent(resource, method='remove')
+
+ elif self.state == 'connection_information_set':
+ changed, msg, san_manager = self._connection_information_set(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ def _present(self, resource):
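+        # Create the SAN Manager when it does not exist yet (resolving the provider
+        # URI from providerDisplayName if it is not supplied); otherwise merge the
+        # requested data over the current resource and update only if they differ.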
+ if not resource:
+ provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+ return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+ resource.pop('connectionInfo', None)
+ merged_data.pop('connectionInfo', None)
+
+ if self.compare(resource, merged_data):
+ return False, self.MSG_ALREADY_PRESENT, resource
+ else:
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _connection_information_set(self, resource):
+ if not resource:
+ return self._present(resource)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+ merged_data.pop('refreshState', None)
+ if not self.data.get('connectionInfo', None):
+ raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _get_provider_uri_by_display_name(self, data):
+ display_name = data.get('providerDisplayName')
+ provider_uri = self.resource_client.get_provider_uri(display_name)
+
+ if not provider_uri:
+ raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+ return provider_uri
+
+
+def main():
+ SanManagerModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_facts.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+ - Retrieve information about one or more of the SAN Managers from OneView
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+ - Retrieve information about one or more of the SAN Managers from OneView
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
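+        # A provider_display_name lookup yields at most one manager (wrapped in a
+        # list, or an empty list when nothing matches); otherwise all managers are
+        # fetched with the common facts params.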
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py
new file mode 100644
index 00000000..f1e74aa6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_facts.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_server_info) instead.
+short_description: Gather facts about Online servers.
+description:
+ - Gather facts about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server facts
+ community.general.online_server_facts:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+'''
+
+RETURN = r'''
+---
+online_server_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_facts": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineServerFacts, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_facts = OnlineServerFacts(module).all_detailed_servers()
+ module.exit_json(
+ ansible_facts={'online_server_facts': servers_facts}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py
new file mode 100644
index 00000000..f0e73aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_server_info.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_info
+short_description: Gather information about Online servers.
+description:
+ - Gather information about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server information
+ community.general.online_server_info:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_server_info }}"
+'''
+
+RETURN = r'''
+---
+online_server_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_server_info": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineServerInfo, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
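+        # List the server resource paths first, then fetch the detail record for
+        # each path; a failed fetch aborts via fail_json, and any None entries are
+        # filtered out of the final list.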
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_info = OnlineServerInfo(module).all_detailed_servers()
+ module.exit_json(
+ online_server_info=servers_info
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py
new file mode 100644
index 00000000..7b78924e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_facts.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.online_user_info) instead.
+short_description: Gather facts about the Online user.
+description:
+ - Gather facts about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user facts
+ community.general.online_user_facts:
+'''
+
+RETURN = r'''
+---
+online_user_facts:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_facts": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserFacts(Online):
+
+ def __init__(self, module):
+ super(OnlineUserFacts, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'online_user_facts': OnlineUserFacts(module).get_resources()}
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py
new file mode 100644
index 00000000..093a2c68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/online_user_info.py
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_user_info
+short_description: Gather information about the Online user.
+description:
+ - Gather information about the user.
+author:
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.online
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user info
+ community.general.online_user_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_user_info }}"
+'''
+
+RETURN = r'''
+---
+online_user_info:
+ description: Response from Online API
+ returned: success
+ type: complex
+ sample:
+ "online_user_info": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineUserInfo, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ online_user_info=OnlineUserInfo(module).get_resources()
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py
new file mode 100644
index 00000000..222bb82f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/open_iscsi.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: open_iscsi
+author:
+- Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+ - Discover targets on a given portal, (dis)connect targets, mark targets for
+ manual or automatic startup, and return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+options:
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ ip ]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: 3260
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [ name, targetname ]
+ login:
+ description:
+ - Whether the target node should be connected.
+ type: bool
+ aliases: [ state ]
+ node_auth:
+ description:
+ - The value for C(node.session.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(node.session.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(node.session.auth.password).
+ type: str
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [ automatic ]
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iSCSI database.
+ - Keep in mind that C(iscsiadm) discovery resets configuration, for example C(node.startup)
+ back to manual; hence, when combined with C(auto_node_startup=yes), this will always
+ report a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Perform a discovery on sun.com and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: sun.com
+
+- name: Perform a discovery on 10.1.2.3 and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ ip: 10.1.2.3
+
+# NOTE: Only works if exactly one target is exported to the initiator
+- name: Discover targets on portal and login to the one available
+ community.general.open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+- name: Connect to the named target, after updating the local persistent database (cache)
+ community.general.open_iscsi:
+ login: yes
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Disconnect from the cached named target
+ community.general.open_iscsi:
+ login: no
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import os
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+ISCSIADM = 'iscsiadm'
+
+
+def compare_nodelists(l1, l2):
+ l1.sort()
+ l2.sort()
+ return l1 == l2
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+ cmd = '%s --mode node' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ nodes = []
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+ # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ nodes = []
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+ cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_loggedon(module, target):
+ cmd = '%s --mode session' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ return target in out
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
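+    # When CHAP credentials are supplied, write them to the node record with
+    # --op=update before issuing --login; the portal is only appended to the
+    # login command when both portal and port are known.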
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+ (rc, out, err) = module.run_command(cmd)
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
+ if portal is not None and port is not None:
+ cmd += ' --portal %s:%s' % (portal, port)
+
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_logout(module, target):
+ cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_device_node(module, target):
+ # if anyone knows a better way to find out which device nodes get created for
+ # a given target...
+
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target):
+ cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setauto(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setmanual(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def main():
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # target
+ portal=dict(type='str', aliases=['ip']),
+ port=dict(type='str', default='3260'),
+ target=dict(type='str', aliases=['name', 'targetname']),
+ node_auth=dict(type='str', default='CHAP'),
+ node_user=dict(type='str'),
+ node_pass=dict(type='str', no_log=True),
+
+ # actions
+ login=dict(type='bool', aliases=['state']),
+ auto_node_startup=dict(type='bool', aliases=['automatic']),
+ discover=dict(type='bool', default=False),
+ show_nodes=dict(type='bool', default=False),
+ ),
+
+ required_together=[['node_user', 'node_pass']],
+ supports_check_mode=True,
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ if portal:
+ try:
+ portal = socket.getaddrinfo(portal, None)[0][4][0]
+ except socket.gaierror:
+ module.fail_json(msg="Portal address is incorrect")
+
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {}
+ result['changed'] = False
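+    # 'changed' is accumulated with |= across the discovery, login/logout and
+    # automatic-startup sections below, so any one of them can flip it to True.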
+
+ if discover:
+ if portal is None:
+ module.fail_json(msg="Need to specify at least the portal (ip) to discover")
+ elif check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg="Need to specify a target")
+ else:
+ target = nodes[0]
+ else:
+ # check given target is in cache
+ check_target = False
+ for node in nodes:
+ if node == target:
+ check_target = True
+ break
+ if not check_target:
+ module.fail_json(msg="Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(module, target)
+ elif not check:
+ if login:
+ target_login(module, target, portal, port)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(module, target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
new file mode 100644
index 00000000..7432c48a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author:
+- Patrik Lundin (@eest)
+short_description: Manage packages on OpenBSD
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+requirements:
+- python >= 2.5
+options:
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: yes
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ choices: [ absent, latest, present, installed, removed ]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ - Mutually exclusive with I(snapshot).
+ type: bool
+ default: no
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with I(build).
+ type: bool
+ default: no
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the C(build) option, allows overriding
+ the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration
+ file(s) in the old packages which are annotated with @extra in
+ the packaging-list.
+ type: bool
+ default: no
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums
+ before removing normal files.
+ type: bool
+ default: no
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+
+- name: Make sure nmap is the latest version
+ community.general.openbsd_pkg:
+ name: nmap
+ state: latest
+
+- name: Make sure nmap is not installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+ build: yes
+
+- name: Specify a pkg flavour with '--'
+ community.general.openbsd_pkg:
+ name: vim--no_x11
+ state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+ community.general.openbsd_pkg:
+ name: vim--
+ state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+ community.general.openbsd_pkg:
+ name: python%3.5
+ state: present
+
+- name: Update all packages on the system
+ community.general.openbsd_pkg:
+ name: '*'
+ state: latest
+
+- name: Purge a package and its configuration files
+ community.general.openbsd_pkg:
+ name: mpd
+ clean: yes
+ state: absent
+
+- name: Quickly remove a package without checking checksums
+ community.general.openbsd_pkg:
+ name: qt5
+ quick: yes
+ state: absent
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+ return module.run_command(cmd_args)
+
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ for name in names:
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+ pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+ pkg_spec[name]['installed_state'] = True
+ else:
+ pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+ build = module.params['build']
+
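+    # In check mode pkg_add runs with -n (dry run); otherwise build=true compiles
+    # the package from the ports tree, while the default path adds a binary
+    # package with pkg_add -Im.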
+ for name in names:
+ # It is possible package_present() has been called from package_latest().
+ # In that case we do not want to operate on the whole list of names,
+ # only the leftovers.
+ if pkg_spec['package_latest_leftovers']:
+ if name not in pkg_spec['package_latest_leftovers']:
+ module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+ continue
+ else:
+ module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec[name]['flavor']:
+ flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec[name]['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if module.params['snapshot'] is True:
+ install_cmd += ' -Dsnap'
+
+ if pkg_spec[name]['installed_state'] is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on if a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") even though a branch name may look like a
+ # version string, it is not used as one by pkg_add.
+ if pkg_spec[name]['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code for name '%s'" % name)
+ if pkg_spec[name]['rc']:
+ pkg_spec[name]['changed'] = False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr for name '%s'" % name)
+ if pkg_spec[name]['stderr']:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+ # while still installing the package, so we need to look
+ # for a message like "packagename-1.0: ok" just in case.
+ match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package for name '%s'" % name)
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name)
+ pkg_spec[name]['rc'] = 1
+ pkg_spec[name]['changed'] = False
+ else:
+ module.debug("package_present(): stderr was not set for name '%s'" % name)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to make sure a package is the latest available version.
+def package_latest(names, pkg_spec, module):
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ upgrade_cmd = 'pkg_add -um'
+
+ if module.check_mode:
+ upgrade_cmd += 'n'
+
+ if module.params['clean']:
+ upgrade_cmd += 'c'
+
+ if module.params['quick']:
+ upgrade_cmd += 'q'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+
+ # Attempt to upgrade the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ pkg_spec[name]['changed'] = False
+ for installed_name in pkg_spec[name]['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+
+ pkg_spec[name]['changed'] = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+ if pkg_spec[name]['changed'] is not True:
+ if pkg_spec[name]['stderr']:
+ pkg_spec[name]['rc'] = 1
+
+ else:
+ # Note packages that need to be handled by package_present
+ module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
+ pkg_spec['package_latest_leftovers'].append(name)
+
+ # If there were any packages that were not installed we call
+ # package_present() which will handle those.
+ if pkg_spec['package_latest_leftovers']:
+ module.debug("package_latest(): calling package_present() to handle leftovers")
+ package_present(names, pkg_spec, module)
+
+
+# Function used to make sure a package is not installed.
+def package_absent(names, pkg_spec, module):
+ remove_cmd = 'pkg_delete -I'
+
+ if module.check_mode:
+ remove_cmd += 'n'
+
+ if module.params['clean']:
+ remove_cmd += 'c'
+
+ if module.params['quick']:
+ remove_cmd += 'q'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+ # Attempt to remove the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+ else:
+ pkg_spec[name]['changed'] = False
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+ # Initialize empty list of package_latest() leftovers.
+ pkg_spec['package_latest_leftovers'] = []
+
+ for name in names:
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # All information for a given name is kept in the pkg_spec keyed by that name.
+ pkg_spec[name] = {}
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = match.group('version')
+ pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'version'
+ module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+ "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = '-'
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'versionless'
+ module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a
+ # stem, possibly with a branch (%branchname) tacked on at the
+ # end.
+ else:
+ match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = None
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = None
+ pkg_spec[name]['flavor'] = None
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'stem'
+ module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # Verify that the managed host is new enough to support branch syntax.
+ if pkg_spec[name]['branch']:
+ branch_release = "6.0"
+
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec[name]['flavor']:
+ match = re.search("-$", pkg_spec[name]['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
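+    # Resolve the ports tree path by querying the sqlports SQLite database: try an
+    # exact fullpkgname match first, then a LIKE pattern built from the parsed
+    # stem/version/flavor, and fail unless exactly one row matches.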
+ pkg_spec[name]['subpackage'] = None
+ if pkg_spec[name]['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("get_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec[name]['flavor']:
+ looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+ module.debug("get_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec[name]['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("get_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("get_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x: x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec[name]['subpackage'] = parts[1]
+ return parts[0]
+
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ # Create a minimal pkg_spec entry for '*' to store return values.
+ pkg_spec['*'] = {}
+
+ # Attempt to upgrade all packages.
+ pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+ # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+ if match:
+ pkg_spec['*']['changed'] = True
+
+ else:
+ pkg_spec['*']['changed'] = False
+
+ # It seems we cannot trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if pkg_spec['*']['stderr']:
+ pkg_spec['*']['rc'] = 1
+ else:
+ pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build=dict(type='bool', default=False),
+ snapshot=dict(type='bool', default=False),
+ ports_dir=dict(type='path', default='/usr/ports'),
+ quick=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['snapshot', 'build']],
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ # The data structure used to keep track of package information.
+ pkg_spec = {}
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+ # build sqlports if it's not installed yet
+ parse_package_name(['sqlports'], pkg_spec, module)
+ get_package_state(['sqlports'], pkg_spec, module)
+ if not pkg_spec['sqlports']['installed_state']:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present(['sqlports'], pkg_spec, module)
+
+ asterisk_name = False
+ for n in name:
+ if n == '*':
+ if len(name) != 1:
+ module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+ asterisk_name = True
+
+ if asterisk_name:
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ upgrade_packages(pkg_spec, module)
+ else:
+ # Parse package names and put results in the pkg_spec dictionary.
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ for n in name:
+ if pkg_spec[n]['branch'] and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+ # Get state for all package names.
+ get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ package_present(name, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ package_absent(name, pkg_spec, module)
+ elif state == 'latest':
+ package_latest(name, pkg_spec, module)
+
+ # The combined changed status for all requested packages. If anything
+ # is changed this is set to True.
+ combined_changed = False
+
+ # The combined failed status for all requested packages. If anything
+ # failed this is set to True.
+ combined_failed = False
+
+ # We combine all error messages in this comma separated string, for example:
+ # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+ combined_error_message = ''
+
+ # Loop over all requested package names and check if anything failed or
+ # changed.
+ for n in name:
+ if pkg_spec[n]['rc'] != 0:
+ combined_failed = True
+ if pkg_spec[n]['stderr']:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stderr']
+ else:
+ combined_error_message = pkg_spec[n]['stderr']
+ else:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stdout']
+ else:
+ combined_error_message = pkg_spec[n]['stdout']
+
+ if pkg_spec[n]['changed'] is True:
+ combined_changed = True
+
+ # If combined_error_message contains anything, at least some part of the
+ # list of requested package names failed.
+ if combined_failed:
+ module.fail_json(msg=combined_error_message, **result)
+
+ result['changed'] = combined_changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
new file mode 100644
index 00000000..aa477e42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+ - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ username:
+ description:
+ - The username to connect to.
+ required: false
+ default: cn=Directory Manager
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ passwordfile:
+ description:
+ - Location of the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ state:
+ description:
+ - Whether the configuration needs to be added/updated.
+ required: false
+ default: "present"
+'''
+
+EXAMPLES = '''
+ - name: Add or update OpenDJ backend properties
+ action: opendj_backendprop
+ hostname=localhost
+ port=4444
+ username="cn=Directory Manager"
+ password=password
+ backend=userRoot
+ name=index-entry-limit
+ value=5000
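+
+# A hypothetical equivalent in YAML task syntax; the passwordfile path is a placeholder:
+ - name: Update index-entry-limit using a password file
+   community.general.opendj_backendprop:
+     hostname: localhost
+     port: 4444
+     username: "cn=Directory Manager"
+     passwordfile: /opt/opendj/config/pwdfile
+     backend: userRoot
+     name: index-entry-limit
+     value: 5000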
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BackendProp(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
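+ # Walk the 'name value' lines returned by get-backend-prop and report whether the requested property already has the requested value.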
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'passwordfile']],
+ required_one_of=[['password', 'passwordfile']]
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
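+ # Build the dsconfig authentication arguments from whichever credential was supplied.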
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py
new file mode 100644
index 00000000..817ed9f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/openwrt_init.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney <andrew@agaffney.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ pattern:
+ type: str
+ description:
+ - If the service does not respond to the 'running' command, name a
+ substring to look for as would be found in the output of the I(ps)
+ command as a stand-in for a 'running' result. If the string is found,
+ the service will be assumed to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+ community.general.openwrt_init:
+ state: started
+ name: httpd
+
+- name: Stop service cron, if running
+ community.general.openwrt_init:
+ name: cron
+ state: stopped
+
+- name: Reload service httpd, in all cases
+ community.general.openwrt_init:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd
+ community.general.openwrt_init:
+ name: httpd
+ enabled: yes
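+
+# A hypothetical example using the pattern option, for a service whose init
+# script does not support the 'running' command (the pattern value is a placeholder):
+- name: Start service foo, checking the ps output for it
+  community.general.openwrt_init:
+    name: foo
+    state: started
+    pattern: /usr/sbin/foo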
+'''
+
+RETURN = '''
+'''
+
+import os
+import glob
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
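+ # The init script's 'enabled' action exits 0 when the service is enabled at boot.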
+ (rc, out, err) = module.run_command("%s enabled" % init_script)
+ if rc == 0:
+ return True
+ return False
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ pattern=dict(type='str', required=False, default=None),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ # this should be busybox ps, so we only want/need the 'w' option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py
new file mode 100644
index 00000000..7da9a487
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/opkg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt
+description:
+ - Manages OpenWrt packages
+options:
+ name:
+ description:
+ - name of package to install/remove
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent', 'installed', 'removed' ]
+ default: present
+ type: str
+ force:
+ description:
+ - opkg --force parameter used
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ type: str
+ update_cache:
+ description:
+ - update the package db first
+ aliases: ['update-cache']
+ default: "no"
+ type: bool
+requirements:
+ - opkg
+ - python
+'''
+EXAMPLES = '''
+- name: Install foo
+ community.general.opkg:
+ name: foo
+ state: present
+
+- name: Update cache and install foo
+ community.general.opkg:
+ name: foo
+ state: present
+ update_cache: yes
+
+- name: Remove foo
+ community.general.opkg:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar
+ community.general.opkg:
+ name: foo,bar
+ state: absent
+
+- name: Install foo using overwrite option forcibly
+ community.general.opkg:
+ name: foo
+ state: present
+ force: overwrite
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def update_package_db(module, opkg_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s update" % opkg_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, opkg_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
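+ # grep for the package name at the start of a line in 'opkg list-installed'; a zero exit code means it is installed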
+ rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, opkg_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
+
+ if query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, opkg_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
+
+ if not query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ opkg_path = module.get_bin_path('opkg', True, ['/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, opkg_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, opkg_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, opkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py
new file mode 100644
index 00000000..a0362908
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/osx_defaults.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com> (DO NOT CONTACT!)
+# Copyright: (c) 2019, Ansible project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+ - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - macOS applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications are not running (such as default font for new
+ documents, or the position of an Info panel).
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form C(com.companyname.appname).
+ type: str
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply.
+ - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ type: str
+ key:
+ description:
+ - The key of the user preference.
+ type: str
+ type:
+ description:
+ - The type of value to write.
+ type: str
+ choices: [ array, bool, boolean, date, float, int, integer, string ]
+ default: string
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ type: bool
+ default: no
+ value:
+ description:
+ - The value to write.
+ - Only required when C(state=present).
+ type: raw
+ state:
+ description:
+ - The state of the user defaults.
+ - If set to C(list), it will query the given parameter specified by C(key). Returns 'null' if nothing is found or the key is misspelled.
+ - C(list) added in version 2.8.
+ type: str
+ choices: [ absent, list, present ]
+ default: present
+ path:
+ description:
+ - The path in which to search for C(defaults).
+ type: str
+ default: /usr/bin:/usr/local/bin
+notes:
+ - macOS caches defaults. You may need to log out and log back in to apply the changes.
+'''
+
+EXAMPLES = r'''
+# TODO: Describe what happens in each example
+
+- community.general.osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
+- community.general.osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
+- community.general.osx_defaults:
+ domain: /Library/Preferences/com.apple.SoftwareUpdate
+ key: AutomaticCheckEnabled
+ type: int
+ value: 1
+ become: yes
+
+- community.general.osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
+- community.general.osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
+- community.general.osx_defaults:
+ key: AppleLanguages
+ type: array
+ value:
+ - en
+ - nl
+
+- community.general.osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
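+
+# A hypothetical read-only query; state=list returns the current value of the given key:
+- community.general.osx_defaults:
+    domain: com.apple.Safari
+    key: IncludeInternalDebugMenu
+    state: list
+  register: debug_menu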
+'''
+
+from datetime import datetime
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import binary_type, text_type
+
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ def __init__(self, msg):
+ self.message = msg
+
+
+# /exceptions -------------------------------------------------------------- }}}
+
+# class MacDefaults -------------------------------------------------------- {{{
+class OSXDefaults(object):
+ """ Class to manage Mac OS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ def __init__(self, module):
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ # Initial var for storing current defaults value
+ self.current_value = None
+ self.module = module
+ self.domain = module.params['domain']
+ self.host = module.params['host']
+ self.key = module.params['key']
+ self.type = module.params['type']
+ self.array_add = module.params['array_add']
+ self.value = module.params['value']
+ self.state = module.params['state']
+ self.path = module.params['path']
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # Ensure the value is the correct type
+ if self.state != 'absent':
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ @staticmethod
+ def is_int(value):
+ as_str = str(value)
+ if (as_str.startswith("-")):
+ return as_str[1:].isdigit()
+ else:
+ return as_str.isdigit()
+
+ @staticmethod
+ def _convert_type(data_type, value):
+ """ Converts value to given type """
+ if data_type == "string":
+ return str(value)
+ elif data_type in ["bool", "boolean"]:
+ if isinstance(value, (binary_type, text_type)):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif data_type == "date":
+ try:
+ return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+ "Invalid date value: {0}. Required format yyyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif data_type in ["int", "integer"]:
+ if not OSXDefaults.is_int(value):
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif data_type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif data_type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+ def _host_args(self):
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ def _base_command(self):
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ return [self.executable] + self._host_args()
+
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+ """ Converts array output from defaults to a list """
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+ value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+ return value
+
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ def read(self):
+ """ Reads value of this domain & key from defaults """
+ # First try to find out the type
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+ # If the RC is not 0, then something terrible happened!
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out)
+
+ # Ok, lets parse the type from output
+ data_type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+ # A non-zero RC at this point is unexpected...
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out)
+
+ # Convert string to list when type is array
+ if data_type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(data_type, out)
+
+ def write(self):
+ """ Writes value to this domain & key to defaults """
+ # We need to convert some values so the defaults commandline understands it
+ if isinstance(self.value, bool):
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif isinstance(self.value, (int, float)):
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
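+ # Only pass along items that are not already present in the current array.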
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out)
+
+ def delete(self):
+ """ Deletes defaults key from domain """
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out)
+
+ # /commands ----------------------------------------------------------- }}}
+
+ # run ----------------------------------------------------------------- {{{
+ def run(self):
+ """ Apply the requested state; return True when anything changed. """
+
+ # Get the current value from defaults
+ self.read()
+
+ if self.state == 'list':
+ self.module.exit_json(key=self.key, value=self.current_value)
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+ # Current value matches the given value. Nothing needs to be done. Arrays need extra care.
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', default='NSGlobalDomain'),
+ host=dict(type='str'),
+ key=dict(type='str'),
+ type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+ array_add=dict(type='bool', default=False),
+ value=dict(type='raw'),
+ state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+ path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+ ),
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['value']),
+ ),
+ )
+
+ try:
+ defaults = OSXDefaults(module=module)
+ module.exit_json(changed=defaults.run())
+ except OSXDefaultsException as e:
+ module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
new file mode 100644
index 00000000..7ed3a5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+ - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+ an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh >= 0.4.8
+options:
+ name:
+ required: true
+ description:
+ - The IP address to manage (can be a single IP like 1.1.1.1
+ or a block like 1.1.1.1/28)
+ type: str
+ service:
+ required: true
+ description:
+ - The name of the OVH service this IP address should be routed to
+ type: str
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+ - If true, the module will wait for the IP address to be moved.
+ If false, exit without waiting. The taskId will be returned
+ in module output
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not 0, the module will wait for this task id to be
+ completed. Use wait_task_completion if you want to wait for
+ completion of a previously executed task with
+ wait_completion=false. You can execute this module repeatedly on
+ a list of failover IPs using wait_completion=false (see examples)
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+ type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_completion: false
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+ register: moved
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_task_completion: "{{moved.taskId}}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
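+ # Poll the IP's task list once per second until no pending 'genericMoveFloatingIp' tasks remain; give up once 'timeout' seconds have elapsed.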
+ currentTimeout = timeout
+ while client.get('/ip/{0}/task'.format(quote_plus(name)),
+ function='genericMoveFloatingIp',
+ status='todo'):
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
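+ # Poll the given task every 5 seconds until its status is 'done'; give up once 'timeout' seconds have elapsed.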
+ currentTimeout = timeout
+ while True:
+ task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+ if task['status'] == 'done':
+ return True
+ time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
+ currentTimeout -= 5
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service=dict(required=True),
+ endpoint=dict(required=True),
+ wait_completion=dict(default=True, type='bool'),
+ wait_task_completion=dict(default=0, type='int'),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module is required to run this module ')
+
+ # Get parameters
+ name = module.params.get('name')
+ service = module.params.get('service')
+ timeout = module.params.get('timeout')
+ wait_completion = module.params.get('wait_completion')
+ wait_task_completion = module.params.get('wait_task_completion')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ ips = client.get('/ip', ip=name, type='failover')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of ips, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in ips and '{0}/32'.format(name) not in ips:
+ module.fail_json(msg='IP {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the properties '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if ipproperties['routedTo']['serviceName'] != service:
+ if not module.check_mode:
+ if wait_task_completion == 0:
+ # Move the IP and get the created taskId
+ task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+ taskId = task['taskId']
+ result['moved'] = True
+ else:
+ # Just wait for the given taskId to be completed
+ taskId = wait_task_completion
+ result['moved'] = False
+ result['taskId'] = taskId
+ if wait_completion or wait_task_completion != 0:
+ if not waitForTaskDone(client, name, taskId, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of move ip to service'.format(timeout))
+ result['waited'] = True
+ else:
+ result['waited'] = False
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 00000000..965a499c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+ key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+ - The internal name of the load balancing IP (ip-X.X.X.X)
+ type: str
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ type: str
+ probe:
+ default: 'none'
+ choices: ['none', 'http', 'icmp' , 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend
+ type: int
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed.
+ type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' of the load balancing 'ip-1.1.1.1'
+ ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+- name: Remove the backend '212.1.1.1' from the load balancing 'ip-1.1.1.1'
+ ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
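+ # Poll the load balancing task list once per second until it is empty; give up once 'timeout' seconds have elapsed.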
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+ module.fail_json(msg='ovh-api python module '
+ 'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if (backendProperties['weight'] != weight):
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if (backendProperties['probe'] != probe):
+ # Change probe
+ backendProperties['probe'] = probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Creates backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
new file mode 100644
index 00000000..75c70a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+ - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it).
+requirements: [ "ovh" ]
+options:
+ project_id:
+ required: true
+ type: str
+ description:
+ - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
+ instance_id:
+ required: true
+ type: str
+ description:
+ - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
+ endpoint:
+ type: str
+ description:
+ - The endpoint to use (for instance ovh-eu)
+ application_key:
+ type: str
+ description:
+ - The applicationKey to use
+ application_secret:
+ type: str
+ description:
+ - The application secret to use
+ consumer_key:
+ type: str
+ description:
+ - The consumer key to use
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+ community.general.ovh_monthly_billing:
+ project_id: 0c727a20aa144485b70c44dee9123b46
+ instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get openstack cloud ID and instance ID, OVH use them in its API
+- name: Get openstack cloud ID and instance ID
+ os_server_info:
+ cloud: myProjectName
+ region_name: myRegionName
+ server: myServerName
+ register: openstack_servers
+
+- name: Use IDs
+ community.general.ovh_monthly_billing:
+ project_id: "{{ openstack_servers.0.tenant_id }}"
+ instance_id: "{{ openstack_servers.0.id }}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import os
+import sys
+import traceback
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+ OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True),
+ instance_id=dict(required=True),
+ endpoint=dict(required=False),
+ application_key=dict(required=False, no_log=True),
+ application_secret=dict(required=False, no_log=True),
+ consumer_key=dict(required=False, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get parameters
+ project_id = module.params.get('project_id')
+ instance_id = module.params.get('instance_id')
+ endpoint = module.params.get('endpoint')
+ application_key = module.params.get('application_key')
+ application_secret = module.params.get('application_secret')
+ consumer_key = module.params.get('consumer_key')
+ project = ""
+ instance = ""
+ ovh_billing_status = ""
+
+ if not HAS_OVH:
+ module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+ # Connect to OVH API
+ client = ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+ # Check that the instance exists
+ try:
+ project = client.get('/cloud/project/{0}'.format(project_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+ # Check that the instance exists
+ try:
+ instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+ # Is monthlyBilling already enabled or pending ?
+ if instance['monthlyBilling'] is not None:
+ if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+ module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Dry Run!")
+
+ try:
+ ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+ module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+ except APIError as apiError:
+ module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+ # We should never reach here
+ module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+ main()
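The module above boils down to three OVH API calls: a GET on the project, a GET on the instance, and a POST on activeMonthlyBilling. As a rough standalone sketch of the same flow with the python-ovh client (the endpoint, keys and IDs below are placeholders taken from the examples, not values supplied by this collection):

    import ovh

    # Credentials may also come from /etc/ovh.conf, as in the first example above.
    client = ovh.Client(endpoint='ovh-eu',
                        application_key='yourkey',
                        application_secret='yoursecret',
                        consumer_key='yourconsumerkey')

    project_id = '0c727a20aa144485b70c44dee9123b46'
    instance_id = '8fa89ad2-8f08-4220-9fa4-9695ea23e948'

    instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
    billing = instance['monthlyBilling']
    if billing is None or billing['status'] not in ('ok', 'activationPending'):
        # Same call the module issues when monthly billing is not yet enabled or pending.
        result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
        print(result['monthlyBilling'])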
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py
new file mode 100644
index 00000000..25e3081c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt.py
@@ -0,0 +1,503 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author:
+- Vincent Van der Kussen (@vincentvdk)
+short_description: oVirt/RHEV platform management
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.14
+  why: This module is for a deprecated version of oVirt.
+ alternative: Use C(ovirt_vm) from the C(ovirt.ovirt) collection instead
+description:
+ - This module only supports oVirt/RHEV version 3. A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+ - Allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform.
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ required: true
+ url:
+ description:
+      - The URL of the oVirt instance.
+ type: str
+ required: true
+ instance_name:
+ description:
+ - The name of the instance to use.
+ type: str
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - Password of the user to authenticate with.
+ type: str
+ required: true
+ image:
+ description:
+ - The template to use for the instance.
+ type: str
+ resource_type:
+ description:
+ - Whether you want to deploy an image or create an instance from scratch.
+ type: str
+ choices: [ new, template ]
+ zone:
+ description:
+ - Deploy the image to this oVirt cluster.
+ type: str
+ instance_disksize:
+ description:
+ - Size of the instance's disk in GB.
+ type: str
+    aliases: [ vm_disksize ]
+ instance_cpus:
+ description:
+ - The instance's number of CPUs.
+ type: str
+ default: 1
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - The name of the network interface in oVirt/RHEV.
+ type: str
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - The logical network the machine should belong to.
+ type: str
+ default: rhevm
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - The instance's amount of memory in MB.
+ type: str
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - Define whether the instance is a server, desktop or high_performance.
+ - I(high_performance) is supported since Ansible 2.5 and oVirt/RHV 4.2.
+ type: str
+ choices: [ desktop, server, high_performance ]
+ default: server
+ aliases: [ vmtype ]
+ disk_alloc:
+ description:
+ - Define whether disk is thin or preallocated.
+ type: str
+ choices: [ preallocated, thin ]
+ default: thin
+ disk_int:
+ description:
+ - Interface type of the disk.
+ type: str
+ choices: [ ide, virtio ]
+ default: virtio
+ instance_os:
+ description:
+ - Type of Operating System.
+ type: str
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - Define the instance's number of cores.
+ type: str
+ default: 1
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+      - The storage domain on which to create the instance's disk.
+ type: str
+ region:
+ description:
+ - The oVirt/RHEV datacenter where you want to deploy to.
+ type: str
+ instance_dns:
+ description:
+ - Define the instance's Primary DNS server.
+ type: str
+ aliases: [ dns ]
+ instance_domain:
+ description:
+ - Define the instance's Domain.
+ type: str
+ aliases: [ domain ]
+ instance_hostname:
+ description:
+ - Define the instance's Hostname.
+ type: str
+ aliases: [ hostname ]
+ instance_ip:
+ description:
+ - Define the instance's IP.
+ type: str
+ aliases: [ ip ]
+ instance_netmask:
+ description:
+ - Define the instance's Netmask.
+ type: str
+ aliases: [ netmask ]
+ instance_gateway:
+ description:
+ - Define the instance's Gateway.
+ type: str
+ aliases: [ gateway ]
+ instance_rootpw:
+ description:
+ - Define the instance's Root password.
+ type: str
+ aliases: [ rootpw ]
+ instance_key:
+ description:
+ - Define the instance's Authorized key.
+ type: str
+ aliases: [ key ]
+ state:
+ description:
+ - Create, terminate or remove instances.
+ type: str
+ choices: [ absent, present, restart, shutdown, started ]
+ default: present
+requirements:
+ - ovirt-engine-sdk-python
+'''
+
+EXAMPLES = '''
+- name: Basic example to provision from image
+ community.general.ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+ resource_type: template
+
+- name: Full example to create new instance from scratch
+ community.general.ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+ disk_int: virtio
+
+- name: Stopping an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+    state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an existing instance
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+- name: Start an instance with cloud init information
+ community.general.ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.removed import removed_module
+
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ value = api.test()
+ except Exception:
+ raise Exception("error connecting to the oVirt API")
+ return api
+
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
+ format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
+ # define disk params
+ vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
+ format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except Exception:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except Exception:
+ raise Exception("Error adding nic")
+
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except Exception:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.Nics()
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization = params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+
+# restart instance
+def vm_restart(conn, vmname):
+ state = vm_status(conn, vmname)
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+ while conn.vms.get(vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get the VM object and return its name if the object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm is None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'restart', 'shutdown', 'started']),
+ user=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ instance_name=dict(type='str', required=True, aliases=['vmname']),
+ password=dict(type='str', required=True, no_log=True),
+ image=dict(type='str'),
+ resource_type=dict(type='str', choices=['new', 'template']),
+ zone=dict(type='str'),
+ instance_disksize=dict(type='str', aliases=['vm_disksize']),
+ instance_cpus=dict(type='str', default=1, aliases=['vmcpus']),
+ instance_nic=dict(type='str', aliases=['vmnic']),
+ instance_network=dict(type='str', default='rhevm', aliases=['vmnetwork']),
+ instance_mem=dict(type='str', aliases=['vmmem']),
+ instance_type=dict(type='str', default='server', aliases=['vmtype'], choices=['desktop', 'server', 'high_performance']),
+ disk_alloc=dict(type='str', default='thin', choices=['preallocated', 'thin']),
+ disk_int=dict(type='str', default='virtio', choices=['ide', 'virtio']),
+ instance_os=dict(type='str', aliases=['vmos']),
+ instance_cores=dict(type='str', default=1, aliases=['vmcores']),
+ instance_hostname=dict(type='str', aliases=['hostname']),
+ instance_ip=dict(type='str', aliases=['ip']),
+ instance_netmask=dict(type='str', aliases=['netmask']),
+ instance_gateway=dict(type='str', aliases=['gateway']),
+ instance_domain=dict(type='str', aliases=['domain']),
+ instance_dns=dict(type='str', aliases=['dns']),
+ instance_rootpw=dict(type='str', aliases=['rootpw'], no_log=True),
+ instance_key=dict(type='str', aliases=['key'], no_log=True),
+ sdomain=dict(type='str'),
+ region=dict(type='str'),
+ ),
+ )
+
+ if not HAS_OVIRTSDK:
+ module.fail_json(msg='ovirtsdk required for this module')
+
+ state = module.params['state']
+ user = module.params['user']
+ url = module.params['url']
+ vmname = module.params['instance_name']
+ password = module.params['password']
+ image = module.params['image'] # name of the image to deploy
+ resource_type = module.params['resource_type'] # template or from scratch
+ zone = module.params['zone'] # oVirt cluster
+ vmdisk_size = module.params['instance_disksize'] # disksize
+ vmcpus = module.params['instance_cpus'] # number of cpu
+ vmnic = module.params['instance_nic'] # network interface
+ vmnetwork = module.params['instance_network'] # logical network
+ vmmem = module.params['instance_mem'] # mem size
+ vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
+ vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
+ vmos = module.params['instance_os'] # Operating System
+ vmtype = module.params['instance_type'] # server, desktop or high_performance
+ vmcores = module.params['instance_cores'] # number of cores
+ sdomain = module.params['sdomain'] # storage domain to store disk on
+ region = module.params['region'] # oVirt Datacenter
+ hostname = module.params['instance_hostname']
+ ip = module.params['instance_ip']
+ netmask = module.params['instance_netmask']
+ gateway = module.params['instance_gateway']
+ domain = module.params['instance_domain']
+ dns = module.params['instance_dns']
+ rootpw = module.params['instance_rootpw']
+ key = module.params['instance_key']
+ # initialize connection
+ try:
+ c = conn(url + "/api", user, password)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if get_vm(c, vmname) == "empty":
+ if resource_type == 'template':
+ try:
+ create_vm_template(c, vmname, image, zone)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
+ elif resource_type == 'new':
+ # FIXME: refactor, use keyword args.
+ try:
+ create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+ else:
+ module.exit_json(changed=False, msg="You did not specify a resource type")
+ else:
+ module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+ if state == 'started':
+ if vm_status(c, vmname) == 'up':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+ else:
+ # vm_start(c, vmname)
+ vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+ module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+ if state == 'shutdown':
+ if vm_status(c, vmname) == 'down':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+ else:
+ vm_stop(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+ if state == 'restart':
+ if vm_status(c, vmname) == 'up':
+ vm_restart(c, vmname)
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+ else:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+ if state == 'absent':
+ if get_vm(c, vmname) == "empty":
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+ else:
+ vm_remove(c, vmname)
+ module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+if __name__ == '__main__':
+ main()
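Two details of the module above are easy to miss: create_vm() maps disk_alloc onto SDK disk attributes (thin becomes sparse=True with format='cow', preallocated becomes sparse=False with format='raw'), and vm_start() only builds a cloud-init payload when at least one guest parameter is supplied. A condensed sketch of that cloud-init path, assuming the legacy ovirt-engine-sdk-python (version 3) and an already established API connection:

    from ovirtsdk.xml import params

    def start_with_cloud_init(api, vmname, hostname, ip, netmask, gateway, rootpw):
        # Mirrors vm_start() above: a static NIC configuration plus host name and root password.
        vm = api.vms.get(name=vmname)
        ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
        nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
        nics = params.GuestNicsConfiguration(nic_configuration=[nic])
        init = params.Initialization(regenerate_ssh_keys=True, host_name=hostname,
                                     user_name='root', root_password=rootpw,
                                     nic_configurations=nics)
        vm.start(action=params.Action(use_cloud_init=True, vm=params.VM(initialization=init)))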
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py
new file mode 100644
index 00000000..e560e13e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_affinity_label_facts.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_label_facts
+short_description: Retrieve information about one or more oVirt/RHV affinity labels
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_affinity_label_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV affinity labels."
+notes:
+ - "This module returns a variable C(ovirt_affinity_labels), which
+ contains a list of affinity labels. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ vm:
+ description:
+      - "Name of the VM whose affinity labels should be listed."
+ host:
+ description:
+      - "Name of the host whose affinity labels should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all affinity labels whose names start with label
+ ovirt_affinity_label_info:
+ name: label*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to VMs
+    whose names start with postgres
+ ovirt_affinity_label_info:
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to hosts
+    whose names start with west
+ ovirt_affinity_label_info:
+ host: west*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+
+- name: >
+    Gather information about all affinity labels that are assigned to hosts
+    whose names start with west or to VMs whose names start with postgres
+ ovirt_affinity_label_info:
+ host: west*
+ vm: postgres*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_affinity_labels }}"
+'''
+
+RETURN = '''
+ovirt_affinity_labels:
+    description: "List of dictionaries describing the affinity labels. Affinity label attributes are mapped to dictionary keys,
+                  all affinity label attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_affinity_label_facts', 'community.general.ovirt_affinity_label_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_affinity_label_facts' module has been renamed to 'ovirt_affinity_label_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ affinity_labels_service = connection.system_service().affinity_labels_service()
+ labels = []
+ all_labels = affinity_labels_service.list()
+ if module.params['name']:
+ labels.extend([
+ l for l in all_labels
+ if fnmatch.fnmatch(l.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ if search_by_name(hosts_service, module.params['host']) is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ labels.extend([
+ label
+ for label in all_labels
+ for host in connection.follow_link(label.hosts)
+ if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ if search_by_name(vms_service, module.params['vm']) is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ labels.extend([
+ label
+ for label in all_labels
+ for vm in connection.follow_link(label.vms)
+ if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ labels = all_labels
+
+ result = dict(
+ ovirt_affinity_labels=[
+ get_dict_of_struct(
+ struct=l,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for l in labels
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
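Note that the three filters above are combined as a union: labels whose name matches I(name), plus labels attached to any host matching I(host), plus labels attached to any VM matching I(vm); with no filter given, every label is returned. The matching itself is plain fnmatch globbing, for example:

    import fnmatch

    label_names = ['label_web', 'label_db', 'gpu']
    # 'label*' keeps the first two entries only, exactly like the module's name filter.
    print([n for n in label_names if fnmatch.fnmatch(n, 'label*')])
    # -> ['label_web', 'label_db']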
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py
new file mode 100644
index 00000000..4085a702
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_api_facts.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovirt_api_facts
+short_description: Retrieve information about the oVirt/RHV API
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_api_info) instead.
+description:
+ - "Retrieve information about the oVirt/RHV API."
+notes:
+ - "This module returns a variable C(ovirt_api),
+    which contains information about the oVirt/RHV API. You need to register the result with
+ the I(register) keyword to use it."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about the oVirt API
+ ovirt_api_info:
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_api }}"
+'''
+
+RETURN = '''
+ovirt_api:
+ description: "Dictionary describing the oVirt API information.
+                  API attributes are mapped to dictionary keys,
+                  all API attributes can be found at the following
+ url: https://ovirt.example.com/ovirt-engine/api/model#types/api."
+ returned: On success.
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec()
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_api_facts', 'community.general.ovirt_api_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_api_facts' module has been renamed to 'ovirt_api_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ api = connection.system_service().get()
+ result = dict(
+ ovirt_api=get_dict_of_struct(
+ struct=api,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ )
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
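The exit branch shared by all of these deprecated wrappers is worth spelling out: when invoked under the legacy *_facts name the result is returned as ansible_facts (and a deprecation warning is emitted), while under the renamed *_info module the same dictionary is returned as a plain module result and has to be captured with register. A stripped-down sketch of that branch, with module and result standing in for the objects built above:

    def exit_with_result(module, is_old_facts, result):
        if is_old_facts:
            # Legacy *_facts behaviour: the data is injected into the host's facts.
            module.exit_json(changed=False, ansible_facts=result)
        else:
            # *_info behaviour: the data is only reachable through `register` on the task.
            module.exit_json(changed=False, **result)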
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py
new file mode 100644
index 00000000..e4916a26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_cluster_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_cluster_facts
+short_description: Retrieve information about one or more oVirt/RHV clusters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_cluster_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV clusters."
+notes:
+ - "This module returns a variable C(ovirt_clusters), which
+ contains a list of clusters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+      - "For example, to search for cluster X from datacenter Y, use the following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all clusters whose names start with production
+ ovirt_cluster_info:
+    pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_clusters }}"
+'''
+
+RETURN = '''
+ovirt_clusters:
+ description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+                  all cluster attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_cluster_facts', 'community.general.ovirt_cluster_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_cluster_facts' module has been renamed to 'ovirt_cluster_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_clusters=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in clusters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
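The pattern option is handed unchanged to the SDK's list(search=...) call, so it accepts the same search expressions as the oVirt/RHV administration portal search bar. A small sketch under the same assumptions as the module (clusters_service obtained from connection.system_service().clusters_service(); the expressions are illustrative values, not module defaults):

    # Clusters named production* that live in datacenter west.
    production = clusters_service.list(search='name=production* and datacenter=west')
    # The module's default empty pattern simply lists every cluster.
    everything = clusters_service.list(search='')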
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py
new file mode 100644
index 00000000..0de72729
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_datacenter_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenter_facts
+short_description: Retrieve information about one or more oVirt/RHV datacenters
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_datacenter_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV datacenters."
+notes:
+ - "This module returns a variable C(ovirt_datacenters), which
+ contains a list of datacenters. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+      - "For example, to search for datacenter I(X), use the following pattern: I(name=X)"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all data centers whose names start with production
+ ovirt_datacenter_info:
+ pattern: name=production*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_datacenters }}"
+'''
+
+RETURN = '''
+ovirt_datacenters:
+ description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+                  all datacenter attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_datacenter_facts', 'community.general.ovirt_datacenter_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_datacenter_facts' module has been renamed to 'ovirt_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(
+ struct=d,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for d in datacenters
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py
new file mode 100644
index 00000000..6e0c9f69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_disk_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_disk_facts
+short_description: Retrieve information about one or more oVirt/RHV disks
+author: "Katerina Koukiou (@KKoukiou)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_disk_info) instead
+description:
+ - "Retrieve information about one or more oVirt/RHV disks."
+notes:
+ - "This module returns a variable C(ovirt_disks), which
+ contains a list of disks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+      - "For example, to search for disk X from storage Y, use the following pattern:
+ name=X and storage.name=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all disks whose names start with centos
+ ovirt_disk_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_disks }}"
+'''
+
+RETURN = '''
+ovirt_disks:
+    description: "List of dictionaries describing the disks. Disk attributes are mapped to dictionary keys,
+                  all disk attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_disk_facts', 'community.general.ovirt_disk_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_disk_facts' module has been renamed to 'ovirt_disk_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ disks_service = connection.system_service().disks_service()
+ disks = disks_service.list(
+ search=module.params['pattern'],
+ )
+ result = dict(
+ ovirt_disks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in disks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py
new file mode 100644
index 00000000..50a20654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_event_facts.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_event_facts
+short_description: This module can be used to retrieve information about one or more oVirt/RHV events
+author: "Chris Keller (@nasx)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_event_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV events."
+options:
+ case_sensitive:
+ description:
+            - "Indicates whether the search performed using the I(search) parameter should take case
+               into account. The default value is true, which means that case is taken into account. If you
+               want to search ignoring case, set it to false."
+ required: false
+ default: true
+ type: bool
+
+ from_:
+ description:
+ - "Indicates the event index after which events should be returned. The indexes of events are
+ strictly increasing, so when this parameter is used only the events with greater indexes
+ will be returned."
+ required: false
+ type: int
+
+ max:
+ description:
+ - "Sets the maximum number of events to return. If not specified all the events are returned."
+ required: false
+ type: int
+
+ search:
+ description:
+ - "Search term which is accepted by the oVirt/RHV API."
+            - "For example, to search for events of severity alert, use the following pattern: severity=alert"
+ required: false
+ type: str
+
+ headers:
+ description:
+ - "Additional HTTP headers."
+ required: false
+ type: str
+
+ query:
+ description:
+ - "Additional URL query parameters."
+ required: false
+ type: str
+
+ wait:
+ description:
+            - "If I(true), wait for the response."
+ required: false
+ default: true
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain the auth parameter for simplicity,
+# look at the ovirt_auth module to see how to reuse authentication.
+
+- name: Return all events
+ ovirt_event_info:
+ register: result
+
+- name: Return the last 10 events
+ ovirt_event_info:
+ max: 10
+ register: result
+
+- name: Return all events of type alert
+ ovirt_event_info:
+ search: "severity=alert"
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.ovirt_events }}"
+'''
+
+RETURN = '''
+ovirt_events:
+ description: "List of dictionaries describing the events. Event attributes are mapped to dictionary keys.
+ All event attributes can be found at the following url:
+ http://ovirt.github.io/ovirt-engine-api-model/master/#types/event"
+    returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ case_sensitive=dict(default=True, type='bool', required=False),
+ from_=dict(default=None, type='int', required=False),
+ max=dict(default=None, type='int', required=False),
+ search=dict(default='', required=False),
+ headers=dict(default='', required=False),
+ query=dict(default='', required=False),
+ wait=dict(default=True, type='bool', required=False)
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_event_facts', 'community.general.ovirt_event_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_event_facts' module has been renamed to 'ovirt_event_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ events_service = connection.system_service().events_service()
+ events = events_service.list(
+ case_sensitive=module.params['case_sensitive'],
+ from_=module.params['from_'],
+ max=module.params['max'],
+ search=module.params['search'],
+ headers=module.params['headers'],
+ query=module.params['query'],
+ wait=module.params['wait']
+ )
+
+ result = dict(
+ ovirt_events=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in events
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
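The trailing underscore in I(from_) exists because C(from) is a reserved word in Python; on the API side it behaves as described above, returning only events whose index is greater than the given value. A hedged sketch of the underlying SDK call with the same argument names (events_service obtained as in the module; the values are illustrative, not module defaults):

    events = events_service.list(
        case_sensitive=False,     # ignore case in the search expression
        from_=1000,               # only events with an index greater than 1000
        max=10,                   # cap the number of returned events
        search='severity=alert',
    )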
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py
new file mode 100644
index 00000000..f9ac8b97
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_external_provider_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_provider_facts
+short_description: Retrieve information about one or more oVirt/RHV external providers
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_external_provider_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV external providers."
+notes:
+ - "This module returns a variable C(ovirt_external_providers), which
+ contains a list of external_providers. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ type: str
+      - "Name of the external provider; can be used as a glob expression."
+ description:
+ - "Name of the external provider, can be used as glob expression."
+ type: str
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all image external providers named glance
+ ovirt_external_provider_info:
+ type: os_image
+ name: glance
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_external_providers }}"
+'''
+
+RETURN = '''
+ovirt_external_providers:
+ description:
+ - "List of dictionaries. Content depends on I(type)."
+ - "For type C(foreman), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/external_host_provider."
+ - "For type C(os_image), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_image_provider."
+ - "For type C(os_volume), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_volume_provider."
+ - "For type C(os_network), attributes appearing in the dictionary can be found on your oVirt/RHV instance
+ at the following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/openstack_network_provider."
+ returned: On success
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None, required=False),
+ type=dict(
+ required=True,
+ choices=['os_image', 'os_network', 'os_volume', 'foreman'],
+ aliases=['provider'],
+ ),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_external_provider_facts', 'community.general.ovirt_external_provider_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_external_provider_facts' module has been renamed to 'ovirt_external_provider_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ external_providers_service = _external_provider_service(
+ provider_type=module.params.pop('type'),
+ system_service=connection.system_service(),
+ )
+ if module.params['name']:
+ external_providers = [
+ e for e in external_providers_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ external_providers = external_providers_service.list()
+
+ result = dict(
+ ovirt_external_providers=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in external_providers
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
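_external_provider_service() above is a straightforward dispatch from the I(type) option to the matching top-level SDK service; an unknown value simply falls through and returns None. An equivalent table-driven formulation (a sketch, not the collection's code) makes the mapping explicit:

    def provider_service(provider_type, system_service):
        # Same mapping as _external_provider_service() above, expressed as a lookup table.
        services = {
            'os_image': system_service.openstack_image_providers_service,
            'os_network': system_service.openstack_network_providers_service,
            'os_volume': system_service.openstack_volume_providers_service,
            'foreman': system_service.external_host_providers_service,
        }
        return services[provider_type]()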
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py
new file mode 100644
index 00000000..40b037f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_group_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_group_facts
+short_description: Retrieve information about one or more oVirt/RHV groups
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_group_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV groups."
+notes:
+ - "This module returns a variable C(ovirt_groups), which
+ contains a list of groups. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+      - "For example, to search for group X, use the following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all groups whose names start with admin
+ ovirt_group_info:
+ pattern: name=admin*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_groups }}"
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+                  all group attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_group_facts', 'community.general.ovirt_group_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_group_facts' module has been renamed to 'ovirt_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ groups_service = connection.system_service().groups_service()
+ groups = groups_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_groups=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in groups
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py
new file mode 100644
index 00000000..ea585e90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_facts.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_facts
+short_description: Retrieve information about one or more oVirt/RHV hosts
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV hosts."
+notes:
+ - "This module returns a variable C(ovirt_hosts), which
+ contains a list of hosts. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ type: bool
+ cluster_version:
+ description:
+ - "Filter the hosts based on the cluster version."
+ type: str
+
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all hosts whose names start with host and belong to data center west
+ ovirt_host_info:
+ pattern: name=host* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+
+- name: Gather information about all hosts with cluster version 4.2
+ ovirt_host_info:
+ pattern: name=host*
+ cluster_version: "4.2"
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_hosts }}"
+'''
+
+RETURN = '''
+ovirt_hosts:
+ description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all host attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def get_filtered_hosts(cluster_version, hosts, connection):
+ # Return only the hosts whose cluster version matches the requested value.
+ filtered_hosts = []
+ for host in hosts:
+ cluster = connection.follow_link(host.cluster)
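+ # Compare only the major.minor part of the host's cluster version against the requested value.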
+ cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
+ if cluster_version_host == cluster_version:
+ filtered_hosts.append(host)
+ return filtered_hosts
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ cluster_version=dict(default=None, type='str'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_facts', 'community.general.ovirt_host_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content']
+ )
+ cluster_version = module.params.get('cluster_version')
+ if cluster_version is not None:
+ hosts = get_filtered_hosts(cluster_version, hosts, connection)
+ result = dict(
+ ovirt_hosts=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in hosts
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py
new file mode 100644
index 00000000..62af3e4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_host_storage_facts.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_storage_facts
+short_description: Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)
+author: "Daniel Erez (@derez)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_host_storage_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV HostStorages (applicable only for block storage)."
+options:
+ host:
+ description:
+ - "Host to get device list from."
+ required: true
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the iSCSI storage server."
+ target:
+ description:
+ - "The target IQN for the storage device."
+ username:
+ description:
+ - "A CHAP user name for logging into a target."
+ password:
+ description:
+ - "A CHAP password for logging into a target."
+ portal:
+ description:
+ - "The portal being used to connect with iscsi."
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ suboptions:
+ address:
+ description:
+ - "Address of the fibre channel storage server."
+ port:
+ description:
+ - "Port of the fibre channel storage server."
+ lun_id:
+ description:
+ - "LUN id."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about HostStorages with specified target and address
+ ovirt_host_storage_info:
+ host: myhost
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ address: 10.34.63.204
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_host_storages }}"
+'''
+
+RETURN = '''
+ovirt_host_storages:
+ description: "List of dictionaries describing the HostStorage. HostStorage attributes are mapped to dictionary keys,
+ all HostStorage attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_storage."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name,
+)
+
+
+def _login(host_service, iscsi):
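+ # Log the host in to the iSCSI target using the given address/target and optional CHAP credentials, so its LUNs become visible.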
+ host_service.iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=iscsi.get('username'),
+ password=iscsi.get('password'),
+ address=iscsi.get('address'),
+ target=iscsi.get('target'),
+ portal=iscsi.get('portal')
+ ),
+ )
+
+
+def _get_storage_type(params):
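+ # Return 'iscsi' or 'fcp' depending on which option was supplied, or None if neither was given.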
+ for sd_type in ['iscsi', 'fcp']:
+ if params.get(sd_type) is not None:
+ return sd_type
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ host=dict(required=True),
+ iscsi=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_host_storage_facts', 'community.general.ovirt_host_storage_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_host_storage_facts' module has been renamed to 'ovirt_host_storage_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+
+ # Get Host
+ hosts_service = connection.system_service().hosts_service()
+ host_id = get_id_by_name(hosts_service, module.params['host'])
+ storage_type = _get_storage_type(module.params)
+ host_service = hosts_service.host_service(host_id)
+
+ if storage_type == 'iscsi':
+ # Login
+ iscsi = module.params.get('iscsi')
+ _login(host_service, iscsi)
+
+ # Get LUNs exposed from the specified target
+ host_storages = host_service.storage_service().list()
+
+ if storage_type == 'iscsi':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.ISCSI]
+ if 'target' in iscsi:
+ filtered_host_storages = [host_storage for host_storage in filtered_host_storages
+ if iscsi.get('target') == host_storage.logical_units[0].target]
+ elif storage_type == 'fcp':
+ filtered_host_storages = [host_storage for host_storage in host_storages
+ if host_storage.type == otypes.StorageType.FCP]
+ else:
+ # Neither 'iscsi' nor 'fcp' was supplied; fall back to the unfiltered list so the name is always defined.
+ filtered_host_storages = host_storages
+
+ result = dict(
+ ovirt_host_storages=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in filtered_host_storages
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py
new file mode 100644
index 00000000..781dd858
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_network_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_network_facts
+short_description: Retrieve information about one or more oVirt/RHV networks
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_network_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV networks."
+notes:
+ - "This module returns a variable C(ovirt_networks), which
+ contains a list of networks. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all networks whose names start with vlan1
+ ovirt_network_info:
+ pattern: name=vlan1*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_networks }}"
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all network attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_network_facts', 'community.general.ovirt_network_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_network_facts' module has been renamed to 'ovirt_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ networks_service = connection.system_service().networks_service()
+ networks = networks_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_networks=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in networks
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py
new file mode 100644
index 00000000..2cc1194f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_nic_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_nic_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine network interfaces
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_nic_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine network interfaces."
+notes:
+ - "This module returns a variable C(ovirt_nics), which
+ contains a list of NICs. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ required: true
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all NICs whose names start with eth for VM named centos7
+ ovirt_nic_info:
+ vm: centos7
+ name: eth*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_nics }}"
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NIC attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/nic."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_nic_facts', 'community.general.ovirt_nic_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_nic_facts' module has been renamed to 'ovirt_nic_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ nics_service = vms_service.service(vm.id).nics_service()
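+ # Filter the NICs by the optional name glob, otherwise list every NIC of the VM.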
+ if module.params['name']:
+ nics = [
+ e for e in nics_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ nics = nics_service.list()
+
+ result = dict(
+ ovirt_nics=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in nics
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py
new file mode 100644
index 00000000..52ba3624
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_permission_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_permission_facts
+short_description: Retrieve information about one or more oVirt/RHV permissions
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV permissions."
+notes:
+ - "This module returns a variable C(ovirt_permissions), which
+ contains a list of permissions. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all permissions of user with username john
+ ovirt_permission_info:
+ user_name: john
+ authz_name: example.com-authz
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_permissions }}"
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+ all permission attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def _permissions_service(connection, module):
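+ # Resolve either the user (searched as 'usrname=<user_name>@<authz_name>') or the group by name, and return its permissions sub-service.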
+ if module.params['user_name']:
+ service = connection.system_service().users_service()
+ entity = next(
+ iter(
+ service.list(
+ search='usrname={0}'.format(
+ '{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
+ )
+ )
+ ),
+ None
+ )
+ else:
+ service = connection.system_service().groups_service()
+ entity = search_by_name(service, module.params['group_name'])
+
+ if entity is None:
+ raise Exception("User/Group wasn't found.")
+
+ return service.service(entity.id).permissions_service()
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ authz_name=dict(required=True, aliases=['domain']),
+ user_name=dict(default=None),
+ group_name=dict(default=None),
+ namespace=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ permissions_service = _permissions_service(connection, module)
+ permissions = []
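+ # Flatten each permission: SDK attributes are stored with a leading underscore, so strip it and resolve every linked object to its name plus a matching '<attribute>_id' key.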
+ for p in permissions_service.list():
+ newperm = dict()
+ for key, value in p.__dict__.items():
+ if value and isinstance(value, sdk.Struct):
+ newperm[key[1:]] = get_link_name(connection, value)
+ newperm['%s_id' % key[1:]] = value.id
+ permissions.append(newperm)
+
+ result = dict(ovirt_permissions=permissions)
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py
new file mode 100644
index 00000000..b2424305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_quota_facts.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_quota_facts
+short_description: Retrieve information about one or more oVirt/RHV quotas
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_quota_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV quotas."
+notes:
+ - "This module returns a variable C(ovirt_quotas), which
+ contains a list of quotas. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ data_center:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about quota named myquota in Default datacenter
+ ovirt_quota_info:
+ data_center: Default
+ name: myquota
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_quotas }}"
+'''
+
+RETURN = '''
+ovirt_quotas:
+ description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+ all quota attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/quota."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ data_center=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_quota_facts', 'community.general.ovirt_quota_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_quota_facts' module has been renamed to 'ovirt_quota_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['data_center']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
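+ # Filter the quotas by the optional name glob, otherwise list every quota of the datacenter.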
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ result = dict(
+ ovirt_quotas=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in quotas
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py
new file mode 100644
index 00000000..eeaeb610
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_scheduling_policy_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_scheduling_policy_facts
+short_description: Retrieve information about one or more oVirt scheduling policies
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_scheduling_policy_info) instead.
+description:
+ - "Retrieve information about one or more oVirt scheduling policies."
+notes:
+ - "This module returns a variable C(ovirt_scheduling_policies),
+ which contains a list of scheduling policies. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ id:
+ description:
+ - "ID of the scheduling policy."
+ name:
+ description:
+ - "Name of the scheduling policy, can be used as glob expression."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all scheduling policies with name InClusterUpgrade
+ ovirt_scheduling_policy_info:
+ name: InClusterUpgrade
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_scheduling_policies }}"
+'''
+
+RETURN = '''
+ovirt_scheduling_policies:
+ description: "List of dictionaries describing the scheduling policies.
+ Scheduling policy attributes are mapped to dictionary keys,
+ all scheduling policy attributes can be found at the following
+ URL: https://ovirt.example.com/ovirt-engine/api/model#types/scheduling_policy."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ id=dict(default=None),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_scheduling_policy_facts', 'community.general.ovirt_scheduling_policy_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_scheduling_policy_facts' module has been renamed to 'ovirt_scheduling_policy_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ system_service = connection.system_service()
+ sched_policies_service = system_service.scheduling_policies_service()
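+ # Select policies by name glob, fetch a single policy by its id, or list them all.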
+ if module.params['name']:
+ sched_policies = [
+ e for e in sched_policies_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ elif module.params['id']:
+ sched_policies = [
+ sched_policies_service.service(module.params['id']).get()
+ ]
+ else:
+ sched_policies = sched_policies_service.list()
+
+ result = dict(
+ ovirt_scheduling_policies=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in sched_policies
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py
new file mode 100644
index 00000000..73746883
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_snapshot_facts.py
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_snapshot_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machine snapshots
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_snapshot_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machine snapshots."
+notes:
+ - "This module returns a variable C(ovirt_snapshots), which
+ contains a list of snapshots. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ vm:
+ description:
+ - "Name of the VM with snapshot."
+ required: true
+ description:
+ description:
+ - "Description of the snapshot, can be used as glob expression."
+ snapshot_id:
+ description:
+ - "Id of the snapshot we want to retrieve information about."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all snapshots whose description starts with update for VM named centos7
+ ovirt_snapshot_info:
+ vm: centos7
+ description: update*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_snapshots }}"
+'''
+
+RETURN = '''
+ovirt_snapshots:
+ description: "List of dictionaries describing the snapshot. Snapshot attributes are mapped to dictionary keys,
+ all snapshot attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/snapshot."
+ returned: On success.
+ type: list
+'''
+
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ vm=dict(required=True),
+ description=dict(default=None),
+ snapshot_id=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_snapshot_facts', 'community.general.ovirt_snapshot_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_snapshot_facts' module has been renamed to 'ovirt_snapshot_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vm_name = module.params['vm']
+ vm = search_by_name(vms_service, vm_name)
+ if vm is None:
+ raise Exception("VM '%s' was not found." % vm_name)
+
+ snapshots_service = vms_service.service(vm.id).snapshots_service()
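+ # Select snapshots by description glob, fetch a single snapshot by its id, or list them all.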
+ if module.params['description']:
+ snapshots = [
+ e for e in snapshots_service.list()
+ if fnmatch.fnmatch(e.description, module.params['description'])
+ ]
+ elif module.params['snapshot_id']:
+ snapshots = [
+ snapshots_service.snapshot_service(module.params['snapshot_id']).get()
+ ]
+ else:
+ snapshots = snapshots_service.list()
+
+ result = dict(
+ ovirt_snapshots=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in snapshots
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py
new file mode 100644
index 00000000..b9d814c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_domain_facts.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domain_facts
+short_description: Retrieve information about one or more oVirt/RHV storage domains
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_domain_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV storage domains."
+notes:
+ - "This module returns a variable C(ovirt_storage_domains), which
+ contains a list of storage domains. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: >
+ Gather information about all storage domains whose names
+ start with data and belong to data center west
+ ovirt_storage_domain_info:
+ pattern: name=data* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_domains }}"
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+ description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_domain_facts', 'community.general.ovirt_storage_domain_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_domain_facts' module has been renamed to 'ovirt_storage_domain_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in storage_domains
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py
new file mode 100644
index 00000000..1c583278
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_template_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered templates which contain one or more
+ disks which reside on a storage domain or diskless templates."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of templates to return. If not specified all the templates are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the templates should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates that are related to a storage domain and are unregistered
+ ovirt_storage_template_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_templates }}"
+'''
+
+RETURN = '''
+ovirt_storage_templates:
+ description: "List of dictionaries describing the Templates. Template attributes are mapped to dictionary keys,
+ all template attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_template_facts', 'community.general.ovirt_storage_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_template_facts' module has been renamed to 'ovirt_storage_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ templates_service = storage_domain_service.templates_service()
+
+ # List the unregistered templates, or the registered ones up to the optional 'max' limit:
+ if module.params.get('unregistered'):
+ templates = templates_service.list(unregistered=True)
+ else:
+ templates = templates_service.list(max=module.params['max'])
+ result = dict(
+ ovirt_storage_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py
new file mode 100644
index 00000000..d0247948
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_storage_vm_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines related to a storage domain
+author: "Maor Lipchuk (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_storage_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines relate to a storage domain."
+notes:
+ - "This module returns a variable C(ovirt_storage_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ unregistered:
+ description:
+ - "Flag which indicates whether to get unregistered virtual machines which contain one or more
+ disks which reside on a storage domain or diskless virtual machines."
+ type: bool
+ default: false
+ max:
+ description:
+ - "Sets the maximum number of virtual machines to return. If not specified all the virtual machines are returned."
+ storage_domain:
+ description:
+ - "The storage domain name where the virtual machines should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs that are related to a storage domain and are unregistered
+ ovirt_storage_vm_info:
+ unregistered: yes
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_storage_vms }}"
+'''
+
+RETURN = '''
+ovirt_storage_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VM attributes can be found at the following URL: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ get_id_by_name
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ storage_domain=dict(default=None),
+ max=dict(default=None, type='int'),
+ unregistered=dict(default=False, type='bool'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_storage_vm_facts', 'community.general.ovirt_storage_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_storage_vm_facts' module has been renamed to 'ovirt_storage_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ storage_domains_service = connection.system_service().storage_domains_service()
+ sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
+ storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
+ vms_service = storage_domain_service.vms_service()
+
+ # List the unregistered VMs, or all registered VMs on the storage domain:
+ if module.params.get('unregistered'):
+ vms = vms_service.list(unregistered=True)
+ else:
+ vms = vms_service.list()
+ result = dict(
+ ovirt_storage_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py
new file mode 100644
index 00000000..c6e9b744
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_tag_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_tag_facts
+short_description: Retrieve information about one or more oVirt/RHV tags
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_tag_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV tags."
+notes:
+ - "This module returns a variable C(ovirt_tags), which
+ contains a list of tags. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ name:
+ description:
+ - "Name of the tag which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which tags should be listed."
+ host:
+ description:
+ - "Name of the host, which tags should be listed."
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all tags whose names start with tag
+ ovirt_tag_info:
+ name: tag*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags that are assigned to VM postgres
+ ovirt_tag_info:
+ vm: postgres
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+
+- name: Gather information about all tags that are assigned to host west
+ ovirt_tag_info:
+ host: west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_tags }}"
+'''
+
+RETURN = '''
+ovirt_tags:
+ description: "List of dictionaries describing the tags. Tags attributes are mapped to dictionary keys,
+ all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
+ returned: On success.
+ type: list
+'''
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+ search_by_name,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ name=dict(default=None),
+ host=dict(default=None),
+ vm=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_tag_facts', 'community.general.ovirt_tag_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_tag_facts' module has been renamed to 'ovirt_tag_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ tags_service = connection.system_service().tags_service()
+ tags = []
+ all_tags = tags_service.list()
+ if module.params['name']:
+ tags.extend([
+ t for t in all_tags
+ if fnmatch.fnmatch(t.name, module.params['name'])
+ ])
+ if module.params['host']:
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['host'])
+ if host is None:
+ raise Exception("Host '%s' was not found." % module.params['host'])
+ tags.extend([
+ tag for tag in hosts_service.host_service(host.id).tags_service().list()
+ ])
+ if module.params['vm']:
+ vms_service = connection.system_service().vms_service()
+ vm = search_by_name(vms_service, module.params['vm'])
+ if vm is None:
+ raise Exception("Vm '%s' was not found." % module.params['vm'])
+ tags.extend([
+ tag for tag in vms_service.vm_service(vm.id).tags_service().list()
+ ])
+
+ if not (module.params['vm'] or module.params['host'] or module.params['name']):
+ tags = all_tags
+
+ result = dict(
+ ovirt_tags=[
+ get_dict_of_struct(
+ struct=t,
+ connection=connection,
+ fetch_nested=module.params['fetch_nested'],
+ attributes=module.params['nested_attributes'],
+ ) for t in tags
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
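
The name option above is matched with shell-style globbing on the client side rather than being passed to the oVirt search backend. A minimal sketch of that filtering, using hypothetical tag names (only the fnmatch.fnmatch() call mirrors the module):

    import fnmatch

    # Hypothetical tag names; the module obtains these from tags_service.list().
    all_tag_names = ['tag_backup', 'tag_prod', 'monitoring']
    pattern = 'tag*'   # value of the 'name' option, as in the first example above

    matching = [n for n in all_tag_names if fnmatch.fnmatch(n, pattern)]
    print(matching)    # ['tag_backup', 'tag_prod']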
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py
new file mode 100644
index 00000000..7595c64a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_template_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_template_facts
+short_description: Retrieve information about one or more oVirt/RHV templates
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_template_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV templates."
+notes:
+ - "This module returns a variable C(ovirt_templates), which
+ contains a list of templates. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all templates whose names start with centos and belong to data center west
+ ovirt_template_info:
+ pattern: name=centos* and datacenter=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_templates }}"
+'''
+
+RETURN = '''
+ovirt_templates:
+ description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
+ all templates attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_template_facts', 'community.general.ovirt_template_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_template_facts' module has been renamed to 'ovirt_template_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ templates_service = connection.system_service().templates_service()
+ templates = templates_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_templates=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in templates
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
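
All of these deprecated *_facts modules share the same exit logic: they detect whether the task invoked them under the legacy name and, if so, wrap the result in ansible_facts; otherwise they return the same dictionary as ordinary module return values. A condensed sketch of that pattern, where module and result stand in for the real AnsibleModule instance and result dictionary:

    def exit_with_result(module, result, legacy_names):
        # True when the task used the old *_facts name (short name or collection FQCN).
        is_old_facts = module._name in legacy_names
        if is_old_facts:
            # Legacy behaviour: expose the data as ansible_facts.
            module.exit_json(changed=False, ansible_facts=result)
        else:
            # *_info behaviour: return the same dictionary as plain return values.
            module.exit_json(changed=False, **result)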
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py
new file mode 100644
index 00000000..ce7ab8d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_user_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_user_facts
+short_description: Retrieve information about one or more oVirt/RHV users
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_user_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV users."
+notes:
+ - "This module returns a variable C(ovirt_users), which
+ contains a list of users. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search user X use following pattern: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all users whose first names start with john
+ ovirt_user_info:
+ pattern: name=john*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_users }}"
+'''
+
+RETURN = '''
+ovirt_users:
+ description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
+ all users attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_user_facts', 'community.general.ovirt_user_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_user_facts' module has been renamed to 'ovirt_user_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ users_service = connection.system_service().users_service()
+ users = users_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_users=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in users
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py
new file mode 100644
index 00000000..a5182755
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vm_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vm_facts
+short_description: Retrieve information about one or more oVirt/RHV virtual machines
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vm_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV virtual machines."
+notes:
+ - "This module returns a variable C(ovirt_vms), which
+ contains a list of virtual machines. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search VM X from cluster Y use following pattern:
+ name=X and cluster=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the virtual machines should be
+ included in the response."
+ type: bool
+ default: false
+ case_sensitive:
+ description:
+ - "If I(true) performed search will take case into account."
+ type: bool
+ default: true
+ max:
+ description:
+ - "The maximum number of results to return."
+ next_run:
+ description:
+ - "Indicates if the returned result describes the virtual machine as it is currently running or if describes
+ the virtual machine with the modifications that have already been performed but that will only come into
+ effect when the virtual machine is restarted. By default the value is set by engine."
+ type: bool
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all VMs whose names start with centos and belong to cluster west
+ ovirt_vm_info:
+ pattern: name=centos* and cluster=west
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms }}"
+
+- name: Gather info about next run configuration of virtual machine named myvm
+ ovirt_vm_info:
+ pattern: name=myvm
+ next_run: true
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vms[0] }}"
+'''
+
+RETURN = '''
+ovirt_vms:
+ description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
+ next_run=dict(default=None, type='bool'),
+ case_sensitive=dict(default=True, type='bool'),
+ max=dict(default=None, type='int'),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vm_facts', 'community.general.ovirt_vm_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vm_facts' module has been renamed to 'ovirt_vm_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ case_sensitive=module.params['case_sensitive'],
+ max=module.params['max'],
+ )
+ if module.params['next_run']:
+ vms = [vms_service.vm_service(vm.id).get(next_run=True) for vm in vms]
+
+ result = dict(
+ ovirt_vms=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vms
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py
new file mode 100644
index 00000000..24842be5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ovirt_vmpool_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovirt_vmpool_facts
+short_description: Retrieve information about one or more oVirt/RHV vmpools
+author: "Ondra Machacek (@machacekondra)"
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: When migrating to collection we decided to use only _info modules.
+ alternative: Use M(ovirt.ovirt.ovirt_vmpool_info) instead.
+description:
+ - "Retrieve information about one or more oVirt/RHV vmpools."
+notes:
+ - "This module returns a variable C(ovirt_vmpools), which
+ contains a list of vmpools. You need to register the result with
+ the I(register) keyword to use it."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt/RHV search backend."
+ - "For example to search vmpool X: name=X"
+extends_documentation_fragment:
+- community.general.ovirt_facts
+
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+- name: Gather information about all vm pools whose names start with centos
+ ovirt_vmpool_info:
+ pattern: name=centos*
+ register: result
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: "{{ result.ovirt_vm_pools }}"
+'''
+
+RETURN = '''
+ovirt_vm_pools:
+ description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
+ all vmpools attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
+ returned: On success.
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.removed import removed_module
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_info_full_argument_spec,
+)
+
+
+def main():
+ argument_spec = ovirt_info_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ is_old_facts = module._name in ('ovirt_vmpool_facts', 'community.general.ovirt_vmpool_facts')
+ if is_old_facts:
+ module.deprecate("The 'ovirt_vmpool_facts' module has been renamed to 'ovirt_vmpool_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ check_sdk(module)
+
+ try:
+ auth = module.params.pop('auth')
+ connection = create_connection(auth)
+ vmpools_service = connection.system_service().vm_pools_service()
+ vmpools = vmpools_service.list(search=module.params['pattern'])
+ result = dict(
+ ovirt_vm_pools=[
+ get_dict_of_struct(
+ struct=c,
+ connection=connection,
+ fetch_nested=module.params.get('fetch_nested'),
+ attributes=module.params.get('nested_attributes'),
+ ) for c in vmpools
+ ],
+ )
+ if is_old_facts:
+ module.exit_json(changed=False, ansible_facts=result)
+ else:
+ module.exit_json(changed=False, **result)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=auth.get('token') is None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
new file mode 100644
index 00000000..4ec6010f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+- Mathieu Bultel (@matbu)
+description:
+ - This module can manage a pacemaker cluster and nodes from Ansible using
+ the pacemaker CLI.
+options:
+ state:
+ description:
+ - Indicate desired state of the cluster
+ choices: [ cleanup, offline, online, restart ]
+ type: str
+ node:
+ description:
+ - Specify which node of the cluster you want to manage. None == the
+ cluster status itself, 'all' == check the status of all nodes.
+ type: str
+ timeout:
+ description:
+ - Timeout, in seconds, after which the module considers the action to have failed
+ default: 300
+ type: int
+ force:
+ description:
+ - Force the change of the cluster state
+ type: bool
+ default: 'yes'
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Get cluster state
+ community.general.pacemaker_cluster:
+ state: online
+'''
+
+RETURN = '''
+changed:
+ description: True if the cluster state has changed
+ type: bool
+ returned: always
+out:
+ description: The output of the current state of the cluster. It returns a
+ list of the node states.
+ type: str
+ sample: 'out: [[" overcloud-controller-0", " Online"]]}'
+ returned: always
+rc:
+ description: exit code of the module
+ type: bool
+ returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+ cmd = "pcs cluster status"
+ rc, out, err = module.run_command(cmd)
+ if out in _PCS_CLUSTER_DOWN:
+ return 'offline'
+ else:
+ return 'online'
+
+
+def get_node_status(module, node='all'):
+ if node == 'all':
+ cmd = "pcs cluster pcsd-status %s" % node
+ else:
+ cmd = "pcs cluster pcsd-status"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ status = []
+ for o in out.splitlines():
+ status.append(o.split(':'))
+ return status
+
+
+def clean_cluster(module, timeout):
+ cmd = "pcs resource cleanup"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+ # map states
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+
+ nodes_state = get_node_status(module, node)
+ for node in nodes_state:
+ if node[1].strip().lower() != state:
+ cmd = "%s %s" % (cmd, node[0].strip())
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+ while time.time() < t + timeout:
+ nodes_state = get_node_status(module)
+ for node in nodes_state:
+ if node[1].strip().lower() == state:
+ ready = True
+ break
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+ node=dict(type='str'),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ changed = False
+ state = module.params['state']
+ node = module.params['node']
+ force = module.params['force']
+ timeout = module.params['timeout']
+
+ if state in ['online', 'offline']:
+ # Get cluster status
+ if node is None:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Fail to bring the cluster %s" % state)
+ else:
+ cluster_state = get_node_status(module, node)
+ # Check cluster state
+ for node_state in cluster_state:
+ if node_state[1].strip().lower() == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ # Set cluster status if needed
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_node_status(module, node)
+ module.exit_json(changed=True, out=cluster_state)
+
+ if state in ['restart']:
+ set_cluster(module, 'offline', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'offline':
+ set_cluster(module, 'online', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'online':
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
+
+ if state in ['cleanup']:
+ clean_cluster(module, timeout)
+ cluster_state = get_cluster_status(module)
+ module.exit_json(changed=True,
+ out=cluster_state)
+
+
+if __name__ == '__main__':
+ main()
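
set_cluster() and set_node() above issue the pcs command and then poll until the cluster reports the requested state or the timeout expires. A minimal sketch of that wait loop, where get_state stands in for get_cluster_status()/get_node_status(); the module itself polls without sleeping, and the sleep below is only added to keep the sketch from busy-waiting:

    import time

    def wait_for_state(get_state, wanted, timeout):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_state() == wanted:   # cluster (or node) reached the requested state
                return True
            time.sleep(1)
        return False                    # the caller fail_json()s when this stays False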
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py
new file mode 100644
index 00000000..911d99b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bower.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower
+author: "Michael Warkentin (@mwarkentin)"
+options:
+ name:
+ type: str
+ description:
+ - The name of a bower package to install
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ type: bool
+ default: 'no'
+ production:
+ description:
+ - Install with --production flag
+ type: bool
+ default: 'no'
+ path:
+ type: path
+ description:
+ - The base path where the bower packages will be installed
+ required: true
+ relative_execpath:
+ type: path
+ description:
+ - Relative path to bower executable from install path
+ state:
+ type: str
+ description:
+ - The state of the bower package
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ type: str
+ description:
+ - The version to be installed
+'''
+
+EXAMPLES = '''
+- name: Install "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ community.general.bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ community.general.bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ community.general.bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- npm:
+ path: /app/location
+ name: bower
+ global: no
+- community.general.bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
+'''
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default=False, type='bool'),
+ production=dict(default=False, type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+ state=dict(default='present', choices=['present', 'absent', 'latest', ]),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = module.params['path']
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if missing:
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if missing or outdated:
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
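
Bower.list() above classifies every dependency reported by bower list --json as installed, missing or outdated, and main() derives the changed flag from those lists. A sketch of that classification over a hypothetical, heavily trimmed payload:

    import json

    # Hypothetical, trimmed example of `bower list --json` output.
    sample = json.loads('''
    {
      "dependencies": {
        "bootstrap": {"pkgMeta": {"version": "3.1.0"}, "update": {"latest": "3.1.1"}},
        "jquery":    {"missing": true},
        "lodash":    {"pkgMeta": {"version": "4.17.21"}, "update": {"latest": "4.17.21"}}
      }
    }
    ''')

    installed, missing, outdated = [], [], []
    for dep, data in sample['dependencies'].items():
        if data.get('missing', False):
            missing.append(dep)
        elif ('version' in data.get('pkgMeta', {}) and 'update' in data
              and data['pkgMeta']['version'] != data['update']['latest']):
            outdated.append(dep)
        else:
            installed.append(dep)

    print(installed, missing, outdated)   # ['lodash'] ['jquery'] ['bootstrap']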
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py
new file mode 100644
index 00000000..8be17d6f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/bundler.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem
+options:
+ executable:
+ type: str
+ description:
+ - The path to the bundler executable
+ state:
+ type: str
+ description:
+ - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version
+ choices: [present, latest]
+ default: present
+ chdir:
+ type: path
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory
+ - If not specified, it will default to the temporary working directory
+ exclude_groups:
+ type: list
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set
+ clean:
+ description:
+ - Only applies if state is C(present). If set removes any gems on the
+ target host that are not in the gemfile
+ type: bool
+ default: 'no'
+ gemfile:
+ type: path
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - If not specified it will default to the Gemfile in current directory
+ local:
+ description:
+ - If set only installs gems from the cache on the target host
+ type: bool
+ default: 'no'
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set it will install gems in
+ ./vendor/bundle instead of the default location. Requires a Gemfile.lock
+ file to have been created prior
+ type: bool
+ default: 'no'
+ user_install:
+ description:
+ - Only applies if state is C(present). Installs gems in the local user's cache or for all users
+ type: bool
+ default: 'yes'
+ gem_path:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir)
+ - If not specified the default RubyGems gem paths will be used.
+ binstub_directory:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bins files to. When executed the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir)
+ extra_args:
+ type: str
+ description:
+ - A space separated string of additional commands that can be applied to
+ the Bundler command. Refer to the Bundler documentation for more
+ information
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+- name: Install gems from a Gemfile in the current directory
+ community.general.bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
+
+- name: Exclude the production group from installing
+ community.general.bundler:
+ state: present
+ exclude_groups: production
+
+- name: Install gems into ./vendor/bundle
+ community.general.bundler:
+ state: present
+ deployment_mode: yes
+
+- name: Install gems using a Gemfile in another directory
+ community.general.bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
+
+- name: Update Gemfile in another directory
+ community.general.bundler:
+ state: latest
+ chdir: ~/rails_project
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ result = module.params.get('executable').split(' ')
+ else:
+ result = [module.get_bin_path('bundle', True)]
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
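
For state=present the module assembles the bundle install command from its parameters, appending one flag per enabled option, and later reports changed when the output contains 'Installing'. A condensed sketch of that command assembly with hypothetical parameter values (the real module builds the same flags on top of get_bundler_executable()):

    def build_install_cmd(bundle='bundle', exclude_groups=None, clean=False, gemfile=None,
                          local=False, deployment_mode=False, user_install=True,
                          gem_path=None, binstub_directory=None):
        cmd = [bundle, 'install']
        if exclude_groups:
            cmd += ['--without', ':'.join(exclude_groups)]
        if clean:
            cmd.append('--clean')
        if gemfile:
            cmd += ['--gemfile', gemfile]
        if local:
            cmd.append('--local')
        if deployment_mode:
            cmd.append('--deployment')
        if not user_install:
            cmd.append('--system')
        if gem_path:
            cmd += ['--path', gem_path]
        if binstub_directory:
            cmd += ['--binstubs', binstub_directory]
        return cmd

    print(build_install_cmd(exclude_groups=['development', 'test'], deployment_mode=True))
    # ['bundle', 'install', '--without', 'development:test', '--deployment']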
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py
new file mode 100644
index 00000000..3bc09c2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/composer.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+description:
+ - >
+ Composer is a tool for dependency management in PHP. It allows you to
+ declare the dependent libraries your project needs and it will install
+ them in your project for you.
+options:
+ command:
+ type: str
+ description:
+ - Composer command like "install", "update" and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ executable:
+ type: path
+ description:
+ - Path to PHP Executable on the remote host, if PHP is not in PATH.
+ aliases: [ php_path ]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see --working-dir). This is required when
+ the command is not run globally.
+ - Will be ignored if C(global_command=true).
+ aliases: [ working-dir ]
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ aliases: [ global-command ]
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source).
+ default: false
+ type: bool
+ aliases: [ prefer-source ]
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist).
+ default: false
+ type: bool
+ aliases: [ prefer-dist ]
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev).
+ default: true
+ type: bool
+ aliases: [ no-dev ]
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ default: false
+ type: bool
+ aliases: [ no-scripts ]
+ no_plugins:
+ description:
+ - Disables all plugins ( see --no-plugins ).
+ default: false
+ type: bool
+ aliases: [ no-plugins ]
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ aliases: [ optimize-autoloader ]
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+ - Implicitly enables optimize_autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ aliases: [ classmap-authoritative ]
+ apcu_autoloader:
+ description:
+ - Uses APCu to cache found/not-found classes
+ default: false
+ type: bool
+ aliases: [ apcu-autoloader ]
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ default: false
+ type: bool
+ aliases: [ ignore-platform-reqs ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
+'''
+
+EXAMPLES = '''
+- name: Download and install all libs and dependencies outlined in the /path/to/project/composer.lock
+ community.general.composer:
+ command: install
+ working_dir: /path/to/project
+
+- name: Install a new package
+ community.general.composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+- name: Clone and install a project with all dependencies
+ community.general.composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: yes
+
+- name: Install a package globally
+ community.general.composer:
+ command: require
+ global_command: yes
+ arguments: my/package
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+
+def has_changed(string):
+ for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
+ if no_change in string:
+ return False
+
+ return True
+
+
+def get_available_options(module, command='install'):
+ # get all available options from a composer command using composer help to json
+ rc, out, err = composer_command(module, "help %s --format=json" % command)
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = module.from_json(out)
+ return command_help_json['definition']['options']
+
+
+def composer_command(module, command, arguments="", options=None, global_command=False):
+ if options is None:
+ options = []
+
+ if module.params['executable'] is None:
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ else:
+ php_path = module.params['executable']
+
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(default="install", type="str"),
+ arguments=dict(default="", type="str"),
+ executable=dict(type="path", aliases=["php_path"]),
+ working_dir=dict(type="path", aliases=["working-dir"]),
+ global_command=dict(default=False, type="bool", aliases=["global-command"]),
+ prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
+ prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
+ no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
+ no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
+ no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
+ apcu_autoloader=dict(default=False, type="bool", aliases=["apcu-autoloader"]),
+ optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
+ classmap_authoritative=dict(default=False, type="bool", aliases=["classmap-authoritative"]),
+ ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
+ ),
+ required_if=[('global_command', False, ['working_dir'])],
+ supports_check_mode=True
+ )
+
+ # Get composer command with fallback to default
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ global_command = module.params['global_command']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+ 'apcu_autoloader': 'apcu-autoloader',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'classmap_authoritative': 'classmap-authoritative',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.items():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if module.check_mode:
+ if 'dry-run' in available_options:
+ options.append('--dry-run')
+ else:
+ module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+ rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+ # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+ main()
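
main() above never passes a flag blindly: it first asks composer help <command> --format=json which options the command supports and appends only the supported ones, including --dry-run for check mode. A sketch of that filtering, where available_options is a hypothetical subset of what a real Composer installation would report:

    # Hypothetical set of option names extracted from `composer help install --format=json`.
    available_options = {'no-ansi', 'no-interaction', 'no-progress', 'no-dev', 'dry-run'}

    desired = ['no-ansi', 'no-interaction', 'no-progress', 'no-dev', 'optimize-autoloader']
    options = ['--%s' % opt for opt in desired if opt in available_options]

    check_mode = True
    if check_mode and 'dry-run' in available_options:
        options.append('--dry-run')   # check mode only proceeds when the command supports --dry-run

    print(options)  # ['--no-ansi', '--no-interaction', '--no-progress', '--no-dev', '--dry-run']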
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py
new file mode 100644
index 00000000..3b43b443
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/cpanm.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Franck Cuny <franck@lumberjaph.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies.
+description:
+ - Manage Perl library dependencies.
+options:
+ name:
+ type: str
+ description:
+ - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz
+ aliases: ["pkg"]
+ from_path:
+ type: path
+ description:
+ - The local directory from which to install
+ notest:
+ description:
+ - Do not run unit tests
+ type: bool
+ default: no
+ locallib:
+ description:
+ - Specify the install base to install modules
+ type: path
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use
+ type: str
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB
+ type: bool
+ default: no
+ installdeps:
+ description:
+ - Only install dependencies
+ type: bool
+ default: no
+ version:
+ description:
+ - Minimum version of the Perl module to consider acceptable
+ type: str
+ system_lib:
+ description:
+ - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work.
+ - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation.
+ type: bool
+ default: no
+ aliases: ['use_sudo']
+ executable:
+ description:
+ - Override the path to the cpanm executable
+ type: path
+notes:
+ - Please note that L(cpanm, http://search.cpan.org/dist/App-cpanminus/bin/cpanm) must be installed on the remote host.
+author: "Franck Cuny (@fcuny)"
+'''
+
+EXAMPLES = '''
+- name: Install Dancer perl package
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install version 0.99_05 of the Plack perl package
+ community.general.cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
+
+- name: Install Dancer into the specified locallib
+ community.general.cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install perl dependencies from local directory
+ community.general.cpanm:
+ from_path: /srv/webapps/my_app/src/
+
+- name: Install Dancer perl package without running the unit tests in indicated locallib
+ community.general.cpanm:
+ name: Dancer
+ notest: True
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install Dancer perl package from a specific mirror
+ community.general.cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
+
+- name: Install Dancer perl package into the system root path
+ community.general.cpanm:
+ name: Dancer
+ system_lib: yes
+
+- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
+ community.general.cpanm:
+ name: Dancer
+ version: '1.0'
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _is_package_installed(module, name, locallib, cpanm, version):
+ cmd = ""
+ if locallib:
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "%s perl -e ' use %s" % (cmd, name)
+ if version:
+ cmd = "%s %s;'" % (cmd, version)
+ else:
+ cmd = "%s;'" % cmd
+ res, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return res == 0
+
+
+def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo):
+ # this code should use "%s" like everything else and just return early but not fixing all of it now.
+ # don't copy stuff like this
+ if from_path:
+ cmd = cpanm + " " + from_path
+ else:
+ cmd = cpanm + " " + name
+
+ if notest is True:
+ cmd = cmd + " -n"
+
+ if locallib is not None:
+ cmd = cmd + " -l " + locallib
+
+ if mirror is not None:
+ cmd = cmd + " --mirror " + mirror
+
+ if mirror_only is True:
+ cmd = cmd + " --mirror-only"
+
+ if installdeps is True:
+ cmd = cmd + " --installdeps"
+
+ if use_sudo is True:
+ cmd = cmd + " --sudo"
+
+ return cmd
+
+
+def _get_cpanm_path(module):
+ if module.params['executable']:
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('cpanm', True)
+ return result
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, required=False, aliases=['pkg']),
+ from_path=dict(default=None, required=False, type='path'),
+ notest=dict(default=False, type='bool'),
+ locallib=dict(default=None, required=False, type='path'),
+ mirror=dict(default=None, required=False),
+ mirror_only=dict(default=False, type='bool'),
+ installdeps=dict(default=False, type='bool'),
+ system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
+ version=dict(default=None, required=False),
+ executable=dict(required=False, type='path'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ required_one_of=[['name', 'from_path']],
+ )
+
+ cpanm = _get_cpanm_path(module)
+ name = module.params['name']
+ from_path = module.params['from_path']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
+ mirror_only = module.params['mirror_only']
+ installdeps = module.params['installdeps']
+ use_sudo = module.params['system_lib']
+ version = module.params['version']
+
+ changed = False
+
+ installed = _is_package_installed(module, name, locallib, cpanm, version)
+
+ if not installed:
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo)
+
+ rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
+
+ if rc_cpanm != 0:
+ module.fail_json(msg=err_cpanm, cmd=cmd)
+
+ if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1):
+ changed = True
+
+ module.exit_json(changed=changed, binary=cpanm, name=name)
+
+
+if __name__ == '__main__':
+ main()
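
_is_package_installed() above decides whether to invoke cpanm at all by running a perl -e 'use Module VERSION;' one-liner and treating exit code 0 as "already installed". A standalone sketch of that probe, using subprocess in place of module.run_command() and a hypothetical module name:

    import subprocess

    def perl_module_installed(name, version=None):
        snippet = 'use %s %s;' % (name, version) if version else 'use %s;' % name
        result = subprocess.run(['perl', '-e', snippet],
                                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # perl exits non-zero when the module (or the requested minimum version) is missing.
        return result.returncode == 0

    print(perl_module_installed('Dancer', '1.0'))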
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py
new file mode 100644
index 00000000..5e1d7930
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/easy_install.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: easy_install
+short_description: Installs Python libraries
+description:
+ - Installs Python libraries, optionally in a I(virtualenv)
+options:
+ name:
+ type: str
+ description:
+ - A Python library name
+ required: true
+ virtualenv:
+ type: str
+ description:
+      - An optional I(virtualenv) directory path to install into. If the
+        I(virtualenv) does not exist, it is created automatically.
+ virtualenv_site_packages:
+ description:
+      - Whether the virtual environment will inherit packages from the
+        global site-packages directory. Note that this setting has no
+        effect on an already existing virtual environment; the environment
+        must be deleted and newly created for it to take effect.
+ type: bool
+ default: 'no'
+ virtualenv_command:
+ type: str
+ description:
+ - The command to create the virtual environment with. For example
+ C(pyvenv), C(virtualenv), C(virtualenv2).
+ default: virtualenv
+ executable:
+ type: str
+ description:
+ - The explicit executable or a pathname to the executable to be used to
+ run easy_install for a specific version of Python installed in the
+ system. For example C(easy_install-3.3), if there are both Python 2.7
+ and 3.3 installations in the system and you want to run easy_install
+ for the Python 3.3 installation.
+ default: easy_install
+ state:
+ type: str
+ description:
+ - The desired state of the library. C(latest) ensures that the latest version is installed.
+ choices: [present, latest]
+ default: present
+notes:
+  - Please note that the C(easy_install) module can only install Python
+    libraries. Thus this module is not able to remove libraries. It is
+    generally recommended to use the M(ansible.builtin.pip) module, which you
+    can first install using M(community.general.easy_install).
+ - Also note that I(virtualenv) must be installed on the remote host if the
+ C(virtualenv) parameter is specified.
+requirements: [ "virtualenv" ]
+author: "Matt Wright (@mattupstate)"
+'''
+
+EXAMPLES = '''
+- name: Install or update pip
+ community.general.easy_install:
+ name: pip
+ state: latest
+
+- name: Install Bottle into the specified virtualenv
+ community.general.easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
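+
+# Editorial sketch of the executable option documented above; the version
+# suffix is a placeholder for whichever easy_install binary exists on the host.
+- name: Install Bottle for a specific Python installation using its own easy_install
+  community.general.easy_install:
+    name: bottle
+    executable: easy_install-3.3
+    state: latest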
+'''
+
+import os
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+
+
+def install_package(module, name, easy_install, executable_arguments):
+ cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
+ rc, out, err = module.run_command(cmd)
+ return rc, out, err
+
+
+def _is_package_installed(module, name, easy_install, executable_arguments):
+ # Copy and add to the arguments
+ executable_arguments = executable_arguments[:]
+ executable_arguments.append('--dry-run')
+ rc, out, err = install_package(module, name, easy_install, executable_arguments)
+ if rc:
+ module.fail_json(msg=err)
+ return 'Downloading' not in out
+
+
+def _get_easy_install(module, env=None, executable=None):
+ candidate_easy_inst_basenames = ['easy_install']
+ easy_install = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ easy_install = executable
+ else:
+ candidate_easy_inst_basenames.insert(0, executable)
+ if easy_install is None:
+ if env is None:
+ opt_dirs = []
+ else:
+ # Try easy_install with the virtualenv directory first.
+ opt_dirs = ['%s/bin' % env]
+ for basename in candidate_easy_inst_basenames:
+ easy_install = module.get_bin_path(basename, False, opt_dirs)
+ if easy_install is not None:
+ break
+ # easy_install should have been found by now. The final call to
+ # get_bin_path will trigger fail_json.
+ if easy_install is None:
+ basename = candidate_easy_inst_basenames[0]
+ easy_install = module.get_bin_path(basename, True, opt_dirs)
+ return easy_install
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'latest'],
+ type='str'),
+ virtualenv=dict(default=None, required=False),
+ virtualenv_site_packages=dict(default=False, type='bool'),
+ virtualenv_command=dict(default='virtualenv', required=False),
+ executable=dict(default='easy_install', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ env = module.params['virtualenv']
+ executable = module.params['executable']
+ site_packages = module.params['virtualenv_site_packages']
+ virtualenv_command = module.params['virtualenv_command']
+ executable_arguments = []
+ if module.params['state'] == 'latest':
+ executable_arguments.append('--upgrade')
+
+ rc = 0
+ err = ''
+ out = ''
+
+ if env:
+ virtualenv = module.get_bin_path(virtualenv_command, True)
+
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ command = '%s %s' % (virtualenv, env)
+ if site_packages:
+ command += ' --system-site-packages'
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
+
+ rc += rc_venv
+ out += out_venv
+ err += err_venv
+
+ easy_install = _get_easy_install(module, env, executable)
+
+ cmd = None
+ changed = False
+ installed = _is_package_installed(module, name, easy_install, executable_arguments)
+
+ if not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
+
+ rc += rc_easy_inst
+ out += out_easy_inst
+ err += err_easy_inst
+
+ changed = True
+
+ if rc != 0:
+ module.fail_json(msg=err, cmd=cmd)
+
+ module.exit_json(changed=changed, binary=easy_install,
+ name=name, virtualenv=env)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py
new file mode 100644
index 00000000..516c9b0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/gem.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+ - Manage installation and uninstallation of Ruby gems.
+options:
+ name:
+ type: str
+ description:
+ - The name of the gem to be managed.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ required: false
+ choices: [present, absent, latest]
+ default: present
+ gem_source:
+ type: path
+ description:
+ - The path to a local gem used as installation source.
+ required: false
+ include_dependencies:
+ description:
+ - Whether to include dependencies or not.
+ required: false
+ type: bool
+ default: "yes"
+ repository:
+ type: str
+ description:
+ - The repository from which the gem will be installed
+ required: false
+ aliases: [source]
+ user_install:
+ description:
+      - Install the gem in the user's local gems cache, or system-wide for all users.
+ required: false
+ type: bool
+ default: "yes"
+ executable:
+ type: path
+ description:
+ - Override the path to the gem executable
+ required: false
+ install_dir:
+ type: path
+ description:
+      - Install the gems into a specific directory.
+        These gems will be independent from the globally installed ones.
+        Specifying this requires I(user_install) to be false.
+ required: false
+ env_shebang:
+ description:
+ - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+ required: false
+ default: "no"
+ type: bool
+ version:
+ type: str
+ description:
+ - Version of the gem to be installed/removed.
+ required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: "no"
+ type: bool
+ include_doc:
+ description:
+ - Install with or without docs.
+ required: false
+ default: "no"
+ type: bool
+ build_flags:
+ type: str
+ description:
+ - Allow adding build flags for gem compilation
+ required: false
+ force:
+ description:
+ - Force gem to install, bypassing dependency checks.
+ required: false
+ default: "no"
+ type: bool
+author:
+ - "Ansible Core Team"
+ - "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+- name: Install version 1.0 of vagrant
+ community.general.gem:
+ name: vagrant
+ version: 1.0
+ state: present
+
+- name: Install latest available version of rake
+ community.general.gem:
+ name: rake
+ state: latest
+
+- name: Install rake version 1.0 from a local gem on disk
+ community.general.gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
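+
+# Editorial sketch of the install_dir option documented above; it requires
+# user_install to be disabled, and the directory is a placeholder.
+- name: Install rake into a dedicated gem directory
+  community.general.gem:
+    name: rake
+    install_dir: /opt/myapp/gems
+    user_install: no
+    state: present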
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_rubygems_path(module):
+ if module.params['executable']:
+ result = module.params['executable'].split(' ')
+ else:
+ result = [module.get_bin_path('gem', True)]
+ return result
+
+
+def get_rubygems_version(module):
+ cmd = get_rubygems_path(module) + ['--version']
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+
+ match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+ if not match:
+ return None
+
+ return tuple(int(x) for x in match.groups())
+
+
+def get_rubygems_environ(module):
+ if module.params['install_dir']:
+ return {'GEM_HOME': module.params['install_dir']}
+ return None
+
+
+def get_installed_versions(module, remote=False):
+
+ cmd = get_rubygems_path(module)
+ cmd.append('query')
+ if remote:
+ cmd.append('--remote')
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ cmd.append('-n')
+ cmd.append('^%s$' % module.params['name'])
+
+ environ = get_rubygems_environ(module)
+ (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
+ installed_versions = []
+ for line in out.splitlines():
+ match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
+ if match:
+ versions = match.group(1)
+ for version in versions.split(', '):
+ installed_versions.append(version.split()[0])
+ return installed_versions
+
+
+def exists(module):
+ if module.params['state'] == 'latest':
+ remoteversions = get_installed_versions(module, remote=True)
+ if remoteversions:
+ module.params['version'] = remoteversions[0]
+ installed_versions = get_installed_versions(module)
+ if module.params['version']:
+ if module.params['version'] in installed_versions:
+ return True
+ else:
+ if installed_versions:
+ return True
+ return False
+
+
+def uninstall(module):
+
+ if module.check_mode:
+ return
+ cmd = get_rubygems_path(module)
+ environ = get_rubygems_environ(module)
+ cmd.append('uninstall')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ else:
+ cmd.append('--all')
+ cmd.append('--executable')
+ cmd.append(module.params['name'])
+ module.run_command(cmd, environ_update=environ, check_rc=True)
+
+
+def install(module):
+
+ if module.check_mode:
+ return
+
+ ver = get_rubygems_version(module)
+ if ver:
+ major = ver[0]
+ else:
+ major = None
+
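+    # RubyGems 1.x expects --include-dependencies and --no-rdoc/--no-ri, while
+    # 2.x and later install dependencies by default and use --no-document, so
+    # the major version determined above decides which flags are appended.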
+ cmd = get_rubygems_path(module)
+ cmd.append('install')
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ if not module.params['include_dependencies']:
+ cmd.append('--ignore-dependencies')
+ else:
+ if major and major < 2:
+ cmd.append('--include-dependencies')
+ if module.params['user_install']:
+ cmd.append('--user-install')
+ else:
+ cmd.append('--no-user-install')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+ if module.params['pre_release']:
+ cmd.append('--pre')
+ if not module.params['include_doc']:
+ if major and major < 2:
+ cmd.append('--no-rdoc')
+ cmd.append('--no-ri')
+ else:
+ cmd.append('--no-document')
+ if module.params['env_shebang']:
+ cmd.append('--env-shebang')
+ cmd.append(module.params['gem_source'])
+ if module.params['build_flags']:
+ cmd.extend(['--', module.params['build_flags']])
+ if module.params['force']:
+ cmd.append('--force')
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ install_dir=dict(required=False, type='path'),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
+ force=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+ )
+
+ if module.params['version'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot specify version when state=latest")
+ if module.params['gem_source'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+ if module.params['user_install'] and module.params['install_dir']:
+ module.fail_json(msg="install_dir requires user_install=false")
+
+ if not module.params['gem_source']:
+ module.params['gem_source'] = module.params['name']
+
+ changed = False
+
+ if module.params['state'] in ['present', 'latest']:
+ if not exists(module):
+ install(module)
+ changed = True
+ elif module.params['state'] == 'absent':
+ if exists(module):
+ uninstall(module)
+ changed = True
+
+ result = {}
+ result['name'] = module.params['name']
+ result['state'] = module.params['state']
+ if module.params['version']:
+ result['version'] = module.params['version']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py
new file mode 100644
index 00000000..03c3d4d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/maven_artifact.py
@@ -0,0 +1,712 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+    - Can retrieve snapshots or release versions of the artifact, and will resolve the latest available
+      version if no specific version is requested.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - lxml
+    - boto3 if using an S3 repository (s3://...)
+options:
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate
+ - Mutually exclusive with I(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+ - The range type "(,1.0],[1.2,)" and "(,1.1),(1.1,)" is not supported.
+ - Mutually exclusive with I(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+ - Use file://... if the repository is local, added in version 2.6
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+            - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
+ aliases: [ "aws_secret_key" ]
+ password:
+ type: str
+ description:
+            - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_access_key" ]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+            - httplib2, the library used by the uri module, only sends authentication information when a webservice
+              responds to an initial request with a 401 status. Since some basic auth services do not properly
+              send a 401, logins will fail. This option forces the sending of the Basic authentication header
+              upon the initial request.
+ default: 'no'
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+            - The path where the artifact should be written to.
+            - If file mode or ownership are specified and the destination path already exists, they affect the downloaded file.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact
+ default: present
+ choices: [present,absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ default: 10
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be set to C(no) when no other option exists.
+ type: bool
+ default: 'yes'
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+            - If C(yes), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+            - This option only has an effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+              is defined.
+ type: bool
+ default: 'no'
+ verify_checksum:
+ type: str
+ description:
+ - If C(never), the md5 checksum will never be downloaded and verified.
+ - If C(download), the md5 checksum will be downloaded and verified only after artifact download. This is the default.
+            - If C(change), the md5 checksum will be downloaded and verified if the destination already exists,
+              to check whether the two files are identical. This was the behaviour before 2.6. Since it downloads the md5 before (maybe)
+              downloading the artifact, and since some repository software, when acting as a proxy/cache, returns a 404 error
+              if the artifact has not been cached yet, it may fail unexpectedly.
+              If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+              use it to verify integrity after download.
+ - C(always) combines C(download) and C(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+extends_documentation_fragment:
+ - files
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ client_cert: /path/to/cert.pem
+ client_key: /path/to/key.pem
+ dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e. retain the version
+ community.general.maven_artifact:
+ version: latest
+ artifact_id: spring-core
+ group_id: org.springframework
+ dest: /tmp/
+ keep_name: yes
+
+- name: Download the latest version of the JUnit framework artifact from Maven local
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+ repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
+
+- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version_by_spec: "[3.8,4.0)"
+ dest: /tmp/
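+
+# Editorial sketch combining the s3:// repository support and the verify_checksum
+# option documented above; bucket name and credential variables are placeholders.
+- name: Download an artifact from an S3-hosted repository, always verifying the MD5 checksum
+  community.general.maven_artifact:
+    group_id: com.company
+    artifact_id: library-name
+    repository_url: 's3://my-maven-bucket/releases'
+    username: '{{ aws_access_key_id }}'
+    password: '{{ aws_secret_access_key }}'
+    verify_checksum: always
+    dest: /tmp/library-name-latest.jar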
+'''
+
+import hashlib
+import os
+import posixpath
+import shutil
+import io
+import tempfile
+import traceback
+import re
+
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from re import match
+
+LXML_ETREE_IMP_ERR = None
+try:
+ from lxml import etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+BOTO_IMP_ERR = None
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+SEMANTIC_VERSION_IMP_ERR = None
+try:
+ from semantic_version import Version, Spec
+ HAS_SEMANTIC_VERSION = True
+except ImportError:
+ SEMANTIC_VERSION_IMP_ERR = traceback.format_exc()
+ HAS_SEMANTIC_VERSION = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
+ if head == dirname:
+ return None, [head]
+ else:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return head, [tail]
+ new_directory_list.append(tail)
+ return pre_existing_dir, new_directory_list
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+ if new_directory_list:
+ first_sub_dir = new_directory_list.pop(0)
+ if not pre_existing_dir:
+ working_dir = first_sub_dir
+ else:
+ working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.version_by_spec = version_by_spec
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
+ timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version)
+ if timestamp_version_match:
+ base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT")
+ else:
+ base = posixpath.join(base, self.version)
+ return base
+
+ def _generate_filename(self):
+ filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+ if not self.classifier:
+ filename = self.artifact_id + "." + self.extension
+ return filename
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+ if self.classifier:
+ result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ return result
+
+ @staticmethod
+ def parse(input):
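+        # Parses Maven coordinates of the form
+        # group_id:artifact_id[:extension[:classifier]]:version.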
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+ v = parts[len(parts) - 1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+ return Artifact(g, a, v, c, t)
+ else:
+ return None
+
+
+class MavenDownloader:
+ def __init__(self, module, base, local=False, headers=None):
+ self.module = module
+ if base.endswith("/"):
+ base = base.rstrip("/")
+ self.base = base
+ self.local = local
+ self.headers = headers
+ self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+ self.latest_version_found = None
+ self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+ def find_version_by_spec(self, artifact):
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+ versions = []
+ for version in original_versions:
+ try:
+ versions.append(Version.coerce(version))
+ except ValueError:
+ # This means that version string is not a valid semantic versioning
+ pass
+
+ parse_versions_syntax = {
+ # example -> (,1.0]
+ r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+ # example -> 1.0
+ r"^(?P<version>[0-9.]*)$": "~={version}",
+ # example -> [1.0]
+ r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+ # example -> [1.2, 1.3]
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+ # example -> [1.2, 1.3)
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+ # example -> [1.5,)
+ r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+ }
+
+ for regex, spec_format in parse_versions_syntax.items():
+ regex_result = match(regex, artifact.version_by_spec)
+ if regex_result:
+ spec = Spec(spec_format.format(**regex_result.groupdict()))
+ selected_version = spec.select(versions)
+
+ if not selected_version:
+ raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+ # To deal when repos on maven don't have patch number on first build (e.g. 3.8 instead of 3.8.0)
+ if str(selected_version) not in original_versions:
+ selected_version.patch = None
+
+ return str(selected_version)
+
+ raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+ def find_latest_version_available(self, artifact):
+ if self.latest_version_found:
+ return self.latest_version_found
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ self.latest_version_found = v[0]
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version_by_spec:
+ artifact.version = self.find_version_by_spec(artifact)
+
+ if artifact.version == "latest":
+ artifact.version = self.find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ if self.local:
+ return self._uri_for_artifact(artifact, artifact.version)
+ path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ classifier = snapshotArtifact.xpath("classifier/text()")
+ artifact_classifier = classifier[0] if classifier else ''
+ extension = snapshotArtifact.xpath("extension/text()")
+ artifact_extension = extension[0] if extension else ''
+ if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+ timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+ if timestamp_xmlpath:
+ timestamp = timestamp_xmlpath[0]
+ build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+            raise ValueError("Expected unique version for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ # for small files, directly get the full content
+ def _getContent(self, url, failmsg, force=True):
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ with io.open(parsed_url.path, 'rb') as f:
+ return f.read()
+ if force:
+                raise ValueError(failmsg + " because the file can not be found: " + url)
+ return None
+ response = self._request(url, failmsg, force)
+ if response:
+ return response.read()
+ return None
+
+ # only for HTTP request
+ def _request(self, url, failmsg, force=True):
+ url_to_use = url
+ parsed_url = urlparse(url)
+
+ if parsed_url.scheme == 's3':
+ parsed_url = urlparse(url)
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
+
+ req_timeout = self.module.params.get('timeout')
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.user_agent
+
+ response, info = fetch_url(self.module, url_to_use, timeout=req_timeout, headers=self.headers)
+ if info['status'] == 200:
+ return response
+ if force:
+            raise ValueError(failmsg + " because of " + info['msg'] + " for URL " + url_to_use)
+ return None
+
+ def download(self, tmpdir, artifact, verify_download, filename=None):
+ if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+ artifact.classifier, artifact.extension)
+ url = self.find_uri_for_artifact(artifact)
+ tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+ try:
+ # copy to temp file
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ shutil.copy2(parsed_url.path, tempname)
+ else:
+ return "Can not find local file: " + parsed_url.path
+ else:
+ response = self._request(url, "Failed to download artifact " + str(artifact))
+ with os.fdopen(tempfd, 'wb') as f:
+ shutil.copyfileobj(response, f)
+
+ if verify_download:
+ invalid_md5 = self.is_invalid_md5(tempname, url)
+ if invalid_md5:
+ # if verify_change was set, the previous file would be deleted
+ os.remove(tempname)
+ return invalid_md5
+ except Exception as e:
+ os.remove(tempname)
+ raise e
+
+ # all good, now copy temp file to target
+ shutil.move(tempname, artifact.get_filename(filename))
+ return None
+
+ def is_invalid_md5(self, file, remote_url):
+ if os.path.exists(file):
+ local_md5 = self._local_md5(file)
+ if self.local:
+ parsed_url = urlparse(remote_url)
+ remote_md5 = self._local_md5(parsed_url.path)
+ else:
+ try:
+ remote_md5 = to_text(self._getContent(remote_url + '.md5', "Failed to retrieve MD5", False), errors='strict')
+ except UnicodeError as e:
+ return "Cannot retrieve a valid md5 from %s: %s" % (remote_url, to_native(e))
+            if not remote_md5:
+ return "Cannot find md5 from " + remote_url
+ try:
+ # Check if remote md5 only contains md5 or md5 + filename
+ _remote_md5 = remote_md5.split(None)[0]
+ remote_md5 = _remote_md5
+ # remote_md5 is empty so we continue and keep original md5 string
+ # This should not happen since we check for remote_md5 before
+ except IndexError:
+ pass
+ if local_md5.lower() == remote_md5.lower():
+ return None
+ else:
+ return "Checksum does not match: we computed " + local_md5 + " but the repository states " + remote_md5
+
+ return "Path does not exist: " + file
+
+ def _local_md5(self, file):
+ md5 = hashlib.md5()
+ with io.open(file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ md5.update(chunk)
+ return md5.hexdigest()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group_id=dict(required=True),
+ artifact_id=dict(required=True),
+ version=dict(default=None),
+ version_by_spec=dict(default=None),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default='https://repo1.maven.org/maven2'),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ headers=dict(type='dict'),
+ force_basic_auth=dict(default=False, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", required=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ client_cert=dict(type="path", required=False),
+ client_key=dict(type="path", required=False),
+ keep_name=dict(required=False, default=False, type='bool'),
+ verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
+ directory_mode=dict(type='str'), # Used since https://github.com/ansible/ansible/pull/24965, not sure
+ # if this should really be here.
+ ),
+ add_file_common_args=True,
+ mutually_exclusive=([('version', 'version_by_spec')])
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
+ module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR)
+
+ repository_url = module.params["repository_url"]
+ if not repository_url:
+ repository_url = "https://repo1.maven.org/maven2"
+ try:
+ parsed_url = urlparse(repository_url)
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ local = parsed_url.scheme == "file"
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'),
+ exception=BOTO_IMP_ERR)
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ version_by_spec = module.params["version_by_spec"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ headers = module.params['headers']
+ state = module.params["state"]
+ dest = module.params["dest"]
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ keep_name = module.params["keep_name"]
+ verify_checksum = module.params["verify_checksum"]
+ verify_download = verify_checksum in ['download', 'always']
+ verify_change = verify_checksum in ['change', 'always']
+
+ downloader = MavenDownloader(module, repository_url, local, headers)
+
+ if not version_by_spec and not version:
+ version = "latest"
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ changed = False
+ prev_state = "absent"
+
+ if dest.endswith(os.sep):
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
+ os.makedirs(b_dest)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ version_part = version
+ if version == 'latest':
+ version_part = downloader.find_latest_version_available(artifact)
+ elif version_by_spec:
+ version_part = downloader.find_version_by_spec(artifact)
+
+ filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
+ artifact_id=artifact_id,
+ version_part="-{0}".format(version_part) if keep_name else "",
+ classifier="-{0}".format(classifier) if classifier else "",
+ extension=extension
+ )
+ dest = posixpath.join(dest, filename)
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_md5(dest, downloader.find_uri_for_artifact(artifact))):
+ prev_state = "present"
+
+ if prev_state == "absent":
+ try:
+ download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest)
+ if download_error is None:
+ changed = True
+ else:
+ module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = dest
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+ if changed:
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=changed)
+ else:
+ module.exit_json(state=state, dest=dest, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py
new file mode 100644
index 00000000..3ef81eaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/npm.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm).
+author: "Chris Hoffman (@chrishoffman)"
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ type: str
+ required: false
+ path:
+ description:
+      - The base path where the node.js libraries will be installed.
+ type: path
+ required: false
+ version:
+ description:
+ - The version to be installed.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: no
+ type: bool
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: no
+ unsafe_perm:
+ description:
+ - Use the C(--unsafe-perm) flag when installing.
+ type: bool
+ default: no
+ ci:
+ description:
+ - Install packages based on package-lock file, same as running C(npm ci).
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies.
+ required: false
+ type: bool
+ default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ type: str
+ state:
+ description:
+ - The state of the node.js library.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - npm installed in bin path (recommended /usr/local/bin)
+'''
+
+EXAMPLES = r'''
+- name: Install "coffee-script" node.js package.
+ community.general.npm:
+ name: coffee-script
+ path: /app/location
+
+- name: Install "coffee-script" node.js package on version 1.6.1.
+ community.general.npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
+
+- name: Install "coffee-script" node.js package globally.
+ community.general.npm:
+ name: coffee-script
+ global: yes
+
+- name: Remove the globally installed package "coffee-script".
+ community.general.npm:
+ name: coffee-script
+ global: yes
+ state: absent
+
+- name: Install "coffee-script" node.js package from custom registry.
+ community.general.npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.npm:
+ path: /app/location
+
+- name: Update packages based on package.json to their latest version.
+ community.general.npm:
+ path: /app/location
+ state: latest
+
+- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
+ community.general.npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
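+
+# Editorial sketch of the ci option documented above; the path is a placeholder.
+- name: Install packages from the package-lock file, equivalent to running "npm ci".
+  community.general.npm:
+    path: /app/location
+    ci: yes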
+'''
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.unsafe_perm = kwargs['unsafe_perm']
+ self.state = kwargs['state']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version'] and self.state != 'absent':
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production and ('install' in cmd or 'update' in cmd):
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.unsafe_perm:
+ cmd.append('--unsafe-perm')
+ if self.name and add_package_name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json', '--long']
+
+ installed = list()
+ missing = list()
+ data = {}
+ try:
+ data = json.loads(self._exec(cmd, True, False, False) or '{}')
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
+ missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if self.name and self.name not in installed:
+ missing.append(self.name)
+        else:
+            # Named dependency not installed
+            missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def ci_install(self):
+ return self._exec(['ci'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, type='str'),
+ path=dict(default=None, type='path'),
+ version=dict(default=None, type='str'),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None, type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ unsafe_perm=dict(default=False, type='bool'),
+ ci=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+ unsafe_perm = module.params['unsafe_perm']
+ ci = module.params['ci']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts,
+ unsafe_perm=unsafe_perm, state=state)
+
+ changed = False
+ if ci:
+ npm.ci_install()
+ changed = True
+ elif state == 'present':
+ installed, missing = npm.list()
+ if missing:
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if missing:
+ changed = True
+ npm.install()
+ if outdated:
+ changed = True
+ npm.update()
+ else: # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py
new file mode 100644
index 00000000..fef04d32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pear.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <https://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+author:
+ - Jonathan Lestrelin (@jle64) <jonathan.lestrelin@gmail.com>
+options:
+ name:
+ type: str
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+ aliases: [pkg]
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "installed", "latest", "absent", "removed"]
+ executable:
+ type: path
+ description:
+ - Path to the pear executable.
+ prompts:
+ description:
+ - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
+ - Prompts will be processed in the same order as the packages list.
+      - You can optionally specify an answer to any question in the list.
+ - If no answer is provided, the list item will only contain the regular expression.
+ - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+ - You can provide a list containing items with or without answer.
+      - A prompt list that is shorter or longer than the packages list is accepted, but a warning is issued.
+      - To indicate that a package in the middle of the list does not need a prompt, use C(null) for that item.
+ type: list
+ elements: raw
+ version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+ community.general.pear:
+ name: Net_URL2
+ state: present
+
+- name: Install pecl package
+ community.general.pear:
+ name: pecl/json_post
+ state: present
+
+- name: Install pecl package with expected prompt
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, the extra prompts will be ignored.
+    If there are more packages than prompts, the packages without a prompt will be installed without expecting any prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+      - I am a test prompt because gnupg doesn't ask anything
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once, skipping the first prompt.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, the extra prompts will be ignored.
+    If there are more packages than prompts, the packages without a prompt will be installed without expecting any prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - null
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+ community.general.pear:
+ name: Net_URL2
+ state: latest
+
+- name: Remove packages
+ community.general.pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
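+
+# Editorial sketch of the executable option documented above; the path to the
+# pear binary is a placeholder.
+- name: Remove a pecl package using a specific pear executable
+  community.general.pear:
+    name: pecl/json_post
+    state: absent
+    executable: /usr/local/php/bin/pear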
+'''
+
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+ """Take pear remoteinfo output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-':
+ continue
+ return installed
+ return None
+
+
+def _get_pear_path(module):
+ if module.params['executable'] and os.path.isfile(module.params['executable']):
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('pear', True, [module.params['executable']])
+ return result
+
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
+
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "%s info %s" % (_get_pear_path(module), name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages, prompts):
+ install_c = 0
+ has_prompt = bool(prompts)
+ default_stdin = "\n"
+
+ if has_prompt:
+ nb_prompts = len(prompts)
+ nb_packages = len(packages)
+
+ if nb_prompts > 0 and (nb_prompts != nb_packages):
+ if nb_prompts > nb_packages:
+ diff = nb_prompts - nb_packages
+ msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ else:
+ diff = nb_packages - nb_prompts
+ msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
+ % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ module.warn(msg)
+
+ # Preparing prompts answer according to item type
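+        # Each prompt item is normalised below to a (prompt_regex, stdin_data) tuple, for example:
+        #   {'regex': 'answer'} -> ('regex', 'answer\n'), None -> (None, '\n'), 'regex' -> ('regex', '\n')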
+ tmp_prompts = []
+ for _item in prompts:
+            # If the current item is a dict, its key is expected to be the prompt regex and its value the answer.
+            # The dict is expected to have only ONE key; the first key found is used.
+ if isinstance(_item, dict):
+ key = list(_item.keys())[0]
+ answer = _item[key] + "\n"
+
+ tmp_prompts.append((key, answer))
+ elif not _item:
+ tmp_prompts.append((None, default_stdin))
+ else:
+ tmp_prompts.append((_item, default_stdin))
+ prompts = tmp_prompts
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ if has_prompt and i < len(prompts):
+ prompt_regex = prompts[i][0]
+ data = prompts[i][1]
+ else:
+ prompt_regex = None
+ data = default_stdin
+
+ cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path'),
+ prompts=dict(default=None, required=False, type='list', elements='raw'),
+ ),
+ supports_check_mode=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs, p["prompts"])
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py
new file mode 100644
index 00000000..08eb2e95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/pip_package_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# started out with AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pip_package_info
+short_description: pip package information
+description:
+ - Return information about installed pip packages
+options:
+ clients:
+ description:
+      - A list of the pip executables that will be used to get the packages.
+        They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ default: ['pip']
+ required: False
+ type: list
+requirements:
+ - The requested pip executables must be installed on the target.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Just get the list from default pip
+ community.general.pip_package_info:
+
+- name: Get the facts for default pip, pip2 and pip3.6
+ community.general.pip_package_info:
+ clients: ['pip', 'pip2', 'pip3.6']
+
+- name: Get from specific paths (virtualenvs?)
+ community.general.pip_package_info:
+ clients: '/home/me/projec42/python/pip3.5'
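+
+# A hypothetical follow-up task showing one way to use the registered result;
+# the variable name 'pip_info' and the 'Babel' package are illustrative only.
+- name: Gather packages for the default pip and register the result
+  community.general.pip_package_info:
+  register: pip_info
+
+- name: Show the installed version of Babel as reported by pip
+  ansible.builtin.debug:
+    msg: "{{ pip_info.packages.pip.Babel[0].version | default('not installed') }}"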
+'''
+
+RETURN = '''
+packages:
+ description: a dictionary of installed package data
+ returned: always
+ type: dict
+ contains:
+ python:
+      description: A dictionary keyed by pip client; each entry maps package names to lists of dicts with Python package information.
+ returned: always
+ type: dict
+ sample:
+ "packages": {
+ "pip": {
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ],
+ },
+ }
+'''
+import json
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.packages import CLIMgr
+
+
+class PIP(CLIMgr):
+
+ def __init__(self, pip):
+
+ self.CLI = pip
+
+ def list_installed(self):
+ global module
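+        # 'pip list --format=json' gives machine-readable output for json.loads();
+        # '-l' restricts the listing to the local virtualenv when one is active.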
+ rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json'])
+ if rc != 0:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return json.loads(out)
+
+ def get_package_details(self, package):
+ package['source'] = self.CLI
+ return package
+
+
+def main():
+
+ # start work
+ global module
+ module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True)
+ packages = {}
+ results = {'packages': {}}
+ clients = module.params['clients']
+
+ found = 0
+ for pip in clients:
+
+ if not os.path.basename(pip).startswith('pip'):
+ module.warn('Skipping invalid pip client: %s' % (pip))
+ continue
+ try:
+ pip_mgr = PIP(pip)
+ if pip_mgr.is_available():
+ found += 1
+ packages[pip] = pip_mgr.get_packages()
+ except Exception as e:
+ module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e)))
+ continue
+
+ if found == 0:
+ module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients)
+
+ # return info
+ results['packages'] = packages
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py
new file mode 100644
index 00000000..77489e24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/language/yarn.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017 David Gunter <david.gunter@tivix.com>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yarn
+short_description: Manage node.js packages with Yarn
+description:
+ - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/)
+author:
+ - "David Gunter (@verkaufer)"
+  - "Chris Hoffman (@chrishoffman), creator of the NPM Ansible module"
+options:
+ name:
+ type: str
+ description:
+      - The name of a node.js library to install.
+      - If omitted, all packages in package.json are installed.
+      - To install a local node.js library globally, prepend "file:" to the path of the library (see the example in the EXAMPLES section).
+ required: false
+ path:
+ type: path
+ description:
+ - The base path where Node.js libraries will be installed.
+ - This is where the node_modules folder lives.
+ required: false
+ version:
+ type: str
+ description:
+ - The version of the library to be installed.
+ - Must be in semver format. If "latest" is desired, use "state" arg instead
+ required: false
+ global:
+ description:
+ - Install the node.js library globally
+ required: false
+ default: no
+ type: bool
+ executable:
+ type: path
+ description:
+ - The executable location for yarn.
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode.
+ - Yarn will ignore any dependencies under devDependencies in package.json
+ required: false
+ type: bool
+ default: no
+ registry:
+ type: str
+ description:
+ - The registry to install modules from.
+ required: false
+ state:
+ type: str
+ description:
+ - Installation state of the named node.js library
+ - If absent is selected, a name option must be provided
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - Yarn installed in bin path (typically /usr/local/bin)
+'''
+
+EXAMPLES = '''
+- name: Install "imagemin" node.js package.
+ community.general.yarn:
+ name: imagemin
+ path: /app/location
+
+- name: Install "imagemin" node.js package on version 5.3.1
+ community.general.yarn:
+ name: imagemin
+ version: '5.3.1'
+ path: /app/location
+
+- name: Install "imagemin" node.js package globally.
+ community.general.yarn:
+ name: imagemin
+ global: yes
+
+- name: Remove the globally-installed package "imagemin".
+ community.general.yarn:
+ name: imagemin
+ global: yes
+ state: absent
+
+- name: Install "imagemin" node.js package from custom registry.
+ community.general.yarn:
+ name: imagemin
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.yarn:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.yarn:
+ path: /app/location
+ state: latest
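+
+# Illustrative only: the path below is hypothetical. As described for the 'name'
+# option, a local library can be installed globally by prefixing its path with "file:".
+- name: Install a local node.js library globally.
+  community.general.yarn:
+    name: 'file:/path/to/local-library'
+    global: yes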
+'''
+
+RETURN = '''
+changed:
+ description: Whether Yarn changed any package data
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Provides an error message if Yarn syntax was incorrect
+ returned: failure
+ type: str
+ sample: "Package must be explicitly named when uninstalling."
+invocation:
+ description: Parameters and values used during execution
+ returned: success
+ type: dict
+ sample: {
+ "module_args": {
+ "executable": null,
+ "globally": false,
+ "ignore_scripts": false,
+ "name": null,
+ "path": "/some/path/folder",
+ "production": false,
+ "registry": null,
+ "state": "present",
+ "version": null
+ }
+ }
+out:
+ description: Output generated from Yarn with emojis removed.
+ returned: always
+ type: str
+ sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
+ Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+'''
+
+import os
+import re
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Yarn(object):
+
+ DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global'
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.globally = kwargs['globally']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+
+ # Specify a version of package if version arg passed in
+ self.name_version = None
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('yarn', True)]
+
+ if kwargs['version'] and self.name is not None:
+ self.name_version = self.name + '@' + str(self.version)
+ elif self.name is not None:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+
+ if self.globally:
+ # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
+ args.insert(0, 'global')
+
+ cmd = self.executable + args
+
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # always run Yarn without emojis when called via Ansible
+ cmd.append('--no-emoji')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path and not self.globally:
+ if not os.path.exists(self.path):
+                    # Create the directory if it does not exist.
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+ cwd = self.path
+
+ if not os.path.isfile(os.path.join(self.path, 'package.json')):
+ self.module.fail_json(msg="Package.json does not exist in provided path.")
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+        return (None, None)
+
+ def list(self):
+ cmd = ['list', '--depth=0', '--json']
+
+ installed = list()
+ missing = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ missing.append(self.name)
+ return installed, missing
+
+ result, error = self._exec(cmd, True, False)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ data = json.loads(result)
+ try:
+ dependencies = data['data']['trees']
+ except KeyError:
+ missing.append(self.name)
+ return installed, missing
+
+ for dep in dependencies:
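+            # Entries look like 'left-pad@1.1.3'; split off the trailing version.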
+ name, version = dep['name'].rsplit('@', 1)
+ installed.append(name)
+
+ if self.name not in installed:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ if self.name_version:
+ # Yarn has a separate command for installing packages by name...
+ return self._exec(['add', self.name_version])
+ # And one for installing all packages in package.json
+ return self._exec(['install', '--non-interactive'])
+
+ def update(self):
+ return self._exec(['upgrade', '--latest'])
+
+ def uninstall(self):
+ return self._exec(['remove', self.name])
+
+ def list_outdated(self):
+ outdated = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ return outdated
+
+ cmd_result, err = self._exec(['outdated', '--json'], True, False)
+ if err:
+ self.module.fail_json(msg=err)
+
+ if not cmd_result:
+ return outdated
+
+ outdated_packages_data = cmd_result.splitlines()[1]
+
+ data = json.loads(outdated_packages_data)
+
+ try:
+ outdated_dependencies = data['data']['body']
+ except KeyError:
+ return outdated
+
+ for dep in outdated_dependencies:
+ # Outdated dependencies returned as a list of lists, where
+ # item at index 0 is the name of the dependency
+ outdated.append(dep[0])
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ globally = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ # When installing globally, users should not be able to define a path for installation.
+ # Require a path if global is False, though!
+ if path is None and globally is False:
+ module.fail_json(msg='Path must be specified when not using global arg')
+ elif path and globally is True:
+ module.fail_json(msg='Cannot specify path if doing global installation')
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='Package must be explicitly named when uninstalling.')
+ if state == 'latest':
+ version = 'latest'
+
+ # When installing globally, use the defined path for global node_modules
+ if globally:
+ path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH
+
+ yarn = Yarn(module,
+ name=name,
+ path=path,
+ version=version,
+ globally=globally,
+ production=production,
+ executable=executable,
+ registry=registry,
+ ignore_scripts=ignore_scripts)
+
+ changed = False
+ out = ''
+ err = ''
+ if state == 'present':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+
+ elif state == 'latest':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ outdated = yarn.list_outdated()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+ if len(outdated):
+ changed = True
+ out, err = yarn.update()
+ else:
+ # state == absent
+ installed, missing = yarn.list()
+ if name in installed:
+ changed = True
+ out, err = yarn.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py
new file mode 100644
index 00000000..74b738de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apk.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+author: "Kevin Brebanov (@kbrebanov)"
+options:
+ available:
+ description:
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
+ if the currently installed package is no longer available from any repository.
+ type: bool
+ default: no
+ name:
+ description:
+ - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ type: list
+ elements: str
+ no_cache:
+ description:
+ - Do not use any local cache path.
+ type: bool
+ default: no
+ version_added: 1.0.0
+ repository:
+ description:
+ - A package repository or multiple repositories.
+ Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
+ - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ default: present
+ choices: [ "present", "absent", "latest", "installed", "removed" ]
+ type: str
+ update_cache:
+ description:
+      - Update repository indexes. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ type: bool
+ default: no
+notes:
+ - '"name" and "upgrade" are mutually exclusive.'
+  - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option (see the list example below).
+'''
+
+EXAMPLES = '''
+- name: Update repositories and install foo package
+ community.general.apk:
+ name: foo
+ update_cache: yes
+
+- name: Update repositories and install foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ update_cache: yes
+
+- name: Remove foo package
+ community.general.apk:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ state: absent
+
+- name: Install the package foo
+ community.general.apk:
+ name: foo
+ state: present
+
+- name: Install the packages foo and bar
+ community.general.apk:
+ name: foo,bar
+ state: present
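+
+# As noted in the documentation, passing a list to 'name' is more efficient than
+# looping over packages; this is an illustrative list form of the example above.
+- name: Install the packages foo and bar using a list
+  community.general.apk:
+    name:
+      - foo
+      - bar
+    state: present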
+
+- name: Update repositories and update package foo to latest version
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Update repositories and update packages foo and bar to latest versions
+ community.general.apk:
+ name: foo,bar
+ state: latest
+ update_cache: yes
+
+- name: Update all installed packages to the latest versions
+ community.general.apk:
+ upgrade: yes
+
+- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
+ community.general.apk:
+ available: yes
+ upgrade: yes
+
+- name: Update repositories as a separate step
+ community.general.apk:
+ update_cache: yes
+
+- name: Install package from a specific repository
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: yes
+ repository: http://dl-3.alpinelinux.org/alpine/edge/main
+
+- name: Install package without using cache
+ community.general.apk:
+ name: foo
+ state: latest
+ no_cache: yes
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when packages have changed
+ type: list
+ sample: ['package', 'other-package']
+'''
+
+import re
+# Import module snippets.
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_for_packages(stdout):
+ packages = []
+ data = stdout.split('\n')
+ regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
+ for l in data:
+ p = regex.search(l)
+ if p:
+ packages.append(p.group(1))
+ return packages
+
+
+def update_package_db(module, exit):
+ cmd = "%s update" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
+ elif exit:
+ module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
+ else:
+ return True
+
+
+def query_toplevel(module, name):
+ # /etc/apk/world contains a list of top-level packages separated by ' ' or \n
+ # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
+ with open('/etc/apk/world') as f:
+ content = f.read().split()
+ for p in content:
+ if regex.search(p):
+ return True
+ return False
+
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
+
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"^%s: virtual meta package" % (re.escape(name))
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+
+def upgrade_packages(module, available):
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ if available:
+ cmd = "%s --available" % cmd
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
+ if re.search(r'^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def install_packages(module, names, state):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_toplevel(module, name):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install + to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ # Check to see if packages are still present because of dependencies
+ for name in installed:
+ if query_package(module, name):
+ rc = 1
+ break
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name=dict(type='list', elements='str'),
+ no_cache=dict(default=False, type='bool'),
+ repository=dict(type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ available=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ if p['no_cache']:
+ APK_PATH = "%s --no-cache" % (APK_PATH, )
+
+ # add repositories to the APK_PATH
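+    # Pointing --repositories-file at /dev/null makes the listed repositories
+    # override the system-configured ones instead of supplementing them.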
+ if p['repository']:
+ for r in p['repository']:
+ APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module, not p['name'] and not p['upgrade'])
+
+ if p['upgrade']:
+ upgrade_packages(module, p['available'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py
new file mode 100644
index 00000000..d196e03b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_repo.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Mikhail Gordeev
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_repo
+short_description: Manage APT repositories via apt-repo
+description:
+ - Manages APT repositories using apt-repo tool.
+ - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo
+notes:
+  - This module works on ALT-based distros.
+  - Does NOT support check mode, due to a limitation of the apt-repo tool.
+options:
+ repo:
+ description:
+ - Name of the repository to add or remove.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicates the desired repository state.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ remove_others:
+ description:
+      - Remove all repositories other than the one being added.
+ - Used if I(state=present)
+ type: bool
+ default: no
+ update:
+ description:
+ - Update the package database after changing repositories.
+ type: bool
+ default: no
+author:
+- Mikhail Gordeev (@obirvalger)
+'''
+
+EXAMPLES = '''
+- name: Remove all repositories
+ community.general.apt_repo:
+ repo: all
+ state: absent
+
+- name: Add repository `Sisyphus` and remove other repositories
+  community.general.apt_repo:
+    repo: Sisyphus
+ state: present
+ remove_others: yes
+
+- name: Add local repository `/space/ALT/Sisyphus` and update package cache
+ community.general.apt_repo:
+ repo: copy:///space/ALT/Sisyphus
+ state: present
+ update: yes
+'''
+
+RETURN = ''' # '''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_REPO_PATH = "/usr/bin/apt-repo"
+
+
+def apt_repo(module, *args):
+ """run apt-repo with args and return its output"""
+ # make args list to use in concatenation
+ args = list(args)
+ rc, out, err = module.run_command([APT_REPO_PATH] + args)
+
+ if rc != 0:
+ module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
+
+ return out
+
+
+def add_repo(module, repo):
+ """add a repository"""
+ apt_repo(module, 'add', repo)
+
+
+def rm_repo(module, repo):
+ """remove a repository"""
+ apt_repo(module, 'rm', repo)
+
+
+def set_repo(module, repo):
+ """add a repository and remove other repositories"""
+ # first add to validate repository
+ apt_repo(module, 'add', repo)
+ apt_repo(module, 'rm', 'all')
+ apt_repo(module, 'add', repo)
+
+
+def update(module):
+ """update package cache"""
+ apt_repo(module, 'update')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ remove_others=dict(type='bool', default=False),
+ update=dict(type='bool', default=False),
+ ),
+ )
+
+ if not os.path.exists(APT_REPO_PATH):
+ module.fail_json(msg='cannot find /usr/bin/apt-repo')
+
+ params = module.params
+ repo = params['repo']
+ state = params['state']
+ old_repositories = apt_repo(module)
+
+ if state == 'present':
+ if params['remove_others']:
+ set_repo(module, repo)
+ else:
+ add_repo(module, repo)
+ elif state == 'absent':
+ rm_repo(module, repo)
+
+ if params['update']:
+ update(module)
+
+ new_repositories = apt_repo(module)
+ changed = old_repositories != new_repositories
+ module.exit_json(changed=changed, repo=repo, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py
new file mode 100644
index 00000000..6b6bb7ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/apt_rpm.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov <evg@altlinux.org>
+# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+    - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
+options:
+ package:
+ description:
+ - list of packages to install, upgrade or remove.
+ required: true
+ aliases: [ name, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update the package database first with C(apt-get update).
+ aliases: [ 'update-cache' ]
+ type: bool
+ default: no
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: present
+
+- name: Install packages foo and bar
+ community.general.apt_rpm:
+ pkg:
+ - foo
+ - bar
+ state: present
+
+- name: Remove package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+ community.general.apt_rpm:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ return rc == 0
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command("%s update" % APT_PATH)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db: %s" % err)
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package))
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+
+ rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
+
+ installed = True
+        for package in pkgspec:
+ if not query_package_provides(module, package):
+ installed = False
+
+        # apt-rpm always returns exit code 0 when --force is used
+ if rc or not installed:
+ module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']),
+ ),
+ )
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package']
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, packages)
+
+ elif p['state'] in ['absent', 'removed']:
+ remove_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py
new file mode 100644
index 00000000..1be1a722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+- Allows users to add or remove flatpaks.
+- See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: path
+ default: flatpak
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The name of the flatpak to manage.
+ - When used with I(state=present), I(name) can be specified as an C(http(s)) URL to a
+ C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+ - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ format.
+ - When supplying an C(http(s)) URL with I(state=absent), the module will try to match the
+ installed flatpak based on the name of the flatpakref to remove it. However, there is no
+ guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+ flatpak do match.
+ type: str
+ required: true
+ remote:
+ description:
+ - The flatpak remote (repository) to install the flatpak from.
+ - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
+ you can use this.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ type: str
+ default: flathub
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ type: str
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+ community.general.flatpak:
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ state: present
+
+- name: Install the gedit flatpak package
+ community.general.flatpak:
+ name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+ state: present
+
+- name: Install the gedit package from flathub for current user
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: present
+ method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+ community.general.flatpak:
+ name: org.gnome.Calendar
+ state: present
+ remote: gnome
+
+- name: Remove the gedit flatpak
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
+'''
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.basic import AnsibleModule
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, name, method):
+ """Add a new flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
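+    # flatpak releases older than 1.1.3 only understand '-y'; newer ones accept '--noninteractive'.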
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ if name.startswith('http://') or name.startswith('https://'):
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, name]
+ else:
+ command = [binary, "install", "--{0}".format(method), noninteractive_arg, remote, name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def uninstall_flat(module, binary, name, method):
+ """Remove an existing flatpak."""
+ global result
+ flatpak_version = _flatpak_version(module, binary)
+ if StrictVersion(flatpak_version) < StrictVersion('1.1.3'):
+ noninteractive_arg = "-y"
+ else:
+ noninteractive_arg = "--noninteractive"
+ installed_flat_name = _match_installed_flat_name(module, binary, name, method)
+    command = [binary, "uninstall", "--{0}".format(method), noninteractive_arg, installed_flat_name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def flatpak_exists(module, binary, name, method):
+ """Check if the flatpak is installed."""
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ name = _parse_flatpak_name(name).lower()
+ if name in output.lower():
+ return True
+ return False
+
+
+def _match_installed_flat_name(module, binary, name, method):
+ # This is a difficult function, since if the user supplies a flatpakref url,
+ # we have to rely on a naming convention:
+ # The flatpakref file name needs to match the flatpak name
+ global result
+ parsed_name = _parse_flatpak_name(name)
+ # Try running flatpak list with columns feature
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ _flatpak_command(module, False, command, ignore_failure=True)
+    if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+        # Probably flatpak before 1.2
+        matched_flatpak_name = \
+            _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+    else:
+        # Probably flatpak >= 1.2
+        matched_flatpak_name = \
+            _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+ if matched_flatpak_name:
+ return matched_flatpak_name
+ else:
+ result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+ "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+ "If you used a URL, try using the reverse DNS name of the flatpak"
+ module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+    global result
+    command = [binary, "list", "--{0}".format(method), "--app"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() in row.lower():
+            return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+    global result
+    command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+    output = _flatpak_command(module, False, command)
+    for row in output.split('\n'):
+        if parsed_name.lower() == row.lower():
+            return row
+
+
+def _parse_flatpak_name(name):
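+    # Derive the likely flatpak name from a flatpakref URL by stripping the file extension,
+    # e.g. '.../org.gnome.gedit.flatpakref' -> 'org.gnome.gedit' (URL form is illustrative);
+    # non-URL names are returned unchanged.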
+ if name.startswith('http://') or name.startswith('https://'):
+ file_name = urlparse(name).path.split('/')[-1]
+ file_name_without_extension = file_name.split('.')[0:-1]
+ common_name = ".".join(file_name_without_extension)
+ else:
+ common_name = name
+ return common_name
+
+
+def _flatpak_version(module, binary):
+ global result
+ command = [binary, "--version"]
+ output = _flatpak_command(module, False, command)
+ version_number = output.split()[1]
+ return version_number
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=not ignore_failure
+ )
+ return result['stdout']
+
+
+def main():
+ # This module supports check mode
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ remote=dict(type='str', default='flathub'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ executable=dict(type='path', default='flatpak')
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ remote = module.params['remote']
+ method = module.params['method']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ if state == 'present' and not flatpak_exists(module, binary, name, method):
+ install_flat(module, binary, remote, name, method)
+ elif state == 'absent' and flatpak_exists(module, binary, name, method):
+ uninstall_flat(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py
new file mode 100644
index 00000000..dbb211c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/flatpak_remote.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright: (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# ATTENTION CONTRIBUTORS!
+#
+# TL;DR: Run this module's integration tests manually before opening a pull request
+#
+# Long explanation:
+# The integration tests for this module are currently NOT run on the Ansible project's continuous
+# delivery pipeline. So please: When you make changes to this module, make sure that you run the
+# included integration tests manually for both Python 2 and Python 3:
+#
+# Python 2:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 2.7 flatpak_remote
+# Python 3:
+# ansible-test integration -v --docker fedora28 --docker-privileged --allow-unsupported --python 3.6 flatpak_remote
+#
+# Because of external dependencies, the current integration tests are somewhat too slow and brittle
+# to be included right now. I have plans to rewrite the integration tests based on a local flatpak
+# repository so that they can be included into the normal CI pipeline.
+# //oolongbrothers
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+short_description: Manage flatpak repository remotes
+description:
+- Allows users to add or remove flatpak remotes.
+- The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+- Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+- Existing remotes will not be updated.
+- See the M(community.general.flatpak) module for managing flatpaks.
+author:
+- John Kwiatkoski (@JayKayy)
+- Alexander Bethke (@oolongbrothers)
+requirements:
+- flatpak
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: str
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ type: str
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result
+ command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result
+ command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = [binary, "remote-list", "-d", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
+
+def _flatpak_command(module, noop, command):
+ global result
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=True
+ )
+ return result['stdout']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py
new file mode 100644
index 00000000..21dea647
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew.py
@@ -0,0 +1,971 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - "python >= 2.6"
+ - homebrew must already be installed on the target system
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages
+options:
+ name:
+ description:
+ - A list of names of packages to install/remove.
+ aliases: [ 'formula', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "A ':' separated list of paths to search for 'brew' executable.
+        Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of the I(brew) command,
+        providing an alternative I(brew) path enables managing a different set of packages in an alternative location on the system."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - state of the package.
+ choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ]
+ default: present
+ type: str
+ update_homebrew:
+ description:
+ - update homebrew itself first.
+ type: bool
+ default: no
+ aliases: ['update-brew']
+ upgrade_all:
+ description:
+ - upgrade all homebrew packages.
+ type: bool
+ default: no
+ aliases: ['upgrade']
+ install_options:
+ description:
+      - Option flags to install a package.
+ aliases: ['options']
+ type: list
+ elements: str
+ upgrade_options:
+ description:
+ - Option flags to upgrade.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+notes:
+  - When used with a `loop:`, each package will be processed individually;
+    it is much more efficient to pass the list directly to the `name` option
+    (see the list example in the EXAMPLES below).
+'''
+
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- community.general.homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+ update_homebrew: yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: latest
+ update_homebrew: yes
+
+# Update homebrew and upgrade all packages
+- community.general.homebrew:
+ update_homebrew: yes
+ upgrade_all: yes
+
+# Miscellaneous other examples
+- community.general.homebrew:
+ name: foo
+ state: head
+
+- community.general.homebrew:
+ name: foo
+ state: linked
+
+- community.general.homebrew:
+ name: foo
+ state: absent
+
+- community.general.homebrew:
+ name: foo,bar
+ state: absent
+
+- community.general.homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
+
+- name: Use ignored-pinned option while upgrading all
+ community.general.homebrew:
+ upgrade_all: yes
+ upgrade_options: ignored-pinned
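+
+# Illustrative sketch of the note above: passing a list to 'name' lets one
+# task handle all packages instead of looping over them individually.
+- community.general.homebrew:
+    name:
+      - foo
+      - bar
+    state: present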
+'''
+
+RETURN = '''
+msg:
+    description: Status message describing the result of the module run.
+ returned: always
+ type: str
+ sample: "Changed: 0, Unchanged: 2"
+unchanged_pkgs:
+ description:
+ - List of package names which are unchanged after module run
+ returned: success
+ type: list
+ sample: ["awscli", "ag"]
+ version_added: '0.2.0'
+changed_pkgs:
+ description:
+ - List of package names which are changed after module run
+ returned: success
+ type: list
+ sample: ['git', 'git-cola']
+ version_added: '0.2.0'
+'''
+
+import os.path
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
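+    # Strip the trailing '# ...' annotations and whitespace from each line of
+    # the character spec 's', then compile a regex that matches any character
+    # *outside* the allowed set (a character-class complement).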
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plusses
+ \- # dashes
+ : # colons (for URLs)
+ @ # at-sign
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, string_types):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+        '''A valid package is either None or a string made up of the allowed
+        package-name characters (alphanumerics, dots, slashes, plusses,
+        dashes, colons and at-signs).'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, string_types)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None, upgrade_options=None):
+ if not install_options:
+ install_options = list()
+ if not upgrade_options:
+ upgrade_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options,
+ upgrade_options=upgrade_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.changed_pkgs = []
+ self.unchanged_pkgs = []
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew would be updated.'
+ raise HomebrewException(self.message)
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew packages would be upgraded.'
+ raise HomebrewException(self.message)
+ cmd = [self.brew_path, 'upgrade'] + self.upgrade_options
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall', '--force']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ upgrade_options=dict(
+ default=None,
+ type='list',
+ elements='str',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('head', ):
+ state = 'head'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state == 'linked':
+ state = 'linked'
+ if state == 'unlinked':
+ state = 'unlinked'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ update_homebrew = p['update_homebrew']
+ if not update_homebrew:
+ module.run_command_environ_update.update(
+ dict(HOMEBREW_NO_AUTO_UPDATE="True")
+ )
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ p['upgrade_options'] = p['upgrade_options'] or []
+ upgrade_options = ['--{0}'.format(upgrade_option)
+ for upgrade_option in p['upgrade_options']]
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options,
+ upgrade_options=upgrade_options)
+ (failed, changed, message) = brew.run()
+ changed_pkgs = brew.changed_pkgs
+ unchanged_pkgs = brew.unchanged_pkgs
+
+ if failed:
+ module.fail_json(msg=message)
+ module.exit_json(
+ changed=changed,
+ msg=message,
+ unchanged_pkgs=unchanged_pkgs,
+ changed_pkgs=changed_pkgs
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py
new file mode 100644
index 00000000..feb1ba68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_cask.py
@@ -0,0 +1,875 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright: (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+- "Indrajit Raychaudhuri (@indrajitr)"
+- "Daniel Jaouen (@danieljaouen)"
+- "Enric Lluelles (@enriclluelles)"
+requirements:
+- "python >= 2.6"
+short_description: Install and uninstall homebrew casks
+description:
+- Manages Homebrew casks.
+options:
+ name:
+ description:
+ - Name of cask to install or remove.
+ aliases: [ 'cask', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the cask.
+ choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ]
+ default: present
+ type: str
+ sudo_password:
+ description:
+ - The sudo password to be passed to SUDO_ASKPASS.
+ required: false
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ - Note that C(brew cask update) is a synonym for C(brew update).
+ type: bool
+ default: no
+ aliases: [ 'update-brew' ]
+ install_options:
+ description:
+    - Option flags to install a package.
+ aliases: [ 'options' ]
+ type: list
+ elements: str
+ accept_external_apps:
+ description:
+ - Allow external apps.
+ type: bool
+ default: no
+ upgrade_all:
+ description:
+ - Upgrade all casks.
+ - Mutually exclusive with C(upgraded) state.
+ type: bool
+ default: no
+ aliases: [ 'upgrade' ]
+ greedy:
+ description:
+ - Upgrade casks that auto update.
+ - Passes --greedy to brew cask outdated when checking
+ if an installed cask has a newer version available.
+ type: bool
+ default: no
+'''
+EXAMPLES = '''
+- name: Install cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+
+- name: Remove cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- name: Allow external app
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ accept_external_apps: True
+
+- name: Remove cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
+
+- name: Upgrade all casks
+ community.general.homebrew_cask:
+ upgrade_all: true
+
+- name: Upgrade given cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: upgraded
+ install_options: force
+
+- name: Upgrade cask with greedy option
+ community.general.homebrew_cask:
+ name: 1password
+ state: upgraded
+ greedy: True
+
+- name: Using sudo password for installing cask
+ community.general.homebrew_cask:
+ name: wireshark
+ state: present
+ sudo_password: "{{ ansible_become_pass }}"
+'''
+
+import os
+import re
+import tempfile
+from distutils import version
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \- # dashes
+ @ # at symbol
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, (string_types)):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or a string made up of the allowed
+        cask-name characters (alphanumerics, dots, slashes, dashes and at-signs).'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, string_types)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+
+ @property
+ def brew_version(self):
+ try:
+ return self._brew_version
+ except AttributeError:
+ return None
+
+ @brew_version.setter
+ def brew_version(self, brew_version):
+ self._brew_version = brew_version
+
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path=path, casks=None, state=None,
+ sudo_password=None, update_homebrew=False,
+ install_options=None, accept_external_apps=False,
+ upgrade_all=False, greedy=False):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_outdated(self):
+ if not self.valid_cask(self.current_cask):
+ return False
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'outdated', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'outdated']
+
+ cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask]
+
+ rc, out, err = self.module.run_command(cask_is_outdated_command)
+
+ return out != ""
+
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, "list", "--cask"]
+ else:
+ base_opts = [self.brew_path, "cask", "list"]
+
+ cmd = base_opts + [self.current_cask]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def _get_brew_version(self):
+ if self.brew_version:
+ return self.brew_version
+
+ cmd = [self.brew_path, '--version']
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+ # get version string from first line of "brew --version" output
+ version = out.split('\n')[0].split(' ')[1]
+ self.brew_version = version
+ return self.brew_version
+
+ def _brew_cask_command_is_deprecated(self):
+ # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/)
+ return version.LooseVersion(self._get_brew_version()) >= version.LooseVersion('2.6.0')
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.upgrade_all:
+ return self._upgrade_all()
+
+ if self.casks:
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'upgraded':
+ return self._upgrade_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ self.failed = True
+ self.message = "You must select a cask to install."
+ raise HomebrewCaskException(self.message)
+
+ # sudo_password fix ---------------------- {{{
+ def _run_command_with_sudo_password(self, cmd):
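+        # Write a throwaway askpass script that echoes the configured sudo
+        # password, expose it to the brew command via SUDO_ASKPASS, and let
+        # Ansible clean up the temporary file once the module finishes.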
+ rc, out, err = '', '', ''
+
+ with tempfile.NamedTemporaryFile() as sudo_askpass_file:
+ sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password))
+ os.chmod(sudo_askpass_file.name, 0o700)
+ sudo_askpass_file.file.close()
+
+ rc, out, err = self.module.run_command(
+ cmd,
+ environ_update={'SUDO_ASKPASS': sudo_askpass_file.name}
+ )
+
+ self.module.add_cleanup_file(sudo_askpass_file.name)
+
+ return (rc, out, err)
+ # /sudo_password fix --------------------- }}}
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Casks would be upgraded.'
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ cmd = [self.brew_path, 'upgrade', '--cask']
+ else:
+ cmd = [self.brew_path, 'cask', 'upgrade']
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
+ self.message = 'Homebrew casks already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew casks upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'install', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'install']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_cask(self):
+ command = 'upgrade'
+
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ command = 'install'
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.message = 'Cask is already upgraded: {0}'.format(
+ self.current_cask,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be upgraded: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, command, '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', command]
+
+ opts = base_opts + self.install_options + [self.current_cask]
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask upgraded: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _upgrade_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._upgrade_current_cask()
+
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'uninstall', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'uninstall']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled --------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ sudo_password=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ ),
+ update_homebrew=dict(
+ default=False,
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ accept_external_apps=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ greedy=dict(
+ default=False,
+ type='bool',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ sudo_password = p['sudo_password']
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ greedy = p['greedy']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ accept_external_apps = p['accept_external_apps']
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy,
+ )
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py
new file mode 100644
index 00000000..d31da485
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/homebrew_tap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ type: list
+ elements: str
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+      - The I(name) option may not be a list of multiple taps (it must be a
+        single tap) when this option is provided.
+ required: false
+ type: str
+ state:
+ description:
+ - state of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+ type: str
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+
+- name: Tap a Homebrew repository, state absent
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- name: Tap a Homebrew repository using url, state present
+ community.general.homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
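+
+# Illustrative sketch: 'name' is a list option, so taps can also be given in
+# YAML list form instead of a comma-separated string.
+- name: Tap multiple Homebrew repositories given as a list
+  community.general.homebrew_tap:
+    name:
+      - homebrew/dupes
+      - homebrew/science
+    state: present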
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ tap_name = re.sub('homebrew-', '', tap.lower())
+
+ return tap_name in taps
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ tap,
+ url,
+ ])
+ if rc == 0:
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, unchanged, added, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, unchanged, removed, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin', '/opt/homebrew/bin']
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+            # When a tap URL is provided explicitly, we allow adding a
+            # *single* tap only. Validate and proceed to add that single tap.
+ if len(taps) > 1:
+ msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py
new file mode 100644
index 00000000..af7a950a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/installp.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+- Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+ - Manage packages using 'installp' on AIX
+options:
+ accept_license:
+ description:
+ - Whether to accept the license for the package(s).
+ type: bool
+ default: no
+ name:
+ description:
+ - One or more packages to install or remove.
+      - Use C(all) to install all packages available in the informed C(repository_path).
+ type: list
+ elements: str
+ required: true
+ aliases: [ pkg ]
+ repository_path:
+ description:
+ - Path with AIX packages (required to install).
+ type: path
+ state:
+ description:
+ - Whether the package needs to be present on or absent from the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+- If the package is already installed, even if the package/fileset is new, the module will not install it.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+ community.general.installp:
+ name: foo
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master only
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ repository_path: /repository/AIX71/installp/base
+ accept_license: yes
+ state: present
+
+- name: Remove the package bos.sysmgt.nim.master
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+ """
+    Check that the package or fileset name and the repository path are valid.
+
+ :param module: Ansible module arguments spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package information.
+ """
+
+ if os.path.isdir(repository_path):
+ installp_cmd = module.get_bin_path('installp', True)
+ rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+ if package == 'all':
+ pkg_info = "All packages on dir"
+ return True, pkg_info
+
+ else:
+ pkg_info = {}
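+            # installp -l lists one available fileset per line; record each matching fileset name with its level.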
+ for line in package_result.splitlines():
+ if re.findall(package, line):
+ pkg_name = line.split()[0].strip()
+ pkg_version = line.split()[1].strip()
+ pkg_info[pkg_name] = pkg_version
+
+ return True, pkg_info
+
+ return False, None
+
+ else:
+ module.fail_json(msg="Repository path %s is not valid." % repository_path)
+
+
+def _check_installed_pkg(module, package, repository_path):
+ """
+ Check the package on AIX.
+    It verifies whether the package is installed and collects its information.
+
+ :param module: Ansible module parameters spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package data.
+ """
+
+ lslpp_cmd = module.get_bin_path('lslpp', True)
+ rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+ if rc == 1:
+ package_state = ' '.join(err.split()[-2:])
+ if package_state == 'not installed.':
+ return False, None
+ else:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
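+    # lslpp -lcq prints one colon-separated record per fileset; keep the fileset and level keyed by package name.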
+ pkg_data = {}
+ full_pkg_data = lslpp_result.splitlines()
+ for line in full_pkg_data:
+ pkg_name, fileset, level = line.split(':')[0:3]
+ pkg_data[pkg_name] = fileset, level
+
+ return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+ repository_path = None
+ remove_count = 0
+ removed_pkgs = []
+ not_found_pkg = []
+ for package in packages:
+ pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+ if pkg_check:
+ if not module.check_mode:
+ rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+ remove_count += 1
+ removed_pkgs.append(package)
+
+ else:
+ not_found_pkg.append(package)
+
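+    # Build a summary of what was removed and what could not be found.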
+ if remove_count > 0:
+        if len(not_found_pkg) > 0:
+ not_found_pkg.insert(0, "Package(s) not found: ")
+
+ changed = True
+ msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+ else:
+ changed = False
+ msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+ return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+ installed_pkgs = []
+ not_found_pkgs = []
+ already_installed_pkgs = {}
+
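+    # Map the accept_license flag to the installp -Y option (empty string when licenses are not accepted).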
+ accept_license_param = {
+ True: '-Y',
+ False: '',
+ }
+
+ # Validate if package exists on repository path.
+ for package in packages:
+ pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+ # If package exists on repository path, check if package is installed.
+ if pkg_check:
+ pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+ # If package is already installed.
+ if pkg_check_current:
+ # Check if package is a package and not a fileset, get version
+ # and add the package into already installed list
+ if package in pkg_info.keys():
+ already_installed_pkgs[package] = pkg_info[package][1]
+
+ else:
+ # If the package is not a package but a fileset, confirm
+ # and add the fileset/package into already installed list
+ for key in pkg_info.keys():
+ if package in pkg_info[key]:
+ already_installed_pkgs[package] = pkg_info[key][1]
+
+ else:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+ installed_pkgs.append(package)
+
+ else:
+ not_found_pkgs.append(package)
+
+ if len(installed_pkgs) > 0:
+ installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+ else:
+ installed_msg = ''
+
+ if len(not_found_pkgs) > 0:
+ not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+ else:
+ not_found_msg = ''
+
+ if len(already_installed_pkgs) > 0:
+ already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py
new file mode 100644
index 00000000..3c990205
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/layman.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ type: str
+ list_url:
+ description:
+      - A URL of an alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from Layman's configuration.
+ aliases: [url]
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ default: present
+ choices: [present, absent, updated]
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(no).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+- name: Install the overlay mozilla which is on the central overlays list
+ community.general.layman:
+ name: mozilla
+
+- name: Install the overlay cvut from the specified alternative list
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+
+- name: Update (sync) the overlay cvut or install if not installed yet
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
+
+- name: Update (sync) all of the installed overlays
+ community.general.layman:
+ name: ALL
+ state: updated
+
+- name: Uninstall the overlay cvut
+ community.general.layman:
+ name: cvut
+ state: absent
+'''
+
+import shutil
+import traceback
+
+from os import path
+
+LAYMAN_IMP_ERR = None
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ LAYMAN_IMP_ERR = traceback.format_exc()
+ HAS_LAYMAN_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+USERAGENT = 'ansible-httpget'
+
+
+class ModuleError(Exception):
+ pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+ '''Installs the overlay repository. If not on the central overlays list,
+ then :list_url of an alternative list must be provided. The list will be
+ fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the
+ ``overlay_defs`` is read from the Layman's configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it does not exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+ if layman.get_errors():
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
+ messages = [str(item[1]) for item in layman.sync_results[2]]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+        module.fail_json(msg=to_native(e))
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py
new file mode 100644
index 00000000..a865a8f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/macports.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on okpg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages (ports)
+options:
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+      - Update MacPorts and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: "no"
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: "no"
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - 'C(variant) is only supported with state: I(installed)/I(present).'
+ aliases: ['variants']
+ type: str
+'''
+EXAMPLES = '''
+- name: Install the foo port
+ community.general.macports:
+ name: foo
+
+- name: Install the universal, x11 variant of the foo port
+ community.general.macports:
+ name: foo
+ variant: +universal+x11
+
+- name: Install a list of ports
+ community.general.macports:
+ name: "{{ ports }}"
+ vars:
+ ports:
+ - foo
+ - foo-tools
+
+- name: Update MacPorts and the ports tree, then upgrade all outdated ports
+ community.general.macports:
+ selfupdate: yes
+ upgrade: yes
+
+- name: Update MacPorts and the ports tree, then install the foo port
+ community.general.macports:
+ name: foo
+ selfupdate: yes
+
+- name: Remove the foo port
+ community.general.macports:
+ name: foo
+ state: absent
+
+- name: Activate the foo port
+ community.general.macports:
+ name: foo
+ state: active
+
+- name: Deactivate the foo port
+ community.general.macports:
+ name: foo
+ state: inactive
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def selfupdate(module, port_path):
+ """ Update Macports and the ports tree. """
+
+ rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+
+ if rc == 0:
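+        # Look for output indicating that the ports tree was re-parsed or a new MacPorts release was installed.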
+ updated = any(
+ re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
+ re.search(r'Installing new Macports release', s.strip())
+ for s in out.split('\n')
+ if s
+ )
+ if updated:
+ changed = True
+ msg = "Macports updated successfully"
+ else:
+ changed = False
+ msg = "Macports already up-to-date"
+
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
+
+
+def upgrade(module, port_path):
+ """ Upgrade outdated ports. """
+
+ rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+
+ # rc is 1 when nothing to upgrade so check stdout first.
+ if out.strip() == "Nothing to upgrade.":
+ changed = False
+ msg = "Ports already upgraded"
+ return (changed, msg)
+ elif rc == 0:
+ changed = True
+ msg = "Outdated ports upgraded successfully"
+ return (changed, msg)
+ else:
+ module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
+
+
+def query_port(module, port_path, name, state="present"):
+ """ Returns whether a port is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and out.strip().startswith(name + " "):
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and "(active)" in out:
+ return True
+
+ return False
+
+
+def remove_ports(module, port_path, ports):
+ """ Uninstalls one or more ports if installed. """
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the port that failed
+ for port in ports:
+ # Query the port first, to see if we even need to remove
+ if not query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+
+ if query_port(module, port_path, port):
+ module.fail_json(msg="Failed to remove %s: %s" % (port, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="Port(s) already absent")
+
+
+def install_ports(module, port_path, ports, variant):
+ """ Installs one or more ports if not already installed. """
+
+ install_c = 0
+
+ for port in ports:
+ if query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to install %s: %s" % (port, err))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="Port(s) already present")
+
+
+def activate_ports(module, port_path, ports):
+ """ Activate a port if it's inactive. """
+
+ activate_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
+
+ if query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+
+ if not query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to activate %s: %s" % (port, err))
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
+
+ module.exit_json(changed=False, msg="Port(s) already active")
+
+
+def deactivate_ports(module, port_path, ports):
+ """ Deactivate a port if it's active. """
+
+ deactivated_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
+
+ if not query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+
+ if query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
+
+ module.exit_json(changed=False, msg="Port(s) already inactive")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=["port"]),
+ selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ upgrade=dict(default=False, type='bool'),
+ variant=dict(aliases=["variants"], default=None, type='str')
+ )
+ )
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
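+    # selfupdate and upgrade can be requested on their own; exit early when no port names were given.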
+ if p["selfupdate"]:
+ (changed, msg) = selfupdate(module, port_path)
+ if not (p["name"] or p["upgrade"]):
+ module.exit_json(changed=changed, msg=msg)
+
+ if p["upgrade"]:
+ (changed, msg) = upgrade(module, port_path)
+ if not p["name"]:
+ module.exit_json(changed=changed, msg=msg)
+
+ pkgs = p["name"]
+
+ variant = p["variant"]
+
+ if p["state"] in ["present", "installed"]:
+ install_ports(module, port_path, pkgs, variant)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_ports(module, port_path, pkgs)
+
+ elif p["state"] == "active":
+ activate_ports(module, port_path, pkgs)
+
+ elif p["state"] == "inactive":
+ deactivate_ports(module, port_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py
new file mode 100644
index 00000000..bc3e6dfd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/mas.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mas
+short_description: Manage Mac App Store applications with mas-cli
+description:
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+version_added: '0.2.0'
+author:
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
+options:
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+    - The C(absent) value requires root permissions; also see the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: "no"
+ aliases: ["upgrade"]
+requirements:
+ - macOS 10.11+
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+notes:
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Install Keynote
+ community.general.mas:
+ id: 409183694
+ state: present
+
+- name: Install Divvy with command mas installed in /usr/local/bin
+ community.general.mas:
+ id: 413857545
+ state: present
+ environment:
+ PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
+
+- name: Install a list of apps
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+
+- name: Ensure the latest Keynote version is installed
+ community.general.mas:
+ id: 409183694
+ state: latest
+
+- name: Upgrade all installed Mac App Store apps
+ community.general.mas:
+ upgrade_all: yes
+
+- name: Install specific apps and also upgrade all others
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+ upgrade_all: yes
+
+- name: Uninstall Divvy
+ community.general.mas:
+ id: 413857545
+ state: absent
+ become: yes # Uninstallation requires root permissions
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import StrictVersion
+import os
+
+
+class Mas(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # Initialize data properties
+ self.mas_path = self.module.get_bin_path('mas')
+ self._checked_signin = False
+ self._installed = None # Populated only if needed
+ self._outdated = None # Populated only if needed
+ self.count_install = 0
+ self.count_upgrade = 0
+ self.count_uninstall = 0
+ self.result = {
+ 'changed': False
+ }
+
+ self.check_mas_tool()
+
+ def app_command(self, command, id):
+ ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
+
+ if not self.module.check_mode:
+ if command != 'uninstall':
+ self.check_signin()
+
+ rc, out, err = self.run([command, str(id)])
+ if rc != 0:
+ self.module.fail_json(
+ msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
+ )
+
+ # No error or dry run
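+        # Bump the matching counter (count_install / count_upgrade / count_uninstall) so exit() can report it.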
+ self.__dict__['count_' + command] += 1
+
+ def check_mas_tool(self):
+ ''' Verifies that the `mas` tool is available in a recent version '''
+
+ # Is the `mas` tool available at all?
+ if not self.mas_path:
+ self.module.fail_json(msg='Required `mas` tool is not installed')
+
+ # Is the version recent enough?
+ rc, out, err = self.run(['version'])
+ if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
+ self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
+
+ def check_signin(self):
+ ''' Verifies that the user is signed in to the Mac App Store '''
+
+ # Only check this once per execution
+ if self._checked_signin:
+ return
+
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
+
+ self._checked_signin = True
+
+ def exit(self):
+ ''' Exit with the data we have collected over time '''
+
+ msgs = []
+ if self.count_install > 0:
+ msgs.append('Installed {0} app(s)'.format(self.count_install))
+ if self.count_upgrade > 0:
+ msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
+ if self.count_uninstall > 0:
+ msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
+
+ if msgs:
+ self.result['changed'] = True
+ self.result['msg'] = ', '.join(msgs)
+
+ self.module.exit_json(**self.result)
+
+ def get_current_state(self, command):
+ ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
+
+ rc, raw_apps, err = self.run([command])
+ rows = raw_apps.split("\n")
+ if rows[0] == "No installed apps found":
+ rows = []
+ apps = []
+ for r in rows:
+ # Format: "123456789 App Name"
+ r = r.split(' ', 1)
+ if len(r) == 2:
+ apps.append(int(r[0]))
+
+ return apps
+
+ def installed(self):
+ ''' Returns the list of installed apps '''
+
+ # Populate cache if not already done
+ if self._installed is None:
+ self._installed = self.get_current_state('list')
+
+ return self._installed
+
+ def is_installed(self, id):
+ ''' Checks whether the given app is installed '''
+
+ return int(id) in self.installed()
+
+ def is_outdated(self, id):
+ ''' Checks whether the given app is installed, but outdated '''
+
+ return int(id) in self.outdated()
+
+ def outdated(self):
+ ''' Returns the list of installed, but outdated apps '''
+
+ # Populate cache if not already done
+ if self._outdated is None:
+ self._outdated = self.get_current_state('outdated')
+
+ return self._outdated
+
+ def run(self, cmd):
+ ''' Runs a command of the `mas` tool '''
+
+ cmd.insert(0, self.mas_path)
+ return self.module.run_command(cmd, False)
+
+ def upgrade_all(self):
+ ''' Upgrades all installed apps and sets the correct result data '''
+
+ outdated = self.outdated()
+
+ if not self.module.check_mode:
+ self.check_signin()
+
+ rc, out, err = self.run(['upgrade'])
+ if rc != 0:
+ self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
+
+ self.count_upgrade += len(outdated)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='list', elements='int'),
+ state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
+ upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
+ ),
+ supports_check_mode=True
+ )
+ mas = Mas(module)
+
+ if module.params['id']:
+ apps = module.params['id']
+ else:
+ apps = []
+
+ state = module.params['state']
+ upgrade = module.params['upgrade_all']
+
+ # Run operations on the given app IDs
+ for app in sorted(set(apps)):
+ if state == 'present':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+
+ elif state == 'absent':
+ if mas.is_installed(app):
+ # Ensure we are root
+ if os.getuid() != 0:
+ module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
+
+ mas.app_command('uninstall', app)
+
+ elif state == 'latest':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+ elif mas.is_outdated(app):
+ mas.app_command('upgrade', app)
+
+ # Upgrade all apps if requested
+ mas._outdated = None # Clear cache
+ if upgrade and mas.outdated():
+ mas.upgrade_all()
+
+ # Exit with the collected data
+ mas.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py
new file mode 100644
index 00000000..7432c48a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/openbsd_pkg.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author:
+- Patrik Lundin (@eest)
+short_description: Manage packages on OpenBSD
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+requirements:
+- python >= 2.5
+options:
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: yes
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ choices: [ absent, latest, present, installed, removed ]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ - Mutually exclusive with I(snapshot).
+ type: bool
+ default: no
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with I(build).
+ type: bool
+ default: no
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the C(build) option, allows overriding
+ the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration
+ file(s) in the old packages which are annotated with @extra in
+ the packaging-list.
+ type: bool
+ default: no
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums
+ before removing normal files.
+ type: bool
+ default: no
+notes:
+  - When used with a C(loop:) each package will be processed individually; it is much more
+    efficient to pass the list directly to the I(name) option (see the list example below).
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+
+- name: Make sure nmap is the latest version
+ community.general.openbsd_pkg:
+ name: nmap
+ state: latest
+
+- name: Make sure nmap is not installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+ build: yes
+
+- name: Specify a pkg flavour with '--'
+ community.general.openbsd_pkg:
+ name: vim--no_x11
+ state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+ community.general.openbsd_pkg:
+ name: vim--
+ state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+ community.general.openbsd_pkg:
+ name: python%3.5
+ state: present
+
+- name: Update all packages on the system
+ community.general.openbsd_pkg:
+ name: '*'
+ state: latest
+
+- name: Purge a package and its configuration files
+ community.general.openbsd_pkg:
+ name: mpd
+ clean: yes
+ state: absent
+
+- name: Quickly remove a package without checking checksums
+ community.general.openbsd_pkg:
+ name: qt5
+ quick: yes
+ state: absent
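+
+# A list can be passed directly to 'name' (more efficient than a loop); the package names below are illustrative
+- name: Make sure nmap and rsync are installed in a single task
+  community.general.openbsd_pkg:
+    name:
+      - nmap
+      - rsync
+    state: present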
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from distutils.version import StrictVersion
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+ return module.run_command(cmd_args)
+
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ for name in names:
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+ pkg_spec[name]['installed_names'] = [installed_name for installed_name in stdout.splitlines()]
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+ pkg_spec[name]['installed_state'] = True
+ else:
+ pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+ build = module.params['build']
+
+ for name in names:
+ # It is possible package_present() has been called from package_latest().
+ # In that case we do not want to operate on the whole list of names,
+ # only the leftovers.
+ if pkg_spec['package_latest_leftovers']:
+ if name not in pkg_spec['package_latest_leftovers']:
+ module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+ continue
+ else:
+ module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
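+                # Build from the ports tree; pass FLAVOR or SUBPACKAGE to make when one was parsed from the package name.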
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec[name]['flavor']:
+ flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec[name]['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if module.params['snapshot'] is True:
+ install_cmd += ' -Dsnap'
+
+ if pkg_spec[name]['installed_state'] is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+                (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on if a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") even though a branch name may look like a
+            # version string it is not used as one by pkg_add.
+ if pkg_spec[name]['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code for name '%s'" % name)
+ if pkg_spec[name]['rc']:
+ pkg_spec[name]['changed'] = False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr for name '%s'" % name)
+ if pkg_spec[name]['stderr']:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+ # while still installing the package, so we need to look for
+                    # a message like "packagename-1.0: ok" just in case.
+ match = re.search(r"\W%s-[^:]+: ok\W" % pkg_spec[name]['stem'], pkg_spec[name]['stdout'])
+
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package for name '%s'" % name)
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name)
+ pkg_spec[name]['rc'] = 1
+ pkg_spec[name]['changed'] = False
+ else:
+ module.debug("package_present(): stderr was not set for name '%s'" % name)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to make sure a package is the latest available version.
+def package_latest(names, pkg_spec, module):
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ upgrade_cmd = 'pkg_add -um'
+
+ if module.check_mode:
+ upgrade_cmd += 'n'
+
+ if module.params['clean']:
+ upgrade_cmd += 'c'
+
+ if module.params['quick']:
+ upgrade_cmd += 'q'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+
+ # Attempt to upgrade the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ pkg_spec[name]['changed'] = False
+ for installed_name in pkg_spec[name]['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search(r"\W%s->.+: ok\W" % installed_name, pkg_spec[name]['stdout'])
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+
+ pkg_spec[name]['changed'] = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+ if pkg_spec[name]['changed'] is not True:
+ if pkg_spec[name]['stderr']:
+ pkg_spec[name]['rc'] = 1
+
+ else:
+ # Note packages that need to be handled by package_present
+ module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
+ pkg_spec['package_latest_leftovers'].append(name)
+
+ # If there were any packages that were not installed we call
+ # package_present() which will handle those.
+ if pkg_spec['package_latest_leftovers']:
+ module.debug("package_latest(): calling package_present() to handle leftovers")
+ package_present(names, pkg_spec, module)
+
+
+# Function used to make sure a package is not installed.
+def package_absent(names, pkg_spec, module):
+ remove_cmd = 'pkg_delete -I'
+
+ if module.check_mode:
+ remove_cmd += 'n'
+
+ if module.params['clean']:
+ remove_cmd += 'c'
+
+ if module.params['quick']:
+ remove_cmd += 'q'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+ # Attempt to remove the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+ else:
+ pkg_spec[name]['changed'] = False
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
+def parse_package_name(names, pkg_spec, module):
+
+ # Initialize empty list of package_latest() leftovers.
+ pkg_spec['package_latest_leftovers'] = []
+
+ for name in names:
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # All information for a given name is kept in the pkg_spec keyed by that name.
+ pkg_spec[name] = {}
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = match.group('version')
+ pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'version'
+ module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+ "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = '-'
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'versionless'
+ module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a
+ # stem, possibly with a branch (%branchname) tacked on at the
+ # end.
+ else:
+ match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = None
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = None
+ pkg_spec[name]['flavor'] = None
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'stem'
+ module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # Verify that the managed host is new enough to support branch syntax.
+ if pkg_spec[name]['branch']:
+ branch_release = "6.0"
+
+ if StrictVersion(platform.release()) < StrictVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec[name]['flavor']:
+ match = re.search("-$", pkg_spec[name]['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec[name]['subpackage'] = None
+ if pkg_spec[name]['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("package_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec[name]['flavor']:
+ looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+ module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec[name]['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("package_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x: x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec[name]['subpackage'] = parts[1]
+ return parts[0]
+
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ # Create a minimal pkg_spec entry for '*' to store return values.
+ pkg_spec['*'] = {}
+
+ # Attempt to upgrade all packages.
+ pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+ # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+ if match:
+ pkg_spec['*']['changed'] = True
+
+ else:
+ pkg_spec['*']['changed'] = False
+
+ # It seems we can not trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if pkg_spec['*']['stderr']:
+ pkg_spec['*']['rc'] = 1
+ else:
+ pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build=dict(type='bool', default=False),
+ snapshot=dict(type='bool', default=False),
+ ports_dir=dict(type='path', default='/usr/ports'),
+ quick=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['snapshot', 'build']],
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ # The data structure used to keep track of package information.
+ pkg_spec = {}
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+        # Build sqlports if it is not installed yet.
+ parse_package_name(['sqlports'], pkg_spec, module)
+ get_package_state(['sqlports'], pkg_spec, module)
+ if not pkg_spec['sqlports']['installed_state']:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present(['sqlports'], pkg_spec, module)
+
+ asterisk_name = False
+ for n in name:
+ if n == '*':
+ if len(name) != 1:
+ module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+ asterisk_name = True
+
+ if asterisk_name:
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ upgrade_packages(pkg_spec, module)
+ else:
+ # Parse package names and put results in the pkg_spec dictionary.
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ for n in name:
+ if pkg_spec[n]['branch'] and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+ # Get state for all package names.
+ get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ package_present(name, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ package_absent(name, pkg_spec, module)
+ elif state == 'latest':
+ package_latest(name, pkg_spec, module)
+
+ # The combined changed status for all requested packages. If anything
+ # is changed this is set to True.
+ combined_changed = False
+
+ # The combined failed status for all requested packages. If anything
+ # failed this is set to True.
+ combined_failed = False
+
+ # We combine all error messages in this comma separated string, for example:
+ # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+ combined_error_message = ''
+
+ # Loop over all requested package names and check if anything failed or
+ # changed.
+ for n in name:
+ if pkg_spec[n]['rc'] != 0:
+ combined_failed = True
+ if pkg_spec[n]['stderr']:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stderr']
+ else:
+ combined_error_message = pkg_spec[n]['stderr']
+ else:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stdout']
+ else:
+ combined_error_message = pkg_spec[n]['stdout']
+
+ if pkg_spec[n]['changed'] is True:
+ combined_changed = True
+
+ # If combined_error_message contains anything at least some part of the
+ # list of requested package names failed.
+ if combined_failed:
+ module.fail_json(msg=combined_error_message, **result)
+
+ result['changed'] = combined_changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py
new file mode 100644
index 00000000..7da9a487
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/opkg.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt
+description:
+ - Manages OpenWrt packages
+options:
+ name:
+ description:
+      - Name of package(s) to install or remove. Multiple packages can be passed as a comma-separated list.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+      - State of the package.
+ choices: [ 'present', 'absent', 'installed', 'removed' ]
+ default: present
+ type: str
+ force:
+ description:
+      - The opkg C(--force) parameter to use.
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ type: str
+ update_cache:
+ description:
+      - Update the package DB first.
+ aliases: ['update-cache']
+ default: "no"
+ type: bool
+requirements:
+ - opkg
+ - python
+'''
+EXAMPLES = '''
+- name: Install foo
+ community.general.opkg:
+ name: foo
+ state: present
+
+- name: Update cache and install foo
+ community.general.opkg:
+ name: foo
+ state: present
+ update_cache: yes
+
+- name: Remove foo
+ community.general.opkg:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar
+ community.general.opkg:
+ name: foo,bar
+ state: absent
+
+- name: Install foo using overwrite option forcibly
+ community.general.opkg:
+ name: foo
+ state: present
+ force: overwrite
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def update_package_db(module, opkg_path):
+ """ Updates packages list. """
+
+ rc, out, err = module.run_command("%s update" % opkg_path)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def query_package(module, opkg_path, name, state="present"):
+ """ Returns whether a package is installed or not. """
+
+ if state == "present":
+
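+        # Grep the installed list for an exact name match; the shell pipeline requires use_unsafe_shell=True.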
+ rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, opkg_path, packages):
+ """ Uninstalls one or more packages if installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
+
+ if query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, opkg_path, packages):
+ """ Installs one or more packages if not already installed. """
+
+ p = module.params
+ force = p["force"]
+ if force:
+ force = "--force-%s" % force
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, opkg_path, package):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
+
+ if not query_package(module, opkg_path, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
+ )
+ )
+
+ opkg_path = module.get_bin_path('opkg', True, ['/bin'])
+
+ p = module.params
+
+ if p["update_cache"]:
+ update_package_db(module, opkg_path)
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] in ["present", "installed"]:
+ install_packages(module, opkg_path, pkgs)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_packages(module, opkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py
new file mode 100644
index 00000000..0931ddc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pacman.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright: (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright: (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants.
+author:
+ - Indrajit Raychaudhuri (@indrajitr)
+ - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com>
+ - Maxime de Roucy (@tchernomax)
+options:
+ name:
+ description:
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
+ Can't be used in combination with C(upgrade).
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Desired state of the package.
+ default: present
+ choices: [ absent, latest, present, installed, removed ]
+ type: str
+
+ force:
+ description:
+ - When removing a package, force its removal without any checks.
+ Same as `extra_args="--nodeps --nodeps"`.
+ When C(update_cache) is set, force a redownload of the repository databases.
+ Same as `update_cache_extra_args="--refresh --refresh"`.
+ default: no
+ type: bool
+
+ extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(state).
+ default:
+ type: str
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists.
+ - This can be run as part of a package installation or as a separate step.
+ default: no
+ type: bool
+ aliases: [ update-cache ]
+
+ update_cache_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(update_cache).
+ default:
+ type: str
+
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ Can't be used in combination with C(name).
+ default: no
+ type: bool
+
+ upgrade_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(upgrade).
+ default:
+ type: str
+
+notes:
+ - When used with a `loop:` each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when upgrade is set to yes
+ type: list
+ sample: [ package, other-package ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo from repo
+ community.general.pacman:
+ name: foo
+ state: present
+
+- name: Install package bar from file
+ community.general.pacman:
+ name: ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package foo from repo and bar from file
+ community.general.pacman:
+ name:
+ - foo
+ - ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Upgrade package foo
+ community.general.pacman:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Remove packages foo and bar
+ community.general.pacman:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ extra_args: --recursive
+
+- name: Run the equivalent of "pacman -Sy" as a separate step
+ community.general.pacman:
+ update_cache: yes
+
+- name: Run the equivalent of "pacman -Su" as a separate step
+ community.general.pacman:
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Syu" as a separate step
+ community.general.pacman:
+ update_cache: yes
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Rdd", force remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ force: yes
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_version(pacman_output):
+ """Take pacman -Q or pacman -S output and get the Version"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[1]
+ return None
+
+
+def get_name(module, pacman_output):
+ """Take pacman -Q or pacman -S output and get the package name"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[0]
+ module.fail_json(msg="get_name: failed to retrieve package name from pacman output")
+
+
+def query_package(module, pacman_path, name, state="present"):
+ """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
+ boolean to indicate if the package is up-to-date, and a third boolean that is True when the remote version could not be fetched
+ """
+ if state == "present":
+ lcmd = "%s --query %s" % (pacman_path, name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False, False
+ else:
+ # a zero exit code doesn't always mean the package is installed:
+ # for example, the queried name may be "provided" by another package
+ installed_name = get_name(module, lstdout)
+ if installed_name != name:
+ return False, False, False
+
+ # get the version installed locally (if any)
+ lversion = get_version(lstdout)
+
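+ # Ask the sync repositories for the candidate version without installing anything
+ # ("%n %v" prints "name version" for the package).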
+ rcmd = "%s --sync --print-format \"%%n %%v\" %s" % (pacman_path, name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ # get the version in the repository
+ rversion = get_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally, and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion), False
+
+ # package is installed but the remote version could not be fetched; the last True flags the error
+ return True, True, True
+
+
+def update_package_db(module, pacman_path):
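+ # force=yes appends --refresh a second time, which makes pacman re-download the
+ # sync databases even when they appear to be up to date.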
+ if module.params['force']:
+ module.params["update_cache_extra_args"] += " --refresh --refresh"
+
+ cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def upgrade(module, pacman_path):
+ cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"])
+ cmdneedrefresh = "%s --query --upgrades" % (pacman_path)
+ rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
+ data = stdout.split('\n')
+ data.remove('')
+ packages = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if rc == 0:
+ # Match lines of `pacman -Qu` output of the form:
+ # (package name) (before version-release) -> (after version-release)
+ # e.g., "ansible 2.7.1-1 -> 2.7.2-1"
+ regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)')
+ for p in data:
+ m = regex.search(p)
+ packages.append(m.group(1))
+ if module._diff:
+ diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
+ diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
+ if module.check_mode:
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
+
+
+def remove_packages(module, pacman_path, packages):
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if module.params["force"]:
+ module.params["extra_args"] += " --nodeps --nodeps"
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ if module._diff:
+ d = stdout.split('\n')[2].split(' ')[2:]
+ for i, pkg in enumerate(d):
+ d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
+ diff['before'] += "%s\n" % pkg
+ data.append('\n'.join(d))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pacman_path, state, packages, package_files):
+ install_c = 0
+ package_err = []
+ message = ""
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ to_install_repos = []
+ to_install_files = []
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present or state == latest and is up-to-date then skip
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if package_files[i]:
+ to_install_files.append(package_files[i])
+ else:
+ to_install_repos.append(package)
+
+ if to_install_repos:
+ cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
+
+ # As we pass `--needed`, pacman prints a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_repos)
+
+ if to_install_files:
+ cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
+
+ # As we pass `--needed`, pacman prints a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_files)
+
+ if state == 'latest' and len(package_err) > 0:
+ message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
+
+
+def check_packages(module, pacman_path, packages, state):
+ would_be_changed = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ 'before_header': '',
+ 'after_header': ''
+ }
+
+ for package in packages:
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+
+ if module._diff and (state == 'removed'):
+ diff['before_header'] = 'removed'
+ diff['before'] = '\n'.join(would_be_changed) + '\n'
+ elif module._diff and ((state == 'present') or (state == 'latest')):
+ diff['after_header'] = 'installed'
+ diff['after'] = '\n'.join(would_be_changed) + '\n'
+
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state), diff=diff)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
+
+
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ __, stdout, __ = module.run_command([pacman_path, "--sync", "--groups", "--quiet"], check_rc=True)
+ available_groups = stdout.splitlines()
+
+ for pkg in pkgs:
+ if pkg: # avoid empty strings
+ if pkg in available_groups:
+ # A group was found matching the package name: expand it
+ cmd = [pacman_path, "--sync", "--groups", "--quiet", pkg]
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ expanded.extend([name.strip() for name in stdout.splitlines()])
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=['pkg', 'package']),
+ state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']),
+ force=dict(type='bool', default=False),
+ extra_args=dict(type='str', default=''),
+ upgrade=dict(type='bool', default=False),
+ upgrade_extra_args=dict(type='str', default=''),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ update_cache_extra_args=dict(type='str', default=''),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ pacman_path = module.get_bin_path('pacman', True)
+ module.run_command_environ_update = dict(LC_ALL='C')
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ update_package_db(module, pacman_path)
+ if not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Updated the package master lists')
+
+ if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, pacman_path)
+
+ if p['name']:
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if not pkg: # avoid empty strings
+ continue
+ elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z|zst))?$", pkg):
+ # The package given is a filename, extract the raw pkg name from
+ # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pacman_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pacman_path, pkgs)
+ else:
+ module.exit_json(changed=False, msg="No package specified to work on.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py
new file mode 100644
index 00000000..266c073f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Peter Oliver <ansible@mavit.org.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+ name:
+ description:
+ - An FRMI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ choices: [ absent, latest, present, installed, removed, uninstalled ]
+ default: present
+ type: str
+ accept_licenses:
+ description:
+ - Accept any licences.
+ type: bool
+ default: no
+ aliases: [ accept, accept_licences ]
+ be_name:
+ description:
+ - Creates a new boot environment with the given name.
+ type: str
+ refresh:
+ description:
+ - Refresh publishers before execution.
+ type: bool
+ default: yes
+'''
+EXAMPLES = '''
+- name: Install Vim
+ community.general.pkg5:
+ name: editor/vim
+
+- name: Install Vim without refreshing publishers
+ community.general.pkg5:
+ name: editor/vim
+ refresh: no
+
+- name: Remove finger daemon
+ community.general.pkg5:
+ name: service/network/finger
+ state: absent
+
+- name: Install several packages at once
+ community.general.pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+ accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+ be_name=dict(type='str'),
+ refresh=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ packages = []
+
+ # pkg(5) FRMIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
+ for fragment in params['name']:
+ if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
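+ # Map each target state to a predicate selecting the packages that still need work
+ # and to the pkg(1) subcommand that performs it.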
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: (
+ not is_installed(module, p) or not is_latest(module, p)
+ ),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
+
+ if module.check_mode:
+ dry_run = ['-n']
+ else:
+ dry_run = []
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ if params['be_name']:
+ beadm = ['--be-name=' + module.params['be_name']]
+ else:
+ beadm = []
+
+ if params['refresh']:
+ no_refresh = []
+ else:
+ no_refresh = ['--no-refresh']
+
+ to_modify = list(filter(behaviour[state]['filter'], packages))
+ if to_modify:
+ rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
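+ # pkg(1) exits with 4 when there was nothing to do; report that as unchanged
+ # rather than as a failure.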
+ if rc == 4:
+ response['changed'] = False
+ response['failed'] = False
+ elif rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py
new file mode 100644
index 00000000..95d57765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkg5_publisher.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+ - This module configures which publishers a client will download IPS
+ packages from.
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ type: str
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ type: bool
+ enabled:
+ description:
+ - Is the repository enabled or disabled?
+ type: bool
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+'''
+EXAMPLES = '''
+- name: Fetch packages for the solaris publisher direct from Oracle
+ community.general.pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
+
+- name: Configure a publisher for locally-produced packages
+ community.general.pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list', elements='str'),
+ mirror=dict(type='list', elements='str'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
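+ # Only call set-publisher when a requested option differs from the current
+ # configuration (or the publisher does not exist yet); otherwise exit unchanged.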
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None:
+ if params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None and params['sticky']:
+ args.append('--sticky')
+ elif params['sticky'] is not None:
+ args.append('--non-sticky')
+
+ if params['enabled'] is not None and params['enabled']:
+ args.append('--enable')
+ elif params['enabled'] is not None:
+ args.append('--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def get_publishers(module):
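+ # Parse "pkg publisher -Ftsv": the first line is a tab-separated header whose
+ # lowercased fields become the keys for each publisher's values.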
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py
new file mode 100644
index 00000000..2937314f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgin.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al.
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+author:
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+options:
+ name:
+ description:
+ - Name of package to install/remove.
+ - Multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - Intended state of the package
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ type: bool
+ default: no
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ type: bool
+ default: no
+ clean:
+ description:
+ - Clean packages cache
+ type: bool
+ default: no
+ force:
+ description:
+ - Force package reinstall
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgin:
+ name: foo
+ state: present
+
+- name: Install specific version of foo package
+ community.general.pkgin:
+ name: foo-2.0.1
+ state: present
+
+- name: Update cache and install foo package
+ community.general.pkgin:
+ name: foo
+ update_cache: yes
+
+- name: Remove package foo
+ community.general.pkgin:
+ name: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.pkgin:
+ name: foo,bar
+ state: absent
+
+- name: Update repositories as a separate step
+ community.general.pkgin:
+ update_cache: yes
+
+- name: Upgrade main packages (equivalent to pkgin upgrade)
+ community.general.pkgin:
+ upgrade: yes
+
+- name: Upgrade all packages (equivalent to pkgin full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+
+- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+ force: yes
+
+- name: Clean packages cache (equivalent to pkgin clean)
+ community.general.pkgin:
+ clean: yes
+'''
+
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class PackageState(object):
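+ # Enumeration of query_package() results; the power-of-two values simply keep the
+ # states distinct, they are not combined as bit flags anywhere in this module.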
+ PRESENT = 1
+ NOT_INSTALLED = 2
+ OUTDATED = 4
+ NOT_FOUND = 8
+
+
+def query_package(module, name):
+ """Search for the package by name and return state of the package.
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+ # (results in something like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name not in (pkgname_with_version, pkgname_without_version):
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return PackageState.OUTDATED
+ elif raw_state == '=' or raw_state == '>':
+ return PackageState.PRESENT
+ else:
+ # Package found but not installed
+ return PackageState.NOT_INSTALLED
+ # no fall-through
+
+ # No packages were matched
+ return PackageState.NOT_FOUND
+
+ # Search failed
+ return PackageState.NOT_FOUND
+
+
+def format_action_message(module, action, count):
+ vars = {"actioned": action,
+ "count": count}
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, however this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]:
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ query_result = query_package(module, package)
+ if query_result in [PackageState.PRESENT, PackageState.OUTDATED]:
+ continue
+ elif query_result is PackageState.NOT_FOUND:
+ module.fail_json(msg="failed to find package %s for installation" % package)
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+ if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "database is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ full_upgrade=dict(default=False, type='bool'),
+ clean=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
+
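+ # Remember the pkgin binary path globally so the helper functions can build commands;
+ # /opt/local/bin is searched as well, since that is where pkgsrc installs it.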
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py
new file mode 100644
index 00000000..d5ed4a0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgng.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops."
+ - >
+ Warning: In Ansible 2.9 and earlier this module had a misfeature
+ where I(name=*) with I(state: latest) or I(state: present) would
+ install every package from every package repository, filling up
+ the machine's disk. Avoid using them unless you are certain that
+ your role will only be used with newer versions.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+ - A comma-separated list of keyvalue-pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ type: str
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+ - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
+ - Defines the C(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: no
+ version_added: 1.3.0
+author: "bleader (@bleader)"
+notes:
+ - When using pkgsite, be aware that packages already in the cache will not be downloaded again.
+ - When used with a `loop:` each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ community.general.pkgng:
+ name: baz
+ state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+ community.general.pkgng:
+ name: "*"
+ state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
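+ # Split the "pkg -v" output on dots and underscores and compare it element by
+ # element with compare_version; returns True when the installed pkg is older.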
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def upgrade_packages(module, pkgng_path, dir_arg):
+ # Run a 'pkg upgrade', updating all packages.
+ upgraded_c = 0
+
+ cmd = "%s %s upgrade -y" % (pkgng_path, dir_arg)
+ if module.check_mode:
+ cmd += " -n"
+ rc, out, err = module.run_command(cmd)
+
+ match = re.search('^Number of packages to be upgraded: ([0-9]+)', out, re.MULTILINE)
+ if match:
+ upgraded_c = int(match.group(1))
+
+ if upgraded_c > 0:
+ return (True, "updated %s package(s)" % upgraded_c, out, err)
+ return (False, "no packages need upgrades", out, err)
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+ remove_c = 0
+ stdout = ""
+ stderr = ""
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c, stdout, stderr)
+
+ return (False, "package(s) already absent", stdout, stderr)
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
+ install_c = 0
+ stdout = ""
+ stderr = ""
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if ignoreosver:
+ # Ignore FreeBSD OS version check,
+ # useful on -STABLE and -CURRENT branches.
+ batch_var = batch_var + ' IGNORE_OSVERSION=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s %s update" % (batch_var, pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr)
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c), stdout, stderr)
+
+ return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
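+ # Each annotation spec has the form "+tag=value", "-tag" or ":tag=value"; split the
+ # comma-separated string and parse out the operation, tag and optional value.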
+ annotations = map(lambda _annotation:
+ re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ stdout = ""
+ stderr = ""
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return (False, "no package(s) to autoremove", stdout, stderr)
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+
+ return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ cached=dict(default=False, type='bool'),
+ ignore_osver=dict(default=False, required=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ stdout = ""
+ stderr = ""
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["ignore_osver"]:
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if pkgs == ['*'] and p["state"] == 'latest':
+ # Operate on all installed packages. Only state: latest makes sense here.
+ _changed, _msg, _stdout, _stderr = upgrade_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ # Operate on named packages
+ named_packages = [pkg for pkg in pkgs if pkg != '*']
+ if p["state"] in ("present", "latest") and named_packages:
+ _changed, _msg, _out, _err = install_packages(module, pkgng_path, named_packages,
+ p["cached"], p["pkgsite"], dir_arg,
+ p["state"], p["ignore_osver"])
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent" and named_packages:
+ _changed, _msg, _out, _err = remove_packages(module, pkgng_path, named_packages, dir_arg)
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg, _stdout, _stderr = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py
new file mode 100644
index 00000000..9ec0ebaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pkgutil.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: pkgutil
+short_description: OpenCSW package management on Solaris
+description:
+- This module installs, updates and removes packages from the OpenCSW project for Solaris.
+- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+- See U(https://www.opencsw.org/) for more information about the project.
+author:
+- Alexander Winkler (@dermute)
+- David Ponessa (@scathatheworm)
+options:
+ name:
+ description:
+ - The name of the package.
+ - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ type: list
+ required: true
+ elements: str
+ aliases: [ pkg ]
+ site:
+ description:
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages.
+ - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ type: str
+ required: true
+ choices: [ absent, installed, latest, present, removed ]
+ update_catalog:
+ description:
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes).
+ type: bool
+ default: no
+ force:
+ description:
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes).
+ - This is useful for rolling back to stable from testing, or similar operations.
+ type: bool
+ default: false
+ version_added: 1.2.0
+notes:
+- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+'''
+
+EXAMPLES = r'''
+- name: Install a package
+ community.general.pkgutil:
+ name: CSWcommon
+ state: present
+
+- name: Install a package from a specific repository
+ community.general.pkgutil:
+ name: CSWnrpe
+ site: ftp://myinternal.repo/opencsw/kiel
+ state: latest
+
+- name: Remove a package
+ community.general.pkgutil:
+ name: CSWtop
+ state: absent
+
+- name: Install several packages
+ community.general.pkgutil:
+ name:
+ - CSWsudo
+ - CSWtop
+ state: present
+
+- name: Update all packages
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+
+- name: Update all packages and force versions to match latest in catalog
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+ force: yes
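+
+# Illustrative: update_catalog forces a catalog refresh from the mirror even when it is not stale
+- name: Install a package after refreshing the catalog
+  community.general.pkgutil:
+    name: CSWcommon
+    state: present
+    update_catalog: yes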
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def packages_not_installed(module, names):
+ ''' Check if each package is installed and return list of the ones absent '''
+ pkgs = []
+ for pkg in names:
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc != 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_installed(module, names):
+ ''' Check if each package is installed and return list of the ones present '''
+ pkgs = []
+ for pkg in names:
+ if not pkg.startswith('CSW'):
+ continue
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc == 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_not_latest(module, names, site, update_catalog):
+ ''' Check status of each package and return list of the ones with an upgrade available '''
+ cmd = ['pkgutil']
+ if update_catalog:
+ cmd.append('-U')
+ cmd.append('-c')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if names != ['*']:
+ cmd.extend(names)
+ rc, out, err = run_command(module, cmd)
+
+ # Find packages in the catalog which are not up to date
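+    # (assumed 'pkgutil -c' output format: the first line is a header, the package name is
+    # the first field on each line, and lines mentioning 'catalog' or 'SAME' refer to
+    # entries that are already current)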
+ packages = []
+ for line in out.split('\n')[1:-1]:
+ if 'catalog' not in line and 'SAME' not in line:
+ packages.append(line.split(' ')[0])
+
+ # Remove duplicates
+ return list(set(packages))
+
+
+def run_command(module, cmd, **kwargs):
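+    # Resolve the program to an absolute path, also searching /opt/csw/bin where OpenCSW installs its tools.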
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
+
+
+def package_install(module, state, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-iy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def package_upgrade(module, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-uy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd += pkgs
+ return run_command(module, cmd)
+
+
+def package_uninstall(module, pkgs):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-ry')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ site=dict(type='str'),
+ update_catalog=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ force = module.params['force']
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ if state in ['installed', 'present']:
+ # Fail with an explicit error when trying to "install" '*'
+ if name == ['*']:
+ module.fail_json(msg="Can not use 'state: present' with name: '*'")
+
+ # Build list of packages that are actually not installed from the ones requested
+ pkgs = packages_not_installed(module, name)
+
+ # If the package list is empty then all packages are already present
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['latest']:
+ # When using latest for *
+ if name == ['*']:
+ # Check for packages that are actually outdated
+ pkgs = packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list comes up empty, everything is already up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+            # If there are packages to update, empty the list and run the command without it:
+            # pkgutil updates everything when invoked without package names
+ pkgs = []
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+ else:
+ # Build list of packages that are either outdated or not installed
+ pkgs = packages_not_installed(module, name)
+ pkgs += packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list is empty that means all packages are installed and up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['absent', 'removed']:
+ # Build list of packages requested for removal that are actually present
+ pkgs = packages_installed(module, name)
+
+ # If the list is empty, no packages need to be removed
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_uninstall(module, pkgs)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent/up to date
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py
new file mode 100644
index 00000000..1f0fdc68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portage.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, William L Thomson Jr
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ aliases: [name]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - State of the package atom
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ type: str
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ type: bool
+ default: no
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ type: bool
+ default: no
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ type: bool
+ default: no
+
+ changed_use:
+ description:
+      - Include installed packages where USE flags have changed, except when
+        flags that the user has not enabled are added or removed (--changed-use)
+ type: bool
+ default: no
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ type: bool
+ default: no
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ type: bool
+ default: yes
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ type: bool
+ default: no
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ type: bool
+ default: no
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ type: bool
+ default: no
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ type: bool
+ default: no
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ type: bool
+ default: no
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ choices: [ "web", "yes", "no" ]
+ type: str
+
+ getbinpkgonly:
+ description:
+ - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+ version_added: 1.3.0
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling).
+ type: bool
+ default: no
+
+ usepkg:
+ description:
+ - Tries to use the binary package(s) in the locally available packages directory.
+ type: bool
+ default: no
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ type: bool
+ default: no
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ - "Since version 2.6: Value of 0 or False resets any previously added"
+ - --jobs setting values
+ type: int
+
+ loadavg:
+ description:
+      - Specifies that no new builds should be started if there are other builds running
+        and the load average is at least LOAD
+      - "Since version 2.6: Value of 0 or False resets any previously added --load-average setting values"
+ type: float
+
+ quietbuild:
+ description:
+      - Redirect all build output to logs alone, and do not display it
+        on stdout (--quiet-build)
+ type: bool
+ default: no
+
+ quietfail:
+ description:
+ - Suppresses display of the build log on stdout (--quiet-fail)
+      - Only the die message and the path of the build log will be
+        displayed on stdout.
+ type: bool
+ default: no
+
+requirements: [ gentoolkit ]
+author:
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+'''
+
+EXAMPLES = '''
+- name: Make sure package foo is installed
+ community.general.portage:
+ package: foo
+ state: present
+
+- name: Make sure package foo is not installed
+ community.general.portage:
+ package: foo
+ state: absent
+
+- name: Update package foo to the latest version (os specific alternative to latest)
+ community.general.portage:
+ package: foo
+ update: yes
+
+- name: Install package foo using PORTAGE_BINHOST setup
+ community.general.portage:
+ package: foo
+ getbinpkg: yes
+
+- name: Re-install world from binary packages only and do not allow any compiling
+ community.general.portage:
+ package: '@world'
+ usepkgonly: yes
+
+- name: Sync repositories and update world
+ community.general.portage:
+ package: '@world'
+ update: yes
+ deep: yes
+ sync: yes
+
+- name: Remove unneeded packages
+ community.general.portage:
+ depclean: yes
+
+- name: Remove package foo if it is not explicitly needed
+ community.general.portage:
+ package: foo
+ state: absent
+ depclean: yes
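+
+# Illustrative values: throttle parallel builds with the documented jobs and loadavg options
+- name: Update world with up to 4 parallel builds, limited by load average
+  community.general.portage:
+    package: '@world'
+    update: yes
+    deep: yes
+    jobs: 4
+    loadavg: 4.0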
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ """Run emerge command against given list of atoms."""
+ p = module.params
+
+ if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
+ for package in packages:
+ if p['noreplace'] and not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkgonly': '--getbinpkgonly',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
+ 'quietbuild': '--quiet-build',
+ 'quietfail': '--quiet-fail',
+ }
+ for flag, arg in emerge_flags.items():
+ if p[flag]:
+ args.append(arg)
+
+ if p['state'] and p['state'] == 'latest':
+ args.append("--update")
+
+ emerge_flags = {
+ 'jobs': '--jobs',
+ 'loadavg': '--load-average',
+ }
+
+ for flag, arg in emerge_flags.items():
+ flag_val = p[flag]
+
+ if flag_val is None:
+ """Fallback to default: don't use this argument at all."""
+ continue
+
+ if not flag_val:
+ """If the value is 0 or 0.0: add the flag, but not the value."""
+ args.append(arg)
+ continue
+
+ """Add the --flag=value pair."""
+ args.extend((arg, to_native(flag_val)))
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
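+    # --ask=n keeps emerge non-interactive; in check mode, --pretend turns the run into a dry run.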
+ args.append('--ask=n')
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed', 'latest']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(type='list', elements='str', default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=True, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web', 'no']),
+ getbinpkgonly=dict(default=False, type='bool'),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
+ quietbuild=dict(default=False, type='bool'),
+ quietfail=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[
+ ['nodeps', 'onlydeps'],
+ ['quiet', 'verbose'],
+ ['quietbuild', 'verbose'],
+ ['quietfail', 'verbose'],
+ ],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync'] and p['sync'].strip() != 'no':
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'])
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py
new file mode 100644
index 00000000..d1c33cc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/portinstall.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Install packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+options:
+ name:
+ description:
+      - Name of the package(s) to install or remove. Multiple packages can be given as a comma-separated list.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - state of the package
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+ - use packages instead of ports whenever available
+ type: bool
+ required: false
+ default: yes
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.portinstall:
+ name: foo
+ state: present
+
+- name: Install package security/cyrus-sasl2-saslauthd
+ community.general.portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.portinstall:
+ name: foo,bar
+ state: absent
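+
+# Illustrative: build from the ports tree instead of using pre-built packages
+- name: Install package foo, compiling from ports
+  community.general.portinstall:
+    name: foo
+    state: present
+    use_packages: no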
+'''
+
+import os
+import re
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+ # databases/mysql55-client installs as mysql-client, so try solving
+        # that the ugly way. Pity FreeBSD doesn't have a foolproof way of checking
+        # whether a package is installed
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+            # The query is identical for the pkgng and legacy pkg_info code paths
+            rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+ found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # counts the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+ shlex_quote(name_without_digits)),
+ use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+    # If portinstall is not found, install portupgrade (which provides portinstall) via pkg
+ portinstall_path = module.get_bin_path('portinstall', False)
+ if not portinstall_path:
+ pkg_path = module.get_bin_path('pkg', False)
+ if pkg_path:
+ module.run_command("pkg install -y portupgrade")
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages:
+ portinstall_params = "--use-packages"
+ else:
+ portinstall_params = ""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # TODO: check how many match
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py
new file mode 100644
index 00000000..8dbc6b9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/pulp_repo.py
@@ -0,0 +1,754 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Joe Adams <@sysadmind>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pulp_repo
+author: "Joe Adams (@sysadmind)"
+short_description: Add or remove Pulp repos from a remote host.
+description:
+ - Add or remove Pulp repos from a remote host.
+options:
+ add_export_distributor:
+ description:
+ - Whether or not to add the export distributor to new C(rpm) repositories.
+ type: bool
+ default: no
+ feed:
+ description:
+ - Upstream feed URL to receive updates from.
+ type: str
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the M(ansible.builtin.uri) module, only sends
+ authentication information when a webservice responds to an initial
+ request with a 401 status. Since some basic auth services do not
+ properly send a 401, logins will fail. This option forces the sending of
+ the Basic authentication header upon initial request.
+ type: bool
+ default: no
+ generate_sqlite:
+ description:
+ - Boolean flag to indicate whether sqlite files should be generated during
+ a repository publish.
+ required: false
+ type: bool
+ default: no
+ feed_ca_cert:
+ description:
+ - CA certificate string used to validate the feed source SSL certificate.
+ This can be the file content or the path to the file.
+ The ca_cert alias will be removed in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_ca_cert, ca_cert ]
+ feed_client_cert:
+ description:
+ - Certificate used as the client certificate when synchronizing the
+ repository. This is used to communicate authentication information to
+ the feed source. The value to this option must be the full path to the
+ certificate. The specified file may be the certificate itself or a
+ single file containing both the certificate and private key. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from I(client_cert); this default
+        will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_cert ]
+ feed_client_key:
+ description:
+ - Private key to the certificate specified in I(importer_ssl_client_cert),
+ assuming it is not included in the certificate file itself. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from I(client_key); this default
+        will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_key ]
+ name:
+ description:
+ - Name of the repo to add or remove. This correlates to repo-id in Pulp.
+ required: true
+ type: str
+ aliases: [ repo ]
+ proxy_host:
+ description:
+ - Proxy url setting for the pulp repository importer. This is in the
+ format scheme://host.
+ required: false
+ default: null
+ type: str
+ proxy_port:
+ description:
+ - Proxy port setting for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_username:
+ description:
+ - Proxy username for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_password:
+ description:
+ - Proxy password for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ publish_distributor:
+ description:
+ - Distributor to use when state is C(publish). The default is to
+ publish all distributors.
+ type: str
+ pulp_host:
+ description:
+ - URL of the pulp server to connect to.
+ default: https://127.0.0.1
+ type: str
+ relative_url:
+ description:
+ - Relative URL for the local repository. It's required when state=present.
+ type: str
+ repo_type:
+ description:
+ - Repo plugin type to use (i.e. C(rpm), C(docker)).
+ default: rpm
+ type: str
+ repoview:
+ description:
+ - Whether to generate repoview files for a published repository. Setting
+ this to "yes" automatically activates `generate_sqlite`.
+ required: false
+ type: bool
+ default: no
+ serve_http:
+ description:
+ - Make the repo available over HTTP.
+ type: bool
+ default: no
+ serve_https:
+ description:
+ - Make the repo available over HTTPS.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The repo state. A state of C(sync) will queue a sync of the repo.
+ This is asynchronous but not delayed like a scheduled sync. A state of
+ C(publish) will use the repository's distributor to publish the content.
+ default: present
+ choices: [ "present", "absent", "sync", "publish" ]
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication to the pulp API.
+ If the I(url_username) parameter is not specified, the I(url_password)
+ parameter will not be used.
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication to the pulp API.
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ wait_for_completion:
+ description:
+ - Wait for asynchronous tasks to complete before returning.
+ type: bool
+ default: no
+notes:
+ - This module can currently only create distributors and importers on rpm
+ repositories. Contributions to support other repo types are welcome.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Create a new repo with name 'my_repo'
+ community.general.pulp_repo:
+ name: my_repo
+ relative_url: my/repo
+ state: present
+
+- name: Create a repo with a feed and a relative URL
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ feed: http://mirror.centos.org/centos/6/updates/x86_64/
+ relative_url: centos/6/updates
+ url_username: admin
+ url_password: admin
+ force_basic_auth: yes
+ state: present
+
+- name: Remove a repo from the pulp server
+ community.general.pulp_repo:
+ name: my_old_repo
+ repo_type: rpm
+ state: absent
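+
+# Illustrative: queue a sync of the repo and wait for the spawned tasks to finish
+- name: Sync the repo and wait for completion
+  community.general.pulp_repo:
+    name: my_repo
+    repo_type: rpm
+    state: sync
+    wait_for_completion: yes
+
+# Illustrative: publish the repo through a specific distributor (yum_distributor is the
+# id this module assigns to rpm repos it creates)
+- name: Publish the repo using the yum distributor
+  community.general.pulp_repo:
+    name: my_repo
+    repo_type: rpm
+    state: publish
+    publish_distributor: yum_distributor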
+'''
+
+RETURN = '''
+repo:
+ description: Name of the repo that the action was performed on.
+ returned: success
+ type: str
+ sample: my_repo
+'''
+
+import json
+import os
+from time import sleep
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+
+
+class pulp_server(object):
+ """
+ Class to interact with a Pulp server
+ """
+
+ def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
+ self.module = module
+ self.host = pulp_host
+ self.repo_type = repo_type
+ self.repo_cache = dict()
+ self.wait_for_completion = wait_for_completion
+
+ def check_repo_exists(self, repo_id):
+ try:
+ self.get_repo_config_by_id(repo_id)
+ except IndexError:
+ return False
+ else:
+ return True
+
+ def compare_repo_distributor_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ for key, value in kwargs.items():
+ if key not in distributor['config'].keys():
+ return False
+
+ if not distributor['config'][key] == value:
+ return False
+
+ return True
+
+ def compare_repo_importer_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for importer in repo_config['importers']:
+ for key, value in kwargs.items():
+ if value is not None:
+ if key not in importer['config'].keys():
+ return False
+
+ if not importer['config'][key] == value:
+ return False
+
+ return True
+
+ def create_repo(
+ self,
+ repo_id,
+ relative_url,
+ feed=None,
+ generate_sqlite=False,
+ serve_http=False,
+ serve_https=True,
+ proxy_host=None,
+ proxy_port=None,
+ proxy_username=None,
+ proxy_password=None,
+ repoview=False,
+ ssl_ca_cert=None,
+ ssl_client_cert=None,
+ ssl_client_key=None,
+ add_export_distributor=False
+ ):
+ url = "%s/pulp/api/v2/repositories/" % self.host
+ data = dict()
+ data['id'] = repo_id
+ data['distributors'] = []
+
+ if self.repo_type == 'rpm':
+ yum_distributor = dict()
+ yum_distributor['distributor_id'] = "yum_distributor"
+ yum_distributor['distributor_type_id'] = "yum_distributor"
+ yum_distributor['auto_publish'] = True
+ yum_distributor['distributor_config'] = dict()
+ yum_distributor['distributor_config']['http'] = serve_http
+ yum_distributor['distributor_config']['https'] = serve_https
+ yum_distributor['distributor_config']['relative_url'] = relative_url
+ yum_distributor['distributor_config']['repoview'] = repoview
+ yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(yum_distributor)
+
+ if add_export_distributor:
+ export_distributor = dict()
+ export_distributor['distributor_id'] = "export_distributor"
+ export_distributor['distributor_type_id'] = "export_distributor"
+ export_distributor['auto_publish'] = False
+ export_distributor['distributor_config'] = dict()
+ export_distributor['distributor_config']['http'] = serve_http
+ export_distributor['distributor_config']['https'] = serve_https
+ export_distributor['distributor_config']['relative_url'] = relative_url
+ export_distributor['distributor_config']['repoview'] = repoview
+ export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(export_distributor)
+
+ data['importer_type_id'] = "yum_importer"
+ data['importer_config'] = dict()
+
+ if feed:
+ data['importer_config']['feed'] = feed
+
+ if proxy_host:
+ data['importer_config']['proxy_host'] = proxy_host
+
+ if proxy_port:
+ data['importer_config']['proxy_port'] = proxy_port
+
+ if proxy_username:
+ data['importer_config']['proxy_username'] = proxy_username
+
+ if proxy_password:
+ data['importer_config']['proxy_password'] = proxy_password
+
+ if ssl_ca_cert:
+ data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
+
+ if ssl_client_cert:
+ data['importer_config']['ssl_client_cert'] = ssl_client_cert
+
+ if ssl_client_key:
+ data['importer_config']['ssl_client_key'] = ssl_client_key
+
+ data['notes'] = {
+ "_repo-type": "rpm-repo"
+ }
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 201:
+ self.module.fail_json(
+ msg="Failed to create repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+ else:
+ return True
+
+ def delete_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='DELETE')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to delete repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def get_repo_config_by_id(self, repo_id):
+ if repo_id not in self.repo_cache.keys():
+ repo_array = [x for x in self.repo_list if x['id'] == repo_id]
+ self.repo_cache[repo_id] = repo_array[0]
+
+ return self.repo_cache[repo_id]
+
+ def publish_repo(self, repo_id, publish_distributor):
+ url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
+
+ # If there's no distributor specified, we will publish them all
+ if publish_distributor is None:
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ data = dict()
+ data['id'] = distributor['id']
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=distributor['id'])
+ else:
+ data = dict()
+ data['id'] = publish_distributor
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=publish_distributor)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def sync_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to schedule a sync of the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def update_repo_distributor_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ distributor_url = "%s%s/" % (url, distributor['id'])
+ data = dict()
+ data['distributor_config'] = dict()
+
+ for key, value in kwargs.items():
+ data['distributor_config'][key] = value
+
+ response, info = fetch_url(
+ self.module,
+ distributor_url,
+ data=json.dumps(data),
+ method='PUT')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the relative url for the repository.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ def update_repo_importer_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
+ data = dict()
+ importer_config = dict()
+
+ for key, value in kwargs.items():
+ if value is not None:
+ importer_config[key] = value
+
+ data['importer_config'] = importer_config
+
+ if self.repo_type == 'rpm':
+ data['importer_type_id'] = "yum_importer"
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the repo importer configuration",
+ status_code=info['status'],
+ response=info['msg'],
+ importer_config=importer_config,
+ url=url)
+
+ def set_repo_list(self):
+ url = "%s/pulp/api/v2/repositories/?details=true" % self.host
+ response, info = fetch_url(self.module, url, method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Request failed",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ self.repo_list = json.load(response)
+
+ def verify_tasks_completed(self, response_dict):
+ for task in response_dict['spawned_tasks']:
+ task_url = "%s%s" % (self.host, task['_href'])
+
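+            # Poll each spawned task until it reports a terminal state ('finished' or 'error'),
+            # sleeping two seconds between checks.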
+ while True:
+ response, info = fetch_url(
+ self.module,
+ task_url,
+ data='',
+ method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Failed to check async task status.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=task_url)
+
+ task_dict = json.load(response)
+
+ if task_dict['state'] == 'finished':
+ return True
+
+ if task_dict['state'] == 'error':
+ self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
+
+ sleep(2)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ add_export_distributor=dict(default=False, type='bool'),
+ feed=dict(),
+ generate_sqlite=dict(default=False, type='bool'),
+ feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'],
+ deprecated_aliases=[dict(name='ca_cert', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ feed_client_cert=dict(aliases=['importer_ssl_client_cert']),
+ feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True),
+ name=dict(required=True, aliases=['repo']),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ proxy_username=dict(),
+ proxy_password=dict(no_log=True),
+ publish_distributor=dict(),
+ pulp_host=dict(default="https://127.0.0.1"),
+ relative_url=dict(),
+ repo_type=dict(default="rpm"),
+ repoview=dict(default=False, type='bool'),
+ serve_http=dict(default=False, type='bool'),
+ serve_https=dict(default=True, type='bool'),
+ state=dict(
+ default="present",
+ choices=['absent', 'present', 'sync', 'publish']),
+ wait_for_completion=dict(default=False, type="bool"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ add_export_distributor = module.params['add_export_distributor']
+ feed = module.params['feed']
+ generate_sqlite = module.params['generate_sqlite']
+ importer_ssl_ca_cert = module.params['feed_ca_cert']
+ importer_ssl_client_cert = module.params['feed_client_cert']
+ if importer_ssl_client_cert is None and module.params['client_cert'] is not None:
+ importer_ssl_client_cert = module.params['client_cert']
+ module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the "
+ "Pulp instance, use the new options `feed_client_cert` and `feed_client_key` (available since "
+ "Ansible 2.9.2). Until community.general 3.0.0, the default value for `feed_client_cert` will be "
+ "taken from `client_cert` if only the latter is specified",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ importer_ssl_client_key = module.params['feed_client_key']
+ if importer_ssl_client_key is None and module.params['client_key'] is not None:
+ importer_ssl_client_key = module.params['client_key']
+ module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. Until community.general 3.0.0 the default "
+ "value will come from client_key option",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ proxy_host = module.params['proxy_host']
+ proxy_port = module.params['proxy_port']
+ proxy_username = module.params['proxy_username']
+ proxy_password = module.params['proxy_password']
+ publish_distributor = module.params['publish_distributor']
+ pulp_host = module.params['pulp_host']
+ relative_url = module.params['relative_url']
+ repo = module.params['name']
+ repo_type = module.params['repo_type']
+ repoview = module.params['repoview']
+ serve_http = module.params['serve_http']
+ serve_https = module.params['serve_https']
+ state = module.params['state']
+ wait_for_completion = module.params['wait_for_completion']
+
+ if (state == 'present') and (not relative_url):
+ module.fail_json(msg="When state is present, relative_url is required.")
+
+ # Ensure that the importer_ssl_* is the content and not a file path
+ if importer_ssl_ca_cert is not None:
+ importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
+ if os.path.isfile(importer_ssl_ca_cert_file_path):
+ importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r')
+ try:
+ importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read()
+ finally:
+ importer_ssl_ca_cert_file_object.close()
+
+ if importer_ssl_client_cert is not None:
+ importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
+ if os.path.isfile(importer_ssl_client_cert_file_path):
+ importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r')
+ try:
+ importer_ssl_client_cert = importer_ssl_client_cert_file_object.read()
+ finally:
+ importer_ssl_client_cert_file_object.close()
+
+ if importer_ssl_client_key is not None:
+ importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
+ if os.path.isfile(importer_ssl_client_key_file_path):
+ importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r')
+ try:
+ importer_ssl_client_key = importer_ssl_client_key_file_object.read()
+ finally:
+ importer_ssl_client_key_file_object.close()
+
+ server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
+ server.set_repo_list()
+ repo_exists = server.check_repo_exists(repo)
+
+ changed = False
+
+ if state == 'absent' and repo_exists:
+ if not module.check_mode:
+ server.delete_repo(repo)
+
+ changed = True
+
+ if state == 'sync':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be synced.")
+
+ if not module.check_mode:
+ server.sync_repo(repo)
+
+ changed = True
+
+ if state == 'publish':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be published.")
+
+ if not module.check_mode:
+ server.publish_repo(repo, publish_distributor)
+
+ changed = True
+
+ if state == 'present':
+ if not repo_exists:
+ if not module.check_mode:
+ server.create_repo(
+ repo_id=repo,
+ relative_url=relative_url,
+ feed=feed,
+ generate_sqlite=generate_sqlite,
+ serve_http=serve_http,
+ serve_https=serve_https,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ repoview=repoview,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key,
+ add_export_distributor=add_export_distributor)
+
+ changed = True
+
+ else:
+ # Check to make sure all the settings are correct
+ # The importer config gets overwritten on set and not updated, so
+ # we set the whole config at the same time.
+ if not server.compare_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key
+ ):
+ if not module.check_mode:
+ server.update_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key)
+
+ changed = True
+
+ if relative_url is not None:
+ if not server.compare_repo_distributor_config(
+ repo,
+ relative_url=relative_url
+ ):
+ if not module.check_mode:
+ server.update_repo_distributor_config(
+ repo,
+ relative_url=relative_url)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, repoview=repoview):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, repoview=repoview)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, http=serve_http):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, http=serve_http)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, https=serve_https):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, https=serve_https)
+
+ changed = True
+
+ module.exit_json(changed=changed, repo=repo)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py
new file mode 100644
index 00000000..a4599588
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/redhat_subscription.py
@@ -0,0 +1,930 @@
+#!/usr/bin/python
+
+# James Laska (jlaska@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redhat_subscription
+short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
+description:
+ - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
+author: "Barnaby Court (@barnabycourt)"
+notes:
+ - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
+ I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
+ I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to None.
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ choices: [ "present", "absent" ]
+ default: "present"
+ type: str
+ username:
+ description:
+ - access.redhat.com or Sat6 username
+ type: str
+ password:
+ description:
+ - access.redhat.com or Sat6 password
+ type: str
+ server_hostname:
+ description:
+ - Specify an alternative Red Hat Subscription Management or Sat6 server
+ type: str
+ server_insecure:
+ description:
+ - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ type: str
+ rhsm_baseurl:
+ description:
+ - Specify CDN baseurl
+ type: str
+ rhsm_repo_ca_cert:
+ description:
+ - Specify an alternative location for a CA certificate for CDN
+ type: str
+ server_proxy_hostname:
+ description:
+ - Specify a HTTP proxy hostname
+ type: str
+ server_proxy_port:
+ description:
+ - Specify a HTTP proxy port
+ type: str
+ server_proxy_user:
+ description:
+ - Specify a user for HTTP proxy with basic authentication
+ type: str
+ server_proxy_password:
+ description:
+ - Specify a password for HTTP proxy with basic authentication
+ type: str
+ auto_attach:
+ description:
+ - Upon successful registration, auto-consume available subscriptions
+ - Added in favor of deprecated autosubscribe in 2.5.
+ type: bool
+ aliases: [autosubscribe]
+ activationkey:
+ description:
+ - supply an activation key for use with registration
+ type: str
+ org_id:
+ description:
+ - Organization ID to use in conjunction with activationkey
+ type: str
+ environment:
+ description:
+ - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
+ type: str
+ pool:
+ description:
+ - |
+ Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
+ possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ default: '^$'
+ type: str
+ pool_ids:
+ description:
+ - |
+ Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
+        C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ default: []
+ type: list
+ consumer_type:
+ description:
+ - The type of unit to register, defaults to system
+ type: str
+ consumer_name:
+ description:
+ - Name of the system to register, defaults to the hostname
+ type: str
+ consumer_id:
+ description:
+ - |
+ References an existing consumer ID to resume using a previous registration
+ for this system. If the system's identity certificate is lost or corrupted,
+ this option allows it to resume using its previous identity and subscriptions.
+ The default is to not specify a consumer ID so a new ID is created.
+ type: str
+ force_register:
+ description:
+ - Register the system even if it is already registered
+ type: bool
+ default: no
+ release:
+ description:
+ - Set a release version
+ type: str
+ syspurpose:
+ description:
+ - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)
+ and synchronize these attributes with RHSM server. Syspurpose attributes help attach
+ the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file
+ already contains some attributes, then new attributes overwrite existing attributes.
+ When some attribute is not listed in the new list of attributes, the existing
+ attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.
+ type: dict
+ default: {}
+ suboptions:
+ usage:
+ description: Syspurpose attribute usage
+ type: str
+ role:
+ description: Syspurpose attribute role
+ type: str
+ service_level_agreement:
+ description: Syspurpose attribute service_level_agreement
+ type: str
+ addons:
+ description: Syspurpose attribute addons
+ type: list
+ sync:
+ description:
+ - When this option is true, then syspurpose attributes are synchronized with
+ RHSM server immediately. When this option is false, then syspurpose attributes
+ will be synchronized with RHSM server by rhsmcertd daemon.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+
+- name: Same as above but subscribe to a specific pool by ID.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids: 0123456789abcdef0123456789abcdef
+
+- name: Register and subscribe to multiple pools.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef: 2
+ - 1123456789abcdef0123456789abcdef: 4
+
+- name: Register and pull existing system data.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+
+- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^Red Hat Enterprise Server$'
+
+- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ auto_attach: true
+
+- name: Register as user (joe_user) with password (somepass) and a specific release
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ release: 7.4
+
+- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+ syspurpose:
+ usage: "Production"
+ role: "Red Hat Enterprise Server"
+ service_level_agreement: "Premium"
+ addons:
+ - addon1
+ - addon2
+ sync: true
+'''
+
+RETURN = '''
+subscribed_pool_ids:
+ description: List of pool IDs to which the system is now subscribed.
+ returned: success
+ type: complex
+ sample: {
+ "8a85f9815ab905d3015ab928c7005de4": "1"
+ }
+'''
+
+from os.path import isfile
+from os import unlink
+import re
+import shutil
+import tempfile
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves import configparser
+
+
+SUBMAN_CMD = None
+
+
+class RegistrationBase(object):
+
+ REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
+
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
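+ # Edit a temporary copy of the plugin configuration and move it into place
+ # atomically, so a failed write cannot leave a truncated config behind.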
+ if isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ with open(tmpfile, 'w+') as fd:
+ cfg.write(fd)
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.module = module
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHSM
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ args = [SUBMAN_CMD, 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
+ options = []
+ for k, v in sorted(kwargs.items()):
+ if re.search(r'^(server|rhsm)_', k) and v is not None:
+ options.append('--%s=%s' % (k.replace('_', '.', 1), v))
+
+ # When there is nothing to configure, it is not necessary to run the
+ # config command, because it would only print the current content of the
+ # configuration file.
+ if len(options) == 0:
+ return
+
+ args.extend(options)
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHSM.
+ '''
+
+ args = [SUBMAN_CMD, 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register, environment,
+ rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
+ server_proxy_port, server_proxy_user, server_proxy_password, release):
+ '''
+ Register the current system to the provided RHSM or Sat6 server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'register']
+
+ # Generate command arguments
+ if force_register:
+ args.extend(['--force'])
+
+ if rhsm_baseurl:
+ args.extend(['--baseurl', rhsm_baseurl])
+
+ if server_insecure:
+ args.extend(['--insecure'])
+
+ if server_hostname:
+ args.extend(['--serverurl', server_hostname])
+
+ if org_id:
+ args.extend(['--org', org_id])
+
+ if server_proxy_hostname and server_proxy_port:
+ args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
+
+ if server_proxy_user:
+ args.extend(['--proxyuser', server_proxy_user])
+
+ if server_proxy_password:
+ args.extend(['--proxypassword', server_proxy_password])
+
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if auto_attach:
+ args.append('--auto-attach')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+ if consumer_type:
+ args.extend(['--type', consumer_type])
+ if consumer_name:
+ args.extend(['--name', consumer_name])
+ if consumer_id:
+ args.extend(['--consumerid', consumer_id])
+ if environment:
+ args.extend(['--environment', environment])
+
+ if release:
+ args.extend(['--release', release])
+
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def unsubscribe(self, serials=None):
+ '''
+ Unsubscribe a system from subscribed channels
+ Args:
+ serials(list or None): list of serials to unsubscribe. If
+ serials is none or an empty list, then
+ all subscribed channels will be removed.
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ items = []
+ if serials is not None and serials:
+ items = ["--serial=%s" % s for s in serials]
+ if serials is None:
+ items = ["--all"]
+
+ if items:
+ args = [SUBMAN_CMD, 'unsubscribe'] + items
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return serials
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression. It matches regexp against available pool ids first.
+ If any pool ids match, subscribe to those pools and return.
+
+ If no pool ids match, then match regexp against available pool product
+ names. Note this can still easily match many pools. Then subscribe
+ to those pools.
+
+ Since a pool id is a more specific match, we only fall back to matching
+ against names if we didn't match pool ids.
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ # See https://github.com/ansible/ansible/issues/19466
+
+ # subscribe to pools whose pool id matches regexp (and only the pool id)
+ subscribed_pool_ids = self.subscribe_pool(regexp)
+
+ # If we found any matches, we are done
+ # Don't attempt to match pools by product name
+ if subscribed_pool_ids:
+ return subscribed_pool_ids
+
+ # We didn't match any pool ids.
+ # Now try subscribing to pools based on product name match
+ # Note: This can match lots of product names.
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+ if subscribed_by_product_pool_ids:
+ return subscribed_by_product_pool_ids
+
+ # no matches
+ return []
+
+ def subscribe_by_pool_ids(self, pool_ids):
+ """
+ Try to subscribe to the list of pool IDs
+ """
+ available_pools = RhsmPools(self.module)
+
+ available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if pool_id in available_pool_ids:
+ args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+ if quantity is not None:
+ args.extend(['--quantity', to_native(quantity)])
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ else:
+ self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+ return pool_ids
+
+ def subscribe_pool(self, regexp):
+ '''
+ Subscribe current system to available pools whose pool ID matches the
+ specified regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_pools(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def subscribe_product(self, regexp):
+ '''
+ Subscribe current system to available pools whose product name matches
+ the specified regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_products(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def update_subscriptions(self, regexp):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+ pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+ pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+ serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ subscribed_pool_ids = self.subscribe(regexp)
+
+ if subscribed_pool_ids or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+ 'unsubscribed_serials': serials}
+
+ def update_subscriptions_by_pool_ids(self, pool_ids):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+
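+ # Reconcile what is currently consumed with what was requested: remove any
+ # serial whose consumed quantity differs from the requested one, then attach
+ # the pools (and quantities) that are still missing.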
+ existing_pools = {}
+ for p in consumed_pools:
+ existing_pools[p.get_pool_id()] = p.QuantityUsed
+
+ serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ missing_pools = {}
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if existing_pools.get(pool_id, 0) != quantity:
+ missing_pools[pool_id] = quantity
+
+ self.subscribe_by_pool_ids(missing_pools)
+
+ if missing_pools or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
+ 'unsubscribed_serials': serials}
+
+ def sync_syspurpose(self):
+ """
+ Try to synchronize syspurpose attributes with server
+ """
+ args = [SUBMAN_CMD, 'status']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def get_pool_id(self):
+ return getattr(self, 'PoolId', getattr(self, 'PoolID'))
+
+ def subscribe(self):
+ args = "subscription-manager attach --pool %s" % self.get_pool_id()
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+
+ def __init__(self, module, consumed=False):
+ self.module = module
+ self.products = self._load_product_list(consumed)
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self, consumed=False):
+ """
+ Load the list of all available or consumed pools for the system into a data structure
+
+ Args:
+ consumed(bool): if True list consumed pools, else list available pools (default False)
+ """
+ args = "subscription-manager list"
+ if consumed:
+ args += " --consumed"
+ else:
+ args += " --available"
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
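+ # The output is a series of "Key: Value" lines, one block per pool; LANG and
+ # LC_* are pinned to C above so the field labels stay in English and the
+ # parsing below remains reliable.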
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # Strip spaces so e.g. 'Pool ID' becomes the attribute name 'PoolID'
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter_pools(self, regexp='^$'):
+ '''
+ Yield RhsmPools whose pool id matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product.get_pool_id()):
+ yield product
+
+ def filter_products(self, regexp='^$'):
+ '''
+ Yield RhsmPools whose product name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
+
+
+class SysPurpose(object):
+ """
+ This class is used for reading and writing to syspurpose.json file
+ """
+
+ SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+ ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+ def __init__(self, path=None):
+ """
+ Initialize class used for reading syspurpose json file
+ """
+ self.path = path or self.SYSPURPOSE_FILE_PATH
+
+ def update_syspurpose(self, new_syspurpose):
+ """
+ Try to update current syspurpose with new attributes from new_syspurpose
+ """
+ syspurpose = {}
+ syspurpose_changed = False
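+ # Keep only the allowed attributes from the request; 'sync' is handled by the
+ # module itself and any other unexpected key raises an error.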
+ for key, value in new_syspurpose.items():
+ if key in self.ALLOWED_ATTRIBUTES:
+ if value is not None:
+ syspurpose[key] = value
+ elif key == 'sync':
+ pass
+ else:
+ raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+ (key, self.ALLOWED_ATTRIBUTES))
+ current_syspurpose = self._read_syspurpose()
+ if current_syspurpose != syspurpose:
+ syspurpose_changed = True
+ # Update current syspurpose with new values
+ current_syspurpose.update(syspurpose)
+ # When some key is not listed in new syspurpose, then delete it from current syspurpose
+ # and ignore custom attributes created by user (e.g. "foo": "bar")
+ for key in list(current_syspurpose):
+ if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+ del current_syspurpose[key]
+ self._write_syspurpose(current_syspurpose)
+ return syspurpose_changed
+
+ def _write_syspurpose(self, new_syspurpose):
+ """
+ Write the new syspurpose attributes to the JSON file.
+ """
+ with open(self.path, "w") as fp:
+ fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+ def _read_syspurpose(self):
+ """
+ Read the current syspurpose from the JSON file.
+ """
+ current_syspurpose = {}
+ try:
+ with open(self.path, "r") as fp:
+ content = fp.read()
+ except IOError:
+ pass
+ else:
+ current_syspurpose = json.loads(content)
+ return current_syspurpose
+
+
+def main():
+
+ # Load RHSM configuration from file
+ rhsm = Rhsm(None)
+
+ # Note: the default values for parameters are:
+ # 'type': 'str', 'default': None, 'required': False
+ # So there is no need to repeat these values for each parameter.
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'server_hostname': {},
+ 'server_insecure': {},
+ 'rhsm_baseurl': {},
+ 'rhsm_repo_ca_cert': {},
+ 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'activationkey': {'no_log': True},
+ 'org_id': {},
+ 'environment': {},
+ 'pool': {'default': '^$'},
+ 'pool_ids': {'default': [], 'type': 'list'},
+ 'consumer_type': {},
+ 'consumer_name': {},
+ 'consumer_id': {},
+ 'force_register': {'default': False, 'type': 'bool'},
+ 'server_proxy_hostname': {},
+ 'server_proxy_port': {},
+ 'server_proxy_user': {},
+ 'server_proxy_password': {'no_log': True},
+ 'release': {},
+ 'syspurpose': {
+ 'type': 'dict',
+ 'options': {
+ 'role': {},
+ 'usage': {},
+ 'service_level_agreement': {},
+ 'addons': {'type': 'list'},
+ 'sync': {'type': 'bool', 'default': False}
+ }
+ }
+ },
+ required_together=[['username', 'password'],
+ ['server_proxy_hostname', 'server_proxy_port'],
+ ['server_proxy_user', 'server_proxy_password']],
+ mutually_exclusive=[['activationkey', 'username'],
+ ['activationkey', 'consumer_id'],
+ ['activationkey', 'environment'],
+ ['activationkey', 'autosubscribe'],
+ ['pool', 'pool_ids']],
+ required_if=[['state', 'present', ['username', 'activationkey'], True]],
+ )
+
+ rhsm.module = module
+ state = module.params['state']
+ username = module.params['username']
+ password = module.params['password']
+ server_hostname = module.params['server_hostname']
+ server_insecure = module.params['server_insecure']
+ rhsm_baseurl = module.params['rhsm_baseurl']
+ rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
+ auto_attach = module.params['auto_attach']
+ activationkey = module.params['activationkey']
+ org_id = module.params['org_id']
+ if activationkey and not org_id:
+ module.fail_json(msg='org_id is required when using activationkey')
+ environment = module.params['environment']
+ pool = module.params['pool']
+ pool_ids = {}
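+ # Normalize pool_ids into a {pool_id: quantity} mapping: plain strings get a
+ # quantity of None, single-entry dicts carry an explicit quantity.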
+ for value in module.params['pool_ids']:
+ if isinstance(value, dict):
+ if len(value) != 1:
+ module.fail_json(msg='Unable to parse pool_ids option.')
+ pool_id, quantity = list(value.items())[0]
+ else:
+ pool_id, quantity = value, None
+ pool_ids[pool_id] = quantity
+ consumer_type = module.params["consumer_type"]
+ consumer_name = module.params["consumer_name"]
+ consumer_id = module.params["consumer_id"]
+ force_register = module.params["force_register"]
+ server_proxy_hostname = module.params['server_proxy_hostname']
+ server_proxy_port = module.params['server_proxy_port']
+ server_proxy_user = module.params['server_proxy_user']
+ server_proxy_password = module.params['server_proxy_password']
+ release = module.params['release']
+ syspurpose = module.params['syspurpose']
+
+ global SUBMAN_CMD
+ SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
+
+ syspurpose_changed = False
+ if syspurpose is not None:
+ try:
+ syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
+ except Exception as err:
+ module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Register system
+ if rhsm.is_registered and not force_register:
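+ # The system is already registered and no re-registration was forced, so
+ # only sync syspurpose and update the consumed subscriptions as needed.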
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ try:
+ rhsm.sync_syspurpose()
+ except Exception as e:
+ module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
+ if pool != '^$' or pool_ids:
+ try:
+ if pool_ids:
+ result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
+ else:
+ result = rhsm.update_subscriptions(pool)
+ except Exception as e:
+ module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(**result)
+ else:
+ if syspurpose_changed is True:
+ module.exit_json(changed=True, msg="Syspurpose attributes changed.")
+ else:
+ module.exit_json(changed=False, msg="System already registered.")
+ else:
+ try:
+ rhsm.enable()
+ rhsm.configure(**module.params)
+ rhsm.register(username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register,
+ environment, rhsm_baseurl, server_insecure, server_hostname,
+ server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ rhsm.sync_syspurpose()
+ if pool_ids:
+ subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
+ elif pool != '^$':
+ subscribed_pool_ids = rhsm.subscribe(pool)
+ else:
+ subscribed_pool_ids = []
+ except Exception as e:
+ module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(changed=True,
+ msg="System successfully registered to '%s'." % server_hostname,
+ subscribed_pool_ids=subscribed_pool_ids)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhsm.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+ else:
+ try:
+ rhsm.unsubscribe()
+ rhsm.unregister()
+ except Exception as e:
+ module.fail_json(msg="Failed to unregister: %s" % to_native(e))
+ else:
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py
new file mode 100644
index 00000000..63be0323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_channel.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# Copyright: (c) Vincent Van de Kussen
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhn_channel
+short_description: Adds or removes Red Hat software channels
+description:
+ - Adds or removes Red Hat software channels.
+author:
+- Vincent Van der Kussen (@vincentvdk)
+notes:
+ - This module fetches the system id from RHN.
+ - This module doesn't support I(check_mode).
+options:
+ name:
+ description:
+ - Name of the software channel.
+ required: true
+ type: str
+ sysname:
+ description:
+ - Name of the system as it is known in RHN/Satellite.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the channel should be present or not, taking action if the state is different from what is stated.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ url:
+ description:
+ - The full URL to the RHN/Satellite API.
+ required: true
+ type: str
+ user:
+ description:
+ - RHN/Satellite login.
+ required: true
+ type: str
+ password:
+ description:
+ - RHN/Satellite password.
+ aliases: [pwd]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - If C(False), SSL certificates will not be validated.
+ - This should only be set to C(False) when used on self-controlled sites
+ using self-signed certificates, and you are absolutely sure that nobody
+ can modify traffic between the module and the site.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Add a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ delegate_to: localhost
+'''
+
+import ssl
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+def get_systemid(client, session, sysname):
+ systems = client.system.listUserSystems(session)
+ for system in systems:
+ if system.get('name') == sysname:
+ idres = system.get('id')
+ idd = int(idres)
+ return idd
+
+
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def base_channels(client, session, sys_id):
+ basechan = client.channel.software.listSystemChannels(session, sys_id)
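+ # Depending on the Satellite/Spacewalk API version, the channel label is
+ # returned either under the 'label' key or under 'channel_label'.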
+ try:
+ chans = [item['label'] for item in basechan]
+ except KeyError:
+ chans = [item['channel_label'] for item in basechan]
+ return chans
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ sysname=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=True, aliases=['pwd'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ )
+ )
+
+ state = module.params['state']
+ channelname = module.params['name']
+ systname = module.params['sysname']
+ saturl = module.params['url']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Python 2.7.8 and older do not verify HTTPS certificates by default
+ ssl_context = None
+
+ # initialize connection
+ if ssl_context:
+ client = xmlrpc_client.ServerProxy(saturl, context=ssl_context)
+ else:
+ client = xmlrpc_client.Server(saturl)
+
+ try:
+ session = client.auth.login(user, password)
+ except Exception as e:
+ module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e))
+
+ if not session:
+ module.fail_json(msg="Failed to establish session with Satellite server.")
+
+ # get systemid
+ try:
+ sys_id = get_systemid(client, session, systname)
+ except Exception as e:
+ module.fail_json(msg="Unable to get system id: %s " % to_text(e))
+
+ if not sys_id:
+ module.fail_json(msg="Failed to get system id.")
+
+ # get channels for system
+ try:
+ chans = base_channels(client, session, sys_id)
+ except Exception as e:
+ module.fail_json(msg="Unable to get channel information: %s " % to_text(e))
+
+ try:
+ if state == 'present':
+ if channelname in chans:
+ module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
+ else:
+ subscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s added" % channelname)
+
+ if state == 'absent':
+ if channelname not in chans:
+ module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
+ else:
+ unsubscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s removed" % channelname)
+ except Exception as e:
+ module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e)))
+ finally:
+ client.auth.logout(session)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py
new file mode 100644
index 00000000..dfc408a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhn_register.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) James Laska
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhn_register
+short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
+description:
+ - Manage registration to the Red Hat Network.
+author:
+- James Laska (@jlaska)
+notes:
+ - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead.
+ - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
+requirements:
+ - rhnreg_ks
+ - either libxml2 or lxml
+options:
+ state:
+ description:
+ - Whether to register (C(present)), or unregister (C(absent)) a system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ username:
+ description:
+ - Red Hat Network username.
+ type: str
+ password:
+ description:
+ - Red Hat Network password.
+ type: str
+ server_url:
+ description:
+ - Specify an alternative Red Hat Network server URL.
+ - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ type: str
+ activationkey:
+ description:
+ - Supply an activation key for use with registration.
+ type: str
+ profilename:
+ description:
+ - Supply a profile name for use with registration.
+ type: str
+ ca_cert:
+ description:
+ - Supply a custom SSL CA certificate file for use with registration.
+ type: path
+ aliases: [ sslcacert ]
+ systemorgid:
+ description:
+ - Supply an organizational ID for use with registration.
+ type: str
+ channels:
+ description:
+ - Optionally specify a list of channels to subscribe to upon successful registration.
+ type: list
+ elements: str
+ default: []
+ enable_eus:
+ description:
+ - If C(yes), extended update support will be requested.
+ type: bool
+ default: no
+ nopackages:
+ description:
+ - If C(yes), the registered node will not upload its installed package information to the Satellite server.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Unregister system from RHN
+ community.general.rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
+
+- name: Register as user with password and auto-subscribe to available content
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+
+- name: Register with activationkey and enable extended update support
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: yes
+
+- name: Register with activationkey and set a profilename which may differ from the hostname
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
+
+- name: Register as user with password against a satellite server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+
+- name: Register as user with password and enable channels
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import os
+import sys
+
+# Attempt to import rhn client tools
+sys.path.insert(0, '/usr/share/rhn')
+try:
+ import up2date_client
+ import up2date_client.config
+ HAS_UP2DATE_CLIENT = True
+except ImportError:
+ HAS_UP2DATE_CLIENT = False
+
+# INSERT REDHAT SNIPPETS
+from ansible_collections.community.general.plugins.module_utils import redhat
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import urllib, xmlrpc_client
+
+
+class Rhn(redhat.RegistrationBase):
+
+ def __init__(self, module=None, username=None, password=None):
+ redhat.RegistrationBase.__init__(self, module, username, password)
+ self.config = self.load_config()
+ self.server = None
+ self.session = None
+
+ def logout(self):
+ if self.session is not None:
+ self.server.auth.logout(self.session)
+
+ def load_config(self):
+ '''
+ Read configuration from /etc/sysconfig/rhn/up2date
+ '''
+ if not HAS_UP2DATE_CLIENT:
+ return None
+
+ config = up2date_client.config.initUp2dateConfig()
+
+ return config
+
+ @property
+ def server_url(self):
+ return self.config['serverURL']
+
+ @property
+ def hostname(self):
+ '''
+ Return the non-xmlrpc RHN hostname. This is a convenience method
+ used for displaying a more readable RHN hostname.
+
+ Returns: str
+ '''
+ url = urllib.parse.urlparse(self.server_url)
+ return url[1].replace('xmlrpc.', '')
+
+ @property
+ def systemid(self):
+ systemid = None
+ xpath_str = "//member[name='system_id']/value/string"
+
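+ # The systemid file is an XML-RPC document; extract the 'system_id' member
+ # with whichever XML library is available.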
+ if os.path.isfile(self.config['systemIdPath']):
+ fd = open(self.config['systemIdPath'], 'r')
+ xml_data = fd.read()
+ fd.close()
+
+ # Ugh, xml parsing time ...
+ # First, try parsing with libxml2 ...
+ if systemid is None:
+ try:
+ import libxml2
+ doc = libxml2.parseDoc(xml_data)
+ ctxt = doc.xpathNewContext()
+ systemid = ctxt.xpathEval(xpath_str)[0].content
+ doc.freeDoc()
+ ctxt.xpathFreeContext()
+ except ImportError:
+ pass
+
+ # m-kay, let's try with lxml now ...
+ if systemid is None:
+ try:
+ from lxml import etree
+ root = etree.fromstring(xml_data)
+ systemid = root.xpath(xpath_str)[0].text
+ except ImportError:
+ raise Exception('"libxml2" or "lxml" is required for this module.')
+
+ # Strip the 'ID-' prefix
+ if systemid is not None and systemid.startswith('ID-'):
+ systemid = systemid[3:]
+
+ return int(systemid)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+
+ Returns: True|False
+ '''
+ return os.path.isfile(self.config['systemIdPath'])
+
+ def configure_server_url(self, server_url):
+ '''
+ Configure server_url for registration
+ '''
+
+ self.config.set('serverURL', server_url)
+ self.config.save()
+
+ def enable(self):
+ '''
+ Prepare the system for RHN registration. This includes ...
+ * enabling the rhnplugin yum plugin
+ * disabling the subscription-manager yum plugin
+ '''
+ redhat.RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', True)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
+ '''
+ Register system to RHN. If enable_eus=True, extended update
+ support will be requested.
+ '''
+ register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
+ if self.username:
+ register_cmd.extend(['--username', self.username, '--password', self.password])
+ if self.server_url:
+ register_cmd.extend(['--serverUrl', self.server_url])
+ if enable_eus:
+ register_cmd.append('--use-eus-channel')
+ if nopackages:
+ register_cmd.append('--nopackages')
+ if activationkey is not None:
+ register_cmd.extend(['--activationkey', activationkey])
+ if profilename is not None:
+ register_cmd.extend(['--profilename', profilename])
+ if sslcacert is not None:
+ register_cmd.extend(['--sslCACert', sslcacert])
+ if systemorgid is not None:
+ register_cmd.extend(['--systemorgid', systemorgid])
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
+
+ def api(self, method, *args):
+ '''
+ Convenience RPC wrapper
+ '''
+ if self.server is None:
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ self.server = xmlrpc_client.ServerProxy(url)
+ self.session = self.server.auth.login(self.username, self.password)
+
+ func = getattr(self.server, method)
+ return func(self.session, *args)
+
+ def unregister(self):
+ '''
+ Unregister a previously registered system
+ '''
+
+ # Initiate RPC connection
+ self.api('system.deleteSystems', [self.systemid])
+
+ # Remove systemid file
+ os.unlink(self.config['systemIdPath'])
+
+ def subscribe(self, channels):
+ if not channels:
+ return
+
+ if self._is_hosted():
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ new_channels = [item['channel_label'] for item in current_channels]
+ new_channels.extend(channels)
+ return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
+
+ else:
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ current_channels = [item['label'] for item in current_channels]
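+ # Against Satellite/Spacewalk the requested channels are split into at most
+ # one base channel and a list of child channels before being applied.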
+ new_base = None
+ new_childs = []
+ for ch in channels:
+ if ch in current_channels:
+ continue
+ if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
+ new_base = ch
+ else:
+ if ch not in new_childs:
+ new_childs.append(ch)
+ out_base = 0
+ out_childs = 0
+
+ if new_base:
+ out_base = self.api('system.setBaseChannel', self.systemid, new_base)
+
+ if new_childs:
+ out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
+
+ return out_base and out_childs
+
+ def _is_hosted(self):
+ '''
+ Return True if we are running against Hosted (rhn.redhat.com) or
+ False otherwise (when running against Satellite or Spacewalk)
+ '''
+ return 'rhn.redhat.com' in self.hostname
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ server_url=dict(type='str'),
+ activationkey=dict(type='str', no_log=True),
+ profilename=dict(type='str'),
+ ca_cert=dict(type='path', aliases=['sslcacert']),
+ systemorgid=dict(type='str'),
+ enable_eus=dict(type='bool', default=False),
+ nopackages=dict(type='bool', default=False),
+ channels=dict(type='list', elements='str', default=[]),
+ ),
+ # username/password is required for state=absent, or if channels is not empty
+ # (basically anything that uses self.api requires username/password) but it doesn't
+ # look like we can express that with required_if/required_together/mutually_exclusive
+
+ # only username+password can be used for unregister
+ required_if=[['state', 'absent', ['username', 'password']]],
+ )
+
+ if not HAS_UP2DATE_CLIENT:
+ module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+
+ state = module.params['state']
+ activationkey = module.params['activationkey']
+ profilename = module.params['profilename']
+ sslcacert = module.params['ca_cert']
+ systemorgid = module.params['systemorgid']
+ channels = module.params['channels']
+ enable_eus = module.params['enable_eus']
+ nopackages = module.params['nopackages']
+
+ rhn = Rhn(module=module, username=username, password=password)
+
+ # use the provided server url and persist it to the rhn config.
+ if server_url:
+ rhn.configure_server_url(server_url)
+
+ if not rhn.server_url:
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Check for missing parameters ...
+ if not (activationkey or rhn.username or rhn.password):
+ module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
+ rhn.password))
+ if not activationkey and not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
+
+ # Register system
+ if rhn.is_registered:
+ module.exit_json(changed=False, msg="System already registered.")
+
+ try:
+ rhn.enable()
+ rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
+ rhn.subscribe(channels)
+ except Exception as exc:
+ module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhn.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+
+ if not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
+
+ try:
+ rhn.unregister()
+ except Exception as exc:
+ module.fail_json(msg="Failed to unregister: %s" % exc)
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py
new file mode 100644
index 00000000..22b280f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_release.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+
+# (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or Unset RHSM Release version
+description:
+ - Sets or unsets the release version used by RHSM repositories.
+notes:
+ - This module will fail on an unregistered system.
+ Use the C(redhat_subscription) module to register a system
+ prior to setting the RHSM release.
+requirements:
+ - Red Hat Enterprise Linux 6+ with subscription-manager installed
+options:
+ release:
+ description:
+ - RHSM release version to use (use null to unset)
+ required: true
+ type: str
+author:
+ - Sean Myers (@seandst)
+'''
+
+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+ community.general.rhsm_release:
+ release: null
+'''
+
+RETURN = '''
+current_release:
+ description: The current RHSM release version value
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import re
+
+# Matches release-like values such as 7.2, 6.10, 10Server,
+# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
+
+
+def _sm_release(module, *args):
+ # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
+ # "subscription-manager release --set 0.1"
+ sm_bin = module.get_bin_path('subscription-manager', required=True)
+ cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ # delegate nonzero rc handling to run_command
+ return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+ # Get the current release version, or None if release unset
+ rc, out, err = _sm_release(module, '--show')
+ try:
+ match = release_matcher.findall(out)[0]
+ except IndexError:
+ # 0'th index did not exist; no matches
+ match = None
+
+ return match
+
+
+def set_release(module, release):
+ # Set current release version, or unset if release is None
+ if release is None:
+ args = ('--unset',)
+ else:
+ args = ('--set', release)
+
+ return _sm_release(module, *args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ release=dict(type='str', required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ target_release = module.params['release']
+
+ # sanity check: the target release at least looks like a valid release
+ if target_release and not release_matcher.findall(target_release):
+ module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+ # Will fail with useful error from s-m if system not subscribed
+ current_release = get_release(module)
+
+ changed = (target_release != current_release)
+ if not module.check_mode and changed:
+ set_release(module, target_release)
+ # If setting the release fails, then a fail_json would have exited with
+ # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+ # current release is now set to the target release (job's done)
+ current_release = target_release
+
+ module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py
new file mode 100644
index 00000000..7317be66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/rhsm_repository.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+ - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
+ Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+ - In order to manage RHSM repositories the system must already be registered
+ to RHSM manually or using the Ansible C(redhat_subscription) module.
+
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - The desired repository state. C(present) and C(enabled) enable the
+ repository, while C(absent) and C(disabled) disable it.
+ choices: [present, enabled, absent, disabled]
+ default: "enabled"
+ type: str
+ name:
+ description:
+ - The ID of repositories to enable or disable.
+ - To operate on several repositories this can accept a comma separated
+ list or a YAML list.
+ required: True
+ type: list
+ elements: str
+ purge:
+ description:
+ - Disable all currently enabled repositories that are not specified in C(name).
+ Only set this to C(True) if passing in a list of repositories to the C(name) field.
+ Using this with C(loop) will most likely not have the desired result.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Enable a RHSM repository
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+ community.general.rhsm_repository:
+ name: '*'
+ state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+ community.general.rhsm_repository:
+ name: rhel-6-server*
+ state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+ purge: True
+'''
+
+RETURN = '''
+repositories:
+ description:
+ - The list of RHSM repositories with their states.
+ - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success
+ type: list
+'''
+
+import re
+import os
+from fnmatch import fnmatch
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_subscription_manager(module, arguments):
+ # Execute subscription-manager with arguments and manage common errors
+ rhsm_bin = module.get_bin_path('subscription-manager')
+ if not rhsm_bin:
+ module.fail_json(msg='The executable file subscription-manager was not found in PATH')
+
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
+
+ if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
+ module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
+ elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+
+def get_repository_list(module, list_parameter):
+ # Generate RHSM repository list and return a list of dict
+ if list_parameter == 'list_enabled':
+ rhsm_arguments = ['repos', '--list-enabled']
+ elif list_parameter == 'list_disabled':
+ rhsm_arguments = ['repos', '--list-disabled']
+ elif list_parameter == 'list':
+ rhsm_arguments = ['repos', '--list']
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
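+ # 'subscription-manager repos' prints one block per repository with Repo ID,
+ # Repo Name, Repo URL and Enabled fields; parse those blocks into dicts.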
+
+ skip_lines = [
+ '+----------------------------------------------------------+',
+ ' Available Repositories in /etc/yum.repos.d/redhat.repo'
+ ]
+ repo_id_re = re.compile(r'Repo ID:\s+(.*)')
+ repo_name_re = re.compile(r'Repo Name:\s+(.*)')
+ repo_url_re = re.compile(r'Repo URL:\s+(.*)')
+ repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ if line == '' or line in skip_lines:
+ continue
+
+ repo_id_match = repo_id_re.match(line)
+ if repo_id_match:
+ repo_id = repo_id_match.group(1)
+ continue
+
+ repo_name_match = repo_name_re.match(line)
+ if repo_name_match:
+ repo_name = repo_name_match.group(1)
+ continue
+
+ repo_url_match = repo_url_re.match(line)
+ if repo_url_match:
+ repo_url = repo_url_match.group(1)
+ continue
+
+ repo_enabled_match = repo_enabled_re.match(line)
+ if repo_enabled_match:
+ repo_enabled = repo_enabled_match.group(1)
+
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+ "enabled": True if repo_enabled == '1' else False
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
+
+
+def repository_modify(module, state, name, purge=False):
+ name = set(name)
+ current_repo_list = get_repository_list(module, 'list')
+ updated_repo_list = deepcopy(current_repo_list)
+ matched_existing_repo = {}
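+ # Each requested name may be an fnmatch-style glob, so collect every existing
+ # repository ID it matches (this is what makes patterns like '*' work).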
+ for repoid in name:
+ matched_existing_repo[repoid] = []
+ for idx, repo in enumerate(current_repo_list):
+ if fnmatch(repo['id'], repoid):
+ matched_existing_repo[repoid].append(repo)
+ # Update current_repo_list to return it as result variable
+ updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
+
+ changed = False
+ results = []
+ diff_before = ""
+ diff_after = ""
+ rhsm_arguments = ['repos']
+
+ for repoid in matched_existing_repo:
+ if len(matched_existing_repo[repoid]) == 0:
+ results.append("%s is not a valid repository ID" % repoid)
+ module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+ for repo in matched_existing_repo[repoid]:
+ if state in ['disabled', 'absent']:
+ if repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+ results.append("Repository '%s' is disabled for this system" % repo['id'])
+ rhsm_arguments += ['--disable', repo['id']]
+ elif state in ['enabled', 'present']:
+ if not repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+ results.append("Repository '%s' is enabled for this system" % repo['id'])
+ rhsm_arguments += ['--enable', repo['id']]
+
+ # Disable all enabled repos on the system that are not in the task and not
+ # marked as disabled by the task
+ if purge:
+ enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+ matched_repoids_set = set(matched_existing_repo.keys())
+ difference = enabled_repo_ids.difference(matched_repoids_set)
+ if len(difference) > 0:
+ for repoid in difference:
+ changed = True
+ diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+ diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+ results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+ rhsm_arguments.extend(['--disable', repoid])
+
+ diff = {'before': diff_before,
+ 'after': diff_after,
+ 'before_header': "RHSM repositories",
+ 'after_header': "RHSM repositories"}
+
+ if not module.check_mode and changed:
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ results = out.splitlines()
+ module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+ purge=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ purge = module.params['purge']
+
+ repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py
new file mode 100644
index 00000000..424f5b1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/slackpkg.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+ - Manage binary packages for Slackware using 'slackpkg' which
+ is available in versions after 12.2.
+options:
+ name:
+ description:
+ - Name of the package to install or remove.
+ required: true
+ type: list
+ elements: str
+ aliases: [pkg]
+
+ state:
+ description:
+ - State of the package. You can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
+ required: false
+ default: present
+ type: str
+
+ update_cache:
+ description:
+ - Update the package database first.
+ required: false
+ default: false
+ type: bool
+ aliases: [update-cache]
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.slackpkg:
+ name: foo
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.slackpkg:
+ name: foo,bar
+ state: absent
+
+- name: Make sure that it is the most updated package
+ community.general.slackpkg:
+ name: foo
+ state: latest
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, slackpkg_path, name):
+
+ import platform
+ import os
+ import re
+
+ machine = platform.machine()
+ # Exception for kernel-headers package on x86_64
+ if name == 'kernel-headers' and machine == 'x86_64':
+ machine = 'x86'
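+ # Installed Slackware packages are recorded as files named
+ # <name>-<version>-<arch>-<build> under /var/log/packages.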
+ pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine)))
+ packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)]
+
+ if len(packages) > 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ remove %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ install %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
+ for package in packages:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ upgrade %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ update_cache=dict(default=False, aliases=["update-cache"],
+ type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py
new file mode 100644
index 00000000..9776b4e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/snap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
+# Copyright: (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+
+short_description: Manages snaps
+
+
+description:
+ - "Manages snaps packages."
+
+options:
+ name:
+ description:
+ - Name of the snap to install or remove. Can be a list of snaps.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: present
+ choices: [ absent, present ]
+ type: str
+ classic:
+ description:
+ - Confinement policy. The classic confinement allows a snap to have
+ the same level of access to the system as "classic" packages,
+ like those managed by APT. This option corresponds to the --classic argument.
+ This option can only be specified if there is a single snap in the task.
+ type: bool
+ required: false
+ default: no
+ channel:
+ description:
+ - Define which release of a snap is installed and tracked for updates.
+ This option can only be specified if there is a single snap in the task.
+ type: str
+ required: false
+ default: stable
+
+author:
+ - Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
+ - Stanislas Lange (@angristan) <angristan@pm.me>
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snap
+- name: Install foo
+ community.general.snap:
+ name:
+ - foo
+ - bar
+
+# Remove "foo" snap
+- name: Remove foo
+ community.general.snap:
+ name: foo
+ state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+ community.general.snap:
+ name: foo
+ classic: yes
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+ community.general.snap:
+ name: foo
+ channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+ description: Whether or not the snaps were installed with the classic confinement
+ type: bool
+ returned: When snaps are installed
+channel:
+ description: The channel the snaps were installed from
+ type: str
+ returned: When snaps are installed
+cmd:
+ description: The command that was executed on the host
+ type: str
+ returned: When changed is true
+snaps_installed:
+ description: The list of actually installed snaps
+ type: list
+ returned: When any snaps have been installed
+snaps_removed:
+ description: The list of actually removed snaps
+ type: list
+ returned: When any snaps have been removed
+'''
+
+import operator
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def validate_input_snaps(module):
+ """Ensure that all exist."""
+ for snap_name in module.params['name']:
+ if not snap_exists(module, snap_name):
+ module.fail_json(msg="No snap matching '%s' available." % snap_name)
+
+
+def snap_exists(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'info', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def is_snap_installed(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'list', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def get_snap_for_action(module):
+ """Construct a list of snaps to use for current action."""
+ snaps = module.params['name']
+
+ is_present_state = module.params['state'] == 'present'
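+    # For state=present act on snaps that are NOT installed yet; for state=absent act on snaps that ARE installed.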
+ negation_predicate = operator.not_ if is_present_state else bool
+
+ def predicate(s):
+ return negation_predicate(is_snap_installed(module, s))
+
+ return [s for s in snaps if predicate(s)]
+
+
+def get_base_cmd_parts(module):
+ action_map = {
+ 'present': 'install',
+ 'absent': 'remove',
+ }
+
+ state = module.params['state']
+
+ classic = ['--classic'] if module.params['classic'] else []
+ channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else []
+
+ snap_path = module.get_bin_path("snap", True)
+ snap_action = action_map[state]
+
+ cmd_parts = [snap_path, snap_action]
+ if snap_action == 'install':
+ cmd_parts += classic + channel
+
+ return cmd_parts
+
+
+def get_cmd_parts(module, snap_names):
+ """Return list of cmds to run in exec format."""
+ is_install_mode = module.params['state'] == 'present'
+ has_multiple_snaps = len(snap_names) > 1
+
+ cmd_parts = get_base_cmd_parts(module)
+ has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts
+
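+    # --classic and --channel apply to a single snap only, so split into one install command per snap in that case.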
+ if not (is_install_mode and has_one_pkg_params and has_multiple_snaps):
+ return [cmd_parts + snap_names]
+
+ return [cmd_parts + [s] for s in snap_names]
+
+
+def run_cmd_for(module, snap_names):
+ cmds_parts = get_cmd_parts(module, snap_names)
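+    # Join the per-snap commands with ';' and run them through a single shell invocation.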
+ cmd = '; '.join(' '.join(c) for c in cmds_parts)
+ cmd = 'sh -c "{0}"'.format(cmd)
+
+ # Actually execute the snap command
+ return (cmd, ) + module.run_command(cmd, check_rc=False)
+
+
+def execute_action(module):
+ is_install_mode = module.params['state'] == 'present'
+ exit_kwargs = {
+ 'classic': module.params['classic'],
+ 'channel': module.params['channel'],
+ } if is_install_mode else {}
+
+ actionable_snaps = get_snap_for_action(module)
+ if not actionable_snaps:
+ module.exit_json(changed=False, **exit_kwargs)
+
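+    # Report the affected snaps under 'snaps_installed' or 'snaps_removed', depending on the requested state.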
+ changed_def_args = {
+ 'changed': True,
+ 'snaps_{result}'.
+ format(result='installed' if is_install_mode
+ else 'removed'): actionable_snaps,
+ }
+
+ if module.check_mode:
+ module.exit_json(**dict(changed_def_args, **exit_kwargs))
+
+ cmd, rc, out, err = run_cmd_for(module, actionable_snaps)
+ cmd_out_args = {
+ 'cmd': cmd,
+ 'rc': rc,
+ 'stdout': out,
+ 'stderr': err,
+ }
+
+ if rc == 0:
+ module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))
+ else:
+ msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd)
+ if is_install_mode:
+ m = re.match(r'^error: This revision of snap "(?P<package_name>\w+)" was published using classic confinement', err)
+ if m is not None:
+ err_pkg = m.group('package_name')
+ msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+ module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))
+
+
+def main():
+ module_args = {
+ 'name': dict(type='list', elements='str', required=True),
+ 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),
+ 'classic': dict(type='bool', required=False, default=False),
+ 'channel': dict(type='str', required=False, default='stable'),
+ }
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ validate_input_snaps(module)
+
+ # Apply changes to the snaps
+ execute_action(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py
new file mode 100644
index 00000000..347413fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/sorcery.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+ - Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain
+author: "Vlad Glagolev (@vaygr)"
+notes:
+ - When all three components are selected, the update goes by the sequence --
+ Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+ - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+ yet supported.
+requirements:
+ - bash
+options:
+ name:
+ description:
+ - Name of the spell
+ - multiple names can be given, separated by commas
+ - special value '*' in conjunction with states C(latest) or
+ C(rebuild) will update or rebuild the whole system respectively
+ aliases: ["spell"]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to cast, dispel or rebuild a package
+ - state C(cast) is an equivalent of C(present), not C(latest)
+ - state C(latest) always triggers C(update_cache=yes)
+ - state C(rebuild) implies cast of all specified spells, not only
+      those that existed before
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+ default: "present"
+ type: str
+
+ depends:
+ description:
+ - Comma-separated list of _optional_ dependencies to build a spell
+ (or make sure it is built) with; use +/- in front of dependency
+ to turn it on/off ('+' is optional though)
+ - this option is ignored if C(name) parameter is equal to '*' or
+ contains more than one spell
+ - providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)'
+ type: str
+
+ update:
+ description:
+ - Whether or not to update sorcery scripts at the very first stage
+ type: bool
+ default: no
+
+ update_cache:
+ description:
+ - Whether or not to update grimoire collection before casting spells
+ type: bool
+ default: no
+ aliases: ["update_codex"]
+
+ cache_valid_time:
+ description:
+ - Time in seconds to invalidate grimoire collection on update
+ - especially useful for SCM and rsync grimoires
+ - makes sense only in pair with C(update_cache)
+ type: int
+'''
+
+
+EXAMPLES = '''
+- name: Make sure spell foo is installed
+ community.general.sorcery:
+ spell: foo
+ state: present
+
+- name: Make sure spells foo, bar and baz are removed
+ community.general.sorcery:
+ spell: foo,bar,baz
+ state: absent
+
+- name: Make sure spell foo with dependencies bar and baz is installed
+ community.general.sorcery:
+ spell: foo
+ depends: bar,baz
+ state: present
+
+- name: Make sure spell foo with bar and without baz dependencies is installed
+ community.general.sorcery:
+ spell: foo
+ depends: +bar,-baz
+ state: present
+
+- name: Make sure spell foo with libressl (providing SSL) dependency is installed
+ community.general.sorcery:
+ spell: foo
+ depends: libressl(SSL)
+ state: present
+
+- name: Make sure spells with/without required dependencies (if any) are installed
+ community.general.sorcery:
+ name: "{{ item.spell }}"
+ depends: "{{ item.depends | default(None) }}"
+ state: present
+ loop:
+ - { spell: 'vifm', depends: '+file,-gtk+2' }
+ - { spell: 'fwknop', depends: 'gpgme' }
+ - { spell: 'pv,tnftp,tor' }
+
+- name: Install the latest version of spell foo using regular glossary
+ community.general.sorcery:
+ name: foo
+ state: latest
+
+- name: Rebuild spell foo
+ community.general.sorcery:
+ spell: foo
+ state: rebuild
+
+- name: Rebuild the whole system, but update Sorcery and Codex first
+ community.general.sorcery:
+ spell: '*'
+ state: rebuild
+ update: yes
+ update_cache: yes
+
+- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias
+ community.general.sorcery:
+ update_codex: yes
+ cache_valid_time: 86400
+
+- name: Update only Sorcery itself
+ community.general.sorcery:
+ update: yes
+'''
+
+
+RETURN = '''
+'''
+
+
+import datetime
+import fileinput
+import os
+import re
+import shutil
+import sys
+
+
+# auto-filled at module init
+SORCERY = {
+ 'sorcery': None,
+ 'scribe': None,
+ 'cast': None,
+ 'dispel': None,
+ 'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+
+def get_sorcery_ver(module):
+ """ Get Sorcery version. """
+
+ cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0 or not stdout:
+ module.fail_json(msg="unable to get Sorcery version")
+
+ return stdout.strip()
+
+
+def codex_fresh(codex, module):
+ """ Check if grimoire collection is fresh enough. """
+
+ if not module.params['cache_valid_time']:
+ return False
+
+ timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+ for grimoire in codex:
+ lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+ grimoire + ".lastupdate")
+
+ try:
+ mtime = os.stat(lastupdate_path).st_mtime
+ except Exception:
+ return False
+
+ lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+ # if any grimoire is not fresh, we invalidate the Codex
+ if lastupdate_ts + timedelta < datetime.datetime.now():
+ return False
+
+ return True
+
+
+def codex_list(module):
+ """ List valid grimoire collection. """
+
+ codex = {}
+
+ cmd_scribe = "%s index" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+ rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+ # drop 4-line header and empty trailing line
+ for line in stdout.splitlines()[4:-1]:
+ match = rex.match(line)
+
+ if match:
+ codex[match.group('grim')] = match.group('ver')
+
+ if not codex:
+ module.fail_json(msg="no grimoires to operate on; add at least one")
+
+ return codex
+
+
+def update_sorcery(module):
+ """ Update sorcery scripts.
+
+ This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+ positive change value.
+
+ """
+
+ changed = False
+
+ if module.check_mode:
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=True, msg="would have updated Sorcery")
+ else:
+ sorcery_ver = get_sorcery_ver(module)
+
+ cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+ if sorcery_ver != get_sorcery_ver(module):
+ changed = True
+
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+ """ Update grimoire collections.
+
+ This runs 'scribe update'. Check mode always returns a positive change
+ value when 'cache_valid_time' is used.
+
+ """
+
+ params = module.params
+
+ changed = False
+
+ codex = codex_list(module)
+ fresh = codex_fresh(codex, module)
+
+ if module.check_mode:
+ if not params['name']:
+ if not fresh:
+ changed = True
+
+ module.exit_json(changed=changed, msg="would have updated Codex")
+    elif not fresh or (params['name'] and params['state'] == 'latest'):
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_scribe = "%s update" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
+
+ if codex != codex_list(module):
+ changed = True
+
+ if not params['name']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Codex")
+
+
+def match_depends(module):
+ """ Check for matching dependencies.
+
+ This inspects spell's dependencies with the desired states and returns
+ 'False' if a recast is needed to match them. It also adds required lines
+ to the system-wide depends file for proper recast procedure.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ depends = {}
+
+ depends_ok = True
+
+ if len(spells) > 1 or not params['depends']:
+ return depends_ok
+
+ spell = spells[0]
+
+ if module.check_mode:
+ sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+ try:
+ shutil.copy2(sorcery_depends_orig, sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to copy depends.check file")
+ else:
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
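+    # Dependency entries look like 'bar', '+bar', '-baz' or a provider spec such as 'openssl(SSL)'.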
+ rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+ for d in params['depends'].split(','):
+ match = rex.match(d)
+
+ if not match:
+ module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+ # normalize status
+ if not match.group('status') or match.group('status') == '+':
+ status = 'on'
+ else:
+ status = 'off'
+
+ depends[match.group('depend')] = status
+
+ # drop providers spec
+ depends_list = [s.split('(')[0] for s in depends]
+
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ if rc != 0:
+ module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+ fi = fileinput.input(sorcery_depends, inplace=True)
+
+ try:
+ try:
+ for line in fi:
+ if line.startswith(spell + ':'):
+ match = None
+
+ for d in depends:
+ # when local status is 'off' and dependency is provider,
+ # use only provider value
+ d_offset = d.find('(')
+
+ if d_offset == -1:
+ d_p = ''
+ else:
+ d_p = re.escape(d[d_offset:])
+
+ # .escape() is needed mostly for the spells like 'libsigc++'
+ rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+ (re.escape(spell), re.escape(d), d_p))
+
+ match = rex.match(line)
+
+ # we matched the line "spell:dependency:on|off:optional:"
+ if match:
+ # if we also matched the local status, mark dependency
+ # as empty and put it back into depends file
+ if match.group('lstatus') == depends[d]:
+ depends[d] = None
+
+ sys.stdout.write(line)
+
+ # status is not that we need, so keep this dependency
+ # in the list for further reverse switching;
+ # stop and process the next line in both cases
+ break
+
+ if not match:
+ sys.stdout.write(line)
+ else:
+ sys.stdout.write(line)
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fi.close()
+
+ depends_new = [v for v in depends if depends[v]]
+
+ if depends_new:
+        try:
+            with open(sorcery_depends, 'a') as fl:
+                for k in depends_new:
+                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+
+ depends_ok = False
+
+ if module.check_mode:
+ try:
+ os.remove(sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to clean up depends.backup file")
+
+ return depends_ok
+
+
+def manage_spells(module):
+ """ Cast or dispel spells.
+
+ This manages the whole system ('*'), list or a single spell. Command 'cast'
+ is used to install or rebuild spells, while 'dispel' takes care of theirs
+ removal from the system.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+ if spells == '*':
+ if params['state'] == 'latest':
+ # back up original queue
+ try:
+ os.rename(sorcery_queue, sorcery_queue + ".backup")
+ except IOError:
+ module.fail_json(msg="failed to backup the update queue")
+
+ # see update_codex()
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_sorcery = "%s queue"
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to generate the update queue")
+
+ try:
+ queue_size = os.stat(sorcery_queue).st_size
+ except Exception:
+ module.fail_json(msg="failed to read the update queue")
+
+ if queue_size != 0:
+ if module.check_mode:
+ try:
+ os.rename(sorcery_queue + ".backup", sorcery_queue)
+ except IOError:
+ module.fail_json(msg="failed to restore the update queue")
+
+ module.exit_json(changed=True, msg="would have updated the system")
+
+ cmd_cast = "%s --queue" % SORCERY['cast']
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to update the system")
+
+ module.exit_json(changed=True, msg="successfully updated the system")
+ else:
+ module.exit_json(changed=False, msg="the system is already up to date")
+ elif params['state'] == 'rebuild':
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have rebuilt the system")
+
+ cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+ module.exit_json(changed=True, msg="successfully rebuilt the system")
+ else:
+ module.fail_json(msg="unsupported operation on '*' name value")
+ else:
+ if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+ # extract versions from the 'gaze' command
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ # fail if any of spells cannot be found
+ if rc != 0:
+ module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+ ', '.join(spells))
+
+ cast_queue = []
+ dispel_queue = []
+
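+            # 'gaze -q version' prints pipe-separated columns; the last three are the
+            # spell name, grimoire version and installed version ('-' when not installed).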
+ rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+ # drop 2-line header and empty trailing line
+ for line in stdout.splitlines()[2:-1]:
+ match = rex.match(line)
+
+ cast = False
+
+ if params['state'] == 'present':
+ # spell is not installed..
+ if match.group('inst_ver') == '-':
+ # ..so set up depends reqs for it
+ match_depends(module)
+
+ cast = True
+ # spell is installed..
+ else:
+ # ..but does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'latest':
+ # grimoire and installed versions do not match..
+ if match.group('grim_ver') != match.group('inst_ver'):
+ # ..so check for depends reqs first and set them up
+ match_depends(module)
+
+ cast = True
+ # grimoire and installed versions match..
+ else:
+ # ..but the spell does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'rebuild':
+ cast = True
+ # 'absent'
+ else:
+ if match.group('inst_ver') != '-':
+ dispel_queue.append(match.group('spell'))
+
+ if cast:
+ cast_queue.append(match.group('spell'))
+
+ if cast_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have cast spell(s)")
+
+ cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to cast spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully cast spell(s)")
+ elif params['state'] != 'absent':
+ module.exit_json(changed=False, msg="spell(s) are already cast")
+
+ if dispel_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+ cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_dispel)
+
+ if rc != 0:
+ module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ else:
+ module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
+ ),
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
+ )
+
+ if os.geteuid() != 0:
+ module.fail_json(msg="root privileges are required for this operation")
+
+ for c in SORCERY:
+ SORCERY[c] = module.get_bin_path(c, True)
+
+ # prepare environment: run sorcery commands without asking questions
+ module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+ params = module.params
+
+ # normalize 'state' parameter
+ if params['state'] in ('present', 'cast'):
+ params['state'] = 'present'
+ elif params['state'] in ('absent', 'dispelled'):
+ params['state'] = 'absent'
+
+ if params['update']:
+ update_sorcery(module)
+
+ if params['update_cache'] or params['state'] == 'latest':
+ update_codex(module)
+
+ if params['name']:
+ manage_spells(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py
new file mode 100644
index 00000000..21d17f4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/svr4pkg.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+options:
+ name:
+ description:
+ - Package name, e.g. C(SUNWcsr)
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+ type: str
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when C(state=present).
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+ type: str
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if C(src) is a URL.
+ type: str
+ response_file:
+ description:
+ - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ required: false
+ type: str
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ type: str
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install a package from an already copied file
+ community.general.svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
+
+- name: Install a package directly from an http site
+ community.general.svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
+
+- name: Install a package with a response file
+ community.general.svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
+
+- name: Ensure that a package is not installed
+ community.general.svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
+
+- name: Ensure that a category is not installed
+ community.general.svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
+'''
+
+
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True)]
+ cmd.append('-q')
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+ rc, out, err = module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def create_admin_file():
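+    # Build a temporary admin file that answers every pkgadd/pkgrm prompt non-interactively.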
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = '''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+    # os.write() needs bytes on Python 3, so encode the admin file contents first
+    os.write(desc, fullauto.encode())
+ os.close(desc)
+ return filename
+
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = ['pkgadd', '-n']
+ if zone == 'current':
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
+ if proxy is not None:
+ cmd += ['-x', proxy]
+ if response_file is not None:
+ cmd += ['-r', response_file]
+ if category:
+ cmd += ['-Y']
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
+ else:
+ cmd = ['pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py
new file mode 100644
index 00000000..7e9db835
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swdepot.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+ - Will install, upgrade and remove packages with swdepot package manager (HP-UX)
+notes: []
+author: "Raul Melo (@melodous)"
+options:
+ name:
+ description:
+ - package name.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ choices: [ 'present', 'latest', 'absent']
+ type: str
+ depot:
+ description:
+      - The source repository from which to install or upgrade a package.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install a package
+ community.general.swdepot:
+ name: unzip-6.0
+ state: present
+ depot: 'repository:/path'
+
+- name: Install the latest version of a package
+ community.general.swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- name: Remove a package
+ community.general.swdepot:
+ name: unzip
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def compare_package(version1, version2):
+ """ Compare version packages.
+ Return values:
+ -1 first minor
+ 0 equal
+ 1 first greater """
+
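+    # Strip trailing zero components so versions like '1.2' and '1.2.0' compare as equal.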
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ normalized_version1 = normalize(version1)
+ normalized_version2 = normalize(version2)
+ if normalized_version1 == normalized_version2:
+ rc = 0
+ elif normalized_version1 < normalized_version2:
+ rc = -1
+ else:
+ rc = 1
+ return rc
+
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+ use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
+
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+ msg = "No changed"
+ rc = 0
+ if (state == 'present' or state == 'latest') and depot is None:
+ output = "depot parameter is mandatory in present or latest task"
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ # Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+ if (state == 'present' or state == 'latest') and installed is False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed is True:
+ # Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed, version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed is True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py
new file mode 100644
index 00000000..4dac01be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/swupd.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems
+description:
+ - Manages updates and bundles with the swupd bundle manager, which is used by the
+ Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+options:
+ contenturl:
+ description:
+ - URL pointing to the contents of available bundles.
+ If not specified, the contents are retrieved from clearlinux.org.
+ type: str
+ format:
+ description:
+ - The format suffix for version file downloads. For example [1,2,3,staging,etc].
+ If not specified, the default format is used.
+ type: str
+ manifest:
+ description:
+      - The manifest contains information about the bundles at a certain version of the OS.
+ Specify a Manifest version to verify against that version or leave unspecified to
+ verify against the current version.
+ aliases: [release, version]
+ type: int
+ name:
+ description:
+      - Name of the I(bundle) to install or remove.
+ aliases: [bundle]
+ type: str
+ state:
+ description:
+      - Indicates the desired I(bundle) state. C(present) ensures the bundle
+        is installed while C(absent) ensures the I(bundle) is not installed.
+ default: present
+ choices: [present, absent]
+ type: str
+ update:
+ description:
+ - Updates the OS to the latest version.
+ type: bool
+ default: false
+ url:
+ description:
+ - Overrides both I(contenturl) and I(versionurl).
+ type: str
+ verify:
+ description:
+ - Verify content for OS version.
+ type: bool
+ default: false
+ versionurl:
+ description:
+ - URL for version string download.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Update the OS to the latest version
+ community.general.swupd:
+ update: yes
+
+- name: Installs the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: present
+
+- name: Removes the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: absent
+
+- name: Check integrity of filesystem
+ community.general.swupd:
+ verify: yes
+
+- name: Downgrade OS to release 12920
+ community.general.swupd:
+ verify: yes
+ manifest: 12920
+'''
+
+RETURN = '''
+stdout:
+ description: stdout of swupd
+ returned: always
+ type: str
+stderr:
+ description: stderr of swupd
+ returned: always
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Swupd(object):
+ FILES_NOT_MATCH = "files did not match"
+ FILES_REPLACED = "missing files were replaced"
+ FILES_FIXED = "files were fixed"
+ FILES_DELETED = "files were deleted"
+
+ def __init__(self, module):
+ # Fail if swupd is not found
+ self.module = module
+ self.swupd_cmd = module.get_bin_path("swupd", False)
+ if not self.swupd_cmd:
+ module.fail_json(msg="Could not find swupd.")
+
+ # Initialize parameters
+ for key in module.params.keys():
+ setattr(self, key, module.params[key])
+
+ # Initialize return values
+ self.changed = False
+ self.failed = False
+ self.msg = None
+ self.rc = None
+ self.stderr = ""
+ self.stdout = ""
+
+ def _run_cmd(self, cmd):
+ self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
+
+ def _get_cmd(self, command):
+ cmd = "%s %s" % (self.swupd_cmd, command)
+
+ if self.format:
+ cmd += " --format=%s" % self.format
+ if self.manifest:
+ cmd += " --manifest=%s" % self.manifest
+ if self.url:
+ cmd += " --url=%s" % self.url
+ else:
+ if self.contenturl and command != "check-update":
+ cmd += " --contenturl=%s" % self.contenturl
+ if self.versionurl:
+ cmd += " --versionurl=%s" % self.versionurl
+
+ return cmd
+
+ def _is_bundle_installed(self, bundle):
+ try:
+ os.stat("/usr/share/clear/bundles/%s" % bundle)
+ except OSError:
+ return False
+
+ return True
+
+ def _needs_update(self):
+ cmd = self._get_cmd("check-update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ return True
+
+ if self.rc == 1:
+ return False
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def _needs_verify(self):
+ cmd = self._get_cmd("verify")
+ self._run_cmd(cmd)
+
+ if self.rc != 0:
+ self.failed = True
+ self.msg = "Failed to check for filesystem inconsistencies."
+
+ if self.FILES_NOT_MATCH in self.stdout:
+ return True
+
+ return False
+
+ def install_bundle(self, bundle):
+ """Installs a bundle with `swupd bundle-add bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+ if self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s is already installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-add %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s installed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to install bundle %s" % bundle
+
+ def remove_bundle(self, bundle):
+ """Removes a bundle with `swupd bundle-remove bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+ if not self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s not installed"
+ return
+
+ cmd = self._get_cmd("bundle-remove %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s removed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to remove bundle %s" % bundle
+
+ def update_os(self):
+ """Updates the os with `swupd update`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_update())
+
+ if not self._needs_update():
+ self.msg = "There are no updates available"
+ return
+
+ cmd = self._get_cmd("update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Update successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def verify_os(self):
+ """Verifies filesystem against specified or current version"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_verify())
+
+ if not self._needs_verify():
+ self.msg = "No files where changed"
+ return
+
+ cmd = self._get_cmd("verify --fix")
+ self._run_cmd(cmd)
+
+ if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+ self.changed = True
+ self.msg = "Fix successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to verify the OS"
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ contenturl=dict(type="str"),
+ format=dict(type="str"),
+ manifest=dict(aliases=["release", "version"], type="int"),
+ name=dict(aliases=["bundle"], type="str"),
+ state=dict(default="present", choices=["present", "absent"], type="str"),
+ update=dict(default=False, type="bool"),
+ url=dict(type="str"),
+ verify=dict(default=False, type="bool"),
+ versionurl=dict(type="str"),
+ ),
+ required_one_of=[["name", "update", "verify"]],
+ mutually_exclusive=[["name", "update", "verify"]],
+ supports_check_mode=True
+ )
+
+ swupd = Swupd(module)
+
+ name = module.params["name"]
+ state = module.params["state"]
+ update = module.params["update"]
+ verify = module.params["verify"]
+
+ if update:
+ swupd.update_os()
+ elif verify:
+ swupd.verify_os()
+ elif state == "present":
+ swupd.install_bundle(name)
+ elif state == "absent":
+ swupd.remove_bundle(name)
+ else:
+ swupd.failed = True
+
+ if swupd.failed:
+ module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+ else:
+ module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py
new file mode 100644
index 00000000..9d54fbcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/urpmi.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+ - Manages packages with I(urpmi) (such as for Mageia or Mandriva)
+options:
+ name:
+ description:
+ - A list of package names to install, upgrade or remove.
+ required: yes
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update the package database first C(urpmi.update -a).
+ type: bool
+ default: no
+ aliases: ['update-cache']
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ type: bool
+ default: yes
+ aliases: ['no-recommends']
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ type: bool
+ default: yes
+ root:
+ description:
+ - Specifies an alternative install root, relative to which all packages will be installed.
+ Corresponds to the C(--root) option for I(urpmi).
+ aliases: [ installroot ]
+ type: str
+author:
+- Philippe Makowski (@pmakowski)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.urpmi:
+ pkg: foo
+ state: present
+
+- name: Remove package foo
+ community.general.urpmi:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.urpmi:
+ pkg: foo,bar
+ state: absent
+
+- name: Update the package database (urpmi.update -a -q) and install bar (bar will be upgraded if a newer version exists)
+  community.general.urpmi:
+ name: bar
+ state: present
+ update_cache: yes
+'''
+
+
+import os
+import shlex
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+
+ urpmiupdate_path = module.get_bin_path("urpmi.update", True)
+ cmd = "%s -a -q" % (urpmiupdate_path,)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages, root):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package, root):
+ continue
+
+ urpme_path = module.get_bin_path("urpme", True)
+ cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, root, force=True, no_recommends=True):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ urpmi_path = module.get_bin_path("urpmi", True)
+ cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
+ no_recommends_yes,
+ root_option(root),
+ packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+
+        # urpmi always exits 0 when --force is used, which is why each package was verified individually above
+ if rc:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def root_option(root):
+ if (root):
+ return "--root=%s" % (root)
+ else:
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ force=dict(type='bool', default=True),
+ no_recommends=dict(type='bool', default=True, aliases=['no-recommends']),
+ name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']),
+ root=dict(type='str', aliases=['installroot']),
+ ),
+ )
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, p['name'], p['root'], p['force'], p['no_recommends'])
+
+ elif p['state'] in ['removed', 'absent']:
+ remove_packages(module, p['name'], p['root'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py
new file mode 100644
index 00000000..6f2f5dfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/xbps.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ aliases: [pkg,package]
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent", "latest", "installed", "removed"]
+ type: str
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ type: bool
+ default: no
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ aliases: ['update-cache']
+ type: bool
+ default: yes
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ type: bool
+ default: no
+ upgrade_xbps:
+ description:
+ - Whether or not to upgrade the xbps package when necessary.
+ Before installing new packages,
+ xbps requires the user to update the xbps package itself.
+ Thus when this option is set to C(no),
+ upgrades and installations will fail when xbps is not up to date.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ force:
+ description:
+ - This option doesn't have any effect and is deprecated; it will be
+ removed in 3.0.0.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo (automatically updating the xbps package if needed)
+ community.general.xbps: name=foo state=present
+
+- name: Upgrade package foo
+ community.general.xbps: name=foo state=latest update_cache=yes
+
+- name: Remove packages foo and bar
+ community.general.xbps: name=foo,bar state=absent
+
+- name: Recursively remove package foo
+ community.general.xbps: name=foo state=absent recurse=yes
+
+- name: Update package cache
+ community.general.xbps: update_cache=yes
+
+- name: Upgrade packages
+ community.general.xbps: upgrade=yes
+
+- name: Install a package, failing if the xbps package is out of date
+ community.general.xbps:
+ name: foo
+ state: present
+ upgrade_xbps: no
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: str
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+ returned: success
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+ return bool(len(xbps_output))
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ if rrc == 0 or rrc == 17:
+ """Return True to indicate that the package is installed locally,
+ and the result of the version number comparison to determine if the
+ package is up-to-date"""
+ return True, name not in rstdout
+
+ return False, False
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
+ if "avg rate" in stdout:
+ return True
+ else:
+ return False
+
+
+def upgrade_xbps(module, xbps_path, exit_on_success=False):
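+ """Upgrade the xbps package itself (runs xbps-install -uy xbps); fail the module if it does not succeed"""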
+ cmdupgradexbps = "%s -uy xbps" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg='Could not upgrade xbps itself')
+
+
+def upgrade(module, xbps_path):
+ """Returns true is full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+ if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ elif module.check_mode:
+ module.exit_json(changed=True, msg='Would have performed upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ elif rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-upgrade again
+ module.params['upgrade_xbps'] = False
+ upgrade(module, xbps_path)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+ # Use a for loop so that, on error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+ for package in packages:
+ # Skip if the package is installed and state == present, or if
+ # state == latest and the package is already up-to-date
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-update again
+ module.params['upgrade_xbps'] = False
+ install_packages(module, xbps_path, state, packages)
+ elif rc != 0 and not (state == 'latest' and rc == 17):
+ module.fail_json(msg="failed to install %s" % (package))
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+ """Update package cache"""
+ if module.check_mode:
+ if upgrade_planned:
+ return
+ module.exit_json(
+ changed=True, msg='Would have updated the package cache'
+ )
+ changed = update_package_db(module, xbps_path)
+ if not upgrade_planned:
+ module.exit_json(changed=changed, msg=(
+ 'Updated the package master lists' if changed
+ else 'Package list already up to date'
+ ))
+
+
+def main():
+ """Returns, calling appropriate command"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, aliases=['update-cache'],
+ type='bool'),
+ upgrade_xbps=dict(default=True, type='bool')
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+ if not os.path.exists(xbps_path['install']):
+ module.fail_json(msg="cannot find xbps, in path %s"
+ % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py
new file mode 100644
index 00000000..9ad539ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+ default: "present"
+ type: str
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ type: str
+ extra_args_precommand:
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable the GPG signature checking of the package
+ signature being installed. Has an effect only if state is
+ I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) modifies zypper's default behavior by not installing
+ recommended packages; setting it to C(no) does install recommended packages.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ description:
+ - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ description:
+ - Adds C(--force-resolution) option to I(zypper). Allows (un)installing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ version_added: '0.2.0'
+ update_cache:
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ description:
+ - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+ version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ allow_vendor_change:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+ version_added: '0.2.0'
+ replacefiles:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--replacefiles) option to I(zypper) install/update command.
+ version_added: '0.2.0'
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+ community.general.zypper:
+ name: nmap
+ state: present
+
+- name: Install apache2 with recommended packages
+ community.general.zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+- name: Apply a given patch
+ community.general.zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+- name: Remove the nmap package
+ community.general.zypper:
+ name: nmap
+ state: absent
+
+- name: Install the nginx rpm from a remote repo
+ community.general.zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+- name: Install local rpm file
+ community.general.zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+- name: Update all packages
+ community.general.zypper:
+ name: '*'
+ state: latest
+
+- name: Apply all available patches
+ community.general.zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+ community.general.zypper:
+ name: '*'
+ state: dist-upgrade
+ allow_vendor_change: true
+ extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+ community.general.zypper:
+ name: 'nmap'
+ state: latest
+ replacefiles: true
+
+- name: Refresh repositories and update package openssl
+ community.general.zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+ community.general.zypper:
+ name: 'docker>=1.10'
+ state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+ community.general.zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
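+
+# A minimal sketch (the package names nginx and apache2 are illustrative): package names
+# may carry "+" or "-" prefixes, so installs and removals can happen in one zypper run.
+- name: Install nginx and remove apache2 in a single transaction
+ community.general.zypper:
+ name: ['+nginx', '-apache2']
+ state: present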
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
+
+
+def get_want_state(names, remove=False):
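+ "split the requested names into Package objects (with +/- prefix and version) and plain URL/.rpm entries"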
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+ if packages is None:
+ firstrun = True
+ packages = {}
+ else:
+ firstrun = False
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['replacefiles']:
+ cmd.append('--replacefiles')
+ if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
+ cmd.append('--allow-vendor-change')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ disable_recommends=dict(required=False, default=True, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ force_resolution=dict(required=False, default=False, type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(required=False, default=False, type='bool'),
+ extra_args=dict(required=False, default=None),
+ allow_vendor_change=dict(required=False, default=False, type='bool'),
+ replacefiles=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py
new file mode 100644
index 00000000..55738b58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packaging/os/zypper_repository.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+ - Add or remove Zypper repositories on SUSE and openSUSE
+options:
+ name:
+ description:
+ - A name for the repository. Not required when adding .repo files.
+ type: str
+ repo:
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ type: str
+ state:
+ description:
+ - Whether the repository should be present or absent.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+ description:
+ description:
+ - A description of the repository
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ type: bool
+ default: no
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ type: bool
+ default: yes
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ type: int
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ type: bool
+ default: no
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ - Only works with C(.repo) files if `name` is given explicitly.
+ type: bool
+ default: no
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ type: bool
+ default: no
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ type: bool
+ default: yes
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
+
+- name: Remove NVIDIA repository
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
+
+- name: Add python development repository
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+ community.general.zypper_repository:
+ repo: '*'
+ runrefresh: yes
+
+- name: Add a repo and add its gpg key
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: yes
+
+- name: Force refresh of a repository
+ community.general.zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: yes
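+
+# A minimal sketch (the URL and alias are illustrative): setting a priority requires zypper >= 1.12.25.
+- name: Add a repository with a description and a custom priority
+ community.general.zypper_repository:
+ repo: 'http://example.com/repos/my_repo/'
+ name: my_repo
+ description: 'My example repository'
+ priority: 50
+ state: present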
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+ from xml.dom.minidom import parseString as parseXML
+ HAS_XML = True
+except ImportError:
+ XML_IMP_ERR = traceback.format_exc()
+ HAS_XML = False
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
+
+ if not HAS_XML:
+ module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd('refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default=False, type='bool'),
+ description=dict(required=False),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of=[['state', 'runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
+ else:
+ shortname = alias
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py
new file mode 100644
index 00000000..c76530f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_device.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
+# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
+# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_device
+
+short_description: Manage a bare metal server in the Packet Host.
+
+description:
+ - Manage a bare metal server in the Packet Host (a "device" in the API terms).
+ - When the machine is created it can optionally wait for public IP address, or for active state.
+ - This module has a dependency on packet >= 1.0.
+ - API is documented at U(https://www.packet.net/developers/api/devices).
+
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+ - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+
+ count:
+ description:
+ - The number of devices to create. The count number can be included in the hostname via the %d string formatter.
+ default: 1
+
+ count_offset:
+ description:
+ - From which number to start the count.
+ default: 1
+
+ device_ids:
+ description:
+ - List of device IDs on which to operate.
+
+ tags:
+ description:
+ - List of device tags.
+ - Currently implemented only for device creation.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+
+ facility:
+ description:
+ - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
+
+ features:
+ description:
+ - Dict with "features" for device creation. See Packet API docs for details.
+
+ hostnames:
+ description:
+ - A hostname of a device, or a list of hostnames.
+ - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
+ - If only one hostname is given, it might be expanded to a list if I(count)>1.
+ aliases: [name]
+
+ locked:
+ description:
+ - Whether to lock a created device.
+ default: false
+ aliases: [lock]
+ type: bool
+
+ operating_system:
+ description:
+ - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
+
+ plan:
+ description:
+ - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+
+ state:
+ description:
+ - Desired state of the device.
+ - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+ - If set to C(active), the module call will block until all the specified devices are in state active according to the Packet API, or until I(wait_timeout) is reached.
+ choices: [present, absent, active, inactive, rebooted]
+ default: present
+
+ user_data:
+ description:
+ - Userdata blob made available to the machine.
+
+ wait_for_public_IPv:
+ description:
+ - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
+ - If set to 4, it will wait until IPv4 is assigned to the instance.
+ - If set to 6, wait until public IPv6 is assigned to the instance.
+ choices: [4,6]
+
+ wait_timeout:
+ description:
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
+ - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consecutively, applying the timeout twice.
+ default: 900
+ ipxe_script_url:
+ description:
+ - URL of custom iPXE script for provisioning.
+ - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
+ always_pxe:
+ description:
+ - Persist PXE as the first boot option.
+ - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
+ default: false
+ type: bool
+
+
+requirements:
+ - "packet-python >= 1.35"
+
+notes:
+ - Doesn't support check mode.
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: Create 1 device
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ tags: ci-xyz
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+# Create the same device and wait until it is in state "active" (when it's
+# ready for other API operations). Fail if the device is not "active" in
+# 10 minutes.
+
+- name: Create device and wait up to 10 minutes for active state
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+ wait_timeout: 600
+
+- name: Create 3 ubuntu devices called server-01, server-02 and server-03
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: server-%02d
+ count: 3
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+ hosts: localhost
+ tasks:
+ - name: Create 3 devices and register their facts
+ community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_stable
+ plan: baremetal_0
+ facility: ewr1
+ locked: true
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - {{ lookup('file', 'my_packet_sshkey') }}
+ coreos:
+ etcd:
+ discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+ addr: $private_ipv4:4001
+ peer-addr: $private_ipv4:7001
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: Wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: Remove 3 devices by uuid
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ state: absent
+ device_ids:
+ - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+ - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+ - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
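+
+# A minimal sketch (the hostnames are illustrative): state "rebooted" reboots active devices
+# and powers on inactive ones.
+- name: Reboot two devices by hostname
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: [coreos-one, coreos-two]
+ state: rebooted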
+'''
+
+RETURN = '''
+changed:
+ description: True if a device was altered in any way (created, modified or removed)
+ type: bool
+ sample: True
+ returned: success
+
+devices:
+ description: Information about each device that was processed
+ type: list
+ sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+ "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
+ "tags": [], "locked": false, "state": "provisioning",
+ "public_ipv6": ""2604:1380:2:5200::3"}]'
+ returned: success
+''' # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
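+# NAME_RE matches a single hostname label (alphanumeric, hyphens only in the middle);
+# HOSTNAME_RE matches one or more dot-separated labels, i.e. a full hostname.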
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+ 'queued',
+ 'provisioning',
+ 'failed',
+ 'powering_on',
+ 'active',
+ 'powering_off',
+ 'inactive',
+ 'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+ """
+ Standard representation for a device as returned by various tasks::
+
+ {
+ 'id': 'device_id'
+ 'hostname': 'device_hostname',
+ 'tags': [],
+ 'locked': false,
+ 'state': 'provisioning',
+ 'ip_addresses': [
+ {
+ "address": "147.75.194.227",
+ "address_family": 4,
+ "public": true
+ },
+ {
+ "address": "2604:1380:2:5200::3",
+ "address_family": 6,
+ "public": true
+ },
+ {
+ "address": "10.100.11.129",
+ "address_family": 4,
+ "public": false
+ }
+ ],
+ "private_ipv4": "10.100.11.129",
+ "public_ipv4": "147.75.194.227",
+ "public_ipv6": "2604:1380:2:5200::3",
+ }
+
+ """
+ device_data = {}
+ device_data['id'] = device.id
+ device_data['hostname'] = device.hostname
+ device_data['tags'] = device.tags
+ device_data['locked'] = device.locked
+ device_data['state'] = device.state
+ device_data['ip_addresses'] = [
+ {
+ 'address': addr_data['address'],
+ 'address_family': addr_data['address_family'],
+ 'public': addr_data['public'],
+ }
+ for addr_data in device.ip_addresses
+ ]
+ # Also include each IP as a key for easier lookup in roles.
+ # Key names:
+ # - public_ipv4
+ # - public_ipv6
+ # - private_ipv4
+ # - private_ipv6 (if there is one)
+ for ipdata in device_data['ip_addresses']:
+ if ipdata['public']:
+ if ipdata['address_family'] == 6:
+ device_data['public_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['public_ipv4'] = ipdata['address']
+ elif not ipdata['public']:
+ if ipdata['address_family'] == 6:
+ # Packet doesn't give private ipv6 yet, but maybe one
+ # day they will
+ device_data['private_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['private_ipv4'] = ipdata['address']
+ return device_data
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def listify_string_name_or_id(s):
+ if ',' in s:
+ return s.split(',')
+ else:
+ return [s]
+
+
+def get_hostname_list(module):
+ # hostnames is a list-typed param, so it should already be a list
+ # (and it is, in Ansible 2.2.1), but in order to be defensive we keep
+ # the code that converts an eventual string to a list
+ hostnames = module.params.get('hostnames')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ if isinstance(hostnames, str):
+ hostnames = listify_string_name_or_id(hostnames)
+ if not isinstance(hostnames, list):
+ raise Exception("name %s is not convertible to list" % hostnames)
+
+ # at this point, hostnames is a list
+ hostnames = [h.strip() for h in hostnames]
+
+ if (len(hostnames) > 1) and (count > 1):
+ _msg = ("If you set count>1, you should only specify one hostname "
+ "with the %d formatter, not a list of hostnames.")
+ raise Exception(_msg)
+
+ if (len(hostnames) == 1) and (count > 0):
+ hostname_spec = hostnames[0]
+ count_range = range(count_offset, count_offset + count)
+ if re.search(r"%\d{0,2}d", hostname_spec):
+ hostnames = [hostname_spec % i for i in count_range]
+ elif count > 1:
+ hostname_spec = '%s%%02d' % hostname_spec
+ hostnames = [hostname_spec % i for i in count_range]
+
+ for hn in hostnames:
+ if not is_valid_hostname(hn):
+ raise Exception("Hostname '%s' does not seem to be valid" % hn)
+
+ if len(hostnames) > MAX_DEVICES:
+ raise Exception("You specified too many hostnames, max is %d" %
+ MAX_DEVICES)
+ return hostnames
+
+
+def get_device_id_list(module):
+ device_ids = module.params.get('device_ids')
+
+ if isinstance(device_ids, str):
+ device_ids = listify_string_name_or_id(device_ids)
+
+ device_ids = [di.strip() for di in device_ids]
+
+ for di in device_ids:
+ if not is_valid_uuid(di):
+ raise Exception("Device ID '%s' does not seem to be valid" % di)
+
+ if len(device_ids) > MAX_DEVICES:
+ raise Exception("You specified too many devices, max is %d" %
+ MAX_DEVICES)
+ return device_ids
+
+
+def create_single_device(module, packet_conn, hostname):
+
+ for param in ('hostnames', 'operating_system', 'plan'):
+ if not module.params.get(param):
+ raise Exception("%s parameter is required for new device."
+ % param)
+ project_id = module.params.get('project_id')
+ plan = module.params.get('plan')
+ tags = module.params.get('tags')
+ user_data = module.params.get('user_data')
+ facility = module.params.get('facility')
+ operating_system = module.params.get('operating_system')
+ locked = module.params.get('locked')
+ ipxe_script_url = module.params.get('ipxe_script_url')
+ always_pxe = module.params.get('always_pxe')
+ if operating_system != 'custom_ipxe':
+ for param in ('ipxe_script_url', 'always_pxe'):
+ if module.params.get(param):
+ raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
+
+ device = packet_conn.create_device(
+ project_id=project_id,
+ hostname=hostname,
+ tags=tags,
+ plan=plan,
+ facility=facility,
+ operating_system=operating_system,
+ userdata=user_data,
+ locked=locked,
+ ipxe_script_url=ipxe_script_url,
+ always_pxe=always_pxe)
+ return device
+
+
+def refresh_device_list(module, packet_conn, devices):
+ device_ids = [d.id for d in devices]
+ new_device_list = get_existing_devices(module, packet_conn)
+ return [d for d in new_device_list if d.id in device_ids]
+
+
+def wait_for_devices_active(module, packet_conn, watched_devices):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ refreshed = watched_devices
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, watched_devices)
+ if all(d.state == 'active' for d in refreshed):
+ return refreshed
+ time.sleep(5)
+ raise Exception("Waiting for state \"active\" timed out for devices: %s"
+ % [d.hostname for d in refreshed if d.state != "active"])
+
+
+def wait_for_public_IPv(module, packet_conn, created_devices):
+
+ def has_public_ip(addr_list, ip_v):
+ return any([a['public'] and a['address_family'] == ip_v and
+ a['address'] for a in addr_list])
+
+ def all_have_public_ip(ds, ip_v):
+ return all([has_public_ip(d.ip_addresses, ip_v) for d in ds])
+
+ address_family = module.params.get('wait_for_public_IPv')
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, created_devices)
+ if all_have_public_ip(refreshed, address_family):
+ return refreshed
+ time.sleep(5)
+
+ raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
+ % (address_family, [d.hostname for d in created_devices]))
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ return packet_conn.list_devices(
+ project_id, params={
+ 'per_page': MAX_DEVICES})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_ids'):
+ device_id_list = get_device_id_list(module)
+ return {'ids': device_id_list, 'hostnames': []}
+ elif module.params.get('hostnames'):
+ hostname_list = get_hostname_list(module)
+ return {'hostnames': hostname_list, 'ids': []}
+
+
+def act_on_devices(module, packet_conn, target_state):
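+ """Create missing devices and drive existing ones toward target_state; return a dict with 'changed' and serialized 'devices'"""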
+ specified_identifiers = get_specified_device_identifiers(module)
+ existing_devices = get_existing_devices(module, packet_conn)
+ changed = False
+ create_hostnames = []
+ if target_state in ['present', 'active', 'rebooted']:
+ # states where we might create non-existing specified devices
+ existing_devices_names = [ed.hostname for ed in existing_devices]
+ create_hostnames = [hn for hn in specified_identifiers['hostnames']
+ if hn not in existing_devices_names]
+
+ process_devices = [d for d in existing_devices
+ if (d.id in specified_identifiers['ids']) or
+ (d.hostname in specified_identifiers['hostnames'])]
+
+ if target_state != 'present':
+ _absent_state_map = {}
+ for s in PACKET_DEVICE_STATES:
+ _absent_state_map[s] = packet.Device.delete
+
+ state_map = {
+ 'absent': _absent_state_map,
+ 'active': {'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ 'inactive': {'active': packet.Device.power_off},
+ 'rebooted': {'active': packet.Device.reboot,
+ 'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ }
+
+ # First do non-creation actions, it might be faster
+ for d in process_devices:
+ if d.state == target_state:
+ continue
+ if d.state in state_map[target_state]:
+ api_operation = state_map[target_state].get(d.state)
+ if api_operation is not None:
+ api_operation(d)
+ changed = True
+ else:
+ _msg = (
+ "I don't know how to process existing device %s from state %s "
+ "to state %s" %
+ (d.hostname, d.state, target_state))
+ raise Exception(_msg)
+
+ # At last create missing devices
+ created_devices = []
+ if create_hostnames:
+ created_devices = [create_single_device(module, packet_conn, n)
+ for n in create_hostnames]
+ if module.params.get('wait_for_public_IPv'):
+ created_devices = wait_for_public_IPv(
+ module, packet_conn, created_devices)
+ changed = True
+
+ processed_devices = created_devices + process_devices
+ if target_state == 'active':
+ processed_devices = wait_for_devices_active(
+ module, packet_conn, processed_devices)
+
+ return {
+ 'changed': changed,
+ 'devices': [serialize_device(d) for d in processed_devices]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ count=dict(type='int', default=1),
+ count_offset=dict(type='int', default=1),
+ device_ids=dict(type='list'),
+ facility=dict(),
+ features=dict(type='dict'),
+ hostnames=dict(type='list', aliases=['name']),
+ tags=dict(type='list', elements='str'),
+ locked=dict(type='bool', default=False, aliases=['lock']),
+ operating_system=dict(),
+ plan=dict(),
+ project_id=dict(required=True),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ user_data=dict(default=None),
+ wait_for_public_IPv=dict(type='int', choices=[4, 6]),
+ wait_timeout=dict(type='int', default=900),
+ ipxe_script_url=dict(default=''),
+ always_pxe=dict(type='bool', default=False),
+ ),
+ required_one_of=[('device_ids', 'hostnames',)],
+ mutually_exclusive=[
+ ('hostnames', 'device_ids'),
+ ('count', 'device_ids'),
+ ('count_offset', 'device_ids'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_devices(module, packet_conn, state))
+ except Exception as e:
+ module.fail_json(msg='failed to set device state %s, error: %s' %
+ (state, to_native(e)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
new file mode 100644
index 00000000..fbc12698
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_ip_subnet
+
+short_description: Assign IP subnet to a bare metal server.
+
+description:
+ - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
+ - IPv4 subnets must come from an already reserved block.
+ - IPv6 subnets must come from the publicly routable /56 block assigned to your project.
+ - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ hostname:
+ description:
+ - A hostname of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ device_id:
+ description:
+ - UUID of a device to/from which to assign/remove a subnet.
+ required: False
+ type: str
+
+ project_id:
+ description:
+ - UUID of a project of the device to/from which to assign/remove a subnet.
+ type: str
+
+ device_count:
+ description:
+ - The number of devices to retrieve from the project. The max allowed value is 1000.
+ - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
+ default: 100
+ type: int
+
+ cidr:
+ description:
+ - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
+ aliases: [name]
+ type: str
+ required: true
+
+ state:
+ description:
+ - Desired state of the IP subnet on the specified device.
+ - With state == C(present), you must specify either hostname or device_id. Subnet with given CIDR will then be assigned to the specified device.
+ - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from specified devices.
+ - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+requirements:
+ - "packet-python >= 1.35"
+ - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostname: myserver
+ cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: Unassign IP address from any device in your project
+ hosts: localhost
+ tasks:
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ cidr: "147.75.201.78/32"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: True if an IP address assignment was altered in any way (created or removed).
+ type: bool
+ sample: True
+ returned: success
+
+device_id:
+ type: str
+ description: UUID of the device associated with the specified IP address.
+ returned: success
+
+subnet:
+ description: Dict with data about the handled IP subnet.
+ type: dict
+ sample:
+ address: 147.75.90.241
+ address_family: 4
+ assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+ cidr: 31
+ created_at: '2017-08-07T15:15:30Z'
+ enabled: True
+ gateway: 147.75.90.240
+ href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+ id: 31eda960-0a16-4c0f-b196-f3dc4928529f
+ manageable: True
+ management: True
+ netmask: 255.255.255.254
+ network: 147.75.90.240
+ public: True
+ returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
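+# NAME_RE matches a single hostname label (alphanumeric, with hyphens allowed
+# only in the middle); HOSTNAME_RE matches dot-separated labels, e.g. "web-01.example.com".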
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
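+ # Only the canonical lowercase string form round-trips through uuid.UUID,
+ # so non-canonical (e.g. uppercase) UUID strings are rejected.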
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ if not is_valid_uuid(project_id):
+ raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+ per_page = module.params.get('device_count')
+ return packet_conn.list_devices(
+ project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_id'):
+ _d_id = module.params.get('device_id')
+ if not is_valid_uuid(_d_id):
+ raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+ return {'device_id': _d_id, 'hostname': None}
+ elif module.params.get('hostname'):
+ _hn = module.params.get('hostname')
+ if not is_valid_hostname(_hn):
+ raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+ return {'hostname': _hn, 'device_id': None}
+ else:
+ return {'hostname': None, 'device_id': None}
+
+
+def parse_subnet_cidr(cidr):
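+ # e.g. "147.75.201.78/32" -> ("147.75.201.78", 32)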
+ if "/" not in cidr:
+ raise Exception("CIDR expression in wrong format, must be address/prefix_len")
+ addr, prefixlen = cidr.split("/")
+ try:
+ prefixlen = int(prefixlen)
+ except ValueError:
+ raise("Wrong prefix length in CIDR expression {0}".format(cidr))
+ return addr, prefixlen
+
+
+def act_on_assignment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ specified_cidr = module.params.get("cidr")
+ address, prefixlen = parse_subnet_cidr(specified_cidr)
+
+ specified_identifier = get_specified_device_identifiers(module)
+
+ if module.check_mode:
+ return return_dict
+
+ if (specified_identifier['hostname'] is None) and (
+ specified_identifier['device_id'] is None):
+ if target_state == 'absent':
+ # The special case to release the IP from any assignment
+ for d in get_existing_devices(module, packet_conn):
+ for ia in d.ip_addresses:
+ if address == ia['address'] and prefixlen == ia['cidr']:
+ packet_conn.call_api(ia['href'], "DELETE")
+ return_dict['changed'] = True
+ return_dict['subnet'] = ia
+ return_dict['device_id'] = d.id
+ return return_dict
+ raise Exception("If you assign an address, you must specify either "
+ "target device ID or target unique hostname.")
+
+ if specified_identifier['device_id'] is not None:
+ device = packet_conn.get_device(specified_identifier['device_id'])
+ else:
+ all_devices = get_existing_devices(module, packet_conn)
+ hn = specified_identifier['hostname']
+ matching_devices = [d for d in all_devices if d.hostname == hn]
+ if len(matching_devices) > 1:
+ raise Exception("There are more than one devices matching given hostname {0}".format(hn))
+ if len(matching_devices) == 0:
+ raise Exception("There is no device matching given hostname {0}".format(hn))
+ device = matching_devices[0]
+
+ return_dict['device_id'] = device.id
+ assignment_dicts = [i for i in device.ip_addresses
+ if i['address'] == address and i['cidr'] == prefixlen]
+ if len(assignment_dicts) > 1:
+ raise Exception("IP address {0} is assigned more than once for device {1}".format(
+ specified_cidr, device.hostname))
+
+ if target_state == "absent":
+ if len(assignment_dicts) == 1:
+ packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
+ return_dict['subnet'] = assignment_dicts[0]
+ return_dict['changed'] = True
+ elif target_state == "present":
+ if len(assignment_dicts) == 0:
+ new_assignment = packet_conn.call_api(
+ "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
+ return_dict['changed'] = True
+ return_dict['subnet'] = new_assignment
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ device_id=dict(type='str'),
+ hostname=dict(type='str'),
+ project_id=dict(type='str'),
+ device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
+ cidr=dict(type='str', required=True, aliases=['name']),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[('hostname', 'device_id')],
+ required_one_of=[['hostname', 'device_id', 'project_id']],
+ required_by=dict(
+ hostname=('project_id',),
+ ),
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_assignment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py
new file mode 100644
index 00000000..38d7ca76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_project.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2019, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_project
+
+short_description: Create/delete a project in Packet host.
+
+description:
+ - Create/delete a project in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#projects).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ payment_method:
+ description:
+ - Payment method is the name of one of the payment methods available to your user.
+ - When blank, the API assumes the default payment method.
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Name for/of the project.
+ type: str
+
+ org_id:
+ description:
+ - UUID of the organization to create a project for.
+ - When blank, the API assumes the default organization.
+ type: str
+
+ id:
+ description:
+ - UUID of the project which you want to remove.
+ type: str
+
+ custom_data:
+ description:
+ - Custom data about the project to create.
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.40"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create new project
+ hosts: localhost
+ tasks:
+ - community.general.packet_project:
+ name: "new project"
+
+- name: Create new project within non-default organization
+ hosts: localhost
+ tasks:
+ - community.general.packet_project:
+ name: "my org project"
+ org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
+
+- name: Remove project by id
+ hosts: localhost
+ tasks:
+ - community.general.packet_project:
+ state: absent
+ id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+
+- name: Create new project with non-default billing method
+ hosts: localhost
+ tasks:
+ - community.general.packet_project:
+ name: "newer project"
+ payment_method: "the other visa"
+'''
+
+RETURN = '''
+changed:
+ description: True if a project was created or removed.
+ type: bool
+ sample: True
+ returned: success
+
+name:
+ description: Name of addressed project.
+ type: str
+ returned: success
+
+id:
+ description: UUID of addressed project.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+ result_dict = {'changed': False}
+ given_id = module.params.get('id')
+ given_name = module.params.get('name')
+ if given_id:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_id == p.id]
+ else:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_name == p.name]
+
+ if target_state == 'present':
+ if len(matching_projects) == 0:
+ org_id = module.params.get('org_id')
+ custom_data = module.params.get('custom_data')
+ payment_method = module.params.get('payment_method')
+
+ if not org_id:
+ params = {
+ "name": given_name,
+ "payment_method_id": payment_method,
+ "customdata": custom_data
+ }
+ new_project_data = packet_conn.call_api("projects", "POST", params)
+ new_project = packet.Project(new_project_data, packet_conn)
+ else:
+ new_project = packet_conn.create_organization_project(
+ org_id=org_id,
+ name=given_name,
+ payment_method_id=payment_method,
+ customdata=custom_data
+ )
+
+ result_dict['changed'] = True
+ matching_projects.append(new_project)
+
+ result_dict['name'] = matching_projects[0].name
+ result_dict['id'] = matching_projects[0].id
+ else:
+ if len(matching_projects) > 1:
+ _msg = ("More than projects matched for module call with state = absent: "
+ "{0}".format(to_native(matching_projects)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_projects) == 1:
+ p = matching_projects[0]
+ result_dict['name'] = p.name
+ result_dict['id'] = p.id
+ result_dict['changed'] = True
+ try:
+ p.delete()
+ except Exception as e:
+ _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+ p.name, p.id, to_native(e)))
+ module.fail_json(msg=_msg)
+ return result_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ org_id=dict(type='str'),
+ payment_method=dict(type='str'),
+ custom_data=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id",)],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ]
+ )
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_project(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py
new file mode 100644
index 00000000..73233d89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_sshkey.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host.
+description:
+ - Create/delete an SSH key in Packet host.
+ - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ label:
+ description:
+ - Label for the key. If you keep it empty, it will be read from the key string.
+ id:
+ description:
+ - UUID of the key which you want to remove.
+ fingerprint:
+ description:
+ - Fingerprint of the key which you want to remove.
+ key:
+ description:
+ - Public Key string ({type} {base64 encoded key} {description}).
+ key_file:
+ description:
+ - File with the public key.
+
+requirements:
+ - "python >= 2.6"
+ - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create sshkey from string
+ hosts: localhost
+ tasks:
+ - community.general.packet_sshkey:
+ key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: Create sshkey from file
+ hosts: localhost
+ tasks:
+ - community.general.packet_sshkey:
+ label: key from file
+ key_file: ~/ff.pub
+
+- name: Remove sshkey by id
+ hosts: localhost
+ tasks:
+ - community.general.packet_sshkey:
+ state: absent
+ id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+ description: True if an SSH key was created or removed.
+ type: bool
+ sample: True
+ returned: always
+sshkeys:
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample: [
+ {
+ "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+ "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+ "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+ "label": "mynewkey33"
+ }
+ ]
+ returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+ sshkey_data = {}
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
+ for name in copy_keys:
+ sshkey_data[name] = getattr(sshkey, name)
+ return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def load_key_string(key_str):
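+ # Expected format: "<type> <base64-key> [<label>]", e.g. the contents of an SSH public key file.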
+ ret_dict = {}
+ key_str = key_str.strip()
+ ret_dict['key'] = key_str
+ cut_key = key_str.split()
+ if len(cut_key) in [2, 3]:
+ if len(cut_key) == 3:
+ ret_dict['label'] = cut_key[2]
+ else:
+ raise Exception("Public key %s is in wrong format" % key_str)
+ return ret_dict
+
+
+def get_sshkey_selector(module):
+ key_id = module.params.get('id')
+ if key_id:
+ if not is_valid_uuid(key_id):
+ raise Exception("sshkey ID %s is not valid UUID" % key_id)
+ selecting_fields = ['label', 'fingerprint', 'id', 'key']
+ select_dict = {}
+ for f in selecting_fields:
+ if module.params.get(f) is not None:
+ select_dict[f] = module.params.get(f)
+
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as _file:
+ loaded_key = load_key_string(_file.read())
+ select_dict['key'] = loaded_key['key']
+ if module.params.get('label') is None:
+ if loaded_key.get('label'):
+ select_dict['label'] = loaded_key['label']
+
+ def selector(k):
+ if 'key' in select_dict:
+ # if key string is specified, compare only the key strings
+ return k.key == select_dict['key']
+ else:
+ # if key string not specified, all the fields must match
+ return all([select_dict[f] == getattr(k, f) for f in select_dict])
+ return selector
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+ selector = get_sshkey_selector(module)
+ existing_sshkeys = packet_conn.list_ssh_keys()
+ matching_sshkeys = list(filter(selector, existing_sshkeys))  # list() so the emptiness check below works on Python 3 as well
+ changed = False
+ if target_state == 'present':
+ if matching_sshkeys == []:
+ # there is no key matching the fields from module call
+ # => create the key with the given label and key string
+ newkey = {}
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as f:
+ newkey = load_key_string(f.read())
+ if module.params.get('key'):
+ newkey = load_key_string(module.params.get('key'))
+ if module.params.get('label'):
+ newkey['label'] = module.params.get('label')
+ for param in ('label', 'key'):
+ if param not in newkey:
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
+ raise Exception(_msg)
+ matching_sshkeys = []
+ new_key_response = packet_conn.create_ssh_key(
+ newkey['label'], newkey['key'])
+ changed = True
+
+ matching_sshkeys.append(new_key_response)
+ else:
+ # state is 'absent' => delete matching keys
+ for k in matching_sshkeys:
+ try:
+ k.delete()
+ changed = True
+ except Exception as e:
+ _msg = ("while trying to remove sshkey %s, id %s %s, "
+ "got error: %s" %
+ (k.label, k.id, target_state, e))
+ raise Exception(_msg)
+
+ return {
+ 'changed': changed,
+ 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ label=dict(type='str', aliases=['name'], default=None),
+ id=dict(type='str', default=None),
+ fingerprint=dict(type='str', default=None),
+ key=dict(type='str', default=None, no_log=True),
+ key_file=dict(type='path', default=None),
+ ),
+ mutually_exclusive=[
+ ('label', 'id'),
+ ('label', 'fingerprint'),
+ ('id', 'fingerprint'),
+ ('key', 'fingerprint'),
+ ('key', 'id'),
+ ('key_file', 'key'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ try:
+ module.exit_json(**act_on_sshkeys(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(msg='failed to set sshkey state: %s' % str(e))
+ else:
+ module.fail_json(msg='%s is not a valid state for this module' % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py
new file mode 100644
index 00000000..2966139a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_volume
+
+short_description: Create/delete a volume in Packet host.
+
+description:
+ - Create/delete a volume in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#volumes).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Desired state of the volume.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Selector for the API-generated name of the volume.
+ type: str
+
+ description:
+ description:
+ - User-defined description attribute for Packet volume.
+ - "It is used used as idempotent identifier - if volume with given
+ description exists, new one is not created."
+ type: str
+
+ id:
+ description:
+ - UUID of a volume.
+ type: str
+
+ plan:
+ description:
+ - storage_1 for standard tier, storage_2 for premium (performance) tier.
+ - Tiers are described at U(https://www.packet.com/cloud/storage/).
+ choices: ['storage_1', 'storage_2']
+ default: 'storage_1'
+ type: str
+
+ facility:
+ description:
+ - Location of the volume.
+ - Volumes can only be attached to device in the same location.
+ type: str
+
+ size:
+ description:
+ - Size of the volume in gigabytes.
+ type: int
+
+ locked:
+ description:
+ - Create new volume locked.
+ type: bool
+ default: False
+
+ billing_cycle:
+ description:
+ - Billing cycle for new volume.
+ choices: ['hourly', 'monthly']
+ default: 'hourly'
+ type: str
+
+ snapshot_policy:
+ description:
+ - Snapshot policy for new volume.
+ type: dict
+
+ suboptions:
+ snapshot_count:
+ description:
+ - How many snapshots to keep, a positive integer.
+ required: True
+ type: int
+
+ snapshot_frequency:
+ description:
+ - Frequency of snapshots.
+ required: True
+ choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+ vars:
+ volname: testvol123
+ project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: 'ewr1'
+ plan: 'storage_1'
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+ register: result_create
+
+ - name: Delete volume
+ community.general.packet_volume:
+ id: "{{ result_create.id }}"
+ project_id: "{{ project_id }}"
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: UUID of specified volume
+ type: str
+ returned: success
+ sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+name:
+ description: The API-generated name of the volume resource.
+ type: str
+ returned: if volume is attached/detached to/from some device
+ sample: "volume-a91dc506"
+description:
+ description: The user-defined description of the volume resource.
+ type: str
+ returned: success
+ sample: "Just another volume"
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+VOLUME_PLANS = ["storage_1", "storage_2"]
+VOLUME_STATES = ["present", "absent"]
+BILLING = ["hourly", "monthly"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(module):
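+ # Selection priority: id, then name, then description.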
+ if module.params.get('id'):
+ i = module.params.get('id')
+ if not is_valid_uuid(i):
+ raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
+ return lambda v: v['id'] == i
+ elif module.params.get('name'):
+ n = module.params.get('name')
+ return lambda v: v['name'] == n
+ elif module.params.get('description'):
+ d = module.params.get('description')
+ return lambda v: v['description'] == d
+
+
+def get_or_fail(params, key):
+ item = params.get(key)
+ if item is None:
+ raise Exception("{0} must be specified for new volume".format(key))
+ return item
+
+
+def act_on_volume(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ s = get_volume_selector(module)
+ project_id = module.params.get("project_id")
+ api_method = "projects/{0}/storage".format(project_id)
+ all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
+ matching_volumes = [v for v in all_volumes if s(v)]
+
+ if target_state == "present":
+ if len(matching_volumes) == 0:
+ params = {
+ "description": get_or_fail(module.params, "description"),
+ "size": get_or_fail(module.params, "size"),
+ "plan": get_or_fail(module.params, "plan"),
+ "facility": get_or_fail(module.params, "facility"),
+ "locked": get_or_fail(module.params, "locked"),
+ "billing_cycle": get_or_fail(module.params, "billing_cycle"),
+ "snapshot_policies": module.params.get("snapshot_policy"),
+ }
+
+ new_volume_data = packet_conn.call_api(api_method, "POST", params)
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = new_volume_data[k]
+
+ else:
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = matching_volumes[0][k]
+
+ else:
+ if len(matching_volumes) > 1:
+ _msg = ("More than one volume matches in module call for absent state: {0}".format(
+ to_native(matching_volumes)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_volumes) == 1:
+ volume = matching_volumes[0]
+ packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = volume[k]
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str', default=None),
+ description=dict(type="str", default=None),
+ name=dict(type='str', default=None),
+ state=dict(choices=VOLUME_STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ project_id=dict(required=True),
+ plan=dict(choices=VOLUME_PLANS, default="storage_1"),
+ facility=dict(type="str"),
+ size=dict(type="int"),
+ locked=dict(type="bool", default=False),
+ snapshot_policy=dict(type='dict', default=None),
+ billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id", "description")],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ('id', 'description'),
+ ('name', 'description'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in VOLUME_STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_volume(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume state {0}: {1}".format(
+ state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
new file mode 100644
index 00000000..a1a38bb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright: (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_volume_attachment
+
+short_description: Attach/detach a volume to a device in the Packet host.
+
+description:
+ - Attach/detach a volume to a device in the Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/volumes/).
+ - "This module creates the attachment route in the Packet API. In order to discover
+ the block devices on the server, you have to run the Attach Scripts,
+ as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+options:
+ state:
+ description:
+ - Indicate desired state of the attachment.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ project_id:
+ description:
+ - UUID of the project to which the device and volume belong.
+ type: str
+ required: true
+
+ volume:
+ description:
+ - Selector for the volume.
+ - It can be a UUID, an API-generated volume name, or user-defined description string.
+ - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
+ type: str
+ required: true
+
+ device:
+ description:
+ - Selector for the device.
+ - It can be a UUID of the device, or a hostname.
+ - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+
+ vars:
+ volname: testvol
+ devname: testdev
+ project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: ewr1
+ plan: storage_1
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+
+ - name: Create a device
+ packet_device:
+ project_id: "{{ project_id }}"
+ hostnames: "{{ devname }}"
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: ewr1
+ state: present
+
+ - name: Attach testvol to testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+
+ - name: Detach testvol from testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+ state: absent
+'''
+
+RETURN = '''
+volume_id:
+ description: UUID of volume addressed by the module call.
+ type: str
+ returned: success
+
+device_id:
+ description: UUID of device addressed by the module call.
+ type: str
+ returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils._text import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['hostname'] == spec
+
+
+def do_attach(packet_conn, vol_id, dev_id):
+ api_method = "storage/{0}/attachments".format(vol_id)
+ packet_conn.call_api(
+ api_method,
+ params={"device_id": dev_id},
+ type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
+ def dev_match(a):
+ return (dev_id is None) or (a['device']['id'] == dev_id)
+ for a in vol['attachments']:
+ if dev_match(a):
+ packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(l, resource_type, spec):
+ if len(l) > 1:
+ _msg = ("more than one {0} matches specification {1}: {2}".format(
+ resource_type, spec, l))
+ raise Exception(_msg)
+ if len(l) == 0:
+ _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+ raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+ if len(volume_dict['attachments']) == 0:
+ return []
+ else:
+ return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ volspec = module.params.get("volume")
+ devspec = module.params.get("device")
+ if devspec is None and target_state == 'present':
+ raise Exception("If you want to attach a volume, you must specify a device.")
+ project_id = module.params.get("project_id")
+ volumes_api_method = "projects/{0}/storage".format(project_id)
+ volumes = packet_conn.call_api(volumes_api_method,
+ params={'include': 'facility,attachments.device'})['volumes']
+ v_match = get_volume_selector(volspec)
+ matching_volumes = [v for v in volumes if v_match(v)]
+ validate_selected(matching_volumes, "volume", volspec)
+ volume = matching_volumes[0]
+ return_dict['volume_id'] = volume['id']
+
+ device = None
+ if devspec is not None:
+ devices_api_method = "projects/{0}/devices".format(project_id)
+ devices = packet_conn.call_api(devices_api_method)['devices']
+ d_match = get_device_selector(devspec)
+ matching_devices = [d for d in devices if d_match(d)]
+ validate_selected(matching_devices, "device", devspec)
+ device = matching_devices[0]
+ return_dict['device_id'] = device['id']
+
+ attached_device_ids = get_attached_dev_ids(volume)
+
+ if target_state == "present":
+ if len(attached_device_ids) == 0:
+ do_attach(packet_conn, volume['id'], device['id'])
+ return_dict['changed'] = True
+ elif device['id'] not in attached_device_ids:
+ # Don't reattach volume which is attached to a different device.
+ # Rather fail than force remove a device on state == 'present'.
+ raise Exception("volume {0} is already attached to device {1}".format(
+ volume, attached_device_ids))
+ else:
+ if device is None:
+ if len(attached_device_ids) > 0:
+ do_detach(packet_conn, volume)
+ return_dict['changed'] = True
+ elif device['id'] in attached_device_ids:
+ do_detach(packet_conn, volume, device['id'])
+ return_dict['changed'] = True
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ volume=dict(type="str", required=True),
+ project_id=dict(type="str", required=True),
+ device=dict(type="str"),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(
+ **act_on_volume_attachment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py
new file mode 100644
index 00000000..0931ddc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pacman.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright: (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright: (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants.
+author:
+ - Indrajit Raychaudhuri (@indrajitr)
+ - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com>
+ - Maxime de Roucy (@tchernomax)
+options:
+ name:
+ description:
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
+ Can't be used in combination with C(upgrade).
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Desired state of the package.
+ default: present
+ choices: [ absent, latest, present, installed, removed ]
+ type: str
+
+ force:
+ description:
+ - When removing a package, force its removal without any checks.
+ Same as `extra_args="--nodeps --nodeps"`.
+ When used with C(update_cache), force a redownload of the repository databases.
+ Same as `update_cache_extra_args="--refresh --refresh"`.
+ default: no
+ type: bool
+
+ extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(state).
+ default:
+ type: str
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists.
+ - This can be run as part of a package installation or as a separate step.
+ default: no
+ type: bool
+ aliases: [ update-cache ]
+
+ update_cache_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(update_cache).
+ default:
+ type: str
+
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ Can't be used in combination with C(name).
+ default: no
+ type: bool
+
+ upgrade_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(upgrade).
+ default:
+ type: str
+
+notes:
+ - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when upgrade is set to yes
+ type: list
+ sample: [ package, other-package ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo from repo
+ community.general.pacman:
+ name: foo
+ state: present
+
+- name: Install package bar from file
+ community.general.pacman:
+ name: ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package foo from repo and bar from file
+ community.general.pacman:
+ name:
+ - foo
+ - ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Upgrade package foo
+ community.general.pacman:
+ name: foo
+ state: latest
+ update_cache: yes
+
+- name: Remove packages foo and bar
+ community.general.pacman:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ extra_args: --recursive
+
+- name: Run the equivalent of "pacman -Sy" as a separate step
+ community.general.pacman:
+ update_cache: yes
+
+- name: Run the equivalent of "pacman -Su" as a separate step
+ community.general.pacman:
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Syu" as a separate step
+ community.general.pacman:
+ update_cache: yes
+ upgrade: yes
+
+- name: Run the equivalent of "pacman -Rdd", force remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ force: yes
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_version(pacman_output):
+ """Take pacman -Q or pacman -S output and get the Version"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[1]
+ return None
+
+
+def get_name(module, pacman_output):
+ """Take pacman -Q or pacman -S output and get the package name"""
+ fields = pacman_output.split()
+ if len(fields) == 2:
+ return fields[0]
+ module.fail_json(msg="get_name: fail to retrieve package name from pacman output")
+
+
+def query_package(module, pacman_path, name, state="present"):
+ """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
+ boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available
+ """
+ if state == "present":
+ lcmd = "%s --query %s" % (pacman_path, name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False, False
+ else:
+ # a zero exit code doesn't always mean the package is installed
+ # for example, if the queried package name is "provided" by another package
+ installed_name = get_name(module, lstdout)
+ if installed_name != name:
+ return False, False, False
+
+ # get the version installed locally (if any)
+ lversion = get_version(lstdout)
+
+ rcmd = "%s --sync --print-format \"%%n %%v\" %s" % (pacman_path, name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ # get the version in the repository
+ rversion = get_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally, and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion), False
+
+ # package is installed but we cannot fetch the remote version; the last True flags the error
+ return True, True, True
+
+
+def update_package_db(module, pacman_path):
+ if module.params['force']:
+ module.params["update_cache_extra_args"] += " --refresh --refresh"
+
+ cmd = "%s --sync --refresh %s" % (pacman_path, module.params["update_cache_extra_args"])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def upgrade(module, pacman_path):
+ cmdupgrade = "%s --sync --sysupgrade --quiet --noconfirm %s" % (pacman_path, module.params["upgrade_extra_args"])
+ cmdneedrefresh = "%s --query --upgrades" % (pacman_path)
+ rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
+ data = stdout.split('\n')
+ data.remove('')
+ packages = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if rc == 0:
+ # Match lines of `pacman -Qu` output of the form:
+ # (package name) (before version-release) -> (after version-release)
+ # e.g., "ansible 2.7.1-1 -> 2.7.2-1"
+ regex = re.compile(r'([\w+\-.@]+) (\S+-\S+) -> (\S+-\S+)')
+ for p in data:
+ m = regex.search(p)
+ if m is None:
+ continue
+ packages.append(m.group(1))
+ if module._diff:
+ diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
+ diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
+ if module.check_mode:
+ module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
+
+
+def remove_packages(module, pacman_path, packages):
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ if module.params["force"]:
+ module.params["extra_args"] += " --nodeps --nodeps"
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s --remove --noconfirm --noprogressbar %s %s" % (pacman_path, module.params["extra_args"], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ if module._diff:
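+ # parse the package summary line of pacman's output to record the removed packages in the diff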
+ d = stdout.split('\n')[2].split(' ')[2:]
+ for i, pkg in enumerate(d):
+ d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
+ diff['before'] += "%s\n" % pkg
+ data.append('\n'.join(d))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pacman_path, state, packages, package_files):
+ install_c = 0
+ package_err = []
+ message = ""
+ data = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ }
+
+ to_install_repos = []
+ to_install_files = []
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present or state == latest and is up-to-date then skip
+ installed, updated, latestError = query_package(module, pacman_path, package)
+ if latestError and state == 'latest':
+ package_err.append(package)
+
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if package_files[i]:
+ to_install_files.append(package_files[i])
+ else:
+ to_install_repos.append(package)
+
+ if to_install_repos:
+ cmd = "%s --sync --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_repos))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
+
+ # As we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_repos)
+
+ if to_install_files:
+ cmd = "%s --upgrade --noconfirm --noprogressbar --needed %s %s" % (pacman_path, module.params["extra_args"], " ".join(to_install_files))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
+
+ # As we pass `--needed`, pacman returns a single line of ` there is nothing to do` if no change is performed.
+ # The check for > 3 is here because we pick the 4th line in normal operation.
+ if len(stdout.split('\n')) > 3:
+ data = stdout.split('\n')[3].split(' ')[2:]
+ data = [i for i in data if i != '']
+ for i, pkg in enumerate(data):
+ data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
+ if module._diff:
+ diff['after'] += "%s\n" % pkg
+
+ install_c += len(to_install_files)
+
+ if state == 'latest' and len(package_err) > 0:
+ message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
+
+ module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
+
+
+def check_packages(module, pacman_path, packages, state):
+ would_be_changed = []
+ diff = {
+ 'before': '',
+ 'after': '',
+ 'before_header': '',
+ 'after_header': ''
+ }
+
+ for package in packages:
+ installed, updated, unknown = query_package(module, pacman_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+
+ if module._diff and (state == 'removed'):
+ diff['before_header'] = 'removed'
+ diff['before'] = '\n'.join(would_be_changed) + '\n'
+ elif module._diff and ((state == 'present') or (state == 'latest')):
+ diff['after_header'] = 'installed'
+ diff['after'] = '\n'.join(would_be_changed) + '\n'
+
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state), diff=diff)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
+
+
+def expand_package_groups(module, pacman_path, pkgs):
+ expanded = []
+
+ __, stdout, __ = module.run_command([pacman_path, "--sync", "--groups", "--quiet"], check_rc=True)
+ available_groups = stdout.splitlines()
+
+ for pkg in pkgs:
+ if pkg: # avoid empty strings
+ if pkg in available_groups:
+ # A group was found matching the package name: expand it
+ cmd = [pacman_path, "--sync", "--groups", "--quiet", pkg]
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ expanded.extend([name.strip() for name in stdout.splitlines()])
+ else:
+ expanded.append(pkg)
+
+ return expanded
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=['pkg', 'package']),
+ state=dict(type='str', default='present', choices=['present', 'installed', 'latest', 'absent', 'removed']),
+ force=dict(type='bool', default=False),
+ extra_args=dict(type='str', default=''),
+ upgrade=dict(type='bool', default=False),
+ upgrade_extra_args=dict(type='str', default=''),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ update_cache_extra_args=dict(type='str', default=''),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True,
+ )
+
+ pacman_path = module.get_bin_path('pacman', True)
+ module.run_command_environ_update = dict(LC_ALL='C')
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p["update_cache"] and not module.check_mode:
+ update_package_db(module, pacman_path)
+ if not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Updated the package master lists')
+
+ if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
+ module.exit_json(changed=True, msg='Would have updated the package cache')
+
+ if p['upgrade']:
+ upgrade(module, pacman_path)
+
+ if p['name']:
+ pkgs = expand_package_groups(module, pacman_path, p['name'])
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if not pkg: # avoid empty strings
+ continue
+ elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z|zst))?$", pkg):
+                # The package given is a filename; extract the raw pkg name from
+                # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
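+                # e.g. (hypothetical) '/tmp/foo-1.0-1-x86_64.pkg.tar.zst' -> 'foo': drop the path, then the '-<version>' suffix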
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pacman_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pacman_path, pkgs)
+ else:
+ module.exit_json(changed=False, msg="No package specified to work on.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py
new file mode 100644
index 00000000..306b596b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+  - This module will let you create PagerDuty maintenance windows.
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+ - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+ - PagerDuty API access
+options:
+ state:
+ type: str
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ choices: [ "running", "started", "ongoing", "absent" ]
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ user:
+ type: str
+ description:
+      - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+ token:
+ type: str
+ description:
+      - A PagerDuty token, generated on the PagerDuty site. It is used for authorization.
+ required: true
+ requester_id:
+ type: str
+ description:
+      - ID of the user making the request. Only needed when creating a maintenance window.
+ service:
+ type: list
+ description:
+      - A comma-separated list of PagerDuty service IDs.
+ aliases: [ services ]
+ window_id:
+ type: str
+ description:
+      - ID of the maintenance window. Only needed when removing (C(state=absent)) a maintenance window.
+ hours:
+ type: str
+ description:
+ - Length of maintenance window in hours.
+ default: '1'
+ minutes:
+ type: str
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ default: '0'
+ desc:
+ type: str
+ description:
+ - Short description of maintenance window.
+ default: Created by Ansible
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: List ongoing maintenance windows using a token
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
+
+- name: Create a 1 hour maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ token: yourtoken
+ state: running
+ service: FOO123
+
+- name: Create a 5 minute maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
+
+
+- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment"
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
+ register: pd_window
+
+- name: Delete the previous maintenance window
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: absent
+ window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window from a separate playbook than its creation,
+# and if it is the only existing maintenance window
+- name: Check
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: ongoing
+ register: pd_window
+
+- name: Delete
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: absent
+ window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
+'''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class PagerDutyRequest(object):
+ def __init__(self, module, name, user, token):
+ self.module = module
+ self.name = name
+ self.user = user
+ self.token = token
+ self.headers = {
+ 'Content-Type': 'application/json',
+ "Authorization": self._auth_header(),
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ def ongoing(self, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, False
+
+ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
+ if not requester_id:
+            self.module.fail_json(msg="requester_id is required when a maintenance window should be created")
+
+ url = 'https://api.pagerduty.com/maintenance_windows'
+
+ headers = dict(self.headers)
+ headers.update({'From': requester_id})
+
+ start, end = self._compute_start_end_time(hours, minutes)
+ services = self._create_services_payload(service)
+
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
+
+ data = json.dumps(request_data)
+ response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 201:
+ self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _create_services_payload(self, service):
+ if (isinstance(service, list)):
+ return [{'id': s, 'type': 'service_reference'} for s in service]
+ else:
+ return [{'id': service, 'type': 'service_reference'}]
+
+ def _compute_start_end_time(self, hours, minutes):
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
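+        # Render both as UTC ISO 8601 strings, e.g. '2021-01-01T12:00:00Z' (the format the PagerDuty REST API accepts).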
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return start, end
+
+ def absent(self, window_id, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows/" + window_id
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers, method='DELETE')
+ if info['status'] != 204:
+ self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _auth_header(self):
+ return "Token token=%s" % self.token
+
+ def _read_response(self, response):
+ try:
+ return json.loads(response.read())
+ except Exception:
+ return ""
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=False),
+ user=dict(required=False),
+ token=dict(required=True, no_log=True),
+ service=dict(required=False, type='list', aliases=["services"]),
+ window_id=dict(required=False),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False), # @TODO change to int?
+ minutes=dict(default='0', required=False), # @TODO change to int?
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ service = module.params['service']
+ window_id = module.params['window_id']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ pd = PagerDutyRequest(module, name, user, token)
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = pd.ongoing()
+
+ if state == "absent":
+ (rc, out, changed) = pd.absent(window_id)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
new file mode 100644
index 00000000..736ada5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+  - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events.
+author:
+ - "Amanpreet Singh (@ApsOps)"
+requirements:
+ - PagerDuty API access
+options:
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ service_id:
+ type: str
+ description:
+      - ID of the PagerDuty service for which incidents will be triggered, acknowledged or resolved.
+ required: true
+ service_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services.
+      - This is the "integration key" listed on the "Integrations" tab of a PagerDuty service.
+ state:
+ type: str
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ type: str
+ description:
+      - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+ required: true
+ desc:
+ type: str
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ type: str
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Trigger an incident with just the basic options
+ community.general.pagerduty_alert:
+ name: companyabc
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger an incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+
+- name: Acknowledge an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: acknowledged
+ incident_key: somekey
+ desc: "some text for incident's log"
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: resolved
+ incident_key: somekey
+ desc: "some text for incident's log"
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+
+
+def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
+ url = 'https://api.pagerduty.com/incidents'
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key,
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ params = {
+ 'service_ids[]': service_id,
+ 'sort_by': 'incident_number:desc',
+ 'time_zone': 'UTC'
+ }
+ if incident_key:
+ params['incident_key'] = incident_key
+
+ url_parts = list(urlparse(url))
+ url_parts[4] = urlencode(params, True)
+
+ url = urlunparse(url_parts)
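+    # Resulting request URL, roughly (values are URL-encoded):
+    #   https://api.pagerduty.com/incidents?service_ids[]=<id>&sort_by=incident_number:desc&time_zone=UTC[&incident_key=<key>]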
+
+ response, info = http_call(module, url, method='get', headers=headers)
+
+ if info['status'] != 200:
+        module.fail_json(msg="failed to check current incident status. "
+                             "Reason: %s" % info['msg'])
+
+ incidents = json.loads(response.read())["incidents"]
+ msg = "No corresponding incident"
+
+ if len(incidents) == 0:
+ if state in ('acknowledged', 'resolved'):
+ return msg, False
+ return msg, True
+ elif state != incidents[0]["status"]:
+ return incidents[0], True
+
+ return incidents[0], False
+
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ service_id=dict(required=True),
+ service_key=dict(required=False, no_log=True),
+ integration_key=dict(required=False, no_log=True),
+ api_key=dict(required=True, no_log=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_id = module.params['service_id']
+ integration_key = module.params['integration_key']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ if integration_key is None:
+ if service_key is not None:
+ integration_key = service_key
+            module.warn('"service_key" is an obsolete parameter and will be removed.'
+                        ' Please use "integration_key" instead.')
+ else:
+            module.fail_json(msg="'integration_key' is a required parameter")
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py
new file mode 100644
index 00000000..358a6961
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_change.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: pagerduty_change
+short_description: Track a code or infrastructure change as a PagerDuty change event
+version_added: 1.3.0
+description:
+ - This module will let you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event will be created each time it is run.
+author:
+ - Adam Vaughan (@adamvaughan)
+requirements:
+ - PagerDuty integration key
+options:
+ integration_key:
+ description:
+ - The integration key that identifies the service the change was made to.
+ This can be found by adding an integration to a service in PagerDuty.
+ required: true
+ type: str
+ summary:
+ description:
+ - A short description of the change that occurred.
+ required: true
+ type: str
+ source:
+ description:
+ - The source of the change event.
+ default: Ansible
+ type: str
+ user:
+ description:
+ - The name of the user or process that triggered this deployment.
+ type: str
+ repo:
+ description:
+ - The URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+ - An identifier of the revision being deployed, typically a number or SHA from a version control system.
+ required: false
+ type: str
+ environment:
+ description:
+ - The environment name, typically C(production), C(staging), etc.
+ required: false
+ type: str
+ link_url:
+ description:
+ - A URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ link_text:
+ description:
+ - Descriptive text for a URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ url:
+ description:
+ - URL to submit the change event to.
+ required: false
+ default: https://events.pagerduty.com/v2/change/enqueue
+ type: str
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target URL will not be validated.
+ This should only be used on personally controlled sites using self-signed certificates.
+ required: false
+ default: yes
+ type: bool
+notes:
+  - Supports C(check_mode). Note that check mode does nothing except return C(changed=true) when the I(url) appears to be valid.
+'''
+
+EXAMPLES = '''
+- name: Track the deployment as a PagerDuty change event
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+
+- name: Track the deployment as a PagerDuty change event with more details
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+ source: Ansible Deploy
+ user: ansible
+ repo: github.com/ansible/ansible
+ revision: '4.2'
+ environment: production
+ link_url: https://github.com/ansible-collections/community.general/pull/1269
+ link_text: View changes on GitHub
+'''
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+from datetime import datetime
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ integration_key=dict(required=True, type='str', no_log=True),
+ summary=dict(required=True, type='str'),
+ source=dict(required=False, default='Ansible', type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ environment=dict(required=False, type='str'),
+ link_url=dict(required=False, type='str'),
+ link_text=dict(required=False, type='str'),
+ url=dict(required=False,
+ default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
+
+ url = module.params['url']
+ headers = {'Content-Type': 'application/json'}
+
+ if module.check_mode:
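+        # Check mode only probes the endpoint with an empty POST; the Events API is expected to
+        # reject it with HTTP 400, which is taken to mean the URL is reachable and looks correct.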
+ _response, info = fetch_url(
+ module, url, headers=headers, method='POST')
+
+ if info['status'] == 400:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status']))
+
+ custom_details = {}
+
+ if module.params['user']:
+ custom_details['user'] = module.params['user']
+
+ if module.params['repo']:
+ custom_details['repo'] = module.params['repo']
+
+ if module.params['revision']:
+ custom_details['revision'] = module.params['revision']
+
+ if module.params['environment']:
+ custom_details['environment'] = module.params['environment']
+
+ now = datetime.utcnow()
+ timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
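+    # e.g. '2020-11-27T06:44:31.123456Z' (UTC, microsecond precision)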
+
+ payload = {
+ 'summary': module.params['summary'],
+ 'source': module.params['source'],
+ 'timestamp': timestamp,
+ 'custom_details': custom_details
+ }
+
+ event = {
+ 'routing_key': module.params['integration_key'],
+ 'payload': payload
+ }
+
+ if module.params['link_url']:
+ link = {
+ 'href': module.params['link_url']
+ }
+
+ if module.params['link_text']:
+ link['text'] = module.params['link_text']
+
+ event['links'] = [link]
+
+ _response, info = fetch_url(
+ module, url, data=module.jsonify(event), headers=headers, method='POST')
+
+ if info['status'] == 202:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Creating PagerDuty change event failed with %d' % (info['status']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py
new file mode 100644
index 00000000..4b20a321
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pagerduty_user.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pagerduty_user
+short_description: Manage a user account on PagerDuty
+description:
+ - This module manages the creation/removal of a user account on PagerDuty.
+version_added: '1.3.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - pdpyras python module = 4.1.1
+ - PagerDuty API Access
+options:
+ access_token:
+ description:
+ - An API access token to authenticate with the PagerDuty REST API.
+ required: true
+ type: str
+ pd_user:
+ description:
+ - Name of the user in PagerDuty.
+ required: true
+ type: str
+ pd_email:
+ description:
+ - The user's email address.
+ - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ pd_role:
+ description:
+ - The user's role.
+ choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']
+ default: 'responder'
+ type: str
+ state:
+ description:
+ - State of the user.
+ - On C(present), it creates a user if the user doesn't exist.
+ - On C(absent), it removes a user if the account exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ pd_teams:
+ description:
+ - The teams to which the user belongs.
+ - Required if I(state=present).
+ type: list
+ elements: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Create a user account on PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ pd_role: user_pd_role
+ pd_teams: user_pd_teams
+ state: "present"
+
+- name: Remove a user account from PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ state: "absent"
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+from os import path
+
+try:
+ from pdpyras import APISession
+ HAS_PD_PY = True
+except ImportError:
+ HAS_PD_PY = False
+ PD_IMPORT_ERR = traceback.format_exc()
+
+try:
+ from pdpyras import PDClientError
+ HAS_PD_CLIENT_ERR = True
+except ImportError:
+ HAS_PD_CLIENT_ERR = False
+ PD_CLIENT_ERR_IMPORT_ERR = traceback.format_exc()
+
+
+class PagerDutyUser(object):
+ def __init__(self, module, session):
+ self._module = module
+ self._apisession = session
+
+ # check if the user exists
+ def does_user_exist(self, pd_email):
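+        # Iterate over all users in the account; return the matching user's ID,
+        # or fall through (returning None implicitly) when no user has this email.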
+ for user in self._apisession.iter_all('users'):
+ if user['email'] == pd_email:
+ return user['id']
+
+ # create a user account on PD
+ def add_pd_user(self, pd_name, pd_email, pd_role):
+ try:
+ user = self._apisession.persist('users', 'email', {
+ "name": pd_name,
+ "email": pd_email,
+ "type": "user",
+ "role": pd_role,
+ })
+ return user
+
+ except PDClientError as e:
+ if e.response.status_code == 400:
+ self._module.fail_json(
+ msg="Failed to add %s due to invalid argument" % (pd_name))
+ if e.response.status_code == 401:
+ self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name))
+ if e.response.status_code == 402:
+ self._module.fail_json(
+ msg="Failed to add %s due to inability to perform the action within the API token" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to add %s due to inability to review the requested resource within the API token" % (pd_name))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to add %s due to reaching the limit of making requests" % (pd_name))
+
+ # delete a user account from PD
+ def delete_user(self, pd_user_id, pd_name):
+ try:
+ user_path = path.join('/users/', pd_user_id)
+ self._apisession.rdelete(user_path)
+
+ except PDClientError as e:
+ if e.response.status_code == 404:
+ self._module.fail_json(
+ msg="Failed to remove %s as user was not found" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to remove %s due to inability to review the requested resource within the API token" % (pd_name))
+ if e.response.status_code == 401:
+ # print out the list of incidents
+ pd_incidents = self.get_incidents_assigned_to_user(pd_user_id)
+ self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to remove %s due to reaching the limit of making requests" % (pd_name))
+
+ # get incidents assigned to a user
+ def get_incidents_assigned_to_user(self, pd_user_id):
+ incident_info = {}
+ incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]})
+
+ for incident in incidents:
+ incident_info = {
+ 'title': incident['title'],
+ 'key': incident['incident_key'],
+ 'status': incident['status']
+ }
+ return incident_info
+
+ # add a user to a team/teams
+ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role):
+ updated_team = None
+ for team in pd_teams:
+ team_info = self._apisession.find('teams', team, attribute='name')
+ if team_info is not None:
+ try:
+ updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={
+ 'role': pd_role
+ })
+ except PDClientError:
+ updated_team = None
+ return updated_team
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ access_token=dict(type='str', required=True, no_log=True),
+ pd_user=dict(type='str', required=True),
+ pd_email=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ pd_role=dict(type='str', default='responder',
+ choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
+ pd_teams=dict(type='list', elements='str', required=False)),
+ required_if=[['state', 'present', ['pd_teams']], ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PD_PY:
+ module.fail_json(msg=missing_required_lib('pdpyras', url='https://github.com/PagerDuty/pdpyras'), exception=PD_IMPORT_ERR)
+
+ if not HAS_PD_CLIENT_ERR:
+ module.fail_json(msg=missing_required_lib('PDClientError', url='https://github.com/PagerDuty/pdpyras'), exception=PD_CLIENT_ERR_IMPORT_ERR)
+
+ access_token = module.params['access_token']
+ pd_user = module.params['pd_user']
+ pd_email = module.params['pd_email']
+ state = module.params['state']
+ pd_role = module.params['pd_role']
+ pd_teams = module.params['pd_teams']
+
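+    # Map the module's user-facing role choices to the role values the PagerDuty REST API expects.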
+ if pd_role:
+ pd_role_gui_value = {
+ 'global_admin': 'admin',
+ 'manager': 'user',
+ 'responder': 'limited_user',
+ 'observer': 'observer',
+ 'stakeholder': 'read_only_user',
+ 'limited_stakeholder': 'read_only_limited_user',
+ 'restricted_access': 'restricted_access'
+ }
+ pd_role = pd_role_gui_value[pd_role]
+
+ # authenticate with PD API
+ try:
+ session = APISession(access_token)
+ except PDClientError as e:
+ module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e)
+
+ user = PagerDutyUser(module, session)
+
+ user_exists = user.does_user_exist(pd_email)
+
+ if user_exists:
+ if state == "absent":
+ # remove user
+ if not module.check_mode:
+ user.delete_user(user_exists, pd_user)
+ module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user)
+ else:
+ module.exit_json(changed=False, result="User %s already exists." % pd_user)
+
+    # in case the user does not exist
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="User %s was not found." % pd_user)
+
+ else:
+            # add the user with the default notification rule and contact info (email)
+ if not module.check_mode:
+ user.add_pd_user(pd_user, pd_email, pd_role)
+ # get user's id
+ pd_user_id = user.does_user_exist(pd_email)
+            # add the user to the team(s)
+ user.add_user_to_teams(pd_user_id, pd_teams, pd_role)
+ module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py
new file mode 100644
index 00000000..c63493ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pam_limits.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pam_limits
+author:
+ - "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The C(pam_limits) module modifies PAM limits. The default file is
+ C(/etc/security/limits.conf). For the full documentation, see C(man 5
+ limits.conf).
+options:
+ domain:
+ type: str
+ description:
+      - A username, @groupname, wildcard, or uid/gid range.
+ required: true
+ limit_type:
+ type: str
+ description:
+      - Limit type; see C(man 5 limits.conf) for an explanation.
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ type: str
+ description:
+      - The limit to be set.
+ required: true
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
+ value:
+ type: str
+ description:
+ - The value of the limit.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ type: bool
+ default: "no"
+ use_min:
+ description:
+      - If set to C(yes), the minimal value will be used or conserved.
+        If the specified value is lower than the value in the file, the file content is replaced with the new value;
+        otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ use_max:
+ description:
+      - If set to C(yes), the maximal value will be used or conserved.
+        If the specified value is higher than the value in the file, the file content is replaced with the new value;
+        otherwise the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ dest:
+ type: str
+ description:
+      - Path of the limits.conf file to modify.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ type: str
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+notes:
+  - If the C(dest) file does not exist, it is created.
+'''
+
+EXAMPLES = '''
+- name: Add or modify nofile soft limit for the user joe
+ community.general.pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+ community.general.pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+- name: Add or modify memlock, both soft and hard, limit for the user james with a comment.
+ community.general.pam_limits:
+ domain: james
+ limit_type: '-'
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
+
+- name: Add or modify hard nofile limits for wildcard domain
+ community.general.pam_limits:
+ domain: '*'
+ limit_type: hard
+ limit_item: nofile
+ value: 39693561
+'''
+
+import os
+import os.path
+import tempfile
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
+
+ pam_types = ['soft', 'hard', '-']
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ domain=dict(required=True, type='str'),
+ limit_type=dict(required=True, type='str', choices=pam_types),
+ limit_item=dict(required=True, type='str', choices=pam_items),
+ value=dict(required=True, type='str'),
+ use_max=dict(default=False, type='bool'),
+ use_min=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ dest=dict(default=limits_conf, type='str'),
+ comment=dict(required=False, default='', type='str')
+ )
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
+ else:
+ limits_conf_dir = os.path.dirname(limits_conf)
+ if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
+ open(limits_conf, 'a').close()
+ changed = True
+ else:
+ module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+        module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or a positive number. Refer to manual pages for more details.")
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'rb')
+ # Tempfile
+ nf = tempfile.NamedTemporaryFile(mode='w+')
+
+ found = False
+ new_value = value
+
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except Exception:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
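+        # A valid limits.conf entry has exactly four whitespace-separated fields:
+        #   <domain> <type> <item> <value>
+        # anything else is written back untouched.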
+ line_fields = newline.split(' ')
+
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
+ if use_max:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ if use_min:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+    # Move the tempfile over the original file
+ module.atomic_move(nf.name, f.name)
+
+ try:
+ nf.close()
+ except Exception:
+ pass
+
+ res_args = dict(
+ changed=changed, msg=message
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py
new file mode 100644
index 00000000..45f00826
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pamd.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: pamd
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Manage PAM Modules
+description:
+ - Edit PAM service's type, control, module path and module arguments.
+ - In order for a PAM rule to be modified, the type, control and
+ module_path must match an existing rule. See man(5) pam.d for details.
+options:
+ name:
+ description:
+ - The name generally refers to the PAM service file to
+ change, for example system-auth.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ control:
+ description:
+ - The control of the PAM rule being modified.
+ - This may be a complicated control with brackets. If this is the case, be
+ sure to put "[bracketed controls]" in quotes.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ module_path:
+ description:
+ - The module path of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ new_type:
+ description:
+ - The new type to assign to the new rule.
+ type: str
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ new_control:
+ description:
+ - The new control to assign to the new rule.
+ type: str
+ new_module_path:
+ description:
+ - The new module path to be assigned to the new rule.
+ type: str
+ module_arguments:
+ description:
+ - When state is C(updated), the module_arguments will replace existing module_arguments.
+ - When state is C(args_absent) args matching those listed in module_arguments will be removed.
+ - When state is C(args_present) any args listed in module_arguments are added if
+ missing from the existing rule.
+ - Furthermore, if the module argument takes a value denoted by C(=),
+ the value will be changed to that specified in module_arguments.
+ type: list
+ elements: str
+ state:
+ description:
+ - The default of C(updated) will modify an existing rule if type,
+ control and module_path all match an existing rule.
+ - With C(before), the new rule will be inserted before a rule matching type,
+ control and module_path.
+      - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
+ control and module_path.
+ - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
+ - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
+ - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+ type: str
+ choices: [ absent, before, after, args_absent, args_present, updated ]
+ default: updated
+ path:
+ description:
+ - This is the path to the PAM service files.
+ type: path
+ default: /etc/pam.d
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Update pamd rule's control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_control: sufficient
+
+- name: Update pamd rule's complex control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ new_control: '[success=2 default=ignore]'
+
+- name: Insert a new rule before an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_type: auth
+ new_control: sufficient
+ new_module_path: pam_faillock.so
+ state: before
+
+- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so
+ community.general.pamd:
+ name: su
+ type: auth
+ control: sufficient
+ module_path: pam_rootok.so
+ new_type: auth
+ new_control: required
+ new_module_path: pam_wheel.so
+ module_arguments: 'use_uid'
+ state: after
+
+- name: Remove module arguments from an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: ''
+ state: updated
+
+- name: Replace all module arguments in an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'preauth
+ silent
+ deny=3
+ unlock_time=604800
+ fail_interval=900'
+ state: updated
+
+- name: Remove specific arguments from a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_absent
+
+- name: Ensure specific arguments are present in a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_present
+
+- name: Ensure specific arguments are present in a rule (alternative)
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments:
+ - crond
+ - quiet
+ state: args_present
+
+- name: Module arguments requiring commas must be listed as a Yaml list
+ community.general.pamd:
+ name: special-module
+ type: account
+ control: required
+ module_path: pam_access.so
+ module_arguments:
+ - listsep=,
+ state: args_present
+
+- name: Update specific argument value in a rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'fail_interval=300'
+ state: args_present
+
+- name: Add pam common-auth rule for duo
+ community.general.pamd:
+ name: common-auth
+ new_type: auth
+ new_control: '[success=1 default=ignore]'
+ new_module_path: '/lib64/security/pam_duo.so'
+ state: after
+ type: auth
+ module_path: pam_sss.so
+ control: 'requisite'
+'''
+
+RETURN = r'''
+change_count:
+ description: How many rules were changed.
+ type: int
+ sample: 1
+ returned: success
+new_rule:
+ description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6.
+ type: str
+ sample: None None None sha512 shadow try_first_pass use_authtok
+ returned: success
+updated_rule_(n):
+ description: The rule(s) that was/were changed. This is only available in
+ Ansible 2.4 and was removed in Ansible 2.5.
+ type: str
+ sample:
+ - password sufficient pam_unix.so sha512 shadow try_first_pass
+ use_authtok
+ returned: success
+action:
+ description:
+    - "The action that was taken and is one of: update_rule,
+ insert_before_rule, insert_after_rule, args_present, args_absent,
+ absent. This was available in Ansible 2.4 and removed in Ansible 2.8"
+ returned: always
+ type: str
+ sample: "update_rule"
+dest:
+ description:
+ - "Path to pam.d service that was changed. This is only available in
+ Ansible 2.3 and was removed in Ansible 2.4."
+ returned: success
+ type: str
+ sample: "/etc/pam.d/system-auth"
+backupdest:
+ description:
+ - "The file name of the backup file, if created."
+ returned: success
+ type: str
+...
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+ (?P<control>\[.*\]|\S*)\s+
+ (?P<path>\S*)\s*
+ (?P<args>.*)\s*""", re.X)
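+# Example of a pam.d line the regex above is meant to match (illustrative):
+#   auth       required     pam_unix.so nullok try_first_pass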
+
+RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+ def __init__(self, line):
+ self.line = line
+ self.prev = None
+ self.next = None
+
+ @property
+ def is_valid(self):
+ if self.line.strip() == '':
+ return True
+ return False
+
+ def validate(self):
+ if not self.is_valid:
+ return False, "Rule is not valid " + self.line
+ return True, "Rule is valid " + self.line
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return False
+
+ def __str__(self):
+ return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+ pass
+
+
+class PamdComment(PamdLine):
+
+ def __init__(self, line):
+ super(PamdComment, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('#'):
+ return True
+ return False
+
+
+class PamdInclude(PamdLine):
+ def __init__(self, line):
+ super(PamdInclude, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('@include'):
+ return True
+ return False
+
+
+class PamdRule(PamdLine):
+
+ valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+ valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+ 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+ 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+ 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+ 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+ 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+ 'incomplete', 'default']
+ valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+ def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+ self.prev = None
+ self.next = None
+ self._control = None
+ self._args = None
+ self.rule_type = rule_type
+ self.rule_control = rule_control
+
+ self.rule_path = rule_path
+ self.rule_args = rule_args
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ if (rule_type == self.rule_type and
+ rule_control == self.rule_control and
+ rule_path == self.rule_path):
+ return True
+ return False
+
+ @classmethod
+ def rule_from_string(cls, line):
+ rule_match = RULE_REGEX.search(line)
+ rule_args = parse_module_arguments(rule_match.group('args'))
+ return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
+
+ def __str__(self):
+ if self.rule_args:
+ return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
+ return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
+
+ @property
+ def rule_control(self):
+ if isinstance(self._control, list):
+ return '[' + ' '.join(self._control) + ']'
+ return self._control
+
+ @rule_control.setter
+ def rule_control(self, control):
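+        # e.g. '[success=1 default=ignore]' is stored as ['success=1', 'default=ignore'];
+        # a simple control such as 'required' is kept as a plain string.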
+ if control.startswith('['):
+ control = control.replace(' = ', '=').replace('[', '').replace(']', '')
+ self._control = control.split(' ')
+ else:
+ self._control = control
+
+ @property
+ def rule_args(self):
+ if not self._args:
+ return []
+ return self._args
+
+ @rule_args.setter
+ def rule_args(self, args):
+ self._args = parse_module_arguments(args)
+
+ @property
+ def line(self):
+ return str(self)
+
+ @classmethod
+ def is_action_unsigned_int(cls, string_num):
+ number = 0
+ try:
+ number = int(string_num)
+ except ValueError:
+ return False
+
+ if number >= 0:
+ return True
+ return False
+
+ @property
+ def is_valid(self):
+ return self.validate()[0]
+
+ def validate(self):
+ # Validate the rule type
+ if self.rule_type not in VALID_TYPES:
+ return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
+ # Validate the rule control
+ if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
+ return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
+ elif isinstance(self._control, list):
+ for control in self._control:
+ value, action = control.split("=")
+ if value not in PamdRule.valid_control_values:
+ return False, "Rule control value, " + value + ", is not valid in rule " + self.line
+ if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
+ return False, "Rule control action, " + action + ", is not valid in rule " + self.line
+
+ # TODO: Validate path
+
+ return True, "Rule is valid " + self.line
+
+
+# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this
+# as a doubly linked list.
+class PamdService(object):
+
+ def __init__(self, content):
+ self._head = None
+ self._tail = None
+ for line in content.splitlines():
+ if line.lstrip().startswith('#'):
+ pamd_line = PamdComment(line)
+ elif line.lstrip().startswith('@include'):
+ pamd_line = PamdInclude(line)
+ elif line.strip() == '':
+ pamd_line = PamdEmptyLine(line)
+ else:
+ pamd_line = PamdRule.rule_from_string(line)
+
+ self.append(pamd_line)
+
+ def append(self, pamd_line):
+ if self._head is None:
+ self._head = self._tail = pamd_line
+ else:
+ pamd_line.prev = self._tail
+ pamd_line.next = None
+ self._tail.next = pamd_line
+ self._tail = pamd_line
+
+ def remove(self, rule_type, rule_control, rule_path):
+ current_line = self._head
+ changed = 0
+
+ while current_line is not None:
+ if current_line.matches(rule_type, rule_control, rule_path):
+ if current_line.prev is not None:
+ current_line.prev.next = current_line.next
+ if current_line.next is not None:
+ current_line.next.prev = current_line.prev
+ else:
+ self._head = current_line.next
+ current_line.next.prev = None
+ changed += 1
+
+ current_line = current_line.next
+ return changed
+
+ def get(self, rule_type, rule_control, rule_path):
+ lines = []
+ current_line = self._head
+ while current_line is not None:
+
+ if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
+ lines.append(current_line)
+
+ current_line = current_line.next
+
+ return lines
+
+ def has_rule(self, rule_type, rule_control, rule_path):
+ if self.get(rule_type, rule_control, rule_path):
+ return True
+ return False
+
+ def update_rule(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ new_args = parse_module_arguments(new_args)
+
+ changes = 0
+ for current_rule in rules_to_find:
+ rule_changed = False
+            if new_type and current_rule.rule_type != new_type:
+                rule_changed = True
+                current_rule.rule_type = new_type
+            if new_control and current_rule.rule_control != new_control:
+                rule_changed = True
+                current_rule.rule_control = new_control
+            if new_path and current_rule.rule_path != new_path:
+                rule_changed = True
+                current_rule.rule_path = new_path
+            if new_args and current_rule.rule_args != new_args:
+                rule_changed = True
+                current_rule.rule_args = new_args
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def insert_before(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist before the existing rule
+ # 2. The new rule exists
+
+ for current_rule in rules_to_find:
+ # Create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ # First we'll get the previous rule.
+ previous_rule = current_rule.prev
+
+            # Next we may have to loop backwards if the previous line is a comment or an
+            # empty line. If it is, we keep stepping back until we reach an actual rule.
+ while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)):
+ previous_rule = previous_rule.prev
+ # Next we'll see if the previous rule matches what we are trying to insert.
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
+ # First set the original previous rule's next to the new_rule
+ previous_rule.next = new_rule
+ # Second, set the new_rule's previous to the original previous
+ new_rule.prev = previous_rule
+ # Third, set the new rule's next to the current rule
+ new_rule.next = current_rule
+ # Fourth, set the current rule's previous to the new_rule
+ current_rule.prev = new_rule
+
+ changes += 1
+
+ # Handle the case where it is the first rule in the list.
+ elif previous_rule is None:
+ # This is the case where the current rule is not only the first rule
+ # but the first line as well. So we set the head to the new rule
+ if current_rule.prev is None:
+ self._head = new_rule
+                # This case occurs when the previous line was a comment or an empty line.
+ else:
+ current_rule.prev.next = new_rule
+ new_rule.prev = current_rule.prev
+ new_rule.next = current_rule
+ current_rule.prev = new_rule
+ changes += 1
+
+ return changes
+
+ def insert_after(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist after the existing rule
+ # 2. The new rule exists
+ for current_rule in rules_to_find:
+ # First we'll get the next rule.
+ next_rule = current_rule.next
+            # Next we may have to loop forwards if the next line is a comment or an
+            # empty line. If it is, we keep stepping forward until we reach an actual rule.
+ while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)):
+ next_rule = next_rule.next
+
+ # First we create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
+                # If the next rule doesn't match, we'll insert our new rule.
+
+ # Second set the original next rule's previous to the new_rule
+ next_rule.prev = new_rule
+ # Third, set the new_rule's next to the original next rule
+ new_rule.next = next_rule
+ # Fourth, set the new rule's previous to the current rule
+ new_rule.prev = current_rule
+ # Fifth, set the current rule's next to the new_rule
+ current_rule.next = new_rule
+
+ changes += 1
+
+ # This is the case where the current_rule is the last in the list
+ elif next_rule is None:
+ new_rule.prev = self._tail
+ new_rule.next = None
+ self._tail.next = new_rule
+ self._tail = new_rule
+
+ current_rule.next = new_rule
+ changes += 1
+
+ return changes
+
+ def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_add = parse_module_arguments(args_to_add)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ rule_changed = False
+
+ # create some structures to evaluate the situation
+ simple_new_args = set()
+ key_value_new_args = dict()
+
+ for arg in args_to_add:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_new_args[key] = value
+ else:
+ simple_new_args.add(arg)
+
+ key_value_new_args_set = set(key_value_new_args)
+
+ simple_current_args = set()
+ key_value_current_args = dict()
+
+ for arg in current_rule.rule_args:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_current_args[key] = value
+ else:
+ simple_current_args.add(arg)
+
+ key_value_current_args_set = set(key_value_current_args)
+
+ new_args_to_add = list()
+
+ # Handle new simple arguments
+ if simple_new_args.difference(simple_current_args):
+ for arg in simple_new_args.difference(simple_current_args):
+ new_args_to_add.append(arg)
+
+ # Handle new key value arguments
+ if key_value_new_args_set.difference(key_value_current_args_set):
+ for key in key_value_new_args_set.difference(key_value_current_args_set):
+ new_args_to_add.append(key + '=' + key_value_new_args[key])
+
+ if new_args_to_add:
+ current_rule.rule_args += new_args_to_add
+ rule_changed = True
+
+ # Handle existing key value arguments when value is not equal
+ if key_value_new_args_set.intersection(key_value_current_args_set):
+ for key in key_value_new_args_set.intersection(key_value_current_args_set):
+ if key_value_current_args[key] != key_value_new_args[key]:
+ arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
+ current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
+ rule_changed = True
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_remove = parse_module_arguments(args_to_remove)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ if not args_to_remove:
+ args_to_remove = []
+
+ # Let's check to see if there are any args to remove by finding the intersection
+ # of the rule's current args and the args_to_remove lists
+ if not list(set(current_rule.rule_args) & set(args_to_remove)):
+ continue
+
+ # There are args to remove, so we create a list of new_args absent the args
+ # to remove.
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
+
+ changes += 1
+
+ return changes
+
+ def validate(self):
+ current_line = self._head
+
+ while current_line is not None:
+ if not current_line.validate()[0]:
+ return current_line.validate()
+ current_line = current_line.next
+ return True, "Module is valid"
+
+ def __str__(self):
+ lines = []
+ current_line = self._head
+
+ while current_line is not None:
+ lines.append(str(current_line))
+ current_line = current_line.next
+
+ if lines[1].startswith("# Updated by Ansible"):
+ lines.pop(1)
+
+ lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
+
+ return '\n'.join(lines) + '\n'
+
+
+def parse_module_arguments(module_arguments):
+ # Return empty list if we have no args to parse
+ if not module_arguments:
+ return []
+ elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
+ return []
+
+ if not isinstance(module_arguments, list):
+ module_arguments = [module_arguments]
+
+ parsed_args = list()
+
+ for arg in module_arguments:
+ for item in filter(None, RULE_ARG_REGEX.findall(arg)):
+            if not item.startswith("["):
+                item = re.sub("\\s*=\\s*", "=", item)
+ parsed_args.append(item)
+
+ return parsed_args
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=VALID_TYPES),
+ control=dict(type='str', required=True),
+ module_path=dict(type='str', required=True),
+ new_type=dict(type='str', choices=VALID_TYPES),
+ new_control=dict(type='str'),
+ new_module_path=dict(type='str'),
+ module_arguments=dict(type='list', elements='str'),
+ state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
+ path=dict(type='path', default='/etc/pam.d'),
+ backup=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "args_present", ["module_arguments"]),
+ ("state", "args_absent", ["module_arguments"]),
+ ("state", "before", ["new_control", "new_type", "new_module_path"]),
+ ("state", "after", ["new_control", "new_type", "new_module_path"]),
+ ],
+ )
+ content = str()
+ fname = os.path.join(module.params["path"], module.params["name"])
+
+ # Open the file and read the content or fail
+ try:
+ with open(fname, 'r') as service_file_obj:
+ content = service_file_obj.read()
+ except IOError as e:
+ # If unable to read the file, fail out
+ module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
+
+ # Assuming we didn't fail, create the service
+ service = PamdService(content)
+ # Set the action
+ action = module.params['state']
+
+ changes = 0
+
+ # Take action
+ if action == 'updated':
+ changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'before':
+ changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'after':
+ changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_absent':
+ changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_present':
+ if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
+ module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
+
+ changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'absent':
+ changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
+
+ valid, msg = service.validate()
+
+ # If the module is not valid (meaning one of the rules is invalid), we will fail
+ if not valid:
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=(changes > 0),
+ change_count=changes,
+ backupdest='',
+ )
+
+ # If not check mode and something changed, backup the original if necessary then write out the file or fail
+ if not module.check_mode and result['changed']:
+ # First, create a backup if desired.
+ if module.params['backup']:
+ result['backupdest'] = module.backup_local(fname)
+ try:
+ temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
+ with open(temp_file.name, 'w') as fd:
+ fd.write(str(service))
+
+ except IOError:
+            module.fail_json(msg='Unable to create temporary file %s' % temp_file)
+
+ module.atomic_move(temp_file.name, os.path.realpath(fname))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
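
PamdService above stores every line of a pam.d service file as a node in a doubly linked
list, and insert_before()/insert_after() work by rewiring the prev/next pointers around a
matched rule. A minimal standalone sketch of that rewiring, using a hypothetical Node class
rather than the module's PamdRule (names here are illustrative only), might look like this:

    # Illustrative sketch of the doubly-linked-list rewiring used by insert_before().
    class Node(object):
        def __init__(self, value):
            self.value = value
            self.prev = None
            self.next = None


    def insert_before(head, target, new_node):
        """Insert new_node immediately before target and return the (possibly new) head."""
        previous = target.prev
        new_node.next = target
        new_node.prev = previous
        target.prev = new_node
        if previous is None:
            return new_node  # target was the head, so the new node becomes the head
        previous.next = new_node
        return head


    # Build a <-> b, then insert x before b, giving a <-> x <-> b.
    a = Node('auth        required     pam_env.so')
    b = Node('auth        required     pam_unix.so')
    x = Node('auth        required     pam_faildelay.so')
    a.next, b.prev = b, a
    head = insert_before(a, b, x)
    node = head
    while node is not None:
        print(node.value)
        node = node.next

The real module adds matching logic and skips PamdComment/PamdEmptyLine nodes before deciding
whether the rule to insert is already present.
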
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py
new file mode 100644
index 00000000..daf68c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/parted.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Fabrizio Colonna (@ColOfAbRiX)
+module: parted
+short_description: Configure block device partitions
+description:
+ - This module allows configuring block device partition using the C(parted)
+ command line tool. For a full description of the fields and the options
+ check the GNU parted manual.
+requirements:
+  - This module requires parted version 1.8.3 or above.
+  - The C(align) option (except C(undefined)) requires parted 2.1 or above.
+ - If the version of parted is below 3.1, it requires a Linux version running
+ the sysfs file system C(/sys/).
+options:
+ device:
+    description: The block device (disk) on which to operate.
+ type: str
+ required: True
+ align:
+    description: Set alignment for newly created partitions. Use 'undefined' for the parted default alignment.
+ type: str
+ choices: [ cylinder, minimal, none, optimal, undefined ]
+ default: optimal
+ number:
+ description:
+ - The number of the partition to work with or the number of the partition
+ that will be created.
+ - Required when performing any action on the disk, except fetching information.
+ type: int
+ unit:
+ description:
+ - Selects the current default unit that Parted will use to display
+ locations and capacities on the disk and to interpret those given by the
+        user if they are not suffixed by a unit.
+ - When fetching information about a disk, it is always recommended to specify a unit.
+ type: str
+ choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
+ default: KiB
+ label:
+ description:
+ - Disk label type to use.
+      - If C(device) already contains a different label, it will be changed to C(label) and any previous partitions will be lost.
+ type: str
+ choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
+ default: msdos
+ part_type:
+ description:
+ - May be specified only with 'msdos' or 'dvh' partition tables.
+ - A C(name) must be specified for a 'gpt' partition table.
+ - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
+ type: str
+ choices: [ extended, logical, primary ]
+ default: primary
+ part_start:
+ description:
+ - Where the partition will start as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+      - The distance can be specified with all the units supported by parted
+        (except C(compact)) and it is case sensitive, e.g. C(10GiB), C(15%).
+ - Using negative values may require setting of C(fs_type) (see notes).
+ type: str
+ default: 0%
+ part_end:
+ description:
+ - Where the partition will end as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+      - The distance can be specified with all the units supported by parted
+        (except C(compact)) and it is case sensitive, e.g. C(10GiB), C(15%).
+ type: str
+ default: 100%
+ name:
+ description:
+ - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+ type: str
+ flags:
+    description: A list of the flags that have to be set on the partition.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to create or delete a partition.
+ - If set to C(info) the module will only return the device information.
+ type: str
+ choices: [ absent, present, info ]
+ default: info
+ fs_type:
+ description:
+      - If specified and the partition does not exist, the given filesystem type will be set on the new partition.
+      - The parameter is optional, but see the notes below about negative C(part_start) values.
+ type: str
+ version_added: '0.2.0'
+ resize:
+ description:
+ - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ type: bool
+ default: false
+ version_added: '1.3.0'
+
+notes:
+ - When fetching information about a new disk and when the version of parted
+ installed on the system is before version 3.1, the module queries the kernel
+ through C(/sys/) to obtain disk information. In this case the units CHS and
+ CYL are not supported.
+  - Negative C(part_start) values were rejected if C(fs_type) was not given.
+    This bug was fixed in parted 3.2.153. If you want to use a negative C(part_start),
+    specify C(fs_type) as well or make sure your system runs a newer version of parted.
+'''
+
+RETURN = r'''
+partition_info:
+ description: Current partition information
+ returned: success
+ type: complex
+ contains:
+ disk:
+ description: Generic device information.
+ type: dict
+ partitions:
+ description: List of device partitions.
+ type: list
+ script:
+      description: The parted script executed by the module.
+ type: str
+ sample: {
+ "disk": {
+ "dev": "/dev/sdb",
+ "logical_block": 512,
+ "model": "VMware Virtual disk",
+ "physical_block": 512,
+ "size": 5.0,
+ "table": "msdos",
+ "unit": "gib"
+ },
+ "partitions": [{
+ "begin": 0.0,
+ "end": 1.0,
+ "flags": ["boot", "lvm"],
+ "fstype": "",
+ "name": "",
+ "num": 1,
+ "size": 1.0
+ }, {
+ "begin": 1.0,
+ "end": 5.0,
+ "flags": [],
+ "fstype": "",
+ "name": "",
+ "num": 2,
+ "size": 4.0
+ }],
+ "script": "unit KiB print "
+ }
+'''
+
+EXAMPLES = r'''
+- name: Create a new ext4 primary partition
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ fs_type: ext4
+
+- name: Remove partition number 1
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+ community.general.parted:
+ device: /dev/sdb
+ number: 2
+ flags: [ lvm ]
+ state: present
+ part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+ community.general.parted:
+ device: /dev/sdb
+ number: 3
+ state: present
+ fs_type: ext3
+ part_start: -1GiB
+
+# Example on how to read info and reuse it in subsequent task
+- name: Read device information (always use unit when probing)
+ community.general.parted: device=/dev/sdb unit=MiB
+ register: sdb_info
+
+- name: Remove all partitions from disk
+ community.general.parted:
+ device: /dev/sdb
+ number: '{{ item.num }}'
+ state: absent
+ loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+ community.general.parted:
+ device: /dev/sdb
+ number: "{{ sdb_info.partitions | length }}"
+ part_end: "100%"
+ resize: true
+ state: present
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+ """
+ Parses a string containing a size or boundary information
+ """
+ matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+ if matches is None:
+ # "<cylinder>,<head>,<sector>" format
+ matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+ if matches is None:
+ module.fail_json(
+ msg="Error interpreting parted size output: '%s'" % size_str
+ )
+
+ size = {
+ 'cylinder': int(matches.group(1)),
+ 'head': int(matches.group(2)),
+ 'sector': int(matches.group(3))
+ }
+ unit = 'chs'
+
+ else:
+ # Normal format: "<number>[<unit>]"
+ if matches.group(2) is not None:
+ unit = matches.group(2)
+
+ size = float(matches.group(1))
+
+ return size, unit
+
+
+def parse_partition_info(parted_output, unit):
+ """
+ Parses the output of parted and transforms the data into
+ a dictionary.
+
+ Parted Machine Parseable Output:
+ See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/00
+ 0573.html
+ - All lines end with a semicolon (;)
+ - The first line indicates the units in which the output is expressed.
+      CHS, CYL and BYT stand for CHS, Cylinder and Bytes respectively.
+ - The second line is made of disk information in the following format:
+ "path":"size":"transport-type":"logical-sector-size":"physical-sector-siz
+ e":"partition-table-type":"model-name";
+ - If the first line was either CYL or CHS, the next line will contain
+ information on no. of cylinders, heads, sectors and cylinder size.
+ - Partition information begins from the next line. This is of the format:
+ (for BYT)
+ "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-s
+ et";
+ (for CHS/CYL)
+ "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
+ """
+ lines = [x for x in parted_output.split('\n') if x.strip() != '']
+
+ # Generic device info
+ generic_params = lines[1].rstrip(';').split(':')
+
+ # The unit is read once, because parted always returns the same unit
+ size, unit = parse_unit(generic_params[1], unit)
+
+ generic = {
+ 'dev': generic_params[0],
+ 'size': size,
+ 'unit': unit.lower(),
+ 'table': generic_params[5],
+ 'model': generic_params[6],
+ 'logical_block': int(generic_params[3]),
+ 'physical_block': int(generic_params[4])
+ }
+
+ # CYL and CHS have an additional line in the output
+ if unit in ['cyl', 'chs']:
+ chs_info = lines[2].rstrip(';').split(':')
+ cyl_size, cyl_unit = parse_unit(chs_info[3])
+ generic['chs_info'] = {
+ 'cylinders': int(chs_info[0]),
+ 'heads': int(chs_info[1]),
+ 'sectors': int(chs_info[2]),
+ 'cyl_size': cyl_size,
+ 'cyl_size_unit': cyl_unit.lower()
+ }
+ lines = lines[1:]
+
+ parts = []
+ for line in lines[2:]:
+ part_params = line.rstrip(';').split(':')
+
+        # CHS uses a different format than BYT, but contrary to what is stated by
+ # the author, CYL is the same as BYT. I've tested this undocumented
+ # behaviour down to parted version 1.8.3, which is the first version
+ # that supports the machine parseable output.
+ if unit != 'chs':
+ size = parse_unit(part_params[3])[0]
+ fstype = part_params[4]
+ name = part_params[5]
+ flags = part_params[6]
+
+ else:
+ size = ""
+ fstype = part_params[3]
+ name = part_params[4]
+ flags = part_params[5]
+
+ parts.append({
+ 'num': int(part_params[0]),
+ 'begin': parse_unit(part_params[1])[0],
+ 'end': parse_unit(part_params[2])[0],
+ 'size': size,
+ 'fstype': fstype,
+ 'name': name,
+ 'flags': [f.strip() for f in flags.split(', ') if f != ''],
+ 'unit': unit.lower(),
+ })
+
+ return {'generic': generic, 'partitions': parts}
+
+
+def format_disk_size(size_bytes, unit):
+ """
+ Formats a size in bytes into a different unit, like parted does. It doesn't
+ manage CYL and CHS formats, though.
+ This function has been adapted from https://github.com/Distrotech/parted/blo
+ b/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+ """
+ global units_si, units_iec
+
+ unit = unit.lower()
+
+ # Shortcut
+ if size_bytes == 0:
+ return 0.0, 'b'
+
+ # Cases where we default to 'compact'
+ if unit in ['', 'compact', 'cyl', 'chs']:
+ index = max(0, int(
+ (math.log10(size_bytes) - 1.0) / 3.0
+ ))
+ unit = 'b'
+ if index < len(units_si):
+ unit = units_si[index]
+
+ # Find the appropriate multiplier
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** units_iec.index(unit)
+
+ output = size_bytes // multiplier * (1 + 1E-16)
+
+ # Corrections to round up as per IEEE754 standard
+ if output < 10:
+ w = output + 0.005
+ elif output < 100:
+ w = output + 0.05
+ else:
+ w = output + 0.5
+
+ if w < 10:
+ precision = 2
+ elif w < 100:
+ precision = 1
+ else:
+ precision = 0
+
+ # Round and return
+ return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
+ size = float(size_str)
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+ elif unit in ['', 'compact', 'cyl', 'chs']:
+ # As per format_disk_size, default to compact, which defaults to megabytes
+ multiplier = 1000.0 ** units_si.index("MB")
+
+ output = size * multiplier
+ return int(output)
+
+
+def get_unlabeled_device_info(device, unit):
+ """
+    Fetches device information directly from the kernel; it is used when
+    parted cannot work because of a missing label. It always returns an 'unknown'
+    label.
+ """
+ device_name = os.path.basename(device)
+ base = "/sys/block/%s" % device_name
+
+ vendor = read_record(base + "/device/vendor", "Unknown")
+ model = read_record(base + "/device/model", "model")
+ logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+ phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+ size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+ size, unit = format_disk_size(size_bytes, unit)
+
+ return {
+ 'generic': {
+ 'dev': device,
+ 'table': "unknown",
+ 'size': size,
+ 'unit': unit,
+ 'logical_block': logic_block,
+ 'physical_block': phys_block,
+ 'model': "%s %s" % (vendor, model),
+ },
+ 'partitions': []
+ }
+
+
+def get_device_info(device, unit):
+ """
+    Fetches information about a disk and its partitions, and returns a
+    dictionary.
+ """
+ global module, parted_exec
+
+ # If parted complains about missing labels, it means there are no partitions.
+ # In this case only, use a custom function to fetch information and emulate
+ # parted formats for the unit.
+ label_needed = check_parted_label(device)
+ if label_needed:
+ return get_unlabeled_device_info(device, unit)
+
+ command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ rc, out, err = module.run_command(command)
+ if rc != 0 and 'unrecognised disk label' not in err:
+ module.fail_json(msg=(
+ "Error while getting device information with parted "
+ "script: '%s'" % command),
+ rc=rc, out=out, err=err
+ )
+
+ return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+ """
+ Determines if parted needs a label to complete its duties. Versions prior
+ to 3.1 don't return data when there is no label. For more information see:
+ http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
+ """
+ global parted_exec
+
+ # Check the version
+ parted_major, parted_minor, _ = parted_version()
+ if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
+ return False
+
+ # Older parted versions return a message in the stdout and RC > 0.
+ rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ if rc != 0 and 'unrecognised disk label' in out.lower():
+ return True
+
+ return False
+
+
+def parse_parted_version(out):
+ """
+ Returns version tuple from the output of "parted --version" command
+ """
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ return None, None, None
+
+ # Sample parted versions (see as well test unit):
+ # parted (GNU parted) 3.3
+ # parted (GNU parted) 3.4.5
+ # parted (GNU parted) 3.3.14-dfc61
+ matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip())
+
+ if matches is None:
+ return None, None, None
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+
+def parted_version():
+ """
+ Returns the major and minor version of parted installed on the system.
+ """
+ global module, parted_exec
+
+ rc, out, err = module.run_command("%s --version" % parted_exec)
+ if rc != 0:
+ module.fail_json(
+ msg="Failed to get parted version.", rc=rc, out=out, err=err
+ )
+
+ (major, minor, rev) = parse_parted_version(out)
+ if major is None:
+ module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
+
+ return major, minor, rev
+
+
+def parted(script, device, align):
+ """
+ Runs a parted script.
+ """
+ global module, parted_exec
+
+ align_option = '-a %s' % align
+ if align == 'undefined':
+ align_option = ''
+
+ if script and not module.check_mode:
+ command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ rc, out, err = module.run_command(command)
+
+ if rc != 0:
+ module.fail_json(
+ msg="Error while running parted script: %s" % command.strip(),
+ rc=rc, out=out, err=err
+ )
+
+
+def read_record(file_path, default=None):
+ """
+ Reads the first line of a file and returns it.
+ """
+    try:
+        with open(file_path, 'r') as f:
+            return f.readline().strip()
+    except IOError:
+        return default
+
+
+def part_exists(partitions, attribute, number):
+ """
+    Checks whether a partition with a specific value for a specific attribute
+    actually exists.
+ """
+ return any(
+ part[attribute] and
+ part[attribute] == number for part in partitions
+ )
+
+
+def check_size_format(size_str):
+ """
+ Checks if the input string is an allowed size
+ """
+ size, unit = parse_unit(size_str)
+ return unit in parted_units
+
+
+def main():
+ global module, units_si, units_iec, parted_exec
+
+ changed = False
+ output_script = ""
+ script = ""
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='str', required=True),
+ align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+ number=dict(type='int'),
+
+ # unit <unit> command
+ unit=dict(type='str', default='KiB', choices=parted_units),
+
+ # mklabel <label-type> command
+ label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+ # mkpart <part-type> [<fs-type>] <start> <end> command
+ part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+ part_start=dict(type='str', default='0%'),
+ part_end=dict(type='str', default='100%'),
+ fs_type=dict(type='str'),
+
+ # name <partition> <name> command
+ name=dict(type='str'),
+
+ # set <partition> <flag> <state> command
+ flags=dict(type='list', elements='str'),
+
+ # rm/mkpart command
+ state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+ # resize part
+ resize=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ['state', 'present', ['number']],
+ ['state', 'absent', ['number']],
+ ],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+ # Data extraction
+ device = module.params['device']
+ align = module.params['align']
+ number = module.params['number']
+ unit = module.params['unit']
+ label = module.params['label']
+ part_type = module.params['part_type']
+ part_start = module.params['part_start']
+ part_end = module.params['part_end']
+ name = module.params['name']
+ state = module.params['state']
+ flags = module.params['flags']
+ fs_type = module.params['fs_type']
+ resize = module.params['resize']
+
+ # Parted executable
+ parted_exec = module.get_bin_path('parted', True)
+
+ # Conditioning
+ if number is not None and number < 1:
+ module.fail_json(msg="The partition number must be greater then 0.")
+ if not check_size_format(part_start):
+ module.fail_json(
+ msg="The argument 'part_start' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_start)
+ )
+ if not check_size_format(part_end):
+ module.fail_json(
+ msg="The argument 'part_end' doesn't respect required format."
+ "The size unit is case sensitive.",
+ err=parse_unit(part_end)
+ )
+
+ # Read the current disk information
+ current_device = get_device_info(device, unit)
+ current_parts = current_device['partitions']
+
+ if state == 'present':
+
+ # Assign label if required
+ mklabel_needed = current_device['generic'].get('table', None) != label
+ if mklabel_needed:
+ script += "mklabel %s " % label
+
+ # Create partition if required
+ if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)):
+ script += "mkpart %s %s%s %s " % (
+ part_type,
+ '%s ' % fs_type if fs_type is not None else '',
+ part_start,
+ part_end
+ )
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # If partition exists, try to resize
+ if resize and part_exists(current_parts, 'num', number):
+ # Ensure new end is different to current
+ partition = [p for p in current_parts if p['num'] == number][0]
+ current_part_end = convert_to_bytes(partition['end'], unit)
+
+ size, parsed_unit = parse_unit(part_end, unit)
+ if parsed_unit == "%":
+ size = int((int(current_device['generic']['size']) * size) / 100)
+ parsed_unit = unit
+
+ desired_part_end = convert_to_bytes(size, parsed_unit)
+
+ if current_part_end != desired_part_end:
+ script += "resizepart %s %s " % (
+ number,
+ part_end
+ )
+
+ # Execute the script and update the data structure.
+ # This will create the partition for the next steps
+ if script:
+ output_script += script
+ parted(script, device, align)
+ changed = True
+ script = ""
+
+ if not module.check_mode:
+ current_parts = get_device_info(device, unit)['partitions']
+
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ if changed and module.check_mode:
+ partition = {'flags': []} # Empty structure for the check-mode
+ else:
+ partition = [p for p in current_parts if p['num'] == number][0]
+
+ # Assign name to the partition
+ if name is not None and partition.get('name', None) != name:
+ # Wrap double quotes in single quotes so the shell doesn't strip
+ # the double quotes as those need to be included in the arg
+ # passed to parted
+ script += 'name %s \'"%s"\' ' % (number, name)
+
+ # Manage flags
+ if flags:
+                # Parted infers boot from esp: if you assign esp, boot is set,
+                # and if boot is unset, esp is also unset.
+ if 'esp' in flags and 'boot' not in flags:
+ flags.append('boot')
+
+ # Compute only the changes in flags status
+ flags_off = list(set(partition['flags']) - set(flags))
+ flags_on = list(set(flags) - set(partition['flags']))
+
+ for f in flags_on:
+ script += "set %s %s on " % (number, f)
+
+ for f in flags_off:
+ script += "set %s %s off " % (number, f)
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # Execute the script
+ if script:
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'absent':
+ # Remove the partition
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ script = "rm %s " % number
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'info':
+ output_script = "unit '%s' print " % unit
+
+ # Final status of the device
+ final_device_status = get_device_info(device, unit)
+ module.exit_json(
+ changed=changed,
+ disk=final_device_status['generic'],
+ partitions=final_device_status['partitions'],
+ script=output_script.strip()
+ )
+
+
+if __name__ == '__main__':
+ main()
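
parse_unit() and convert_to_bytes() above split a parted size string into a number and a
unit, then scale SI units by powers of 1000 and IEC units by powers of 1024. A rough
standalone sketch of that conversion (size_to_bytes is an illustrative name, not part of the
module):

    import re

    UNITS_SI = ['B', 'KB', 'MB', 'GB', 'TB']
    UNITS_IEC = ['KiB', 'MiB', 'GiB', 'TiB']


    def size_to_bytes(size_str):
        """Convert a parted-style size string such as '10GiB' or '1.5GB' to bytes."""
        match = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
        if match is None:
            raise ValueError('Unrecognised size: %s' % size_str)
        value = float(match.group(1))
        unit = match.group(2) or 'B'
        if unit in UNITS_SI:
            return int(value * 1000.0 ** UNITS_SI.index(unit))      # SI: powers of 1000
        if unit in UNITS_IEC:
            return int(value * 1024.0 ** (UNITS_IEC.index(unit) + 1))  # IEC: powers of 1024
        raise ValueError('Unsupported unit: %s' % unit)


    print(size_to_bytes('10GiB'))   # 10737418240
    print(size_to_bytes('1.5GB'))   # 1500000000

The module itself additionally handles the '<cylinder>,<head>,<sector>' form, percentages and
the 'compact' pseudo-unit, which this sketch leaves out.
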
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py
new file mode 100644
index 00000000..fef04d32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pear.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Afterburn <https://github.com/afterburn>
+# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+author:
+ - Jonathan Lestrelin (@jle64) <jonathan.lestrelin@gmail.com>
+options:
+ name:
+ type: str
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+ aliases: [pkg]
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "installed", "latest", "absent", "removed"]
+ executable:
+ type: path
+ description:
+ - Path to the pear executable.
+ prompts:
+ description:
+ - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
+ - Prompts will be processed in the same order as the packages list.
+      - You can optionally specify an answer to any question in the list.
+ - If no answer is provided, the list item will only contain the regular expression.
+ - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+ - You can provide a list containing items with or without answer.
+      - The prompt list can be shorter or longer than the packages list, but a warning will be issued.
+      - If you want to specify that a package in the middle of a list will not need prompts, use C(null).
+ type: list
+ elements: raw
+ version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+ community.general.pear:
+ name: Net_URL2
+ state: present
+
+- name: Install pecl package
+ community.general.pear:
+ name: pecl/json_post
+ state: present
+
+- name: Install pecl package with expected prompt
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, the additional prompts will be ignored.
+    If there are more packages than prompts, the packages without prompts will be installed without any prompt expected.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+    - I am a test prompt because gnupg doesn't ask anything
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once, skipping the first prompt.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, the additional prompts will be ignored.
+    If there are more packages than prompts, the packages without prompts will be installed without any prompt expected.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - null
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+ community.general.pear:
+ name: Net_URL2
+ state: latest
+
+- name: Remove packages
+ community.general.pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+ """Take pear remoteinfo output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-':
+ continue
+ return installed
+ return None
+
+
+def _get_pear_path(module):
+ if module.params['executable'] and os.path.isfile(module.params['executable']):
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('pear', True, [module.params['executable']])
+ return result
+
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
+
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "%s info %s" % (_get_pear_path(module), name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+    # Using a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages, prompts):
+ install_c = 0
+ has_prompt = bool(prompts)
+ default_stdin = "\n"
+
+ if has_prompt:
+ nb_prompts = len(prompts)
+ nb_packages = len(packages)
+
+ if nb_prompts > 0 and (nb_prompts != nb_packages):
+ if nb_prompts > nb_packages:
+ diff = nb_prompts - nb_packages
+ msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ else:
+ diff = nb_packages - nb_prompts
+ msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
+ % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ module.warn(msg)
+
+ # Preparing prompts answer according to item type
+ tmp_prompts = []
+ for _item in prompts:
+            # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer
+            # We also expect the dict to have only ONE key; the first key will be taken
+ if isinstance(_item, dict):
+ key = list(_item.keys())[0]
+ answer = _item[key] + "\n"
+
+ tmp_prompts.append((key, answer))
+ elif not _item:
+ tmp_prompts.append((None, default_stdin))
+ else:
+ tmp_prompts.append((_item, default_stdin))
+ prompts = tmp_prompts
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ if has_prompt and i < len(prompts):
+ prompt_regex = prompts[i][0]
+ data = prompts[i][1]
+ else:
+ prompt_regex = None
+ data = default_stdin
+
+ cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path'),
+ prompts=dict(default=None, required=False, type='list', elements='raw'),
+ ),
+ supports_check_mode=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs, p["prompts"])
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
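
The prompt handling above normalises each item of the prompts list into a
(prompt_regex, answer) pair: a dict maps a regex to an explicit answer, a null entry means no
prompt is expected, and a bare string is a prompt answered with a plain newline. A small
standalone sketch of that normalisation (normalise_prompts is an illustrative name only):

    DEFAULT_STDIN = "\n"


    def normalise_prompts(prompts):
        pairs = []
        for item in prompts or []:
            if isinstance(item, dict):
                # the dict is expected to hold a single regex -> answer mapping
                key = list(item.keys())[0]
                pairs.append((key, item[key] + "\n"))
            elif not item:
                # null/empty entry: no prompt expected for this package
                pairs.append((None, DEFAULT_STDIN))
            else:
                # bare regex: expect the prompt and answer it with a plain newline
                pairs.append((item, DEFAULT_STDIN))
        return pairs


    print(normalise_prompts([
        None,
        r'(.*)Enable internal debugging in APCu \[no\]',
        {r'(.*)Enable internal debugging in APCu \[no\]': 'yes'},
    ]))

Each pair is then handed to run_command() as prompt_regex and data, one pair per package, which
is why the two lists are matched positionally.
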
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py
new file mode 100644
index 00000000..1bee180b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pids.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of given process name in Ansible controller/controlled machines.Returns an empty list if no process in that name exists."
+short_description: "Retrieves process IDs list if the process is running otherwise return empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+  - psutil (Python module)
+options:
+ name:
+    description: The name of the process you want to get the PID for.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ community.general.pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ ansible.builtin.debug:
+ msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+ returned: list of none, one, or more process IDs
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+def get_pid(name):
+ pids = []
+
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
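
get_pid() above treats a process as a match when either its name or the first element of its
command line equals the requested name, compared case-insensitively. The same logic can be
exercised outside Ansible with a short script (assuming psutil is installed; find_pids is an
illustrative name, not part of the module):

    import psutil


    def find_pids(name):
        """Return PIDs whose process name or argv[0] matches name (case-insensitive)."""
        wanted = name.lower()
        pids = []
        for proc in psutil.process_iter(attrs=['name', 'cmdline']):
            proc_name = (proc.info['name'] or '').lower()
            cmdline = proc.info['cmdline'] or []
            if proc_name == wanted or (cmdline and cmdline[0].lower() == wanted):
                pids.append(proc.pid)
        return pids


    if __name__ == '__main__':
        print(find_pids('python'))
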
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py
new file mode 100644
index 00000000..23ed2545
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pingdom.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+ - This module will let you pause/unpause Pingdom alerts
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+options:
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: [ "running", "paused", "started", "stopped" ]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+- name: Pause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
+
+- name: Unpause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
+'''
+
+import traceback
+
+PINGDOM_IMP_ERR = None
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except Exception:
+ PINGDOM_IMP_ERR = traceback.format_exc()
+ HAS_PINGDOM = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True, no_log=True),
+ key=dict(required=True, no_log=True),
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR)
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+
+if __name__ == '__main__':
+ main()
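
pause() and unpause() above differ only in the boolean passed to modify_check(), so the
module's state handling boils down to mapping paused/stopped to True and running/started to
False. A hedged sketch of that shared helper (set_check_paused and PAUSED_STATES are
illustrative names; PingdomConnection, modify_check and get_check are the pingdom-python calls
already used by the module):

    def set_check_paused(checkid, uid, passwd, key, paused):
        """Toggle a Pingdom check's paused flag and return its (name, status)."""
        import pingdom  # third-party pingdom-python library, required as noted above
        conn = pingdom.PingdomConnection(uid, passwd, key)
        conn.modify_check(checkid, paused=paused)
        check = conn.get_check(checkid)
        return check.name, check.status


    # 'paused'/'stopped' map to paused=True, 'running'/'started' to paused=False.
    PAUSED_STATES = {'paused': True, 'stopped': True, 'running': False, 'started': False}
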
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py
new file mode 100644
index 00000000..08eb2e95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pip_package_info.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# started out with AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pip_package_info
+short_description: pip package information
+description:
+ - Return information about installed pip packages
+options:
+ clients:
+ description:
+ - A list of the pip executables that will be used to get the packages.
+        They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ default: ['pip']
+ required: False
+ type: list
+requirements:
+ - The requested pip executables must be installed on the target.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Just get the list from default pip
+ community.general.pip_package_info:
+
+- name: Get the facts for default pip, pip2 and pip3.6
+ community.general.pip_package_info:
+ clients: ['pip', 'pip2', 'pip3.6']
+
+- name: Get from specific paths (virtualenvs?)
+ community.general.pip_package_info:
+ clients: '/home/me/projec42/python/pip3.5'
+'''
+
+RETURN = '''
+packages:
+ description: a dictionary of installed package data
+ returned: always
+ type: dict
+ contains:
+ python:
+ description: A dictionary with each pip client which then contains a list of dicts with python package information
+ returned: always
+ type: dict
+ sample:
+ "packages": {
+ "pip": {
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ],
+ },
+ }
+'''
+import json
+import os
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.packages import CLIMgr
+
+
+class PIP(CLIMgr):
+
+ def __init__(self, pip):
+
+ self.CLI = pip
+
+ def list_installed(self):
+ global module
+ rc, out, err = module.run_command([self._cli, 'list', '-l', '--format=json'])
+ if rc != 0:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return json.loads(out)
+
+ def get_package_details(self, package):
+ package['source'] = self.CLI
+ return package
+
+
+def main():
+
+ # start work
+ global module
+ module = AnsibleModule(argument_spec=dict(clients={'type': 'list', 'default': ['pip']},), supports_check_mode=True)
+ packages = {}
+ results = {'packages': {}}
+ clients = module.params['clients']
+
+ found = 0
+ for pip in clients:
+
+ if not os.path.basename(pip).startswith('pip'):
+ module.warn('Skipping invalid pip client: %s' % (pip))
+ continue
+ try:
+ pip_mgr = PIP(pip)
+ if pip_mgr.is_available():
+ found += 1
+ packages[pip] = pip_mgr.get_packages()
+ except Exception as e:
+ module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e)))
+ continue
+
+ if found == 0:
+ module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients)
+
+ # return info
+ results['packages'] = packages
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py
new file mode 100644
index 00000000..266c073f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Peter Oliver <ansible@mavit.org.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+options:
+ name:
+ description:
+    - An FMRI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ choices: [ absent, latest, present, installed, removed, uninstalled ]
+ default: present
+ type: str
+ accept_licenses:
+ description:
+ - Accept any licences.
+ type: bool
+ default: no
+ aliases: [ accept, accept_licences ]
+ be_name:
+ description:
+ - Creates a new boot environment with the given name.
+ type: str
+ refresh:
+ description:
+ - Refresh publishers before execution.
+ type: bool
+ default: yes
+'''
+EXAMPLES = '''
+- name: Install Vim
+ community.general.pkg5:
+ name: editor/vim
+
+- name: Install Vim without refreshing publishers
+ community.general.pkg5:
+ name: editor/vim
+ refresh: no
+
+- name: Remove finger daemon
+ community.general.pkg5:
+ name: service/network/finger
+ state: absent
+
+- name: Install several packages at once
+ community.general.pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+ accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+ be_name=dict(type='str'),
+ refresh=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ packages = []
+
+    # pkg(5) FMRIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
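+    # For example, a requested name like 'vim@7.4.1689,5.11-0.175' arrives here as
+    # ['vim@7.4.1689', '5.11-0.175'] and is rejoined into a single FMRI below.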
+ for fragment in params['name']:
+ if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
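+    # Map each target state to a filter selecting the packages that still need
+    # work and the pkg(1) subcommand used to get there.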
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: (
+ not is_installed(module, p) or not is_latest(module, p)
+ ),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
+
+ if module.check_mode:
+ dry_run = ['-n']
+ else:
+ dry_run = []
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ if params['be_name']:
+ beadm = ['--be-name=' + module.params['be_name']]
+ else:
+ beadm = []
+
+ if params['refresh']:
+ no_refresh = []
+ else:
+ no_refresh = ['--no-refresh']
+
+ to_modify = list(filter(behaviour[state]['filter'], packages))
+ if to_modify:
+ rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
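+        # pkg(1) exits with 4 when there was nothing to do; treat that as "no change".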
+ if rc == 4:
+ response['changed'] = False
+ response['failed'] = False
+ elif rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
new file mode 100644
index 00000000..95d57765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+  - This module configures which publishers a client will download IPS
+ packages from.
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ type: str
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ type: bool
+ enabled:
+ description:
+ - Is the repository enabled or disabled?
+ type: bool
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+'''
+EXAMPLES = '''
+- name: Fetch packages for the solaris publisher direct from Oracle
+ community.general.pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
+
+- name: Configure a publisher for locally-produced packages
+ community.general.pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list', elements='str'),
+ mirror=dict(type='list', elements='str'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
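+    # Only rewrite the publisher when one of the managed options differs from the
+    # current configuration; otherwise exit without reporting a change.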
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None:
+ if params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None and params['sticky']:
+ args.append('--sticky')
+ elif params['sticky'] is not None:
+ args.append('--non-sticky')
+
+ if params['enabled'] is not None and params['enabled']:
+ args.append('--enable')
+ elif params['enabled'] is not None:
+ args.append('--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def get_publishers(module):
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
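+    # The first line of 'pkg publisher -Ftsv' is a tab-separated header (columns
+    # such as publisher, sticky, enabled, type and uri); it is lowercased so the
+    # remaining rows can be zipped into dicts keyed by those column names.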
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py
new file mode 100644
index 00000000..2937314f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgin.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al.
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+author:
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+options:
+ name:
+ description:
+      - Name of package to install/remove; multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - Intended state of the package
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: no
+ upgrade:
+ description:
+ - Upgrade main packages to their newer versions
+ type: bool
+ default: no
+ full_upgrade:
+ description:
+ - Upgrade all packages to their newer versions
+ type: bool
+ default: no
+ clean:
+ description:
+ - Clean packages cache
+ type: bool
+ default: no
+ force:
+ description:
+ - Force package reinstall
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgin:
+ name: foo
+ state: present
+
+- name: Install specific version of foo package
+ community.general.pkgin:
+ name: foo-2.0.1
+ state: present
+
+- name: Update cache and install foo package
+ community.general.pkgin:
+ name: foo
+ update_cache: yes
+
+- name: Remove package foo
+ community.general.pkgin:
+ name: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.pkgin:
+ name: foo,bar
+ state: absent
+
+- name: Update repositories as a separate step
+ community.general.pkgin:
+ update_cache: yes
+
+- name: Upgrade main packages (equivalent to pkgin upgrade)
+ community.general.pkgin:
+ upgrade: yes
+
+- name: Upgrade all packages (equivalent to pkgin full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+
+- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade)
+ community.general.pkgin:
+ full_upgrade: yes
+ force: yes
+
+- name: Clean packages cache (equivalent to pkgin clean)
+ community.general.pkgin:
+ clean: yes
+'''
+
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class PackageState(object):
+ PRESENT = 1
+ NOT_INSTALLED = 2
+ OUTDATED = 4
+ NOT_FOUND = 8
+
+
+def query_package(module, name):
+ """Search for the package by name and return state of the package.
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+            # (results in something like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name not in (pkgname_with_version, pkgname_without_version):
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return PackageState.OUTDATED
+ elif raw_state == '=' or raw_state == '>':
+ return PackageState.PRESENT
+ else:
+ # Package found but not installed
+ return PackageState.NOT_INSTALLED
+ # no fall-through
+
+ # No packages were matched
+ return PackageState.NOT_FOUND
+
+ # Search failed
+ return PackageState.NOT_FOUND
+
+
+def format_action_message(module, action, count):
+ vars = {"actioned": action,
+ "count": count}
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+    # arguments; however, this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
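+    # For example, this yields something like "pkgin -y -F install foo", or
+    # "pkgin -n install foo" when running in check mode.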
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+    # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]:
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ query_result = query_package(module, package)
+ if query_result in [PackageState.PRESENT, PackageState.OUTDATED]:
+ continue
+ elif query_result is PackageState.NOT_FOUND:
+ module.fail_json(msg="failed to find package %s for installation" % package)
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+        if not module.check_mode and query_package(module, package) not in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "database is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db")
+
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd)
+
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ full_upgrade=dict(default=False, type='bool'),
+ clean=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
+
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py
new file mode 100644
index 00000000..d5ed4a0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgng.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+ - Manage binary packages for FreeBSD using 'pkgng' which is available in versions after 9.0.
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - "With I(name=*), I(state: latest) will operate, but I(state: present) and I(state: absent) will be noops."
+ - >
+ Warning: In Ansible 2.9 and earlier this module had a misfeature
+ where I(name=*) with I(state: latest) or I(state: present) would
+ install every package from every package repository, filling up
+        the machine's disk. Avoid using them unless you are certain that
+ your role will only be used with newer versions.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ - 'Note: "latest" added in 2.7'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: no
+ annotation:
+ description:
+      - A comma-separated list of key/value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ type: str
+ pkgsite:
+ description:
+ - For pkgng versions before 1.1.4, specify packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+      - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: no
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
+ - Defines the C(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: no
+ version_added: 1.3.0
+author: "bleader (@bleader)"
+notes:
+  - When using pkgsite, be aware that packages already in the cache will not be downloaded again.
+  - When used with a `loop:`, each package will be processed individually;
+ it is much more efficient to pass the list directly to the `name` option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ community.general.pkgng:
+ name: foo,bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ community.general.pkgng:
+ name: baz
+ state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+ community.general.pkgng:
+ name: "*"
+ state: latest
+'''
+
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, pkgng_path, name, dir_arg):
+
+ rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
+
+ if rc == 0:
+ return True
+
+ return False
+
+
+def query_update(module, pkgng_path, name, dir_arg, old_pkgng, pkgsite):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s upgrade -g -n %s" % (pkgsite, pkgng_path, name))
+ else:
+ rc, out, err = module.run_command("%s %s upgrade %s -g -n %s" % (pkgng_path, dir_arg, pkgsite, name))
+
+ if rc == 1:
+ return True
+
+ return False
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
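+    # Compare version components left to right; the while/else below marks the
+    # installed pkg as older only when one of its components is lower than the
+    # corresponding component of compare_version.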
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def upgrade_packages(module, pkgng_path, dir_arg):
+ # Run a 'pkg upgrade', updating all packages.
+ upgraded_c = 0
+
+ cmd = "%s %s upgrade -y" % (pkgng_path, dir_arg)
+ if module.check_mode:
+ cmd += " -n"
+ rc, out, err = module.run_command(cmd)
+
+ match = re.search('^Number of packages to be upgraded: ([0-9]+)', out, re.MULTILINE)
+ if match:
+ upgraded_c = int(match.group(1))
+
+ if upgraded_c > 0:
+ return (True, "updated %s package(s)" % upgraded_c, out, err)
+ return (False, "no packages need upgrades", out, err)
+
+
+def remove_packages(module, pkgng_path, packages, dir_arg):
+ remove_c = 0
+ stdout = ""
+ stderr = ""
+    # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, pkgng_path, package, dir_arg):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c, stdout, stderr)
+
+ return (False, "package(s) already absent", stdout, stderr)
+
+
+def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg, state, ignoreosver):
+ install_c = 0
+ stdout = ""
+ stderr = ""
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+ if pkgsite != "":
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ # This environment variable skips mid-install prompts,
+ # setting them to their default values.
+ batch_var = 'env BATCH=yes'
+
+ if ignoreosver:
+ # Ignore FreeBSD OS version check,
+ # useful on -STABLE and -CURRENT branches.
+ batch_var = batch_var + ' IGNORE_OSVERSION=yes'
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s %s %s update" % (batch_var, pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr)
+
+ for package in packages:
+ already_installed = query_package(module, pkgng_path, package, dir_arg)
+ if already_installed and state == "present":
+ continue
+
+ update_available = query_update(module, pkgng_path, package, dir_arg, old_pkgng, pkgsite)
+ if not update_available and already_installed and state == "latest":
+ continue
+
+ if not module.check_mode:
+ if already_installed:
+ action = "upgrade"
+ else:
+ action = "install"
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s %s %s -g -U -y %s" % (batch_var, pkgsite, pkgng_path, action, package))
+ else:
+ rc, out, err = module.run_command("%s %s %s %s %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, action, pkgsite, package))
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
+ module.fail_json(msg="failed to %s %s: %s" % (action, package, out), stdout=stdout, stderr=stderr)
+
+ install_c += 1
+
+ if install_c > 0:
+ return (True, "added %s package(s)" % (install_c), stdout, stderr)
+
+ return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, pkgng_path, package, tag, dir_arg):
+ rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+ if _value:
+ rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
+ % (pkgng_path, dir_arg, package, tag))
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
+ _value = annotation_query(module, pkgng_path, package, tag, dir_arg)
+    if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
+ % (pkgng_path, dir_arg, package, tag, value))
+ if rc != 0:
+ module.fail_json(msg="could not change annotation annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
+ annotate_c = 0
+ annotations = map(lambda _annotation:
+ re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
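+    # For example, the annotation string '+test1=baz,-test2,:test3=foobar' expands
+    # to three operations: add test1=baz, delete test2 and modify test3 to foobar.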
+
+ for package in packages:
+ for _annotation in annotations:
+            if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value'], dir_arg):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+
+def autoremove_packages(module, pkgng_path, dir_arg):
+ stdout = ""
+ stderr = ""
+ rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return (False, "no package(s) to autoremove", stdout, stderr)
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
+ stdout += out
+ stderr += err
+
+ return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ cached=dict(default=False, type='bool'),
+ ignore_osver=dict(default=False, required=False, type='bool'),
+ annotation=dict(default="", required=False),
+ pkgsite=dict(default="", required=False),
+ rootdir=dict(default="", required=False, type='path'),
+ chroot=dict(default="", required=False, type='path'),
+ jail=dict(default="", required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ stdout = ""
+ stderr = ""
+ dir_arg = ""
+
+ if p["rootdir"] != "":
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir %s" % (p["rootdir"])
+
+ if p["ignore_osver"]:
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+ if old_pkgng:
+ module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+ if p["chroot"] != "":
+ dir_arg = '--chroot %s' % (p["chroot"])
+
+ if p["jail"] != "":
+ dir_arg = '--jail %s' % (p["jail"])
+
+ if pkgs == ['*'] and p["state"] == 'latest':
+ # Operate on all installed packages. Only state: latest makes sense here.
+ _changed, _msg, _stdout, _stderr = upgrade_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ # Operate on named packages
+ named_packages = [pkg for pkg in pkgs if pkg != '*']
+ if p["state"] in ("present", "latest") and named_packages:
+ _changed, _msg, _out, _err = install_packages(module, pkgng_path, named_packages,
+ p["cached"], p["pkgsite"], dir_arg,
+ p["state"], p["ignore_osver"])
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent" and named_packages:
+ _changed, _msg, _out, _err = remove_packages(module, pkgng_path, named_packages, dir_arg)
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg, _stdout, _stderr = autoremove_packages(module, pkgng_path, dir_arg)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py
new file mode 100644
index 00000000..9ec0ebaa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pkgutil.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: pkgutil
+short_description: OpenCSW package management on Solaris
+description:
+- This module installs, updates and removes packages from the OpenCSW project for Solaris.
+- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+- See U(https://www.opencsw.org/) for more information about the project.
+author:
+- Alexander Winkler (@dermute)
+- David Ponessa (@scathatheworm)
+options:
+ name:
+ description:
+ - The name of the package.
+ - When using C(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ type: list
+ required: true
+ elements: str
+ aliases: [ pkg ]
+ site:
+ description:
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)/C(installed)), or remove (C(absent)/C(removed)) packages.
+ - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ type: str
+ required: true
+ choices: [ absent, installed, latest, present, removed ]
+ update_catalog:
+ description:
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(yes).
+ type: bool
+ default: no
+ force:
+ description:
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to C(yes).
+ - This is useful for rolling back to stable from testing, or similar operations.
+ type: bool
+ default: false
+ version_added: 1.2.0
+notes:
+- In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+'''
+
+EXAMPLES = r'''
+- name: Install a package
+ community.general.pkgutil:
+ name: CSWcommon
+ state: present
+
+- name: Install a package from a specific repository
+ community.general.pkgutil:
+ name: CSWnrpe
+ site: ftp://myinternal.repo/opencsw/kiel
+ state: latest
+
+- name: Remove a package
+ community.general.pkgutil:
+ name: CSWtop
+ state: absent
+
+- name: Install several packages
+ community.general.pkgutil:
+ name:
+ - CSWsudo
+ - CSWtop
+ state: present
+
+- name: Update all packages
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+
+- name: Update all packages and force versions to match latest in catalog
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+ force: yes
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def packages_not_installed(module, names):
+ ''' Check if each package is installed and return list of the ones absent '''
+ pkgs = []
+ for pkg in names:
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc != 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_installed(module, names):
+ ''' Check if each package is installed and return list of the ones present '''
+ pkgs = []
+ for pkg in names:
+ if not pkg.startswith('CSW'):
+ continue
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc == 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_not_latest(module, names, site, update_catalog):
+ ''' Check status of each package and return list of the ones with an upgrade available '''
+ cmd = ['pkgutil']
+ if update_catalog:
+ cmd.append('-U')
+ cmd.append('-c')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if names != ['*']:
+ cmd.extend(names)
+ rc, out, err = run_command(module, cmd)
+
+ # Find packages in the catalog which are not up to date
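+    # pkgutil -c lists one line per package with its installed and catalog
+    # versions; lines marked 'SAME' are already current and are skipped below,
+    # as are lines mentioning the catalog itself.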
+ packages = []
+ for line in out.split('\n')[1:-1]:
+ if 'catalog' not in line and 'SAME' not in line:
+ packages.append(line.split(' ')[0])
+
+ # Remove duplicates
+ return list(set(packages))
+
+
+def run_command(module, cmd, **kwargs):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
+
+
+def package_install(module, state, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-iy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def package_upgrade(module, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-uy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+        cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd += pkgs
+ return run_command(module, cmd)
+
+
+def package_uninstall(module, pkgs):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-ry')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ site=dict(type='str'),
+ update_catalog=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ force = module.params['force']
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ if state in ['installed', 'present']:
+ # Fail with an explicit error when trying to "install" '*'
+ if name == ['*']:
+ module.fail_json(msg="Can not use 'state: present' with name: '*'")
+
+ # Build list of packages that are actually not installed from the ones requested
+ pkgs = packages_not_installed(module, name)
+
+ # If the package list is empty then all packages are already present
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['latest']:
+ # When using latest for *
+ if name == ['*']:
+ # Check for packages that are actually outdated
+ pkgs = packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list comes up empty, everything is already up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ # If there are packages to update, just empty the list and run the command without it
+            # pkgutil logic is to update all packages when run without package names
+ pkgs = []
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+ else:
+ # Build list of packages that are either outdated or not installed
+ pkgs = packages_not_installed(module, name)
+ pkgs += packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list is empty that means all packages are installed and up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['absent', 'removed']:
+ # Build list of packages requested for removal that are actually present
+ pkgs = packages_installed(module, name)
+
+ # If the list is empty, no packages need to be removed
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_uninstall(module, pkgs)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent/up to date
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py
new file mode 100644
index 00000000..1f0fdc68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portage.py
@@ -0,0 +1,539 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, William L Thomson Jr
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ aliases: [name]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - State of the package atom
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ type: str
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ type: bool
+ default: no
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ type: bool
+ default: no
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ type: bool
+ default: no
+
+ changed_use:
+ description:
+      - Include installed packages where USE flags have changed, except when flags that the user has not enabled are added or removed (--changed-use)
+ type: bool
+ default: no
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ type: bool
+ default: no
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ type: bool
+ default: yes
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ type: bool
+ default: no
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ type: bool
+ default: no
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ type: bool
+ default: no
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ type: bool
+ default: no
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ type: bool
+ default: no
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ choices: [ "web", "yes", "no" ]
+ type: str
+
+ getbinpkgonly:
+ description:
+ - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+ version_added: 1.3.0
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: no
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling).
+ type: bool
+ default: no
+
+ usepkg:
+ description:
+ - Tries to use the binary package(s) in the locally available packages directory.
+ type: bool
+ default: no
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ type: bool
+ default: no
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ - "Since version 2.6: Value of 0 or False resets any previously added"
+ - --jobs setting values
+ type: int
+
+ loadavg:
+ description:
+      - Specifies that no new builds should be started if there are other builds running and the load average is at least LOAD
+      - "Since version 2.6: Value of 0 or False resets any previously added --load-average setting values"
+ type: float
+
+ quietbuild:
+ description:
+      - Redirect all build output to logs alone, and do not display it on stdout (--quiet-build)
+ type: bool
+ default: no
+
+ quietfail:
+ description:
+      - Suppresses display of the build log on stdout (--quiet-fail)
+      - Only the die message and the path of the build log will be displayed on stdout.
+ type: bool
+ default: no
+
+requirements: [ gentoolkit ]
+author:
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+'''
+
+EXAMPLES = '''
+- name: Make sure package foo is installed
+ community.general.portage:
+ package: foo
+ state: present
+
+- name: Make sure package foo is not installed
+ community.general.portage:
+ package: foo
+ state: absent
+
+- name: Update package foo to the latest version (os specific alternative to latest)
+ community.general.portage:
+ package: foo
+ update: yes
+
+- name: Install package foo using PORTAGE_BINHOST setup
+ community.general.portage:
+ package: foo
+ getbinpkg: yes
+
+- name: Re-install world from binary packages only and do not allow any compiling
+ community.general.portage:
+ package: '@world'
+ usepkgonly: yes
+
+- name: Sync repositories and update world
+ community.general.portage:
+ package: '@world'
+ update: yes
+ deep: yes
+ sync: yes
+
+- name: Remove unneeded packages
+ community.general.portage:
+ depclean: yes
+
+- name: Remove package foo if it is not explicitly needed
+ community.general.portage:
+ package: foo
+ state: absent
+ depclean: yes
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ """Run emerge command against given list of atoms."""
+ p = module.params
+
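+    # The for/else below falls through to the else (and exits early) only when
+    # none of the requested packages is missing, i.e. everything is installed.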
+ if p['noreplace'] and not (p['update'] or p['state'] == 'latest'):
+ for package in packages:
+ if p['noreplace'] and not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkgonly': '--getbinpkgonly',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
+ 'quietbuild': '--quiet-build',
+ 'quietfail': '--quiet-fail',
+ }
+ for flag, arg in emerge_flags.items():
+ if p[flag]:
+ args.append(arg)
+
+ if p['state'] and p['state'] == 'latest':
+ args.append("--update")
+
+ emerge_flags = {
+ 'jobs': '--jobs',
+ 'loadavg': '--load-average',
+ }
+
+ for flag, arg in emerge_flags.items():
+ flag_val = p[flag]
+
+ if flag_val is None:
+ """Fallback to default: don't use this argument at all."""
+ continue
+
+ if not flag_val:
+ """If the value is 0 or 0.0: add the flag, but not the value."""
+ args.append(arg)
+ continue
+
+ """Add the --flag=value pair."""
+ args.extend((arg, to_native(flag_val)))
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ args.append('--ask=n')
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed', 'latest']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(type='list', elements='str', default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=True, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web', 'no']),
+ getbinpkgonly=dict(default=False, type='bool'),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
+ quietbuild=dict(default=False, type='bool'),
+ quietfail=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[
+ ['nodeps', 'onlydeps'],
+ ['quiet', 'verbose'],
+ ['quietbuild', 'verbose'],
+ ['quietfail', 'verbose'],
+ ],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync'] and p['sync'].strip() != 'no':
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'])
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py
new file mode 100644
index 00000000..d1c33cc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/portinstall.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Install packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+options:
+ name:
+ description:
+ - Name of the package to install or remove.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - State of the package.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+ - Use packages instead of ports whenever available.
+ type: bool
+ required: false
+ default: yes
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.portinstall:
+ name: foo
+ state: present
+
+- name: Install package security/cyrus-sasl2-saslauthd
+ community.general.portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.portinstall:
+ name: foo,bar
+ state: absent
+'''
+
+import os
+import re
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, shlex_quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+ # databases/mysql55-client installs as mysql-client, so try solving
+ # that the ugly way. Pity FreeBSD doesn't have a foolproof way of checking
+ # whether some package is installed
+ name_without_digits = re.sub('[0-9]', '', name)
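+ # Illustrative example of the digit-stripping fallback:
+ # re.sub('[0-9]', '', 'mysql55-client') == 'mysql-client'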
+ if name != name_without_digits:
+ if pkgng:
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+ else:
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+ found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # counts the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+ shlex_quote(name_without_digits)),
+ use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+ # If portinstall not found, automagically install
+ portinstall_path = module.get_bin_path('portinstall', False)
+ if not portinstall_path:
+ pkg_path = module.get_bin_path('pkg', False)
+ if pkg_path:
+ module.run_command("pkg install -y portupgrade")
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages:
+ portinstall_params = "--use-packages"
+ else:
+ portinstall_params = ""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # TODO: check how many match
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py
new file mode 100644
index 00000000..bf66f3d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_copy.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_copy
+short_description: Copy data between a file/program and a PostgreSQL table
+description:
+- Copy data between a file/program and a PostgreSQL table.
+
+options:
+ copy_to:
+ description:
+ - Copy the contents of a table to a file.
+ - Can also copy the results of a SELECT query.
+ - Mutually exclusive with I(copy_from) and I(dst).
+ type: path
+ aliases: [ to ]
+ copy_from:
+ description:
+ - Copy data from a file to a table (appending the data to whatever is in the table already).
+ - Mutually exclusive with I(copy_to) and I(src).
+ type: path
+ aliases: [ from ]
+ src:
+ description:
+ - Copy data from I(src=tablename) to I(copy_to=/path/to/file).
+ - Used with I(copy_to) only.
+ type: str
+ aliases: [ source ]
+ dst:
+ description:
+ - Copy data to I(dst=tablename) from I(copy_from=/path/to/data.file).
+ - Used with I(copy_from) only.
+ type: str
+ aliases: [ destination ]
+ columns:
+ description:
+ - List of column names for the src/dst table to COPY FROM/TO.
+ type: list
+ elements: str
+ aliases: [ column ]
+ program:
+ description:
+ - Mark I(src)/I(dst) as a program. Data will be copied to/from a program.
+ - See the Examples block and the PROGRAM argument description at U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: bool
+ default: no
+ options:
+ description:
+ - Options of COPY command.
+ - See the full list of available options U(https://www.postgresql.org/docs/current/sql-copy.html).
+ type: dict
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases: [ login_db ]
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supports PostgreSQL version 9.4+.
+- COPY command is only allowed to database superusers.
+- If I(check_mode=yes), the module just checks the src/dst table availability
+ and returns the COPY query that has not actually been executed.
+- If I(check_mode=yes) and the source has been passed as SQL, the module
+ will execute it and roll the transaction back, but be aware that this
+ can affect database performance (e.g., if the SQL collects a lot of data).
+
+seealso:
+- name: COPY command reference
+ description: Complete reference of the COPY command documentation.
+ link: https://www.postgresql.org/docs/current/sql-copy.html
+
+author:
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Copy text TAB-separated data from file /tmp/data.txt to acme table
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: acme
+
+- name: Copy CSV (comma-separated) data from file /tmp/data.csv to columns id, name of table acme
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.csv
+ dst: acme
+ columns: id,name
+ options:
+ format: csv
+
+- name: >
+ Copy text vertical-bar-separated data from file /tmp/data.txt to bar table.
+ The NULL values are specified as N
+ community.general.postgresql_copy:
+ copy_from: /tmp/data.txt
+ dst: bar
+ options:
+ delimiter: '|'
+ null: 'N'
+
+- name: Copy data from acme table to file /tmp/data.txt in text format, TAB-separated
+ community.general.postgresql_copy:
+ src: acme
+ copy_to: /tmp/data.txt
+
+- name: Copy data from SELECT query to /tmp/data.csv in CSV format
+ community.general.postgresql_copy:
+ src: 'SELECT * FROM acme'
+ copy_to: /tmp/data.csv
+ options:
+ format: csv
+
+- name: Copy CSV data from my_table to gzip
+ community.general.postgresql_copy:
+ src: my_table
+ copy_to: 'gzip > /tmp/data.csv.gz'
+ program: yes
+ options:
+ format: csv
+
+- name: >
+ Copy data from columns id, name of table bar to /tmp/data.txt.
+ Output format is text, vertical-bar-separated, NULL as N
+ community.general.postgresql_copy:
+ src: bar
+ columns:
+ - id
+ - name
+ copy_to: /tmp/data.txt
+ options:
+ delimiter: '|'
+ null: 'N'
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "COPY test_table FROM '/tmp/data_file.txt' (FORMAT csv, DELIMITER ',', NULL 'NULL')" ]
+src:
+ description: Data source.
+ returned: always
+ type: str
+ sample: "mytable"
+dst:
+ description: Data destination.
+ returned: always
+ type: str
+ sample: "/tmp/data.csv"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+class PgCopyData(object):
+
+ """Implements behavior of COPY FROM, COPY TO PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- something was changed after execution or not
+ executed_queries (list) -- executed queries
+ dst (str) -- data destination table (when copy_from)
+ src (str) -- data source table (when copy_to)
+ opt_need_quotes (tuple) -- values of these options must be passed
+ to SQL in quotes
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.changed = False
+ self.dst = ''
+ self.src = ''
+ self.opt_need_quotes = (
+ 'DELIMITER',
+ 'NULL',
+ 'QUOTE',
+ 'ESCAPE',
+ 'ENCODING',
+ )
+
+ def copy_from(self):
+ """Implements COPY FROM command behavior."""
+ self.src = self.module.params['copy_from']
+ self.dst = self.module.params['dst']
+
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.dst, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('FROM')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.src)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.dst)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def copy_to(self):
+ """Implements COPY TO command behavior."""
+ self.src = self.module.params['src']
+ self.dst = self.module.params['copy_to']
+
+ if 'SELECT ' in self.src.upper():
+ # If src is SQL SELECT statement:
+ query_fragments = ['COPY (%s)' % self.src]
+ else:
+ # If src is a table:
+ query_fragments = ['COPY %s' % pg_quote_identifier(self.src, 'table')]
+
+ if self.module.params.get('columns'):
+ query_fragments.append('(%s)' % ','.join(self.module.params['columns']))
+
+ query_fragments.append('TO')
+
+ if self.module.params.get('program'):
+ query_fragments.append('PROGRAM')
+
+ query_fragments.append("'%s'" % self.dst)
+
+ if self.module.params.get('options'):
+ query_fragments.append(self.__transform_options())
+
+ # Note: check mode is implemented here:
+ if self.module.check_mode:
+ self.changed = self.__check_table(self.src)
+
+ if self.changed:
+ self.executed_queries.append(' '.join(query_fragments))
+ else:
+ if exec_sql(self, ' '.join(query_fragments), return_bool=True):
+ self.changed = True
+
+ def __transform_options(self):
+ """Transform options dict into a suitable string."""
+ for (key, val) in iteritems(self.module.params['options']):
+ if key.upper() in self.opt_need_quotes:
+ self.module.params['options'][key] = "'%s'" % val
+
+ opt = ['%s %s' % (key, val) for (key, val) in iteritems(self.module.params['options'])]
+ return '(%s)' % ', '.join(opt)
+
+ def __check_table(self, table):
+ """Check table or SQL in transaction mode for check_mode.
+
+ Return True if it is OK.
+
+ Arguments:
+ table (str) - Table name that needs to be checked.
+ It can be SQL SELECT statement that was passed
+ instead of the table name.
+ """
+ if 'SELECT ' in table.upper():
+ # In this case table is actually SQL SELECT statement.
+ # If SQL fails, it's handled by exec_sql():
+ exec_sql(self, table, add_to_executed=False)
+ # If exec_sql was passed, it means all is OK:
+ return True
+
+ exec_sql(self, 'SELECT 1 FROM %s' % pg_quote_identifier(table, 'table'),
+ add_to_executed=False)
+ # If SQL was executed successfully:
+ return True
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ copy_to=dict(type='path', aliases=['to']),
+ copy_from=dict(type='path', aliases=['from']),
+ src=dict(type='str', aliases=['source']),
+ dst=dict(type='str', aliases=['destination']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ options=dict(type='dict'),
+ program=dict(type='bool', default=False),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['copy_from', 'copy_to'],
+ ['copy_from', 'src'],
+ ['copy_to', 'dst'],
+ ]
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ opt_list = None
+ if module.params['options']:
+ opt_list = ['%s %s' % (key, val) for (key, val) in iteritems(module.params['options'])]
+
+ check_input(module,
+ module.params['copy_to'],
+ module.params['copy_from'],
+ module.params['src'],
+ module.params['dst'],
+ opt_list,
+ module.params['columns'],
+ module.params['session_role'])
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+ if module.params.get('copy_from') and not module.params.get('dst'):
+ module.fail_json(msg='dst param is necessary with copy_from')
+
+ elif module.params.get('copy_to') and not module.params.get('src'):
+ module.fail_json(msg='src param is necessary with copy_to')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ data = PgCopyData(module, cursor)
+
+ # Note: parameters like dst, src, etc. are taken
+ # from the module object into the PgCopyData object,
+ # so there is no need to pass them to the methods below.
+ # Note: check mode is implemented inside the methods below
+ # by checking passed module.check_mode arg.
+ if module.params.get('copy_to'):
+ data.copy_to()
+
+ elif module.params.get('copy_from'):
+ data.copy_from()
+
+ # Finish:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Return some values:
+ module.exit_json(
+ changed=data.changed,
+ queries=data.executed_queries,
+ src=data.src,
+ dst=data.dst,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py
new file mode 100644
index 00000000..8fde39ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_db.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_db
+short_description: Add or remove PostgreSQL databases from a remote host.
+description:
+ - Add or remove PostgreSQL databases from a remote host.
+options:
+ name:
+ description:
+ - Name of the database to add or remove
+ type: str
+ required: true
+ aliases: [ db ]
+ port:
+ description:
+ - Database port to connect (if needed)
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ owner:
+ description:
+ - Name of the role to set as owner of the database
+ type: str
+ template:
+ description:
+ - Template used to create the database
+ type: str
+ encoding:
+ description:
+ - Encoding of the database
+ type: str
+ lc_collate:
+ description:
+ - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
+ type: str
+ lc_ctype:
+ description:
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database state.
+ - C(present) implies that the database should be created if necessary.
+ - C(absent) implies that the database should be removed if present.
+ - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
+ Note that with some PostgreSQL versions, pg_dump (an embedded PostgreSQL utility used by the module)
+ returns rc 0 even when errors occur (e.g. when the connection is forbidden by pg_hba.conf),
+ so the module returns changed=True but the dump has not actually been done. Please make sure that your version of
+ pg_dump returns rc 1 in this case.
+ - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
+ - The format of the backup will be detected based on the target name.
+ - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
+ - Supported formats for dump and restore include C(.sql) and C(.tar)
+ - "Restore program is selected by target file format: C(.tar) and C(.pgc) are handled by pg_restore, other with pgsql."
+ type: str
+ choices: [ absent, dump, present, restore ]
+ default: present
+ target:
+ description:
+ - File to back up or restore from.
+ - Used when I(state) is C(dump) or C(restore).
+ type: path
+ target_opts:
+ description:
+ - Additional arguments for pg_dump or restore program (pg_restore or psql, depending on target's format).
+ - Used when I(state) is C(dump) or C(restore).
+ type: str
+ maintenance_db:
+ description:
+ - The value specifies the initial database (which is also called the maintenance DB) that Ansible connects to.
+ type: str
+ default: postgres
+ conn_limit:
+ description:
+ - Specifies the database connection limit.
+ type: str
+ tablespace:
+ description:
+ - The tablespace to set for the database
+ U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
+ - If you want to move the database back to the default tablespace,
+ explicitly set this to pg_default.
+ type: path
+ dump_extra_args:
+ description:
+ - Provides additional arguments when I(state) is C(dump).
+ - Cannot be used with dump-file-format-related arguments like ``--format=d``.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(owner), I(conn_limit), I(encoding),
+ I(db), I(template), I(tablespace), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: CREATE DATABASE reference
+ description: Complete reference of the CREATE DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createdatabase.html
+- name: DROP DATABASE reference
+ description: Complete reference of the DROP DATABASE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
+- name: pg_dump reference
+ description: Complete reference of pg_dump documentation.
+ link: https://www.postgresql.org/docs/current/app-pgdump.html
+- name: pg_restore reference
+ description: Complete reference of pg_restore documentation.
+ link: https://www.postgresql.org/docs/current/app-pgrestore.html
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+notes:
+- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
+author: "Ansible Core Team"
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new database with name "acme"
+ community.general.postgresql_db:
+ name: acme
+
+# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
+- name: Create a new database with name "acme" and specific encoding and locale # settings.
+ community.general.postgresql_db:
+ name: acme
+ encoding: UTF-8
+ lc_collate: de_DE.UTF-8
+ lc_ctype: de_DE.UTF-8
+ template: template0
+
+# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
+- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
+ community.general.postgresql_db:
+ name: acme
+ conn_limit: "100"
+
+- name: Dump an existing database to a file
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+
+- name: Dump an existing database to a file excluding the test table
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ dump_extra_args: --exclude-table=test
+
+- name: Dump an existing database to a file (with compression)
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql.gz
+
+- name: Dump a single schema for an existing database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/acme.sql
+ target_opts: "-n public"
+
+- name: Dump only table1 and table2 from the acme database
+ community.general.postgresql_db:
+ name: acme
+ state: dump
+ target: /tmp/table1_table2.sql
+ target_opts: "-t table1 -t table2"
+
+# Note: In the example below, if database foo exists and has another tablespace
+# the tablespace will be changed to foo. Access to the database will be locked
+# until the copying of database files is finished.
+- name: Create a new database called foo in tablespace bar
+ community.general.postgresql_db:
+ name: foo
+ tablespace: bar
+'''
+
+RETURN = r'''
+executed_commands:
+ description: List of commands which tried to run.
+ returned: always
+ type: list
+ sample: ["CREATE DATABASE acme"]
+ version_added: '0.2.0'
+'''
+
+
+import os
+import subprocess
+import traceback
+
+try:
+ import psycopg2
+ import psycopg2.extras
+except ImportError:
+ HAS_PSYCOPG2 = False
+else:
+ HAS_PSYCOPG2 = True
+
+import ansible_collections.community.general.plugins.module_utils.postgres as pgutils
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ SQLParseError,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_native
+
+executed_commands = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def set_owner(cursor, db, owner):
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (db, owner)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def set_conn_limit(cursor, db, conn_limit):
+ query = 'ALTER DATABASE "%s" CONNECTION LIMIT %s' % (db, conn_limit)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+
+def get_encoding_id(cursor, encoding):
+ query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
+ cursor.execute(query, {'encoding': encoding})
+ return cursor.fetchone()['encoding_id']
+
+
+def get_db_info(cursor, db):
+ query = """
+ SELECT rolname AS owner,
+ pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
+ datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
+ spcname AS tablespace
+ FROM pg_database
+ JOIN pg_roles ON pg_roles.oid = pg_database.datdba
+ JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
+ WHERE datname = %(db)s
+ """
+ cursor.execute(query, {'db': db})
+ return cursor.fetchone()
+
+
+def db_exists(cursor, db):
+ query = "SELECT * FROM pg_database WHERE datname=%(db)s"
+ cursor.execute(query, {'db': db})
+ return cursor.rowcount == 1
+
+
+def db_delete(cursor, db):
+ if db_exists(cursor, db):
+ query = 'DROP DATABASE "%s"' % db
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+ else:
+ return False
+
+
+def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
+ if not db_exists(cursor, db):
+ query_fragments = ['CREATE DATABASE "%s"' % db]
+ if owner:
+ query_fragments.append('OWNER "%s"' % owner)
+ if template:
+ query_fragments.append('TEMPLATE "%s"' % template)
+ if encoding:
+ query_fragments.append('ENCODING %(enc)s')
+ if lc_collate:
+ query_fragments.append('LC_COLLATE %(collate)s')
+ if lc_ctype:
+ query_fragments.append('LC_CTYPE %(ctype)s')
+ if tablespace:
+ query_fragments.append('TABLESPACE "%s"' % tablespace)
+ if conn_limit:
+ query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query = ' '.join(query_fragments)
+ executed_commands.append(cursor.mogrify(query, params))
+ cursor.execute(query, params)
+ return True
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ raise NotSupportedError(
+ 'Changing database encoding is not supported. '
+ 'Current encoding: %s' % db_info['encoding']
+ )
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ raise NotSupportedError(
+ 'Changing LC_COLLATE is not supported. '
+ 'Current LC_COLLATE: %s' % db_info['lc_collate']
+ )
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ raise NotSupportedError(
+ 'Changing LC_CTYPE is not supported.'
+ 'Current LC_CTYPE: %s' % db_info['lc_ctype']
+ )
+ else:
+ changed = False
+
+ if owner and owner != db_info['owner']:
+ changed = set_owner(cursor, db, owner)
+
+ if conn_limit and conn_limit != str(db_info['conn_limit']):
+ changed = set_conn_limit(cursor, db, conn_limit)
+
+ if tablespace and tablespace != db_info['tablespace']:
+ changed = set_tablespace(cursor, db, tablespace)
+
+ return changed
+
+
+def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
+ if not db_exists(cursor, db):
+ return False
+ else:
+ db_info = get_db_info(cursor, db)
+ if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
+ return False
+ elif lc_collate and lc_collate != db_info['lc_collate']:
+ return False
+ elif lc_ctype and lc_ctype != db_info['lc_ctype']:
+ return False
+ elif owner and owner != db_info['owner']:
+ return False
+ elif conn_limit and conn_limit != str(db_info['conn_limit']):
+ return False
+ elif tablespace and tablespace != db_info['tablespace']:
+ return False
+ else:
+ return True
+
+
+def db_dump(module, target, target_opts="",
+ db=None,
+ dump_extra_args=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user, db_prefix=False)
+ cmd = module.get_bin_path('pg_dump', True)
+ comp_prog_path = None
+
+ if os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=t')
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=c')
+ if os.path.splitext(target)[-1] == '.gz':
+ if module.get_bin_path('pigz'):
+ comp_prog_path = module.get_bin_path('pigz', True)
+ else:
+ comp_prog_path = module.get_bin_path('gzip', True)
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzip2', True)
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xz', True)
+
+ cmd += "".join(flags)
+
+ if dump_extra_args:
+ cmd += " {0} ".format(dump_extra_args)
+
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ # Use a fifo to be notified of an error in pg_dump
+ # Using shell pipe has no way to return the code of the first command
+ # in a portable way.
+ fifo = os.path.join(module.tmpdir, 'pg_fifo')
+ os.mkfifo(fifo)
+ cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
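+ # Illustrative result (made-up paths), e.g. with gzip as the compressor:
+ # "gzip </tmp/.../pg_fifo > /tmp/acme.sql.gz & pg_dump ... >/tmp/.../pg_fifo"
+ # so the shell's exit status is pg_dump's, not the compressor's.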
+ else:
+ cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def db_restore(module, target, target_opts="",
+ db=None,
+ user=None,
+ password=None,
+ host=None,
+ port=None,
+ **kw):
+
+ flags = login_flags(db, host, port, user)
+ comp_prog_path = None
+ cmd = module.get_bin_path('psql', True)
+
+ if os.path.splitext(target)[-1] == '.sql':
+ flags.append(' --file={0}'.format(target))
+
+ elif os.path.splitext(target)[-1] == '.tar':
+ flags.append(' --format=Tar')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.pgc':
+ flags.append(' --format=Custom')
+ cmd = module.get_bin_path('pg_restore', True)
+
+ elif os.path.splitext(target)[-1] == '.gz':
+ comp_prog_path = module.get_bin_path('zcat', True)
+
+ elif os.path.splitext(target)[-1] == '.bz2':
+ comp_prog_path = module.get_bin_path('bzcat', True)
+
+ elif os.path.splitext(target)[-1] == '.xz':
+ comp_prog_path = module.get_bin_path('xzcat', True)
+
+ cmd += "".join(flags)
+ if target_opts:
+ cmd += " {0} ".format(target_opts)
+
+ if comp_prog_path:
+ env = os.environ.copy()
+ if password:
+ env = {"PGPASSWORD": password}
+ p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
+ (stdout2, stderr2) = p2.communicate()
+ p1.stdout.close()
+ p1.wait()
+ if p1.returncode != 0:
+ stderr1 = p1.stderr.read()
+ return p1.returncode, '', stderr1, 'cmd: ****'
+ else:
+ return p2.returncode, '', stderr2, 'cmd: ****'
+ else:
+ cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
+
+ return do_with_password(module, cmd, password)
+
+
+def login_flags(db, host, port, user, db_prefix=True):
+ """
+ returns a list of connection argument strings each prefixed
+ with a space and quoted where necessary to later be combined
+ in a single shell string with `"".join(rv)`
+
+ db_prefix determines if "--dbname" is prefixed to the db argument,
+ since the argument was introduced in 9.3.
+ """
+ flags = []
+ if db:
+ if db_prefix:
+ flags.append(' --dbname={0}'.format(shlex_quote(db)))
+ else:
+ flags.append(' {0}'.format(shlex_quote(db)))
+ if host:
+ flags.append(' --host={0}'.format(host))
+ if port:
+ flags.append(' --port={0}'.format(port))
+ if user:
+ flags.append(' --username={0}'.format(user))
+ return flags
+
+
+def do_with_password(module, cmd, password):
+ env = {}
+ if password:
+ env = {"PGPASSWORD": password}
+ executed_commands.append(cmd)
+ rc, stderr, stdout = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
+ return rc, stderr, stdout, cmd
+
+
+def set_tablespace(cursor, db, tablespace):
+ query = 'ALTER DATABASE "%s" SET TABLESPACE "%s"' % (db, tablespace)
+ executed_commands.append(query)
+ cursor.execute(query)
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = pgutils.postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', required=True, aliases=['name']),
+ owner=dict(type='str', default=''),
+ template=dict(type='str', default=''),
+ encoding=dict(type='str', default=''),
+ lc_collate=dict(type='str', default=''),
+ lc_ctype=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
+ target=dict(type='path', default=''),
+ target_opts=dict(type='str', default=''),
+ maintenance_db=dict(type='str', default="postgres"),
+ session_role=dict(type='str'),
+ conn_limit=dict(type='str', default=''),
+ tablespace=dict(type='path', default=''),
+ dump_extra_args=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ db = module.params["db"]
+ owner = module.params["owner"]
+ template = module.params["template"]
+ encoding = module.params["encoding"]
+ lc_collate = module.params["lc_collate"]
+ lc_ctype = module.params["lc_ctype"]
+ target = module.params["target"]
+ target_opts = module.params["target_opts"]
+ state = module.params["state"]
+ changed = False
+ maintenance_db = module.params['maintenance_db']
+ session_role = module.params["session_role"]
+ conn_limit = module.params['conn_limit']
+ tablespace = module.params['tablespace']
+ dump_extra_args = module.params['dump_extra_args']
+ trust_input = module.params['trust_input']
+
+ # Check input
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, owner, conn_limit, encoding, db, template, tablespace, session_role)
+
+ raw_connection = state in ("dump", "restore")
+
+ if not raw_connection:
+ pgutils.ensure_required_libs(module)
+
+ # To use defaults values, keyword arguments must be absent, so
+ # check which values are empty and don't include in the **kw
+ # dictionary
+ params_map = {
+ "login_host": "host",
+ "login_user": "user",
+ "login_password": "password",
+ "port": "port",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+ kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
+ if k in params_map and v != '' and v is not None)
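+ # Illustrative example (made-up values): login_host='db.example.com' and
+ # login_user='admin' end up as kw['host'] and kw['user'], while parameters
+ # that are None or empty strings are dropped so libpq defaults apply.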
+
+ # If a login_unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+
+ if is_localhost and module.params["login_unix_socket"] != "":
+ kw["host"] = module.params["login_unix_socket"]
+
+ if target == "":
+ target = "{0}/{1}.sql".format(os.getcwd(), db)
+ target = os.path.expanduser(target)
+
+ if not raw_connection:
+ try:
+ db_connection = psycopg2.connect(database=maintenance_db, **kw)
+
+ # Enable autocommit so we can create databases
+ if psycopg2.__version__ >= '2.4.2':
+ db_connection.autocommit = True
+ else:
+ db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ if session_role:
+ try:
+ cursor.execute('SET ROLE "%s"' % session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = db_exists(cursor, db)
+ elif state == "present":
+ changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+ if state == "absent":
+ try:
+ changed = db_delete(cursor, db)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state in ("dump", "restore"):
+ method = state == "dump" and db_dump or db_restore
+ try:
+ if state == 'dump':
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
+ else:
+ rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
+
+ if rc != 0:
+ module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
+ else:
+ module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
+ executed_commands=executed_commands)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py
new file mode 100644
index 00000000..3fa82dac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ext.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ext
+short_description: Add or remove PostgreSQL extensions from a database
+description:
+- Add or remove PostgreSQL extensions from a database.
+options:
+ name:
+ description:
+ - Name of the extension to add or remove.
+ required: true
+ type: str
+ aliases:
+ - ext
+ db:
+ description:
+ - Name of the database to add or remove the extension to/from.
+ required: true
+ type: str
+ aliases:
+ - login_db
+ schema:
+ description:
+ - Name of the schema to add the extension to.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The database extension state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ cascade:
+ description:
+ - Automatically install/remove any extensions that this extension depends on
+ that are not already installed/removed (supported since PostgreSQL 9.6).
+ type: bool
+ default: no
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ version:
+ description:
+ - Extension version to add or update to. Has effect with I(state=present) only.
+ - If not specified, the latest extension version will be created.
+ - It can't downgrade an extension version.
+ When a version downgrade is needed, remove the extension and create a new one with the appropriate version.
+ - Set I(version=latest) to update the extension to the latest available version.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(ext), I(schema),
+ I(version), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL extensions
+ description: General information about PostgreSQL extensions.
+ link: https://www.postgresql.org/docs/current/external-extensions.html
+- name: CREATE EXTENSION reference
+ description: Complete reference of the CREATE EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createextension.html
+- name: ALTER EXTENSION reference
+ description: Complete reference of the ALTER EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterextension.html
+- name: DROP EXTENSION reference
+ description: Complete reference of the DROP EXTENSION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropextension.html
+notes:
+- The default authentication assumes that you are either logging in as
+ or sudo'ing to the C(postgres) account on the host.
+- This module uses I(psycopg2), a Python PostgreSQL database adapter.
+- You must ensure that C(psycopg2) is installed on the host before using this module.
+- If the remote host is the PostgreSQL server (which is the default case),
+ then PostgreSQL must also be installed on the remote host.
+- For Ubuntu-based systems, install the C(postgresql), C(libpq-dev),
+ and C(python-psycopg2) packages on the remote host before using this module.
+- Incomparable versions, for example PostGIS ``unpackaged``, cannot be installed.
+requirements: [ psycopg2 ]
+author:
+- Daniel Schep (@dschep)
+- Thomas O'Donnell (@andytom)
+- Sandro Santilli (@strk)
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Adds postgis extension to the database acme in the schema foo
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ schema: foo
+
+- name: Removes postgis extension from the database acme
+ community.general.postgresql_ext:
+ name: postgis
+ db: acme
+ state: absent
+
+- name: Adds earthdistance extension to the database template1 with cascade
+ community.general.postgresql_ext:
+ name: earthdistance
+ db: template1
+ cascade: true
+
+# In the example below, if earthdistance extension is installed,
+# it will be removed too because it depends on cube:
+- name: Removes cube extension from the database acme with cascade
+ community.general.postgresql_ext:
+ name: cube
+ db: acme
+ cascade: yes
+ state: absent
+
+- name: Create extension foo of version 1.2 or update it if it's already created
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: 1.2
+
+- name: Assuming extension foo is created, update it to the latest version
+ community.general.postgresql_ext:
+ db: acme
+ name: foo
+ version: latest
+'''
+
+RETURN = r'''
+query:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["DROP EXTENSION \"acme\""]
+
+'''
+
+import traceback
+
+from distutils.version import LooseVersion
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def ext_exists(cursor, ext):
+ query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
+ cursor.execute(query, {'ext': ext})
+ return cursor.rowcount == 1
+
+
+def ext_delete(cursor, ext, cascade):
+ if ext_exists(cursor, ext):
+ query = "DROP EXTENSION \"%s\"" % ext
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def ext_update_version(cursor, ext, version):
+ """Update extension version.
+
+ Return True if success.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ version (str) -- extension version
+ """
+ query = "ALTER EXTENSION \"%s\" UPDATE" % ext
+ params = {}
+
+ if version != 'latest':
+ query += " TO %(ver)s"
+ params['ver'] = version
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+
+ return True
+
+
+def ext_create(cursor, ext, schema, cascade, version):
+ query = "CREATE EXTENSION \"%s\"" % ext
+ params = {}
+
+ if schema:
+ query += " WITH SCHEMA \"%s\"" % schema
+ if version:
+ query += " VERSION %(ver)s"
+ params['ver'] = version
+ if cascade:
+ query += " CASCADE"
+
+ cursor.execute(query, params)
+ executed_queries.append(cursor.mogrify(query, params))
+ return True
+
+
+def ext_get_versions(cursor, ext):
+ """
+ Get the current created extension version and available versions.
+
+ Return tuple (current_version, [list of available versions]).
+
+ Note: the list of available versions contains only versions
+ that are higher than the currently created version.
+ If the extension is not created, this list will contain all
+ available versions.
+
+ Args:
+ cursor (cursor) -- cursor object of psycopg2 library
+ ext (str) -- extension name
+ """
+
+ # 1. Get the current extension version:
+ query = ("SELECT extversion FROM pg_catalog.pg_extension "
+ "WHERE extname = %(ext)s")
+
+ current_version = '0'
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchone()
+ if res:
+ current_version = res[0]
+
+ # 2. Get available versions:
+ query = ("SELECT version FROM pg_available_extension_versions "
+ "WHERE name = %(ext)s")
+ cursor.execute(query, {'ext': ext})
+ res = cursor.fetchall()
+
+ available_versions = parse_ext_versions(current_version, res)
+
+ if current_version == '0':
+ current_version = False
+
+ return (current_version, available_versions)
+
+
+def parse_ext_versions(current_version, ext_ver_list):
+ """Parse ext versions.
+
+ Args:
+ current_version (str) -- version to compare elements of ext_ver_list with
+ ext_ver_list (list) -- list containing dicts with versions
+
+ Return a sorted list with versions that are higher than current_version.
+
+ Note: Incomparable versions (e.g., postgis version "unpackaged") are skipped.
+ """
+ available_versions = []
+
+ for line in ext_ver_list:
+ if line['version'] == 'unpackaged':
+ continue
+
+ try:
+ if LooseVersion(line['version']) > LooseVersion(current_version):
+ available_versions.append(line['version'])
+ except Exception:
+ # When a version cannot be compared, skip it
+ # (there's a note in the documentation)
+ continue
+
+ return sorted(available_versions, key=LooseVersion)
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ ext=dict(type="str", required=True, aliases=["name"]),
+ schema=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ cascade=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ version=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ ext = module.params["ext"]
+ schema = module.params["schema"]
+ state = module.params["state"]
+ cascade = module.params["cascade"]
+ version = module.params["version"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ changed = False
+
+ if not trust_input:
+ check_input(module, ext, schema, version, session_role)
+
+ if version and state == 'absent':
+ module.warn("Parameter version is ignored when state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ # Get extension info and available versions:
+ curr_version, available_versions = ext_get_versions(cursor, ext)
+
+ if state == "present":
+ if version == 'latest':
+ if available_versions:
+ version = available_versions[-1]
+ else:
+ version = ''
+
+ if version:
+ # If the specific version is passed and it is not available for update:
+ if version not in available_versions:
+ if not curr_version:
+ module.fail_json(msg="Passed version '%s' is not available" % version)
+
+ elif LooseVersion(curr_version) == LooseVersion(version):
+ changed = False
+
+ else:
+ module.fail_json(msg="Passed version '%s' is lower than "
+ "the current created version '%s' or "
+ "the passed version is not available" % (version, curr_version))
+
+ # If the specific version is passed and it is higher than the current version:
+ if curr_version:
+ if LooseVersion(curr_version) < LooseVersion(version):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_update_version(cursor, ext, version)
+
+ # If the specific version is passed and it is created now:
+ if curr_version == version:
+ changed = False
+
+ # If the ext doesn't exist but is available to install:
+ elif not curr_version and available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If version is not passed:
+ else:
+ if not curr_version:
+ # If the ext doesn't exist but is available to install:
+ if available_versions:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_create(cursor, ext, schema, cascade, version)
+
+ # If the extension is not created and is not available on the system, fail:
+ else:
+ module.fail_json(msg="Extension %s is not installed" % ext)
+
+ elif state == "absent":
+ if curr_version:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = ext_delete(cursor, ext, cascade)
+ else:
+ changed = False
+
+ except Exception as e:
+ db_connection.close()
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, db=module.params["db"], ext=ext, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py
new file mode 100644
index 00000000..6ffee31d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_idx.py
@@ -0,0 +1,589 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018-2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_idx
+short_description: Create or drop indexes from a PostgreSQL database
+description:
+- Create or drop indexes from a PostgreSQL database.
+
+options:
+ idxname:
+ description:
+ - Name of the index to create or drop.
+ type: str
+ required: true
+ aliases:
+ - name
+ db:
+ description:
+ - Name of database to connect to and where the index will be created/dropped.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ schema:
+ description:
+ - Name of a database schema where the index will be created.
+ type: str
+ state:
+ description:
+ - Index state.
+ - C(present) implies the index will be created if it does not exist.
+ - C(absent) implies the index will be dropped if it exists.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ table:
+ description:
+ - Table to create the index on.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ columns:
+ description:
+ - List of index columns that need to be covered by the index.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ aliases:
+ - column
+ cond:
+ description:
+ - Index conditions.
+ - Mutually exclusive with I(state=absent).
+ type: str
+ idxtype:
+ description:
+ - Index type (like btree, gist, gin, etc.).
+ - Mutually exclusive with I(state=absent).
+ type: str
+ aliases:
+ - type
+ concurrent:
+ description:
+ - Enable or disable concurrent mode (CREATE / DROP INDEX CONCURRENTLY).
+ - Note that if I(concurrent=no), the table will be locked (ACCESS EXCLUSIVE) during the build.
+ For more information about the lock levels see U(https://www.postgresql.org/docs/current/explicit-locking.html).
+ - If the build is interrupted for any reason when I(concurrent=yes), the index becomes invalid.
+ In this case it should be dropped and created again.
+ - Mutually exclusive with I(cascade=yes).
+ type: bool
+ default: yes
+ unique:
+ description:
+ - Enable unique index.
+ - Only btree currently supports unique indexes.
+ type: bool
+ default: no
+ version_added: '0.2.0'
+ tablespace:
+ description:
+ - Set a tablespace for the index.
+ - Mutually exclusive with I(state=absent).
+ required: false
+ type: str
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, vacuum_cleanup_index_scale_factor, etc.
+ - Mutually exclusive with I(state=absent).
+ type: list
+ elements: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the index,
+ and in turn all objects that depend on those objects.
+ - Used only with I(state=absent).
+ - Mutually exclusive with I(concurrent=yes).
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(idxname), I(session_role),
+ I(schema), I(table), I(columns), I(tablespace), I(storage_params),
+ I(cond) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_tablespace
+- name: PostgreSQL indexes reference
+ description: General information about PostgreSQL indexes.
+ link: https://www.postgresql.org/docs/current/indexes.html
+- name: CREATE INDEX reference
+ description: Complete reference of the CREATE INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createindex.html
+- name: ALTER INDEX reference
+ description: Complete reference of the ALTER INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterindex.html
+- name: DROP INDEX reference
+ description: Complete reference of the DROP INDEX command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropindex.html
+
+notes:
+- The index building process can affect database performance.
+- To avoid table locks on production databases, use I(concurrent=yes) (default behavior).
+
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create btree index if not exists test_idx concurrently covering columns id and name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: id,name
+ name: test_idx
+
+- name: Create btree index test_idx concurrently with tablespace called ssd and storage parameter
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns:
+ - id
+ - name
+ idxname: test_idx
+ tablespace: ssd
+ storage_params:
+ - fillfactor=90
+
+- name: Create gist index test_gist_idx concurrently on column geo_data of table map
+ community.general.postgresql_idx:
+ db: somedb
+ table: map
+ idxtype: gist
+ columns: geo_data
+ idxname: test_gist_idx
+
+# Note: for the example below pg_trgm extension must be installed for gin_trgm_ops
+- name: Create gin index gin0_idx not concurrently on column comment of table test
+ community.general.postgresql_idx:
+ idxname: gin0_idx
+ table: test
+ columns: comment gin_trgm_ops
+ concurrent: no
+ idxtype: gin
+
+- name: Drop btree test_idx concurrently
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+
+- name: Drop test_idx cascade
+ community.general.postgresql_idx:
+ db: mydb
+ idxname: test_idx
+ state: absent
+ cascade: yes
+ concurrent: no
+
+- name: Create btree index test_idx concurrently on columns id,comment where column id > 1
+ community.general.postgresql_idx:
+ db: mydb
+ table: test
+ columns: id,comment
+ idxname: test_idx
+ cond: id > 1
+
+- name: Create unique btree index if not exists test_unique_idx on column name of table products
+ community.general.postgresql_idx:
+ db: acme
+ table: products
+ columns: name
+ name: test_unique_idx
+ unique: yes
+ concurrent: no
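+
+# Illustrative only: the database, schema, table, and index names below are examples, not fixed values
+- name: Create brin index brin_created_at_idx on column created_at of table events in schema logs
+  community.general.postgresql_idx:
+    db: acme
+    schema: logs
+    table: events
+    columns: created_at
+    idxtype: brin
+    idxname: brin_created_at_idx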
+'''
+
+RETURN = r'''
+name:
+ description: Index name.
+ returned: always
+ type: str
+ sample: 'foo_idx'
+state:
+ description: Index state.
+ returned: always
+ type: str
+ sample: 'present'
+schema:
+ description: Schema where index exists.
+ returned: always
+ type: str
+ sample: 'public'
+tablespace:
+ description: Tablespace where index exists.
+ returned: always
+ type: str
+ sample: 'ssd'
+query:
+ description: Query that the module tried to execute.
+ returned: always
+ type: str
+ sample: 'CREATE INDEX CONCURRENTLY foo_idx ON test_table USING BTREE (id)'
+storage_params:
+ description: Index storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=90" ]
+valid:
+ description: Index validity.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+VALID_IDX_TYPES = ('BTREE', 'HASH', 'GIST', 'SPGIST', 'GIN', 'BRIN')
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Index(object):
+
+ """Class for working with PostgreSQL indexes.
+
+ TODO:
+ 1. Add possibility to change ownership
+ 2. Add possibility to change tablespace
+ 3. Add list called executed_queries (executed_query should be left too)
+ 4. Use self.module instead of passing arguments to the methods whenever possible
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ schema (str) -- name of the index schema
+ name (str) -- name of the index
+ exists (bool) -- flag indicating whether the index exists in the DB
+ info (dict) -- dict that contains information about the index
+ executed_query (str) -- executed query
+ """
+
+ def __init__(self, module, cursor, schema, name):
+ self.name = name
+ if schema:
+ self.schema = schema
+ else:
+ self.schema = 'public'
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'name': self.name,
+ 'state': 'absent',
+ 'schema': '',
+ 'tblname': '',
+ 'tblspace': '',
+ 'valid': True,
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_query = ''
+
+ def get_info(self):
+ """Refresh index info.
+
+ Return self.info dict.
+ """
+ self.__exists_in_db()
+ return self.info
+
+ def __exists_in_db(self):
+ """Check index existence, collect info, add it to self.info dict.
+
+ Return True if the index exists, otherwise, return False.
+ """
+ query = ("SELECT i.schemaname, i.tablename, i.tablespace, "
+ "pi.indisvalid, c.reloptions "
+ "FROM pg_catalog.pg_indexes AS i "
+ "JOIN pg_catalog.pg_class AS c "
+ "ON i.indexname = c.relname "
+ "JOIN pg_catalog.pg_index AS pi "
+ "ON c.oid = pi.indexrelid "
+ "WHERE i.indexname = %(name)s")
+
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ name=self.name,
+ state='present',
+ schema=res[0][0],
+ tblname=res[0][1],
+ tblspace=res[0][2] if res[0][2] else '',
+ valid=res[0][3],
+ storage_params=res[0][4] if res[0][4] else [],
+ )
+ return True
+
+ else:
+ self.exists = False
+ return False
+
+ def create(self, tblname, idxtype, columns, cond, tblspace,
+ storage_params, concurrent=True, unique=False):
+ """Create PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Args:
+ tblname (str) -- name of a table for the index
+ idxtype (str) -- type of the index like BTREE, BRIN, etc
+ columns (str) -- string of comma-separated columns that need to be covered by the index
+ cond (str) -- condition for a partial index (WHERE clause), can be empty
+ tblspace (str) -- tablespace for storing the index
+ storage_params (str) -- string of comma-separated storage parameters
+
+ Kwargs:
+ concurrent (bool) -- build the index in concurrent mode, default True
+ unique (bool) -- create a unique index, default False
+ """
+ if self.exists:
+ return False
+
+ if idxtype is None:
+ idxtype = "BTREE"
+
+ query = 'CREATE'
+
+ if unique:
+ query += ' UNIQUE'
+
+ query += ' INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"' % self.name
+
+ query += ' ON "%s"."%s" ' % (self.schema, tblname)
+
+ query += 'USING %s (%s)' % (idxtype, columns)
+
+ if storage_params:
+ query += ' WITH (%s)' % storage_params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if cond:
+ query += ' WHERE %s' % cond
+
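+ # The assembled statement looks like, for example:
+ # CREATE UNIQUE INDEX CONCURRENTLY "foo_idx" ON "public"."test_table" USING BTREE (id) WHERE id > 1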
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+ def drop(self, cascade=False, concurrent=True):
+ """Drop PostgreSQL index.
+
+ Return True if success, otherwise, return False.
+
+ Kwargs:
+ cascade (bool) -- automatically drop objects that depend on the index,
+ default False
+ concurrent (bool) -- drop the index in concurrent mode, default True
+ """
+ if not self.exists:
+ return False
+
+ query = 'DROP INDEX'
+
+ if concurrent:
+ query += ' CONCURRENTLY'
+
+ query += ' "%s"."%s"' % (self.schema, self.name)
+
+ if cascade:
+ query += ' CASCADE'
+
+ self.executed_query = query
+
+ return exec_sql(self, query, return_bool=True, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ idxname=dict(type='str', required=True, aliases=['name']),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ concurrent=dict(type='bool', default=True),
+ unique=dict(type='bool', default=False),
+ table=dict(type='str'),
+ idxtype=dict(type='str', aliases=['type']),
+ columns=dict(type='list', elements='str', aliases=['column']),
+ cond=dict(type='str'),
+ session_role=dict(type='str'),
+ tablespace=dict(type='str'),
+ storage_params=dict(type='list', elements='str'),
+ cascade=dict(type='bool', default=False),
+ schema=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ idxname = module.params["idxname"]
+ state = module.params["state"]
+ concurrent = module.params["concurrent"]
+ unique = module.params["unique"]
+ table = module.params["table"]
+ idxtype = module.params["idxtype"]
+ columns = module.params["columns"]
+ cond = module.params["cond"]
+ tablespace = module.params["tablespace"]
+ storage_params = module.params["storage_params"]
+ cascade = module.params["cascade"]
+ schema = module.params["schema"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, idxname, session_role, schema, table, columns,
+ tablespace, storage_params, cond)
+
+ if concurrent and cascade:
+ module.fail_json(msg="Concurrent mode and cascade parameters are mutually exclusive")
+
+ if unique and (idxtype and idxtype != 'btree'):
+ module.fail_json(msg="Only btree currently supports unique indexes")
+
+ if state == 'present':
+ if not table:
+ module.fail_json(msg="Table must be specified")
+ if not columns:
+ module.fail_json(msg="At least one column must be specified")
+ else:
+ if table or columns or cond or idxtype or tablespace:
+ module.fail_json(msg="Index %s is going to be removed, so it does not "
+ "make sense to pass a table name, columns, conditions, "
+ "index type, or tablespace" % idxname)
+
+ if cascade and state != 'absent':
+ module.fail_json(msg="cascade parameter used only with state=absent")
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Set defaults:
+ changed = False
+
+ # Do job:
+ index = Index(module, cursor, schema, idxname)
+ kw = index.get_info()
+ kw['query'] = ''
+
+ #
+ # check_mode start
+ if module.check_mode:
+ if state == 'present' and index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'present' and not index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+
+ elif state == 'absent' and not index.exists:
+ kw['changed'] = False
+ module.exit_json(**kw)
+
+ elif state == 'absent' and index.exists:
+ kw['changed'] = True
+ module.exit_json(**kw)
+ # check_mode end
+ #
+
+ if state == "present":
+ if idxtype and idxtype.upper() not in VALID_IDX_TYPES:
+ module.fail_json(msg="Index type '%s' of %s is not in valid types" % (idxtype, idxname))
+
+ columns = ','.join(columns)
+
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ changed = index.create(table, idxtype, columns, cond, tablespace, storage_params, concurrent, unique)
+
+ if changed:
+ kw = index.get_info()
+ kw['state'] = 'present'
+ kw['query'] = index.executed_query
+
+ else:
+ changed = index.drop(cascade, concurrent)
+
+ if changed:
+ kw['state'] = 'absent'
+ kw['query'] = index.executed_query
+
+ if not kw['valid']:
+ db_connection.rollback()
+ module.warn("Index %s is invalid! ROLLBACK" % idxname)
+
+ if not concurrent:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py
new file mode 100644
index 00000000..aeec8651
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_info.py
@@ -0,0 +1,1030 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_info
+short_description: Gather information about PostgreSQL servers
+description:
+- Gathers information about PostgreSQL servers.
+options:
+ filter:
+ description:
+ - Limit the collected information by a comma-separated string or a YAML list.
+ - Allowable values are C(version),
+ C(databases), C(in_recovery), C(settings), C(tablespaces), C(roles),
+ C(replications), C(repl_slots).
+ - By default, collects all subsets.
+ - You can use shell-style (fnmatch) wildcard to pass groups of values (see Examples).
+ - You can use '!' before value (for example, C(!settings)) to exclude it from the information.
+ - If you pass including and excluding values to the filter, for example, I(filter=!settings,ver),
+ the excluding values will be ignored.
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_ping
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Display info from postgres hosts.
+# ansible postgres -m postgresql_info
+
+# Display only databases and roles info from all hosts using shell-style wildcards:
+# ansible all -m postgresql_info -a 'filter=dat*,rol*'
+
+# Display only replications and repl_slots info from standby hosts using shell-style wildcards:
+# ansible standby -m postgresql_info -a 'filter=repl*'
+
+# Display all info from databases hosts except settings:
+# ansible databases -m postgresql_info -a 'filter=!settings'
+
+- name: Collect PostgreSQL version and extensions
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: ver*,ext*
+
+- name: Collect all info except settings and roles
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter: "!settings,!roles"
+
+# On FreeBSD with PostgreSQL 9.5 version and lower use pgsql user to become
+# and pass "postgres" as a database to connect to
+- name: Collect tablespaces and repl_slots info
+ become: yes
+ become_user: pgsql
+ community.general.postgresql_info:
+ db: postgres
+ filter:
+ - tablesp*
+ - repl_sl*
+
+- name: Collect all info except databases
+ become: yes
+ become_user: postgres
+ community.general.postgresql_info:
+ filter:
+ - "!databases"
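+
+# Illustrative only: the registered variable name below is arbitrary
+- name: Collect only version info and keep the result for later tasks
+  become: yes
+  become_user: postgres
+  community.general.postgresql_info:
+    filter: version
+  register: pg_info
+
+- name: Show the gathered server version
+  ansible.builtin.debug:
+    var: pg_info.version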
+'''
+
+RETURN = r'''
+version:
+ description: Database server version U(https://www.postgresql.org/support/versioning/).
+ returned: always
+ type: dict
+ sample: { "version": { "major": 10, "minor": 6 } }
+ contains:
+ major:
+ description: Major server version.
+ returned: always
+ type: int
+ sample: 11
+ minor:
+ description: Minor server version.
+ returned: always
+ type: int
+ sample: 1
+in_recovery:
+ description: Indicates if the service is in recovery mode or not.
+ returned: always
+ type: bool
+ sample: false
+databases:
+ description: Information about databases.
+ returned: always
+ type: dict
+ sample:
+ - { "postgres": { "access_priv": "", "collate": "en_US.UTF-8",
+ "ctype": "en_US.UTF-8", "encoding": "UTF8", "owner": "postgres", "size": "7997 kB" } }
+ contains:
+ database_name:
+ description: Database name.
+ returned: always
+ type: dict
+ sample: template1
+ contains:
+ access_priv:
+ description: Database access privileges.
+ returned: always
+ type: str
+ sample: "=c/postgres_npostgres=CTc/postgres"
+ collate:
+ description:
+ - Database collation U(https://www.postgresql.org/docs/current/collation.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ ctype:
+ description:
+ - Database LC_CTYPE U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: en_US.UTF-8
+ encoding:
+ description:
+ - Database encoding U(https://www.postgresql.org/docs/current/multibyte.html).
+ returned: always
+ type: str
+ sample: UTF8
+ owner:
+ description:
+ - Database owner U(https://www.postgresql.org/docs/current/sql-createdatabase.html).
+ returned: always
+ type: str
+ sample: postgres
+ size:
+ description: Database size in bytes.
+ returned: always
+ type: str
+ sample: 8189415
+ extensions:
+ description:
+ - Extensions U(https://www.postgresql.org/docs/current/sql-createextension.html).
+ returned: always
+ type: dict
+ sample:
+ - { "plpgsql": { "description": "PL/pgSQL procedural language",
+ "extversion": { "major": 1, "minor": 0 } } }
+ contains:
+ extdescription:
+ description: Extension description.
+ returned: if existent
+ type: str
+ sample: PL/pgSQL procedural language
+ extversion:
+ description: Extension version.
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: Extension major version.
+ returned: always
+ type: int
+ sample: 1
+ minor:
+ description: Extension minor version.
+ returned: always
+ type: int
+ sample: 0
+ nspname:
+ description: Namespace where the extension is.
+ returned: always
+ type: str
+ sample: pg_catalog
+ languages:
+ description: Procedural languages U(https://www.postgresql.org/docs/current/xplang.html).
+ returned: always
+ type: dict
+ sample: { "sql": { "lanacl": "", "lanowner": "postgres" } }
+ contains:
+ lanacl:
+ description:
+ - Language access privileges
+ U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ lanowner:
+ description:
+ - Language owner U(https://www.postgresql.org/docs/current/catalog-pg-language.html).
+ returned: always
+ type: str
+ sample: postgres
+ namespaces:
+ description:
+ - Namespaces (schema) U(https://www.postgresql.org/docs/current/sql-createschema.html).
+ returned: always
+ type: dict
+ sample: { "pg_catalog": { "nspacl": "{postgres=UC/postgres,=U/postgres}", "nspowner": "postgres" } }
+ contains:
+ nspacl:
+ description:
+ - Access privileges U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: "{postgres=UC/postgres,=U/postgres}"
+ nspowner:
+ description:
+ - Schema owner U(https://www.postgresql.org/docs/current/catalog-pg-namespace.html).
+ returned: always
+ type: str
+ sample: postgres
+ publications:
+ description:
+ - Information about logical replication publications (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-publication.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample: { "pub1": { "ownername": "postgres", "puballtables": true, "pubinsert": true, "pubupdate": true } }
+ version_added: '0.2.0'
+ subscriptions:
+ description:
+ - Information about replication subscriptions (available for PostgreSQL 10 and higher)
+ U(https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+ - Content depends on PostgreSQL server version.
+ returned: if configured
+ type: dict
+ sample:
+ - { "my_subscription": {"ownername": "postgres", "subenabled": true, "subpublications": ["first_publication"] } }
+ version_added: '0.2.0'
+repl_slots:
+ description:
+ - Replication slots (available in 9.4 and later)
+ U(https://www.postgresql.org/docs/current/view-pg-replication-slots.html).
+ returned: if existent
+ type: dict
+ sample: { "slot0": { "active": false, "database": null, "plugin": null, "slot_type": "physical" } }
+ contains:
+ active:
+ description:
+ - True means that a receiver has connected to it, and it is currently reserving archives.
+ returned: always
+ type: bool
+ sample: true
+ database:
+ description: Database name this slot is associated with, or null.
+ returned: always
+ type: str
+ sample: acme
+ plugin:
+ description:
+ - Base name of the shared object containing the output plugin
+ this logical slot is using, or null for physical slots.
+ returned: always
+ type: str
+ sample: pgoutput
+ slot_type:
+ description: The slot type - physical or logical.
+ returned: always
+ type: str
+ sample: logical
+replications:
+ description:
+ - Information about the current replications by process PIDs
+ U(https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-STATS-VIEWS-TABLE).
+ returned: if the pg_stat_replication view exists
+ type: dict
+ sample:
+ - { "76580": { "app_name": "standby1", "backend_start": "2019-02-03 00:14:33.908593+03",
+ "client_addr": "10.10.10.2", "client_hostname": "", "state": "streaming", "usename": "postgres" } }
+ contains:
+ usename:
+ description:
+ - Name of the user logged into this WAL sender process ('usename' is a column name in pg_stat_replication view).
+ returned: always
+ type: str
+ sample: replication_user
+ app_name:
+ description: Name of the application that is connected to this WAL sender.
+ returned: if existent
+ type: str
+ sample: acme_srv
+ client_addr:
+ description:
+ - IP address of the client connected to this WAL sender.
+ - If this field is null, it indicates that the client is connected
+ via a Unix socket on the server machine.
+ returned: always
+ type: str
+ sample: 10.0.0.101
+ client_hostname:
+ description:
+ - Host name of the connected client, as reported by a reverse DNS lookup of client_addr.
+ - This field will only be non-null for IP connections, and only when log_hostname is enabled.
+ returned: always
+ type: str
+ sample: dbsrv1
+ backend_start:
+ description: Time when this process was started, i.e., when the client connected to this WAL sender.
+ returned: always
+ type: str
+ sample: "2019-02-03 00:14:33.908593+03"
+ state:
+ description: Current WAL sender state.
+ returned: always
+ type: str
+ sample: streaming
+tablespaces:
+ description:
+ - Information about tablespaces U(https://www.postgresql.org/docs/current/catalog-pg-tablespace.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test": { "spcacl": "{postgres=C/postgres,andreyk=C/postgres}", "spcoptions": [ "seq_page_cost=1" ],
+ "spcowner": "postgres" } }
+ contains:
+ spcacl:
+ description: Tablespace access privileges.
+ returned: always
+ type: str
+ sample: "{postgres=C/postgres,andreyk=C/postgres}"
+ spcoptions:
+ description: Tablespace-level options.
+ returned: always
+ type: list
+ sample: [ "seq_page_cost=1" ]
+ spcowner:
+ description: Owner of the tablespace.
+ returned: always
+ type: str
+ sample: test_user
+roles:
+ description:
+ - Information about roles U(https://www.postgresql.org/docs/current/user-manag.html).
+ returned: always
+ type: dict
+ sample:
+ - { "test_role": { "canlogin": true, "member_of": [ "user_ro" ], "superuser": false,
+ "valid_until": "9999-12-31T23:59:59.999999+00:00" } }
+ contains:
+ canlogin:
+ description: Login privilege U(https://www.postgresql.org/docs/current/role-attributes.html).
+ returned: always
+ type: bool
+ sample: true
+ member_of:
+ description:
+ - Role membership U(https://www.postgresql.org/docs/current/role-membership.html).
+ returned: always
+ type: list
+ sample: [ "read_only_users" ]
+ superuser:
+ description: Whether the user is a superuser.
+ returned: always
+ type: bool
+ sample: false
+ valid_until:
+ description:
+ - Password expiration date U(https://www.postgresql.org/docs/current/sql-alterrole.html).
+ returned: always
+ type: str
+ sample: "9999-12-31T23:59:59.999999+00:00"
+pending_restart_settings:
+ description:
+ - List of settings whose changed values require a server restart to take effect.
+ returned: always
+ type: list
+ sample: [ "shared_buffers" ]
+settings:
+ description:
+ - Information about run-time server parameters
+ U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: dict
+ sample:
+ - { "work_mem": { "boot_val": "4096", "context": "user", "max_val": "2147483647",
+ "min_val": "64", "setting": "8192", "sourcefile": "/var/lib/pgsql/10/data/postgresql.auto.conf",
+ "unit": "kB", "vartype": "integer", "val_in_bytes": 4194304 } }
+ contains:
+ setting:
+ description: Current value of the parameter.
+ returned: always
+ type: str
+ sample: 49152
+ unit:
+ description: Implicit unit of the parameter.
+ returned: always
+ type: str
+ sample: kB
+ boot_val:
+ description:
+ - Parameter value assumed at server startup if the parameter is not otherwise set.
+ returned: always
+ type: str
+ sample: 4096
+ min_val:
+ description:
+ - Minimum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 64
+ max_val:
+ description:
+ - Maximum allowed value of the parameter (null for non-numeric values).
+ returned: always
+ type: str
+ sample: 2147483647
+ sourcefile:
+ description:
+ - Configuration file the current value was set in.
+ - Null for values set from sources other than configuration files,
+ or when examined by a user who is neither a superuser nor a member of pg_read_all_settings.
+ - Helpful when using include directives in configuration files.
+ returned: always
+ type: str
+ sample: /var/lib/pgsql/10/data/postgresql.auto.conf
+ context:
+ description:
+ - Context required to set the parameter's value.
+ - For more information see U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+ returned: always
+ type: str
+ sample: user
+ vartype:
+ description:
+ - Parameter type (bool, enum, integer, real, or string).
+ returned: always
+ type: str
+ sample: integer
+ val_in_bytes:
+ description:
+ - Current value of the parameter in bytes.
+ returned: if supported
+ type: int
+ sample: 2147483647
+ pretty_val:
+ description:
+ - Value presented in the pretty form.
+ returned: always
+ type: str
+ sample: 2MB
+ pending_restart:
+ description:
+ - True if the value has been changed in the configuration file but needs a restart, false otherwise.
+ - Returned only if C(settings) is collected.
+ returned: always
+ type: bool
+ sample: false
+'''
+
+from fnmatch import fnmatch
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgDbConn(object):
+ """Auxiliary class for working with PostgreSQL connection objects.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class that
+ contains connection parameters.
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.db_conn = None
+ self.cursor = None
+
+ def connect(self):
+ """Connect to a PostgreSQL database and return a cursor object.
+
+ Note: connection parameters are passed by self.module object.
+ """
+ conn_params = get_conn_params(self.module, self.module.params, warn_db_default=False)
+ self.db_conn = connect_to_db(self.module, conn_params)
+ return self.db_conn.cursor(cursor_factory=DictCursor)
+
+ def reconnect(self, dbname):
+ """Reconnect to another database and return a PostgreSQL cursor object.
+
+ Arguments:
+ dbname (string): Database name to connect to.
+ """
+ self.db_conn.close()
+
+ self.module.params['database'] = dbname
+ return self.connect()
+
+
+class PgClusterInfo(object):
+ """Class for collecting information about a PostgreSQL instance.
+
+ Arguments:
+ module (AnsibleModule): Object of AnsibleModule class.
+ db_conn_obj (psycopg2.connect): PostgreSQL connection object.
+ """
+
+ def __init__(self, module, db_conn_obj):
+ self.module = module
+ self.db_obj = db_conn_obj
+ self.cursor = db_conn_obj.connect()
+ self.pg_info = {
+ "version": {},
+ "in_recovery": None,
+ "tablespaces": {},
+ "databases": {},
+ "replications": {},
+ "repl_slots": {},
+ "settings": {},
+ "roles": {},
+ "pending_restart_settings": [],
+ }
+
+ def collect(self, val_list=False):
+ """Collect information based on 'filter' option."""
+ subset_map = {
+ "version": self.get_pg_version,
+ "in_recovery": self.get_recovery_state,
+ "tablespaces": self.get_tablespaces,
+ "databases": self.get_db_info,
+ "replications": self.get_repl_info,
+ "repl_slots": self.get_rslot_info,
+ "settings": self.get_settings,
+ "roles": self.get_role_info,
+ }
+
+ incl_list = []
+ excl_list = []
+ # Notice: incl_list and excl_list
+ # don't make sense together, therefore,
+ # if incl_list is not empty, we collect
+ # only values from it:
+ if val_list:
+ for i in val_list:
+ if i[0] != '!':
+ incl_list.append(i)
+ else:
+ excl_list.append(i.lstrip('!'))
+
+ if incl_list:
+ for s in subset_map:
+ for i in incl_list:
+ if fnmatch(s, i):
+ subset_map[s]()
+ break
+ elif excl_list:
+ found = False
+ # Collect info:
+ for s in subset_map:
+ for e in excl_list:
+ if fnmatch(s, e):
+ found = True
+
+ if not found:
+ subset_map[s]()
+ else:
+ found = False
+
+ # Default behaviour, if include or exclude is not passed:
+ else:
+ # Just collect info for each item:
+ for s in subset_map:
+ subset_map[s]()
+
+ return self.pg_info
+
+ def get_pub_info(self):
+ """Get publication statistics."""
+ query = ("SELECT p.*, r.rolname AS ownername "
+ "FROM pg_catalog.pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ publications = {}
+
+ for elem in result:
+ if not publications.get(elem['pubname']):
+ publications[elem['pubname']] = {}
+
+ for key, val in iteritems(elem):
+ if key != 'pubname':
+ publications[elem['pubname']][key] = val
+
+ return publications
+
+ def get_subscr_info(self):
+ """Get subscription statistics."""
+ query = ("SELECT s.*, r.rolname AS ownername, d.datname AS dbname "
+ "FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid")
+
+ result = self.__exec_sql(query)
+
+ if result:
+ result = [dict(row) for row in result]
+ else:
+ return {}
+
+ subscr_info = {}
+
+ for elem in result:
+ if not subscr_info.get(elem['dbname']):
+ subscr_info[elem['dbname']] = {}
+
+ if not subscr_info[elem['dbname']].get(elem['subname']):
+ subscr_info[elem['dbname']][elem['subname']] = {}
+
+ for key, val in iteritems(elem):
+ if key not in ('subname', 'dbname'):
+ subscr_info[elem['dbname']][elem['subname']][key] = val
+
+ return subscr_info
+
+ def get_tablespaces(self):
+ """Get information about tablespaces."""
+ # Check spcoption exists:
+ opt = self.__exec_sql("SELECT column_name "
+ "FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'")
+
+ if not opt:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+ else:
+ query = ("SELECT s.spcname, a.rolname, s.spcacl, s.spcoptions "
+ "FROM pg_tablespace AS s "
+ "JOIN pg_authid AS a ON s.spcowner = a.oid")
+
+ res = self.__exec_sql(query)
+ ts_dict = {}
+ for i in res:
+ ts_name = i[0]
+ ts_info = dict(
+ spcowner=i[1],
+ spcacl=i[2] if i[2] else '',
+ )
+ if opt:
+ ts_info['spcoptions'] = i[3] if i[3] else []
+
+ ts_dict[ts_name] = ts_info
+
+ self.pg_info["tablespaces"] = ts_dict
+
+ def get_ext_info(self):
+ """Get information about existing extensions."""
+ # Check that pg_extension exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_extension')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT e.extname, e.extversion, n.nspname, c.description "
+ "FROM pg_catalog.pg_extension AS e "
+ "LEFT JOIN pg_catalog.pg_namespace AS n "
+ "ON n.oid = e.extnamespace "
+ "LEFT JOIN pg_catalog.pg_description AS c "
+ "ON c.objoid = e.oid "
+ "AND c.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass")
+ res = self.__exec_sql(query)
+ ext_dict = {}
+ for i in res:
+ ext_ver = i[1].split('.')
+
+ ext_dict[i[0]] = dict(
+ extversion=dict(
+ major=int(ext_ver[0]),
+ minor=int(ext_ver[1]),
+ ),
+ nspname=i[2],
+ description=i[3],
+ )
+
+ return ext_dict
+
+ def get_role_info(self):
+ """Get information about roles (in PgSQL groups and users are roles)."""
+ query = ("SELECT r.rolname, r.rolsuper, r.rolcanlogin, "
+ "r.rolvaliduntil, "
+ "ARRAY(SELECT b.rolname "
+ "FROM pg_catalog.pg_auth_members AS m "
+ "JOIN pg_catalog.pg_roles AS b ON (m.roleid = b.oid) "
+ "WHERE m.member = r.oid) AS memberof "
+ "FROM pg_catalog.pg_roles AS r "
+ "WHERE r.rolname !~ '^pg_'")
+
+ res = self.__exec_sql(query)
+ rol_dict = {}
+ for i in res:
+ rol_dict[i[0]] = dict(
+ superuser=i[1],
+ canlogin=i[2],
+ valid_until=i[3] if i[3] else '',
+ member_of=i[4] if i[4] else [],
+ )
+
+ self.pg_info["roles"] = rol_dict
+
+ def get_rslot_info(self):
+ """Get information about replication slots if exist."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_replication_slots')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT slot_name, plugin, slot_type, database, "
+ "active FROM pg_replication_slots")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ rslot_dict = {}
+ for i in res:
+ rslot_dict[i[0]] = dict(
+ plugin=i[1],
+ slot_type=i[2],
+ database=i[3],
+ active=i[4],
+ )
+
+ self.pg_info["repl_slots"] = rslot_dict
+
+ def get_settings(self):
+ """Get server settings."""
+ # Check pending restart column exists:
+ pend_rest_col_exists = self.__exec_sql("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_settings' "
+ "AND column_name = 'pending_restart'")
+ if not pend_rest_col_exists:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile "
+ "FROM pg_settings")
+ else:
+ query = ("SELECT name, setting, unit, context, vartype, "
+ "boot_val, min_val, max_val, sourcefile, pending_restart "
+ "FROM pg_settings")
+
+ res = self.__exec_sql(query)
+
+ set_dict = {}
+ for i in res:
+ val_in_bytes = None
+ setting = i[1]
+ if i[2]:
+ unit = i[2]
+ else:
+ unit = ''
+
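+ # Convert the reported value to bytes when its implicit unit is kB, 8kB blocks, or MB: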
+ if unit == 'kB':
+ val_in_bytes = int(setting) * 1024
+
+ elif unit == '8kB':
+ val_in_bytes = int(setting) * 1024 * 8
+
+ elif unit == 'MB':
+ val_in_bytes = int(setting) * 1024 * 1024
+
+ if val_in_bytes is not None and val_in_bytes < 0:
+ val_in_bytes = 0
+
+ setting_name = i[0]
+ pretty_val = self.__get_pretty_val(setting_name)
+
+ pending_restart = None
+ if pend_rest_col_exists:
+ pending_restart = i[9]
+
+ set_dict[setting_name] = dict(
+ setting=setting,
+ unit=unit,
+ context=i[3],
+ vartype=i[4],
+ boot_val=i[5] if i[5] else '',
+ min_val=i[6] if i[6] else '',
+ max_val=i[7] if i[7] else '',
+ sourcefile=i[8] if i[8] else '',
+ pretty_val=pretty_val,
+ )
+ if val_in_bytes is not None:
+ set_dict[setting_name]['val_in_bytes'] = val_in_bytes
+
+ if pending_restart is not None:
+ set_dict[setting_name]['pending_restart'] = pending_restart
+ if pending_restart:
+ self.pg_info["pending_restart_settings"].append(setting_name)
+
+ self.pg_info["settings"] = set_dict
+
+ def get_repl_info(self):
+ """Get information about replication if the server is a master."""
+ # Check that pg_replication_slots exists:
+ res = self.__exec_sql("SELECT EXISTS (SELECT 1 FROM "
+ "information_schema.tables "
+ "WHERE table_name = 'pg_stat_replication')")
+ if not res[0][0]:
+ return True
+
+ query = ("SELECT r.pid, a.rolname, r.application_name, r.client_addr, "
+ "r.client_hostname, r.backend_start::text, r.state "
+ "FROM pg_stat_replication AS r "
+ "JOIN pg_authid AS a ON r.usesysid = a.oid")
+ res = self.__exec_sql(query)
+
+ # If there is no replication:
+ if not res:
+ return True
+
+ repl_dict = {}
+ for i in res:
+ repl_dict[i[0]] = dict(
+ usename=i[1],
+ app_name=i[2] if i[2] else '',
+ client_addr=i[3],
+ client_hostname=i[4] if i[4] else '',
+ backend_start=i[5],
+ state=i[6],
+ )
+
+ self.pg_info["replications"] = repl_dict
+
+ def get_lang_info(self):
+ """Get information about current supported languages."""
+ query = ("SELECT l.lanname, a.rolname, l.lanacl "
+ "FROM pg_language AS l "
+ "JOIN pg_authid AS a ON l.lanowner = a.oid")
+ res = self.__exec_sql(query)
+ lang_dict = {}
+ for i in res:
+ lang_dict[i[0]] = dict(
+ lanowner=i[1],
+ lanacl=i[2] if i[2] else '',
+ )
+
+ return lang_dict
+
+ def get_namespaces(self):
+ """Get information about namespaces."""
+ query = ("SELECT n.nspname, a.rolname, n.nspacl "
+ "FROM pg_catalog.pg_namespace AS n "
+ "JOIN pg_authid AS a ON a.oid = n.nspowner")
+ res = self.__exec_sql(query)
+
+ nsp_dict = {}
+ for i in res:
+ nsp_dict[i[0]] = dict(
+ nspowner=i[1],
+ nspacl=i[2] if i[2] else '',
+ )
+
+ return nsp_dict
+
+ def get_pg_version(self):
+ """Get major and minor PostgreSQL server version."""
+ query = "SELECT version()"
+ raw = self.__exec_sql(query)[0][0]
+ raw = raw.split()[1].split('.')
+ self.pg_info["version"] = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
+
+ def get_recovery_state(self):
+ """Get if the service is in recovery mode."""
+ self.pg_info["in_recovery"] = self.__exec_sql("SELECT pg_is_in_recovery()")[0][0]
+
+ def get_db_info(self):
+ """Get information about the current database."""
+ # Following query returns:
+ # Name, Owner, Encoding, Collate, Ctype, Access Priv, Size
+ query = ("SELECT d.datname, "
+ "pg_catalog.pg_get_userbyid(d.datdba), "
+ "pg_catalog.pg_encoding_to_char(d.encoding), "
+ "d.datcollate, "
+ "d.datctype, "
+ "pg_catalog.array_to_string(d.datacl, E'\n'), "
+ "CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') "
+ "THEN pg_catalog.pg_database_size(d.datname)::text "
+ "ELSE 'No Access' END, "
+ "t.spcname "
+ "FROM pg_catalog.pg_database AS d "
+ "JOIN pg_catalog.pg_tablespace t ON d.dattablespace = t.oid "
+ "WHERE d.datname != 'template0'")
+
+ res = self.__exec_sql(query)
+
+ db_dict = {}
+ for i in res:
+ db_dict[i[0]] = dict(
+ owner=i[1],
+ encoding=i[2],
+ collate=i[3],
+ ctype=i[4],
+ access_priv=i[5] if i[5] else '',
+ size=i[6],
+ )
+
+ if self.cursor.connection.server_version >= 100000:
+ subscr_info = self.get_subscr_info()
+
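+ # Reconnect to each database to collect its per-database objects (namespaces, extensions, languages):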
+ for datname in db_dict:
+ self.cursor = self.db_obj.reconnect(datname)
+ db_dict[datname]['namespaces'] = self.get_namespaces()
+ db_dict[datname]['extensions'] = self.get_ext_info()
+ db_dict[datname]['languages'] = self.get_lang_info()
+ if self.cursor.connection.server_version >= 100000:
+ db_dict[datname]['publications'] = self.get_pub_info()
+ db_dict[datname]['subscriptions'] = subscr_info.get(datname, {})
+
+ self.pg_info["databases"] = db_dict
+
+ def __get_pretty_val(self, setting):
+ """Get setting's value represented by SHOW command."""
+ return self.__exec_sql("SHOW %s" % setting)[0][0]
+
+ def __exec_sql(self, query):
+ """Execute SQL and return the result."""
+ try:
+ self.cursor.execute(query)
+ res = self.cursor.fetchall()
+ if res:
+ return res
+ except Exception as e:
+ self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+ self.cursor.close()
+ return False
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params['filter']
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ db_conn_obj = PgDbConn(module)
+
+ # Do job:
+ pg_info = PgClusterInfo(module, db_conn_obj)
+
+ module.exit_json(**pg_info.collect(filter_))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py
new file mode 100644
index 00000000..8b28cd9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_lang.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2014, Jens Depuydt <http://www.jensd.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_lang
+short_description: Adds, removes or changes procedural languages with a PostgreSQL database
+description:
+- Adds, removes or changes procedural languages with a PostgreSQL database.
+- This module allows you to add a language, remove a language, or change the trust
+ relationship with a PostgreSQL database.
+- The module can be used on the machine where executed or on a remote host.
+- When removing a language from a database, it is possible that dependencies prevent
+ the language from being removed. In that case, you can specify I(cascade=yes) to
+ automatically drop objects that depend on the language (such as functions in the
+ language).
+- In case the language can't be deleted because it is required by the
+ database system, you can specify I(fail_on_drop=no) to ignore the error.
+- Be careful when marking a language as trusted since this could be a potential
+ security breach. Untrusted languages allow only users with the PostgreSQL superuser
+ privilege to use this language to create new functions.
+options:
+ lang:
+ description:
+ - Name of the procedural language to add, remove or change.
+ required: true
+ type: str
+ aliases:
+ - name
+ trust:
+ description:
+ - Make this language trusted for the selected db.
+ type: bool
+ default: 'no'
+ db:
+ description:
+ - Name of database to connect to and where the language will be added, removed or changed.
+ type: str
+ aliases:
+ - login_db
+ required: true
+ force_trust:
+ description:
+ - Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
+ - Use with care!
+ type: bool
+ default: 'no'
+ fail_on_drop:
+ description:
+ - If C(yes), fail when removing a language. Otherwise just log and continue.
+ - In some cases, it is not possible to remove a language (it is used by the database system).
+ - When dependencies block the removal, consider using I(cascade).
+ type: bool
+ default: 'yes'
+ cascade:
+ description:
+ - When dropping a language, also delete objects that depend on this language.
+ - Only used when I(state=absent).
+ type: bool
+ default: 'no'
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified I(session_role) must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though the
+ I(session_role) were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The state of the language for the selected database.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ login_unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ owner:
+ description:
+ - Set an owner for the language.
+ - Ignored when I(state=absent).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(lang), I(session_role),
+ I(owner) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL languages
+ description: General information about PostgreSQL languages.
+ link: https://www.postgresql.org/docs/current/xplang.html
+- name: CREATE LANGUAGE reference
+ description: Complete reference of the CREATE LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createlanguage.html
+- name: ALTER LANGUAGE reference
+ description: Complete reference of the ALTER LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
+- name: DROP LANGUAGE reference
+ description: Complete reference of the DROP LANGUAGE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droplanguage.html
+author:
+- Jens Depuydt (@jensdepuydt)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Add language pltclu to database testdb if it doesn't exist
+ community.general.postgresql_lang: db=testdb lang=pltclu state=present
+
+# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
+# Marks the language as trusted if it exists but isn't trusted yet.
+# force_trust makes sure that the language will be marked as trusted
+- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: present
+ trust: yes
+ force_trust: yes
+
+- name: Remove language pltclu from database testdb
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+
+- name: Remove language pltclu from database testdb and remove all dependencies
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ cascade: yes
+
+- name: Remove language pltclu from database testdb but ignore errors if something prevents the removal
+ community.general.postgresql_lang:
+ db: testdb
+ lang: pltclu
+ state: absent
+ fail_on_drop: no
+
+- name: In testdb change owner of mylang to alice
+ community.general.postgresql_lang:
+ db: testdb
+ lang: mylang
+ owner: alice
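+
+# Illustrative only: the database and role names below are examples
+- name: Add trusted language plperl to database testdb using session_role admin
+  community.general.postgresql_lang:
+    db: testdb
+    lang: plperl
+    state: present
+    trust: yes
+    session_role: admin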
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE LANGUAGE "acme"']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+executed_queries = []
+
+
+def lang_exists(cursor, lang):
+ """Checks if language exists for db"""
+ query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.rowcount > 0
+
+
+def lang_istrusted(cursor, lang):
+ """Checks if language is trusted for db"""
+ query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def lang_altertrust(cursor, lang, trust):
+ """Changes if language is trusted for db"""
+ query = "UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s"
+ cursor.execute(query, {'trust': trust, 'lang': lang})
+ executed_queries.append(cursor.mogrify(query, {'trust': trust, 'lang': lang}))
+ return True
+
+
+def lang_add(cursor, lang, trust):
+ """Adds language for db"""
+ if trust:
+ query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
+ else:
+ query = 'CREATE LANGUAGE "%s"' % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def lang_drop(cursor, lang, cascade):
+ """Drops language for db"""
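+ # Use a savepoint so a failed DROP can be rolled back without aborting the whole transaction: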
+ cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
+ try:
+ if cascade:
+ query = "DROP LANGUAGE \"%s\" CASCADE" % lang
+ else:
+ query = "DROP LANGUAGE \"%s\"" % lang
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return False
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
+ return True
+
+
+def get_lang_owner(cursor, lang):
+ """Get language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ """
+ query = ("SELECT r.rolname FROM pg_language l "
+ "JOIN pg_roles r ON l.lanowner = r.oid "
+ "WHERE l.lanname = %(lang)s")
+ cursor.execute(query, {'lang': lang})
+ return cursor.fetchone()[0]
+
+
+def set_lang_owner(cursor, lang, owner):
+ """Set language owner.
+
+ Args:
+ cursor (cursor): psycopg2 cursor object.
+ lang (str): language name.
+ owner (str): name of new owner.
+ """
+ query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
+ executed_queries.append(query)
+ cursor.execute(query)
+ return True
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", required=True, aliases=["login_db"]),
+ lang=dict(type="str", required=True, aliases=["name"]),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust=dict(type="bool", default="no"),
+ force_trust=dict(type="bool", default="no"),
+ cascade=dict(type="bool", default="no"),
+ fail_on_drop=dict(type="bool", default="yes"),
+ session_role=dict(type="str"),
+ owner=dict(type="str"),
+ trust_input=dict(type="bool", default="yes")
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ db = module.params["db"]
+ lang = module.params["lang"]
+ state = module.params["state"]
+ trust = module.params["trust"]
+ force_trust = module.params["force_trust"]
+ cascade = module.params["cascade"]
+ fail_on_drop = module.params["fail_on_drop"]
+ owner = module.params["owner"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, lang, session_role, owner)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor()
+
+ changed = False
+ kw = {'db': db, 'lang': lang, 'trust': trust}
+
+ if state == "present":
+ if lang_exists(cursor, lang):
+ lang_trusted = lang_istrusted(cursor, lang)
+ if (lang_trusted and not trust) or (not lang_trusted and trust):
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_altertrust(cursor, lang, trust)
+ else:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = lang_add(cursor, lang, trust)
+ if force_trust:
+ changed = lang_altertrust(cursor, lang, trust)
+
+ else:
+ if lang_exists(cursor, lang):
+ if module.check_mode:
+ changed = True
+ kw['lang_dropped'] = True
+ else:
+ changed = lang_drop(cursor, lang, cascade)
+ if fail_on_drop and not changed:
+ msg = ("unable to drop language, use cascade "
+ "to delete dependencies or fail_on_drop=no to ignore")
+ module.fail_json(msg=msg)
+ kw['lang_dropped'] = changed
+
+ if owner and state == 'present':
+ if lang_exists(cursor, lang):
+ if owner != get_lang_owner(cursor, lang):
+ changed = set_lang_owner(cursor, lang, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py
new file mode 100644
index 00000000..3292a6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_membership.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_membership
+short_description: Add or remove PostgreSQL roles from groups
+description:
+- Adds or removes PostgreSQL roles from groups (other roles).
+- Users are roles with login privilege.
+- Groups are PostgreSQL roles usually without LOGIN privilege.
+- "Common use case:"
+- 1) add a new group (groups) by M(community.general.postgresql_user) module with I(role_attr_flags=NOLOGIN)
+- 2) grant them desired privileges by M(community.general.postgresql_privs) module
+- 3) add desired PostgreSQL users to the new group (groups) by this module
+options:
+ groups:
+ description:
+ - The list of groups (roles) that need to be granted to or revoked from I(target_roles).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - group
+ - source_role
+ - source_roles
+ target_roles:
+ description:
+ - The list of target roles (groups will be granted to them).
+ required: yes
+ type: list
+ elements: str
+ aliases:
+ - target_role
+ - users
+ - user
+ fail_on_role:
+ description:
+ - If C(yes), fail when group or target_role doesn't exist. If C(no), just warn and continue.
+ default: yes
+ type: bool
+ state:
+ description:
+ - Membership state.
+ - I(state=present) implies the I(groups) must be granted to I(target_roles).
+ - I(state=absent) implies the I(groups) must be revoked from I(target_roles).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(groups),
+ I(target_roles), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_owner
+- name: PostgreSQL role membership reference
+ description: Complete reference of the PostgreSQL role membership documentation.
+ link: https://www.postgresql.org/docs/current/role-membership.html
+- name: PostgreSQL role attributes reference
+ description: Complete reference of the PostgreSQL role attributes documentation.
+ link: https://www.postgresql.org/docs/current/role-attributes.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Grant role read_only to alice and bob
+ community.general.postgresql_membership:
+ group: read_only
+ target_roles:
+ - alice
+ - bob
+ state: present
+
+# you can also use target_roles: alice,bob,etc to pass the role list
+
+- name: Revoke role read_only and exec_func from bob. Ignore if roles don't exist
+ community.general.postgresql_membership:
+ groups:
+ - read_only
+ - exec_func
+ target_role: bob
+ fail_on_role: no
+ state: absent
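+
+# A minimal sketch of the three-step flow from the description (database, table,
+# and role names below are illustrative only):
+- name: Create group read_only without login
+ community.general.postgresql_user:
+ name: read_only
+ role_attr_flags: NOLOGIN
+
+- name: Let read_only select from table public.books in database library
+ community.general.postgresql_privs:
+ database: library
+ privs: SELECT
+ type: table
+ objs: books
+ schema: public
+ roles: read_only
+
+- name: Add alice to the read_only group
+ community.general.postgresql_membership:
+ groups: read_only
+ target_roles: alice
+ state: present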
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "GRANT \"user_ro\" TO \"alice\"" ]
+granted:
+ description: Dict of granted groups and roles.
+ returned: if I(state=present)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+revoked:
+ description: Dict of revoked groups and roles.
+ returned: if I(state=absent)
+ type: dict
+ sample: { "ro_group": [ "alice", "bob" ] }
+state:
+ description: Membership state that the module tried to set.
+ returned: always
+ type: str
+ sample: "present"
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ groups=dict(type='list', elements='str', required=True, aliases=['group', 'source_role', 'source_roles']),
+ target_roles=dict(type='list', elements='str', required=True, aliases=['target_role', 'user', 'users']),
+ fail_on_role=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ groups = module.params['groups']
+ target_roles = module.params['target_roles']
+ fail_on_role = module.params['fail_on_role']
+ state = module.params['state']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, groups, target_roles, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+
+ pg_membership = PgMembership(module, cursor, groups, target_roles, fail_on_role)
+
+ if state == 'present':
+ pg_membership.grant()
+
+ elif state == 'absent':
+ pg_membership.revoke()
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ return_dict = dict(
+ changed=pg_membership.changed,
+ state=state,
+ groups=pg_membership.groups,
+ target_roles=pg_membership.target_roles,
+ queries=pg_membership.executed_queries,
+ )
+
+ if state == 'present':
+ return_dict['granted'] = pg_membership.granted
+ elif state == 'absent':
+ return_dict['revoked'] = pg_membership.revoked
+
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py
new file mode 100644
index 00000000..06a09c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_owner.py
@@ -0,0 +1,453 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_owner
+short_description: Change the owner of a PostgreSQL database object
+description:
+- Change the owner of a PostgreSQL database object.
+- Also allows reassigning the ownership of database objects owned by a database role to another role.
+
+options:
+ new_owner:
+ description:
+ - Role (user/group) to set as an I(obj_name) owner.
+ type: str
+ required: yes
+ obj_name:
+ description:
+ - Name of a database object whose ownership to change.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ obj_type:
+ description:
+ - Type of a database object.
+ - Mutually exclusive with I(reassign_owned_by).
+ type: str
+ choices: [ database, function, matview, sequence, schema, table, tablespace, view ]
+ aliases:
+ - type
+ reassign_owned_by:
+ description:
+ - The list of role names. The ownership of all the objects within the current database,
+ and of all shared objects (databases, tablespaces), owned by these role(s) will be reassigned to I(new_owner).
+ - Pay attention, it reassigns all objects owned by these role(s) in the I(db)!
+ - If the role(s) exist, the task always returns C(changed=True).
+ - Cannot reassign ownership of objects that are required by the database system.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ type: list
+ elements: str
+ fail_on_role:
+ description:
+ - If C(yes), fail when I(reassign_owned_by) role does not exist.
+ Otherwise just warn and continue.
+ - Mutually exclusive with I(obj_name) and I(obj_type).
+ default: yes
+ type: bool
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(new_owner), I(obj_name),
+ I(reassign_owned_by), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- name: PostgreSQL REASSIGN OWNED command reference
+ description: Complete reference of the PostgreSQL REASSIGN OWNED command documentation.
+ link: https://www.postgresql.org/docs/current/sql-reassign-owned.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# Set owner as alice for function myfunc in database bar by ansible ad-hoc command:
+# ansible -m postgresql_owner -a "db=bar new_owner=alice obj_name=myfunc obj_type=function"
+
+- name: The same as above by playbook
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: myfunc
+ obj_type: function
+
+- name: Set owner as bob for table acme in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: bob
+ obj_name: acme
+ obj_type: table
+
+- name: Set owner as alice for view test_view in database bar
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ obj_name: test_view
+ obj_type: view
+
+- name: Set owner as bob for tablespace ssd in database foo
+ community.general.postgresql_owner:
+ db: foo
+ new_owner: bob
+ obj_name: ssd
+ obj_type: tablespace
+
+- name: Reassign all objects in database bar owned by bob to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+
+- name: Reassign all objects in database bar owned by bob and bill to alice
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by:
+ - bob
+ - bill
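+
+# An illustrative variant (role names are only examples): the same reassignment,
+# but only warning instead of failing when a listed role does not exist.
+- name: Reassign all objects in database bar owned by bob to alice, ignoring missing roles
+ community.general.postgresql_owner:
+ db: bar
+ new_owner: alice
+ reassign_owned_by: bob
+ fail_on_role: no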
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'REASSIGN OWNED BY "bob" TO "alice"' ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgOwnership(object):
+
+ """Class for changing ownership of PostgreSQL objects.
+
+ Arguments:
+ module (AnsibleModule): Object of Ansible module class.
+ cursor (psycopg2.connect.cursor): Cursor object for interaction with the database.
+ role (str): Role name to set as a new owner of objects.
+
+ Important:
+ If you want to add handling of a new type of database object:
+ 1. Add a specific method for it, like self.__set_db_owner(), etc.
+ 2. Add a condition checking ownership of the new object type to self.__is_owner()
+ 3. Add a condition invoking the specific method to self.set_owner()
+ 4. Add the information to the module documentation
+ That's all (an illustrative sketch follows this docstring).
+ """
+
+ def __init__(self, module, cursor, role):
+ self.module = module
+ self.cursor = cursor
+ self.check_role_exists(role)
+ self.role = role
+ self.changed = False
+ self.executed_queries = []
+ self.obj_name = ''
+ self.obj_type = ''
+
+ def check_role_exists(self, role, fail_on_role=True):
+ """Check the role exists or not.
+
+ Arguments:
+ role (str): Role name.
+ fail_on_role (bool): If True, fail when the role does not exist.
+ Otherwise just warn and continue.
+ """
+ if not self.__role_exists(role):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % role)
+ else:
+ self.module.warn("Role '%s' does not exist, pass" % role)
+
+ return False
+
+ else:
+ return True
+
+ def reassign(self, old_owners, fail_on_role):
+ """Implements REASSIGN OWNED BY command.
+
+ On success, set self.changed to True.
+
+ Arguments:
+ old_owners (list): The ownership of all the objects within
+ the current database, and of all shared objects (databases, tablespaces),
+ owned by these roles will be reassigned to self.role.
+ fail_on_role (bool): If True, fail when a role from old_owners does not exist.
+ Otherwise just warn and continue.
+ """
+ roles = []
+ for r in old_owners:
+ if self.check_role_exists(r, fail_on_role):
+ roles.append('"%s"' % r)
+
+ # Roles do not exist, nothing to do, exit:
+ if not roles:
+ return False
+
+ old_owners = ','.join(roles)
+
+ query = ['REASSIGN OWNED BY']
+ query.append(old_owners)
+ query.append('TO "%s"' % self.role)
+ query = ' '.join(query)
+
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, obj_type, obj_name):
+ """Change owner of a database object.
+
+ Arguments:
+ obj_type (str): Type of object (like database, table, view, etc.).
+ obj_name (str): Object name.
+ """
+ self.obj_name = obj_name
+ self.obj_type = obj_type
+
+ # If new_owner already owns the object,
+ # there is nothing to do:
+ if self.__is_owner():
+ return False
+
+ if obj_type == 'database':
+ self.__set_db_owner()
+
+ elif obj_type == 'function':
+ self.__set_func_owner()
+
+ elif obj_type == 'sequence':
+ self.__set_seq_owner()
+
+ elif obj_type == 'schema':
+ self.__set_schema_owner()
+
+ elif obj_type == 'table':
+ self.__set_table_owner()
+
+ elif obj_type == 'tablespace':
+ self.__set_tablespace_owner()
+
+ elif obj_type == 'view':
+ self.__set_view_owner()
+
+ elif obj_type == 'matview':
+ self.__set_mat_view_owner()
+
+ def __is_owner(self):
+ """Return True if self.role is the current object owner."""
+ if self.obj_type == 'table':
+ query = ("SELECT 1 FROM pg_tables "
+ "WHERE tablename = %(obj_name)s "
+ "AND tableowner = %(role)s")
+
+ elif self.obj_type == 'database':
+ query = ("SELECT 1 FROM pg_database AS d "
+ "JOIN pg_roles AS r ON d.datdba = r.oid "
+ "WHERE d.datname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'function':
+ query = ("SELECT 1 FROM pg_proc AS f "
+ "JOIN pg_roles AS r ON f.proowner = r.oid "
+ "WHERE f.proname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'sequence':
+ query = ("SELECT 1 FROM pg_class AS c "
+ "JOIN pg_roles AS r ON c.relowner = r.oid "
+ "WHERE c.relkind = 'S' AND c.relname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'schema':
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %(obj_name)s "
+ "AND schema_owner = %(role)s")
+
+ elif self.obj_type == 'tablespace':
+ query = ("SELECT 1 FROM pg_tablespace AS t "
+ "JOIN pg_roles AS r ON t.spcowner = r.oid "
+ "WHERE t.spcname = %(obj_name)s "
+ "AND r.rolname = %(role)s")
+
+ elif self.obj_type == 'view':
+ query = ("SELECT 1 FROM pg_views "
+ "WHERE viewname = %(obj_name)s "
+ "AND viewowner = %(role)s")
+
+ elif self.obj_type == 'matview':
+ query = ("SELECT 1 FROM pg_matviews "
+ "WHERE matviewname = %(obj_name)s "
+ "AND matviewowner = %(role)s")
+
+ query_params = {'obj_name': self.obj_name, 'role': self.role}
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+ def __set_db_owner(self):
+ """Set the database owner."""
+ query = 'ALTER DATABASE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_func_owner(self):
+ """Set the function owner."""
+ query = 'ALTER FUNCTION %s OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_seq_owner(self):
+ """Set the sequence owner."""
+ query = 'ALTER SEQUENCE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_schema_owner(self):
+ """Set the schema owner."""
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'schema'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_table_owner(self):
+ """Set the table owner."""
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_tablespace_owner(self):
+ """Set the tablespace owner."""
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.obj_name, self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_view_owner(self):
+ """Set the view owner."""
+ query = 'ALTER VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __set_mat_view_owner(self):
+ """Set the materialized view owner."""
+ query = 'ALTER MATERIALIZED VIEW %s OWNER TO "%s"' % (pg_quote_identifier(self.obj_name, 'table'),
+ self.role)
+ self.changed = exec_sql(self, query, return_bool=True)
+
+ def __role_exists(self, role):
+ """Return True if role exists, otherwise return False."""
+ query_params = {'role': role}
+ query = "SELECT 1 FROM pg_roles WHERE rolname = %(role)s"
+ return exec_sql(self, query, query_params, add_to_executed=False)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ new_owner=dict(type='str', required=True),
+ obj_name=dict(type='str'),
+ obj_type=dict(type='str', aliases=['type'], choices=[
+ 'database', 'function', 'matview', 'sequence', 'schema', 'table', 'tablespace', 'view']),
+ reassign_owned_by=dict(type='list', elements='str'),
+ fail_on_role=dict(type='bool', default=True),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['obj_name', 'reassign_owned_by'],
+ ['obj_type', 'reassign_owned_by'],
+ ['obj_name', 'fail_on_role'],
+ ['obj_type', 'fail_on_role'],
+ ],
+ supports_check_mode=True,
+ )
+
+ new_owner = module.params['new_owner']
+ obj_name = module.params['obj_name']
+ obj_type = module.params['obj_type']
+ reassign_owned_by = module.params['reassign_owned_by']
+ fail_on_role = module.params['fail_on_role']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, new_owner, obj_name, reassign_owned_by, session_role)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##############
+ # Create the object and do main job:
+ pg_ownership = PgOwnership(module, cursor, new_owner)
+
+ # if we want to change ownership:
+ if obj_name:
+ pg_ownership.set_owner(obj_type, obj_name)
+
+ # if we want to reassign objects owned by roles:
+ elif reassign_owned_by:
+ pg_ownership.reassign(reassign_owned_by, fail_on_role)
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(
+ changed=pg_ownership.changed,
+ queries=pg_ownership.executed_queries,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py
new file mode 100644
index 00000000..1f484bcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_pg_hba.py
@@ -0,0 +1,745 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Sebastiaan Mannem (@sebasmannem) <sebastiaan.mannem@enterprisedb.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+'''
+This module is used to manage postgres pg_hba files with Ansible.
+'''
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_pg_hba
+short_description: Add, remove or modify a rule in a pg_hba file
+description:
+ - The fundamental function of the module is to create or delete lines in pg_hba files.
+ - The lines in the file should be in a typical pg_hba form and lines should be unique per key (type, databases, users, source).
+ If they are not unique and the key matches 'the one to change', only one (for C(state=present)) or none (for C(state=absent)) of the matching lines will remain.
+extends_documentation_fragment: files
+options:
+ address:
+ description:
+ - The source address/net where the connections could come from.
+ - Will not be used for entries of I(type)=C(local).
+ - You can also use keywords C(all), C(samehost), and C(samenet).
+ default: samehost
+ type: str
+ aliases: [ source, src ]
+ backup:
+ description:
+ - If set, create a backup of the C(pg_hba) file before it is modified.
+ The location of the backup is returned in the I(backup_file) return value by this module.
+ default: false
+ type: bool
+ backup_file:
+ description:
+ - Write backup to a specific backupfile rather than a temp file.
+ type: str
+ create:
+ description:
+ - Create a C(pg_hba) file if none exists.
+ - When set to false, an error is raised when the C(pg_hba) file doesn't exist.
+ default: false
+ type: bool
+ contype:
+ description:
+ - Type of the rule. If not set, C(postgresql_pg_hba) will only return contents.
+ type: str
+ choices: [ local, host, hostnossl, hostssl ]
+ databases:
+ description:
+ - Databases this line applies to.
+ default: all
+ type: str
+ dest:
+ description:
+ - Path to C(pg_hba) file to modify.
+ type: path
+ required: true
+ method:
+ description:
+ - Authentication method to be used.
+ type: str
+ choices: [ cert, gss, ident, krb5, ldap, md5, pam, password, peer, radius, reject, scram-sha-256, sspi, trust ]
+ default: md5
+ netmask:
+ description:
+ - The netmask of the source address.
+ type: str
+ options:
+ description:
+ - Additional options for the authentication I(method).
+ type: str
+ order:
+ description:
+ - The entries will be written out in a specific order.
+ With this option you can control by which field they are ordered first, second and last.
+ s=source, d=databases, u=users.
+ This option is deprecated since 2.9 and will be removed in community.general 3.0.0.
+ The sort order is now hardcoded to C(sdu).
+ type: str
+ default: sdu
+ choices: [ sdu, sud, dsu, dus, usd, uds ]
+ state:
+ description:
+ - The lines will be added/modified when C(state=present) and removed when C(state=absent).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ users:
+ description:
+ - Users this line applies to.
+ type: str
+ default: all
+
+notes:
+ - The default authentication assumes that on the host, you are either logging in as or
+ sudo'ing to an account with appropriate permissions to read and modify the file.
+ - This module also returns the pg_hba info. You can use this module to retrieve it by only specifying I(dest) (see the examples).
+ The info can be found in the returned data under the key C(pg_hba), a list containing a dict per rule.
+ - This module will sort the resulting C(pg_hba) file if a rule change is required.
+ This could give unexpected results with manually created hba files, if they were improperly sorted.
+ For example, a rule may have been created for a network first and for an IP in that network range next.
+ In that situation the IP-specific rule never hits; it is obsolete in the C(pg_hba) file.
+ After the C(pg_hba) file is rewritten by the M(community.general.postgresql_pg_hba) module, the IP-specific rule is sorted above the range rule.
+ It then starts to hit, which can give unexpected results.
+ - With the 'order' parameter you can control which field is used to sort first, next and last.
+ - The module supports a check mode and a diff mode.
+
+seealso:
+- name: PostgreSQL pg_hba.conf file reference
+ description: Complete reference of the PostgreSQL pg_hba.conf file documentation.
+ link: https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
+
+requirements:
+ - ipaddress
+
+author: Sebastiaan Mannem (@sebasmannem)
+'''
+
+EXAMPLES = '''
+- name: Grant users joe and simon access to databases sales and logistics from ipv6 localhost ::1/128 using peer authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: joe,simon
+ source: ::1
+ databases: sales,logistics
+ method: peer
+ create: true
+
+- name: Grant user replication from network 192.168.0.100/24 access for replication with client cert authentication.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: host
+ users: replication
+ source: 192.168.0.100/24
+ databases: replication
+ method: cert
+
+- name: Revoke access from local user mary on database mydb.
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ contype: local
+ users: mary
+ databases: mydb
+ state: absent
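+
+# An illustrative read-only use, as mentioned in the notes: with only dest set,
+# no rule is changed and the parsed rules are returned under the pg_hba key.
+- name: Read the current rules from pg_hba.conf
+ community.general.postgresql_pg_hba:
+ dest: /var/lib/postgres/data/pg_hba.conf
+ register: hba_rules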
+'''
+
+RETURN = r'''
+msgs:
+ description: List of textual messages describing what was done
+ returned: always
+ type: list
+ sample:
+ "msgs": [
+ "Removing",
+ "Changed",
+ "Writing"
+ ]
+backup_file:
+ description: File that the original pg_hba file was backed up to
+ returned: changed
+ type: str
+ sample: /tmp/pg_hba_jxobj_p
+pg_hba:
+ description: List of the pg_hba rules as they are configured in the specified hba file
+ returned: always
+ type: list
+ sample:
+ "pg_hba": [
+ {
+ "db": "all",
+ "method": "md5",
+ "src": "samehost",
+ "type": "host",
+ "usr": "all"
+ }
+ ]
+'''
+
+import os
+import re
+import traceback
+
+IPADDRESS_IMP_ERR = None
+try:
+ import ipaddress
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+
+import tempfile
+import shutil
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# from ansible.module_utils.postgres import postgres_common_argument_spec
+
+PG_HBA_METHODS = ["trust", "reject", "md5", "password", "gss", "sspi", "krb5", "ident", "peer",
+ "ldap", "radius", "cert", "pam", "scram-sha-256"]
+PG_HBA_TYPES = ["local", "host", "hostssl", "hostnossl"]
+PG_HBA_ORDERS = ["sdu", "sud", "dsu", "dus", "usd", "uds"]
+PG_HBA_HDR = ['type', 'db', 'usr', 'src', 'mask', 'method', 'options']
+
+WHITESPACES_RE = re.compile(r'\s+')
+
+
+class PgHbaError(Exception):
+ '''
+ This exception is raised when parsing the pg_hba file ends in an error.
+ '''
+
+
+class PgHbaRuleError(PgHbaError):
+ '''
+ This exception is raised when parsing a pg_hba rule ends in an error.
+ '''
+
+
+class PgHbaRuleChanged(PgHbaRuleError):
+ '''
+ This exception is raised when a new parsed rule is a changed version of an existing rule.
+ '''
+
+
+class PgHbaValueError(PgHbaError):
+ '''
+ This exception is raised when a pg_hba value is invalid.
+ '''
+
+
+class PgHbaRuleValueError(PgHbaRuleError):
+ '''
+ This exception is raised when a pg_hba rule contains an invalid value.
+ '''
+
+
+class PgHba(object):
+ """
+ PgHba object to read/write entries to/from.
+ pg_hba_file - the path of the pg_hba file to manage
+ """
+ def __init__(self, pg_hba_file=None, order="sdu", backup=False, create=False):
+ if order not in PG_HBA_ORDERS:
+ msg = "invalid order setting {0} (should be one of '{1}')."
+ raise PgHbaError(msg.format(order, "', '".join(PG_HBA_ORDERS)))
+ self.pg_hba_file = pg_hba_file
+ self.rules = None
+ self.comment = None
+ self.order = order
+ self.backup = backup
+ self.last_backup = None
+ self.create = create
+ self.unchanged()
+ # self.databases will be updated by add_rule and gives some idea of the number of databases
+ # (at least those that are handled by this pg_hba)
+ self.databases = set(['postgres', 'template0', 'template1'])
+
+ # self.users will be updated by add_rule and gives some idea of the number of users
+ # (at least those that are handled by this pg_hba); since these might also be groups with multiple
+ # users, this might be totally off, but at least it is some info...
+ self.users = set(['postgres'])
+
+ self.read()
+
+ def unchanged(self):
+ '''
+ This method resets self.diff to an empty default
+ '''
+ self.diff = {'before': {'file': self.pg_hba_file, 'pg_hba': []},
+ 'after': {'file': self.pg_hba_file, 'pg_hba': []}}
+
+ def read(self):
+ '''
+ Read in the pg_hba from the system
+ '''
+ self.rules = {}
+ self.comment = []
+ # read the pg_hba file
+ try:
+ with open(self.pg_hba_file, 'r') as file:
+ for line in file:
+ line = line.strip()
+ # split off the trailing comment, if any
+ if '#' in line:
+ line, comment = line.split('#', 1)
+ self.comment.append('#' + comment)
+ try:
+ self.add_rule(PgHbaRule(line=line))
+ except PgHbaRuleError:
+ pass
+ self.unchanged()
+ except IOError:
+ pass
+
+ def write(self, backup_file=''):
+ '''
+ This method writes the PgHba rules (back) to a file.
+ '''
+ if not self.changed():
+ return False
+
+ contents = self.render()
+ if self.pg_hba_file:
+ if not (os.path.isfile(self.pg_hba_file) or self.create):
+ raise PgHbaError("pg_hba file '{0}' doesn't exist. "
+ "Use create option to autocreate.".format(self.pg_hba_file))
+ if self.backup and os.path.isfile(self.pg_hba_file):
+ if backup_file:
+ self.last_backup = backup_file
+ else:
+ __backup_file_h, self.last_backup = tempfile.mkstemp(prefix='pg_hba')
+ shutil.copy(self.pg_hba_file, self.last_backup)
+ fileh = open(self.pg_hba_file, 'w')
+ else:
+ filed, __path = tempfile.mkstemp(prefix='pg_hba')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(contents)
+ self.unchanged()
+ fileh.close()
+ return True
+
+ def add_rule(self, rule):
+ '''
+ This method can be used to add a rule to the list of rules in this PgHba object
+ '''
+ key = rule.key()
+ try:
+ try:
+ oldrule = self.rules[key]
+ except KeyError:
+ raise PgHbaRuleChanged
+ ekeys = set(list(oldrule.keys()) + list(rule.keys()))
+ ekeys.remove('line')
+ for k in ekeys:
+ if oldrule.get(k) != rule.get(k):
+ raise PgHbaRuleChanged('{0} changes {1}'.format(rule, oldrule))
+ except PgHbaRuleChanged:
+ self.rules[key] = rule
+ self.diff['after']['pg_hba'].append(rule.line())
+ if rule['db'] not in ['all', 'samerole', 'samegroup', 'replication']:
+ databases = set(rule['db'].split(','))
+ self.databases.update(databases)
+ if rule['usr'] != 'all':
+ user = rule['usr']
+ if user[0] == '+':
+ user = user[1:]
+ self.users.add(user)
+
+ def remove_rule(self, rule):
+ '''
+ This method can be used to find and remove a rule. It doesn't look for the exact rule, only
+ the rule with the same key.
+ '''
+ keys = rule.key()
+ try:
+ del self.rules[keys]
+ self.diff['before']['pg_hba'].append(rule.line())
+ except KeyError:
+ pass
+
+ def get_rules(self, with_lines=False):
+ '''
+ This method returns all the rules of the PgHba object
+ '''
+ rules = sorted(self.rules.values())
+ for rule in rules:
+ ret = {}
+ for key, value in rule.items():
+ ret[key] = value
+ if not with_lines:
+ if 'line' in ret:
+ del ret['line']
+ else:
+ ret['line'] = rule.line()
+
+ yield ret
+
+ def render(self):
+ '''
+ This method renders the content of the PgHba rules and comments.
+ The returning value can be used directly to write to a new file.
+ '''
+ comment = '\n'.join(self.comment)
+ rule_lines = '\n'.join([rule['line'] for rule in self.get_rules(with_lines=True)])
+ result = comment + '\n' + rule_lines
+ # End it properly with a linefeed (if not already).
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def changed(self):
+ '''
+ This method can be called to detect if the PgHba file has been changed.
+ '''
+ return bool(self.diff['before']['pg_hba'] or self.diff['after']['pg_hba'])
+
+
+class PgHbaRule(dict):
+ '''
+ This class represents one rule as defined in a line in a PgHbaFile.
+ '''
+
+ def __init__(self, contype=None, databases=None, users=None, source=None, netmask=None,
+ method=None, options=None, line=None):
+ '''
+ This function can be called with a comma separated list of databases and a comma separated
+ list of users and it will act as a generator that returns an expanded list of rules one by
+ one.
+ '''
+
+ super(PgHbaRule, self).__init__()
+
+ if line:
+ # Read values from line if parsed
+ self.fromline(line)
+
+ # read rule cols from parsed items
+ rule = dict(zip(PG_HBA_HDR, [contype, databases, users, source, netmask, method, options]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ # Some sanity checks
+ for key in ['method', 'type']:
+ if key not in self:
+ raise PgHbaRuleError('Missing {0} in rule {1}'.format(key, self))
+
+ if self['method'] not in PG_HBA_METHODS:
+ msg = "invalid method {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['method'], "', '".join(PG_HBA_METHODS)))
+
+ if self['type'] not in PG_HBA_TYPES:
+ msg = "invalid connection type {0} (should be one of '{1}')."
+ raise PgHbaRuleValueError(msg.format(self['type'], "', '".join(PG_HBA_TYPES)))
+
+ if self['type'] == 'local':
+ self.unset('src')
+ self.unset('mask')
+ elif 'src' not in self:
+ raise PgHbaRuleError('Missing src in rule {0}'.format(self))
+ elif '/' in self['src']:
+ self.unset('mask')
+ else:
+ self['src'] = str(self.source())
+ self.unset('mask')
+
+ def unset(self, key):
+ '''
+ This method is used to unset certain columns if they exist
+ '''
+ if key in self:
+ del self[key]
+
+ def line(self):
+ '''
+ This method can be used to return (or generate) the line
+ '''
+ try:
+ return self['line']
+ except KeyError:
+ self['line'] = "\t".join([self[k] for k in PG_HBA_HDR if k in self.keys()])
+ return self['line']
+
+ def fromline(self, line):
+ '''
+ split into 'type', 'db', 'usr', 'src', 'mask', 'method', 'options' cols
+ '''
+ if WHITESPACES_RE.sub('', line) == '':
+ # empty line. skip this one...
+ return
+ cols = WHITESPACES_RE.split(line)
+ if len(cols) < 4:
+ msg = "Rule {0} has too few columns."
+ raise PgHbaValueError(msg.format(line))
+ if cols[0] not in PG_HBA_TYPES:
+ msg = "Rule {0} has unknown type: {1}."
+ raise PgHbaValueError(msg.format(line, cols[0]))
+ if cols[0] == 'local':
+ cols.insert(3, None) # No address
+ cols.insert(3, None) # No IP-mask
+ if len(cols) < 6:
+ cols.insert(4, None) # No IP-mask
+ elif cols[5] not in PG_HBA_METHODS:
+ cols.insert(4, None) # No IP-mask
+ if cols[5] not in PG_HBA_METHODS:
+ raise PgHbaValueError("Rule {0} of '{1}' type has invalid auth-method '{2}'".format(line, cols[0], cols[5]))
+
+ if len(cols) < 7:
+ cols.insert(6, None) # No auth-options
+ else:
+ cols[6] = " ".join(cols[6:]) # combine all auth-options
+ rule = dict(zip(PG_HBA_HDR, cols[:7]))
+ for key, value in rule.items():
+ if value:
+ self[key] = value
+
+ def key(self):
+ '''
+ This method can be used to get the key from a rule.
+ '''
+ if self['type'] == 'local':
+ source = 'local'
+ else:
+ source = str(self.source())
+ return (source, self['db'], self['usr'])
+
+ def source(self):
+ '''
+ This method is used to get the source of a rule as an ipaddress object if possible.
+ '''
+ if 'mask' in self.keys():
+ try:
+ ipaddress.ip_address(u'{0}'.format(self['src']))
+ except ValueError:
+ raise PgHbaValueError('Mask was specified, but source "{0}" '
+ 'is not a valid ip'.format(self['src']))
+ # The ipaddress module cannot work with an ipv6 netmask, so let's convert it to a prefixlen;
+ # furthermore, ipv4 with a bad netmask throws a "Rule {} doesn't seem to be an ip, but has a
+ # mask" error that doesn't really describe what is going on.
+ try:
+ mask_as_ip = ipaddress.ip_address(u'{0}'.format(self['mask']))
+ except ValueError:
+ raise PgHbaValueError('Mask {0} seems to be invalid'.format(self['mask']))
+ binvalue = "{0:b}".format(int(mask_as_ip))
+ if '01' in binvalue:
+ raise PgHbaValueError('IP mask {0} seems invalid '
+ '(binary value has 1 after 0)'.format(self['mask']))
+ prefixlen = binvalue.count('1')
+ sourcenw = '{0}/{1}'.format(self['src'], prefixlen)
+ try:
+ return ipaddress.ip_network(u'{0}'.format(sourcenw), strict=False)
+ except ValueError:
+ raise PgHbaValueError('{0} is not a valid address range'.format(sourcenw))
+
+ try:
+ return ipaddress.ip_network(u'{0}'.format(self['src']), strict=False)
+ except ValueError:
+ return self['src']
+
+ def __lt__(self, other):
+ """This function helps sorted to decide how to sort.
+
+ It just checks itself against the other and decides on some key values
+ if it should be sorted higher or lower in the list.
+ The way it works:
+ For networks, every 1 in 'netmask in binary' makes the subnet more specific.
+ Therefore I chose to use prefix as the weight.
+ So a single IP (/32) should have twice the weight of a /16 network.
+ To keep everything in the same weight scale,
+ - for ipv6, we use a weight scale of 0 (all possible ipv6 addresses) to 128 (single ip)
+ - for ipv4, we use a weight scale of 0 (all possible ipv4 addresses) to 128 (single ip)
+ Therefore for ipv4, we use prefixlen (0-32) * 4 for weight,
+ which corresponds to ipv6 (0-128).
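+
+ An illustrative calculation under this scheme: an IPv4 /24 source weighs
+ 24 * 4 = 96 (the same as an IPv6 /96), while a single IPv4 host (/32)
+ weighs 128, like an IPv6 /128; sources with a higher weight sort first.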
+ """
+ myweight = self.source_weight()
+ hisweight = other.source_weight()
+ if myweight != hisweight:
+ return myweight > hisweight
+
+ myweight = self.db_weight()
+ hisweight = other.db_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+
+ myweight = self.user_weight()
+ hisweight = other.user_weight()
+ if myweight != hisweight:
+ return myweight < hisweight
+ try:
+ return self['src'] < other['src']
+ except TypeError:
+ return self.source_type_weight() < other.source_type_weight()
+ except Exception:
+ # When all else fails, just compare the exact line.
+ return self.line() < other.line()
+
+ def source_weight(self):
+ """Report the weight of this source net.
+
+ Basically this is the netmask, where IPv4 is normalized to IPv6
+ (IPv4/32 has the same weight as IPv6/128).
+ """
+ if self['type'] == 'local':
+ return 130
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return sourceobj.prefixlen * 4
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return sourceobj.prefixlen
+ if isinstance(sourceobj, str):
+ # You can also write all to match any IP address,
+ # samehost to match any of the server's own IP addresses,
+ # or samenet to match any address in any subnet that the server is connected to.
+ if sourceobj == 'all':
+ # (all is considered the full range of all ips, which has a weight of 0)
+ return 0
+ if sourceobj == 'samehost':
+ # (sort samehost second after local)
+ return 129
+ if sourceobj == 'samenet':
+ # Might write some fancy code to determine all prefix's
+ # from all interfaces and find a sane value for this one.
+ # For now, let's assume IPv4/24 or IPv6/96 (both have weight 96).
+ return 96
+ if sourceobj[0] == '.':
+ # suffix matching (domain name), let's assume a very large scale
+ # and therefore a very low weight IPv4/16 or IPv6/64 (both have weight 64).
+ return 64
+ # hostname, let's assume only one host matches, which is
+ # IPv4/32 or IPv6/128 (both have weight 128)
+ return 128
+ raise PgHbaValueError('Cannot deduce the source weight of this source {0}'.format(sourceobj))
+
+ def source_type_weight(self):
+ """Give a weight on the type of this source.
+
+ Basically make sure that IPv6Networks are sorted higher than IPv4Networks.
+ This is a 'when all else fails' solution in __lt__.
+ """
+ if self['type'] == 'local':
+ return 3
+
+ sourceobj = self.source()
+ if isinstance(sourceobj, ipaddress.IPv4Network):
+ return 2
+ if isinstance(sourceobj, ipaddress.IPv6Network):
+ return 1
+ if isinstance(sourceobj, str):
+ return 0
+ raise PgHbaValueError('This source {0} is of an unknown type...'.format(sourceobj))
+
+ def db_weight(self):
+ """Report the weight of the database.
+
+ Normally, just 1, but for replication this is 0, and for 'all', this is more than 2.
+ """
+ if self['db'] == 'all':
+ return 100000
+ if self['db'] == 'replication':
+ return 0
+ if self['db'] in ['samerole', 'samegroup']:
+ return 1
+ return 1 + self['db'].count(',')
+
+ def user_weight(self):
+ """Report weight when comparing users."""
+ if self['usr'] == 'all':
+ return 1000000
+ return 1
+
+
+def main():
+ '''
+ This function is the main function of this module
+ '''
+ # argument_spec = postgres_common_argument_spec()
+ argument_spec = dict()
+ argument_spec.update(
+ address=dict(type='str', default='samehost', aliases=['source', 'src']),
+ backup=dict(type='bool', default=False),
+ backup_file=dict(type='str'),
+ contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
+ create=dict(type='bool', default=False),
+ databases=dict(type='str', default='all'),
+ dest=dict(type='path', required=True),
+ method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
+ netmask=dict(type='str'),
+ options=dict(type='str'),
+ order=dict(type='str', default="sdu", choices=PG_HBA_ORDERS,
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ users=dict(type='str', default='all')
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ if IPADDRESS_IMP_ERR is not None:
+ module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
+
+ contype = module.params["contype"]
+ create = bool(module.params["create"] or module.check_mode)
+ if module.check_mode:
+ backup = False
+ else:
+ backup = module.params['backup']
+ backup_file = module.params['backup_file']
+ databases = module.params["databases"]
+ dest = module.params["dest"]
+
+ method = module.params["method"]
+ netmask = module.params["netmask"]
+ options = module.params["options"]
+ order = module.params["order"]
+ source = module.params["address"]
+ state = module.params["state"]
+ users = module.params["users"]
+
+ ret = {'msgs': []}
+ try:
+ pg_hba = PgHba(dest, order, backup=backup, create=create)
+ except PgHbaError as error:
+ module.fail_json(msg='Error reading file:\n{0}'.format(error))
+
+ if contype:
+ try:
+ for database in databases.split(','):
+ for user in users.split(','):
+ rule = PgHbaRule(contype, database, user, source, netmask, method, options)
+ if state == "present":
+ ret['msgs'].append('Adding')
+ pg_hba.add_rule(rule)
+ else:
+ ret['msgs'].append('Removing')
+ pg_hba.remove_rule(rule)
+ except PgHbaError as error:
+ module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
+ file_args = module.load_file_common_arguments(module.params)
+ ret['changed'] = changed = pg_hba.changed()
+ if changed:
+ ret['msgs'].append('Changed')
+ ret['diff'] = pg_hba.diff
+
+ if not module.check_mode:
+ ret['msgs'].append('Writing')
+ try:
+ if pg_hba.write(backup_file):
+ module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
+ expand=False)
+ except PgHbaError as error:
+ module.fail_json(msg='Error writing file:\n{0}'.format(error))
+ if pg_hba.last_backup:
+ ret['backup_file'] = pg_hba.last_backup
+
+ ret['pg_hba'] = list(pg_hba.get_rules())
+ module.exit_json(**ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py
new file mode 100644
index 00000000..240cea57
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_ping.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_ping
+short_description: Check remote PostgreSQL server availability
+description:
+- Simple module to check remote PostgreSQL server availability.
+options:
+ db:
+ description:
+ - Name of a database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- module: community.general.postgresql_info
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+# PostgreSQL ping dbsrv server from the shell:
+# ansible dbsrv -m postgresql_ping
+
+# For the example below you need to have generated certificates beforehand.
+# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
+- name: PostgreSQL ping dbsrv server using non-default credentials and SSL
+ community.general.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ login_user: secret
+ login_password: secret_pass
+ ca_cert: /root/root.crt
+ ssl_mode: verify-full
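+
+# An illustrative follow-up (variable names are only examples): register the
+# result and act on the returned is_available value.
+- name: Ping the server and keep the result
+ community.general.postgresql_ping:
+ db: protected_db
+ login_host: dbsrv
+ register: ping_result
+
+- name: Fail early when the server cannot be reached
+ ansible.builtin.fail:
+ msg: PostgreSQL server is not available
+ when: not ping_result.is_available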
+'''
+
+RETURN = r'''
+is_available:
+ description: PostgreSQL server availability.
+ returned: always
+ type: bool
+ sample: true
+server_version:
+ description: PostgreSQL server version.
+ returned: always
+ type: dict
+ sample: { major: 10, minor: 1 }
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgPing(object):
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.is_available = False
+ self.version = {}
+
+ def do(self):
+ self.get_pg_version()
+ return (self.is_available, self.version)
+
+ def get_pg_version(self):
+ query = "SELECT version()"
+ raw = exec_sql(self, query, add_to_executed=False)[0][0]
+ if raw:
+ self.is_available = True
+ raw = raw.split()[1].split('.')
+ self.version = dict(
+ major=int(raw[0]),
+ minor=int(raw[1]),
+ )
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if not module.params['trust_input']:
+ # Check input for potentially dangerous elements:
+ check_input(module, module.params['session_role'])
+
+ # Set some default values:
+ cursor = False
+ db_connection = False
+ result = dict(
+ changed=False,
+ is_available=False,
+ server_version=dict(),
+ )
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
+
+ if db_connection is not None:
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Do job:
+ pg_ping = PgPing(module, cursor)
+ if cursor:
+ # If connection established:
+ result["is_available"], result["server_version"] = pg_ping.do()
+ db_connection.rollback()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py
new file mode 100644
index 00000000..e8d64f36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_privs.py
@@ -0,0 +1,1171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_privs
+short_description: Grant or revoke privileges on PostgreSQL database objects
+description:
+- Grant or revoke privileges on PostgreSQL database objects.
+- This module is basically a wrapper around most of the functionality of
+ PostgreSQL's GRANT and REVOKE statements with detection of changes
+ (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
+options:
+ database:
+ description:
+ - Name of database to connect to.
+ required: yes
+ type: str
+ aliases:
+ - db
+ - login_db
+ state:
+ description:
+ - If C(present), the specified privileges are granted, if C(absent) they are revoked.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ privs:
+ description:
+ - Comma separated list of privileges to grant/revoke.
+ type: str
+ aliases:
+ - priv
+ type:
+ description:
+ - Type of database object to set privileges on.
+ - The C(default_privs) choice is available starting at version 2.7.
+ - The C(foreign_data_wrapper) and C(foreign_server) object types are available since Ansible version 2.8.
+ - The C(type) choice is available since Ansible version 2.10.
+ - The C(procedure) is supported since collection version 1.3.0 and PostgreSQL 11.
+ type: str
+ default: table
+ choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
+ group, language, table, tablespace, schema, sequence, type, procedure]
+ objs:
+ description:
+ - Comma separated list of database objects to set privileges on.
+ - If I(type) is C(table), C(partition table), C(sequence), C(function) or C(procedure),
+ the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
+ database objects of type I(type) in the schema specified via I(schema).
+ (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
+ for C(function) and C(partition table) since Ansible 2.8.)
+ - C(procedure) is supported since PostgreSQL 11 and the C(community.general) collection 1.3.0.
+ - If I(type) is C(database), this parameter can be omitted, in which case
+ privileges are set for the database specified via I(database).
+ - If I(type) is I(function) or I(procedure), colons (":") in object names will be
+ replaced with commas (needed to specify signatures, see examples).
+ type: str
+ aliases:
+ - obj
+ schema:
+ description:
+ - Schema that contains the database objects specified via I(objs).
+ - May only be provided if I(type) is C(table), C(sequence), C(function), C(procedure), C(type),
+ or C(default_privs). Defaults to C(public) in these cases.
+ - Pay attention, for embedded types when I(type=type)
+ I(schema) can be C(pg_catalog) or C(information_schema) respectively.
+ type: str
+ roles:
+ description:
+ - Comma separated list of role (user/group) names to set permissions for.
+ - The special value C(PUBLIC) can be provided instead to set permissions
+ for the implicitly defined PUBLIC group.
+ type: str
+ required: yes
+ aliases:
+ - role
+ fail_on_role:
+ description:
+ - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
+ Otherwise just warn and continue.
+ default: yes
+ type: bool
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
+ type: str
+ target_roles:
+ description:
+ - A list of existing role (user/group) names to set as the
+ default permissions for database objects subsequently created by them.
+ - Parameter I(target_roles) is only available with C(type=default_privs).
+ type: str
+ grant_option:
+ description:
+ - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
+ - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
+ - I(grant_option) only has an effect if I(state) is C(present).
+ type: bool
+ aliases:
+ - admin_option
+ host:
+ description:
+ - Database host address. If unspecified, connect via Unix socket.
+ type: str
+ aliases:
+ - login_host
+ port:
+ description:
+ - Database port to connect to.
+ type: int
+ default: 5432
+ aliases:
+ - login_port
+ unix_socket:
+ description:
+ - Path to a Unix domain socket for local connections.
+ type: str
+ aliases:
+ - login_unix_socket
+ login:
+ description:
+ - The username to authenticate with.
+ type: str
+ default: postgres
+ aliases:
+ - login_user
+ password:
+ description:
+ - The password to authenticate with.
+ type: str
+ aliases:
+ - login_password
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases:
+ - ssl_rootcert
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(roles), I(target_roles), I(session_role),
+ I(schema) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ usage_on_types:
+ description:
+    - When adding default privileges, the module always implicitly adds C(USAGE ON TYPES).
+ - To avoid this behavior, set I(usage_on_types) to C(no).
+    - Added to preserve backwards compatibility.
+ - Used only when adding default privileges, ignored otherwise.
+ type: bool
+ default: yes
+ version_added: '1.2.0'
+
+notes:
+- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
+ have singular alias names (I(priv), I(obj), I(role)).
+- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
+ C(present) and I(grant_option) to C(no) (see examples).
+- Note that when revoking privileges from a role R, this role may still have
+ access via privileges granted to any role R is a member of including C(PUBLIC).
+- Note that when you use the C(PUBLIC) role, the module always reports that the state has been changed.
+- Note that when revoking privileges from a role R, you do so as the user
+ specified via I(login). If R has been granted the same privileges by
+ another user also, R can still access database objects via these privileges.
+- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
+
+seealso:
+- module: community.general.postgresql_user
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_membership
+- name: PostgreSQL privileges
+ description: General information about PostgreSQL privileges.
+ link: https://www.postgresql.org/docs/current/ddl-priv.html
+- name: PostgreSQL GRANT command reference
+ description: Complete reference of the PostgreSQL GRANT command documentation.
+ link: https://www.postgresql.org/docs/current/sql-grant.html
+- name: PostgreSQL REVOKE command reference
+ description: Complete reference of the PostgreSQL REVOKE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-revoke.html
+
+extends_documentation_fragment:
+- community.general.postgres
+
+
+author:
+- Bernhard Weitzhofer (@b6d)
+- Tobias Birkefeld (@tcraxs)
+'''
+
+EXAMPLES = r'''
+# On database "library":
+# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
+# TO librarian, reader WITH GRANT OPTION
+- name: Grant privs to librarian and reader on database library
+ community.general.postgresql_privs:
+ database: library
+ state: present
+ privs: SELECT,INSERT,UPDATE
+ type: table
+ objs: books,authors
+ schema: public
+ roles: librarian,reader
+ grant_option: yes
+
+- name: Same as above leveraging default values
+ community.general.postgresql_privs:
+ db: library
+ privs: SELECT,INSERT,UPDATE
+ objs: books,authors
+ roles: librarian,reader
+ grant_option: yes
+
+# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
+# Note that role "reader" will be *granted* INSERT privilege itself if this
+# isn't already the case (since state: present).
+- name: Revoke privs from reader
+ community.general.postgresql_privs:
+ db: library
+ state: present
+ priv: INSERT
+ obj: books
+ role: reader
+ grant_option: no
+
+# "public" is the default schema. This also works for PostgreSQL 8.x.
+- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ privs: INSERT,UPDATE
+ objs: ALL_IN_SCHEMA
+ role: reader
+
+- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: schema
+ objs: public,math
+ role: librarian
+
+# Note the separation of arguments with colons.
+- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: function
+ obj: add(int:int)
+ schema: math
+ roles: librarian,reader
+
+# Note that group role memberships apply cluster-wide and therefore are not
+# restricted to database "library" here.
+- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
+ community.general.postgresql_privs:
+ db: library
+ type: group
+ objs: librarian,reader
+ roles: alice,bob
+ admin_option: yes
+
+# Note that here "db: postgres" specifies the database to connect to, not the
+# database to grant privileges on (which is specified via the "objs" param)
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: postgres
+ privs: ALL
+ type: database
+ obj: library
+ role: librarian
+
+# If objs is omitted for type "database", it defaults to the database
+# to which the connection is established
+- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ privs: ALL
+ type: database
+ role: librarian
+
+# Available since version 2.7
+# Objs must be set; use ALL_DEFAULT to target TABLES/SEQUENCES/TYPES/FUNCTIONS.
+# ALL_DEFAULT works only with privs=ALL.
+# For specific objects and privileges, see the following examples.
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
+ community.general.postgresql_privs:
+ db: library
+ objs: ALL_DEFAULT
+ privs: ALL
+ type: default_privs
+ role: librarian
+ grant_option: yes
+
+# Available since version 2.7
+# For specific objects, set objs to a subset of TABLES/SEQUENCES/TYPES/FUNCTIONS
+# and privs to the matching privileges (ALL_DEFAULT works only with privs=ALL).
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
+ community.general.postgresql_privs:
+ db: library
+ objs: TABLES,SEQUENCES
+ privs: SELECT
+ type: default_privs
+ role: reader
+
+- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
+ community.general.postgresql_privs:
+ db: library
+ objs: TYPES
+ privs: USAGE
+ type: default_privs
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw
+ privs: ALL
+ type: foreign_data_wrapper
+ role: reader
+
+# Available since community.general 0.2.0
+- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: customtype
+ privs: ALL
+ type: type
+ role: reader
+
+# Available since version 2.8
+- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
+ community.general.postgresql_privs:
+ db: test
+ objs: fdw_server
+ privs: ALL
+ type: foreign_server
+ role: reader
+
+# Available since version 2.8
+# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
+- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since collection version 1.3.0
+# Grant 'execute' permissions on all procedures in schema 'common' to role 'caller'
+# Needs PostgreSQL 11 or higher and community.general 1.3.0 or higher
+- name: GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA common TO caller
+ community.general.postgresql_privs:
+    type: procedure
+ state: present
+ privs: EXECUTE
+ roles: caller
+ objs: ALL_IN_SCHEMA
+ schema: common
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
+# Grant SELECT privileges by default to the role reader on new TABLES objects
+# created by librarian.
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since version 2.8
+# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
+# Revoke the default SELECT privileges on new TABLES objects created by
+# librarian from the role reader.
+- name: ALTER privs
+ community.general.postgresql_privs:
+ db: library
+ state: absent
+ schema: library
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: reader
+ target_roles: librarian
+
+# Available since community.general 0.2.0
+- name: Grant type privileges for pg_catalog.numeric type to alice
+ community.general.postgresql_privs:
+ type: type
+ roles: alice
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: acme
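+
+# A minimal sketch (database and role names are illustrative): skip the
+# implicit GRANT USAGE ON TYPES that is normally added with default
+# privileges by setting usage_on_types to no (available since collection
+# version 1.2.0).
+- name: ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO reader without USAGE ON TYPES
+  community.general.postgresql_privs:
+    db: library
+    objs: TABLES
+    privs: SELECT
+    type: default_privs
+    role: reader
+    usage_on_types: no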
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
+'''
+
+import traceback
+
+PSYCOPG2_IMP_ERR = None
+try:
+ import psycopg2
+ import psycopg2.extensions
+except ImportError:
+ PSYCOPG2_IMP_ERR = traceback.format_exc()
+ psycopg2 = None
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import postgres_common_argument_spec
+from ansible.module_utils._text import to_native
+
+VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
+ 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
+                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
+VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
+ 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
+ 'FUNCTIONS': ('ALL', 'EXECUTE'),
+ 'TYPES': ('ALL', 'USAGE')}
+
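+# Queries executed by the module; returned to the user via the 'queries' result field.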
+executed_queries = []
+
+
+class Error(Exception):
+ pass
+
+
+def role_exists(module, cursor, rolname):
+ """Check user exists or not"""
+ query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
+ try:
+ cursor.execute(query)
+ return cursor.rowcount > 0
+
+ except Exception as e:
+ module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
+
+ return False
+
+
+# We don't have functools.partial in Python < 2.5
+def partial(f, *args, **kwargs):
+ """Partial function application"""
+
+ def g(*g_args, **g_kwargs):
+ new_kwargs = kwargs.copy()
+ new_kwargs.update(g_kwargs)
+        return f(*(args + g_args), **new_kwargs)
+
+ g.f = f
+ g.args = args
+ g.kwargs = kwargs
+ return g
+
+
+class Connection(object):
+ """Wrapper around a psycopg2 connection with some convenience methods"""
+
+ def __init__(self, params, module):
+ self.database = params.database
+ self.module = module
+        # To use default values, keyword arguments must be absent, so
+        # check which values are empty and don't include them in the **kw
+        # dictionary.
+ params_map = {
+ "host": "host",
+ "login": "user",
+ "password": "password",
+ "port": "port",
+ "database": "database",
+ "ssl_mode": "sslmode",
+ "ca_cert": "sslrootcert"
+ }
+
+ kw = dict((params_map[k], getattr(params, k)) for k in params_map
+ if getattr(params, k) != '' and getattr(params, k) is not None)
+
+ # If a unix_socket is specified, incorporate it here.
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
+ if is_localhost and params.unix_socket != "":
+ kw["host"] = params.unix_socket
+
+ sslrootcert = params.ca_cert
+ if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
+            raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
+
+ self.connection = psycopg2.connect(**kw)
+ self.cursor = self.connection.cursor()
+ self.pg_version = self.connection.server_version
+
+ def commit(self):
+ self.connection.commit()
+
+ def rollback(self):
+ self.connection.rollback()
+
+ @property
+ def encoding(self):
+ """Connection encoding in Python-compatible form"""
+ return psycopg2.extensions.encodings[self.connection.encoding]
+
+ # Methods for querying database objects
+
+ # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
+ # phrases in GRANT or REVOKE statements, therefore alternative methods are
+ # provided here.
+
+ def schema_exists(self, schema):
+ query = """SELECT count(*)
+ FROM pg_catalog.pg_namespace WHERE nspname = %s"""
+ self.cursor.execute(query, (schema,))
+ return self.cursor.fetchone()[0] > 0
+
+ def get_all_tables_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_sequences_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+ query = """SELECT relname
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S'"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_all_functions_in_schema(self, schema):
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s")
+
+ if self.pg_version >= 110000:
+ query += " and p.prokind = 'f'"
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ def get_all_procedures_in_schema(self, schema):
+ if self.pg_version < 110000:
+ raise Error("PostgreSQL verion must be >= 11 for type=procedure. Exit")
+
+ if not self.schema_exists(schema):
+ raise Error('Schema "%s" does not exist.' % schema)
+
+ query = ("SELECT p.proname, oidvectortypes(p.proargtypes) "
+ "FROM pg_catalog.pg_proc p "
+ "JOIN pg_namespace n ON n.oid = p.pronamespace "
+ "WHERE nspname = %s and p.prokind = 'p'")
+
+ self.cursor.execute(query, (schema,))
+ return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
+
+ # Methods for getting access control lists and group membership info
+
+ # To determine whether anything has changed after granting/revoking
+ # privileges, we compare the access control lists of the specified database
+ # objects before and afterwards. Python's list/string comparison should
+    # suffice for change detection; we should not actually have to parse ACLs.
+ # The same should apply to group membership information.
+
+ def get_table_acls(self, schema, tables):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, tables))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_sequence_acls(self, schema, sequences):
+ query = """SELECT relacl
+ FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
+ ORDER BY relname"""
+ self.cursor.execute(query, (schema, sequences))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_function_acls(self, schema, function_signatures):
+ funcnames = [f.split('(', 1)[0] for f in function_signatures]
+ query = """SELECT proacl
+ FROM pg_catalog.pg_proc p
+ JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
+ WHERE nspname = %s AND proname = ANY (%s)
+ ORDER BY proname, proargtypes"""
+ self.cursor.execute(query, (schema, funcnames))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_schema_acls(self, schemas):
+ query = """SELECT nspacl FROM pg_catalog.pg_namespace
+ WHERE nspname = ANY (%s) ORDER BY nspname"""
+ self.cursor.execute(query, (schemas,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_language_acls(self, languages):
+ query = """SELECT lanacl FROM pg_catalog.pg_language
+ WHERE lanname = ANY (%s) ORDER BY lanname"""
+ self.cursor.execute(query, (languages,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_tablespace_acls(self, tablespaces):
+ query = """SELECT spcacl FROM pg_catalog.pg_tablespace
+ WHERE spcname = ANY (%s) ORDER BY spcname"""
+ self.cursor.execute(query, (tablespaces,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_database_acls(self, databases):
+ query = """SELECT datacl FROM pg_catalog.pg_database
+ WHERE datname = ANY (%s) ORDER BY datname"""
+ self.cursor.execute(query, (databases,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_group_memberships(self, groups):
+ query = """SELECT roleid, grantor, member, admin_option
+ FROM pg_catalog.pg_auth_members am
+ JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
+ WHERE r.rolname = ANY(%s)
+ ORDER BY roleid, grantor, member"""
+ self.cursor.execute(query, (groups,))
+ return self.cursor.fetchall()
+
+ def get_default_privs(self, schema, *args):
+ query = """SELECT defaclacl
+ FROM pg_default_acl a
+ JOIN pg_namespace b ON a.defaclnamespace=b.oid
+ WHERE b.nspname = %s;"""
+ self.cursor.execute(query, (schema,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_data_wrapper_acls(self, fdws):
+ query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (%s) ORDER BY fdwname"""
+ self.cursor.execute(query, (fdws,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_foreign_server_acls(self, fs):
+ query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (%s) ORDER BY srvname"""
+ self.cursor.execute(query, (fs,))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ def get_type_acls(self, schema, types):
+ query = """SELECT t.typacl FROM pg_catalog.pg_type t
+ JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
+ WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
+ self.cursor.execute(query, (schema, types))
+ return [t[0] for t in self.cursor.fetchall()]
+
+ # Manipulating privileges
+
+ def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
+ state, grant_option, schema_qualifier=None, fail_on_role=True, usage_on_types=True):
+ """Manipulate database object privileges.
+
+ :param obj_type: Type of database object to grant/revoke
+ privileges for.
+ :param privs: Either a list of privileges to grant/revoke
+ or None if type is "group".
+ :param objs: List of database objects to grant/revoke
+ privileges for.
+ :param roles: Either a list of role names or "PUBLIC"
+ for the implicitly defined "PUBLIC" group
+ :param target_roles: List of role names to grant/revoke
+ default privileges as.
+ :param state: "present" to grant privileges, "absent" to revoke.
+ :param grant_option: Only for state "present": If True, set
+ grant/admin option. If False, revoke it.
+ If None, don't change grant option.
+ :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
+ "FUNCTION") must be qualified by schema.
+                                 Ignored for other types.
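+        :param fail_on_role: If True, fail when a role in ``roles`` does not
+                             exist; otherwise warn and skip it.
+        :param usage_on_types: If True, implicitly add ``USAGE ON TYPES`` when
+                               granting default privileges.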
+ """
+ # get_status: function to get current status
+ if obj_type == 'table':
+ get_status = partial(self.get_table_acls, schema_qualifier)
+ elif obj_type == 'sequence':
+ get_status = partial(self.get_sequence_acls, schema_qualifier)
+ elif obj_type in ('function', 'procedure'):
+ get_status = partial(self.get_function_acls, schema_qualifier)
+ elif obj_type == 'schema':
+ get_status = self.get_schema_acls
+ elif obj_type == 'language':
+ get_status = self.get_language_acls
+ elif obj_type == 'tablespace':
+ get_status = self.get_tablespace_acls
+ elif obj_type == 'database':
+ get_status = self.get_database_acls
+ elif obj_type == 'group':
+ get_status = self.get_group_memberships
+ elif obj_type == 'default_privs':
+ get_status = partial(self.get_default_privs, schema_qualifier)
+ elif obj_type == 'foreign_data_wrapper':
+ get_status = self.get_foreign_data_wrapper_acls
+ elif obj_type == 'foreign_server':
+ get_status = self.get_foreign_server_acls
+ elif obj_type == 'type':
+ get_status = partial(self.get_type_acls, schema_qualifier)
+ else:
+ raise Error('Unsupported database object type "%s".' % obj_type)
+
+ # Return False (nothing has changed) if there are no objs to work on.
+ if not objs:
+ return False
+
+ # obj_ids: quoted db object identifiers (sometimes schema-qualified)
+ if obj_type in ('function', 'procedure'):
+ obj_ids = []
+ for obj in objs:
+ try:
+ f, args = obj.split('(', 1)
+ except Exception:
+ raise Error('Illegal function / procedure signature: "%s".' % obj)
+ obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
+ elif obj_type in ['table', 'sequence', 'type']:
+ obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
+ else:
+ obj_ids = ['"%s"' % o for o in objs]
+
+ # set_what: SQL-fragment specifying what to set for the target roles:
+ # Either group membership or privileges on objects of a certain type
+ if obj_type == 'group':
+ set_what = ','.join(obj_ids)
+ elif obj_type == 'default_privs':
+ # We don't want privs to be quoted here
+ set_what = ','.join(privs)
+ else:
+ # function types are already quoted above
+ if obj_type not in ('function', 'procedure'):
+ obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
+ # Note: obj_type has been checked against a set of string literals
+ # and privs was escaped when it was parsed
+ # Note: Underscores are replaced with spaces to support multi-word obj_type
+ set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
+ ','.join(obj_ids))
+
+ # for_whom: SQL-fragment specifying for whom to set the above
+ if roles == 'PUBLIC':
+ for_whom = 'PUBLIC'
+ else:
+ for_whom = []
+ for r in roles:
+ if not role_exists(self.module, self.cursor, r):
+ if fail_on_role:
+ self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
+
+ else:
+                        self.module.warn("Role '%s' does not exist, skipping it" % r.strip())
+ else:
+ for_whom.append('"%s"' % r)
+
+ if not for_whom:
+ return False
+
+ for_whom = ','.join(for_whom)
+
+ # as_who:
+ as_who = None
+ if target_roles:
+ as_who = ','.join('"%s"' % r for r in target_roles)
+
+ if schema_qualifier:
+ schema_qualifier = '"%s"' % schema_qualifier
+
+ status_before = get_status(objs)
+
+ query = QueryBuilder(state) \
+ .for_objtype(obj_type) \
+ .with_grant_option(grant_option) \
+ .for_whom(for_whom) \
+ .as_who(as_who) \
+ .for_schema(schema_qualifier) \
+ .set_what(set_what) \
+ .for_objs(objs) \
+ .usage_on_types(usage_on_types) \
+ .build()
+
+ executed_queries.append(query)
+ self.cursor.execute(query)
+ if roles == 'PUBLIC':
+ return True
+
+ status_after = get_status(objs)
+
+ def nonesorted(e):
+            # Python 3 can fail when trying to compare
+            # NoneType elements with the sort method.
+ if e is None:
+ return ''
+ return e
+
+ status_before.sort(key=nonesorted)
+ status_after.sort(key=nonesorted)
+ return status_before != status_after
+
+
+class QueryBuilder(object):
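+    """Assemble GRANT/REVOKE/ALTER DEFAULT PRIVILEGES statements.
+
+    The pieces of the statement are supplied through the chainable
+    for_objtype(), with_grant_option(), for_whom(), as_who(), for_schema(),
+    set_what(), for_objs() and usage_on_types() methods; build() then
+    returns the resulting queries joined by newlines.
+    """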
+ def __init__(self, state):
+ self._grant_option = None
+ self._for_whom = None
+ self._as_who = None
+ self._set_what = None
+ self._obj_type = None
+ self._state = state
+ self._schema = None
+ self._objs = None
+ self._usage_on_types = None
+ self.query = []
+
+ def for_objs(self, objs):
+ self._objs = objs
+ return self
+
+ def for_schema(self, schema):
+ self._schema = schema
+ return self
+
+ def with_grant_option(self, option):
+ self._grant_option = option
+ return self
+
+ def for_whom(self, who):
+ self._for_whom = who
+ return self
+
+ def usage_on_types(self, usage_on_types):
+ self._usage_on_types = usage_on_types
+ return self
+
+ def as_who(self, target_roles):
+ self._as_who = target_roles
+ return self
+
+ def set_what(self, what):
+ self._set_what = what
+ return self
+
+ def for_objtype(self, objtype):
+ self._obj_type = objtype
+ return self
+
+ def build(self):
+        if self._state == 'present':
+            self.build_present()
+        else:
+            self.build_absent()
+ return '\n'.join(self.query)
+
+ def add_default_revoke(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+
+ def add_grant_option(self):
+ if self._grant_option:
+ if self._obj_type == 'group':
+ self.query[-1] += ' WITH ADMIN OPTION;'
+ else:
+ self.query[-1] += ' WITH GRANT OPTION;'
+ elif self._grant_option is False:
+ self.query[-1] += ';'
+ if self._obj_type == 'group':
+ self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ elif not self._obj_type == 'default_privs':
+ self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
+ else:
+ self.query[-1] += ';'
+
+ def add_default_priv(self):
+ for obj in self._objs:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
+ self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
+ self._set_what,
+ obj,
+ self._for_whom))
+ self.add_grant_option()
+
+ if self._usage_on_types:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
+ self._schema,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
+ self.add_grant_option()
+
+ def build_present(self):
+ if self._obj_type == 'default_privs':
+ self.add_default_revoke()
+ self.add_default_priv()
+ else:
+ self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
+ self.add_grant_option()
+
+ def build_absent(self):
+ if self._obj_type == 'default_privs':
+ self.query = []
+ for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
+ if self._as_who:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
+ self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append(
+ 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
+ self._for_whom))
+ else:
+ self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ database=dict(required=True, aliases=['db', 'login_db']),
+ state=dict(default='present', choices=['present', 'absent']),
+ privs=dict(required=False, aliases=['priv']),
+ type=dict(default='table',
+ choices=['table',
+ 'sequence',
+ 'function',
+ 'procedure',
+ 'database',
+ 'schema',
+ 'language',
+ 'tablespace',
+ 'group',
+ 'default_privs',
+ 'foreign_data_wrapper',
+ 'foreign_server',
+ 'type', ]),
+ objs=dict(required=False, aliases=['obj']),
+ schema=dict(required=False),
+ roles=dict(required=True, aliases=['role']),
+ session_role=dict(required=False),
+ target_roles=dict(required=False),
+ grant_option=dict(required=False, type='bool',
+ aliases=['admin_option']),
+ host=dict(default='', aliases=['login_host']),
+ unix_socket=dict(default='', aliases=['login_unix_socket']),
+ login=dict(default='postgres', aliases=['login_user']),
+ password=dict(default='', aliases=['login_password'], no_log=True),
+ fail_on_role=dict(type='bool', default=True),
+ trust_input=dict(type='bool', default=True),
+ usage_on_types=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ fail_on_role = module.params['fail_on_role']
+ usage_on_types = module.params['usage_on_types']
+
+ # Create type object as namespace for module params
+ p = type('Params', (), module.params)
+ # param "schema": default, allowed depends on param "type"
+ if p.type in ['table', 'sequence', 'function', 'procedure', 'type', 'default_privs']:
+ p.schema = p.schema or 'public'
+ elif p.schema:
+ module.fail_json(msg='Argument "schema" is not allowed '
+ 'for type "%s".' % p.type)
+
+ # param "objs": default, required depends on param "type"
+ if p.type == 'database':
+ p.objs = p.objs or p.database
+ elif not p.objs:
+ module.fail_json(msg='Argument "objs" is required '
+ 'for type "%s".' % p.type)
+
+ # param "privs": allowed, required depends on param "type"
+ if p.type == 'group':
+ if p.privs:
+ module.fail_json(msg='Argument "privs" is not allowed '
+ 'for type "group".')
+ elif not p.privs:
+ module.fail_json(msg='Argument "privs" is required '
+ 'for type "%s".' % p.type)
+
+ # Check input
+ if not p.trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, p.roles, p.target_roles, p.session_role, p.schema)
+
+ # Connect to Database
+ if not psycopg2:
+ module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
+ try:
+ conn = Connection(p, module)
+ except psycopg2.Error as e:
+ module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
+ except TypeError as e:
+ if 'sslrootcert' in e.args[0]:
+            module.fail_json(msg='PostgreSQL server must be at least version 8.4 to support sslrootcert')
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+ except ValueError as e:
+ # We raise this when the psycopg library is too old
+ module.fail_json(msg=to_native(e))
+
+ if p.session_role:
+ try:
+ conn.cursor.execute('SET ROLE "%s"' % p.session_role)
+ except Exception as e:
+ module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ # privs
+ if p.privs:
+ privs = frozenset(pr.upper() for pr in p.privs.split(','))
+ if not privs.issubset(VALID_PRIVS):
+ module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
+ else:
+ privs = None
+ # objs:
+ if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_tables_in_schema(p.schema)
+ elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_sequences_in_schema(p.schema)
+ elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_functions_in_schema(p.schema)
+ elif p.type == 'procedure' and p.objs == 'ALL_IN_SCHEMA':
+ objs = conn.get_all_procedures_in_schema(p.schema)
+ elif p.type == 'default_privs':
+ if p.objs == 'ALL_DEFAULT':
+ objs = frozenset(VALID_DEFAULT_OBJS.keys())
+ else:
+ objs = frozenset(obj.upper() for obj in p.objs.split(','))
+ if not objs.issubset(VALID_DEFAULT_OBJS):
+ module.fail_json(
+ msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
+ # Again, do we have valid privs specified for object type:
+ valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
+ if not valid_objects_for_priv == objs:
+ module.fail_json(
+ msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
+ valid_objects_for_priv, objs))
+ else:
+ objs = p.objs.split(',')
+
+ # function signatures are encoded using ':' to separate args
+ if p.type in ('function', 'procedure'):
+ objs = [obj.replace(':', ',') for obj in objs]
+
+ # roles
+ if p.roles.upper() == 'PUBLIC':
+ roles = 'PUBLIC'
+ else:
+ roles = p.roles.split(',')
+
+            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
+                if fail_on_role:
+                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
+
+                else:
+                    module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
+                    module.exit_json(changed=False)
+
+ # check if target_roles is set with type: default_privs
+ if p.target_roles and not p.type == 'default_privs':
+ module.warn('"target_roles" will be ignored '
+ 'Argument "type: default_privs" is required for usage of "target_roles".')
+
+ # target roles
+ if p.target_roles:
+ target_roles = p.target_roles.split(',')
+ else:
+ target_roles = None
+
+ changed = conn.manipulate_privs(
+ obj_type=p.type,
+ privs=privs,
+ objs=objs,
+ roles=roles,
+ target_roles=target_roles,
+ state=p.state,
+ grant_option=p.grant_option,
+ schema_qualifier=p.schema,
+ fail_on_role=fail_on_role,
+ usage_on_types=usage_on_types,
+ )
+
+ except Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ except psycopg2.Error as e:
+ conn.rollback()
+ module.fail_json(msg=to_native(e))
+
+ if module.check_mode or not changed:
+ conn.rollback()
+ else:
+ conn.commit()
+ module.exit_json(changed=changed, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py
new file mode 100644
index 00000000..1db80adc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_publication.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_publication
+short_description: Add, update, or remove PostgreSQL publication
+description:
+- Add, update, or remove PostgreSQL publication.
+options:
+ name:
+ description:
+ - Name of the publication to add, update, or remove.
+ required: true
+ type: str
+ db:
+ description:
+ - Name of the database to connect to and where
+ the publication state will be changed.
+ aliases: [ login_db ]
+ type: str
+ tables:
+ description:
+ - List of tables to add to the publication.
+ - If no value is set all tables are targeted.
+    - If the publication already exists for specific tables and I(tables) is not passed,
+      nothing will be changed. If you need a publication with the same name that targets all tables,
+      drop the existing one and create a new one without passing I(tables).
+ type: list
+ elements: str
+ state:
+ description:
+ - The publication state.
+ default: present
+ choices: [ absent, present ]
+ type: str
+ parameters:
+ description:
+ - Dictionary with optional publication parameters.
+ - Available parameters depend on PostgreSQL version.
+ type: dict
+ owner:
+ description:
+ - Publication owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ type: str
+ cascade:
+ description:
+ - Drop publication dependencies. Has effect with I(state=absent) only.
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(tables), I(owner),
+ I(session_role), I(params) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- PostgreSQL version must be 10 or greater.
+seealso:
+- name: CREATE PUBLICATION reference
+ description: Complete reference of the CREATE PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createpublication.html
+- name: ALTER PUBLICATION reference
+ description: Complete reference of the ALTER PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterpublication.html
+- name: DROP PUBLICATION reference
+ description: Complete reference of the DROP PUBLICATION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droppublication.html
+author:
+- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new publication with name "acme" targeting all tables in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+
+- name: Create publication "acme" publishing only prices and vehicles tables.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+
+- name: >
+ Create publication "acme", set user alice as an owner, targeting all tables.
+ Allowable DML operations are INSERT and UPDATE only
+ community.general.postgresql_publication:
+ name: acme
+ owner: alice
+ parameters:
+ publish: 'insert,update'
+
+- name: >
+ Assuming publication "acme" exists and there are targeted
+ tables "prices" and "vehicles", add table "stores" to the publication.
+ community.general.postgresql_publication:
+ name: acme
+ tables:
+ - prices
+ - vehicles
+ - stores
+
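+# A hypothetical sketch (names are illustrative): create the publication while
+# letting the module check the input parameters for dangerous content
+# (trust_input: no).
+- name: Create publication "acme" with input checking enabled
+  community.general.postgresql_publication:
+    db: test
+    name: acme
+    trust_input: no
+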
+- name: Remove publication "acme" if exists in database "test".
+ community.general.postgresql_publication:
+ db: test
+ name: acme
+ state: absent
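+
+# A hypothetical sketch (names are illustrative): drop the publication
+# together with objects that depend on it.
+- name: Remove publication "acme" and its dependencies from database "test"
+  community.general.postgresql_publication:
+    db: test
+    name: acme
+    state: absent
+    cascade: yes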
+'''
+
+RETURN = r'''
+exists:
+ description:
+  - Flag indicating whether the publication exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+  type: list
+ sample: [ 'DROP PUBLICATION "acme" CASCADE' ]
+owner:
+ description: Owner of the publication at the end of runtime.
+ returned: if publication exists
+ type: str
+ sample: "alice"
+tables:
+ description:
+ - List of tables in the publication at the end of runtime.
+ - If all tables are published, returns empty list.
+ returned: if publication exists
+ type: list
+ sample: ["\"public\".\"prices\"", "\"public\".\"vehicles\""]
+alltables:
+ description:
+  - Flag indicating that all tables are published.
+ returned: if publication exists
+ type: bool
+ sample: false
+parameters:
+ description: Publication parameters at the end of runtime.
+ returned: if publication exists
+ type: dict
+ sample: {'publish': {'insert': false, 'delete': false, 'update': true}}
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
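+# Version threshold used by the check below; the module requires PostgreSQL 10 or greater.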
+SUPPORTED_PG_VERSION = 10000
+
+
+################################
+# Module functions and classes #
+################################
+
+def transform_tables_representation(tbl_list):
+ """Add 'public.' to names of tables where a schema identifier is absent
+ and add quotes to each element.
+
+ Args:
+ tbl_list (list): List of table names.
+
+ Returns:
+ tbl_list (list): Changed list.
+ """
+ for i, table in enumerate(tbl_list):
+ if '.' not in table:
+ tbl_list[i] = pg_quote_identifier('public.%s' % table.strip(), 'table')
+ else:
+ tbl_list[i] = pg_quote_identifier(table.strip(), 'table')
+
+ return tbl_list
+
+
+class PgPublication():
+ """Class to work with PostgreSQL publication.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the publication.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of the publication.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with publication attributes.
+        exists (bool): Flag indicating whether the publication exists.
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.executed_queries = []
+ self.attrs = {
+ 'alltables': False,
+ 'tables': [],
+ 'parameters': {},
+ 'owner': '',
+ }
+ self.exists = self.check_pub()
+
+ def get_info(self):
+ """Refresh the publication information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_pub()
+ return self.attrs
+
+ def check_pub(self):
+ """Check the publication and refresh ``self.attrs`` publication attribute.
+
+ Returns:
+ True if the publication with ``self.name`` exists, False otherwise.
+ """
+
+ pub_info = self.__get_general_pub_info()
+
+ if not pub_info:
+ # Publication does not exist:
+ return False
+
+ self.attrs['owner'] = pub_info.get('pubowner')
+
+ # Publication DML operations:
+ self.attrs['parameters']['publish'] = {}
+ self.attrs['parameters']['publish']['insert'] = pub_info.get('pubinsert', False)
+ self.attrs['parameters']['publish']['update'] = pub_info.get('pubupdate', False)
+ self.attrs['parameters']['publish']['delete'] = pub_info.get('pubdelete', False)
+ if pub_info.get('pubtruncate'):
+ self.attrs['parameters']['publish']['truncate'] = pub_info.get('pubtruncate')
+
+ # If alltables flag is False, get the list of targeted tables:
+ if not pub_info.get('puballtables'):
+ table_info = self.__get_tables_pub_info()
+ # Join sublists [['schema', 'table'], ...] to ['schema.table', ...]
+ # for better representation:
+ for i, schema_and_table in enumerate(table_info):
+ table_info[i] = pg_quote_identifier('.'.join(schema_and_table), 'table')
+
+ self.attrs['tables'] = table_info
+ else:
+ self.attrs['alltables'] = True
+
+ # Publication exists:
+ return True
+
+ def create(self, tables, params, owner, check_mode=True):
+ """Create the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be added to the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been created, otherwise False.
+ """
+ changed = True
+
+ query_fragments = ["CREATE PUBLICATION %s" % pg_quote_identifier(self.name, 'publication')]
+
+ if tables:
+ query_fragments.append("FOR TABLE %s" % ', '.join(tables))
+ else:
+ query_fragments.append("FOR ALL TABLES")
+
+ if params:
+ params_list = []
+ # Make list ["param = 'value'", ...] from params dict:
+ for (key, val) in iteritems(params):
+ params_list.append("%s = '%s'" % (key, val))
+
+ # Add the list to query_fragments:
+ query_fragments.append("WITH (%s)" % ', '.join(params_list))
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ if owner:
+ # If check_mode, just add possible SQL to
+ # executed_queries and return:
+ self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def update(self, tables, params, owner, check_mode=True):
+ """Update the publication.
+
+ Args:
+ tables (list): List with names of the tables that need to be presented in the publication.
+ params (dict): Dict contains optional publication parameters and their values.
+ owner (str): Name of the publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if publication has been updated, otherwise False.
+ """
+ changed = False
+
+        # Add or drop tables from the set of published tables:
+ if tables and not self.attrs['alltables']:
+
+            # 1. If a passed table is not yet in the publication, add it:
+            for tbl in tables:
+                if tbl not in self.attrs['tables']:
+                    changed = self.__pub_add_table(tbl, check_mode=check_mode)
+
+            # 2. If a table is currently targeted by the publication
+            # but is not present in the passed tables, drop it:
+ for tbl in self.attrs['tables']:
+ if tbl not in tables:
+ changed = self.__pub_drop_table(tbl, check_mode=check_mode)
+
+ elif tables and self.attrs['alltables']:
+ changed = self.__pub_set_tables(tables, check_mode=check_mode)
+
+ # Update pub parameters:
+ if params:
+ for key, val in iteritems(params):
+ if self.attrs['parameters'].get(key):
+
+                    # In PostgreSQL 10/11, 'publish' is the only optional parameter present.
+ if key == 'publish':
+ # 'publish' value can be only a string with comma-separated items
+ # of allowed DML operations like 'insert,update' or
+ # 'insert,update,delete', etc.
+ # Make dictionary to compare with current attrs later:
+ val_dict = self.attrs['parameters']['publish'].copy()
+ val_list = val.split(',')
+ for v in val_dict:
+ if v in val_list:
+ val_dict[v] = True
+ else:
+ val_dict[v] = False
+
+ # Compare val_dict and the dict with current 'publish' parameters,
+ # if they're different, set new values:
+ if val_dict != self.attrs['parameters']['publish']:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Default behavior for other cases:
+ elif self.attrs['parameters'][key] != val:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ else:
+ # If the parameter was not set before:
+ changed = self.__pub_set_param(key, val, check_mode=check_mode)
+
+ # Update pub owner:
+ if owner:
+ if owner != self.attrs['owner']:
+ changed = self.__pub_set_owner(owner, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the publication.
+
+ Kwargs:
+ cascade (bool): Flag indicates that publication needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+            changed (bool): True if publication has been dropped, otherwise False.
+ """
+ if self.exists:
+ query_fragments = []
+ query_fragments.append("DROP PUBLICATION %s" % pg_quote_identifier(self.name, 'publication'))
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def __get_general_pub_info(self):
+ """Get and return general publication information.
+
+ Returns:
+ Dict with publication information if successful, False otherwise.
+ """
+ # Check pg_publication.pubtruncate exists (supported from PostgreSQL 11):
+ pgtrunc_sup = exec_sql(self, ("SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_publication' "
+ "AND column_name = 'pubtruncate'"), add_to_executed=False)
+
+ if pgtrunc_sup:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete, p.pubtruncate FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+ else:
+ query = ("SELECT r.rolname AS pubowner, p.puballtables, p.pubinsert, "
+ "p.pubupdate , p.pubdelete FROM pg_publication AS p "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON p.pubowner = r.oid "
+ "WHERE p.pubname = %(pname)s")
+
+ result = exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __get_tables_pub_info(self):
+ """Get and return tables that are published by the publication.
+
+ Returns:
+ List of dicts with published tables.
+ """
+ query = ("SELECT schemaname, tablename "
+ "FROM pg_publication_tables WHERE pubname = %(pname)s")
+ return exec_sql(self, query, query_params={'pname': self.name}, add_to_executed=False)
+
+ def __pub_add_table(self, table, check_mode=False):
+ """Add a table to the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s ADD TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_drop_table(self, table, check_mode=False):
+ """Drop a table from the publication.
+
+ Args:
+ table (str): Table name.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s DROP TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ pg_quote_identifier(table, 'table')))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_tables(self, tables, check_mode=False):
+ """Set a table suit that need to be published by the publication.
+
+ Args:
+ tables (list): List of tables.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ quoted_tables = [pg_quote_identifier(t, 'table') for t in tables]
+ query = ("ALTER PUBLICATION %s SET TABLE %s" % (pg_quote_identifier(self.name, 'publication'),
+ ', '.join(quoted_tables)))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_param(self, param, value, check_mode=False):
+ """Set an optional publication parameter.
+
+ Args:
+ param (str): Name of the parameter.
+ value (str): Parameter value.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ("ALTER PUBLICATION %s SET (%s = '%s')" % (pg_quote_identifier(self.name, 'publication'),
+ param, value))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __pub_set_owner(self, role, check_mode=False):
+ """Set a publication owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a publication owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = ('ALTER PUBLICATION %s '
+ 'OWNER TO "%s"' % (pg_quote_identifier(self.name, 'publication'), role))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+        Note: If we just need to get information from the database,
+        we use the ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(required=True),
+ db=dict(type='str', aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tables=dict(type='list', elements='str'),
+ parameters=dict(type='dict'),
+ owner=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ name = module.params['name']
+ state = module.params['state']
+ tables = module.params['tables']
+ params = module.params['parameters']
+ owner = module.params['owner']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not params:
+ params_list = None
+ else:
+ params_list = ['%s = %s' % (k, v) for k, v in iteritems(params)]
+
+ check_input(module, name, tables, owner, session_role, params_list)
+
+ if state == 'absent':
+ if tables:
+ module.warn('parameter "tables" is ignored when "state=absent"')
+ if params:
+ module.warn('parameter "parameters" is ignored when "state=absent"')
+ if owner:
+ module.warn('parameter "owner" is ignored when "state=absent"')
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when "state=present"')
+
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ # We check publication state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Nothing was changed by default:
+ changed = False
+
+ ###################################
+ # Create object and do rock'n'roll:
+ publication = PgPublication(module, cursor, name)
+
+ if tables:
+ tables = transform_tables_representation(tables)
+
+ # If module.check_mode=True, nothing will be changed:
+ if state == 'present':
+ if not publication.exists:
+ changed = publication.create(tables, params, owner, check_mode=module.check_mode)
+
+ else:
+ changed = publication.update(tables, params, owner, check_mode=module.check_mode)
+
+ elif state == 'absent':
+ changed = publication.drop(cascade=cascade, check_mode=module.check_mode)
+
+ # Get final publication info:
+ pub_fin_info = {}
+ if state == 'present' or (state == 'absent' and module.check_mode):
+ pub_fin_info = publication.get_info()
+ elif state == 'absent' and not module.check_mode:
+ publication.exists = False
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Update publication info and return ret values:
+ module.exit_json(changed=changed, queries=publication.executed_queries, exists=publication.exists, **pub_fin_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py
new file mode 100644
index 00000000..e231fbd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_query.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Felix Archambault
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_query
+short_description: Run PostgreSQL queries
+description:
+- Runs arbitrary PostgreSQL queries.
+- Can run queries from SQL script files.
+- Does not run against backup files. Use M(community.general.postgresql_db) with I(state=restore)
+ to run queries on files made by pg_dump/pg_dumpall utilities.
+options:
+ query:
+ description:
+ - SQL query to run. Variables can be escaped with psycopg2 syntax
+ U(http://initd.org/psycopg/docs/usage.html).
+ type: str
+ positional_args:
+ description:
+ - List of values to be passed as positional arguments to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(named_args).
+ type: list
+ elements: raw
+ named_args:
+ description:
+ - Dictionary of key-value arguments to pass to the query.
+ When the value is a list, it will be converted to PostgreSQL array.
+ - Mutually exclusive with I(positional_args).
+ type: dict
+ path_to_script:
+ description:
+ - Path to a SQL script on the target machine.
+ - If the script contains several queries, they must be semicolon-separated.
+ - Mutually exclusive with I(query).
+ type: path
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ autocommit:
+ description:
+ - Execute in autocommit mode when the query can't be run inside a transaction block
+ (e.g., VACUUM).
+ - Mutually exclusive with I(check_mode).
+ type: bool
+ default: no
+ encoding:
+ description:
+ - Set the client encoding for the current session (e.g. C(UTF-8)).
+ - The default is the encoding defined by the database.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether a value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ search_path:
+ description:
+ - List of schema names to look in.
+ type: list
+ elements: str
+ version_added: '1.0.0'
+seealso:
+- module: community.general.postgresql_db
+- name: PostgreSQL Schema reference
+ description: Complete reference of the PostgreSQL schema documentation.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+author:
+- Felix Archambault (@archf)
+- Andrew Klychkov (@Andersson007)
+- Will Rouesnel (@wrouesnel)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Simple select query to acme db
+ community.general.postgresql_query:
+ db: acme
+ query: SELECT version()
+
+- name: Select query to db acme with positional arguments and non-default credentials
+ community.general.postgresql_query:
+ db: acme
+ login_user: django
+ login_password: mysecretpass
+ query: SELECT * FROM acme WHERE id = %s AND story = %s
+ positional_args:
+ - 1
+ - test
+
+- name: Select query to test_db with named_args
+ community.general.postgresql_query:
+ db: test_db
+ query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: test
+
+- name: Insert query to test_table in db test_db
+ community.general.postgresql_query:
+ db: test_db
+ query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
+
+- name: Run queries from SQL script using UTF-8 client encoding for session
+ community.general.postgresql_query:
+ db: test_db
+ path_to_script: /var/lib/pgsql/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+
+- name: Example of using autocommit parameter
+ community.general.postgresql_query:
+ db: test_db
+ query: VACUUM
+ autocommit: yes
+
+- name: >
+    Insert data into a column of array type using positional_args.
+    Note that we use quotes here, the same as for passing JSON, etc.
+ community.general.postgresql_query:
+ query: INSERT INTO test_table (array_column) VALUES (%s)
+ positional_args:
+ - '{1,2,3}'
+
+# Pass list and string vars as positional_args
+- name: Set vars
+ ansible.builtin.set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+
+- name: Select from test table by passing positional_args as arrays
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
+ positional_args:
+ - '{{ my_list }}'
+ - '{{ my_arr|string }}'
+
+# Select from test table looking into app1 schema first, then,
+# if the schema doesn't exist or the table hasn't been found there,
+# try to find it in the schema public
+- name: Select from test using search_path
+ community.general.postgresql_query:
+ query: SELECT * FROM test_array_table
+ search_path:
+ - app1
+ - public
+'''
+
+RETURN = r'''
+query:
+ description:
+ - Executed query.
+ - When reading several queries from a file, it contains only the last one.
+ returned: always
+ type: str
+ sample: 'SELECT * FROM bar'
+statusmessage:
+ description:
+ - Attribute containing the message returned by the command.
+  - When reading several queries from a file, it contains the message of the last one.
+ returned: always
+ type: str
+ sample: 'INSERT 0 1'
+query_result:
+ description:
+ - List of dictionaries in column:value form representing returned rows.
+  - When running queries from a file, returns the result of the last query.
+ returned: always
+ type: list
+ elements: dict
+ sample: [{"Column": "Value1"},{"Column": "Value2"}]
+query_list:
+ description:
+ - List of executed queries.
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: str
+ sample: ['SELECT * FROM foo', 'SELECT * FROM bar']
+query_all_results:
+ description:
+ - List containing results of all queries executed (one sublist for every query).
+ Useful when reading several queries from a file.
+ returned: always
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"},{"Column": "Value2"}], [{"Column": "Value1"},{"Column": "Value2"}]]
+rowcount:
+ description:
+ - Number of produced or affected rows.
+ - When using a script with multiple queries,
+    it contains the total number of produced or affected rows.
+ returned: changed
+ type: int
+ sample: 5
+'''
+
+try:
+ from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
+ from psycopg2.extras import DictCursor
+except ImportError:
+    # This import is needed for checking 'no results to fetch' in main();
+    # psycopg2 availability itself is checked by connect_to_db() from
+    # ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# Module execution.
+#
+
+def list_to_pg_array(elem):
+ """Convert the passed list to PostgreSQL array
+ represented as a string.
+
+ Args:
+ elem (list): List that needs to be converted.
+
+ Returns:
+ elem (str): String representation of PostgreSQL array.
+ """
+ elem = str(elem).strip('[]')
+ elem = '{' + elem + '}'
+ return elem
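+
+# Illustrative sketch (not part of the module's execution path): for example,
+# list_to_pg_array([1, 2, 3]) returns the string '{1, 2, 3}', which PostgreSQL
+# accepts as an array literal when bound as a parameter, e.g. (with a hypothetical
+# table 't' and column 'arr'):
+#   cursor.execute("INSERT INTO t (arr) VALUES (%s)", (list_to_pg_array([1, 2, 3]),))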
+
+
+def convert_elements_to_pg_arrays(obj):
+ """Convert list elements of the passed object
+ to PostgreSQL arrays represented as strings.
+
+ Args:
+ obj (dict or list): Object whose elements need to be converted.
+
+ Returns:
+ obj (dict or list): Object with converted elements.
+ """
+ if isinstance(obj, dict):
+ for (key, elem) in iteritems(obj):
+ if isinstance(elem, list):
+ obj[key] = list_to_pg_array(elem)
+
+ elif isinstance(obj, list):
+ for i, elem in enumerate(obj):
+ if isinstance(elem, list):
+ obj[i] = list_to_pg_array(elem)
+
+ return obj
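+
+# Illustrative sketch (assumed values): only list elements are converted, so
+#   {'ids': [1, 2, 3], 'story': 'test'}  becomes  {'ids': '{1, 2, 3}', 'story': 'test'}
+# and
+#   [[1, 2], 'test']  becomes  ['{1, 2}', 'test']
+# while all non-list values are left untouched.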
+
+
+def set_search_path(cursor, search_path):
+ """Set session's search_path.
+
+ Args:
+ cursor (Psycopg2 cursor): Database cursor object.
+ search_path (str): String containing comma-separated schema names.
+ """
+ cursor.execute('SET search_path TO %s' % search_path)
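+
+# Illustrative sketch (schema names taken from the EXAMPLES above): for
+# search_path=['app1', 'public'] the module calls
+# set_search_path(cursor, 'app1,public'), which executes
+# "SET search_path TO app1,public" for the current session.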
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ query=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ positional_args=dict(type='list', elements='raw'),
+ named_args=dict(type='dict'),
+ session_role=dict(type='str'),
+ path_to_script=dict(type='path'),
+ autocommit=dict(type='bool', default=False),
+ encoding=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ search_path=dict(type='list', elements='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=(('positional_args', 'named_args'),),
+ supports_check_mode=True,
+ )
+
+ query = module.params["query"]
+ positional_args = module.params["positional_args"]
+ named_args = module.params["named_args"]
+ path_to_script = module.params["path_to_script"]
+ autocommit = module.params["autocommit"]
+ encoding = module.params["encoding"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+ search_path = module.params["search_path"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, session_role)
+
+ if autocommit and module.check_mode:
+ module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
+
+ if path_to_script and query:
+ module.fail_json(msg="path_to_script is mutually exclusive with query")
+
+ if positional_args:
+ positional_args = convert_elements_to_pg_arrays(positional_args)
+
+ elif named_args:
+ named_args = convert_elements_to_pg_arrays(named_args)
+
+ query_list = []
+ if path_to_script:
+ try:
+ with open(path_to_script, 'rb') as f:
+ query = to_native(f.read())
+ if ';' in query:
+ query_list = [q for q in query.split(';') if q != '\n']
+ else:
+ query_list.append(query)
+ except Exception as e:
+ module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
+ else:
+ query_list.append(query)
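+
+    # Illustrative sketch (assumed file content): a script containing
+    # "SELECT 1;\nSELECT 2" is split on ';' into ['SELECT 1', '\nSELECT 2'] and each
+    # chunk is executed in turn; a file without ';' is executed as a single query.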
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ if encoding is not None:
+ db_connection.set_client_encoding(encoding)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ if search_path:
+ set_search_path(cursor, '%s' % ','.join([x.strip(' ') for x in search_path]))
+
+ # Prepare args:
+ if module.params.get("positional_args"):
+ arguments = module.params["positional_args"]
+ elif module.params.get("named_args"):
+ arguments = module.params["named_args"]
+ else:
+ arguments = None
+
+ # Set defaults:
+ changed = False
+
+ query_all_results = []
+ rowcount = 0
+ statusmessage = ''
+
+ # Execute query:
+ for query in query_list:
+ try:
+ cursor.execute(query, arguments)
+ statusmessage = cursor.statusmessage
+ if cursor.rowcount > 0:
+ rowcount += cursor.rowcount
+
+ try:
+ query_result = [dict(row) for row in cursor.fetchall()]
+
+ except Psycopg2ProgrammingError as e:
+ if to_native(e) == 'no results to fetch':
+ query_result = {}
+
+ except Exception as e:
+ module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
+
+ query_all_results.append(query_result)
+
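+            # A hedged illustration of the command tags checked below (values assumed
+            # from typical PostgreSQL status messages): 'INSERT 0 1' splits into three
+            # words where the last one ('1') is the affected-row count, while
+            # 'UPDATE 0' or 'DELETE 0' split into two words; a non-zero count marks
+            # the task as changed.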
+ if 'SELECT' not in statusmessage:
+ if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
+ s = statusmessage.split()
+ if len(s) == 3:
+ if s[2] != '0':
+ changed = True
+
+ elif len(s) == 2:
+ if s[1] != '0':
+ changed = True
+
+ else:
+ changed = True
+
+ else:
+ changed = True
+
+ except Exception as e:
+ if not autocommit:
+ db_connection.rollback()
+
+ cursor.close()
+ db_connection.close()
+ module.fail_json(msg="Cannot execute SQL '%s' %s: %s, query list: %s" % (query, arguments, to_native(e), query_list))
+
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ if not autocommit:
+ db_connection.commit()
+
+ kw = dict(
+ changed=changed,
+ query=cursor.query,
+ query_list=query_list,
+ statusmessage=statusmessage,
+ query_result=query_result,
+ query_all_results=query_all_results,
+ rowcount=rowcount,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py
new file mode 100644
index 00000000..e7f28ecf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_schema.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_schema
+short_description: Add or remove PostgreSQL schema
+description:
+- Add or remove PostgreSQL schema.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ type: str
+ aliases:
+ - schema
+ database:
+ description:
+    - Name of the database to connect to, in which the schema will be added or removed.
+ type: str
+ default: postgres
+ aliases:
+ - db
+ - login_db
+ owner:
+ description:
+ - Name of the role to set as owner of the schema.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ - The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session_role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The schema state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ cascade_drop:
+ description:
+ - Drop schema with CASCADE to remove child objects.
+ type: bool
+ default: false
+ ssl_mode:
+ description:
+ - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(schema), I(owner), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+seealso:
+- name: PostgreSQL schemas
+ description: General information about PostgreSQL schemas.
+ link: https://www.postgresql.org/docs/current/ddl-schemas.html
+- name: CREATE SCHEMA reference
+ description: Complete reference of the CREATE SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createschema.html
+- name: ALTER SCHEMA reference
+ description: Complete reference of the ALTER SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-alterschema.html
+- name: DROP SCHEMA reference
+ description: Complete reference of the DROP SCHEMA command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropschema.html
+author:
+- Flavien Chantelot (@Dorn-) <contact@flavien.io>
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new schema with name acme in test database
+ community.general.postgresql_schema:
+ db: test
+ name: acme
+
+- name: Create a new schema acme with a user bob who will own it
+ community.general.postgresql_schema:
+ name: acme
+ owner: bob
+
+- name: Drop schema "acme" with cascade
+ community.general.postgresql_schema:
+ name: acme
+ state: absent
+ cascade_drop: yes
+'''
+
+RETURN = r'''
+schema:
+ description: Name of the schema.
+ returned: success, changed
+ type: str
+ sample: "acme"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ["CREATE SCHEMA \"acme\""]
+'''
+
+import traceback
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+ SQLParseError,
+)
+from ansible.module_utils._text import to_native
+
+executed_queries = []
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+def set_owner(cursor, schema, owner):
+ query = 'ALTER SCHEMA %s OWNER TO "%s"' % (
+ pg_quote_identifier(schema, 'schema'), owner)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+
+
+def get_schema_info(cursor, schema):
+ query = ("SELECT schema_owner AS owner "
+ "FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.fetchone()
+
+
+def schema_exists(cursor, schema):
+ query = ("SELECT schema_name FROM information_schema.schemata "
+ "WHERE schema_name = %(schema)s")
+ cursor.execute(query, {'schema': schema})
+ return cursor.rowcount == 1
+
+
+def schema_delete(cursor, schema, cascade):
+ if schema_exists(cursor, schema):
+ query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
+ if cascade:
+ query += " CASCADE"
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ return False
+
+
+def schema_create(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
+ if owner:
+ query_fragments.append('AUTHORIZATION "%s"' % owner)
+ query = ' '.join(query_fragments)
+ cursor.execute(query)
+ executed_queries.append(query)
+ return True
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return set_owner(cursor, schema, owner)
+ else:
+ return False
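+
+# Illustrative sketch (names taken from the EXAMPLES above): for schema='acme' and
+# owner='bob', schema_create() builds and records the query
+#   CREATE SCHEMA "acme" AUTHORIZATION "bob"
+# while changing only the owner of an existing schema goes through set_owner() as
+#   ALTER SCHEMA "acme" OWNER TO "bob"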
+
+
+def schema_matches(cursor, schema, owner):
+ if not schema_exists(cursor, schema):
+ return False
+ else:
+ schema_info = get_schema_info(cursor, schema)
+ if owner and owner != schema_info['owner']:
+ return False
+ else:
+ return True
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ schema=dict(type="str", required=True, aliases=['name']),
+ owner=dict(type="str", default=""),
+ database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
+ cascade_drop=dict(type="bool", default=False),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ session_role=dict(type="str"),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ schema = module.params["schema"]
+ owner = module.params["owner"]
+ state = module.params["state"]
+ cascade_drop = module.params["cascade_drop"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, schema, owner, session_role)
+
+ changed = False
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ if module.check_mode:
+ if state == "absent":
+ changed = not schema_exists(cursor, schema)
+ elif state == "present":
+ changed = not schema_matches(cursor, schema, owner)
+ module.exit_json(changed=changed, schema=schema)
+
+ if state == "absent":
+ try:
+ changed = schema_delete(cursor, schema, cascade_drop)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ elif state == "present":
+ try:
+ changed = schema_create(cursor, schema, owner)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # Avoid catching this on Python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ db_connection.close()
+ module.exit_json(changed=changed, schema=schema, queries=executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py
new file mode 100644
index 00000000..50cd628a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_sequence.py
@@ -0,0 +1,627 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_sequence
+short_description: Create, drop, or alter a PostgreSQL sequence
+description:
+- Allows you to create, drop, or change the definition of a sequence generator.
+options:
+ sequence:
+ description:
+ - The name of the sequence.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - The sequence state.
+    - If I(state=absent), other options will be ignored except I(name) and
+      I(schema).
+ default: present
+ choices: [ absent, present ]
+ type: str
+ data_type:
+ description:
+ - Specifies the data type of the sequence. Valid types are bigint, integer,
+ and smallint. bigint is the default. The data type determines the default
+ minimum and maximum values of the sequence. For more info see the
+ documentation
+ U(https://www.postgresql.org/docs/current/sql-createsequence.html).
+ - Supported from PostgreSQL 10.
+ choices: [ bigint, integer, smallint ]
+ type: str
+ increment:
+ description:
+ - Increment specifies which value is added to the current sequence value
+ to create a new value.
+ - A positive value will make an ascending sequence, a negative one a
+ descending sequence. The default value is 1.
+ type: int
+ minvalue:
+ description:
+ - Minvalue determines the minimum value a sequence can generate. The
+ default for an ascending sequence is 1. The default for a descending
+ sequence is the minimum value of the data type.
+ type: int
+ aliases:
+ - min
+ maxvalue:
+ description:
+ - Maxvalue determines the maximum value for the sequence. The default for
+ an ascending sequence is the maximum
+ value of the data type. The default for a descending sequence is -1.
+ type: int
+ aliases:
+ - max
+ start:
+ description:
+ - Start allows the sequence to begin anywhere. The default starting value
+ is I(minvalue) for ascending sequences and I(maxvalue) for descending
+ ones.
+ type: int
+ cache:
+ description:
+ - Cache specifies how many sequence numbers are to be preallocated and
+ stored in memory for faster access. The minimum value is 1 (only one
+ value can be generated at a time, i.e., no cache), and this is also
+ the default.
+ type: int
+ cycle:
+ description:
+ - The cycle option allows the sequence to wrap around when the I(maxvalue)
+ or I(minvalue) has been reached by an ascending or descending sequence
+ respectively. If the limit is reached, the next number generated will be
+ the minvalue or maxvalue, respectively.
+ - If C(false) (NO CYCLE) is specified, any calls to nextval after the sequence
+ has reached its maximum value will return an error. False (NO CYCLE) is
+ the default.
+ type: bool
+ default: no
+ cascade:
+ description:
+ - Automatically drop objects that depend on the sequence, and in turn all
+ objects that depend on those objects.
+ - Ignored if I(state=present).
+ - Only used with I(state=absent).
+ type: bool
+ default: no
+ rename_to:
+ description:
+ - The new name for the I(sequence).
+ - Works only for existing sequences.
+ type: str
+ owner:
+ description:
+ - Set the owner for the I(sequence).
+ type: str
+ schema:
+ description:
+    - The schema of the I(sequence). This is used to create and relocate
+ a I(sequence) in the given schema.
+ default: public
+ type: str
+ newschema:
+ description:
+ - The new schema for the I(sequence). Will be used for moving a
+ I(sequence) to another I(schema).
+ - Works only for existing sequences.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified I(session_role)
+ must be a role that the current I(login_user) is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the I(session_role) were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - database
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(sequence), I(schema), I(rename_to),
+ I(owner), I(newschema), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, the sequence will be created in the
+  database named postgres.
+seealso:
+- module: community.general.postgresql_table
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_tablespace
+- name: CREATE SEQUENCE reference
+ description: Complete reference of the CREATE SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsequence.html
+- name: ALTER SEQUENCE reference
+ description: Complete reference of the ALTER SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersequence.html
+- name: DROP SEQUENCE reference
+ description: Complete reference of the DROP SEQUENCE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsequence.html
+author:
+- Tobias Birkefeld (@tcraxs)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create an ascending bigint sequence called foobar in the default
+ database
+ community.general.postgresql_sequence:
+ name: foobar
+
+- name: Create an ascending integer sequence called foobar, starting at 101
+ community.general.postgresql_sequence:
+ name: foobar
+ data_type: integer
+ start: 101
+
+- name: Create a descending sequence called foobar, starting at 101 and
+    preallocating 10 sequence numbers in cache
+ community.general.postgresql_sequence:
+ name: foobar
+ increment: -1
+ cache: 10
+ start: 101
+
+- name: Create an ascending sequence called foobar, which cycles between 1 and 10
+ community.general.postgresql_sequence:
+ name: foobar
+ cycle: yes
+ min: 1
+ max: 10
+
+- name: Create an ascending bigint sequence called foobar in the default
+ database with owner foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Rename an existing sequence named foo to bar
+ community.general.postgresql_sequence:
+ name: foo
+ rename_to: bar
+
+- name: Change the schema of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ newschema: foobar
+
+- name: Change the owner of an existing sequence to foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ owner: foobar
+
+- name: Drop a sequence called foobar
+ community.general.postgresql_sequence:
+ name: foobar
+ state: absent
+
+- name: Drop a sequence called foobar with cascade
+ community.general.postgresql_sequence:
+ name: foobar
+ cascade: yes
+ state: absent
+'''
+
+RETURN = r'''
+state:
+ description: Sequence state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+sequence:
+ description: Sequence name.
+ returned: always
+ type: str
+ sample: 'foobar'
+queries:
+  description: List of queries that the module tried to execute.
+ returned: always
+ type: str
+ sample: [ "CREATE SEQUENCE \"foo\"" ]
+schema:
+  description: Name of the schema of the sequence.
+ returned: always
+ type: str
+ sample: 'foo'
+data_type:
+ description: Shows the current data type of the sequence.
+ returned: always
+ type: str
+ sample: 'bigint'
+increment:
+ description: The value of increment of the sequence. A positive value will
+ make an ascending sequence, a negative one a descending
+ sequence.
+ returned: always
+ type: int
+ sample: '-1'
+minvalue:
+ description: The value of minvalue of the sequence.
+ returned: always
+ type: int
+ sample: '1'
+maxvalue:
+ description: The value of maxvalue of the sequence.
+ returned: always
+ type: int
+ sample: '9223372036854775807'
+start:
+ description: The value of start of the sequence.
+ returned: always
+ type: int
+ sample: '12'
+cycle:
+  description: Shows if the sequence cycles or not.
+ returned: always
+ type: str
+ sample: 'NO'
+owner:
+ description: Shows the current owner of the sequence
+ after the successful run of the task.
+ returned: always
+ type: str
+ sample: 'postgres'
+newname:
+ description: Shows the new sequence name after rename.
+ returned: on success
+ type: str
+ sample: 'barfoo'
+newschema:
+ description: Shows the new schema of the sequence after schema change.
+ returned: on success
+ type: str
+ sample: 'foobar'
+'''
+
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class Sequence(object):
+ """Implements behavior of CREATE, ALTER or DROP SEQUENCE PostgreSQL command.
+
+ Arguments:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+
+ Attributes:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ changed (bool) -- something was changed after execution or not
+ executed_queries (list) -- executed queries
+ name (str) -- name of the sequence
+ owner (str) -- name of the owner of the sequence
+ schema (str) -- name of the schema (default: public)
+ data_type (str) -- data type of the sequence
+ start_value (int) -- value of the sequence start
+ minvalue (int) -- minimum value of the sequence
+ maxvalue (int) -- maximum value of the sequence
+ increment (int) -- increment value of the sequence
+ cycle (bool) -- sequence can cycle or not
+ new_name (str) -- name of the renamed sequence
+ new_schema (str) -- name of the new schema
+ exists (bool) -- sequence exists or not
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.executed_queries = []
+ self.name = self.module.params['sequence']
+ self.owner = ''
+ self.schema = self.module.params['schema']
+ self.data_type = ''
+ self.start_value = ''
+ self.minvalue = ''
+ self.maxvalue = ''
+ self.increment = ''
+ self.cycle = ''
+ self.new_name = ''
+ self.new_schema = ''
+ self.exists = False
+ # Collect info
+ self.get_info()
+
+ def get_info(self):
+ """Getter to refresh and get sequence info"""
+ query = ("SELECT "
+ "s.sequence_schema AS schemaname, "
+ "s.sequence_name AS sequencename, "
+ "pg_get_userbyid(c.relowner) AS sequenceowner, "
+ "s.data_type::regtype AS data_type, "
+ "s.start_value AS start_value, "
+ "s.minimum_value AS min_value, "
+ "s.maximum_value AS max_value, "
+ "s.increment AS increment_by, "
+ "s.cycle_option AS cycle "
+ "FROM information_schema.sequences s "
+ "JOIN pg_class c ON c.relname = s.sequence_name "
+ "LEFT JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE NOT pg_is_other_temp_schema(n.oid) "
+ "AND c.relkind = 'S'::\"char\" "
+ "AND sequence_name = %(name)s "
+ "AND sequence_schema = %(schema)s")
+
+ res = exec_sql(self, query,
+ query_params={'name': self.name, 'schema': self.schema},
+ add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res:
+ self.exists = True
+ self.schema = res[0]['schemaname']
+ self.name = res[0]['sequencename']
+ self.owner = res[0]['sequenceowner']
+ self.data_type = res[0]['data_type']
+ self.start_value = res[0]['start_value']
+ self.minvalue = res[0]['min_value']
+ self.maxvalue = res[0]['max_value']
+ self.increment = res[0]['increment_by']
+ self.cycle = res[0]['cycle']
+
+ def create(self):
+ """Implements CREATE SEQUENCE command behavior."""
+ query = ['CREATE SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('data_type'):
+ query.append('AS %s' % self.module.params['data_type'])
+
+ if self.module.params.get('increment'):
+ query.append('INCREMENT BY %s' % self.module.params['increment'])
+
+ if self.module.params.get('minvalue'):
+ query.append('MINVALUE %s' % self.module.params['minvalue'])
+
+ if self.module.params.get('maxvalue'):
+ query.append('MAXVALUE %s' % self.module.params['maxvalue'])
+
+ if self.module.params.get('start'):
+ query.append('START WITH %s' % self.module.params['start'])
+
+ if self.module.params.get('cache'):
+ query.append('CACHE %s' % self.module.params['cache'])
+
+ if self.module.params.get('cycle'):
+ query.append('CYCLE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def drop(self):
+ """Implements DROP SEQUENCE command behavior."""
+ query = ['DROP SEQUENCE']
+ query.append(self.__add_schema())
+
+ if self.module.params.get('cascade'):
+ query.append('CASCADE')
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def rename(self):
+ """Implements ALTER SEQUENCE RENAME TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('RENAME TO "%s"' % self.module.params['rename_to'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_owner(self):
+ """Implements ALTER SEQUENCE OWNER TO command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('OWNER TO "%s"' % self.module.params['owner'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def set_schema(self):
+ """Implements ALTER SEQUENCE SET SCHEMA command behavior."""
+ query = ['ALTER SEQUENCE']
+ query.append(self.__add_schema())
+ query.append('SET SCHEMA "%s"' % self.module.params['newschema'])
+
+ return exec_sql(self, ' '.join(query), return_bool=True)
+
+ def __add_schema(self):
+ return '"%s"."%s"' % (self.schema, self.name)
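+
+    # Illustrative sketch (values taken from the EXAMPLES above): for schema='public'
+    # and name='foobar', __add_schema() yields '"public"."foobar"', so create() with
+    # increment=-1, start=101 and cache=10 builds
+    #   CREATE SEQUENCE "public"."foobar" INCREMENT BY -1 START WITH 101 CACHE 10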
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ sequence=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data_type=dict(type='str', choices=['bigint', 'integer', 'smallint']),
+ increment=dict(type='int'),
+ minvalue=dict(type='int', aliases=['min']),
+ maxvalue=dict(type='int', aliases=['max']),
+ start=dict(type='int'),
+ cache=dict(type='int'),
+ cycle=dict(type='bool', default=False),
+ schema=dict(type='str', default='public'),
+ cascade=dict(type='bool', default=False),
+ rename_to=dict(type='str'),
+ owner=dict(type='str'),
+ newschema=dict(type='str'),
+ db=dict(type='str', default='', aliases=['login_db', 'database']),
+ session_role=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['rename_to', 'data_type'],
+ ['rename_to', 'increment'],
+ ['rename_to', 'minvalue'],
+ ['rename_to', 'maxvalue'],
+ ['rename_to', 'start'],
+ ['rename_to', 'cache'],
+ ['rename_to', 'cycle'],
+ ['rename_to', 'cascade'],
+ ['rename_to', 'owner'],
+ ['rename_to', 'newschema'],
+ ['cascade', 'data_type'],
+ ['cascade', 'increment'],
+ ['cascade', 'minvalue'],
+ ['cascade', 'maxvalue'],
+ ['cascade', 'start'],
+ ['cascade', 'cache'],
+ ['cascade', 'cycle'],
+ ['cascade', 'owner'],
+ ['cascade', 'newschema'],
+ ]
+ )
+
+ if not module.params["trust_input"]:
+ check_input(
+ module,
+ module.params['sequence'],
+ module.params['schema'],
+ module.params['rename_to'],
+ module.params['owner'],
+ module.params['newschema'],
+ module.params['session_role'],
+ )
+
+ # Note: we don't need to check mutually exclusive params here, because they are
+ # checked automatically by AnsibleModule (mutually_exclusive=[] list above).
+
+ # Change autocommit to False if check_mode:
+ autocommit = not module.check_mode
+ # Connect to DB and make cursor object:
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+    ##############
+    # Create the object and do the main job:
+ data = Sequence(module, cursor)
+
+ # Set defaults:
+ changed = False
+
+ # Create new sequence
+ if not data.exists and module.params['state'] == 'present':
+ if module.params.get('rename_to'):
+ module.fail_json(msg="Sequence '%s' does not exist, nothing to rename" % module.params['sequence'])
+ if module.params.get('newschema'):
+ module.fail_json(msg="Sequence '%s' does not exist, change of schema not possible" % module.params['sequence'])
+
+ changed = data.create()
+
+ # Drop non-existing sequence
+ elif not data.exists and module.params['state'] == 'absent':
+ # Nothing to do
+ changed = False
+
+ # Drop existing sequence
+ elif data.exists and module.params['state'] == 'absent':
+ changed = data.drop()
+
+ # Rename sequence
+ if data.exists and module.params.get('rename_to'):
+ if data.name != module.params['rename_to']:
+ changed = data.rename()
+ if changed:
+ data.new_name = module.params['rename_to']
+
+ # Refresh information
+ if module.params['state'] == 'present':
+ data.get_info()
+
+ # Change owner, schema and settings
+ if module.params['state'] == 'present' and data.exists:
+ # change owner
+ if module.params.get('owner'):
+ if data.owner != module.params['owner']:
+ changed = data.set_owner()
+
+ # Set schema
+ if module.params.get('newschema'):
+ if data.schema != module.params['newschema']:
+ changed = data.set_schema()
+ if changed:
+ data.new_schema = module.params['newschema']
+
+ # Rollback if it's possible and check_mode:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ sequence=data.name,
+ queries=data.executed_queries,
+ schema=data.schema,
+ data_type=data.data_type,
+ increment=data.increment,
+ minvalue=data.minvalue,
+ maxvalue=data.maxvalue,
+ start=data.start_value,
+ cycle=data.cycle,
+ owner=data.owner,
+ )
+
+ if module.params['state'] == 'present':
+ if data.new_name:
+ kw['newname'] = data.new_name
+ if data.new_schema:
+ kw['newschema'] = data.new_schema
+
+ elif module.params['state'] == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py
new file mode 100644
index 00000000..737bded5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_set.py
@@ -0,0 +1,447 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_set
+short_description: Change a PostgreSQL server configuration parameter
+description:
+  - Allows you to change a PostgreSQL server configuration parameter.
+  - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
+  - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
+  - It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
+  - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
+    which is read in addition to postgresql.conf.
+  - The module allows you to reset a parameter to boot_val (the cluster's initial value) with I(reset=yes), or to remove the parameter
+    string from postgresql.auto.conf and reload the configuration with I(value=default) (for settings with postmaster context a restart is required).
+  - After a change you can see the previous and the new parameter values and other
+    information in the Ansible output by using the returned values and the M(ansible.builtin.debug) module.
+options:
+ name:
+ description:
+ - Name of PostgreSQL server parameter.
+ type: str
+ required: true
+ value:
+ description:
+ - Parameter value to set.
+    - To remove the parameter string from postgresql.auto.conf and
+      reload the server configuration, you must pass I(value=default).
+      With I(value=default) the task always returns C(changed=true).
+ type: str
+ reset:
+ description:
+ - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
+ type: bool
+ default: false
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- Supported versions of PostgreSQL are 9.4 and later.
+- Note that changing a setting with 'postmaster' context can report C(changed=true)
+  when actually nothing changes, because the same value may be presented in
+  several different forms, for example 1024MB, 1GB, etc., while in the pg_settings
+  system view it can be stored as 131072 (a number of 8kB pages).
+  The final check of the parameter value cannot compare them because the server has
+  not been restarted and the value in pg_settings is not updated yet.
+- For some parameters restart of PostgreSQL server is required.
+ See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
+seealso:
+- module: community.general.postgresql_info
+- name: PostgreSQL server configuration
+ description: General information about PostgreSQL server configuration.
+ link: https://www.postgresql.org/docs/current/runtime-config.html
+- name: PostgreSQL view pg_settings reference
+ description: Complete reference of the pg_settings view documentation.
+ link: https://www.postgresql.org/docs/current/view-pg-settings.html
+- name: PostgreSQL ALTER SYSTEM command reference
+ description: Complete reference of the ALTER SYSTEM command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersystem.html
+author:
+- Andrew Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Restore wal_keep_segments parameter to initial state
+ community.general.postgresql_set:
+ name: wal_keep_segments
+ reset: yes
+
+# Set the work_mem parameter to 32MB and show what has been changed and whether a restart is required
+# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
+- name: Set work mem parameter
+ community.general.postgresql_set:
+ name: work_mem
+ value: 32mb
+ register: set
+
+- ansible.builtin.debug:
+ msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
+ when: set.changed
+# Note that for some parameters a restart of the PostgreSQL server is required.
+# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
+# (if you passed a value different from the current server setting).
+
+- name: Set log_min_duration_statement parameter to 1 second
+ community.general.postgresql_set:
+ name: log_min_duration_statement
+ value: 1s
+
+- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
+ community.general.postgresql_set:
+ name: wal_log_hints
+ value: default
+'''
+
+RETURN = r'''
+name:
+ description: Name of PostgreSQL server parameter.
+ returned: always
+ type: str
+ sample: 'shared_buffers'
+restart_required:
+  description: Whether a restart of the PostgreSQL server is required for the parameter change to take effect.
+ returned: always
+ type: bool
+ sample: true
+prev_val_pretty:
+  description: Information about the previous state of the parameter.
+ returned: always
+ type: str
+ sample: '4MB'
+value_pretty:
+  description: Information about the current state of the parameter.
+ returned: always
+ type: str
+ sample: '64MB'
+value:
+ description:
+ - Dictionary that contains the current parameter value (at the time of playbook finish).
+    - Note that for some parameters a restart of the PostgreSQL server is required for the change to take effect.
+ - Returns the current value in the check mode.
+ returned: always
+ type: dict
+ sample: { "value": 67108864, "unit": "b" }
+context:
+ description:
+ - PostgreSQL setting context.
+ returned: always
+ type: str
+ sample: user
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except Exception:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_native
+
+PG_REQ_VER = 90400
+
+# To allow setting values like 1mb instead of 1MB, etc.:
+POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def param_get(cursor, module, name):
+ query = ("SELECT name, setting, unit, context, boot_val "
+ "FROM pg_settings WHERE name = %(name)s")
+ try:
+ cursor.execute(query, {'name': name})
+ info = cursor.fetchall()
+ cursor.execute("SHOW %s" % name)
+ val = cursor.fetchone()
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ raw_val = info[0][1]
+ unit = info[0][2]
+ context = info[0][3]
+ boot_val = info[0][4]
+
+ if val[0] == 'True':
+ val[0] = 'on'
+ elif val[0] == 'False':
+ val[0] = 'off'
+
+ if unit == 'kB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024
+
+ unit = 'b'
+
+ elif unit == 'MB':
+ if int(raw_val) > 0:
+ raw_val = int(raw_val) * 1024 * 1024
+ if int(boot_val) > 0:
+ boot_val = int(boot_val) * 1024 * 1024
+
+ unit = 'b'
+
+ return (val[0], raw_val, unit, boot_val, context)
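+
+# Illustrative sketch (assumed values): for a work_mem parameter currently set to 4MB,
+# param_get() would return something like ('4MB', 4194304, 'b', 4194304, 'user'),
+# i.e. (pretty value from SHOW, raw value in bytes, unit, boot value in bytes, context).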
+
+
+def pretty_to_bytes(pretty_val):
+ # The function returns a value in bytes
+ # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
+ # Otherwise it returns the passed argument.
+
+ val_in_bytes = None
+
+ if 'kB' in pretty_val:
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024
+
+ elif 'MB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024
+
+ elif 'GB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024
+
+ elif 'TB' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
+
+ elif 'B' in pretty_val.upper():
+ num_part = int(''.join(d for d in pretty_val if d.isdigit()))
+ val_in_bytes = num_part
+
+ else:
+ return pretty_val
+
+ return val_in_bytes
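+
+# Illustrative sketch (assumed values): pretty_to_bytes('64MB') returns 67108864 and
+# pretty_to_bytes('4MB') returns 4194304, while a unitless value such as '100' is
+# returned unchanged; this is what lets check_mode compare values given in different units.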
+
+
+def param_set(cursor, module, name, value, context):
+ try:
+ if str(value).lower() == 'default':
+ query = "ALTER SYSTEM SET %s = DEFAULT" % name
+ else:
+ query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
+ cursor.execute(query)
+
+ if context != 'postmaster':
+ cursor.execute("SELECT pg_reload_conf()")
+
+ except Exception as e:
+ module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
+
+ return True
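+
+# Illustrative sketch (values taken from the EXAMPLES above): setting work_mem to 32MB
+# runs
+#   ALTER SYSTEM SET work_mem = '32MB'
+# followed by SELECT pg_reload_conf(), because work_mem does not have 'postmaster'
+# context; passing value=default would instead run ALTER SYSTEM SET work_mem = DEFAULT.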
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', aliases=['login_db']),
+ value=dict(type='str'),
+ reset=dict(type='bool', default=False),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ reset = module.params['reset']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, name, value, session_role)
+
+    # Allow passing values like 1mb instead of 1MB, etc.:
+ if value:
+ for unit in POSSIBLE_SIZE_UNITS:
+ if value[:-2].isdigit() and unit in value[-2:]:
+ value = value.upper()
+
+ if value is not None and reset:
+ module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
+
+ if value is None and not reset:
+ module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ kw = {}
+ # Check server version (needs 9.4 or later):
+ ver = db_connection.server_version
+ if ver < PG_REQ_VER:
+ module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
+ kw = dict(
+ changed=False,
+ restart_required=False,
+ value_pretty="",
+ prev_val_pretty="",
+ value={"value": "", "unit": ""},
+ )
+ kw['name'] = name
+ db_connection.close()
+ module.exit_json(**kw)
+
+ # Set default returned values:
+ restart_required = False
+ changed = False
+ kw['name'] = name
+ kw['restart_required'] = False
+
+ # Get info about param state:
+ res = param_get(cursor, module, name)
+ current_value = res[0]
+ raw_val = res[1]
+ unit = res[2]
+ boot_val = res[3]
+ context = res[4]
+
+ if value == 'True':
+ value = 'on'
+ elif value == 'False':
+ value = 'off'
+
+ kw['prev_val_pretty'] = current_value
+ kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
+ kw['context'] = context
+
+ # Do job
+ if context == "internal":
+ module.fail_json(msg="%s: cannot be changed (internal context). See "
+ "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
+
+ if context == "postmaster":
+ restart_required = True
+
+ # If check_mode, just compare and exit:
+ if module.check_mode:
+ if pretty_to_bytes(value) == pretty_to_bytes(current_value):
+ kw['changed'] = False
+
+ else:
+ kw['value_pretty'] = value
+ kw['changed'] = True
+
+ # Anyway returns current raw value in the check_mode:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ kw['restart_required'] = restart_required
+ module.exit_json(**kw)
+
+ # Set param (value can be an empty string):
+ if value is not None and value != current_value:
+ changed = param_set(cursor, module, name, value, context)
+
+ kw['value_pretty'] = value
+
+ # Reset param:
+ elif reset:
+ if raw_val == boot_val:
+ # nothing to change, exit:
+ kw['value'] = dict(
+ value=raw_val,
+ unit=unit,
+ )
+ module.exit_json(**kw)
+
+ changed = param_set(cursor, module, name, boot_val, context)
+
+ cursor.close()
+ db_connection.close()
+
+ # Reconnect and recheck current value:
+ if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ res = param_get(cursor, module, name)
+ # f_ means 'final'
+ f_value = res[0]
+ f_raw_val = res[1]
+
+ if raw_val == f_raw_val:
+ changed = False
+
+ else:
+ changed = True
+
+ kw['value_pretty'] = f_value
+ kw['value'] = dict(
+ value=f_raw_val,
+ unit=unit,
+ )
+
+ cursor.close()
+ db_connection.close()
+
+ kw['changed'] = changed
+ kw['restart_required'] = restart_required
+
+ if restart_required and changed:
+ module.warn("Restart of PostgreSQL is required for setting %s" % name)
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py
new file mode 100644
index 00000000..435a6c59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_slot.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, John Scalia (@jscalia), Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: postgresql_slot
+short_description: Add or remove replication slots from a PostgreSQL database
+description:
+- Add or remove physical or logical replication slots from a PostgreSQL database.
+
+options:
+ name:
+ description:
+ - Name of the replication slot to add or remove.
+ type: str
+ required: yes
+ aliases:
+ - slot_name
+ slot_type:
+ description:
+ - Slot type.
+ type: str
+ default: physical
+ choices: [ logical, physical ]
+ state:
+ description:
+ - The slot state.
+ - I(state=present) implies the slot must be present in the system.
+    - I(state=absent) implies the slot must be removed from the system.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ immediately_reserve:
+ description:
+    - Optional parameter that, when C(yes), specifies that the LSN for this replication slot is reserved
+      immediately; otherwise the default, C(no), specifies that the LSN is reserved on the first connection
+      from a streaming replication client.
+    - Available from PostgreSQL version 9.6.
+    - Used only with I(slot_type=physical).
+ - Mutually exclusive with I(slot_type=logical).
+ type: bool
+ default: no
+ output_plugin:
+ description:
+ - All logical slots must indicate which output plugin decoder they're using.
+ - This parameter does not apply to physical slots.
+ - It will be ignored with I(slot_type=physical).
+ type: str
+ default: "test_decoding"
+ db:
+ description:
+ - Name of database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- Physical replication slots were introduced to PostgreSQL with version 9.4,
+ while logical replication slots were added beginning with version 10.0.
+
+seealso:
+- name: PostgreSQL pg_replication_slots view reference
+ description: Complete reference of the PostgreSQL pg_replication_slots view.
+ link: https://www.postgresql.org/docs/current/view-pg-replication-slots.html
+- name: PostgreSQL streaming replication protocol reference
+ description: Complete reference of the PostgreSQL streaming replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-replication.html
+- name: PostgreSQL logical replication protocol reference
+ description: Complete reference of the PostgreSQL logical replication protocol documentation.
+ link: https://www.postgresql.org/docs/current/protocol-logical-replication.html
+
+author:
+- John Scalia (@jscalia)
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create physical slot physical_one if it doesn't exist
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+
+- name: Remove slot physical_one if it exists
+ become_user: postgres
+ community.general.postgresql_slot:
+ slot_name: physical_one
+ db: ansible
+ state: absent
+
+- name: Create logical_slot_one logical slot in the database acme if it doesn't exist
+ community.general.postgresql_slot:
+ name: logical_slot_one
+ slot_type: logical
+ state: present
+ output_plugin: custom_decoder_one
+ db: "acme"
+
+- name: Remove logical_one slot if it exists from the cluster running on another host and a non-standard port
+ community.general.postgresql_slot:
+ name: logical_one
+ login_host: mydatabase.example.org
+ port: 5433
+ login_user: ourSuperuser
+ login_password: thePassword
+ state: absent
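+
+# An illustrative extra example; the slot name is arbitrary, and
+# immediately_reserve assumes a PostgreSQL 9.6+ server:
+- name: Create reserved_one physical slot and reserve its LSN immediately
+ become_user: postgres
+ community.general.postgresql_slot:
+ name: reserved_one
+ slot_type: physical
+ immediately_reserve: yes
+ db: ansible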
+'''
+
+RETURN = r'''
+name:
+ description: Name of the slot
+ returned: always
+ type: str
+ sample: "physical_one"
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ "SELECT pg_create_physical_replication_slot('physical_one', False, False)" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class PgSlot(object):
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.kind = ''
+ self.__slot_exists()
+ self.changed = False
+ self.executed_queries = []
+
+ def create(self, kind='physical', immediately_reserve=False, output_plugin=False, just_check=False):
+ if self.exists:
+ if self.kind == kind:
+ return False
+ else:
+ self.module.warn("slot with name '%s' already exists "
+ "but has another type '%s'" % (self.name, self.kind))
+ return False
+
+ if just_check:
+ return None
+
+ if kind == 'physical':
+ # Check server version (immediately_reserve requires PostgreSQL 9.6+):
+ if self.cursor.connection.server_version < 96000:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s)"
+
+ else:
+ query = "SELECT pg_create_physical_replication_slot(%(name)s, %(i_reserve)s)"
+
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'i_reserve': immediately_reserve},
+ return_bool=True)
+
+ elif kind == 'logical':
+ query = "SELECT pg_create_logical_replication_slot(%(name)s, %(o_plugin)s)"
+ self.changed = exec_sql(self, query,
+ query_params={'name': self.name, 'o_plugin': output_plugin}, return_bool=True)
+
+ def drop(self):
+ if not self.exists:
+ return False
+
+ query = "SELECT pg_drop_replication_slot(%(name)s)"
+ self.changed = exec_sql(self, query, query_params={'name': self.name}, return_bool=True)
+
+ def __slot_exists(self):
+ query = "SELECT slot_type FROM pg_replication_slots WHERE slot_name = %(name)s"
+ res = exec_sql(self, query, query_params={'name': self.name}, add_to_executed=False)
+ if res:
+ self.exists = True
+ self.kind = res[0][0]
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type="str", aliases=["login_db"]),
+ name=dict(type="str", required=True, aliases=["slot_name"]),
+ slot_type=dict(type="str", default="physical", choices=["logical", "physical"]),
+ immediately_reserve=dict(type="bool", default=False),
+ session_role=dict(type="str"),
+ output_plugin=dict(type="str", default="test_decoding"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ trust_input=dict(type="bool", default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ slot_type = module.params["slot_type"]
+ immediately_reserve = module.params["immediately_reserve"]
+ state = module.params["state"]
+ output_plugin = module.params["output_plugin"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ if immediately_reserve and slot_type == 'logical':
+ module.fail_json(msg="Module parameters immediately_reserve and slot_type=logical are mutually exclusive")
+
+ # When slot_type is logical and parameter db is not passed,
+ # the default database will be used to create the slot and
+ # the user should know about this.
+ # When the slot type is physical,
+ # it doesn't matter which database will be used
+ # because physical slots are global objects.
+ if slot_type == 'logical':
+ warn_db_default = True
+ else:
+ warn_db_default = False
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=warn_db_default)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ##################################
+ # Create an object and do main job
+ pg_slot = PgSlot(module, cursor, name)
+
+ changed = False
+
+ if module.check_mode:
+ if state == "present":
+ if not pg_slot.exists:
+ changed = True
+
+ pg_slot.create(slot_type, immediately_reserve, output_plugin, just_check=True)
+
+ elif state == "absent":
+ if pg_slot.exists:
+ changed = True
+ else:
+ if state == "absent":
+ pg_slot.drop()
+
+ elif state == "present":
+ pg_slot.create(slot_type, immediately_reserve, output_plugin)
+
+ changed = pg_slot.changed
+
+ db_connection.close()
+ module.exit_json(changed=changed, name=name, queries=pg_slot.executed_queries)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py
new file mode 100644
index 00000000..0e2b3612
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_subscription.py
@@ -0,0 +1,717 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: postgresql_subscription
+short_description: Add, update, or remove PostgreSQL subscription
+description:
+- Add, update, or remove PostgreSQL subscription.
+version_added: '0.2.0'
+
+options:
+ name:
+ description:
+ - Name of the subscription to add, update, or remove.
+ type: str
+ required: yes
+ db:
+ description:
+ - Name of the database to connect to and where
+ the subscription state will be changed.
+ aliases: [ login_db ]
+ type: str
+ required: yes
+ state:
+ description:
+ - The subscription state.
+ - C(present) implies that if I(name) subscription doesn't exist, it will be created.
+ - C(absent) implies that if I(name) subscription exists, it will be removed.
+ - C(refresh) implies that if I(name) subscription exists, it will be refreshed.
+ Fetches missing table information from the publisher. Always returns ``changed`` as ``True``.
+ This will start replication of tables that were added to the subscribed-to publications
+ since the last invocation of REFRESH PUBLICATION or since CREATE SUBSCRIPTION.
+ The existing data in the publications that are being subscribed to
+ should be copied once the replication starts.
+ - For more information about C(refresh) see U(https://www.postgresql.org/docs/current/sql-altersubscription.html).
+ type: str
+ choices: [ absent, present, refresh ]
+ default: present
+ owner:
+ description:
+ - Subscription owner.
+ - If I(owner) is not defined, the owner will be set as I(login_user) or I(session_role).
+ - Ignored when I(state) is not C(present).
+ type: str
+ publications:
+ description:
+ - The publication names on the publisher to use for the subscription.
+ - Ignored when I(state) is not C(present).
+ type: list
+ elements: str
+ connparams:
+ description:
+ - The connection dict of parameter-value pairs used to connect to the publisher.
+ - For more information see U(https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ cascade:
+ description:
+ - Drop subscription dependencies. Has effect with I(state=absent) only.
+ - Ignored when I(state) is not C(absent).
+ type: bool
+ default: false
+ subsparams:
+ description:
+ - Dictionary of optional parameters for a subscription, e.g. copy_data, enabled, create_slot, etc.
+ - When updating the subscription, the allowed keys are C(enabled), C(slot_name), C(synchronous_commit), C(publication_name).
+ - See available parameters to create a new subscription
+ on U(https://www.postgresql.org/docs/current/sql-createsubscription.html).
+ - Ignored when I(state) is not C(present).
+ type: dict
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(name), I(publications), I(owner),
+ I(session_role), I(connparams), I(subsparams) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- PostgreSQL version must be 10 or greater.
+
+seealso:
+- module: community.general.postgresql_publication
+- module: community.general.postgresql_info
+- name: CREATE SUBSCRIPTION reference
+ description: Complete reference of the CREATE SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createsubscription.html
+- name: ALTER SUBSCRIPTION reference
+ description: Complete reference of the ALTER SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altersubscription.html
+- name: DROP SUBSCRIPTION reference
+ description: Complete reference of the DROP SUBSCRIPTION command documentation.
+ link: https://www.postgresql.org/docs/current/sql-dropsubscription.html
+
+author:
+- Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: >
+ Create acme subscription in mydb database using acme_publication and
+ the following connection parameters to connect to the publisher.
+ Set the subscription owner as alice.
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ owner: alice
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+
+- name: Assuming that acme subscription exists, try to change conn parameters
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ connect_timeout: 100
+
+- name: Refresh acme publication
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: refresh
+
+- name: Drop acme subscription from mydb with dependencies (cascade=yes)
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: absent
+ cascade: yes
+
+- name: Assuming that acme subscription exists and enabled, disable the subscription
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ subsparams:
+ enabled: no
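+
+# An illustrative extra example; subsparams keys such as create_slot and
+# copy_data are passed through to CREATE SUBSCRIPTION ... WITH (...):
+- name: Create acme subscription without creating a remote slot and without copying existing data
+ community.general.postgresql_subscription:
+ db: mydb
+ name: acme
+ state: present
+ publications: acme_publication
+ connparams:
+ host: 127.0.0.1
+ port: 5432
+ user: repl
+ password: replpass
+ dbname: mydb
+ subsparams:
+ create_slot: false
+ copy_data: false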
+'''
+
+RETURN = r'''
+name:
+ description:
+ - Name of the subscription.
+ returned: always
+ type: str
+ sample: acme
+exists:
+ description:
+ - Flag indicating whether the subscription exists at the end of runtime.
+ returned: always
+ type: bool
+ sample: true
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'DROP SUBSCRIPTION "mysubscription"' ]
+initial_state:
+ description: Subscription configuration at the beginning of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+final_state:
+ description: Subscription configuration at the end of runtime.
+ returned: always
+ type: dict
+ sample: {"conninfo": {}, "enabled": true, "owner": "postgres", "slotname": "test", "synccommit": true}
+'''
+
+from copy import deepcopy
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import check_input
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+SUPPORTED_PG_VERSION = 10000
+
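+# Only these optional subscription parameters can be changed on an existing
+# subscription (enabled via ALTER SUBSCRIPTION ... ENABLE/DISABLE, the others via SET (...)):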
+SUBSPARAMS_KEYS_FOR_UPDATE = ('enabled', 'synchronous_commit', 'slot_name')
+
+
+################################
+# Module functions and classes #
+################################
+
+def convert_conn_params(conn_dict):
+ """Converts the passed connection dictionary to string.
+
+ Args:
+ conn_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Connection string.
+ """
+ conn_list = []
+ for (param, val) in iteritems(conn_dict):
+ conn_list.append('%s=%s' % (param, val))
+
+ return ' '.join(conn_list)
+
+
+def convert_subscr_params(params_dict):
+ """Converts the passed params dictionary to string.
+
+ Args:
+ params_dict (dict): Dictionary which needs to be converted.
+
+ Returns:
+ Parameters string.
+ """
+ params_list = []
+ for (param, val) in iteritems(params_dict):
+ if val is False:
+ val = 'false'
+ elif val is True:
+ val = 'true'
+
+ params_list.append('%s = %s' % (param, val))
+
+ return ', '.join(params_list)
+
+
+class PgSubscription():
+ """Class to work with PostgreSQL subscription.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): The name of the subscription.
+ db (str): The database name the subscription will be associated with.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ name (str): Name of subscription.
+ executed_queries (list): List of executed queries.
+ attrs (dict): Dict with subscription attributes.
+ exists (bool): Flag indicating whether the subscription exists.
+ """
+
+ def __init__(self, module, cursor, name, db):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.db = db
+ self.executed_queries = []
+ self.attrs = {
+ 'owner': None,
+ 'enabled': None,
+ 'synccommit': None,
+ 'conninfo': {},
+ 'slotname': None,
+ 'publications': [],
+ }
+ self.empty_attrs = deepcopy(self.attrs)
+ self.exists = self.check_subscr()
+
+ def get_info(self):
+ """Refresh the subscription information.
+
+ Returns:
+ ``self.attrs``.
+ """
+ self.exists = self.check_subscr()
+ return self.attrs
+
+ def check_subscr(self):
+ """Check the subscription and refresh ``self.attrs`` subscription attribute.
+
+ Returns:
+ True if the subscription with ``self.name`` exists, False otherwise.
+ """
+
+ subscr_info = self.__get_general_subscr_info()
+
+ if not subscr_info:
+ # The subscription does not exist:
+ self.attrs = deepcopy(self.empty_attrs)
+ return False
+
+ self.attrs['owner'] = subscr_info.get('rolname')
+ self.attrs['enabled'] = subscr_info.get('subenabled')
+ self.attrs['synccommit'] = subscr_info.get('subsynccommit')
+ self.attrs['slotname'] = subscr_info.get('subslotname')
+ self.attrs['publications'] = subscr_info.get('subpublications')
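+ # Parse the libpq-style conninfo string (e.g. "host=127.0.0.1 port=5432")
+ # into a dict, casting numeric values to int where possible: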
+ if subscr_info.get('subconninfo'):
+ for param in subscr_info['subconninfo'].split(' '):
+ tmp = param.split('=')
+ try:
+ self.attrs['conninfo'][tmp[0]] = int(tmp[1])
+ except ValueError:
+ self.attrs['conninfo'][tmp[0]] = tmp[1]
+
+ return True
+
+ def create(self, connparams, publications, subsparams, check_mode=True):
+ """Create the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (str): Parameters string in WITH () clause style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been created, otherwise False.
+ """
+ query_fragments = []
+ query_fragments.append("CREATE SUBSCRIPTION %s CONNECTION '%s' "
+ "PUBLICATION %s" % (self.name, connparams, ', '.join(publications)))
+
+ if subsparams:
+ query_fragments.append("WITH (%s)" % subsparams)
+
+ changed = self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ return changed
+
+ def update(self, connparams, publications, subsparams, check_mode=True):
+ """Update the subscription.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+ publications (list): Publications on the master to use.
+ subsparams (dict): Dictionary of optional parameters.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if subscription has been updated, otherwise False.
+ """
+ changed = False
+
+ if connparams:
+ if connparams != self.attrs['conninfo']:
+ changed = self.__set_conn_params(convert_conn_params(connparams),
+ check_mode=check_mode)
+
+ if publications:
+ if sorted(self.attrs['publications']) != sorted(publications):
+ changed = self.__set_publications(publications, check_mode=check_mode)
+
+ if subsparams:
+ params_to_update = []
+
+ for (param, value) in iteritems(subsparams):
+ if param == 'enabled':
+ if self.attrs['enabled'] and value is False:
+ changed = self.enable(enabled=False, check_mode=check_mode)
+ elif not self.attrs['enabled'] and value is True:
+ changed = self.enable(enabled=True, check_mode=check_mode)
+
+ elif param == 'synchronous_commit':
+ if self.attrs['synccommit'] is True and value is False:
+ params_to_update.append("%s = false" % param)
+ elif self.attrs['synccommit'] is False and value is True:
+ params_to_update.append("%s = true" % param)
+
+ elif param == 'slot_name':
+ if self.attrs['slotname'] and self.attrs['slotname'] != value:
+ params_to_update.append("%s = %s" % (param, value))
+
+ else:
+ self.module.warn("Parameter '%s' is not in params supported "
+ "for update '%s', ignored..." % (param, SUBSPARAMS_KEYS_FOR_UPDATE))
+
+ if params_to_update:
+ changed = self.__set_params(params_to_update, check_mode=check_mode)
+
+ return changed
+
+ def drop(self, cascade=False, check_mode=True):
+ """Drop the subscription.
+
+ Kwargs:
+ cascade (bool): Flag indicates that the subscription needs to be deleted
+ with its dependencies.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ changed (bool): True if the subscription has been removed, otherwise False.
+ """
+ if self.exists:
+ query_fragments = ["DROP SUBSCRIPTION %s" % self.name]
+ if cascade:
+ query_fragments.append("CASCADE")
+
+ return self.__exec_sql(' '.join(query_fragments), check_mode=check_mode)
+
+ def set_owner(self, role, check_mode=True):
+ """Set a subscription owner.
+
+ Args:
+ role (str): Role (user) name that needs to be set as a subscription owner.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s OWNER TO "%s"' % (self.name, role)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def refresh(self, check_mode=True):
+ """Refresh publication.
+
+ Fetches missing table info from publisher.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s REFRESH PUBLICATION' % self.name
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_params(self, params_to_update, check_mode=True):
+ """Update optional subscription parameters.
+
+ Args:
+ params_to_update (list): Parameters with values to update.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET (%s)' % (self.name, ', '.join(params_to_update))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_conn_params(self, connparams, check_mode=True):
+ """Update connection parameters.
+
+ Args:
+ connparams (str): Connection string in libpq style.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = "ALTER SUBSCRIPTION %s CONNECTION '%s'" % (self.name, connparams)
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __set_publications(self, publications, check_mode=True):
+ """Update publications.
+
+ Args:
+ publications (list): Publications on the master to use.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ query = 'ALTER SUBSCRIPTION %s SET PUBLICATION %s' % (self.name, ', '.join(publications))
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def enable(self, enabled=True, check_mode=True):
+ """Enable or disable the subscription.
+
+ Kwargs:
+ enabled (bool): Flag indicating whether the subscription needs
+ to be enabled or disabled.
+ check_mode (bool): If True, don't actually change anything,
+ just make SQL, add it to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if enabled:
+ query = 'ALTER SUBSCRIPTION %s ENABLE' % self.name
+ else:
+ query = 'ALTER SUBSCRIPTION %s DISABLE' % self.name
+
+ return self.__exec_sql(query, check_mode=check_mode)
+
+ def __get_general_subscr_info(self):
+ """Get and return general subscription information.
+
+ Returns:
+ Dict with subscription information if successful, False otherwise.
+ """
+ query = ("SELECT d.datname, r.rolname, s.subenabled, "
+ "s.subconninfo, s.subslotname, s.subsynccommit, "
+ "s.subpublications FROM pg_catalog.pg_subscription s "
+ "JOIN pg_catalog.pg_database d "
+ "ON s.subdbid = d.oid "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON s.subowner = r.oid "
+ "WHERE s.subname = %(name)s AND d.datname = %(db)s")
+
+ result = exec_sql(self, query, query_params={'name': self.name, 'db': self.db}, add_to_executed=False)
+ if result:
+ return result[0]
+ else:
+ return False
+
+ def __exec_sql(self, query, check_mode=False):
+ """Execute SQL query.
+
+ Note: If we need just to get information from the database,
+ we use ``exec_sql`` function directly.
+
+ Args:
+ query (str): Query that needs to be executed.
+
+ Kwargs:
+ check_mode (bool): If True, don't actually change anything,
+ just add ``query`` to ``self.executed_queries`` and return True.
+
+ Returns:
+ True if successful, False otherwise.
+ """
+ if check_mode:
+ self.executed_queries.append(query)
+ return True
+ else:
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', required=True),
+ db=dict(type='str', required=True, aliases=['login_db']),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'refresh']),
+ publications=dict(type='list', elements='str'),
+ connparams=dict(type='dict'),
+ cascade=dict(type='bool', default=False),
+ owner=dict(type='str'),
+ subsparams=dict(type='dict'),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ # Parameters handling:
+ db = module.params['db']
+ name = module.params['name']
+ state = module.params['state']
+ publications = module.params['publications']
+ cascade = module.params['cascade']
+ owner = module.params['owner']
+ subsparams = module.params['subsparams']
+ connparams = module.params['connparams']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not subsparams:
+ subsparams_str = None
+ else:
+ subsparams_str = convert_subscr_params(subsparams)
+
+ if not connparams:
+ connparams_str = None
+ else:
+ connparams_str = convert_conn_params(connparams)
+
+ check_input(module, name, publications, owner, session_role,
+ connparams_str, subsparams_str)
+
+ if state == 'present' and cascade:
+ module.warn('parameter "cascade" is ignored when state is not absent')
+
+ if state != 'present':
+ if owner:
+ module.warn("parameter 'owner' is ignored when state is not 'present'")
+ if publications:
+ module.warn("parameter 'publications' is ignored when state is not 'present'")
+ if connparams:
+ module.warn("parameter 'connparams' is ignored when state is not 'present'")
+ if subsparams:
+ module.warn("parameter 'subsparams' is ignored when state is not 'present'")
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We check subscription state without DML queries execution, so set autocommit:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Check version:
+ if cursor.connection.server_version < SUPPORTED_PG_VERSION:
+ module.fail_json(msg="PostgreSQL server version should be 10.0 or greater")
+
+ # Set defaults:
+ changed = False
+ initial_state = {}
+ final_state = {}
+
+ ###################################
+ # Create object and do rock'n'roll:
+ subscription = PgSubscription(module, cursor, name, db)
+
+ if subscription.exists:
+ initial_state = deepcopy(subscription.attrs)
+ final_state = deepcopy(initial_state)
+
+ if state == 'present':
+ if not subscription.exists:
+ if subsparams:
+ subsparams = convert_subscr_params(subsparams)
+
+ if connparams:
+ connparams = convert_conn_params(connparams)
+
+ changed = subscription.create(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ else:
+ changed = subscription.update(connparams,
+ publications,
+ subsparams,
+ check_mode=module.check_mode)
+
+ if owner and subscription.attrs['owner'] != owner:
+ changed = subscription.set_owner(owner, check_mode=module.check_mode) or changed
+
+ elif state == 'absent':
+ changed = subscription.drop(cascade, check_mode=module.check_mode)
+
+ elif state == 'refresh':
+ if not subscription.exists:
+ module.fail_json(msg="Refresh failed: subscription '%s' does not exist" % name)
+
+ # Always returns True:
+ changed = subscription.refresh(check_mode=module.check_mode)
+
+ # Get final subscription info:
+ final_state = subscription.get_info()
+
+ # Connection is not needed any more:
+ cursor.close()
+ db_connection.close()
+
+ # Return ret values and exit:
+ module.exit_json(changed=changed,
+ name=name,
+ exists=subscription.exists,
+ queries=subscription.executed_queries,
+ initial_state=initial_state,
+ final_state=final_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py
new file mode 100644
index 00000000..5260853d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_table.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_table
+short_description: Create, drop, or modify a PostgreSQL table
+description:
+- Allows you to create, drop, rename, or truncate a table, or change some table attributes.
+options:
+ table:
+ description:
+ - Table name.
+ required: true
+ aliases:
+ - name
+ type: str
+ state:
+ description:
+ - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
+ type: str
+ default: present
+ choices: [ absent, present ]
+ tablespace:
+ description:
+ - Set a tablespace for the table.
+ required: false
+ type: str
+ owner:
+ description:
+ - Set a table owner.
+ type: str
+ unlogged:
+ description:
+ - Create an unlogged table.
+ type: bool
+ default: no
+ like:
+ description:
+ - Create a table like another table (with similar DDL).
+ Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ including:
+ description:
+ - Keywords that are used with the I(like) parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS, or ALL.
+ Requires I(like) to be specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
+ type: str
+ columns:
+ description:
+ - List of column definitions needed for the table (for example C(num bigint)).
+ type: list
+ elements: str
+ rename:
+ description:
+ - New table name. Mutually exclusive with I(tablespace), I(owner),
+ I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
+ type: str
+ truncate:
+ description:
+ - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
+ I(like), I(including), I(columns), I(rename), and I(storage_params).
+ type: bool
+ default: no
+ storage_params:
+ description:
+ - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
+ Mutually exclusive with I(rename) and I(truncate).
+ type: list
+ elements: str
+ db:
+ description:
+ - Name of the database to connect to and where the table will be created.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting.
+ The specified session_role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ cascade:
+ description:
+ - Automatically drop objects that depend on the table (such as views).
+ Used with I(state=absent) only.
+ type: bool
+ default: no
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- If you do not pass the I(db) parameter, tables will be created in the database
+ named postgres.
+- PostgreSQL allows the creation of columnless tables, so the I(columns) parameter is optional.
+- Unlogged tables are available from PostgreSQL server version 9.1.
+seealso:
+- module: community.general.postgresql_sequence
+- module: community.general.postgresql_idx
+- module: community.general.postgresql_info
+- module: community.general.postgresql_tablespace
+- module: community.general.postgresql_owner
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_copy
+- name: CREATE TABLE reference
+ description: Complete reference of the CREATE TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtable.html
+- name: ALTER TABLE reference
+ description: Complete reference of the ALTER TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertable.html
+- name: DROP TABLE reference
+ description: Complete reference of the DROP TABLE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptable.html
+- name: PostgreSQL data types
+ description: Complete reference of the PostgreSQL data types documentation.
+ link: https://www.postgresql.org/docs/current/datatype.html
+author:
+- Andrei Klychkov (@Andersson007)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
+ community.general.postgresql_table:
+ db: acme
+ name: tbl2
+ like: tbl1
+ owner: testuser
+
+- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
+ community.general.postgresql_table:
+ db: acme
+ table: tbl2
+ like: tbl1
+ including: comments, indexes
+ tablespace: ssd
+
+- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
+ community.general.postgresql_table:
+ name: test_table
+ columns:
+ - id bigserial primary key
+ - num bigint
+ - stories text
+ tablespace: ssd
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+
+- name: Create an unlogged table in schema acme
+ community.general.postgresql_table:
+ name: acme.useless_data
+ columns: waste_id int
+ unlogged: true
+
+- name: Rename table foo to bar
+ community.general.postgresql_table:
+ table: foo
+ rename: bar
+
+- name: Rename table foo from schema acme to bar
+ community.general.postgresql_table:
+ name: acme.foo
+ rename: bar
+
+- name: Set owner to someuser
+ community.general.postgresql_table:
+ name: foo
+ owner: someuser
+
+- name: Change tablespace of foo table to new_tablespace and set owner to new_user
+ community.general.postgresql_table:
+ name: foo
+ tablespace: new_tablespace
+ owner: new_user
+
+- name: Truncate table foo
+ community.general.postgresql_table:
+ name: foo
+ truncate: yes
+
+- name: Drop table foo from schema acme
+ community.general.postgresql_table:
+ name: acme.foo
+ state: absent
+
+- name: Drop table bar cascade
+ community.general.postgresql_table:
+ name: bar
+ state: absent
+ cascade: yes
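+
+# An illustrative extra example; as noted above, PostgreSQL allows columnless
+# tables, so the columns parameter can be omitted (table name is arbitrary):
+- name: Create a columnless table placeholder_tbl in the acme database
+ community.general.postgresql_table:
+ db: acme
+ name: placeholder_tbl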
+'''
+
+RETURN = r'''
+table:
+ description: Name of a table.
+ returned: always
+ type: str
+ sample: 'foo'
+state:
+ description: Table state.
+ returned: always
+ type: str
+ sample: 'present'
+owner:
+ description: Table owner.
+ returned: always
+ type: str
+ sample: 'postgres'
+tablespace:
+ description: Tablespace.
+ returned: always
+ type: str
+ sample: 'ssd_tablespace'
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
+storage_params:
+ description: Storage parameters.
+ returned: always
+ type: list
+ sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+class Table(object):
+ def __init__(self, name, module, cursor):
+ self.name = name
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'owner': '',
+ 'tblspace': '',
+ 'storage_params': [],
+ }
+ self.exists = False
+ self.__exists_in_db()
+ self.executed_queries = []
+
+ def get_info(self):
+ """Getter to refresh and get table info"""
+ self.__exists_in_db()
+
+ def __exists_in_db(self):
+ """Check table exists and refresh info"""
+ if "." in self.name:
+ schema = self.name.split('.')[-2]
+ tblname = self.name.split('.')[-1]
+ else:
+ schema = 'public'
+ tblname = self.name
+
+ query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
+ "FROM pg_tables AS t "
+ "INNER JOIN pg_class AS c ON c.relname = t.tablename "
+ "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
+ "WHERE t.tablename = %(tblname)s "
+ "AND n.nspname = %(schema)s")
+ res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
+ add_to_executed=False)
+ if res:
+ self.exists = True
+ self.info = dict(
+ owner=res[0][0],
+ tblspace=res[0][1] if res[0][1] else '',
+ storage_params=res[0][2] if res[0][2] else [],
+ )
+
+ return True
+ else:
+ self.exists = False
+ return False
+
+ def create(self, columns='', params='', tblspace='',
+ unlogged=False, owner=''):
+ """
+ Create table.
+ If table exists, check passed args (params, tblspace, owner) and,
+ if they're different from current, change them.
+ Arguments:
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ columns - column string (comma separated).
+ """
+ name = pg_quote_identifier(self.name, 'table')
+
+ changed = False
+
+ if self.exists:
+ if tblspace == 'pg_default' and not self.info['tblspace']:
+ pass # Because they have the same meaning
+ elif tblspace and self.info['tblspace'] != tblspace:
+ self.set_tblspace(tblspace)
+ changed = True
+
+ if owner and self.info['owner'] != owner:
+ self.set_owner(owner)
+ changed = True
+
+ if params:
+ param_list = [p.strip(' ') for p in params.split(',')]
+
+ new_param = False
+ for p in param_list:
+ if p not in self.info['storage_params']:
+ new_param = True
+
+ if new_param:
+ self.set_stor_params(params)
+ changed = True
+
+ if changed:
+ return True
+ return False
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ if columns:
+ query += " (%s)" % columns
+ else:
+ query += " ()"
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def create_like(self, src_table, including='', tblspace='',
+ unlogged=False, params='', owner=''):
+ """
+ Create table like another table (with similar DDL).
+ Arguments:
+ src_table - source table.
+ including - corresponds to optional INCLUDING expression
+ in CREATE TABLE ... LIKE statement.
+ params - storage params (passed by "WITH (...)" in SQL),
+ comma separated.
+ tblspace - tablespace.
+ owner - table owner.
+ unlogged - create unlogged table.
+ """
+ changed = False
+
+ name = pg_quote_identifier(self.name, 'table')
+
+ query = "CREATE"
+ if unlogged:
+ query += " UNLOGGED TABLE %s" % name
+ else:
+ query += " TABLE %s" % name
+
+ query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
+
+ if including:
+ including = including.split(',')
+ for i in including:
+ query += " INCLUDING %s" % i
+
+ query += ')'
+
+ if params:
+ query += " WITH (%s)" % params
+
+ if tblspace:
+ query += ' TABLESPACE "%s"' % tblspace
+
+ if exec_sql(self, query, return_bool=True):
+ changed = True
+
+ if owner:
+ changed = self.set_owner(owner)
+
+ return changed
+
+ def truncate(self):
+ query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
+ pg_quote_identifier(newname, 'table'))
+ return exec_sql(self, query, return_bool=True)
+
+ def set_owner(self, username):
+ query = 'ALTER TABLE %s OWNER TO "%s"' % (pg_quote_identifier(self.name, 'table'), username)
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self, cascade=False):
+ if not self.exists:
+ return False
+
+ query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
+ if cascade:
+ query += " CASCADE"
+ return exec_sql(self, query, return_bool=True)
+
+ def set_tblspace(self, tblspace):
+ query = 'ALTER TABLE %s SET TABLESPACE "%s"' % (pg_quote_identifier(self.name, 'table'), tblspace)
+ return exec_sql(self, query, return_bool=True)
+
+ def set_stor_params(self, params):
+ query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
+ return exec_sql(self, query, return_bool=True)
+
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ table=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ db=dict(type='str', default='', aliases=['login_db']),
+ tablespace=dict(type='str'),
+ owner=dict(type='str'),
+ unlogged=dict(type='bool', default=False),
+ like=dict(type='str'),
+ including=dict(type='str'),
+ rename=dict(type='str'),
+ truncate=dict(type='bool', default=False),
+ columns=dict(type='list', elements='str'),
+ storage_params=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ cascade=dict(type='bool', default=False),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ table = module.params['table']
+ state = module.params['state']
+ tablespace = module.params['tablespace']
+ owner = module.params['owner']
+ unlogged = module.params['unlogged']
+ like = module.params['like']
+ including = module.params['including']
+ newname = module.params['rename']
+ storage_params = module.params['storage_params']
+ truncate = module.params['truncate']
+ columns = module.params['columns']
+ cascade = module.params['cascade']
+ session_role = module.params['session_role']
+ trust_input = module.params['trust_input']
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, table, tablespace, owner, like, including,
+ newname, storage_params, columns, session_role)
+
+ if state == 'present' and cascade:
+ module.warn("cascade=true is ignored when state=present")
+
+ # Check mutual exclusive parameters:
+ if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
+ module.fail_json(msg="%s: state=absent is mutually exclusive with: "
+ "truncate, rename, columns, tablespace, "
+ "including, like, storage_params, unlogged, owner" % table)
+
+ if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: truncate is mutually exclusive with: "
+ "rename, columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
+ module.fail_json(msg="%s: rename is mutually exclusive with: "
+ "columns, like, unlogged, including, "
+ "storage_params, owner, tablespace" % table)
+
+ if like and columns:
+ module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
+ if including and not like:
+ module.fail_json(msg="%s: including param needs like param specified" % table)
+
+ conn_params = get_conn_params(module, module.params)
+ db_connection = connect_to_db(module, conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
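+ # storage_params and columns are lists of strings; join them into
+ # comma-separated strings for use in the SQL fragments below: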
+ if storage_params:
+ storage_params = ','.join(storage_params)
+
+ if columns:
+ columns = ','.join(columns)
+
+ ##############
+ # Do main job:
+ table_obj = Table(table, module, cursor)
+
+ # Set default returned values:
+ changed = False
+ kw = {}
+ kw['table'] = table
+ kw['state'] = ''
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+
+ if state == 'absent':
+ changed = table_obj.drop(cascade=cascade)
+
+ elif truncate:
+ changed = table_obj.truncate()
+
+ elif newname:
+ changed = table_obj.rename(newname)
+ q = table_obj.executed_queries
+ table_obj = Table(newname, module, cursor)
+ table_obj.executed_queries = q
+
+ elif state == 'present' and not like:
+ changed = table_obj.create(columns, storage_params,
+ tablespace, unlogged, owner)
+
+ elif state == 'present' and like:
+ changed = table_obj.create_like(like, including, tablespace,
+ unlogged, storage_params, owner)
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ # Refresh table info for RETURN.
+ # Note, if table has been renamed, it gets info by newname:
+ table_obj.get_info()
+ db_connection.commit()
+ if table_obj.exists:
+ kw = dict(
+ table=table,
+ state='present',
+ owner=table_obj.info['owner'],
+ tablespace=table_obj.info['tblspace'],
+ storage_params=table_obj.info['storage_params'],
+ )
+ else:
+ # We just change the table state here
+ # to keep other information about the dropped table:
+ kw['state'] = 'absent'
+
+ kw['queries'] = table_obj.executed_queries
+ kw['changed'] = changed
+ db_connection.close()
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py
new file mode 100644
index 00000000..2062e6a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_tablespace.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
+# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_tablespace
+short_description: Add or remove PostgreSQL tablespaces from remote hosts
+description:
+- Adds or removes PostgreSQL tablespaces from remote hosts.
+options:
+ tablespace:
+ description:
+ - Name of the tablespace to add or remove.
+ required: true
+ type: str
+ aliases:
+ - name
+ location:
+ description:
+ - Path to the tablespace directory in the file system.
+ - Ensure that the location exists and has the right privileges.
+ type: path
+ aliases:
+ - path
+ state:
+ description:
+ - Tablespace state.
+ - I(state=present) implies the tablespace must be created if it doesn't exist.
+ - I(state=absent) implies the tablespace must be removed if present.
+ I(state=absent) is mutually exclusive with I(location), I(owner), and I(set).
+ - See the Notes section for information about check mode restrictions.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ owner:
+ description:
+ - Name of the role to set as an owner of the tablespace.
+ - If this option is not specified, the tablespace owner is the role that creates the tablespace.
+ type: str
+ set:
+ description:
+ - Dict of tablespace options to set. Supported from PostgreSQL 9.0.
+ - For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
+ - When C(reset) is passed as an option's value, the option will be removed if it was previously set.
+ type: dict
+ rename_to:
+ description:
+ - New name of the tablespace.
+ - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
+ type: str
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and run queries against.
+ type: str
+ aliases:
+ - login_db
+ trust_input:
+ description:
+ - If C(no), check whether values of parameters I(tablespace), I(location), I(owner),
+ I(rename_to), I(session_role), I(set) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via the parameters are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
+ support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
+ can not be run inside the transaction block.
+
+seealso:
+- name: PostgreSQL tablespaces
+ description: General information about PostgreSQL tablespaces.
+ link: https://www.postgresql.org/docs/current/manage-ag-tablespaces.html
+- name: CREATE TABLESPACE reference
+ description: Complete reference of the CREATE TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-createtablespace.html
+- name: ALTER TABLESPACE reference
+ description: Complete reference of the ALTER TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-altertablespace.html
+- name: DROP TABLESPACE reference
+ description: Complete reference of the DROP TABLESPACE command documentation.
+ link: https://www.postgresql.org/docs/current/sql-droptablespace.html
+
+author:
+- Flavien Chantelot (@Dorn-)
+- Antoine Levy-Lambert (@antoinell)
+- Andrew Klychkov (@Andersson007)
+
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Create a new tablespace called acme and set bob as its owner
+ community.general.postgresql_tablespace:
+ name: acme
+ owner: bob
+ location: /data/foo
+
+- name: Create a new tablespace called bar with tablespace options
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1
+
+- name: Reset random_page_cost option
+ community.general.postgresql_tablespace:
+ name: bar
+ set:
+ random_page_cost: reset
+
+- name: Rename the tablespace from bar to pcie_ssd
+ community.general.postgresql_tablespace:
+ name: bar
+ rename_to: pcie_ssd
+
+- name: Drop tablespace called bloat
+ community.general.postgresql_tablespace:
+ name: bloat
+ state: absent
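+
+# An illustrative extra example combining the owner, location, and set options
+# (names and paths are arbitrary):
+- name: Create tablespace fast_ssd owned by alice with custom planner costs
+ community.general.postgresql_tablespace:
+ name: fast_ssd
+ owner: alice
+ location: /data/fast_ssd
+ set:
+ random_page_cost: 1
+ seq_page_cost: 1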
+'''
+
+RETURN = r'''
+queries:
+ description: List of queries that the module tried to execute.
+ returned: always
+ type: list
+ sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
+tablespace:
+ description: Tablespace name.
+ returned: always
+ type: str
+ sample: 'ssd'
+owner:
+ description: Tablespace owner.
+ returned: always
+ type: str
+ sample: 'Bob'
+options:
+ description: Tablespace options.
+ returned: always
+ type: dict
+ sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
+location:
+ description: Path to the tablespace in the file system.
+ returned: always
+ type: str
+ sample: '/incredible/fast/ssd'
+newname:
+ description: New tablespace name
+ returned: if existent
+ type: str
+ sample: new_ssd
+state:
+ description: Tablespace state at the end of execution.
+ returned: always
+ type: str
+ sample: 'present'
+'''
+
+try:
+ from psycopg2 import __version__ as PSYCOPG2_VERSION
+ from psycopg2.extras import DictCursor
+ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
+ from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+ pg_quote_identifier,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+
+
+class PgTablespace(object):
+
+ """Class for working with PostgreSQL tablespaces.
+
+ Args:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+
+ Attrs:
+ module (AnsibleModule) -- object of AnsibleModule class
+ cursor (cursor) -- cursor object of psycopg2 library
+ name (str) -- name of the tablespace
+ exists (bool) -- flag indicating whether the tablespace exists in the DB
+ owner (str) -- tablespace owner
+ location (str) -- path to the tablespace directory in the file system
+ executed_queries (list) -- list of executed queries
+ new_name (str) -- new name for the tablespace
+ opt_not_supported (bool) -- flag indicating whether tablespace options are supported
+ """
+
+ def __init__(self, module, cursor, name):
+ self.module = module
+ self.cursor = cursor
+ self.name = name
+ self.exists = False
+ self.owner = ''
+ self.settings = {}
+ self.location = ''
+ self.executed_queries = []
+ self.new_name = ''
+ self.opt_not_supported = False
+ # Collect info:
+ self.get_info()
+
+ def get_info(self):
+ """Get tablespace information."""
+ # Check that spcoptions exists:
+ opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spcoptions'", add_to_executed=False)
+
+ # For 9.1 version and earlier:
+ location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
+ "WHERE table_name = 'pg_tablespace' "
+ "AND column_name = 'spclocation'", add_to_executed=False)
+ if location:
+ location = 'spclocation'
+ else:
+ location = 'pg_tablespace_location(t.oid)'
+
+ if not opt:
+ self.opt_not_supported = True
+ query = ("SELECT r.rolname, (SELECT Null), %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+ else:
+ query = ("SELECT r.rolname, t.spcoptions, %s "
+ "FROM pg_catalog.pg_tablespace AS t "
+ "JOIN pg_catalog.pg_roles AS r "
+ "ON t.spcowner = r.oid " % location)
+
+ res = exec_sql(self, query + "WHERE t.spcname = %(name)s",
+ query_params={'name': self.name}, add_to_executed=False)
+
+ if not res:
+ self.exists = False
+ return False
+
+ if res[0][0]:
+ self.exists = True
+ self.owner = res[0][0]
+
+ if res[0][1]:
+ # Options exist:
+ for i in res[0][1]:
+ i = i.split('=')
+ self.settings[i[0]] = i[1]
+
+ if res[0][2]:
+ # Location exists:
+ self.location = res[0][2]
+
+ def create(self, location):
+ """Create tablespace.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ location (str) -- tablespace directory path in the FS
+ """
+ query = ('CREATE TABLESPACE "%s" LOCATION \'%s\'' % (self.name, location))
+ return exec_sql(self, query, return_bool=True)
+
+ def drop(self):
+ """Drop tablespace.
+
+ Return True if success, otherwise, return False.
+ """
+ return exec_sql(self, 'DROP TABLESPACE "%s"' % self.name, return_bool=True)
+
+ def set_owner(self, new_owner):
+ """Set tablespace owner.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ new_owner (str) -- name of a new owner for the tablespace
+ """
+ if new_owner == self.owner:
+ return False
+
+ query = 'ALTER TABLESPACE "%s" OWNER TO "%s"' % (self.name, new_owner)
+ return exec_sql(self, query, return_bool=True)
+
+ def rename(self, newname):
+ """Rename tablespace.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ newname (str) -- new name for the tablespace"
+ """
+ query = 'ALTER TABLESPACE "%s" RENAME TO "%s"' % (self.name, newname)
+ self.new_name = newname
+ return exec_sql(self, query, return_bool=True)
+
+ def set_settings(self, new_settings):
+ """Set tablespace settings (options).
+
+ If some setting has been changed, set changed = True.
+ After the whole settings list has been handled, return changed.
+
+ args:
+ new_settings (list) -- list of new settings
+ """
+ # settings must be a dict {'key': 'value'}
+ if self.opt_not_supported:
+ return False
+
+ changed = False
+
+ # Apply new settings:
+ for i in new_settings:
+ if new_settings[i] == 'reset':
+ if i in self.settings:
+ changed = self.__reset_setting(i)
+ self.settings[i] = None
+
+ elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
+ changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
+
+ return changed
+
+ def __reset_setting(self, setting):
+ """Reset tablespace setting.
+
+ Return True if success, otherwise, return False.
+
+ args:
+ setting (str) -- name of the setting to reset
+ """
+ query = 'ALTER TABLESPACE "%s" RESET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+ def __set_setting(self, setting):
+ """Set tablespace setting.
+
+        Return True on success, otherwise return False.
+
+ args:
+ setting (str) -- string in format "setting_name = 'setting_value'"
+ """
+ query = 'ALTER TABLESPACE "%s" SET (%s)' % (self.name, setting)
+ return exec_sql(self, query, return_bool=True)
+
+
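+# Illustration (not part of the module): with tablespace name 'acme_space', a 'set' dict
+# such as {'seq_page_cost': 2, 'random_page_cost': 'reset'} makes set_settings() issue
+#   ALTER TABLESPACE "acme_space" SET (seq_page_cost = '2')
+#   ALTER TABLESPACE "acme_space" RESET (random_page_cost)
+# where 'reset' triggers a RESET only if the option is currently set on the tablespace.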
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ tablespace=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ location=dict(type='path', aliases=['path']),
+ owner=dict(type='str'),
+ set=dict(type='dict'),
+ rename_to=dict(type='str'),
+ db=dict(type='str', aliases=['login_db']),
+ session_role=dict(type='str'),
+ trust_input=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ tablespace = module.params["tablespace"]
+ state = module.params["state"]
+ location = module.params["location"]
+ owner = module.params["owner"]
+ rename_to = module.params["rename_to"]
+ settings = module.params["set"]
+ session_role = module.params["session_role"]
+ trust_input = module.params["trust_input"]
+
+ if state == 'absent' and (location or owner or rename_to or settings):
+ module.fail_json(msg="state=absent is mutually exclusive location, "
+ "owner, rename_to, and set")
+
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ if not settings:
+ settings_list = None
+ else:
+ settings_list = ['%s = %s' % (k, v) for k, v in iteritems(settings)]
+
+ check_input(module, tablespace, location, owner,
+ rename_to, session_role, settings_list)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params, autocommit=True)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ # Change autocommit to False if check_mode:
+ if module.check_mode:
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=False)
+ else:
+ db_connection.set_isolation_level(READ_COMMITTED)
+
+ # Set defaults:
+ autocommit = False
+ changed = False
+
+ ##############
+ # Create PgTablespace object and do main job:
+ tblspace = PgTablespace(module, cursor, tablespace)
+
+ # If tablespace exists with different location, exit:
+ if tblspace.exists and location and location != tblspace.location:
+ module.fail_json(msg="Tablespace '%s' exists with "
+ "different location '%s'" % (tblspace.name, tblspace.location))
+
+ # Create new tablespace:
+ if not tblspace.exists and state == 'present':
+ if rename_to:
+ module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
+
+ if not location:
+ module.fail_json(msg="'location' parameter must be passed with "
+ "state=present if the tablespace doesn't exist")
+
+ # Because CREATE TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.create(location)
+
+ # Drop non-existing tablespace:
+ elif not tblspace.exists and state == 'absent':
+ # Nothing to do:
+ module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
+
+ # Drop existing tablespace:
+ elif tblspace.exists and state == 'absent':
+ # Because DROP TABLESPACE can not be run inside the transaction block:
+ autocommit = True
+ if PSYCOPG2_VERSION >= '2.4.2':
+ db_connection.set_session(autocommit=True)
+ else:
+ db_connection.set_isolation_level(AUTOCOMMIT)
+
+ changed = tblspace.drop()
+
+ # Rename tablespace:
+ elif tblspace.exists and rename_to:
+ if tblspace.name != rename_to:
+ changed = tblspace.rename(rename_to)
+
+ if state == 'present':
+ # Refresh information:
+ tblspace.get_info()
+
+ # Change owner and settings:
+ if state == 'present' and tblspace.exists:
+ if owner:
+ changed = tblspace.set_owner(owner)
+
+ if settings:
+ changed = tblspace.set_settings(settings)
+
+ tblspace.get_info()
+
+ # Rollback if it's possible and check_mode:
+ if not autocommit:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ cursor.close()
+ db_connection.close()
+
+ # Make return values:
+ kw = dict(
+ changed=changed,
+ state='present',
+ tablespace=tblspace.name,
+ owner=tblspace.owner,
+ queries=tblspace.executed_queries,
+ options=tblspace.settings,
+ location=tblspace.location,
+ )
+
+ if state == 'present':
+ kw['state'] = 'present'
+
+ if tblspace.new_name:
+ kw['newname'] = tblspace.new_name
+
+ elif state == 'absent':
+ kw['state'] = 'absent'
+
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py
new file mode 100644
index 00000000..79c987a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user
+short_description: Create, alter, or remove a user (role) from a PostgreSQL server instance
+description:
+- Creates, alters, or removes a user (role) from a PostgreSQL server instance
+ ("cluster" in PostgreSQL terminology) and, optionally,
+ grants the user access to an existing database or tables.
+- A user is a role with login privilege.
+- You can also use it to grant or revoke user's privileges in a particular database.
+- You cannot remove a user while it still has any privileges granted to it in any database.
+- Set I(fail_on_user) to C(no) to make the module ignore failures when trying to remove a user.
+ In this case, the module reports if changes happened as usual and separately reports
+ whether the user has been removed or not.
+options:
+ name:
+ description:
+ - Name of the user (role) to add or remove.
+ type: str
+ required: true
+ aliases:
+ - user
+ password:
+ description:
+    - Set the user's password. Before Ansible 1.4 this was required.
+ - Password can be passed unhashed or hashed (MD5-hashed).
+ - An unhashed password is automatically hashed when saved into the
+ database if I(encrypted) is set, otherwise it is saved in
+ plain text format.
+ - When passing an MD5-hashed password, you must generate it with the format
+ C('str["md5"] + md5[ password + username ]'), resulting in a total of
+ 35 characters. An easy way to do this is
+ C(echo "md5`echo -n 'verysecretpasswordJOE' | md5sum | awk '{print $1}'`").
+ - Note that if the provided password string is already in MD5-hashed
+ format, then it is used as-is, regardless of I(encrypted) option.
+ type: str
+ db:
+ description:
+ - Name of database to connect to and where user's permissions are granted.
+ type: str
+ aliases:
+ - login_db
+ fail_on_user:
+ description:
+    - If C(yes), fails when the user (role) cannot be removed. Otherwise just logs and continues.
+ default: yes
+ type: bool
+ aliases:
+ - fail_on_role
+ priv:
+ description:
+ - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
+ you can define the user's privileges for the database ( allowed options - 'CREATE',
+ 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
+ for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
+ 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
+ C(table:SELECT) ). Mixed example of this string:
+ C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
+ type: str
+ role_attr_flags:
+ description:
+ - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
+ - Note that '[NO]CREATEUSER' is deprecated.
+    - To create a simple role to be used like a group, use the C(NOLOGIN) flag.
+ type: str
+ choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
+ '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
+ session_role:
+ description:
+ - Switch to session role after connecting.
+ - The specified session role must be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though the session role
+ were the one that had logged in originally.
+ type: str
+ state:
+ description:
+ - The user (role) state.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ encrypted:
+ description:
+ - Whether the password is stored hashed in the database.
+ - You can specify an unhashed password, and PostgreSQL ensures
+ the stored password is hashed when I(encrypted=yes) is set.
+ If you specify a hashed password, the module uses it as-is,
+ regardless of the setting of I(encrypted).
+ - "Note: Postgresql 10 and newer does not support unhashed passwords."
+ - Previous to Ansible 2.6, this was C(no) by default.
+ default: yes
+ type: bool
+ expires:
+ description:
+ - The date at which the user's password is to expire.
+    - If set to C('infinity'), the user's password never expires.
+ - Note that this value must be a valid SQL date and time type.
+ type: str
+ no_password_changes:
+ description:
+ - If C(yes), does not inspect the database for password changes.
+ Useful when C(pg_authid) is not accessible (such as in AWS RDS).
+ Otherwise, makes password changes as necessary.
+ default: no
+ type: bool
+ conn_limit:
+ description:
+ - Specifies the user (role) connection limit.
+ type: int
+ ssl_mode:
+ description:
+ - Determines how an SSL session is negotiated with the server.
+ - See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
+ - Default of C(prefer) matches libpq default.
+ type: str
+ default: prefer
+ choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
+ ca_cert:
+ description:
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
+ - If the file exists, verifies that the server's certificate is signed by one of these authorities.
+ type: str
+ aliases: [ ssl_rootcert ]
+ groups:
+ description:
+ - The list of groups (roles) that you want to grant to the user.
+ type: list
+ elements: str
+ comment:
+ description:
+ - Adds a comment on the user (equivalent to the C(COMMENT ON ROLE) statement).
+ type: str
+ version_added: '0.2.0'
+ trust_input:
+ description:
+ - If C(no), checks whether values of options I(name), I(password), I(privs), I(expires),
+ I(role_attr_flags), I(groups), I(comment), I(session_role) are potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections through the options are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+notes:
+- The module creates a user (role) with login privilege by default.
+ Use C(NOLOGIN) I(role_attr_flags) to change this behaviour.
+- If you specify C(PUBLIC) as the user (role), then the privilege changes apply to all users (roles).
+ You may not specify password or role_attr_flags when the C(PUBLIC) user is specified.
+- SCRAM-SHA-256-hashed passwords (SASL Authentication) require PostgreSQL version 10 or newer.
+  On earlier versions the whole hashed string is used as a password.
+- 'Working with SCRAM-SHA-256-hashed passwords, be sure you use the I(environment:) variable
+ C(PGOPTIONS: "-c password_encryption=scram-sha-256") (see the provided example).'
+- Supports ``check_mode``.
+seealso:
+- module: community.general.postgresql_privs
+- module: community.general.postgresql_membership
+- module: community.general.postgresql_owner
+- name: PostgreSQL database roles
+ description: Complete reference of the PostgreSQL database roles documentation.
+ link: https://www.postgresql.org/docs/current/user-manag.html
+- name: PostgreSQL SASL Authentication
+ description: Complete reference of the PostgreSQL SASL Authentication.
+ link: https://www.postgresql.org/docs/current/sasl-authentication.html
+author:
+- Ansible Core Team
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Connect to acme database, create django user, and grant access to database and products table
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: ceec4eif7ya
+ priv: "CONNECT/products:ALL"
+ expires: "Jan 31 2020"
+
+- name: Add a comment on django user
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ comment: This is a test user
+
+# Connect to default database, create rails user, set its password (MD5-hashed),
+# and grant privilege to create other databases and demote rails from super user status if user exists
+- name: Create rails user, set MD5-hashed password, grant privs
+ community.general.postgresql_user:
+ name: rails
+ password: md59543f1d82624df2b31672ec0f7050460
+ role_attr_flags: CREATEDB,NOSUPERUSER
+
+- name: Connect to acme database and remove test user privileges from there
+ community.general.postgresql_user:
+ db: acme
+ name: test
+ priv: "ALL/products:ALL"
+ state: absent
+ fail_on_user: no
+
+- name: Connect to test database, remove test user from cluster
+ community.general.postgresql_user:
+ db: test
+ name: test
+ priv: ALL
+ state: absent
+
+- name: Connect to acme database and set user's password with no expire date
+ community.general.postgresql_user:
+ db: acme
+ name: django
+ password: mysupersecretword
+ priv: "CONNECT/products:ALL"
+ expires: infinity
+
+# Example privileges string format
+# INSERT,UPDATE/table:SELECT/anothertable:ALL
+
+- name: Connect to test database and remove an existing user's password
+ community.general.postgresql_user:
+ db: test
+ user: test
+ password: ""
+
+- name: Create user test and grant group user_ro and user_rw to it
+ community.general.postgresql_user:
+ name: test
+ groups:
+ - user_ro
+ - user_rw
+
+# Create user with a cleartext password if it does not exist or update its password.
+# The password will be encrypted with SCRAM algorithm (available since PostgreSQL 10)
+- name: Create appclient user with SCRAM-hashed password
+ community.general.postgresql_user:
+ name: appclient
+ password: "secret123"
+ environment:
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+'''
+
+RETURN = r'''
+queries:
+ description: List of executed queries.
+ returned: always
+ type: list
+ sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
+'''
+
+import itertools
+import re
+import traceback
+from hashlib import md5, sha256
+import hmac
+from base64 import b64decode
+
+try:
+ import psycopg2
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ pg_quote_identifier,
+ SQLParseError,
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ get_conn_params,
+ PgMembership,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.six import iteritems
+import ansible_collections.community.general.plugins.module_utils.saslprep as saslprep
+
+try:
+    # pbkdf2_hmac is missing on Python 2.6; we can safely assume
+    # that a PostgreSQL 10 capable instance has at least Python 2.7 installed
+ from hashlib import pbkdf2_hmac
+ pbkdf2_found = True
+except ImportError:
+ pbkdf2_found = False
+
+
+FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
+FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
+
+SCRAM_SHA256_REGEX = r'^SCRAM-SHA-256\$(\d+):([A-Za-z0-9+\/=]+)\$([A-Za-z0-9+\/=]+):([A-Za-z0-9+\/=]+)$'
+
+VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
+ database=frozenset(
+ ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
+ )
+
+# map to cope with idiosyncrasies of SUPERUSER and LOGIN
+PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
+ CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
+ REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
+
+executed_queries = []
+
+
+class InvalidFlagsError(Exception):
+ pass
+
+
+class InvalidPrivsError(Exception):
+ pass
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+def user_exists(cursor, user):
+ # The PUBLIC user is a special case that is always there
+ if user == 'PUBLIC':
+ return True
+ query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
+ cursor.execute(query, {'user': user})
+ return cursor.rowcount > 0
+
+
+def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
+ """Create a new database user (role)."""
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ query_password_data = dict(password=password, expires=expires)
+ query = ['CREATE USER "%(user)s"' %
+ {"user": user}]
+ if password is not None and password != '':
+ query.append("WITH %(crypt)s" % {"crypt": encrypted})
+ query.append("PASSWORD %(password)s")
+ if expires is not None:
+ query.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+ query.append(role_attr_flags)
+ query = ' '.join(query)
+ executed_queries.append(query)
+ cursor.execute(query, query_password_data)
+ return True
+
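+# Illustration (not part of the module): user_add(cursor, 'alice', 'secret', 'LOGIN CREATEDB',
+# 'ENCRYPTED', None, 10) builds and executes (the password is passed as a bound parameter):
+#   CREATE USER "alice" WITH ENCRYPTED PASSWORD %(password)s CONNECTION LIMIT 10 LOGIN CREATEDB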
+
+def user_should_we_change_password(current_role_attrs, user, password, encrypted):
+ """Check if we should change the user's password.
+
+    Compare the proposed password with the existing one, comparing
+    hashes if encrypted. If we cannot access the current password, assume it needs changing.
+ """
+
+ if current_role_attrs is None:
+        # on some databases, e.g. AWS RDS instances, there is no access to
+ # the pg_authid relation to check the pre-existing password, so we
+ # just assume password is different
+ return True
+
+ # Do we actually need to do anything?
+ pwchanging = False
+ if password is not None:
+ # Empty password means that the role shouldn't have a password, which
+ # means we need to check if the current password is None.
+ if password == '':
+ if current_role_attrs['rolpassword'] is not None:
+ pwchanging = True
+
+ # SCRAM hashes are represented as a special object, containing hash data:
+ # `SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey>`
+ # for reference, see https://www.postgresql.org/docs/current/catalog-pg-authid.html
+ elif current_role_attrs['rolpassword'] is not None \
+ and pbkdf2_found \
+ and re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword']):
+
+ r = re.match(SCRAM_SHA256_REGEX, current_role_attrs['rolpassword'])
+ try:
+ # extract SCRAM params from rolpassword
+ it = int(r.group(1))
+ salt = b64decode(r.group(2))
+ server_key = b64decode(r.group(4))
+ # we'll never need `storedKey` as it is only used for server auth in SCRAM
+ # storedKey = b64decode(r.group(3))
+
+ # from RFC5802 https://tools.ietf.org/html/rfc5802#section-3
+ # SaltedPassword := Hi(Normalize(password), salt, i)
+ # ServerKey := HMAC(SaltedPassword, "Server Key")
+ normalized_password = saslprep.saslprep(to_text(password))
+ salted_password = pbkdf2_hmac('sha256', to_bytes(normalized_password), salt, it)
+
+ server_key_verifier = hmac.new(salted_password, digestmod=sha256)
+ server_key_verifier.update(b'Server Key')
+
+ if server_key_verifier.digest() != server_key:
+ pwchanging = True
+ except Exception:
+ # We assume the password is not scram encrypted
+ # or we cannot check it properly, e.g. due to missing dependencies
+ pwchanging = True
+
+ # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
+ # 3: The size of the 'md5' prefix
+    # When the provided password looks like an MD5 hash, the value of
+    # 'encrypted' is ignored.
+ elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
+ if password != current_role_attrs['rolpassword']:
+ pwchanging = True
+ elif encrypted == 'ENCRYPTED':
+ hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
+ if hashed_password != current_role_attrs['rolpassword']:
+ pwchanging = True
+
+ return pwchanging
+
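+# Illustration (not part of the module): for user 'joe' with password 'verysecretpassword',
+# the MD5 role password compared above is 'md5' + md5('verysecretpasswordjoe').hexdigest(),
+# i.e. the username is appended to the password before hashing (35 characters in total).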
+
+def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
+ """Change user password and/or attributes. Return True if changed, False otherwise."""
+ changed = False
+
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+ # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
+ # literal
+ if user == 'PUBLIC':
+ if password is not None:
+ module.fail_json(msg="cannot change the password for PUBLIC user")
+ elif role_attr_flags != '':
+ module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
+ else:
+ return False
+
+ # Handle passwords.
+ if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
+ # Select password and all flag-like columns in order to verify changes.
+ try:
+ select = "SELECT * FROM pg_authid where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError:
+ current_role_attrs = None
+ db_connection.rollback()
+
+ pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
+
+ if current_role_attrs is None:
+ try:
+                # AWS RDS instances do not allow users to access pg_authid,
+                # so try to get current_role_attrs from the pg_roles table
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes from pg_roles
+ current_role_attrs = cursor.fetchone()
+ except psycopg2.ProgrammingError as e:
+ db_connection.rollback()
+ module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
+
+ role_attr_flags_changing = False
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if expires is not None:
+ cursor.execute("SELECT %s::timestamptz;", (expires,))
+ expires_with_tz = cursor.fetchone()[0]
+ expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
+ else:
+ expires_changing = False
+
+ conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
+
+ if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' % {"user": user}]
+ if pwchanging:
+ if password != '':
+ alter.append("WITH %(crypt)s" % {"crypt": encrypted})
+ alter.append("PASSWORD %(password)s")
+ else:
+ alter.append("WITH PASSWORD NULL")
+ alter.append(role_attr_flags)
+ elif role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+ if expires is not None:
+ alter.append("VALID UNTIL %(expires)s")
+ if conn_limit is not None:
+ alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
+
+ query_password_data = dict(password=password, expires=expires)
+ try:
+ cursor.execute(' '.join(alter), query_password_data)
+ changed = True
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+ except psycopg2.NotSupportedError as e:
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+
+ elif no_password_changes and role_attr_flags != '':
+ # Grab role information from pg_roles instead of pg_authid
+ select = "SELECT * FROM pg_roles where rolname=%(user)s"
+ cursor.execute(select, {"user": user})
+ # Grab current role attributes.
+ current_role_attrs = cursor.fetchone()
+
+ role_attr_flags_changing = False
+
+ if role_attr_flags:
+ role_attr_flags_dict = {}
+ for r in role_attr_flags.split(' '):
+ if r.startswith('NO'):
+ role_attr_flags_dict[r.replace('NO', '', 1)] = False
+ else:
+ role_attr_flags_dict[r] = True
+
+ for role_attr_name, role_attr_value in role_attr_flags_dict.items():
+ if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
+ role_attr_flags_changing = True
+
+ if not role_attr_flags_changing:
+ return False
+
+ alter = ['ALTER USER "%(user)s"' %
+ {"user": user}]
+ if role_attr_flags:
+ alter.append('WITH %s' % role_attr_flags)
+
+ try:
+ cursor.execute(' '.join(alter))
+ except psycopg2.InternalError as e:
+ if e.pgcode == '25006':
+ # Handle errors due to read-only transactions indicated by pgcode 25006
+ # ERROR: cannot execute ALTER ROLE in a read-only transaction
+ changed = False
+ module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
+ return changed
+ else:
+ raise psycopg2.InternalError(e)
+
+ # Grab new role attributes.
+ cursor.execute(select, {"user": user})
+ new_role_attrs = cursor.fetchone()
+
+ # Detect any differences between current_ and new_role_attrs.
+ changed = current_role_attrs != new_role_attrs
+
+ return changed
+
+
+def user_delete(cursor, user):
+ """Try to remove a user. Returns True if successful otherwise False"""
+ cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
+ try:
+ query = 'DROP USER "%s"' % user
+ executed_queries.append(query)
+ cursor.execute(query)
+ except Exception:
+ cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return False
+
+ cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
+ return True
+
+
+def has_table_privileges(cursor, user, table, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_table_privileges(cursor, user, table)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def get_table_privileges(cursor, user, table):
+ if '.' in table:
+ schema, table = table.split('.', 1)
+ else:
+ schema = 'public'
+ query = ("SELECT privilege_type FROM information_schema.role_table_grants "
+ "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
+ cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
+ return frozenset([x[0] for x in cursor.fetchall()])
+
+
+def grant_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'GRANT %s ON TABLE %s TO "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
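+# Illustration (not part of the module): grant_table_privileges(cursor, 'django', 'public.products',
+# {'SELECT', 'INSERT'}) executes something like
+#   GRANT SELECT, INSERT ON TABLE "public"."products" TO "django"
+# with the exact identifier quoting produced by pg_quote_identifier().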
+
+def revoke_table_privileges(cursor, user, table, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
+ privs, pg_quote_identifier(table, 'table'), user)
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def get_database_privileges(cursor, user, db):
+ priv_map = {
+ 'C': 'CREATE',
+ 'T': 'TEMPORARY',
+ 'c': 'CONNECT',
+ }
+ query = 'SELECT datacl FROM pg_database WHERE datname = %s'
+ cursor.execute(query, (db,))
+ datacl = cursor.fetchone()[0]
+ if datacl is None:
+ return set()
+ r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
+ if r is None:
+ return set()
+ o = set()
+ for v in r.group(1):
+ o.add(priv_map[v])
+ return normalize_privileges(o, 'database')
+
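+# Illustration (not part of the module): a pg_database.datacl value such as
+# '{=Tc/postgres,django=CTc/postgres}' yields the raw letters 'CTc' for user 'django',
+# which priv_map expands to {'CREATE', 'TEMPORARY', 'CONNECT'}.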
+
+def has_database_privileges(cursor, user, db, privs):
+ """
+ Return the difference between the privileges that a user already has and
+ the privileges that they desire to have.
+
+ :returns: tuple of:
+ * privileges that they have and were requested
+ * privileges they currently hold but were not requested
+ * privileges requested that they do not hold
+ """
+ cur_privs = get_database_privileges(cursor, user, db)
+ have_currently = cur_privs.intersection(privs)
+ other_current = cur_privs.difference(privs)
+ desired = privs.difference(cur_privs)
+ return (have_currently, other_current, desired)
+
+
+def grant_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'GRANT %s ON DATABASE %s TO "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_database_privileges(cursor, user, db, privs):
+ # Note: priv escaped by parse_privs
+ privs = ', '.join(privs)
+ if user == "PUBLIC":
+ query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
+ privs, pg_quote_identifier(db, 'database'))
+ else:
+ query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
+ privs, pg_quote_identifier(db, 'database'), user)
+
+ executed_queries.append(query)
+ cursor.execute(query)
+
+
+def revoke_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ revoke_funcs = dict(table=revoke_table_privileges,
+ database=revoke_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested to be removed are
+ # currently granted to the user
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[0]:
+ revoke_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def grant_privileges(cursor, user, privs):
+ if privs is None:
+ return False
+
+ grant_funcs = dict(table=grant_table_privileges,
+ database=grant_database_privileges)
+ check_funcs = dict(table=has_table_privileges,
+ database=has_database_privileges)
+
+ changed = False
+ for type_ in privs:
+ for name, privileges in iteritems(privs[type_]):
+ # Check that any of the privileges requested for the user are
+ # currently missing
+ differences = check_funcs[type_](cursor, user, name, privileges)
+ if differences[2]:
+ grant_funcs[type_](cursor, user, name, privileges)
+ changed = True
+ return changed
+
+
+def parse_role_attrs(cursor, role_attr_flags):
+ """
+ Parse role attributes string for user creation.
+ Format:
+
+ attributes[,attributes,...]
+
+ Where:
+
+ attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
+ "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
+ "[NO]BYPASSRLS" ]
+
+ Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
+ Note: "[NO]CREATEUSER" role attribute is deprecated.
+
+ """
+ flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
+
+ valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
+ valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
+
+ if not flags.issubset(valid_flags):
+ raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
+ ' '.join(flags.difference(valid_flags)))
+
+ return ' '.join(flags)
+
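+# Illustration (not part of the module): parse_role_attrs(cursor, 'CREATEDB,nologin') returns
+# the upper-cased flags joined by spaces, e.g. 'CREATEDB NOLOGIN' (order is not guaranteed,
+# as the flags are kept in a frozenset); an unknown flag raises InvalidFlagsError.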
+
+def normalize_privileges(privs, type_):
+ new_privs = set(privs)
+ if 'ALL' in new_privs:
+ new_privs.update(VALID_PRIVS[type_])
+ new_privs.remove('ALL')
+ if 'TEMP' in new_privs:
+ new_privs.add('TEMPORARY')
+ new_privs.remove('TEMP')
+
+ return new_privs
+
+
+def parse_privs(privs, db):
+ """
+ Parse privilege string to determine permissions for database db.
+ Format:
+
+ privileges[/privileges/...]
+
+ Where:
+
+ privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
+ TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
+ """
+ if privs is None:
+ return privs
+
+ o_privs = {
+ 'database': {},
+ 'table': {}
+ }
+ for token in privs.split('/'):
+ if ':' not in token:
+ type_ = 'database'
+ name = db
+ priv_set = frozenset(x.strip().upper()
+ for x in token.split(',') if x.strip())
+ else:
+ type_ = 'table'
+ name, privileges = token.split(':', 1)
+ priv_set = frozenset(x.strip().upper()
+ for x in privileges.split(',') if x.strip())
+
+ if not priv_set.issubset(VALID_PRIVS[type_]):
+ raise InvalidPrivsError('Invalid privs specified for %s: %s' %
+ (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
+
+ priv_set = normalize_privileges(priv_set, type_)
+ o_privs[type_][name] = priv_set
+
+ return o_privs
+
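+# Illustration (not part of the module): parse_privs('CONNECT/products:SELECT,INSERT', 'acme')
+# returns {'database': {'acme': {'CONNECT'}}, 'table': {'products': {'SELECT', 'INSERT'}}};
+# normalize_privileges() expands 'ALL' and maps 'TEMP' to 'TEMPORARY' where they are used.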
+
+def get_valid_flags_by_version(cursor):
+ """
+    Some role attributes were introduced in particular PostgreSQL versions. We want to
+    compile a list of flags valid for the current PostgreSQL version.
+ """
+ current_version = cursor.connection.server_version
+
+ return [
+ flag
+ for flag, version_introduced in FLAGS_BY_VERSION.items()
+ if current_version >= version_introduced
+ ]
+
+
+def get_comment(cursor, user):
+ """Get user's comment."""
+ query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
+ "FROM pg_catalog.pg_roles r "
+ "WHERE r.rolname = %(user)s")
+ cursor.execute(query, {'user': user})
+ return cursor.fetchone()[0]
+
+
+def add_comment(cursor, user, comment):
+ """Add comment on user."""
+ if comment != get_comment(cursor, user):
+ query = 'COMMENT ON ROLE "%s" IS ' % user
+ cursor.execute(query + '%(comment)s', {'comment': comment})
+ executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
+ return True
+ else:
+ return False
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ user=dict(type='str', required=True, aliases=['name']),
+ password=dict(type='str', default=None, no_log=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priv=dict(type='str', default=None),
+ db=dict(type='str', default='', aliases=['login_db']),
+ fail_on_user=dict(type='bool', default=True, aliases=['fail_on_role']),
+ role_attr_flags=dict(type='str', default=''),
+ encrypted=dict(type='bool', default=True),
+ no_password_changes=dict(type='bool', default=False, no_log=False),
+ expires=dict(type='str', default=None),
+ conn_limit=dict(type='int', default=None),
+ session_role=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ comment=dict(type='str', default=None),
+ trust_input=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ user = module.params["user"]
+ password = module.params["password"]
+ state = module.params["state"]
+ fail_on_user = module.params["fail_on_user"]
+ if module.params['db'] == '' and module.params["priv"] is not None:
+ module.fail_json(msg="privileges require a database to be specified")
+ privs = parse_privs(module.params["priv"], module.params["db"])
+ no_password_changes = module.params["no_password_changes"]
+ if module.params["encrypted"]:
+ encrypted = "ENCRYPTED"
+ else:
+ encrypted = "UNENCRYPTED"
+ expires = module.params["expires"]
+ conn_limit = module.params["conn_limit"]
+ role_attr_flags = module.params["role_attr_flags"]
+ groups = module.params["groups"]
+ if groups:
+ groups = [e.strip() for e in groups]
+ comment = module.params["comment"]
+ session_role = module.params['session_role']
+
+ trust_input = module.params['trust_input']
+ if not trust_input:
+ # Check input for potentially dangerous elements:
+ check_input(module, user, password, privs, expires,
+ role_attr_flags, groups, comment, session_role)
+
+ conn_params = get_conn_params(module, module.params, warn_db_default=False)
+ db_connection = connect_to_db(module, conn_params)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ try:
+ role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
+ except InvalidFlagsError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ kw = dict(user=user)
+ changed = False
+ user_removed = False
+
+ if state == "present":
+ if user_exists(cursor, user):
+ try:
+ changed = user_alter(db_connection, module, user, password,
+ role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ else:
+ try:
+ changed = user_add(cursor, user, password,
+ role_attr_flags, encrypted, expires, conn_limit)
+ except psycopg2.ProgrammingError as e:
+ module.fail_json(msg="Unable to add user with given requirement "
+ "due to : %s" % to_native(e),
+ exception=traceback.format_exc())
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ try:
+ changed = grant_privileges(cursor, user, privs) or changed
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if groups:
+ target_roles = []
+ target_roles.append(user)
+ pg_membership = PgMembership(module, cursor, groups, target_roles)
+ changed = pg_membership.grant() or changed
+ executed_queries.extend(pg_membership.executed_queries)
+
+ if comment is not None:
+ try:
+ changed = add_comment(cursor, user, comment) or changed
+ except Exception as e:
+ module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ else:
+ if user_exists(cursor, user):
+ if module.check_mode:
+ changed = True
+ kw['user_removed'] = True
+ else:
+ try:
+ changed = revoke_privileges(cursor, user, privs)
+ user_removed = user_delete(cursor, user)
+ except SQLParseError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ changed = changed or user_removed
+ if fail_on_user and not user_removed:
+ msg = "Unable to remove user"
+ module.fail_json(msg=msg)
+ kw['user_removed'] = user_removed
+
+ if changed:
+ if module.check_mode:
+ db_connection.rollback()
+ else:
+ db_connection.commit()
+
+ kw['changed'] = changed
+ kw['queries'] = executed_queries
+ module.exit_json(**kw)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py
new file mode 100644
index 00000000..9d03408e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/postgresql_user_obj_stat_info.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: postgresql_user_obj_stat_info
+short_description: Gather statistics about PostgreSQL user objects
+description:
+- Gathers statistics about PostgreSQL user objects.
+version_added: '0.2.0'
+options:
+ filter:
+ description:
+    - Limit the collected information with a comma-separated string or YAML list.
+ - Allowable values are C(functions), C(indexes), C(tables).
+ - By default, collects all subsets.
+ - Unsupported values are ignored.
+ type: list
+ elements: str
+ schema:
+ description:
+    - Restrict the output to a certain schema.
+ type: str
+ db:
+ description:
+    - Name of the database to connect to.
+ type: str
+ aliases:
+ - login_db
+ session_role:
+ description:
+ - Switch to session_role after connecting. The specified session_role must
+ be a role that the current login_user is a member of.
+ - Permissions checking for SQL commands is carried out as though
+ the session_role were the one that had logged in originally.
+ type: str
+ trust_input:
+ description:
+    - If C(no), check whether the value of I(session_role) is potentially dangerous.
+ - It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+
+notes:
+- The returned C(size) and C(total_size) values are presented in bytes.
+- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
+ See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
+seealso:
+- module: community.general.postgresql_info
+- module: community.general.postgresql_ping
+- name: PostgreSQL statistics collector reference
+ description: Complete reference of the PostgreSQL statistics collector documentation.
+ link: https://www.postgresql.org/docs/current/monitoring-stats.html
+author:
+- Andrew Klychkov (@Andersson007)
+- Thomas O'Donnell (@andytom)
+extends_documentation_fragment:
+- community.general.postgres
+
+'''
+
+EXAMPLES = r'''
+- name: Collect information about all supported user objects of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+
+- name: Collect information about all supported user objects in the custom schema of the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ schema: custom
+
+- name: Collect information about user tables and indexes in the acme database
+ community.general.postgresql_user_obj_stat_info:
+ db: acme
+ filter: tables, indexes
+'''
+
+RETURN = r'''
+indexes:
+  description: User index statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
+tables:
+ description: User table statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
+functions:
+ description: User function statistics.
+ returned: always
+ type: dict
+ sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
+'''
+
+try:
+ from psycopg2.extras import DictCursor
+except ImportError:
+ # psycopg2 is checked by connect_to_db()
+ # from ansible.module_utils.postgres
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.database import (
+ check_input,
+)
+from ansible_collections.community.general.plugins.module_utils.postgres import (
+ connect_to_db,
+ exec_sql,
+ get_conn_params,
+ postgres_common_argument_spec,
+)
+from ansible.module_utils.six import iteritems
+
+
+# ===========================================
+# PostgreSQL module specific support methods.
+#
+
+
+class PgUserObjStatInfo():
+ """Class to collect information about PostgreSQL user objects.
+
+ Args:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+
+ Attributes:
+ module (AnsibleModule): Object of AnsibleModule class.
+ cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
+ executed_queries (list): List of executed queries.
+ info (dict): Statistics dictionary.
+ obj_func_mapping (dict): Mapping of object types to corresponding functions.
+ schema (str): Name of a schema to restrict stat collecting.
+ """
+
+ def __init__(self, module, cursor):
+ self.module = module
+ self.cursor = cursor
+ self.info = {
+ 'functions': {},
+ 'indexes': {},
+ 'tables': {},
+ }
+ self.obj_func_mapping = {
+ 'functions': self.get_func_stat,
+ 'indexes': self.get_idx_stat,
+ 'tables': self.get_tbl_stat,
+ }
+ self.schema = None
+
+ def collect(self, filter_=None, schema=None):
+ """Collect statistics information of user objects.
+
+ Kwargs:
+ filter_ (list): List of subsets which need to be collected.
+ schema (str): Restrict stat collecting by certain schema.
+
+ Returns:
+ ``self.info``.
+ """
+ if schema:
+ self.set_schema(schema)
+
+ if filter_:
+ for obj_type in filter_:
+ obj_type = obj_type.strip()
+ obj_func = self.obj_func_mapping.get(obj_type)
+
+ if obj_func is not None:
+ obj_func()
+ else:
+ self.module.warn("Unknown filter option '%s'" % obj_type)
+
+ else:
+ for obj_func in self.obj_func_mapping.values():
+ obj_func()
+
+ return self.info
+
+ def get_func_stat(self):
+ """Get function statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_functions"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='functions',
+ schema_key='schemaname',
+ name_key='funcname')
+
+ def get_idx_stat(self):
+ """Get index statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_indexes"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='indexes',
+ schema_key='schemaname',
+ name_key='indexrelname')
+
+ def get_tbl_stat(self):
+ """Get table statistics and fill out self.info dictionary."""
+ query = "SELECT * FROM pg_stat_user_tables"
+ if self.schema:
+ query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
+
+ result = exec_sql(self, query, query_params=(self.schema,),
+ add_to_executed=False)
+
+ if not result:
+ return
+
+ self.__fill_out_info(result,
+ info_key='tables',
+ schema_key='schemaname',
+ name_key='relname')
+
+ def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
+ # Convert result to list of dicts to handle it easier:
+ result = [dict(row) for row in result]
+
+ for elem in result:
+            # Add schema name as a key if not present:
+ if not self.info[info_key].get(elem[schema_key]):
+ self.info[info_key][elem[schema_key]] = {}
+
+ # Add object name key as a subkey
+            # (names must be unique within a schema, so no additional checks are needed):
+ self.info[info_key][elem[schema_key]][elem[name_key]] = {}
+
+            # Add the remaining attributes of the object:
+ for key, val in iteritems(elem):
+ if key not in (schema_key, name_key):
+ self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
+
+ if info_key in ('tables', 'indexes'):
+ schemaname = elem[schema_key]
+ if self.schema:
+ schemaname = self.schema
+
+ relname = '%s.%s' % (schemaname, elem[name_key])
+
+ result = exec_sql(self, "SELECT pg_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
+
+ if info_key == 'tables':
+ result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
+ query_params=(relname,),
+ add_to_executed=False)
+
+ self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
+
+ def set_schema(self, schema):
+ """If schema exists, sets self.schema, otherwise fails."""
+ query = ("SELECT 1 FROM information_schema.schemata "
+ "WHERE schema_name = %s")
+ result = exec_sql(self, query, query_params=(schema,),
+ add_to_executed=False)
+
+ if result and result[0][0]:
+ self.schema = schema
+ else:
+ self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+ argument_spec = postgres_common_argument_spec()
+ argument_spec.update(
+ db=dict(type='str', aliases=['login_db']),
+ filter=dict(type='list', elements='str'),
+ session_role=dict(type='str'),
+ schema=dict(type='str'),
+ trust_input=dict(type="bool", default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ filter_ = module.params["filter"]
+ schema = module.params["schema"]
+
+ if not module.params["trust_input"]:
+ check_input(module, module.params['session_role'])
+
+ # Connect to DB and make cursor object:
+ pg_conn_params = get_conn_params(module, module.params)
+ # We don't need to commit anything, so, set it to False:
+ db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
+ cursor = db_connection.cursor(cursor_factory=DictCursor)
+
+ ############################
+ # Create object and do work:
+ pg_obj_info = PgUserObjStatInfo(module, cursor)
+
+ info_dict = pg_obj_info.collect(filter_, schema)
+
+ # Clean up:
+ cursor.close()
+ db_connection.close()
+
+ # Return information:
+ module.exit_json(**info_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py
new file mode 100644
index 00000000..90798672
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+  - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, it can optionally wait
+    for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The name of the virtual machine.
+ type: str
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ datacenter:
+ description:
+    - The datacenter in which to provision this virtual machine.
+ type: str
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ default: 2
+ type: int
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ default: 2048
+ type: int
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ type: str
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ type: int
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ type: str
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+    - List of instance IDs, currently only used when state='absent' to remove instances.
+ type: list
+ count:
+ description:
+ - The number of virtual machines to create.
+ type: int
+ default: 1
+ location:
+ description:
+ - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+ type: str
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ type: bool
+ default: 'no'
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ type: int
+ default: 1
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ subscription_password:
+ description:
+    - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'yes'
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - create or terminate instances
+ - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ type: str
+ default: 'present'
+ disk_type:
+ description:
+ - the type of disk to be allocated.
+ type: str
+ choices: [SSD, HDD]
+ default: HDD
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details; see the subscription_user and subscription_password options.
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+ community.general.profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+- name: Remove virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+- name: Start virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+- name: Stop virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ assign_public_ip = module.params.get('assign_public_ip')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+ for lan in lans['items']:
+ if lan['properties']['public']:
+ public_found = True
+ lan = lan['id']
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+ # e.message does not exist on Python 3 exceptions; use the stringified exception.
+ if to_native(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
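+ # Illustrative note: with name='web%02d.stackpointcloud.com' and count=3 the
+ # enumeration above yields web01..., web02..., web03...; a name without a %-style
+ # placeholder is first rewritten to '<name>%d' by the TypeError handler.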
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+ True if the virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+ return (changed)
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required ' +
+ 'for removing machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
new file mode 100644
index 00000000..e3ba1d49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+ on profitbricks >= 1.0.0
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ type: str
+ description:
+ description:
+ - The description of the virtual datacenter.
+ type: str
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ type: str
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate datacenters.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a datacenter
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+
+- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+ state: absent
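+
+# Illustrative addition (credential values are assumptions): the optional description
+# and location options, and inline credentials, can also be supplied.
+- name: Create a datacenter in Frankfurt with inline credentials
+ community.general.profitbricks_datacenter:
+ name: Tardis Two
+ description: Managed by Ansible
+ location: de/fra
+ subscription_user: you@example.com
+ subscription_password: secretpassword
+ wait_timeout: 500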
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
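+# (Descriptive note: this pattern matches canonical UUIDs such as
+# 'a3eae284-a2fe-11e4-b187-5f1f641608c8'; remove_datacenter below uses it to decide
+# whether the supplied name is already a datacenter ID or must be resolved by name.)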
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+ if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600, type='int'),
+ state=dict(default='present'), # @TODO add choices
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
new file mode 100644
index 00000000..49941241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC.
+description:
+ - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ type: str
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is required on delete, but optional on create.
+ - If not specified, it defaults to a value based on UUID4.
+ type: str
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: true
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: true
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+- name: Remove a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
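+
+# Illustrative addition: subscription_user and subscription_password are required by
+# this module (see the argument spec below); the credential values here are
+# assumptions.
+- name: Create a NIC with explicit credentials
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ subscription_user: you@example.com
+ subscription_password: secretpassword
+ wait_timeout: 500
+ state: present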
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _make_default_name():
+ return str(uuid.uuid4()).replace('-', '')[:10]
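+# (Illustrative: returns a ten-character string such as '1a2b3c4d5e', the first ten
+# characters of a dash-free UUID4; it is used below when no NIC name is supplied.)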
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(required=True),
+ server=dict(required=True),
+ name=dict(),
+ lan=dict(),
+ subscription_user=dict(required=True),
+ subscription_password=dict(required=True, no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ ),
+ required_if=(
+ ('state', 'absent', ['name']),
+ ('state', 'present', ['lan']),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
new file mode 100644
index 00000000..a63cbcdd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+ - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ type: str
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ type: str
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ required: false
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ required: false
+ disk_type:
+ description:
+ - The disk type of the volume.
+ type: str
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ type: str
+ required: false
+ default: UNKNOWN
+ count:
+ description:
+ - The number of volumes you wish to create.
+ type: int
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created volumes.
+ default: yes
+ type: bool
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ type: list
+ required: false
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the volume to be created before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - create or terminate volumes
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create multiple volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+- name: Remove Volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
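+
+# Illustrative addition: the argument spec below also accepts a server option; when it
+# is set, _attach_volume attaches each newly created volume to that server. The
+# values here are assumptions.
+- name: Create a volume and attach it to an existing server
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ server: web001.stackpointcloud.com
+ name: data%02d
+ size: 20
+ disk_type: SSD
+ wait_timeout: 500
+ state: present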
+'''
+
+import re
+import time
+import traceback
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils._text import to_native
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+ # e.message does not exist on Python 3 exceptions; use the stringified exception.
+ if to_native(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+ This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+ if uuid_match.match(n):
+ _delete_volume(module, profitbricks, datacenter, n)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None, no_log=True),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for removing a volume.')
+
+ try:
+ (changed) = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+ (volume_dict_array) = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
new file mode 100644
index 00000000..72f03e67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+ - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ server:
+ description:
+ - The name of the server to which you wish to attach the volume, or from which to detach it.
+ type: str
+ volume:
+ description:
+ - The volume name or ID.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+- name: Detach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
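+
+# Illustrative addition: although not marked required in the documentation above,
+# main() below fails unless datacenter, server, volume and the subscription
+# credentials are all provided. The credential values here are assumptions.
+- name: Attach a volume with explicit credentials
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ subscription_user: you@example.com
+ subscription_password: secretpassword
+ state: present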
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py
new file mode 100644
index 00000000..140d56f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ vmid:
+ description:
+ - the instance id
+ - if not set, the next available VM ID will be fetched from ProxmoxAPI.
+ - if not set, it will be fetched from ProxmoxAPI based on the hostname
+ type: str
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ type: bool
+ default: 'no'
+ node:
+ description:
+ - Proxmox VE node on which the new VM will be created
+ - required only for C(state=present)
+ - for other states it will be autodiscovered
+ type: str
+ pool:
+ description:
+ - Proxmox VE resource pool
+ type: str
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ type: str
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ type: str
+ ostemplate:
+ description:
+ - the template for VM creation
+ - required only for C(state=present)
+ type: str
+ disk:
+ description:
+ - hard disk size in GB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(3). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpus:
+ description:
+ - number of allocated cpus for the instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ memory:
+ description:
+ - memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ swap:
+ description:
+ - swap memory size in MB for instance
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ netif:
+ description:
+ - specifies network interfaces for the container. As a hash/dictionary defining interfaces.
+ type: dict
+ mounts:
+ description:
+ - specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points
+ type: dict
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ type: str
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ cpuunits:
+ description:
+ - CPU weight for a VM
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ type: str
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ type: str
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+ - with C(state=present) the force option allows you to overwrite an existing container
+ - with states C(stopped), C(restarted) it allows you to force-stop the instance
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the instance
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+ pubkey:
+ description:
+ - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2; it is ignored for earlier versions
+ type: str
+ unprivileged:
+ description:
+ - Indicate if the container should be unprivileged
+ type: bool
+ default: 'no'
+ description:
+ description:
+ - Specify the description for the container. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ version_added: '0.2.0'
+ hookscript:
+ description:
+ - Script that will be executed during various steps in the container's lifetime.
+ type: str
+ version_added: '0.2.0'
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when a
+ user expects different behavior from Proxmox by default or sets options which cause
+ problems when they have been set.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+notes:
+ - Requires the proxmoxer and requests modules on the host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = r'''
+- name: Create new container with minimal options
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with hookscript and description
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ hookscript: 'local:snippets/vm_hook.sh'
+ description: created with ansible
+
+- name: Create new container automatically selecting the next available vmid.
+ community.general.proxmox:
+ node: 'uk-mc02'
+ api_user: 'root@pam'
+ api_password: '1q2w3e'
+ api_host: 'node1'
+ password: '123456'
+ hostname: 'example.org'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options with force (it will overwrite an existing container)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: yes
+
+- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ cores: 2
+
+- name: Start container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+- name: >
+ Start container with mount. You should enter a 90-second timeout because servers
+ with additional disks take longer to boot
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+ timeout: 90
+
+- name: Stop container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+- name: Stop container with force
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ force: yes
+ state: stopped
+
+- name: Restart container (a stopped or mounted container cannot be restarted)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: restarted
+
+- name: Remove container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
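+
+# The following task is a hypothetical sketch, not part of the original examples:
+# it shows the pubkey and unprivileged options; the key value is illustrative only.
+- name: Create new unprivileged container with an SSH public key
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ pubkey: 'ssh-ed25519 AAAA... user@example'
+ unprivileged: yes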
+'''
+
+import os
+import time
+import traceback
+from distutils.version import LooseVersion
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+VZ_TYPE = None
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, hostname):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if 'name' in vm and vm['name'] == hostname]
+
+
+def get_instance(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def content_check(proxmox, node, ostemplate, template_store):
+ return [True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ if VZ_TYPE == 'lxc':
+ kwargs['cpulimit'] = cpus
+ kwargs['rootfs'] = disk
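+ # netif and mounts arrive as dicts keyed by net[n]/mp[n]; flatten them into top-level API parameters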
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ if 'pubkey' in kwargs:
+ if proxmox_version(proxmox) >= LooseVersion('4.2'):
+ kwargs['ssh-public-keys'] = kwargs['pubkey']
+ del kwargs['pubkey']
+ else:
+ kwargs['cpus'] = cpus
+ kwargs['disk'] = disk
+
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
+ while timeout:
+ if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the VM to be unmounted. Last line in task before timeout: %s' %
+ proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ vmid=dict(required=False),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ pool=dict(),
+ password=dict(no_log=True),
+ hostname=dict(),
+ ostemplate=dict(),
+ disk=dict(type='str'),
+ cores=dict(type='int'),
+ cpus=dict(type='int'),
+ memory=dict(type='int'),
+ swap=dict(type='int'),
+ netif=dict(type='dict'),
+ mounts=dict(type='dict'),
+ ip_address=dict(),
+ onboot=dict(type='bool'),
+ storage=dict(default='local'),
+ cpuunits=dict(type='int'),
+ nameserver=dict(),
+ searchdomain=dict(),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ pubkey=dict(type='str', default=None),
+ unprivileged=dict(type='bool', default=False),
+ description=dict(type='str'),
+ hookscript=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
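+ # In compatibility mode, fill in the historical default values for any option the user did not set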
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ disk="3",
+ cores=1,
+ cpus=1,
+ memory=512,
+ swap=0,
+ onboot=False,
+ cpuunits=1000,
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
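+ # Build authentication arguments: use an API token when both its ID and secret are given,
+ # otherwise fall back to api_password or the PROXMOX_PASSWORD environment variable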
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set the api_password parameter or use the PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global VZ_TYPE
+ VZ_TYPE = 'openvz' if proxmox_version(proxmox) < LooseVersion('4.0') else 'lxc'
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = get_nextvmid(module, proxmox)
+ elif not vmid and hostname:
+ hosts = get_vmid(proxmox, hostname)
+ if len(hosts) == 0:
+ module.fail_json(msg="Vmid could not be fetched => Hostname doesn't exist (action: %s)" % state)
+ vmid = hosts[0]
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
+ elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' not exists in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+ module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ cores=module.params['cores'],
+ pool=module.params['pool'],
+ password=module.params['password'],
+ hostname=module.params['hostname'],
+ ostemplate=module.params['ostemplate'],
+ netif=module.params['netif'],
+ mounts=module.params['mounts'],
+ ip_address=module.params['ip_address'],
+ onboot=int(module.params['onboot']),
+ cpuunits=module.params['cpuunits'],
+ nameserver=module.params['nameserver'],
+ searchdomain=module.params['searchdomain'],
+ force=int(module.params['force']),
+ pubkey=module.params['pubkey'],
+ unprivileged=int(module.params['unprivileged']),
+ description=module.params['description'],
+ hookscript=module.params['hookscript'])
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if (getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped' or
+ getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted'):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if (stop_instance(module, proxmox, vm, vmid, timeout, force=module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout)):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
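+ # Issue the delete request and poll the task status until it succeeds or the timeout expires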
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
+ proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout -= 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for the VM to be removed. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
new file mode 100644
index 00000000..fc7c37c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern (@Aversiste) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_domain_info
+short_description: Retrieve information about one or more Proxmox VE domains
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE domains.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing domains
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_domains
+
+- name: Retrieve information about the pve domain
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_domain_pve
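+
+# A hypothetical follow-up, not part of the original examples: print the realm names
+# from the result registered by the first task above; the filter chain is illustrative.
+- name: Show the names of all existing realms
+ ansible.builtin.debug:
+ msg: "{{ proxmox_domains.proxmox_domains | map(attribute='realm') | list }}"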
+'''
+
+
+RETURN = '''
+proxmox_domains:
+ description: List of authentication domains.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the realm.
+ returned: on success
+ type: str
+ realm:
+ description: Realm name.
+ returned: on success
+ type: str
+ type:
+ description: Realm type.
+ returned: on success
+ type: str
+ digest:
+ description: Realm hash.
+ returned: on success, can be absent
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
+ def get_domain(self, realm):
+ try:
+ domain = self.proxmox_api.access.domains.get(realm)
+ except Exception:
+ self.module.fail_json(msg="Domain '%s' does not exist" % realm)
+ domain['realm'] = realm
+ return domain
+
+ def get_domains(self):
+ domains = self.proxmox_api.access.domains.get()
+ return domains
+
+
+def proxmox_domain_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ domain_info_args = proxmox_domain_info_argument_spec()
+ module_args.update(domain_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxDomainInfoAnsible(module)
+ domain = module.params['domain']
+
+ if domain:
+ domains = [proxmox.get_domain(realm=domain)]
+ else:
+ domains = proxmox.get_domains()
+ result['proxmox_domains'] = domains
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
new file mode 100644
index 00000000..063d28e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_group_info
+short_description: Retrieve information about one or more Proxmox VE groups
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE groups.
+options:
+ group:
+ description:
+ - Restrict results to a specific group.
+ aliases: ['groupid', 'name']
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+
+EXAMPLES = '''
+- name: List existing groups
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_groups
+
+- name: Retrieve information about the admin group
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ group: admin
+ register: proxmox_group_admin
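+
+# A hypothetical follow-up, not part of the original examples: list the members of the
+# admin group from the result registered by the task above.
+- name: Show the members of the admin group
+ ansible.builtin.debug:
+ msg: "{{ proxmox_group_admin.proxmox_groups[0].users | default([]) }}"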
+'''
+
+
+RETURN = '''
+proxmox_groups:
+ description: List of groups.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the group.
+ returned: on success, can be absent
+ type: str
+ groupid:
+ description: Group name.
+ returned: on success
+ type: str
+ users:
+ description: List of users in the group.
+ returned: on success
+ type: list
+ elements: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
+ def get_group(self, groupid):
+ try:
+ group = self.proxmox_api.access.groups.get(groupid)
+ except Exception:
+ self.module.fail_json(msg="Group '%s' does not exist" % groupid)
+ group['groupid'] = groupid
+ return ProxmoxGroup(group)
+
+ def get_groups(self):
+ groups = self.proxmox_api.access.groups.get()
+ return [ProxmoxGroup(group) for group in groups]
+
+
+class ProxmoxGroup:
+ def __init__(self, group):
+ self.group = dict()
+ # Data representation is not the same depending on API calls
+ for k, v in group.items():
+ if k == 'users' and isinstance(v, str):
+ self.group['users'] = v.split(',')
+ elif k == 'members':
+ self.group['users'] = group['members']
+ else:
+ self.group[k] = v
+
+
+def proxmox_group_info_argument_spec():
+ return dict(
+ group=dict(type='str', aliases=['groupid', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ group_info_args = proxmox_group_info_argument_spec()
+ module_args.update(group_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxGroupInfoAnsible(module)
+ group = module.params['group']
+
+ if group:
+ groups = [proxmox.get_group(groupid=group)]
+ else:
+ groups = proxmox.get_groups()
+ result['proxmox_groups'] = [group.group for group in groups]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
new file mode 100644
index 00000000..0161fefc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
@@ -0,0 +1,1449 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
+ - From community.general 4.0.0 on, there will be no default values, see I(proxmox_default_behavior).
+author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
+options:
+ acpi:
+ description:
+ - Specify if ACPI should be enabled/disabled.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ agent:
+ description:
+ - Specify if the QEMU Guest Agent should be enabled/disabled.
+ type: bool
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ type: str
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ autostart:
+ description:
+ - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(0). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ type: str
+ choices: ['seabios', 'ovmf']
+ boot:
+ description:
+ - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine these letters to set the boot order.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(cnd). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ type: str
+ cicustom:
+ description:
+ - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
+ type: str
+ version_added: 1.3.0
+ cipassword:
+ description:
+ - 'cloud-init: password of default user to create.'
+ type: str
+ version_added: 1.3.0
+ citype:
+ description:
+ - 'cloud-init: Specifies the cloud-init configuration format.'
+ - The default depends on the configured operating system type (C(ostype)).
+ - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ type: str
+ choices: ['nocloud', 'configdrive2']
+ version_added: 1.3.0
+ ciuser:
+ description:
+ - 'cloud-init: username of default user to create.'
+ type: str
+ version_added: 1.3.0
+ clone:
+ description:
+ - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is still required for initiating the clone.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(kvm64). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has a total of '2' CPU time.
+ type: int
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable fair-scheduler configuration by setting this to 0
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1000). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ type: str
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as comment inside the configuration file.
+ type: str
+ digest:
+ description:
+ - Specify whether to prevent changes if the current configuration file has a different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ type: str
+ force:
+ description:
+ - Allow force-stopping the VM.
+ - Can be used with states C(stopped) and C(restarted).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ format:
+ description:
+ - Target drive's backing file's data format.
+ - Used only with clone
+ - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(qcow2). If I(proxmox_default_behavior) is set to C(no_defaults),
+ not specifying this option is equivalent to setting it to C(unspecified).
+ Note that the default value of I(proxmox_default_behavior) changes in community.general 4.0.0.
+ type: str
+ choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ type: bool
+ full:
+ description:
+ - Create a full copy of all disk. This is always done when you clone a normal VM.
+ - For VM templates, we try to create a linked clone by default.
+ - Used only with clone
+ type: bool
+ default: 'yes'
+ hostpci:
+ description:
+ - Specify a hash/dictionary of host PCI devices to map into the guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ type: str
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ type: str
+ choices: ['any', '2', '1024']
+ ide:
+ description:
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ ipconfig:
+ description:
+ - 'cloud-init: Set the IP configuration.'
+ - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
+ - Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
+ - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
+ - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
+ - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
+ - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
+ - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
+ type: dict
+ version_added: 1.3.0
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ type: str
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ type: bool
+ lock:
+ description:
+ - Lock/unlock the VM.
+ type: str
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ type: str
+ memory:
+ description:
+ - Memory size in MB for instance.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(512). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ type: int
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ type: str
+ nameservers:
+ description:
+ - 'cloud-init: DNS server IP address(es).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ type: dict
+ newid:
+ description:
+ - VMID for the clone. Used only with clone.
+ - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ node:
+ description:
+ - Proxmox VE node, where the new VM will be created.
+ - Only required for C(state=present).
+ - For other states, it will be autodiscovered.
+ type: str
+ numa:
+ description:
+ - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ type: dict
+ numa_enabled:
+ description:
+ - Enables NUMA.
+ type: bool
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - The l26 is Linux 2.6/3.X Kernel.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(l26). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']
+ parallel:
+ description:
+ - A hash/dictionary of host parallel devices to map. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ type: dict
+ pool:
+ description:
+ - Add the new VM to the specified pool.
+ type: str
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ type: bool
+ reboot:
+ description:
+ - Allow reboot. If set to C(yes), the VM exits on reboot.
+ type: bool
+ revert:
+ description:
+ - Revert a pending change.
+ type: str
+ sata:
+ description:
+ - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsi:
+ description:
+ - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ type: str
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ searchdomains:
+ description:
+ - 'cloud-init: Sets DNS search domain(s).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ serial:
+ description:
+ - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ shares:
+ description:
+ - Sets the amount of memory shares for auto-ballooning (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning, this means no limit.
+ type: int
+ skiplock:
+ description:
+ - Ignore locks
+ - Only root is allowed to use this option.
+ type: bool
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ type: str
+ snapname:
+ description:
+ - The name of the snapshot. Used only with clone.
+ type: str
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(1). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: int
+ sshkeys:
+ description:
+ - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
+ type: str
+ version_added: 1.3.0
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ type: str
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown in done with reverse ordering.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the instance.
+ - If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+ default: present
+ storage:
+ description:
+ - Target storage for full clone.
+ type: str
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ target:
+ description:
+ - Target node. Only allowed if the original VM is on shared storage.
+ - Used only with clone
+ type: str
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ type: bool
+ template:
+ description:
+ - Enables/disables the template.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: bool
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ update:
+ description:
+ - If C(yes), the VM will be updated with the new values.
+ - Because of API limitations and for security reasons, updating the following parameters has been disabled -
+ - C(net, virtio, ide, sata, scsi). For example, updating C(net) changes the MAC address and updating C(virtio) always creates a new disk.
+ - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'no'
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ type: int
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+ - If I(proxmox_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(std). Note that the default value of I(proxmox_default_behavior)
+ changes in community.general 4.0.0.
+ type: str
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ virtio:
+ description:
+ - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol).
+ type: dict
+ vmid:
+ description:
+ - Specifies the VM ID. The I(name) parameter can be used instead.
+ - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ type: str
+ proxmox_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems when
+ a user expects different behavior from Proxmox by default, or has options filled in that cause
+ problems when they have been set.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.general 4.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(proxmox_default_behavior) to an explicit
+ value.
+ - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
+ I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
+ I(tablet), I(template), and I(vga) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+- name: Create new VM with minimal options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+
+- name: Create new VM with minimal options and given vmid
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ vmid: 100
+
+- name: Create new VM with two network interface options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ net1: 'e1000,bridge=vmbr2'
+
+- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ virtio:
+ virtio0: 'VMs_LVM:10'
+ virtio1: 'VMs:2,format=qcow2'
+ virtio2: 'VMs:5,format=raw'
+ cores: 4
+ vcpus: 2
+
+- name: >
+ Clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ format: qcow2
+ timeout: 500
+
+- name: >
+ Create linked clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ full: no
+ format: unspecified
+ timeout: 500
+
+- name: Clone VM with source vmid and target newid and raw format
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: arbitrary_name
+ vmid: 108
+ newid: 152
+ name: zavala
+ node: sabrewulf
+ storage: LVM_STO
+ format: raw
+ timeout: 300
+
+- name: Create new VM and lock it for snapshot
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ lock: snapshot
+
+- name: Create new VM and set protection to disable the remove VM and remove disk operations
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ protection: yes
+
+- name: Create new VM using cloud-init with a username and password
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ ciuser: mylinuxuser
+ cipassword: supersecret
+ searchdomains: 'mydomain.internal'
+ nameservers: 1.1.1.1
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
+
+- name: Create new VM using Cloud-Init with an ssh key
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
+ searchdomains: 'mydomain.internal'
+ nameservers:
+ - '1.1.1.1'
+ - '8.8.8.8'
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24'
+
+- name: Start VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: started
+
+- name: Stop VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+
+- name: Stop VM with force
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+ force: yes
+
+- name: Restart VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: restarted
+
+- name: Remove VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: absent
+
+- name: Get VM current state
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: current
+
+- name: Update VM configuration
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ update: yes
+
+- name: Delete QEMU parameters
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ delete: 'args,template,cpulimit'
+
+- name: Revert a pending change
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ revert: 'template,cpulimit'
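+
+# A hypothetical sketch, not part of the original examples: authenticating with an API
+# token instead of a password; the token id and secret shown here are illustrative only.
+- name: Create new VM using an API token for authentication
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_token_id: mytoken
+ api_token_secret: 00000000-0000-0000-0000-000000000000
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf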
+'''
+
+RETURN = '''
+devices:
+ description: The list of devices created or used.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "ide0": "VMS_LVM:vm-115-disk-1",
+ "ide1": "VMs:115/vm-115-disk-3.raw",
+ "virtio0": "VMS_LVM:vm-115-disk-2",
+ "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+ "virtio2": "VMs:115/vm-115-disk-2.raw"
+ }'
+mac:
+ description: List of MAC addresses created and the net[n] interfaces they are attached to. Useful when you want to use provisioning systems like Foreman via PXE.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "net0": "3E:6E:97:D2:31:9F",
+ "net1": "B6:A1:FC:EF:78:A4"
+ }'
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description:
+ - The current virtual machine status.
+ - Returned only when C(state=current)
+ returned: success
+ type: dict
+ sample: '{
+ "changed": false,
+ "msg": "VM kropta with vmid = 110 is running",
+ "status": "running"
+ }'
+'''
+
+import os
+import re
+import time
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_nextvmid(module, proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+
+def get_vmid(proxmox, name):
+ return [vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm.get('name') == name]
+
+
+def get_vm(proxmox, vmid):
+ return [vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+
+def node_check(proxmox, node):
+ return [True for nd in proxmox.nodes.get() if nd['node'] == node]
+
+
+def get_vminfo(module, proxmox, node, vmid, **kwargs):
+ global results
+ results = {}
+ mac = {}
+ devices = {}
+ try:
+ vm = proxmox.nodes(node).qemu(vmid).config.get()
+ except Exception as e:
+ module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ # Sanitize kwargs. Remove args that were not specified (i.e. are None).
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ # Flatten nested dicts in kwargs into top-level elements, for hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Split information by type
+ for k, v in kwargs.items():
+ if re.match(r'net[0-9]', k) is not None:
+ interface = k
+ k = vm[k]
+ k = re.search('=(.*?),', k).group(1)
+ mac[interface] = k
+ if (re.match(r'virtio[0-9]', k) is not None or
+ re.match(r'ide[0-9]', k) is not None or
+ re.match(r'scsi[0-9]', k) is not None or
+ re.match(r'sata[0-9]', k) is not None):
+ device = k
+ k = vm[k]
+ k = re.search('(.*?),', k).group(1)
+ devices[device] = k
+
+ results['mac'] = mac
+ results['devices'] = devices
+ results['vmid'] = int(vmid)
+
+
+def settings(module, proxmox, vmid, node, name, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove args that were not specified (i.e. are None).
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
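+ # A None response from the config update is treated as success (the PVE config PUT returns no data)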
+ if proxmox_node.qemu(vmid).config.set(**kwargs) is None:
+ return True
+ else:
+ return False
+
+
+def wait_for_task(module, proxmox, node, taskid):
+ timeout = module.params['timeout']
+
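+ # Poll the task status once per second until it finishes or the timeout counter reaches zero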
+ while timeout:
+ task = proxmox.nodes(node).tasks(taskid).status.get()
+ if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
+ # Wait an extra second as the API can be ahead of the hypervisor
+ time.sleep(1)
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ break
+ time.sleep(1)
+ return False
+
+
+def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
+ # Available only in PVE 4
+ only_v4 = ['force', 'protection', 'skiplock']
+ only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig']
+
+ # valid clone parameters
+ valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
+ clone_params = {}
+ # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
+ vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
+
+ proxmox_node = proxmox.nodes(node)
+
+ # Sanitize kwargs. Remove args that were not specified and convert True/False booleans to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+ kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
+
+ # These parameters only work on PVE 4 and later
+ if PVE_MAJOR_VERSION < 4:
+ for p in only_v4:
+ if p in kwargs:
+ del kwargs[p]
+
+ # These parameters only work on PVE 6 and later
+ if PVE_MAJOR_VERSION < 6:
+ for p in only_v6:
+ if p in kwargs:
+ del kwargs[p]
+
+ # 'sshkeys' param expects an urlencoded string
+ if 'sshkeys' in kwargs:
+ urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
+ kwargs['sshkeys'] = str(urlencoded_ssh_keys)
+
+ # If update, don't update disk (virtio, ide, sata, scsi) and network interface
+ # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
+ if update:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'net' in kwargs:
+ del kwargs['net']
+ if 'force' in kwargs:
+ del kwargs['force']
+ if 'pool' in kwargs:
+ del kwargs['pool']
+
+ # Flatten nested dicts in kwargs into top-level elements, for hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Rename numa_enabled to numa, according to the API documentation
+ if 'numa_enabled' in kwargs:
+ kwargs['numa'] = kwargs['numa_enabled']
+ del kwargs['numa_enabled']
+
+ # PVE api expects strings for the following params
+ if 'nameservers' in module.params:
+ nameservers = module.params.pop('nameservers')
+ if nameservers:
+ kwargs['nameserver'] = ' '.join(nameservers)
+ if 'searchdomains' in module.params:
+ searchdomains = module.params.pop('searchdomains')
+ if searchdomains:
+ kwargs['searchdomain'] = ' '.join(searchdomains)
+
+ # -args and skiplock require root@pam user
+ if module.params['api_user'] == "root@pam" and module.params['args'] is None:
+ if not update:
+ kwargs['args'] = vm_args
+ elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
+ kwargs['args'] = module.params['args']
+ elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
+ module.fail_json(msg='The args parameter requires the root@pam user.')
+
+ if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
+ module.fail_json(msg='The skiplock parameter requires the root@pam user.')
+
+ if update:
+ if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
+ return True
+ else:
+ return False
+ elif module.params['clone'] is not None:
+ for param in valid_clone_params:
+ if module.params[param] is not None:
+ clone_params[param] = module.params[param]
+ clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
+ taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
+ else:
+ taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
+
+ if not wait_for_task(module, proxmox, node, taskid):
+        module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def start_vm(module, proxmox, vm):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.start.post()
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+        module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def stop_vm(module, proxmox, vm, force):
+ vmid = vm[0]['vmid']
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+        module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def proxmox_version(proxmox):
+ apireturn = proxmox.version.get()
+ return LooseVersion(apireturn['version'])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ acpi=dict(type='bool'),
+ agent=dict(type='bool'),
+ args=dict(type='str'),
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ autostart=dict(type='bool'),
+ balloon=dict(type='int'),
+ bios=dict(choices=['seabios', 'ovmf']),
+ boot=dict(type='str'),
+ bootdisk=dict(type='str'),
+ cicustom=dict(type='str'),
+ cipassword=dict(type='str', no_log=True),
+ citype=dict(type='str', choices=['nocloud', 'configdrive2']),
+ ciuser=dict(type='str'),
+ clone=dict(type='str', default=None),
+ cores=dict(type='int'),
+ cpu=dict(type='str'),
+ cpulimit=dict(type='int'),
+ cpuunits=dict(type='int'),
+ delete=dict(type='str', default=None),
+ description=dict(type='str'),
+ digest=dict(type='str'),
+ force=dict(type='bool'),
+ format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
+ freeze=dict(type='bool'),
+ full=dict(type='bool', default=True),
+ hostpci=dict(type='dict'),
+ hotplug=dict(type='str'),
+ hugepages=dict(choices=['any', '2', '1024']),
+ ide=dict(type='dict'),
+ ipconfig=dict(type='dict'),
+ keyboard=dict(type='str'),
+ kvm=dict(type='bool'),
+ localtime=dict(type='bool'),
+ lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine=dict(type='str'),
+ memory=dict(type='int'),
+ migrate_downtime=dict(type='int'),
+ migrate_speed=dict(type='int'),
+ name=dict(type='str'),
+ nameservers=dict(type='list', elements='str'),
+ net=dict(type='dict'),
+ newid=dict(type='int', default=None),
+ node=dict(),
+ numa=dict(type='dict'),
+ numa_enabled=dict(type='bool'),
+ onboot=dict(type='bool'),
+ ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'l24', 'l26', 'solaris']),
+ parallel=dict(type='dict'),
+ pool=dict(type='str'),
+ protection=dict(type='bool'),
+ reboot=dict(type='bool'),
+ revert=dict(type='str'),
+ sata=dict(type='dict'),
+ scsi=dict(type='dict'),
+ scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial=dict(type='dict'),
+ searchdomains=dict(type='list', elements='str'),
+ shares=dict(type='int'),
+ skiplock=dict(type='bool'),
+ smbios=dict(type='str'),
+ snapname=dict(type='str'),
+ sockets=dict(type='int'),
+ sshkeys=dict(type='str'),
+ startdate=dict(type='str'),
+ startup=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ storage=dict(type='str'),
+ tablet=dict(type='bool'),
+ target=dict(type='str'),
+ tdf=dict(type='bool'),
+ template=dict(type='bool'),
+ timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=False),
+ vcpus=dict(type='int'),
+ vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio=dict(type='dict'),
+ vmid=dict(type='int', default=None),
+ watchdog=dict(),
+ proxmox_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ ),
+ mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
+ required_one_of=[('name', 'vmid',)],
+ required_if=[('state', 'present', ['node'])]
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ clone = module.params['clone']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ delete = module.params['delete']
+ memory = module.params['memory']
+ name = module.params['name']
+ newid = module.params['newid']
+ node = module.params['node']
+ revert = module.params['revert']
+ sockets = module.params['sockets']
+ state = module.params['state']
+ update = bool(module.params['update'])
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+
+ if module.params['proxmox_default_behavior'] is None:
+ module.params['proxmox_default_behavior'] = 'compatibility'
+ module.deprecate(
+ 'The proxmox_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.general 4.0.0. To remove this warning, please specify an explicit value for it now',
+ version='4.0.0', collection_name='community.general'
+ )
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ acpi=True,
+ autostart=False,
+ balloon=0,
+ boot='cnd',
+ cores=1,
+ cpu='kvm64',
+ cpuunits=1000,
+ force=False,
+ format='qcow2',
+ kvm=True,
+ memory=512,
+ ostype='l26',
+ sockets=1,
+ tablet=False,
+ template=False,
+ vga='std',
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ if module.params['format'] == 'unspecified':
+ module.params['format'] = None
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+        # If the password is not set, get it from the PROXMOX_PASSWORD env var
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ global PVE_MAJOR_VERSION
+ version = proxmox_version(proxmox)
+ PVE_MAJOR_VERSION = 3 if version < LooseVersion('4.0') else version.version[0]
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
+ if not vmid:
+ if state == 'present' and not update and not clone and not delete and not revert:
+ try:
+ vmid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ else:
+ clone_target = clone or name
+ try:
+ vmid = get_vmid(proxmox, clone_target)[0]
+ except Exception:
+ vmid = -1
+
+ if clone is not None:
+ # If newid is not defined then retrieve the next free id from ProxmoxAPI
+ if not newid:
+ try:
+ newid = get_nextvmid(module, proxmox)
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+
+ # Ensure source VM name exists when cloning
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
+
+ # Ensure source VM id exists when cloning
+ if not get_vm(proxmox, vmid):
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+        # Ensure the chosen VM name doesn't already exist when cloning
+ if get_vmid(proxmox, name):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+
+        # Ensure the chosen VM id doesn't already exist when cloning
+ if get_vm(proxmox, newid):
+ module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))
+
+ if delete is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, delete=delete)
+            module.exit_json(changed=True, msg="Settings have been deleted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
+
+ if revert is not None:
+ try:
+ settings(module, proxmox, vmid, node, name, revert=revert)
+            module.exit_json(changed=True, msg="Settings have been reverted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+            module.fail_json(msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
+
+ if state == 'present':
+ try:
+ if get_vm(proxmox, vmid) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+ elif get_vmid(proxmox, name) and not (update or clone):
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+            elif not node or not name:
+                module.fail_json(msg='node and name are mandatory for creating/updating a VM')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ acpi=module.params['acpi'],
+ agent=module.params['agent'],
+ autostart=module.params['autostart'],
+ balloon=module.params['balloon'],
+ bios=module.params['bios'],
+ boot=module.params['boot'],
+ bootdisk=module.params['bootdisk'],
+ cicustom=module.params['cicustom'],
+ cipassword=module.params['cipassword'],
+ citype=module.params['citype'],
+ ciuser=module.params['ciuser'],
+ cpulimit=module.params['cpulimit'],
+ cpuunits=module.params['cpuunits'],
+ description=module.params['description'],
+ digest=module.params['digest'],
+ force=module.params['force'],
+ freeze=module.params['freeze'],
+ hostpci=module.params['hostpci'],
+ hotplug=module.params['hotplug'],
+ hugepages=module.params['hugepages'],
+ ide=module.params['ide'],
+ ipconfig=module.params['ipconfig'],
+ keyboard=module.params['keyboard'],
+ kvm=module.params['kvm'],
+ localtime=module.params['localtime'],
+ lock=module.params['lock'],
+ machine=module.params['machine'],
+ migrate_downtime=module.params['migrate_downtime'],
+ migrate_speed=module.params['migrate_speed'],
+ net=module.params['net'],
+ numa=module.params['numa'],
+ numa_enabled=module.params['numa_enabled'],
+ onboot=module.params['onboot'],
+ ostype=module.params['ostype'],
+ parallel=module.params['parallel'],
+ pool=module.params['pool'],
+ protection=module.params['protection'],
+ reboot=module.params['reboot'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ scsihw=module.params['scsihw'],
+ serial=module.params['serial'],
+ shares=module.params['shares'],
+ skiplock=module.params['skiplock'],
+ smbios1=module.params['smbios'],
+ snapname=module.params['snapname'],
+ sshkeys=module.params['sshkeys'],
+ startdate=module.params['startdate'],
+ startup=module.params['startup'],
+ tablet=module.params['tablet'],
+ target=module.params['target'],
+ tdf=module.params['tdf'],
+ template=module.params['template'],
+ vcpus=module.params['vcpus'],
+ vga=module.params['vga'],
+ virtio=module.params['virtio'],
+ watchdog=module.params['watchdog'])
+
+ if not clone:
+ get_vminfo(module, proxmox, node, vmid,
+ ide=module.params['ide'],
+ net=module.params['net'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ virtio=module.params['virtio'])
+ if update:
+ module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ if update:
+                module.fail_json(msg="Unable to update vm {0} with vmid {1}: ".format(name, vmid) + str(e))
+ elif clone is not None:
+                module.fail_json(msg="Unable to clone vm {0} from vmid {1}: ".format(name, vmid) + str(e))
+ else:
+ module.fail_json(msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+
+ elif state == 'started':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if vm[0]['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if stop_vm(module, proxmox, vm, force=module.params['force']) and start_vm(module, proxmox, vm):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False)
+
+ proxmox_node = proxmox.nodes(vm[0]['node'])
+ if vm[0]['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+ taskid = proxmox_node.qemu.delete(vmid)
+ if not wait_for_task(module, proxmox, vm[0]['node'], taskid):
+                module.fail_json(msg='Reached timeout while waiting for VM removal. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ else:
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'current':
+ status = {}
+ if -1 == vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ current = proxmox.nodes(vm[0]['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py
new file mode 100644
index 00000000..541dc28e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_template.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+  - allows you to upload/delete templates in a Proxmox VE cluster
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ type: str
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ type: str
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+      - you can use the PROXMOX_PASSWORD environment variable
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: 'no'
+ type: bool
+ node:
+ description:
+      - Proxmox VE node on which to operate with the template
+ type: str
+ src:
+ description:
+      - path to the file to upload
+ - required only for C(state=present)
+ type: path
+ template:
+ description:
+ - the template name
+ - Required for state C(absent) to delete a template.
+ - Required for state C(present) to download an appliance container template (pveam).
+ type: str
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ type: str
+ default: 'vztmpl'
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+      - can be used only with C(state=present); an existing template will be overwritten
+ type: bool
+ default: 'no'
+ state:
+ description:
+ - Indicate desired state of the template
+ type: str
+ choices: ['present', 'absent']
+ default: present
+notes:
+  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: Sergei Antipov (@UnderGreen)
+'''
+
+EXAMPLES = '''
+- name: Upload new openvz template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: >
+    Upload new openvz template with minimal options using the
+    PROXMOX_PASSWORD environment variable (you should export it beforehand)
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: Upload new openvz template with all options and force overwrite
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: yes
+
+- name: Delete template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+
+- name: Download proxmox appliance container template
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
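+
+# A hedged sketch: the api_token_id/api_token_secret options documented above
+# can be used instead of api_password; the token values here are placeholders.
+- name: Upload new openvz template using an API token
+  community.general.proxmox_template:
+    node: uk-mc02
+    api_user: root@pam
+    api_token_id: mytokenid
+    api_token_secret: 1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d
+    api_host: node1
+    src: ~/ubuntu-14.04-x86_64.tar.gz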
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_template(proxmox, node, storage, content_type, template):
+ return [True for tmpl in proxmox.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+
+
+def task_status(module, proxmox, node, taskid, timeout):
+ """
+ Check the task status and wait until the task is completed or the timeout is reached.
+ """
+ while timeout:
+ task_status = proxmox.nodes(node).tasks(taskid).status.get()
+ if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+            module.fail_json(msg='Reached timeout while waiting for the template upload/download. Last line in task before timeout: %s'
+                             % proxmox.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def upload_template(module, proxmox, node, storage, content_type, realpath, timeout):
+ taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def download_template(module, proxmox, node, storage, template, timeout):
+ taskid = proxmox.nodes(node).aplinfo.post(storage=storage, template=template)
+ return task_status(module, proxmox, node, taskid, timeout)
+
+
+def delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ proxmox.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not get_template(proxmox, node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+            module.fail_json(msg='Reached timeout while waiting for the template to be deleted.')
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_host=dict(required=True),
+ api_password=dict(no_log=True),
+ api_token_id=dict(no_log=True),
+ api_token_secret=dict(no_log=True),
+ api_user=dict(required=True),
+ validate_certs=dict(type='bool', default=False),
+ node=dict(),
+ src=dict(type='path'),
+ template=dict(),
+ content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+ storage=dict(default='local'),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ api_token_id = module.params['api_token_id']
+ api_token_secret = module.params['api_token_secret']
+ api_user = module.params['api_user']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ auth_args = {'user': api_user}
+ if not (api_token_id and api_token_secret):
+        # If the password is not set, get it from the PROXMOX_PASSWORD env var
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ proxmox = ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ # Used to test the validity of the token if given
+ proxmox.version.get()
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
+
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if download_template(module, proxmox, node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+
+ template = os.path.basename(src)
+ if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
+                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+ elif not src:
+                module.fail_json(msg='src param for uploading a template file is mandatory')
+ elif not (os.path.exists(src) and os.path.isfile(src)):
+                module.fail_json(msg='template file at path %s does not exist' % src)
+
+ if upload_template(module, proxmox, node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param is mandatory')
+ elif not get_template(proxmox, node, storage, content_type, template):
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
+
+ if delete_template(module, proxmox, node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
new file mode 100644
index 00000000..1de93e60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_user_info
+short_description: Retrieve information about one or more Proxmox VE users
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE users
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm']
+ type: str
+ user:
+ description:
+ - Restrict results to a specific user.
+ aliases: ['name']
+ type: str
+ userid:
+ description:
+      - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
+ type: str
+author: Tristan Le Guern (@Aversiste)
+extends_documentation_fragment: community.general.proxmox.documentation
+'''
+
+EXAMPLES = '''
+- name: List existing users
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_users
+
+- name: List existing users in the pve authentication realm
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_users_pve
+
+- name: Retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ userid: admin@pve
+ register: proxmox_user_admin
+
+- name: Alternative way to retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ user: admin
+ domain: pve
+ register: proxmox_user_admin
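+
+# A minimal usage sketch of the returned data: print the user IDs from the
+# result registered above (proxmox_users is documented in RETURN below).
+- name: Show the returned user IDs
+  ansible.builtin.debug:
+    msg: "{{ proxmox_user_admin.proxmox_users | map(attribute='userid') | list }}"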
+'''
+
+
+RETURN = '''
+proxmox_users:
+ description: List of users.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the user.
+ returned: on success
+ type: str
+ domain:
+ description: User's authentication realm, also the right part of the user ID.
+ returned: on success
+ type: str
+ email:
+ description: User's email address.
+ returned: on success
+ type: str
+ enabled:
+ description: User's account state.
+ returned: on success
+ type: bool
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ firstname:
+ description: User's first name.
+ returned: on success
+ type: str
+ groups:
+ description: List of groups which the user is a member of.
+ returned: on success
+ type: list
+ elements: str
+ keys:
+ description: User's two factor authentication keys.
+ returned: on success
+ type: str
+ lastname:
+ description: User's last name.
+ returned: on success
+ type: str
+ tokens:
+ description: List of API tokens associated to the user.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the token.
+ returned: on success
+ type: str
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ privsep:
+ description: Describe if the API token is further restricted with ACLs or is fully privileged.
+ returned: on success
+ type: bool
+ tokenid:
+ description: Token name.
+ returned: on success
+ type: str
+ user:
+ description: User's login name, also the left part of the user ID.
+ returned: on success
+ type: str
+ userid:
+ description: Proxmox user ID, represented as user@realm.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
+
+
+class ProxmoxUserInfoAnsible(ProxmoxAnsible):
+ def get_user(self, userid):
+ try:
+ user = self.proxmox_api.access.users.get(userid)
+ except Exception:
+ self.module.fail_json(msg="User '%s' does not exist" % userid)
+ user['userid'] = userid
+ return ProxmoxUser(user)
+
+ def get_users(self, domain=None):
+ users = self.proxmox_api.access.users.get(full=1)
+ users = [ProxmoxUser(user) for user in users]
+ if domain:
+ return [user for user in users if user.user['domain'] == domain]
+ return users
+
+
+class ProxmoxUser:
+ def __init__(self, user):
+ self.user = dict()
+        # Data representation differs depending on the API call
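+        # The API may return 'tokens' either as a list of dicts or as a dict keyed by tokenid; both shapes are handled below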
+ for k, v in user.items():
+ if k == 'enable':
+ self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
+ elif k == 'userid':
+ self.user['user'] = user['userid'].split('@')[0]
+ self.user['domain'] = user['userid'].split('@')[1]
+ self.user[k] = v
+ elif k in ['groups', 'tokens'] and (v == '' or v is None):
+ self.user[k] = []
+ elif k == 'groups' and type(v) == str:
+ self.user['groups'] = v.split(',')
+ elif k == 'tokens' and type(v) == list:
+ for token in v:
+ if 'privsep' in token:
+ token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
+ self.user['tokens'] = v
+ elif k == 'tokens' and type(v) == dict:
+ self.user['tokens'] = list()
+ for tokenid, tokenvalues in v.items():
+ t = tokenvalues
+ t['tokenid'] = tokenid
+ if 'privsep' in tokenvalues:
+ t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
+ self.user['tokens'].append(t)
+ else:
+ self.user[k] = v
+
+
+def proxmox_user_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm']),
+ user=dict(type='str', aliases=['name']),
+ userid=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ user_info_args = proxmox_user_info_argument_spec()
+ module_args.update(user_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ proxmox = ProxmoxUserInfoAnsible(module)
+ domain = module.params['domain']
+ user = module.params['user']
+ if user and domain:
+ userid = user + '@' + domain
+ else:
+ userid = module.params['userid']
+
+ if userid:
+ users = [proxmox.get_user(userid=userid)]
+ else:
+ users = proxmox.get_users(domain=domain)
+ result['proxmox_users'] = [user.user for user in users]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
new file mode 100644
index 00000000..8d9374a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
@@ -0,0 +1,626 @@
+#!/usr/bin/python
+#
+# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
+# Frameworks
+# Copyright (C) 2016 PubNub Inc.
+# http://www.pubnub.com/
+# http://www.pubnub.com/terms
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pubnub_blocks
+short_description: PubNub blocks management module.
+description:
+ - "This module allows Ansible to interface with the PubNub BLOCKS
+ infrastructure by providing the following operations: create / remove,
+ start / stop and rename for blocks and create / modify / remove for event
+ handlers"
+author:
+ - PubNub <support@pubnub.com> (@pubnub)
+ - Sergey Mamontov <sergey@pubnub.com> (@parfeon)
+requirements:
+ - "python >= 2.7"
+ - "pubnub_blocks_client >= 1.0"
+options:
+ email:
+ description:
+ - Email from account for which new session should be started.
+ - "Not required if C(cache) contains result of previous module call (in
+ same play)."
+ required: false
+ type: str
+ password:
+ description:
+ - Password which match to account to which specified C(email) belong.
+ - "Not required if C(cache) contains result of previous module call (in
+ same play)."
+ required: false
+ type: str
+ cache:
+ description: >
+      If a single play uses the blocks management module several times, it is
+      preferred to enable 'caching' by having the previous module call share
+      gathered artifacts and passing them to this parameter.
+ required: false
+ type: dict
+ default: {}
+ account:
+ description:
+      - "Name of the PubNub account from which C(application) will be used to
+        manage blocks."
+      - "The user's account will be used if the value is not set or empty."
+ type: str
+ required: false
+ application:
+ description:
+ - "Name of target PubNub application for which blocks configuration on
+ specific C(keyset) will be done."
+ type: str
+ required: true
+ keyset:
+ description:
+ - Name of application's keys set which is bound to managed blocks.
+ type: str
+ required: true
+ state:
+ description:
+      - "Intended block state after the event handler creation / update
+        process completes."
+ required: false
+ default: 'present'
+ choices: ['started', 'stopped', 'present', 'absent']
+ type: str
+ name:
+ description:
+ - Name of managed block which will be later visible on admin.pubnub.com.
+ required: true
+ type: str
+ description:
+ description:
+      - Short block description which will later be visible on
+        admin.pubnub.com. Used only if the block doesn't exist; it won't
+        change the description of an existing block.
+ required: false
+ type: str
+ event_handlers:
+ description:
+ - "List of event handlers which should be updated for specified block
+ C(name)."
+ - "Each entry for new event handler should contain: C(name), C(src),
+ C(channels), C(event). C(name) used as event handler name which can be
+ used later to make changes to it."
+ - C(src) is full path to file with event handler code.
+      - "C(channels) is the name of the channel from which the event handler
+        is waiting for events."
+ - "C(event) is type of event which is able to trigger event handler:
+ I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ - "Each entry for existing handlers should contain C(name) (so target
+ handler can be identified). Rest parameters (C(src), C(channels) and
+ C(event)) can be added if changes required for them."
+ - "It is possible to rename event handler by adding C(changes) key to
+ event handler payload and pass dictionary, which will contain single key
+ C(name), where new name should be passed."
+      - "To remove a particular event handler, set its C(state) to C(absent)
+        and it will be removed (see the removal example below)."
+ required: false
+ default: []
+ type: list
+ changes:
+ description:
+      - "List of fields which should be changed on the block itself (doesn't
+        affect any event handlers)."
+      - "Possible option for change is: C(name)."
+ required: false
+ default: {}
+ type: dict
+ validate_certs:
+ description:
+      - "This key allows skipping the certificate check when performing REST
+        API calls. Sometimes a host may have issues with its certificates and
+        this will cause problems when calling the PubNub REST API."
+      - If the check should be ignored, C(False) should be passed to this parameter.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+# Event handler create example.
+- name: Create single event handler
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ src: '{{ path_to_handler_source }}'
+ name: '{{ handler_name }}'
+ event: 'js-before-publish'
+ channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+    state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_1_source }}'
+ name: '{{ event_handler_1_name }}'
+ channels: '{{ event_handler_1_channel }}'
+ event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_2_source }}'
+ name: '{{ event_handler_2_name }}'
+ channels: '{{ event_handler_2_channel }}'
+ event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: started
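+
+# A minimal sketch of removing a single event handler, as described in the
+# event_handlers option documentation; names reuse the variables from above.
+- name: Remove '{{ event_handler_2_name }}' handler from '{{ block_name }}'
+  register: module_cache
+  community.general.pubnub_blocks:
+    cache: '{{ module_cache }}'
+    application: '{{ app_name }}'
+    keyset: '{{ keyset_name }}'
+    name: '{{ block_name }}'
+    state: present
+    event_handlers:
+      -
+        name: '{{ event_handler_2_name }}'
+        state: absent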
+'''
+
+RETURN = '''
+module_cache:
+  description: "Cached account information. If the module is used several times
+  within a single play it is better to pass the cached data to the next module
+  calls to speed up the process."
+ type: dict
+ returned: always
+'''
+import copy
+import os
+
+try:
+ # Import PubNub BLOCKS client.
+ from pubnub_blocks_client import User, Account, Owner, Application, Keyset
+ from pubnub_blocks_client import Block, EventHandler
+ from pubnub_blocks_client import exceptions
+ HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+ HAS_PUBNUB_BLOCKS_CLIENT = False
+ User = None
+ Account = None
+ Owner = None
+ Application = None
+ Keyset = None
+ Block = None
+ EventHandler = None
+ exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+
+
+def pubnub_user(module):
+    """Create and configure the user model if possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+
+ :rtype: User
+    :return: Reference on initialized and ready to use user, or 'None' if
+        not all required information has been passed to the module.
+ """
+ user = None
+ params = module.params
+
+ if params.get('cache') and params['cache'].get('module_cache'):
+ cache = params['cache']['module_cache']
+ user = User()
+ user.restore(cache=copy.deepcopy(cache['pnm_user']))
+ elif params.get('email') and params.get('password'):
+ user = User(email=params.get('email'), password=params.get('password'))
+ else:
+        err_msg = 'It looks like no account credentials have been passed or ' \
+                  'the \'cache\' field doesn\'t contain the result of a ' \
+                  'previous module call.'
+ module.fail_json(msg='Missing account credentials.',
+ description=err_msg, changed=False)
+
+ return user
+
+
+def pubnub_account(module, user):
+ """Create and configure account if it is possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type user: User
+ :param user: Reference on authorized user for which one of accounts
+ should be used during manipulations with block.
+
+ :rtype: Account
+    :return: Reference on initialized and ready to use account, or 'None' if
+        not all required information has been passed to the module.
+ """
+ params = module.params
+ if params.get('account'):
+ account_name = params.get('account')
+ account = user.account(name=params.get('account'))
+ if account is None:
+            err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+                       'authorized user. Please make sure the correct ' \
+                       'name has been passed during module configuration.'
+ module.fail_json(msg='Missing account.',
+ description=err_frmt.format(account_name),
+ changed=False)
+ else:
+ account = user.accounts()[0]
+
+ return account
+
+
+def pubnub_application(module, account):
+ """Retrieve reference on target application from account model.
+
+    NOTE: If account authorization fails or there is no application with
+    the specified name, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model from which reference
+ on application should be fetched.
+
+ :rtype: Application
+ :return: Reference on initialized and ready to use application model.
+ """
+ application = None
+ params = module.params
+ try:
+ application = account.application(params['application'])
+ except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=dict(account))
+
+ if application is None:
+        err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
+                  'correct application name has been passed. If the application ' \
+                  'doesn\'t exist you can create it on admin.pubnub.com.'
+ email = account.owner.email
+ module.fail_json(msg=err_fmt.format(params['application'], email),
+ changed=account.changed, module_cache=dict(account))
+
+ return application
+
+
+def pubnub_keyset(module, account, application):
+ """Retrieve reference on target keyset from application model.
+
+    NOTE: If there is no keyset with the specified name, the module will
+    exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be
+ used in case of error to export cached data.
+ :type application: Application
+ :param application: Reference on PubNub application model from which
+ reference on keyset should be fetched.
+
+ :rtype: Keyset
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ params = module.params
+ keyset = application.keyset(params['keyset'])
+ if keyset is None:
+        err_fmt = 'There is no \'{0}\' keyset for the \'{1}\' application. Make ' \
+                  'sure the correct keyset name has been passed. If the keyset ' \
+                  'doesn\'t exist you can create it on admin.pubnub.com.'
+ module.fail_json(msg=err_fmt.format(params['keyset'],
+ application.name),
+ changed=account.changed, module_cache=dict(account))
+
+ return keyset
+
+
+def pubnub_block(module, account, keyset):
+    """Retrieve reference on target block from keyset model.
+
+    NOTE: If there is no block with the specified name and the module is
+    configured to start/stop it, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be used in
+ case of error to export cached data.
+ :type keyset: Keyset
+ :param keyset: Reference on keyset model from which reference on block
+ should be fetched.
+
+ :rtype: Block
+    :return: Reference on initialized and ready to use block model.
+ """
+ block = None
+ params = module.params
+ try:
+ block = keyset.block(params['name'])
+ except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed, module_cache=dict(account))
+
+    # Report an error because the block doesn't exist but was requested to
+    # start/stop at the same time.
+ if block is None and params['state'] in ['started', 'stopped']:
+ block_name = params.get('name')
+        module.fail_json(msg="'{0}' block doesn't exist.".format(block_name),
+ changed=account.changed, module_cache=dict(account))
+
+ if block is None and params['state'] == 'present':
+ block = Block(name=params.get('name'),
+ description=params.get('description'))
+ keyset.add_block(block)
+
+ if block:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+ if params.get('description'):
+ block.description = params.get('description')
+
+ return block
+
+
+def pubnub_event_handler(block, data):
+ """Retrieve reference on target event handler from application model.
+
+ :type block: Block
+ :param block: Reference on block model from which reference on event
+ handlers should be fetched.
+ :type data: dict
+ :param data: Reference on dictionary which contain information about
+ event handler and whether it should be created or not.
+
+ :rtype: EventHandler
+ :return: Reference on initialized and ready to use event handler model.
+        'None' will be returned if there is no handler with the specified
+        name and no request to create it.
+ """
+ event_handler = block.event_handler(data['name'])
+
+ # Prepare payload for event handler update.
+ changed_name = (data.pop('changes').get('name')
+ if 'changes' in data else None)
+ name = data.get('name') or changed_name
+ channels = data.get('channels')
+ event = data.get('event')
+ code = _content_of_file_at_path(data.get('src'))
+ state = data.get('state') or 'present'
+
+ # Create event handler if required.
+ if event_handler is None and state == 'present':
+ event_handler = EventHandler(name=name, channels=channels, event=event,
+ code=code)
+ block.add_event_handler(event_handler)
+
+ # Update event handler if required.
+ if event_handler is not None and state == 'present':
+ if name is not None:
+ event_handler.name = name
+ if channels is not None:
+ event_handler.channels = channels
+ if event is not None:
+ event_handler.event = event
+ if code is not None:
+ event_handler.code = code
+
+ return event_handler
+
+
+def _failure_title_from_exception(exception):
+    """Compose a human-readable title for a module error.
+
+    The title will be based on status codes if they have been provided.
+ :type exception: exceptions.GeneralPubNubError
+ :param exception: Reference on exception for which title should be
+ composed.
+
+ :rtype: str
+ :return: Reference on error tile which should be shown on module
+ failure.
+ """
+ title = 'General REST API access error.'
+ if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+ title = 'Authorization error: missing credentials.'
+ elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+ title = 'Authorization error: wrong credentials.'
+ elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+ title = 'API access error: insufficient access rights.'
+ elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+ title = 'API access error: time token expired.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+        title = 'Block create did fail: block with the same name already exists.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+        title = 'Unable to fetch list of blocks for keyset.'
+ elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+ title = 'Block creation did fail.'
+ elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+ title = 'Block update did fail.'
+ elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+ title = 'Block removal did fail.'
+ elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+ title = 'Block start/stop did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+ title = 'Event handler creation did fail: missing fields.'
+ elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+        title = 'Event handler creation did fail: handler with the same name already exists.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+ title = 'Event handler creation did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+ title = 'Event handler update did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+ title = 'Event handler removal did fail.'
+
+ return title
+
+
+def _content_of_file_at_path(path):
+ """Read file content.
+
+    Try to read the content of the file at the specified path.
+    :type path: str
+    :param path: Full path to the file which should be read.
+ :rtype: content
+ :return: File content or 'None'
+ """
+ content = None
+ if path and os.path.exists(path):
+ with open(path, mode="rt") as opened_file:
+ b_content = opened_file.read()
+ try:
+ content = to_text(b_content, errors='surrogate_or_strict')
+ except UnicodeError:
+ pass
+
+ return content
+
+
+def main():
+ fields = dict(
+ email=dict(default='', required=False, type='str'),
+ password=dict(default='', required=False, type='str', no_log=True),
+ account=dict(default='', required=False, type='str'),
+ application=dict(required=True, type='str'),
+ keyset=dict(required=True, type='str'),
+ state=dict(default='present', type='str',
+ choices=['started', 'stopped', 'present', 'absent']),
+ name=dict(required=True, type='str'), description=dict(type='str'),
+ event_handlers=dict(default=list(), type='list'),
+ changes=dict(default=dict(), type='dict'),
+ cache=dict(default=dict(), type='dict'),
+ validate_certs=dict(default=True, type='bool'))
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+ if not HAS_PUBNUB_BLOCKS_CLIENT:
+ module.fail_json(msg='pubnub_blocks_client required for this module.')
+
+ params = module.params
+
+ # Authorize user.
+ user = pubnub_user(module)
+ # Initialize PubNub account instance.
+ account = pubnub_account(module, user=user)
+ # Try fetch application with which module should work.
+ application = pubnub_application(module, account=account)
+ # Try fetch keyset with which module should work.
+ keyset = pubnub_keyset(module, account=account, application=application)
+ # Try fetch block with which module should work.
+ block = pubnub_block(module, account=account, keyset=keyset)
+ is_new_block = block is not None and block.uid == -1
+
+ # Check whether block should be removed or not.
+ if block is not None and params['state'] == 'absent':
+ keyset.remove_block(block)
+ block = None
+
+ if block is not None:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+
+ # Process event changes to event handlers.
+ for event_handler_data in params.get('event_handlers') or list():
+ state = event_handler_data.get('state') or 'present'
+ event_handler = pubnub_event_handler(data=event_handler_data,
+ block=block)
+ if state == 'absent' and event_handler:
+ block.delete_event_handler(event_handler)
+
+ # Update block operation state if required.
+ if block and not is_new_block:
+ if params['state'] == 'started':
+ block.start()
+ elif params['state'] == 'stopped':
+ block.stop()
+
+ # Save current account state.
+ if not module.check_mode:
+ try:
+ account.save()
+ except (exceptions.APIAccessError, exceptions.KeysetError,
+ exceptions.BlockError, exceptions.EventHandlerError,
+ exceptions.GeneralPubNubError) as exc:
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=module_cache)
+
+ # Report module execution results.
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ changed_will_change = account.changed or account.will_change
+ module.exit_json(changed=changed_will_change, module_cache=module_cache)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py
new file mode 100644
index 00000000..8dbc6b9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pulp_repo.py
@@ -0,0 +1,754 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Joe Adams <@sysadmind>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pulp_repo
+author: "Joe Adams (@sysadmind)"
+short_description: Add or remove Pulp repos from a remote host.
+description:
+ - Add or remove Pulp repos from a remote host.
+options:
+ add_export_distributor:
+ description:
+ - Whether or not to add the export distributor to new C(rpm) repositories.
+ type: bool
+ default: no
+ feed:
+ description:
+ - Upstream feed URL to receive updates from.
+ type: str
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the M(ansible.builtin.uri) module, only
+        sends authentication information when a web service responds to an
+        initial request with a 401 status. Since some basic auth services do
+        not properly send a 401, logins will fail. This option forces the
+        sending of the Basic authentication header upon the initial request.
+ type: bool
+ default: no
+ generate_sqlite:
+ description:
+ - Boolean flag to indicate whether sqlite files should be generated during
+ a repository publish.
+ required: false
+ type: bool
+ default: no
+ feed_ca_cert:
+ description:
+ - CA certificate string used to validate the feed source SSL certificate.
+ This can be the file content or the path to the file.
+        The I(ca_cert) alias will be removed in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_ca_cert, ca_cert ]
+ feed_client_cert:
+ description:
+ - Certificate used as the client certificate when synchronizing the
+ repository. This is used to communicate authentication information to
+ the feed source. The value to this option must be the full path to the
+ certificate. The specified file may be the certificate itself or a
+ single file containing both the certificate and private key. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from I(client_cert); this
+        will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_cert ]
+ feed_client_key:
+ description:
+ - Private key to the certificate specified in I(importer_ssl_client_cert),
+ assuming it is not included in the certificate file itself. This can be
+ the file content or the path to the file.
+      - If not specified, the default value will come from I(client_key); this
+        will change in community.general 3.0.0.
+ type: str
+ aliases: [ importer_ssl_client_key ]
+ name:
+ description:
+ - Name of the repo to add or remove. This correlates to repo-id in Pulp.
+ required: true
+ type: str
+ aliases: [ repo ]
+ proxy_host:
+ description:
+ - Proxy url setting for the pulp repository importer. This is in the
+ format scheme://host.
+ required: false
+ default: null
+ type: str
+ proxy_port:
+ description:
+ - Proxy port setting for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_username:
+ description:
+ - Proxy username for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_password:
+ description:
+ - Proxy password for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ publish_distributor:
+ description:
+ - Distributor to use when state is C(publish). The default is to
+ publish all distributors.
+ type: str
+ pulp_host:
+ description:
+ - URL of the pulp server to connect to.
+ default: https://127.0.0.1
+ type: str
+ relative_url:
+ description:
+      - Relative URL for the local repository. It's required when C(state=present).
+ type: str
+ repo_type:
+ description:
+      - Repo plugin type to use (for example C(rpm) or C(docker)).
+ default: rpm
+ type: str
+ repoview:
+ description:
+      - Whether to generate repoview files for a published repository. Setting
+        this to C(yes) automatically activates I(generate_sqlite).
+ required: false
+ type: bool
+ default: no
+ serve_http:
+ description:
+ - Make the repo available over HTTP.
+ type: bool
+ default: no
+ serve_https:
+ description:
+ - Make the repo available over HTTPS.
+ type: bool
+ default: yes
+ state:
+ description:
+ - The repo state. A state of C(sync) will queue a sync of the repo.
+ This is asynchronous but not delayed like a scheduled sync. A state of
+ C(publish) will use the repository's distributor to publish the content.
+ default: present
+ choices: [ "present", "absent", "sync", "publish" ]
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication to the pulp API.
+ If the I(url_username) parameter is not specified, the I(url_password)
+ parameter will not be used.
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication to the pulp API.
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ wait_for_completion:
+ description:
+ - Wait for asynchronous tasks to complete before returning.
+ type: bool
+ default: no
+notes:
+ - This module can currently only create distributors and importers on rpm
+ repositories. Contributions to support other repo types are welcome.
+extends_documentation_fragment:
+ - url
+'''
+
+EXAMPLES = '''
+- name: Create a new repo with name 'my_repo'
+ community.general.pulp_repo:
+ name: my_repo
+ relative_url: my/repo
+ state: present
+
+- name: Create a repo with a feed and a relative URL
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ feed: http://mirror.centos.org/centos/6/updates/x86_64/
+ relative_url: centos/6/updates
+ url_username: admin
+ url_password: admin
+ force_basic_auth: yes
+ state: present
+
+- name: Remove a repo from the pulp server
+ community.general.pulp_repo:
+ name: my_old_repo
+ repo_type: rpm
+ state: absent
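+
+# The two examples below are illustrative sketches of the sync and publish states
+# described in the option documentation. The repo name is hypothetical;
+# yum_distributor is the distributor id this module creates for rpm repositories.
+- name: Queue a sync of an existing repo
+  community.general.pulp_repo:
+    name: my_repo
+    repo_type: rpm
+    state: sync
+
+- name: Publish a repo using the yum distributor
+  community.general.pulp_repo:
+    name: my_repo
+    publish_distributor: yum_distributor
+    state: publish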
+'''
+
+RETURN = '''
+repo:
+ description: Name of the repo that the action was performed on.
+ returned: success
+ type: str
+ sample: my_repo
+'''
+
+import json
+import os
+from time import sleep
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+
+
+class pulp_server(object):
+ """
+ Class to interact with a Pulp server
+ """
+
+ def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
+ self.module = module
+ self.host = pulp_host
+ self.repo_type = repo_type
+ self.repo_cache = dict()
+ self.wait_for_completion = wait_for_completion
+
+ def check_repo_exists(self, repo_id):
+ try:
+ self.get_repo_config_by_id(repo_id)
+ except IndexError:
+ return False
+ else:
+ return True
+
+ def compare_repo_distributor_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ for key, value in kwargs.items():
+ if key not in distributor['config'].keys():
+ return False
+
+                if distributor['config'][key] != value:
+ return False
+
+ return True
+
+ def compare_repo_importer_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for importer in repo_config['importers']:
+ for key, value in kwargs.items():
+ if value is not None:
+ if key not in importer['config'].keys():
+ return False
+
+                    if importer['config'][key] != value:
+ return False
+
+ return True
+
+ def create_repo(
+ self,
+ repo_id,
+ relative_url,
+ feed=None,
+ generate_sqlite=False,
+ serve_http=False,
+ serve_https=True,
+ proxy_host=None,
+ proxy_port=None,
+ proxy_username=None,
+ proxy_password=None,
+ repoview=False,
+ ssl_ca_cert=None,
+ ssl_client_cert=None,
+ ssl_client_key=None,
+ add_export_distributor=False
+ ):
+ url = "%s/pulp/api/v2/repositories/" % self.host
+ data = dict()
+ data['id'] = repo_id
+ data['distributors'] = []
+
+ if self.repo_type == 'rpm':
+ yum_distributor = dict()
+ yum_distributor['distributor_id'] = "yum_distributor"
+ yum_distributor['distributor_type_id'] = "yum_distributor"
+ yum_distributor['auto_publish'] = True
+ yum_distributor['distributor_config'] = dict()
+ yum_distributor['distributor_config']['http'] = serve_http
+ yum_distributor['distributor_config']['https'] = serve_https
+ yum_distributor['distributor_config']['relative_url'] = relative_url
+ yum_distributor['distributor_config']['repoview'] = repoview
+ yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(yum_distributor)
+
+ if add_export_distributor:
+ export_distributor = dict()
+ export_distributor['distributor_id'] = "export_distributor"
+ export_distributor['distributor_type_id'] = "export_distributor"
+ export_distributor['auto_publish'] = False
+ export_distributor['distributor_config'] = dict()
+ export_distributor['distributor_config']['http'] = serve_http
+ export_distributor['distributor_config']['https'] = serve_https
+ export_distributor['distributor_config']['relative_url'] = relative_url
+ export_distributor['distributor_config']['repoview'] = repoview
+ export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(export_distributor)
+
+ data['importer_type_id'] = "yum_importer"
+ data['importer_config'] = dict()
+
+ if feed:
+ data['importer_config']['feed'] = feed
+
+ if proxy_host:
+ data['importer_config']['proxy_host'] = proxy_host
+
+ if proxy_port:
+ data['importer_config']['proxy_port'] = proxy_port
+
+ if proxy_username:
+ data['importer_config']['proxy_username'] = proxy_username
+
+ if proxy_password:
+ data['importer_config']['proxy_password'] = proxy_password
+
+ if ssl_ca_cert:
+ data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
+
+ if ssl_client_cert:
+ data['importer_config']['ssl_client_cert'] = ssl_client_cert
+
+ if ssl_client_key:
+ data['importer_config']['ssl_client_key'] = ssl_client_key
+
+ data['notes'] = {
+ "_repo-type": "rpm-repo"
+ }
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 201:
+ self.module.fail_json(
+ msg="Failed to create repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+ else:
+ return True
+
+ def delete_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='DELETE')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to delete repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def get_repo_config_by_id(self, repo_id):
+ if repo_id not in self.repo_cache.keys():
+ repo_array = [x for x in self.repo_list if x['id'] == repo_id]
+ self.repo_cache[repo_id] = repo_array[0]
+
+ return self.repo_cache[repo_id]
+
+ def publish_repo(self, repo_id, publish_distributor):
+ url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
+
+ # If there's no distributor specified, we will publish them all
+ if publish_distributor is None:
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ data = dict()
+ data['id'] = distributor['id']
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=distributor['id'])
+ else:
+ data = dict()
+ data['id'] = publish_distributor
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=publish_distributor)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def sync_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to schedule a sync of the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def update_repo_distributor_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ distributor_url = "%s%s/" % (url, distributor['id'])
+ data = dict()
+ data['distributor_config'] = dict()
+
+ for key, value in kwargs.items():
+ data['distributor_config'][key] = value
+
+ response, info = fetch_url(
+ self.module,
+ distributor_url,
+ data=json.dumps(data),
+ method='PUT')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the relative url for the repository.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ def update_repo_importer_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
+ data = dict()
+ importer_config = dict()
+
+ for key, value in kwargs.items():
+ if value is not None:
+ importer_config[key] = value
+
+ data['importer_config'] = importer_config
+
+ if self.repo_type == 'rpm':
+ data['importer_type_id'] = "yum_importer"
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the repo importer configuration",
+ status_code=info['status'],
+ response=info['msg'],
+ importer_config=importer_config,
+ url=url)
+
+ def set_repo_list(self):
+ url = "%s/pulp/api/v2/repositories/?details=true" % self.host
+ response, info = fetch_url(self.module, url, method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Request failed",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ self.repo_list = json.load(response)
+
+ def verify_tasks_completed(self, response_dict):
+ for task in response_dict['spawned_tasks']:
+ task_url = "%s%s" % (self.host, task['_href'])
+
+ while True:
+ response, info = fetch_url(
+ self.module,
+ task_url,
+ data='',
+ method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Failed to check async task status.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=task_url)
+
+ task_dict = json.load(response)
+
+ if task_dict['state'] == 'finished':
+ return True
+
+ if task_dict['state'] == 'error':
+ self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
+
+ sleep(2)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ add_export_distributor=dict(default=False, type='bool'),
+ feed=dict(),
+ generate_sqlite=dict(default=False, type='bool'),
+ feed_ca_cert=dict(aliases=['importer_ssl_ca_cert', 'ca_cert'],
+ deprecated_aliases=[dict(name='ca_cert', version='3.0.0',
+ collection_name='community.general')]), # was Ansible 2.14
+ feed_client_cert=dict(aliases=['importer_ssl_client_cert']),
+ feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True),
+ name=dict(required=True, aliases=['repo']),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ proxy_username=dict(),
+ proxy_password=dict(no_log=True),
+ publish_distributor=dict(),
+ pulp_host=dict(default="https://127.0.0.1"),
+ relative_url=dict(),
+ repo_type=dict(default="rpm"),
+ repoview=dict(default=False, type='bool'),
+ serve_http=dict(default=False, type='bool'),
+ serve_https=dict(default=True, type='bool'),
+ state=dict(
+ default="present",
+ choices=['absent', 'present', 'sync', 'publish']),
+ wait_for_completion=dict(default=False, type="bool"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ add_export_distributor = module.params['add_export_distributor']
+ feed = module.params['feed']
+ generate_sqlite = module.params['generate_sqlite']
+ importer_ssl_ca_cert = module.params['feed_ca_cert']
+ importer_ssl_client_cert = module.params['feed_client_cert']
+ if importer_ssl_client_cert is None and module.params['client_cert'] is not None:
+ importer_ssl_client_cert = module.params['client_cert']
+ module.deprecate("To specify client certificates to be used with the repo to sync, and not for communication with the "
+ "Pulp instance, use the new options `feed_client_cert` and `feed_client_key` (available since "
+ "Ansible 2.9.2). Until community.general 3.0.0, the default value for `feed_client_cert` will be "
+ "taken from `client_cert` if only the latter is specified",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ importer_ssl_client_key = module.params['feed_client_key']
+ if importer_ssl_client_key is None and module.params['client_key'] is not None:
+ importer_ssl_client_key = module.params['client_key']
+ module.deprecate("In Ansible 2.9.2 `feed_client_key` option was added. Until community.general 3.0.0 the default "
+ "value will come from client_key option",
+ version="3.0.0", collection_name='community.general') # was Ansible 2.14
+ proxy_host = module.params['proxy_host']
+ proxy_port = module.params['proxy_port']
+ proxy_username = module.params['proxy_username']
+ proxy_password = module.params['proxy_password']
+ publish_distributor = module.params['publish_distributor']
+ pulp_host = module.params['pulp_host']
+ relative_url = module.params['relative_url']
+ repo = module.params['name']
+ repo_type = module.params['repo_type']
+ repoview = module.params['repoview']
+ serve_http = module.params['serve_http']
+ serve_https = module.params['serve_https']
+ state = module.params['state']
+ wait_for_completion = module.params['wait_for_completion']
+
+ if (state == 'present') and (not relative_url):
+ module.fail_json(msg="When state is present, relative_url is required.")
+
+ # Ensure that the importer_ssl_* is the content and not a file path
+ if importer_ssl_ca_cert is not None:
+ importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
+ if os.path.isfile(importer_ssl_ca_cert_file_path):
+ importer_ssl_ca_cert_file_object = open(importer_ssl_ca_cert_file_path, 'r')
+ try:
+ importer_ssl_ca_cert = importer_ssl_ca_cert_file_object.read()
+ finally:
+ importer_ssl_ca_cert_file_object.close()
+
+ if importer_ssl_client_cert is not None:
+ importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
+ if os.path.isfile(importer_ssl_client_cert_file_path):
+ importer_ssl_client_cert_file_object = open(importer_ssl_client_cert_file_path, 'r')
+ try:
+ importer_ssl_client_cert = importer_ssl_client_cert_file_object.read()
+ finally:
+ importer_ssl_client_cert_file_object.close()
+
+ if importer_ssl_client_key is not None:
+ importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
+ if os.path.isfile(importer_ssl_client_key_file_path):
+ importer_ssl_client_key_file_object = open(importer_ssl_client_key_file_path, 'r')
+ try:
+ importer_ssl_client_key = importer_ssl_client_key_file_object.read()
+ finally:
+ importer_ssl_client_key_file_object.close()
+
+ server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
+ server.set_repo_list()
+ repo_exists = server.check_repo_exists(repo)
+
+ changed = False
+
+ if state == 'absent' and repo_exists:
+ if not module.check_mode:
+ server.delete_repo(repo)
+
+ changed = True
+
+ if state == 'sync':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be synced.")
+
+ if not module.check_mode:
+ server.sync_repo(repo)
+
+ changed = True
+
+ if state == 'publish':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be published.")
+
+ if not module.check_mode:
+ server.publish_repo(repo, publish_distributor)
+
+ changed = True
+
+ if state == 'present':
+ if not repo_exists:
+ if not module.check_mode:
+ server.create_repo(
+ repo_id=repo,
+ relative_url=relative_url,
+ feed=feed,
+ generate_sqlite=generate_sqlite,
+ serve_http=serve_http,
+ serve_https=serve_https,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ repoview=repoview,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key,
+ add_export_distributor=add_export_distributor)
+
+ changed = True
+
+ else:
+ # Check to make sure all the settings are correct
+ # The importer config gets overwritten on set and not updated, so
+ # we set the whole config at the same time.
+ if not server.compare_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key
+ ):
+ if not module.check_mode:
+ server.update_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key)
+
+ changed = True
+
+ if relative_url is not None:
+ if not server.compare_repo_distributor_config(
+ repo,
+ relative_url=relative_url
+ ):
+ if not module.check_mode:
+ server.update_repo_distributor_config(
+ repo,
+ relative_url=relative_url)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, repoview=repoview):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, repoview=repoview)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, http=serve_http):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, http=serve_http)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, https=serve_https):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, https=serve_https)
+
+ changed = True
+
+ module.exit_json(changed=changed, repo=repo)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py
new file mode 100644
index 00000000..db8c0ec8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/puppet.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner.
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ type: str
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ type: str
+ modulepath:
+ description:
+ - Path to an alternate location for puppet modules.
+ type: str
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ type: str
+ noop:
+ description:
+ - Override puppet.conf noop mode.
+ - When C(yes), run Puppet agent with C(--noop) switch set.
+ - When C(no), run Puppet agent with C(--no-noop) switch set.
+ - When unset (default), use default or puppet.conf value if defined.
+ type: bool
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts.
+ type: dict
+ facter_basename:
+ description:
+ - Basename of the facter output file.
+ type: str
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ type: str
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used.
+ - C(all) will go to both C(stdout) and C(syslog).
+ type: str
+ choices: [ all, stdout, syslog ]
+ default: stdout
+ certname:
+ description:
+ - The name to use when handling certificates.
+ type: str
+ tags:
+ description:
+ - A list of puppet tags to be used.
+ type: list
+ elements: str
+ execute:
+ description:
+ - Execute a specific piece of Puppet code.
+ - It has no effect with a puppetmaster.
+ type: str
+ use_srv_records:
+ description:
+      - Toggles the C(use_srv_records) flag.
+ type: bool
+ summarize:
+ description:
+ - Whether to print a transaction summary.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Print extra information.
+ type: bool
+ default: false
+ debug:
+ description:
+ - Enable full debugging.
+ type: bool
+ default: false
+requirements:
+- puppet
+author:
+- Monty Taylor (@emonty)
+'''
+
+EXAMPLES = r'''
+- name: Run puppet agent and fail if anything goes wrong
+ community.general.puppet:
+
+- name: Run puppet and timeout in 5 minutes
+ community.general.puppet:
+ timeout: 5m
+
+- name: Run puppet using a different environment
+ community.general.puppet:
+ environment: testing
+
+- name: Run puppet using a specific certname
+ community.general.puppet:
+ certname: agent01.example.com
+
+- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
+ community.general.puppet:
+ execute: include ::mymodule
+
+- name: Run puppet using specific tags
+ community.general.puppet:
+ tags:
+ - update
+ - nginx
+
+- name: Run puppet agent in noop mode
+ community.general.puppet:
+ noop: yes
+
+- name: Run a manifest with debug, log to both syslog and stdout, specify module path
+ community.general.puppet:
+ modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ logdest: all
+ manifest: /var/lib/example/puppet_step_config.pp
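+
+# Illustrative sketch of the summarize and verbose options documented above;
+# the values are examples only.
+- name: Run puppet agent and print a transaction summary with verbose output
+  community.general.puppet:
+    summarize: yes
+    verbose: yes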
+'''
+
+import json
+import os
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def _get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ out_file = os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'w')
+    # json.dumps() already returns text; writing it directly keeps the
+    # text-mode file handle working on both Python 2 and Python 3.
+    out_file.write(json.dumps(data))
+ out_file.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(type='str', default='30m'),
+ puppetmaster=dict(type='str'),
+ modulepath=dict(type='str'),
+ manifest=dict(type='str'),
+ noop=dict(required=False, type='bool'),
+ logdest=dict(type='str', default='stdout', choices=['all',
+ 'stdout',
+ 'syslog']),
+ # internal code to work with --diff, do not use
+ show_diff=dict(type='bool', default=False, aliases=['show-diff']),
+ facts=dict(type='dict'),
+ facter_basename=dict(type='str', default='ansible'),
+ environment=dict(type='str'),
+ certname=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ execute=dict(type='str'),
+ summarize=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ use_srv_records=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
+ ('puppetmaster', 'modulepath'),
+ ],
+ )
+ p = module.params
+
+ global PUPPET_CMD
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
+
+ if not PUPPET_CMD:
+ module.fail_json(
+ msg="Could not find puppet. Please ensure it is installed.")
+
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
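+    # 'puppet config print agent_disabled_lockfile' prints the lock file path that
+    # 'puppet agent --disable' creates; if that file exists, the agent is disabled.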
+ if not p['manifest']:
+ rc, stdout, stderr = module.run_command(
+ PUPPET_CMD + " config print agent_disabled_lockfile")
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ _get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=shlex_quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
+
+ if not p['manifest'] and not p['execute']:
+ cmd = ("%(base_cmd)s agent --onetime"
+ " --no-daemonize --no-usecacheonfailure --no-splay"
+ " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
+ if p['puppetmaster']:
+ cmd += " --server %s" % shlex_quote(p['puppetmaster'])
+ if p['show_diff']:
+ cmd += " --show_diff"
+ if p['environment']:
+ cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if module.check_mode:
+ cmd += " --noop"
+        elif p['noop'] is not None:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['use_srv_records'] is not None:
+ if not p['use_srv_records']:
+ cmd += " --no-use_srv_records"
+ else:
+ cmd += " --use_srv_records"
+ else:
+ cmd = "%s apply --detailed-exitcodes " % base_cmd
+ if p['logdest'] == 'syslog':
+ cmd += "--logdest syslog "
+ if p['logdest'] == 'all':
+ cmd += " --logdest syslog --logdest stdout"
+ if p['modulepath']:
+ cmd += "--modulepath='%s'" % p['modulepath']
+ if p['environment']:
+ cmd += "--environment '%s' " % p['environment']
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if module.check_mode:
+ cmd += "--noop "
+ elif 'noop' in p:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['execute']:
+ cmd += " --execute '%s'" % p['execute']
+ else:
+ cmd += " %s" % shlex_quote(p['manifest'])
+ if p['summarize']:
+ cmd += " --summarize"
+ if p['debug']:
+ cmd += " --debug"
+ if p['verbose']:
+ cmd += " --verbose"
+ rc, stdout, stderr = module.run_command(cmd)
+
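+    # With --detailed-exitcodes, puppet exits 0 (no changes), 2 (changes applied)
+    # and 4/6 on failures; 124 comes from the timeout wrapper, not puppet itself.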
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py
new file mode 100644
index 00000000..5e8b5932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefa_facts.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefa_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flasharray.purefa_info) instead.
+short_description: Collect facts from Pure Storage FlashArray
+description:
+ - Collect facts information from a Pure Storage Flasharray running the
+ Purity//FA operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
+ type: list
+ required: false
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fa
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefa_facts:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect configuration and capacity facts
+ community.general.purefa_facts:
+ gather_subset:
+ - config
+ - capacity
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect all facts
+ community.general.purefa_facts:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
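+
+# Illustrative sketch: the module returns its data under ansible_facts, so a
+# later task can read it as ansible_purefa_facts (the 'default' key is shown here).
+- name: Show the default facts collected above
+  ansible.builtin.debug:
+    var: ansible_purefa_facts.default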
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {},
+ "config": {
+ "directory_service": {
+ "array_admin_group": null,
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "check_peer": false,
+ "enabled": false,
+ "group_base": null,
+ "readonly_group": null,
+ "storage_admin_group": null,
+ "uri": []
+ },
+ "dns": {
+ "domain": "domain.com",
+ "nameservers": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "ntp": [
+ "0.ntp.pool.org",
+ "1.ntp.pool.org",
+ "2.ntp.pool.org",
+ "3.ntp.pool.org"
+ ],
+ "smtp": [
+ {
+ "enabled": true,
+ "name": "alerts@acme.com"
+ },
+ {
+ "enabled": true,
+ "name": "user@acme.com"
+ }
+ ],
+ "snmp": [
+ {
+ "auth_passphrase": null,
+ "auth_protocol": null,
+ "community": null,
+ "host": "localhost",
+ "name": "localhost",
+ "privacy_passphrase": null,
+ "privacy_protocol": null,
+ "user": null,
+ "version": "v2c"
+ }
+ ],
+ "ssl_certs": {
+ "country": null,
+ "email": null,
+ "issued_by": "",
+ "issued_to": "",
+ "key_size": 2048,
+ "locality": null,
+ "organization": "Acme Storage, Inc.",
+ "organizational_unit": "Acme Storage, Inc.",
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "2017-08-11T23:09:06Z",
+ "valid_to": "2027-08-09T23:09:06Z"
+ },
+ "syslog": []
+ },
+ "default": {
+ "array_name": "flasharray1",
+ "connected_arrays": 1,
+ "hostgroups": 0,
+ "hosts": 10,
+ "pods": 3,
+ "protection_groups": 1,
+ "purity_version": "5.0.4",
+ "snapshots": 1,
+ "volume_groups": 2
+ },
+ "hgroups": {},
+ "hosts": {
+ "host1": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:2f6f5715a533"
+ ],
+ "wwn": []
+ },
+ "host2": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:d17fb13fe0b"
+ ],
+ "wwn": []
+ },
+ "host3": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:97b1351bfb2"
+ ],
+ "wwn": []
+ },
+ "host4": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:dd84e9a7b2cb"
+ ],
+ "wwn": [
+ "10000000C96C48D1",
+ "10000000C96C48D2"
+ ]
+ }
+ },
+ "interfaces": {
+ "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
+ },
+ "network": {
+ "ct0.eth0": {
+ "address": "10.10.10.10",
+ "gateway": "10.10.10.1",
+ "hwaddr": "ec:f4:bb:c8:8a:04",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct0.eth2": {
+ "address": "10.10.10.11",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:00",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth3": {
+ "address": "10.10.10.12",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:02",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth4": {
+ "address": "10.10.10.13",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth5": {
+ "address": "10.10.10.14",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0d",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "vir0": {
+ "address": "10.10.10.20",
+ "gateway": "10.10.10.1",
+ "hwaddr": "fe:ba:e9:e7:6b:0f",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ }
+ },
+ "offload": {
+ "nfstarget": {
+ "address": "10.0.2.53",
+ "mount_options": null,
+ "mount_point": "/offload",
+ "protocol": "nfs",
+ "status": "scanning"
+ }
+ },
+ "performance": {
+ "input_per_sec": 8191,
+ "output_per_sec": 0,
+ "queue_depth": 1,
+ "reads_per_sec": 0,
+ "san_usec_per_write_op": 15,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 642,
+ "writes_per_sec": 2
+ },
+ "pgroups": {
+ "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
+ "hgroups": null,
+ "hosts": null,
+ "source": "host1",
+ "targets": null,
+ "volumes": [
+ "volume-1"
+ ]
+ }
+ },
+ "pods": {
+ "srm-pod": {
+ "arrays": [
+ {
+ "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
+ "mediator_status": "online",
+ "name": "sn1-405-c09-37",
+ "status": "online"
+ },
+ {
+ "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
+ "mediator_status": "online",
+ "name": "sn1-420-c11-31",
+ "status": "online"
+ }
+ ],
+ "source": null
+ }
+ },
+ "snapshots": {
+ "consisgroup.cgsnapshot": {
+ "created": "2018-03-28T09:34:02Z",
+ "size": 13958643712,
+ "source": "volume-1"
+ }
+ },
+ "subnet": {},
+ "vgroups": {
+ "vvol--vSphere-HA-0ffc7dd1-vg": {
+ "volumes": [
+ "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
+ ]
+ }
+ },
+ "volumes": {
+ "ansible_data": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "host1",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B000114A6",
+ "size": 1099511627776,
+ "source": null
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
+
+
+ADMIN_API_VERSION = '1.14'
+S3_REQUIRED_API_VERSION = '1.16'
+LATENCY_REQUIRED_API_VERSION = '1.16'
+AC_REQUIRED_API_VERSION = '1.14'
+CAP_REQUIRED_API_VERSION = '1.6'
+SAN_REQUIRED_API_VERSION = '1.10'
+NVME_API_VERSION = '1.16'
+PREFERRED_API_VERSION = '1.15'
+CONN_STATUS_API_VERSION = '1.17'
+
+
+def generate_default_dict(array):
+ default_facts = {}
+ defaults = array.get()
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ default_facts['volume_groups'] = len(array.list_vgroups())
+ default_facts['connected_arrays'] = len(array.list_array_connections())
+ default_facts['pods'] = len(array.list_pods())
+ default_facts['connection_key'] = array.get(connection_key=True)['connection_key']
+ hosts = array.list_hosts()
+ admins = array.list_admins()
+ snaps = array.list_volumes(snap=True, pending=True)
+ pgroups = array.list_pgroups(pending=True)
+ hgroups = array.list_hgroups()
+ # Old FA arrays only report model from the primary controller
+ ct0_model = array.get_hardware('CT0')['model']
+ if ct0_model:
+ model = ct0_model
+ else:
+ ct1_model = array.get_hardware('CT1')['model']
+ model = ct1_model
+ default_facts['array_model'] = model
+ default_facts['array_name'] = defaults['array_name']
+ default_facts['purity_version'] = defaults['version']
+ default_facts['hosts'] = len(hosts)
+ default_facts['snapshots'] = len(snaps)
+ default_facts['protection_groups'] = len(pgroups)
+ default_facts['hostgroups'] = len(hgroups)
+ default_facts['admins'] = len(admins)
+ return default_facts
+
+
+def generate_perf_dict(array):
+ perf_facts = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action='monitor', latency=True)[0]
+ perf_info = array.get(action='monitor')[0]
+ # IOPS
+ perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
+ perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
+
+ # Bandwidth
+ perf_facts['input_per_sec'] = perf_info['input_per_sec']
+ perf_facts['output_per_sec'] = perf_info['output_per_sec']
+
+ # Latency
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
+ perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
+ perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
+ perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
+ perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
+ perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
+ perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op']
+ perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
+ perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
+ perf_facts['queue_depth'] = perf_info['queue_depth']
+ return perf_facts
+
+
+def generate_config_dict(array):
+ config_facts = {}
+ api_version = array._list_available_rest_versions()
+ # DNS
+ config_facts['dns'] = array.get_dns()
+ # SMTP
+ config_facts['smtp'] = array.list_alert_recipients()
+ # SNMP
+ config_facts['snmp'] = array.list_snmp_managers()
+ config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
+ # DS
+ config_facts['directory_service'] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['directory_service_roles'] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]['name']
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles[role]['group'],
+ 'group_base': roles[role]['group_base'],
+ }
+ else:
+ config_facts['directory_service'].update(array.get_directory_service(groups=True))
+ # NTP
+ config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
+ # SYSLOG
+ config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
+ # Phonehome
+ config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
+ # Proxy
+ config_facts['proxy'] = array.get(proxy=True)['proxy']
+ # Relay Host
+ config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
+ # Sender Domain
+ config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
+ # SYSLOG
+ config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
+ # Idle Timeout
+ config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
+ # SCSI Timeout
+ config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
+ # SSL
+ config_facts['ssl_certs'] = array.get_certificate()
+ # Global Admin settings
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['global_admin'] = array.get_global_admin_attributes()
+ return config_facts
+
+
+def generate_admin_dict(array):
+ api_version = array._list_available_rest_versions()
+ admin_facts = {}
+ if ADMIN_API_VERSION in api_version:
+ admins = array.list_admins()
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin]['name']
+ admin_facts[admin_name] = {
+ 'type': admins[admin]['type'],
+ 'role': admins[admin]['role'],
+ }
+ return admin_facts
+
+
+def generate_subnet_dict(array):
+ sub_facts = {}
+ subnets = array.list_subnets()
+ for sub in range(0, len(subnets)):
+ sub_name = subnets[sub]['name']
+ if subnets[sub]['enabled']:
+ sub_facts[sub_name] = {
+ 'gateway': subnets[sub]['gateway'],
+ 'mtu': subnets[sub]['mtu'],
+ 'vlan': subnets[sub]['vlan'],
+ 'prefix': subnets[sub]['prefix'],
+ 'interfaces': subnets[sub]['interfaces'],
+ 'services': subnets[sub]['services'],
+ }
+ return sub_facts
+
+
+def generate_network_dict(array):
+ net_facts = {}
+ ports = array.list_network_interfaces()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ net_facts[int_name] = {
+ 'hwaddr': ports[port]['hwaddr'],
+ 'mtu': ports[port]['mtu'],
+ 'enabled': ports[port]['enabled'],
+ 'speed': ports[port]['speed'],
+ 'address': ports[port]['address'],
+ 'slaves': ports[port]['slaves'],
+ 'services': ports[port]['services'],
+ 'gateway': ports[port]['gateway'],
+ 'netmask': ports[port]['netmask'],
+ }
+ if ports[port]['subnet']:
+ subnets = array.get_subnet(ports[port]['subnet'])
+ if subnets['enabled']:
+ net_facts[int_name]['subnet'] = {
+ 'name': subnets['name'],
+ 'prefix': subnets['prefix'],
+ 'vlan': subnets['vlan'],
+ }
+ return net_facts
+
+
+def generate_capacity_dict(array):
+ capacity_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CAP_REQUIRED_API_VERSION in api_version:
+ volumes = array.list_volumes(pending=True)
+ capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
+ capacity = array.get(space=True)
+ total_capacity = capacity[0]['capacity']
+ used_space = capacity[0]["total"]
+ capacity_facts['free_space'] = total_capacity - used_space
+ capacity_facts['total_capacity'] = total_capacity
+ capacity_facts['data_reduction'] = capacity[0]['data_reduction']
+ capacity_facts['system_space'] = capacity[0]['system']
+ capacity_facts['volume_space'] = capacity[0]['volumes']
+ capacity_facts['shared_space'] = capacity[0]['shared_space']
+ capacity_facts['snapshot_space'] = capacity[0]['snapshots']
+ capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
+ capacity_facts['total_reduction'] = capacity[0]['total_reduction']
+
+ return capacity_facts
+
+
+def generate_snap_dict(array):
+ snap_facts = {}
+ snaps = array.list_volumes(snap=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]['name']
+ snap_facts[snapshot] = {
+ 'size': snaps[snap]['size'],
+ 'source': snaps[snap]['source'],
+ 'created': snaps[snap]['created'],
+ }
+ return snap_facts
+
+
+def generate_vol_dict(array):
+ volume_facts = {}
+ vols = array.list_volumes()
+ for vol in range(0, len(vols)):
+ volume = vols[vol]['name']
+ volume_facts[volume] = {
+ 'source': vols[vol]['source'],
+ 'size': vols[vol]['size'],
+ 'serial': vols[vol]['serial'],
+ 'hosts': [],
+ 'bandwidth': ""
+ }
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ qvols = array.list_volumes(qos=True)
+ for qvol in range(0, len(qvols)):
+ volume = qvols[qvol]['name']
+ qos = qvols[qvol]['bandwidth_limit']
+ volume_facts[volume]['bandwidth'] = qos
+ vvols = array.list_volumes(protocol_endpoint=True)
+ for vvol in range(0, len(vvols)):
+ volume = vvols[vvol]['name']
+ volume_facts[volume] = {
+ 'source': vvols[vvol]['source'],
+ 'serial': vvols[vvol]['serial'],
+ 'hosts': []
+ }
+ cvols = array.list_volumes(connect=True)
+ for cvol in range(0, len(cvols)):
+ volume = cvols[cvol]['name']
+ voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
+ volume_facts[volume]['hosts'].append(voldict)
+ return volume_facts
+
+
+def generate_host_dict(array):
+ api_version = array._list_available_rest_versions()
+ host_facts = {}
+ hosts = array.list_hosts()
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ tports = []
+ host_all_info = array.get_host(hostname, all=True)
+ if host_all_info:
+ tports = host_all_info[0]['target_port']
+ host_facts[hostname] = {
+ 'hgroup': hosts[host]['hgroup'],
+ 'iqn': hosts[host]['iqn'],
+ 'wwn': hosts[host]['wwn'],
+ 'personality': array.get_host(hostname,
+ personality=True)['personality'],
+ 'target_port': tports
+ }
+ if NVME_API_VERSION in api_version:
+ host_facts[hostname]['nqn'] = hosts[host]['nqn']
+ if PREFERRED_API_VERSION in api_version:
+ hosts = array.list_hosts(preferred_array=True)
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array']
+ return host_facts
+
+
+def generate_pgroups_dict(array):
+ pgroups_facts = {}
+ pgroups = array.list_pgroups()
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]['name']
+ pgroups_facts[protgroup] = {
+ 'hgroups': pgroups[pgroup]['hgroups'],
+ 'hosts': pgroups[pgroup]['hosts'],
+ 'source': pgroups[pgroup]['source'],
+ 'targets': pgroups[pgroup]['targets'],
+ 'volumes': pgroups[pgroup]['volumes'],
+ }
+ prot_sched = array.get_pgroup(protgroup, schedule=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True)
+ if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
+            pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
+            pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
+ pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
+ pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
+ pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
+ pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
+ pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
+ pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
+ pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
+ pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
+ pgroups_facts[protgroup]['days'] = prot_reten['days']
+ pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
+ pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
+ if ":" in protgroup:
+ snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
+ pgroups_facts[protgroup]['snaps'] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]['name']
+ pgroups_facts[protgroup]['snaps'][snap] = {
+ 'created': snap_transfers[snap_transfer]['created'],
+ 'started': snap_transfers[snap_transfer]['started'],
+ 'completed': snap_transfers[snap_transfer]['completed'],
+ 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
+ 'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
+ 'progress': snap_transfers[snap_transfer]['progress'],
+ }
+ return pgroups_facts
+
+
+def generate_pods_dict(array):
+ pods_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods()
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]['name']
+ pods_facts[acpod] = {
+ 'source': pods[pod]['source'],
+ 'arrays': pods[pod]['arrays'],
+ }
+ return pods_facts
+
+
+def generate_conn_array_dict(array):
+ conn_array_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CONN_STATUS_API_VERSION in api_version:
+ carrays = array.list_connected_arrays()
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray]['array_name']
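+            # 'throtled' is read (and re-exposed) verbatim as the key name used in
+            # the underlying REST response for connected arrays.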
+ conn_array_facts[arrayname] = {
+ 'array_id': carrays[carray]['id'],
+ 'throtled': carrays[carray]['throtled'],
+ 'version': carrays[carray]['version'],
+ 'type': carrays[carray]['type'],
+ 'mgmt_ip': carrays[carray]['management_address'],
+ 'repl_ip': carrays[carray]['replication_address'],
+ }
+ if CONN_STATUS_API_VERSION in api_version:
+ conn_array_facts[arrayname]['status'] = carrays[carray]['status']
+ return conn_array_facts
+
+
+def generate_apps_dict(array):
+ apps_facts = {}
+ api_version = array._list_available_rest_versions()
+ if SAN_REQUIRED_API_VERSION in api_version:
+ apps = array.list_apps()
+ for app in range(0, len(apps)):
+ appname = apps[app]['name']
+ apps_facts[appname] = {
+ 'version': apps[app]['version'],
+ 'status': apps[app]['status'],
+ 'description': apps[app]['description'],
+ }
+ return apps_facts
+
+
+def generate_vgroups_dict(array):
+ vgroups_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups()
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]['name']
+ vgroups_facts[virtgroup] = {
+ 'volumes': vgroups[vgroup]['volumes'],
+ }
+ return vgroups_facts
+
+
+def generate_nfs_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ offload = array.list_nfs_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'mount_point': offload[target]['mount_point'],
+ 'protocol': offload[target]['protocol'],
+ 'mount_options': offload[target]['mount_options'],
+ 'address': offload[target]['address'],
+ }
+ return offload_facts
+
+
+def generate_s3_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if S3_REQUIRED_API_VERSION in api_version:
+ offload = array.list_s3_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'bucket': offload[target]['bucket'],
+ 'protocol': offload[target]['protocol'],
+ 'access_key_id': offload[target]['access_key_id'],
+ }
+ return offload_facts
+
+
+def generate_hgroups_dict(array):
+ hgroups_facts = {}
+ hgroups = array.list_hgroups()
+ for hgroup in range(0, len(hgroups)):
+ hostgroup = hgroups[hgroup]['name']
+ hgroups_facts[hostgroup] = {
+ 'hosts': hgroups[hgroup]['hosts'],
+ 'pgs': [],
+ 'vols': [],
+ }
+ pghgroups = array.list_hgroups(protect=True)
+ for pghg in range(0, len(pghgroups)):
+ pgname = pghgroups[pghg]['name']
+ hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
+ volhgroups = array.list_hgroups(connect=True)
+ for pgvol in range(0, len(volhgroups)):
+ pgname = volhgroups[pgvol]['name']
+ volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
+ hgroups_facts[pgname]['vols'].append(volpgdict)
+ return hgroups_facts
+
+
+def generate_interfaces_dict(array):
+ api_version = array._list_available_rest_versions()
+ int_facts = {}
+ ports = array.list_ports()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ if ports[port]['wwn']:
+ int_facts[int_name] = ports[port]['wwn']
+ if ports[port]['iqn']:
+ int_facts[int_name] = ports[port]['iqn']
+ if NVME_API_VERSION in api_version:
+ if ports[port]['nqn']:
+ int_facts[int_name] = ports[port]['nqn']
+ return int_facts
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ array = get_system(module)
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
+ 'hosts', 'admins', 'volumes', 'snapshots', 'pods',
+ 'vgroups', 'offload', 'apps', 'arrays')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(array)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(array)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(array)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(array)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(array)
+ if 'subnet' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(array)
+ if 'interfaces' in subset or 'all' in subset:
+ facts['interfaces'] = generate_interfaces_dict(array)
+ if 'hosts' in subset or 'all' in subset:
+ facts['hosts'] = generate_host_dict(array)
+ if 'volumes' in subset or 'all' in subset:
+ facts['volumes'] = generate_vol_dict(array)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(array)
+ if 'hgroups' in subset or 'all' in subset:
+ facts['hgroups'] = generate_hgroups_dict(array)
+ if 'pgroups' in subset or 'all' in subset:
+ facts['pgroups'] = generate_pgroups_dict(array)
+ if 'pods' in subset or 'all' in subset:
+ facts['pods'] = generate_pods_dict(array)
+ if 'admins' in subset or 'all' in subset:
+ facts['admins'] = generate_admin_dict(array)
+ if 'vgroups' in subset or 'all' in subset:
+ facts['vgroups'] = generate_vgroups_dict(array)
+ if 'offload' in subset or 'all' in subset:
+ facts['nfs_offload'] = generate_nfs_offload_dict(array)
+ facts['s3_offload'] = generate_s3_offload_dict(array)
+ if 'apps' in subset or 'all' in subset:
+ facts['apps'] = generate_apps_dict(array)
+ if 'arrays' in subset or 'all' in subset:
+ facts['arrays'] = generate_conn_array_dict(array)
+
+ module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py
new file mode 100644
index 00000000..8c5a40c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/purefb_facts.py
@@ -0,0 +1,652 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefb_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flashblade.purefb_info) instead.
+short_description: Collect facts from Pure Storage FlashBlade
+description:
+ - Collect facts information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+    fact information including filesystem, snapshot, bucket, object store
+    account and user, and blade counts. Additional fact information can be
+    collected based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems and snapshots.
+ required: false
+ type: list
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fb
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefb_facts:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect configuration and capacity facts
+ community.general.purefb_facts:
+ gather_subset:
+ - config
+ - capacity
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect all facts
+ community.general.purefb_facts:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "*(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+                }
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+HARD_LIMIT_API_VERSION = '1.4'
+
+
+def generate_default_dict(blade):
+ default_facts = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_facts['flashblade_name'] = defaults.name
+ default_facts['purity_version'] = defaults.version
+ default_facts['filesystems'] = \
+ len(blade.file_systems.list_file_systems().items)
+ default_facts['snapshots'] = \
+ len(blade.file_system_snapshots.list_file_system_snapshots().items)
+ default_facts['buckets'] = len(blade.buckets.list_buckets().items)
+ default_facts['object_store_users'] = \
+ len(blade.object_store_users.list_object_store_users().items)
+ default_facts['object_store_accounts'] = \
+ len(blade.object_store_accounts.list_object_store_accounts().items)
+ default_facts['blades'] = len(blade.blade.list_blades().items)
+ default_facts['total_capacity'] = \
+ blade.arrays.list_arrays_space().items[0].capacity
+ return default_facts
+
+
+def generate_perf_dict(blade):
+ perf_facts = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol='http')
+ s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
+ nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
+ perf_facts['aggregate'] = {
+ 'bytes_per_op': total_perf.items[0].bytes_per_op,
+ 'bytes_per_read': total_perf.items[0].bytes_per_read,
+ 'bytes_per_write': total_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': total_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': total_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': total_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': total_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': total_perf.items[0].writes_per_sec,
+ }
+ perf_facts['http'] = {
+ 'bytes_per_op': http_perf.items[0].bytes_per_op,
+ 'bytes_per_read': http_perf.items[0].bytes_per_read,
+ 'bytes_per_write': http_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': http_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': http_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': http_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': http_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': http_perf.items[0].writes_per_sec,
+ }
+ perf_facts['s3'] = {
+ 'bytes_per_op': s3_perf.items[0].bytes_per_op,
+ 'bytes_per_read': s3_perf.items[0].bytes_per_read,
+ 'bytes_per_write': s3_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': s3_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': s3_perf.items[0].writes_per_sec,
+ }
+ perf_facts['nfs'] = {
+ 'bytes_per_op': nfs_perf.items[0].bytes_per_op,
+ 'bytes_per_read': nfs_perf.items[0].bytes_per_read,
+ 'bytes_per_write': nfs_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': nfs_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': nfs_perf.items[0].writes_per_sec,
+ }
+
+ return perf_facts
+
+
+def generate_config_dict(blade):
+ config_facts = {}
+ config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
+ config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
+ config_facts['alert_watchers'] = \
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
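+    # The remaining config items are only gathered when the REST API reports
+    # version 1.4 (HARD_LIMIT_API_VERSION) as supported.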
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_facts['array_management'] = \
+ blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
+ config_facts['directory_service_roles'] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles.items[role].group,
+ 'group_base': roles.items[role].group_base
+ }
+ config_facts['nfs_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
+ config_facts['smb_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
+ config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_facts['ssl_certs'] = \
+ blade.certificates.list_certificates().items[0].to_dict()
+ return config_facts
+
+
+def generate_subnet_dict(blade):
+ sub_facts = {}
+ subnets = blade.subnets.list_subnets()
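+    # Only enabled subnets are included in the returned facts.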
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_facts[sub_name] = {
+ 'gateway': subnets.items[sub].gateway,
+ 'mtu': subnets.items[sub].mtu,
+ 'vlan': subnets.items[sub].vlan,
+ 'prefix': subnets.items[sub].prefix,
+ 'services': subnets.items[sub].services,
+ }
+ sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
+ sub_facts[sub_name]['interfaces'] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
+ return sub_facts
+
+
+def generate_lag_dict(blade):
+ lag_facts = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_facts[lag_name] = {
+ 'lag_speed': groups.items[groupcnt].lag_speed,
+ 'port_speed': groups.items[groupcnt].port_speed,
+ 'status': groups.items[groupcnt].status,
+ }
+ lag_facts[lag_name]['ports'] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
+ return lag_facts
+
+
+def generate_network_dict(blade):
+ net_facts = {}
+ ports = blade.network_interfaces.list_network_interfaces()
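+    # Only enabled network interfaces are included in the returned facts.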
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_facts[int_name] = {
+ 'type': ports.items[portcnt].type,
+ 'mtu': ports.items[portcnt].mtu,
+ 'vlan': ports.items[portcnt].vlan,
+ 'address': ports.items[portcnt].address,
+ 'services': ports.items[portcnt].services,
+ 'gateway': ports.items[portcnt].gateway,
+ 'netmask': ports.items[portcnt].netmask,
+ }
+ return net_facts
+
+
+def generate_capacity_dict(blade):
+ capacity_facts = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type='file-system')
+ object_cap = blade.arrays.list_arrays_space(type='object-store')
+ capacity_facts['total'] = total_cap.items[0].capacity
+ capacity_facts['aggregate'] = {
+ 'data_reduction': total_cap.items[0].space.data_reduction,
+ 'snapshots': total_cap.items[0].space.snapshots,
+ 'total_physical': total_cap.items[0].space.total_physical,
+ 'unique': total_cap.items[0].space.unique,
+ 'virtual': total_cap.items[0].space.virtual,
+ }
+ capacity_facts['file-system'] = {
+ 'data_reduction': file_cap.items[0].space.data_reduction,
+ 'snapshots': file_cap.items[0].space.snapshots,
+ 'total_physical': file_cap.items[0].space.total_physical,
+ 'unique': file_cap.items[0].space.unique,
+ 'virtual': file_cap.items[0].space.virtual,
+ }
+ capacity_facts['object-store'] = {
+ 'data_reduction': object_cap.items[0].space.data_reduction,
+ 'snapshots': object_cap.items[0].space.snapshots,
+ 'total_physical': object_cap.items[0].space.total_physical,
+ 'unique': object_cap.items[0].space.unique,
+        'virtual': object_cap.items[0].space.virtual,
+ }
+
+ return capacity_facts
+
+
+def generate_snap_dict(blade):
+ snap_facts = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_facts[snapshot] = {
+ 'destroyed': snaps.items[snap].destroyed,
+ 'source': snaps.items[snap].source,
+ 'suffix': snaps.items[snap].suffix,
+ 'source_destroyed': snaps.items[snap].source_destroyed,
+ }
+ return snap_facts
+
+
+def generate_fs_dict(blade):
+ fs_facts = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_facts[share] = {
+ 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+ 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+ 'provisioned': fsys.items[fsystem].provisioned,
+ 'destroyed': fsys.items[fsystem].destroyed,
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+ if fsys.items[fsystem].nfs.enabled:
+ fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+
+ return fs_facts
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnets', 'lags',
+ 'filesystems', 'snapshots')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(blade)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(blade)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(blade)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(blade)
+ if 'lags' in subset or 'all' in subset:
+ facts['lag'] = generate_lag_dict(blade)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(blade)
+ if 'subnets' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(blade)
+ if 'filesystems' in subset or 'all' in subset:
+ facts['filesystems'] = generate_fs_dict(blade)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(blade)
+
+ module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py
new file mode 100644
index 00000000..ab27fd5e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushbullet.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+options:
+ api_key:
+ type: str
+ description:
+      - Pushbullet API token.
+ required: true
+ channel:
+ type: str
+ description:
+      - The channel TAG you wish to broadcast a push notification to,
+        as seen on the "My Channels" > "Edit your channel" page
+        at Pushbullet.
+ device:
+ type: str
+ description:
+      - The device NAME you wish to send a push notification to,
+ as seen on the Pushbullet main page.
+ push_type:
+ type: str
+ description:
+      - Type of push notification to send.
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ type: str
+ description:
+ - Title of the notification.
+ required: true
+  body:
+    type: str
+    description:
+      - Body of the notification, for example details of the fault you're alerting about.
+  url:
+    type: str
+    description:
+      - URL field, used when I(push_type) is C(link).
+
+notes:
+ - Requires pushbullet.py Python package on the remote host.
+ You can install it via pip with ($ pip install pushbullet.py).
+ See U(https://github.com/randomchars/pushbullet.py)
+'''
+
+EXAMPLES = '''
+- name: Sends a push notification to a device
+ community.general.pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+- name: Sends a link to a device
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ device: Chrome
+ push_type: link
+ title: Ansible Documentation
+ body: https://docs.ansible.com/
+
+- name: Sends a push notification to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+    title: "Broadcasting a message to the #my-awesome-channel folks"
+
+- name: Sends a push notification with title and body to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: ALERT! Signup service is down
+ body: Error rate on signup service is over 90% for more than 2 minutes
+'''
+
+import traceback
+
+PUSHBULLET_IMP_ERR = None
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ PUSHBULLET_IMP_ERR = traceback.format_exc()
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
+ ),
+ mutually_exclusive=(
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
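+    # url is only used for 'link' pushes (passed to push_link below);
+    # it is ignored for plain notes.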
+
+ if not pushbullet_found:
+ module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py
new file mode 100644
index 00000000..7f73592a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/pushover.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# Copyright (c) 2019, Bernd Arnold <wopfel@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pushover
+short_description: Send notifications via U(https://pushover.net)
+description:
+    - Send notifications via Pushover to a subscriber list of devices and email
+      addresses. Requires the Pushover app on the devices.
+notes:
+    - You will require a pushover.net account to use this module, but no account
+      is required to receive messages.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ type: str
+ description:
+ - Pushover issued token identifying your pushover app.
+ required: true
+ user_key:
+ type: str
+ description:
+ - Pushover issued authentication key for your user.
+ required: true
+ title:
+ type: str
+ description:
+ - Message title.
+ required: false
+ pri:
+ type: str
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+ default: '0'
+ choices: [ '-2', '-1', '0', '1', '2' ]
+ device:
+ type: str
+ description:
+ - A device the message should be sent to. Multiple devices can be specified, separated by a comma.
+ required: false
+ version_added: 1.2.0
+
+author:
+ - "Jim Richardson (@weaselkeeper)"
+ - "Bernd Arnold (@wopfel)"
+'''
+
+EXAMPLES = '''
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} is acting strange ...'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ title: 'Alert!'
+    msg: '{{ inventory_hostname }} has exploded in flames, it is now time to panic'
+ pri: 1
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} has been lost somewhere'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ device: admins-iPhone
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg, title, device):
+        ''' Send the notification via the Pushover messages API. '''
+
+ url = '%s/1/messages.json' % (self.base_uri)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+
+ if title is not None:
+ options = dict(options,
+ title=title)
+
+ if device is not None:
+ options = dict(options,
+ device=device)
+
+ data = urlencode(options)
+
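+        # Pushover expects a form-encoded POST; anything other than an
+        # HTTP 200 response is treated as a failure below.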
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ title=dict(type='str'),
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ device=dict(type='str'),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+ except Exception:
+ module.fail_json(msg='Unable to send msg via pushover')
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
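+    # Splits a requirement such as 'boto3==1.6.1' into three groups:
+    # package name ('boto3'), optional operator ('=='), optional version ('1.6.1').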
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/python_requirements_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py
new file mode 100644
index 00000000..9f7df5c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax.py
@@ -0,0 +1,897 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax
+short_description: create / delete an instance in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud instance and optionally
+ waits for it to be 'running'.
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number with the name of the
+ created servers. Only applicable when used with the I(group) attribute
+ or meta key.
+ type: bool
+ default: 'yes'
+ boot_from_volume:
+ description:
+ - Whether or not to boot the instance from a Cloud Block Storage volume.
+ If C(yes) and I(image) is specified a new volume will be created at
+ boot time. I(boot_volume_size) is required with I(image) to create a
+ new volume at boot time.
+ type: bool
+ default: 'no'
+ boot_volume:
+ type: str
+ description:
+ - Cloud Block Storage ID or Name to use as the boot volume of the
+ instance
+ boot_volume_size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes. This is only required with
+ I(image) and I(boot_from_volume).
+ default: 100
+ boot_volume_terminate:
+ description:
+ - Whether the I(boot_volume) or newly created volume from I(image) will
+ be terminated when the server is terminated
+ type: bool
+ default: 'no'
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ count:
+ type: int
+ description:
+ - number of instances to launch
+ default: 1
+ count_offset:
+ type: int
+ description:
+ - number count to start at
+ default: 1
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified it will assume the value C(auto).
+ choices:
+ - auto
+ - manual
+ exact_count:
+ description:
+ - Explicitly ensure an exact count of instances, used with
+ state=active/present. If specified as C(yes) and I(count) is less than
+ the servers matched, servers will be deleted to match the count. If
+        the number of matched servers is fewer than specified in I(count),
+ additional servers will be added.
+ type: bool
+ default: 'no'
+ extra_client_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating the cloudservers
+ client. This is considered an advanced option, use it wisely and
+ with caution.
+ extra_create_args:
+ type: dict
+ description:
+ - A hash of key/value pairs to be used when creating a new server.
+ This is considered an advanced option, use it wisely and with caution.
+ files:
+ type: dict
+ description:
+      - Files to insert into the instance, in the form of C(remotefilename: localcontent).
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ group:
+ type: str
+ description:
+ - host group to assign to server, is also used for idempotent operations
+ to ensure a specific number of instances
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
+ With I(boot_from_volume), a Cloud Block Storage volume will be created
+ with this image
+ instance_ids:
+ type: list
+ description:
+ - list of instance ids, currently only used when state='absent' to
+ remove instances
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ aliases:
+ - keypair
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the instance
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+      - Data to be uploaded to the server's config drive. This option implies
+        I(config_drive). Can be a file path or a string.
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Jesse Keating (@omgjlk)"
+ - "Matt Martz (@sivel)"
+notes:
+ - I(exact_count) can be "destructive" if the number of running servers in
+ the I(group) is larger than that specified in I(count). In such a case, the
+ I(state) is effectively set to C(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have C(action)
+ set to C(delete), and the oldest servers in the group will be deleted.
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Server
+  hosts: local
+  gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: rax-test1
+ flavor: 5
+ image: b11d9567-e412-4255-96b9-bd63ab23bcfe
+ key_name: my_rackspace_key
+ files:
+ /root/test.txt: /home/localuser/test.txt
+ wait: yes
+ state: present
+ networks:
+ - private
+ - public
+ register: rax
+
+- name: Build an exact count of cloud servers with incremented names
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: Server build requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 10
+ count_offset: 10
+ exact_count: yes
+ group: test
+ wait: yes
+ register: rax
+'''
+
+import json
+import os
+import re
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
+ rax_find_image, rax_find_network, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.six import string_types
+
+
+def rax_find_server_image(module, server, image, boot_volume):
+ if not image and boot_volume:
+ vol = rax_find_bootable_volume(module, pyrax, server,
+ exit=False)
+ if not vol:
+ return None
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if vol_image_id:
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if server_image:
+ server.image = dict(id=server_image)
+
+ # Match image IDs taking care of boot from volume
+ if image and not server.image:
+ vol = rax_find_bootable_volume(module, pyrax, server)
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if not vol_image_id:
+ return None
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if image != server_image:
+ return None
+
+ server.image = dict(id=server_image)
+ elif image and server.image['id'] != image:
+ return None
+
+ return server.image
+
+
+def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
+ files=None, wait=True, wait_timeout=300, disk_config=None,
+ group=None, nics=None, extra_create_args=None, user_data=None,
+ config_drive=False, existing=None, block_device_mapping_v2=None):
+ names = [] if names is None else names
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ nics = [] if nics is None else nics
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+ existing = [] if existing is None else existing
+ block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
+
+ cs = pyrax.cloudservers
+ changed = False
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(os.path.expanduser(user_data)):
+ try:
+ user_data = os.path.expanduser(user_data)
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ # Handle the file contents
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ fileobj = open(lpath, 'r')
+ files[rpath] = fileobj.read()
+ fileobj.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+ try:
+ servers = []
+ bdmv2 = block_device_mapping_v2
+ for name in names:
+ servers.append(cs.servers.create(name=name, image=image,
+ flavor=flavor, meta=meta,
+ key_name=key_name,
+ files=files, nics=nics,
+ disk_config=disk_config,
+ config_drive=config_drive,
+ userdata=user_data,
+ block_device_mapping_v2=bdmv2,
+ **extra_create_args))
+ except Exception as e:
+        # Exception.message no longer exists on Python 3; str(e) covers both
+        msg = str(e) or repr(e)
+ module.fail_json(msg=msg)
+ else:
+ changed = True
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+
+                # any() works on both Python 2 and 3, unlike truth-testing the
+                # result of filter(), which is always truthy on Python 3
+                if not any(s.status not in FINAL_STATUSES for s in servers):
+                    break
+ time.sleep(5)
+
+ success = []
+ error = []
+ timeout = []
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+ instance = rax_to_dict(server, 'server')
+ if server.status == 'ACTIVE' or not wait:
+ success.append(instance)
+ elif server.status == 'ERROR':
+ error.append(instance)
+ elif wait:
+ timeout.append(instance)
+
+ untouched = [rax_to_dict(s, 'server') for s in existing]
+ instances = success + untouched
+
+ results = {
+ 'changed': changed,
+ 'action': 'create',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to build'
+ elif error:
+ results['msg'] = 'Failed to build all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+ instance_ids = [] if instance_ids is None else instance_ids
+ kept = [] if kept is None else kept
+
+ cs = pyrax.cloudservers
+
+ changed = False
+ instances = {}
+ servers = []
+
+ for instance_id in instance_ids:
+ servers.append(cs.servers.get(instance_id))
+
+ for server in servers:
+ try:
+ server.delete()
+ except Exception as e:
+            module.fail_json(msg=str(e))
+ else:
+ changed = True
+
+ instance = rax_to_dict(server, 'server')
+ instances[instance['id']] = instance
+
+ # If requested, wait for server deletion
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ instance_id = server.id
+ try:
+ server.get()
+ except Exception:
+ instances[instance_id]['status'] = 'DELETED'
+ instances[instance_id]['rax_status'] = 'DELETED'
+
+            # use any() rather than filter() so the emptiness check also
+            # works on Python 3, where filter() returns an iterator
+            if not any(s['status'] not in ('', 'DELETED', 'ERROR')
+                       for s in instances.values()):
+                break
+
+ time.sleep(5)
+
+    # Use list comprehensions rather than filter() so the results are real
+    # lists (they are iterated more than once below) on Python 3, and compare
+    # against 'ERROR' directly instead of the one-element string ('ERROR')
+    timeout = [s for s in instances.values()
+               if s['status'] not in ('', 'DELETED', 'ERROR')]
+    error = [s for s in instances.values() if s['status'] == 'ERROR']
+    success = [s for s in instances.values()
+               if s['status'] in ('', 'DELETED')]
+
+ instances = [rax_to_dict(s, 'server') for s in kept]
+
+ results = {
+ 'changed': changed,
+ 'action': 'delete',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to delete'
+ elif error:
+ results['msg'] = 'Failed to delete all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+ meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+ disk_config=None, count=1, group=None, instance_ids=None,
+ exact_count=False, networks=None, count_offset=0,
+ auto_increment=False, extra_create_args=None, user_data=None,
+ config_drive=False, boot_from_volume=False,
+ boot_volume=None, boot_volume_size=None,
+ boot_volume_terminate=False):
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ instance_ids = [] if instance_ids is None else instance_ids
+ networks = [] if networks is None else networks
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+
+ cs = pyrax.cloudservers
+ cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present' or (state == 'absent' and instance_ids is None):
+ if not boot_from_volume and not boot_volume and not image:
+ module.fail_json(msg='image is required for the "rax" module')
+
+ for arg, value in dict(name=name, flavor=flavor).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax" module' %
+ arg)
+
+ if boot_from_volume and not image and not boot_volume:
+            module.fail_json(msg='image or boot_volume is required for the '
+                                 '"rax" module with boot_from_volume')
+
+ if boot_from_volume and image and not boot_volume_size:
+ module.fail_json(msg='boot_volume_size is required for the "rax" '
+ 'module with boot_from_volume and image')
+
+ if boot_from_volume and image and boot_volume:
+ image = None
+
+ servers = []
+
+ # Add the group meta key
+ if group and 'group' not in meta:
+ meta['group'] = group
+ elif 'group' in meta and group is None:
+ group = meta['group']
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ # When using state=absent with group, the absent block won't match the
+ # names properly. Use the exact_count functionality to decrease the count
+ # to the desired level
+ was_absent = False
+ if group is not None and state == 'absent':
+ exact_count = True
+ state = 'present'
+ was_absent = True
+
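+    # Resolve the image (given as an id, human_id or name) via rax_find_image
+    # before it is used for matching and server creation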
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ # act on the state
+ if state == 'present':
+        # Idempotent enforcement of a specific count of servers
+ if exact_count is not False:
+ # See if we can find servers that match our options
+ if group is None:
+ module.fail_json(msg='"group" must be provided when using '
+ '"exact_count"')
+
+ if auto_increment:
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+                    if str(e).startswith('not all'):
+                        name = '%s%%d' % name
+                    else:
+                        module.fail_json(msg=str(e))
+
+ # regex pattern to match printf formatting
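+                # e.g. 'test%03d.example.org' becomes the pattern
+                # 'test(\d+).example.org'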
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset, count_offset + count)
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ else: # Not auto incrementing
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ # available_numbers not needed here; we inspect auto_increment
+ # again later
+
+ # If state was absent but the count was changed,
+ # assume we only wanted to remove that number of instances
+ if was_absent:
+ diff = len(servers) - count
+ if diff < 0:
+ count = 0
+ else:
+ count = diff
+
+ if len(servers) > count:
+ # We have more servers than we need; set state='absent'
+ # and delete the extras. This should delete the oldest.
+ state = 'absent'
+ kept = servers[:count]
+ del servers[:count]
+ instance_ids = []
+ for server in servers:
+ instance_ids.append(server.id)
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout, kept=kept)
+ elif len(servers) < count:
+ # we have fewer servers than we need
+ if auto_increment:
+ # auto incrementing server numbers
+ names = []
+ name_slice = count - len(servers)
+ numbers_to_use = available_numbers[:name_slice]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # We are not auto incrementing server numbers,
+ # create a list of 'name' that matches how many we need
+ names = [name] * (count - len(servers))
+ else:
+ # we have the right number of servers, just return info
+ # about all of the matched servers
+ instances = []
+ instance_ids = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+ instance_ids.append(server.id)
+ module.exit_json(changed=False, action=None,
+ instances=instances,
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+ else: # not called with exact_count=True
+ if group is not None:
+ if auto_increment:
+ # we are auto incrementing server numbers, but not with
+ # exact_count
+ numbers = set()
+
+ # See if the name is a printf-like format string; if not, append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+ if e.message.startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=e.message)
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset,
+ count_offset + count + len(numbers))
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # Not auto incrementing
+ names = [name] * count
+ else:
+ # No group was specified, and not using exact_count
+ # Perform more simplistic matching
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ servers = []
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if server.metadata != meta:
+ continue
+ servers.append(server)
+
+ if len(servers) >= count:
+ # We have at least as many servers as were requested; don't do
+ # anything. Not running with exact_count=True, so we assume
+ # more is OK
+ instances = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+
+ instance_ids = [i['id'] for i in instances]
+ module.exit_json(changed=False, action=None,
+ instances=instances, success=[], error=[],
+ timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ # We need more servers to reach our target; create names for
+ # them (we aren't performing auto_increment here)
+ names = [name] * (count - len(servers))
+
+ block_device_mapping_v2 = []
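+ # When boot_from_volume is set, a single block device mapping is built,
+ # e.g. {'boot_index': '0', 'delete_on_termination': False,
+ # 'destination_type': 'volume', 'source_type': 'image',
+ # 'uuid': '<image id>', 'volume_size': 100} for an image-backed boot
+ # volume (illustrative values)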
+ if boot_from_volume:
+ mapping = {
+ 'boot_index': '0',
+ 'delete_on_termination': boot_volume_terminate,
+ 'destination_type': 'volume',
+ }
+ if image:
+ mapping.update({
+ 'uuid': image,
+ 'source_type': 'image',
+ 'volume_size': boot_volume_size,
+ })
+ image = None
+ elif boot_volume:
+ volume = rax_find_volume(module, pyrax, boot_volume)
+ mapping.update({
+ 'uuid': pyrax.utils.get_id(volume),
+ 'source_type': 'volume',
+ })
+ block_device_mapping_v2.append(mapping)
+
+ create(module, names=names, flavor=flavor, image=image,
+ meta=meta, key_name=key_name, files=files, wait=wait,
+ wait_timeout=wait_timeout, disk_config=disk_config, group=group,
+ nics=nics, extra_create_args=extra_create_args,
+ user_data=user_data, config_drive=config_drive,
+ existing=servers,
+ block_device_mapping_v2=block_device_mapping_v2)
+
+ elif state == 'absent':
+ if instance_ids is None:
+ # We weren't given an explicit list of server IDs to delete
+ # Let's match instead
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if meta != server.metadata:
+ continue
+
+ servers.append(server)
+
+ # Build a list of server IDs to delete
+ instance_ids = []
+ for server in servers:
+ if len(instance_ids) < count:
+ instance_ids.append(server.id)
+ else:
+ break
+
+ if not instance_ids:
+ # No server IDs were matched for deletion, or no IDs were
+ # explicitly provided, just exit and don't do anything
+ module.exit_json(changed=False, action=None, instances=[],
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': [],
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ auto_increment=dict(default=True, type='bool'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(type='str'),
+ boot_volume_size=dict(type='int', default=100),
+ boot_volume_terminate=dict(type='bool', default=False),
+ config_drive=dict(default=False, type='bool'),
+ count=dict(default=1, type='int'),
+ count_offset=dict(default=1, type='int'),
+ disk_config=dict(choices=['auto', 'manual']),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
+ files=dict(type='dict', default={}),
+ flavor=dict(),
+ group=dict(),
+ image=dict(),
+ instance_ids=dict(type='list'),
+ key_name=dict(aliases=['keypair']),
+ meta=dict(type='dict', default={}),
+ name=dict(),
+ networks=dict(type='list', default=['public', 'private']),
+ service=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ service = module.params.get('service')
+
+ if service is not None:
+ module.fail_json(msg='The "service" attribute has been deprecated, '
+ 'please remove "service: cloudservers" from your '
+ 'playbook pertaining to the "rax" module')
+
+ auto_increment = module.params.get('auto_increment')
+ boot_from_volume = module.params.get('boot_from_volume')
+ boot_volume = module.params.get('boot_volume')
+ boot_volume_size = module.params.get('boot_volume_size')
+ boot_volume_terminate = module.params.get('boot_volume_terminate')
+ config_drive = module.params.get('config_drive')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ group = module.params.get('group')
+ image = module.params.get('image')
+ instance_ids = module.params.get('instance_ids')
+ key_name = module.params.get('key_name')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloudservers(module, state=state, name=name, flavor=flavor,
+ image=image, meta=meta, key_name=key_name, files=files,
+ wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
+ count=count, group=group, instance_ids=instance_ids,
+ exact_count=exact_count, networks=networks,
+ count_offset=count_offset, auto_increment=auto_increment,
+ extra_create_args=extra_create_args, user_data=user_data,
+ config_drive=config_drive, boot_from_volume=boot_from_volume,
+ boot_volume=boot_volume, boot_volume_size=boot_volume_size,
+ boot_volume_terminate=boot_volume_terminate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py
new file mode 100644
index 00000000..a681feff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+options:
+ description:
+ type: str
+ description:
+ - Description to give the volume being created
+ image:
+ type: str
+ description:
+ - image to use for bootable volumes. Can be an C(id), C(human_id) or
+ C(name). This option requires C(pyrax>=1.9.3)
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the volume
+ name:
+ type: str
+ description:
+ - Name to give the volume being created
+ required: true
+ size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes
+ default: 100
+ snapshot_id:
+ type: str
+ description:
+ - The id of the snapshot to create the volume from
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ volume_type:
+ type: str
+ description:
+ - Type of the volume being created
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ wait:
+ description:
+ - wait for the volume to be in state 'available' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
+'''
+
+from distutils.version import LooseVersion
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image):
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if image:
+ # pyrax<1.9.3 did not support specifying an image when creating
+ # a volume, which is required for bootable volumes
+ if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
+ module.fail_json(msg='Creating a bootable volume requires '
+ 'pyrax>=1.9.3')
+ image = rax_find_image(module, pyrax, image)
+
+ volume = rax_find_volume(module, pyrax, name)
+
+ if state == 'present':
+ if not volume:
+ kwargs = dict()
+ if image:
+ kwargs['image'] = image
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id, **kwargs)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ if wait:
+ attempts = wait_timeout // 5
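+ # e.g. the default wait_timeout of 300 seconds yields 60 polling
+ # attempts at 5-second intervals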
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ instance = rax_to_dict(volume)
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ instance = rax_to_dict(volume)
+ try:
+ volume.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(type='str'),
+ image=dict(type='str'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ image = module.params.get('image')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
new file mode 100644
index 00000000..71d01620
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+options:
+ device:
+ type: str
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde.
+ - Before 2.4 this was a required field. Now it can be left as null to auto-assign the device name.
+ volume:
+ type: str
+ description:
+ - Name or id of the volume to attach/detach
+ required: true
+ server:
+ type: str
+ description:
+ - Name or id of the server to attach/detach
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: yes
+ state: present
+ register: my_volume
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
+ rax_argument_spec,
+ rax_find_server,
+ rax_find_volume,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ volume = rax_find_volume(module, pyrax, volume)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+
+ for key, value in vars(volume).items():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ volume.get()
+ result['volume'] = rax_to_dict(volume)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+ changed = True
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ result = dict(changed=changed, volume=rax_to_dict(volume))
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=False),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py
new file mode 100644
index 00000000..5b9996cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb
+short_description: create/delete or resize a Rackspace Cloud Databases instance
+description:
+ - creates / deletes or resizes a Rackspace Cloud Databases instance
+ and optionally waits for it to be 'running'. The name option needs to be
+ unique since it's used to identify the instance.
+options:
+ name:
+ type: str
+ description:
+ - Name of the databases server instance
+ required: yes
+ flavor:
+ type: int
+ description:
+ - flavor to use for the instance: 1 to 6 (i.e. 512MB to 16GB)
+ default: 1
+ volume:
+ type: int
+ description:
+ - Volume size of the database, 1-150 GB
+ default: 2
+ cdb_type:
+ type: str
+ description:
+ - type of instance (e.g. MySQL, MariaDB, Percona)
+ default: MySQL
+ aliases: ['type']
+ cdb_version:
+ type: str
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ - "The available choices are: C(5.1), C(5.6) and C(10)."
+ default: 5.6
+ aliases: ['version']
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Databases
+ gather_facts: False
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 1
+ volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
+ wait: yes
+ state: present
+ register: rax_db_server
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_instance(name):
+
+ cdb = pyrax.cloud_databases
+ instances = cdb.list()
+ if instances:
+ for instance in instances:
+ if instance.name == name:
+ return instance
+ return False
+
+
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ for arg, value in dict(name=name, flavor=flavor,
+ volume=volume, type=cdb_type, version=cdb_version
+ ).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb"'
+ ' module' % arg)
+
+ if not 1 <= volume <= 150:
+ module.fail_json(msg='volume must be between 1 and 150')
+
+ cdb = pyrax.cloud_databases
+
+ flavors = []
+ for item in cdb.list_flavors():
+ flavors.append(item.id)
+
+ if flavor not in flavors:
+ module.fail_json(msg='non-existent flavor reference "%s"' % str(flavor))
+
+ changed = False
+
+ instance = find_instance(name)
+
+ if not instance:
+ action = 'create'
+ try:
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ else:
+ action = None
+
+ if instance.volume.size != volume:
+ action = 'resize'
+ if instance.volume.size > volume:
+ module.fail_json(changed=False, action=action,
+ msg='The new volume size must be larger than '
+ 'the current volume size',
+ cdb=rax_to_dict(instance))
+ instance.resize_volume(volume)
+ changed = True
+
+ if int(instance.flavor.id) != flavor:
+ action = 'resize'
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+ instance.resize(flavor)
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'ACTIVE':
+ module.fail_json(changed=changed, action=action,
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be created' % name)
+
+ module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
+
+
+def delete_instance(module, name, wait, wait_timeout):
+
+ if not name:
+ module.fail_json(msg='name is required for the "rax_cdb" module')
+
+ changed = False
+
+ instance = find_instance(name)
+ if not instance:
+ module.exit_json(changed=False, action='delete')
+
+ try:
+ instance.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'SHUTDOWN':
+ module.fail_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be deleted' % name)
+
+ module.exit_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance))
+
+
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ # act on the state
+ if state == 'present':
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
+ elif state == 'absent':
+ delete_instance(module, name, wait, wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ flavor=dict(type='int', default=1),
+ volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL', aliases=['type']),
+ cdb_version=dict(type='str', default='5.6', aliases=['version']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ flavor = module.params.get('flavor')
+ volume = module.params.get('volume')
+ cdb_type = module.params.get('cdb_type')
+ cdb_version = module.params.get('cdb_version')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
new file mode 100644
index 00000000..6d3435e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_cdb_database
+short_description: 'create / delete a database in the Cloud Databases'
+description:
+ - create / delete a database in the Cloud Databases.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ name:
+ type: str
+ description:
+ - Name to give to the database
+ required: yes
+ character_set:
+ type: str
+ description:
+ - Set of symbols and encodings
+ default: 'utf8'
+ collate:
+ type: str
+ description:
+ - Set of rules for comparing characters in a character set
+ default: 'utf8_general_ci'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a database in Cloud Databases
+ tasks:
+ - name: Database build request
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db1
+ state: present
+ register: rax_db_database
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_database(instance, name):
+ try:
+ database = instance.get_database(name)
+ except Exception:
+ return False
+
+ return database
+
+
+def save_database(module, cdb_id, name, character_set, collate):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if not database:
+ try:
+ database = instance.create_database(name=name,
+ character_set=character_set,
+ collate=collate)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='create',
+ database=rax_to_dict(database))
+
+
+def delete_database(module, cdb_id, name):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if database:
+ try:
+ database.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete',
+ database=rax_to_dict(database))
+
+
+def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
+
+ # act on the state
+ if state == 'present':
+ save_database(module, cdb_id, name, character_set, collate)
+ elif state == 'absent':
+ delete_database(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ character_set=dict(type='str', default='utf8'),
+ collate=dict(type='str', default='utf8_general_ci'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('name')
+ character_set = module.params.get('character_set')
+ collate = module.params.get('collate')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_database(module, state, cdb_id, name, character_set, collate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
new file mode 100644
index 00000000..34be49d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb_user
+short_description: create / delete a Rackspace Cloud Database user
+description:
+ - create / delete a user in a Rackspace Cloud Databases instance.
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: yes
+ db_username:
+ type: str
+ description:
+ - Name of the database user
+ required: yes
+ db_password:
+ type: str
+ description:
+ - Database user password
+ required: yes
+ databases:
+ type: list
+ description:
+ - Names of the databases that the user can access
+ default: []
+ host:
+ type: str
+ description:
+ - Specifies the host from which a user is allowed to connect to
+ the database. Possible values are a string containing an IPv4 address
+ or "%" to allow connecting from any host
+ default: '%'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a user in Cloud Databases
+ tasks:
+ - name: User build request
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user1
+ db_password: user1
+ databases: ['db1']
+ state: present
+ register: rax_db_user
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_user(instance, name):
+ try:
+ user = instance.get_user(name)
+ except Exception:
+ return False
+
+ return user
+
+
+def save_user(module, cdb_id, name, password, databases, host):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user" '
+ 'module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if not user:
+ action = 'create'
+ try:
+ user = instance.create_user(name=name,
+ password=password,
+ database_names=databases,
+ host=host)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+ else:
+ action = 'update'
+
+ if user.host != host:
+ changed = True
+
+ user.update(password=password, host=host)
+
+ former_dbs = set([item.name for item in user.list_user_access()])
+ databases = set(databases)
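+ # e.g. former_dbs={'db1', 'db2'} and databases={'db2', 'db3'} would
+ # revoke access to db1 and grant access to db3 (illustrative values)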
+
+ if databases != former_dbs:
+ try:
+ revoke_dbs = [db for db in former_dbs if db not in databases]
+ user.revoke_user_access(db_names=revoke_dbs)
+
+ new_dbs = [db for db in databases if db not in former_dbs]
+ user.grant_user_access(db_names=new_dbs)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
+
+
+def delete_user(module, cdb_id, name):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user"'
+ ' module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if user:
+ try:
+ user.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete')
+
+
+def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
+
+ # act on the state
+ if state == 'present':
+ save_user(module, cdb_id, name, password, databases, host)
+ elif state == 'absent':
+ delete_user(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ db_username=dict(type='str', required=True),
+ db_password=dict(type='str', required=True, no_log=True),
+ databases=dict(type='list', default=[]),
+ host=dict(type='str', default='%'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('db_username')
+ password = module.params.get('db_password')
+ databases = module.params.get('databases')
+ host = to_text(module.params.get('host'), errors='surrogate_or_strict')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_user(module, state, cdb_id, name, password, databases, host)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py
new file mode 100644
index 00000000..5ff1e314
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb
+short_description: create / delete a load balancer in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud load balancer.
+options:
+ algorithm:
+ type: str
+ description:
+ - algorithm for the balancer being created
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
+ default: LEAST_CONNECTIONS
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the load balancer
+ required: yes
+ port:
+ type: int
+ description:
+ - Port for the balancer being created
+ default: 80
+ protocol:
+ type: str
+ description:
+ - Protocol for the balancer being created
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
+ default: HTTP
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ timeout:
+ type: int
+ description:
+ - timeout for communication between the balancer and the node
+ default: 30
+ type:
+ type: str
+ description:
+ - type of interface for the balancer being created
+ choices:
+ - PUBLIC
+ - SERVICENET
+ default: PUBLIC
+ vip_id:
+ type: str
+ description:
+ - Virtual IP ID to use when creating the load balancer for purposes of
+ sharing an IP with another load balancer of another protocol
+ wait:
+ description:
+ - wait for the balancer to be in state 'running' before returning
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Load Balancer
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Balancer create request
+ local_action:
+ module: rax_clb
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 8080
+ protocol: HTTP
+ type: SERVICENET
+ timeout: 30
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_lb
+'''
+
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
+ CLB_PROTOCOLS,
+ rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id):
+ if int(timeout) < 30:
+ module.fail_json(msg='"timeout" must be greater than or equal to 30')
+
+ changed = False
+ balancers = []
+
+ clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
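+ # Collect every load balancer across pages, using the id of the last
+ # entry seen as the pagination marker for the next request.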
+ balancer_list = clb.list()
+ while balancer_list:
+ retrieved = clb.list(marker=balancer_list.pop().id)
+ balancer_list.extend(retrieved)
+ if len(retrieved) < 2:
+ break
+
+ for balancer in balancer_list:
+ if name != balancer.name and name != balancer.id:
+ continue
+
+ balancers.append(balancer)
+
+ if len(balancers) > 1:
+ module.fail_json(msg='Multiple Load Balancers were matched by name, '
+ 'try using the Load Balancer ID instead')
+
+ if state == 'present':
+ if isinstance(meta, dict):
+ metadata = [dict(key=k, value=v) for k, v in meta.items()]
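+ # e.g. meta={'app': 'my-cool-app'} becomes
+ # [{'key': 'app', 'value': 'my-cool-app'}] (illustrative values)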
+
+ if not balancers:
+ try:
+ virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
+ balancer = clb.create(name, metadata=metadata, port=port,
+ algorithm=algorithm, protocol=protocol,
+ timeout=timeout, virtual_ips=virtual_ips)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ balancer = balancers[0]
+ setattr(balancer, 'metadata',
+ [dict(key=k, value=v) for k, v in
+ balancer.get_metadata().items()])
+ atts = {
+ 'name': name,
+ 'algorithm': algorithm,
+ 'port': port,
+ 'protocol': protocol,
+ 'timeout': timeout
+ }
+ for att, value in atts.items():
+ current = getattr(balancer, att)
+ if current != value:
+ changed = True
+
+ if changed:
+ balancer.update(**atts)
+
+ if balancer.metadata != metadata:
+ balancer.set_metadata(meta)
+ changed = True
+
+ virtual_ips = [clb.VirtualIP(type=vip_type)]
+ current_vip_types = set([v.type for v in balancer.virtual_ips])
+ vip_types = set([v.type for v in virtual_ips])
+ if current_vip_types != vip_types:
+ module.fail_json(msg='Load balancer Virtual IP type cannot '
+ 'be changed')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ instance = rax_to_dict(balancer, 'clb')
+
+ result = dict(changed=changed, balancer=instance)
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if balancers:
+ balancer = balancers[0]
+ try:
+ balancer.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ instance = rax_to_dict(balancer, 'clb')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(balancer, 'status', 'DELETED',
+ interval=5, attempts=attempts)
+ else:
+ instance = {}
+
+ module.exit_json(changed=changed, balancer=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ algorithm=dict(choices=CLB_ALGORITHMS,
+ default='LEAST_CONNECTIONS'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ port=dict(type='int', default=80),
+ protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
+ state=dict(default='present', choices=['present', 'absent']),
+ timeout=dict(type='int', default=30),
+ type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
+ vip_id=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ algorithm = module.params.get('algorithm')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ state = module.params.get('state')
+ timeout = int(module.params.get('timeout'))
+ vip_id = module.params.get('vip_id')
+ vip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
new file mode 100644
index 00000000..c066ab66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb_nodes
+short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
+description:
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
+options:
+ address:
+ type: str
+ required: false
+ description:
+ - IP address or domain name of the node
+ condition:
+ type: str
+ required: false
+ choices:
+ - enabled
+ - disabled
+ - draining
+ description:
+ - Condition for the node, which determines its role within the load
+ balancer
+ load_balancer_id:
+ type: int
+ required: true
+ description:
+ - Load balancer id
+ node_id:
+ type: int
+ required: false
+ description:
+ - Node id
+ port:
+ type: int
+ required: false
+ description:
+ - Port number of the load balanced service on the node
+ state:
+ type: str
+ required: false
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Indicate desired state of the node
+ type:
+ type: str
+ required: false
+ choices:
+ - primary
+ - secondary
+ description:
+ - Type of node
+ wait:
+ required: false
+ default: "no"
+ type: bool
+ description:
+ - Wait for the load balancer to become active before returning
+ wait_timeout:
+ type: int
+ required: false
+ default: 30
+ description:
+ - How long to wait before giving up and returning an error
+ weight:
+ type: int
+ required: false
+ description:
+ - Weight of node
+ virtualenv:
+ type: path
+ description:
+ - Virtualenv to execute this module in
+author: "Lukasz Kawczynski (@neuroid)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Add a new node to the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ address: 10.2.2.3
+ port: 80
+ condition: enabled
+ type: primary
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Drain connections from a node
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ condition: draining
+ wait: yes
+ credentials: /path/to/credentials
+
+- name: Remove a node from the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ state: absent
+ wait: yes
+ credentials: /path/to/credentials
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
+
+
+def _activate_virtualenv(path):
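+ # activate_this.py is shipped by virtualenv; executing it adjusts
+ # sys.path so that packages installed in that virtualenv become
+ # importable in the current process.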
+ activate_this = os.path.join(path, 'bin', 'activate_this.py')
+ with open(activate_this) as f:
+ code = compile(f.read(), activate_this, 'exec')
+ exec(code)
+
+
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
+ return node
+
+ return None
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ condition=dict(choices=['enabled', 'disabled', 'draining']),
+ load_balancer_id=dict(required=True, type='int'),
+ node_id=dict(type='int'),
+ port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ type=dict(choices=['primary', 'secondary']),
+ virtualenv=dict(type='path'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=30, type='int'),
+ weight=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params['address']
+ condition = (module.params['condition'] and
+ module.params['condition'].upper())
+ load_balancer_id = module.params['load_balancer_id']
+ node_id = module.params['node_id']
+ port = module.params['port']
+ state = module.params['state']
+ typ = module.params['type'] and module.params['type'].upper()
+ virtualenv = module.params['virtualenv']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout'] or 1
+ weight = module.params['weight']
+
+ if virtualenv:
+ try:
+ _activate_virtualenv(virtualenv)
+ except IOError as e:
+ module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
+ virtualenv, e))
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.cloud_loadbalancers:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ node = _get_node(lb, node_id, address, port)
+
+ result = rax_clb_node_to_dict(node)
+
+ if state == 'absent':
+ if not node: # Removing a non-existent node
+ module.exit_json(changed=False, state=state)
+ try:
+ lb.delete_node(node)
+ result = {}
+ except pyrax.exc.NotFound:
+ module.exit_json(changed=False, state=state)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # present
+ if not node:
+ if node_id: # Updating a non-existent node
+ msg = 'Node %d not found' % node_id
+ if lb.nodes:
+ msg += (' (available nodes: %s)' %
+ ', '.join([str(x.id) for x in lb.nodes]))
+ module.fail_json(msg=msg)
+ else: # Creating a new node
+ try:
+ node = pyrax.cloudloadbalancers.Node(
+ address=address, port=port, condition=condition,
+ weight=weight, type=typ)
+ resp, body = lb.add_nodes([node])
+ result.update(body['nodes'][0])
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # Updating an existing node
+ mutable = {
+ 'condition': condition,
+ 'type': typ,
+ 'weight': weight,
+ }
+
+ for name, value in list(mutable.items()):
+ if value is None or value == getattr(node, name):
+ mutable.pop(name)
+
+ if not mutable:
+ module.exit_json(changed=False, state=state, node=result)
+
+ try:
+ # The diff has to be set explicitly to update node's weight and
+ # type; this should probably be fixed in pyrax
+ lb.update_node(node, diff=mutable)
+ result.update(mutable)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if wait:
+ pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
+ attempts=wait_timeout)
+ if lb.status != 'ACTIVE':
+ module.fail_json(
+ msg='Load balancer not active after %ds (current status: %s)' %
+ (wait_timeout, lb.status.lower()))
+
+ kwargs = {'node': result} if result else {}
+ module.exit_json(changed=True, state=state, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
new file mode 100644
index 00000000..114128e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+options:
+ loadbalancer:
+ type: str
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ type: bool
+ private_key:
+ type: str
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ type: str
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ type: str
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+ - format, concatenated into a single string.
+ secure_port:
+ type: int
+ description:
+ - The port to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ type: bool
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ type: bool
+ wait:
+ description:
+ - Wait for the balancer to be in state "running" before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ type: int
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ community.general.rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ community.general.rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
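+
+# Illustrative sketch only (placeholder values): https_redirect requires
+# secure_traffic_only to be true, as noted in the option documentation above.
+- name: Terminate SSL and redirect plain HTTP to HTTPS
+  community.general.rax_clb_ssl:
+    loadbalancer: the_loadbalancer
+    state: present
+    private_key: "{{ lookup('file', 'credentials/server.key') }}"
+    certificate: "{{ lookup('file', 'credentials/server.crt') }}"
+    secure_traffic_only: true
+    https_redirect: true
+    wait: true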
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
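+    # wait_for_build below polls every 5 seconds, so derive the attempt count from the timeout.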
+ attempts = wait_timeout // 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.items():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+    # Intentionally omit the private key from the module output so it is not
+    # accidentally echoed with `ansible-playbook -v` or `debug`, and omit the
+    # certificate, which is simply long. Convert the remaining attributes to
+    # snake_case and expose https_redirect at the top level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(no_log=True),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py
new file mode 100644
index 00000000..e9b7e2be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns
+short_description: Manage domains on Rackspace Cloud DNS
+description:
+ - Manage domains on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the domain. Maximum length of 160 characters
+ email:
+ type: str
+ description:
+ - Email address of the domain administrator
+ name:
+ type: str
+ description:
+ - Domain name to create
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of domain in seconds
+ default: 3600
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create domain
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Domain create request
+ local_action:
+ module: rax_dns
+ credentials: ~/.raxpub
+ name: example.org
+ email: admin@example.org
+ register: rax_dns
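+
+# A minimal sketch of the C(serial: 1) recommendation from the notes above;
+# values are placeholders.
+- name: Create domains one host at a time
+  hosts: all
+  serial: 1
+  gather_facts: False
+  tasks:
+    - name: Domain create request
+      local_action:
+        module: rax_dns
+        credentials: ~/.raxpub
+        name: "{{ inventory_hostname }}"
+        email: admin@example.org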
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns(module, comment, email, name, state, ttl):
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not email:
+ module.fail_json(msg='An "email" attribute is required for '
+ 'creating a domain')
+
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ domain = dns.create(name=name, emailAddress=email, ttl=ttl,
+ comment=comment)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
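+        # Build an update payload only from attributes that differ from the existing domain.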
+ update = {}
+ if comment != getattr(domain, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(domain, 'ttl', None):
+ update['ttl'] = ttl
+ if email != getattr(domain, 'emailAddress', None):
+ update['emailAddress'] = email
+
+ if update:
+ try:
+ domain.update(**update)
+ changed = True
+ domain.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NotFound:
+ domain = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if domain:
+ try:
+ domain.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, domain=rax_to_dict(domain))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ email=dict(),
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ email = module.params.get('email')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+
+ setup_rax_module(module, pyrax, False)
+
+ rax_dns(module, comment, email, name, state, ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py
new file mode 100644
index 00000000..0b60120a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_dns_record.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns_record
+short_description: Manage DNS records on Rackspace Cloud DNS
+description:
+ - Manage DNS records on Rackspace Cloud DNS
+options:
+ comment:
+ type: str
+ description:
+      - Brief description of the record. Maximum length of 160 characters
+ data:
+ type: str
+ description:
+ - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
+ SRV/TXT
+ required: True
+ domain:
+ type: str
+ description:
+ - Domain name to create the record in. This is an invalid option when
+ type=PTR
+ loadbalancer:
+ type: str
+ description:
+ - Load Balancer ID to create a PTR record for. Only used with type=PTR
+ name:
+ type: str
+ description:
+ - FQDN record name to create
+ required: True
+ overwrite:
+ description:
+ - Add new records if data doesn't match, instead of updating existing
+ record with matching name. If there are already multiple records with
+ matching name and overwrite=true, this module will fail.
+ default: true
+ type: bool
+ priority:
+ type: int
+ description:
+ - Required for MX and SRV records, but forbidden for other record types.
+ If specified, must be an integer from 0 to 65535.
+ server:
+ type: str
+ description:
+ - Server ID to create a PTR record for. Only used with type=PTR
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of record in seconds
+ default: 3600
+ type:
+ type: str
+ description:
+ - DNS record type
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
+ - PTR
+ required: true
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+ - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
+ supplied
+ - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
+ - C(PTR) record support was added in version 1.7
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create DNS Records
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Create A record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ domain: example.org
+ name: www.example.org
+ data: "{{ rax_accessipv4 }}"
+ type: A
+ register: a_record
+
+ - name: Create PTR record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ server: "{{ rax_id }}"
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ register: ptr_record
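+
+# Illustrative sketch only (placeholder values): priority is required when the
+# record type is MX or SRV, as enforced by the module.
+    - name: Create MX record
+      local_action:
+        module: rax_dns_record
+        credentials: ~/.raxpub
+        domain: example.org
+        name: example.org
+        data: mail.example.org
+        type: MX
+        priority: 10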
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_find_server,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
+ name=None, server=None, state='present', ttl=7200):
+ changed = False
+ results = []
+
+ dns = pyrax.cloud_dns
+
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if loadbalancer:
+ item = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ elif server:
+ item = rax_find_server(module, pyrax, server)
+
+ if state == 'present':
+ current = dns.list_ptr_records(item)
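+        # PTR records are matched on their data (the target IP address); the name
+        # and TTL are updated in place when they differ.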
+ for record in current:
+ if record.data == data:
+ if record.ttl != ttl or record.name != name:
+ try:
+ dns.update_ptr_record(item, record, name, data, ttl)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ record.ttl = ttl
+ record.name = name
+ results.append(rax_to_dict(record))
+ break
+ else:
+ results.append(rax_to_dict(record))
+ break
+
+ if not results:
+ record = dict(name=name, type='PTR', data=data, ttl=ttl,
+ comment=comment)
+ try:
+ results = dns.add_ptr_records(item, [record])
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+ elif state == 'absent':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ results.append(rax_to_dict(record))
+ break
+
+ if results:
+ try:
+ dns.delete_ptr_records(item, data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+
+def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
+ overwrite=True, priority=None, record_type='A',
+ state='present', ttl=7200):
+ """Function for manipulating record types other than PTR"""
+
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not priority and record_type in ['MX', 'SRV']:
+ module.fail_json(msg='A "priority" attribute is required for '
+ 'creating a MX or SRV record')
+
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ if overwrite:
+ record = domain.find_record(record_type, name=name)
+ else:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='overwrite=true and there are multiple matching records')
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ try:
+ record_data = {
+ 'type': record_type,
+ 'name': name,
+ 'data': data,
+ 'ttl': ttl
+ }
+ if comment:
+ record_data.update(dict(comment=comment))
+ if priority and record_type.upper() in ['MX', 'SRV']:
+ record_data.update(dict(priority=priority))
+
+ record = domain.add_records([record_data])[0]
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(record, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(record, 'ttl', None):
+ update['ttl'] = ttl
+ if priority != getattr(record, 'priority', None):
+ update['priority'] = priority
+ if data != getattr(record, 'data', None):
+ update['data'] = data
+
+ if update:
+ try:
+ record.update(**update)
+ changed = True
+ record.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ record = {}
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if record:
+ try:
+ record.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, record=rax_to_dict(record))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ data=dict(required=True),
+ domain=dict(),
+ loadbalancer=dict(),
+ name=dict(required=True),
+ overwrite=dict(type='bool', default=True),
+ priority=dict(type='int'),
+ server=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
+ 'SRV', 'TXT', 'PTR'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ required_one_of=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ data = module.params.get('data')
+ domain = module.params.get('domain')
+ loadbalancer = module.params.get('loadbalancer')
+ name = module.params.get('name')
+ overwrite = module.params.get('overwrite')
+ priority = module.params.get('priority')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+ record_type = module.params.get('type')
+
+ setup_rax_module(module, pyrax, False)
+
+ if record_type.upper() == 'PTR':
+ if not server and not loadbalancer:
+ module.fail_json(msg='one of the following is required: '
+ 'server,loadbalancer')
+ rax_dns_record_ptr(module, data=data, comment=comment,
+ loadbalancer=loadbalancer, name=name, server=server,
+ state=state, ttl=ttl)
+ else:
+ rax_dns_record(module, comment=comment, data=data, domain=domain,
+ name=name, overwrite=overwrite, priority=priority,
+ record_type=record_type, state=state, ttl=ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py
new file mode 100644
index 00000000..386ca7cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_facts.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_facts
+short_description: Gather facts for Rackspace Cloud Servers
+description:
+ - Gather facts for Rackspace Cloud Servers.
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to retrieve facts for, will match any IP assigned to
+ the server
+ id:
+ type: str
+ description:
+ - Server ID to retrieve facts for
+ name:
+ type: str
+ description:
+ - Server name to retrieve facts for
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Gather info about servers
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Get facts about servers
+ local_action:
+ module: rax_facts
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ - name: Map some facts
+ ansible.builtin.set_fact:
+ ansible_ssh_host: "{{ rax_accessipv4 }}"
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_facts(module, address, name, server_id):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ ansible_facts = {}
+
+ search_opts = {}
+ if name:
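+        # Anchor the name pattern so the servers API returns exact matches only.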
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+ except Exception as e:
+ pass
+
+ servers[:] = [server for server in servers if server.status != "DELETED"]
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif len(servers) == 1:
+ ansible_facts = rax_to_dict(servers[0], 'server')
+
+ module.exit_json(changed=changed, ansible_facts=ansible_facts)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+
+ setup_rax_module(module, pyrax)
+
+ rax_facts(module, address, name, server_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py
new file mode 100644
index 00000000..7080cc2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files
+short_description: Manipulate Rackspace Cloud Files Containers
+description:
+ - Manipulate Rackspace Cloud Files Containers
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing containers.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: "no"
+ container:
+ type: str
+ description:
+ - The container to use for container or metadata operations.
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on a container
+ private:
+ description:
+ - Used to set a container as private, removing it from the CDN. B(Warning!)
+ Private containers, if previously made public, can have live objects
+ available until the TTL on cached objects expires
+ type: bool
+ default: false
+ public:
+ description:
+ - Used to set a container as public, available via the Cloud Files CDN
+ type: bool
+ default: false
+ region:
+ type: str
+ description:
+      - Region in which to create or manage containers
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent', 'list']
+ default: present
+ ttl:
+ type: int
+ description:
+ - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
+ Setting a TTL is only appropriate for containers that are public
+ type:
+ type: str
+ description:
+      - Type of object to work on, either the container itself or its metadata
+ choices:
+ - container
+ - meta
+ default: container
+ web_error:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
+ web_index:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Containers"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "List all containers"
+ community.general.rax_files:
+ state: list
+
+ - name: "Create container called 'mycontainer'"
+ community.general.rax_files:
+ container: mycontainer
+
+ - name: "Create container 'mycontainer2' with metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ meta:
+ key: value
+ file_for: someuser@example.com
+
+ - name: "Set a container's web index page"
+ community.general.rax_files:
+ container: mycontainer
+ web_index: index.html
+
+ - name: "Set a container's web error page"
+ community.general.rax_files:
+ container: mycontainer
+ web_error: error.html
+
+ - name: "Make container public"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+
+ - name: "Make container public with a 24 hour TTL"
+ community.general.rax_files:
+ container: mycontainer
+ public: yes
+ ttl: 86400
+
+ - name: "Make container private"
+ community.general.rax_files:
+ container: mycontainer
+ private: yes
+
+- name: "Test Cloud Files Containers Metadata Storage"
+ hosts: local
+ gather_facts: no
+ tasks:
+ - name: "Get mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+
+ - name: "Set mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+ meta:
+ uploaded_by: someuser@example.com
+
+ - name: "Remove mycontainer2 metadata"
+ community.general.rax_files:
+ container: "mycontainer2"
+ type: meta
+ state: absent
+ meta:
+ key: ""
+ file_for: ""
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError as e:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=True)
+META_PREFIX = 'x-container-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _fetch_meta(module, container):
+ EXIT_DICT['meta'] = dict()
+ try:
+ for k, v in container.get_metadata().items():
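+            # Keys are returned as 'x-container-meta-<key>'; expose only the bare key.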
+ split_key = k.split(META_PREFIX)[-1]
+ EXIT_DICT['meta'][split_key] = v
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+
+def meta(cf, module, container_, state, meta_, clear_meta):
+ c = _get_container(module, cf, container_)
+
+ if meta_ and state == 'present':
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif meta_ and state == 'absent':
+ remove_results = []
+ for k, v in meta_.items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+ elif state == 'absent':
+ remove_results = []
+ for k, v in c.get_metadata().items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+
+ _fetch_meta(module, c)
+ _locals = locals().keys()
+
+ EXIT_DICT['container'] = c.name
+ if 'meta_set' in _locals or 'remove_results' in _locals:
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
+ if public and private:
+ module.fail_json(msg='container cannot be simultaneously '
+ 'set to public and private')
+
+ if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
+        module.fail_json(msg='attributes cannot be set or removed on a '
+                             'container when state is absent')
+
+ if state == 'list':
+ # We don't care if attributes are specified, let's list containers
+ EXIT_DICT['containers'] = cf.list_containers()
+ module.exit_json(**EXIT_DICT)
+
+ try:
+ c = cf.get_container(container_)
+ except pyrax.exc.NoSuchContainer as e:
+ # Make the container if state=present, otherwise bomb out
+ if state == 'present':
+ try:
+ c = cf.create_container(container_)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['created'] = True
+ else:
+ module.fail_json(msg=e.message)
+ else:
+ # Successfully grabbed a container object
+ # Delete if state is absent
+ if state == 'absent':
+ try:
+ cont_deleted = c.delete()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['deleted'] = True
+
+ if meta_:
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ finally:
+ _fetch_meta(module, c)
+
+ if ttl:
+ try:
+ c.cdn_ttl = ttl
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['ttl'] = c.cdn_ttl
+
+ if public:
+ try:
+ cont_public = c.make_public()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
+ ssl_url=c.cdn_ssl_uri,
+ streaming_url=c.cdn_streaming_uri,
+ ios_uri=c.cdn_ios_uri)
+
+ if private:
+ try:
+ cont_private = c.make_private()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_private'] = True
+
+ if web_index:
+ try:
+ cont_web_index = c.set_web_index_page(web_index)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_index'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ if web_error:
+ try:
+ cont_err_index = c.set_web_error_page(web_error)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ EXIT_DICT['set_error'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['objs_in_container'] = c.object_count
+ EXIT_DICT['total_bytes'] = c.total_bytes
+
+ _locals = locals().keys()
+ if ('cont_deleted' in _locals
+ or 'meta_set' in _locals
+ or 'cont_public' in _locals
+ or 'cont_private' in _locals
+ or 'cont_web_index' in _locals
+ or 'cont_err_index' in _locals):
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ type=dict(choices=['container', 'meta'], default='container'),
+ ttl=dict(type='int'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
+ web_index=dict(),
+ web_error=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container_ = module.params.get('container')
+ state = module.params.get('state')
+ meta_ = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ typ = module.params.get('type')
+ ttl = module.params.get('ttl')
+ public = module.params.get('public')
+ private = module.params.get('private')
+ web_index = module.params.get('web_index')
+ web_error = module.params.get('web_error')
+
+ if state in ['present', 'absent'] and not container_:
+ module.fail_json(msg='please specify a container name')
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py
new file mode 100644
index 00000000..dc445554
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_files_objects.py
@@ -0,0 +1,608 @@
+#!/usr/bin/python
+
+# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files_objects
+short_description: Upload, download, and delete objects in Rackspace Cloud Files
+description:
+ - Upload, download, and delete objects in Rackspace Cloud Files
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing objects.
+ Selecting this option is only appropriate when setting type=meta
+ type: bool
+ default: 'no'
+ container:
+ type: str
+ description:
+ - The container to use for file object operations.
+ required: true
+ dest:
+ type: str
+ description:
+      - The destination of a "get" operation; for example, a local directory, "/home/user/myfolder".
+        Used to specify the destination of an operation on a remote object; for example, a file name,
+        "file1", or a comma-separated list of remote objects, "file1,file2,file17"
+ expires:
+ type: int
+ description:
+ - Used to set an expiration on a file or folder uploaded to Cloud Files.
+ Requires an integer, specifying expiration in seconds
+ meta:
+ type: dict
+ description:
+ - A hash of items to set as metadata values on an uploaded file or folder
+ method:
+ type: str
+ description:
+ - The method of operation to be performed. For example, put to upload files
+ to Cloud Files, get to download files from Cloud Files or delete to delete
+ remote objects in Cloud Files
+ choices:
+ - get
+ - put
+ - delete
+ default: get
+ src:
+ type: str
+ description:
+      - Source from which to upload files. Used to specify a remote object as a source for
+        an operation, for example, a file name, "file1", or a comma-separated list of remote
+        objects, "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
+ structure:
+ description:
+ - Used to specify whether to maintain nested directory structure when downloading objects
+ from Cloud Files. Setting to false downloads the contents of a container to a single,
+ flat directory
+ type: bool
+ default: 'yes'
+ type:
+ type: str
+ description:
+ - Type of object to do work on
+ - Metadata object or a file object
+ choices:
+ - file
+ - meta
+ default: file
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Objects"
+ hosts: local
+ gather_facts: False
+ tasks:
+ - name: "Get objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
+
+ - name: "Get single object from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
+
+ - name: "Get several objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
+
+ - name: "Delete one object in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
+
+ - name: "Delete several objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
+
+ - name: "Delete all objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+
+ - name: "Upload all files to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
+
+ - name: "Upload one file to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
+
+ - name: "Upload one file to test container with metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ src: ~/Downloads/testcont/file2
+ method: put
+ meta:
+ testkey: testdata
+ who_uploaded_this: someuser@example.com
+
+ - name: "Upload one file to test container with TTL of 60 seconds"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
+
+ - name: "Attempt to get remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
+ ignore_errors: yes
+
+ - name: "Attempt to delete remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
+ ignore_errors: yes
+
+- name: "Test Cloud Files Objects Metadata"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get metadata on one object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
+
+ - name: "Get metadata on several objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
+
+ - name: "Set metadata on an object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: put
+ meta:
+ key1: value1
+ key2: value2
+ clear_meta: true
+
+ - name: "Verify metadata is set"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
+
+ - name: "Delete metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: delete
+ meta:
+ key1: ''
+ key2: ''
+
+ - name: "Get metadata on all objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=False)
+META_PREFIX = 'x-object-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+ module.fail_json(msg=e.message)
+
+
+def _upload_folder(cf, folder, container, ttl=None, headers=None):
+ """ Uploads a folder to Cloud Files.
+ """
+ total_bytes = 0
+ for root, dirs, files in os.walk(folder):
+ for fname in files:
+ full_path = os.path.join(root, fname)
+ obj_name = os.path.relpath(full_path, folder)
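+            # Name each object by its path relative to the uploaded folder so the
+            # container mirrors the local directory layout.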
+ obj_size = os.path.getsize(full_path)
+ cf.upload_file(container, full_path,
+ obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
+ total_bytes += obj_size
+ return total_bytes
+
+
+def upload(module, cf, container, src, dest, meta, expires):
+ """ Uploads a single object or a folder to Cloud Files Optionally sets an
+ metadata, TTL value (expires), or Content-Disposition and Content-Encoding
+ headers.
+ """
+ if not src:
+ module.fail_json(msg='src must be specified when uploading')
+
+ c = _get_container(module, cf, container)
+ src = os.path.abspath(os.path.expanduser(src))
+ is_dir = os.path.isdir(src)
+
+ if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
+ module.fail_json(msg='src must be a file or a directory')
+ if dest and is_dir:
+ module.fail_json(msg='dest cannot be set when whole '
+ 'directories are uploaded')
+
+ cont_obj = None
+ total_bytes = 0
+ if dest and not is_dir:
+ try:
+ cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ elif is_dir:
+ try:
+ total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ try:
+ cont_obj = c.upload_file(src, ttl=expires, headers=meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+
+ EXIT_DICT['success'] = True
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
+ if cont_obj or total_bytes > 0:
+ EXIT_DICT['changed'] = True
+ if meta:
+ EXIT_DICT['meta'] = dict(updated=True)
+
+ if cont_obj:
+ EXIT_DICT['bytes'] = cont_obj.total_bytes
+ EXIT_DICT['etag'] = cont_obj.etag
+ else:
+ EXIT_DICT['bytes'] = total_bytes
+
+ module.exit_json(**EXIT_DICT)
+
+
+def download(module, cf, container, src, dest, structure):
+ """ Download objects from Cloud Files to a local path specified by "dest".
+    Optionally disable maintaining a directory structure by passing a
+ false value to "structure".
+ """
+ # Looking for an explicit destination
+ if not dest:
+ module.fail_json(msg='dest is a required argument when '
+ 'downloading from Cloud Files')
+
+ # Attempt to fetch the container by name
+ c = _get_container(module, cf, container)
+
+ # Accept a single object name or a comma-separated list of objs
+ # If not specified, get the entire container
+ if src:
+ objs = src.split(',')
+        objs = [obj.strip() for obj in objs]  # a list, not an iterator, so len() works below
+ else:
+ objs = c.get_object_names()
+
+ dest = os.path.abspath(os.path.expanduser(dest))
+ is_dir = os.path.isdir(dest)
+
+ if not is_dir:
+ module.fail_json(msg='dest must be a directory')
+
+ results = []
+ for obj in objs:
+ try:
+ c.download_object(obj, dest, structure=structure)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(obj)
+
+ len_results = len(results)
+ len_objs = len(objs)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['requested_downloaded'] = results
+ if results:
+ EXIT_DICT['changed'] = True
+ if len_results == len_objs:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
+ else:
+ EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
+ "downloaded" % (len_results, len_objs)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete(module, cf, container, src, dest):
+ """ Delete specific objects by proving a single file name or a
+ comma-separated list to src OR dest (but not both). Omitting file name(s)
+ assumes the entire container is to be deleted.
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ c = _get_container(module, cf, container)
+
+ if objs:
+ objs = objs.split(',')
+        objs = [obj.strip() for obj in objs]  # a list, not an iterator, so len() works below
+ else:
+ objs = c.get_object_names()
+
+ num_objs = len(objs)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.delete_object(obj)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ num_deleted = results.count(True)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['deleted'] = num_deleted
+ EXIT_DICT['requested_deleted'] = objs
+
+ if num_deleted:
+ EXIT_DICT['changed'] = True
+
+ if num_objs == num_deleted:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
+ else:
+ EXIT_DICT['msg'] = ("Error: only %s of %s objects "
+ "deleted" % (num_deleted, num_objs))
+ module.exit_json(**EXIT_DICT)
+
+
+def get_meta(module, cf, container, src, dest):
+ """ Get metadata for a single file, comma-separated list, or entire
+ container
+ """
+ c = _get_container(module, cf, container)
+
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ if objs:
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+ else:
+ objs = c.get_object_names()
+
+ results = dict()
+ for obj in objs:
+ try:
+ meta = c.get_object(obj).get_metadata()
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results[obj] = dict()
+ for k, v in meta.items():
+ meta_key = k.split(META_PREFIX)[-1]
+ results[obj][meta_key] = v
+
+ EXIT_DICT['container'] = c.name
+ if results:
+ EXIT_DICT['meta_results'] = results
+ EXIT_DICT['success'] = True
+ module.exit_json(**EXIT_DICT)
+
+
+def put_meta(module, cf, container, src, dest, meta, clear_meta):
+ """ Set metadata on a container, single file, or comma-separated list.
+ Passing a true value to clear_meta clears the metadata stored in Cloud
+ Files before setting the new metadata to the value of "meta".
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to set meta"
+ " have been specified on both src and dest args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = []
+ for obj in objs:
+ try:
+ result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+        EXIT_DICT['num_changed'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete_meta(module, cf, container, src, dest, meta):
+ """ Removes metadata keys and values specified in meta, if any. Deletes on
+ all objects specified by src or dest (but not both), if any; otherwise it
+ deletes keys on all objects in the container
+ """
+ objs = None
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
+ "deleted have been specified on both src and dest"
+ " args")
+ elif dest:
+ objs = dest
+ else:
+ objs = src
+
+ objs = objs.split(',')
+ objs = map(str.strip, objs)
+
+ c = _get_container(module, cf, container)
+
+ results = [] # Num of metadata keys removed, not objects affected
+ for obj in objs:
+ if meta:
+ for k, v in meta.items():
+ try:
+ result = c.get_object(obj).remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ else:
+ results.append(result)
+ else:
+ try:
+ o = c.get_object(obj)
+ except pyrax.exc.NoSuchObject as e:
+ module.fail_json(msg=e.message)
+
+ for k, v in o.get_metadata().items():
+ try:
+ result = o.remove_metadata_key(k)
+ except Exception as e:
+ module.fail_json(msg=e.message)
+ results.append(result)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['num_deleted'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
+ structure, expires):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "file":
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
+
+ elif method == 'get':
+ download(module, cf, container, src, dest, structure)
+
+ elif method == 'delete':
+ delete(module, cf, container, src, dest)
+
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
+
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(required=True),
+ src=dict(),
+ dest=dict(),
+ method=dict(default='get', choices=['put', 'get', 'delete']),
+ type=dict(default='file', choices=['file', 'meta']),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
+ expires=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container = module.params.get('container')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ method = module.params.get('method')
+ typ = module.params.get('type')
+ meta = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ structure = module.params.get('structure')
+ expires = module.params.get('expires')
+
+ if clear_meta and not typ == 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py
new file mode 100644
index 00000000..330c510d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_identity.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present']
+ default: present
+ required: false
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def cloud_identity(module, state, identity):
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ instance.update(rax_to_dict(identity))
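+    # Reduce the service catalog to its service names for the returned facts.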
+    instance['services'] = list(instance.get('services', {}).keys())
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.identity:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py
new file mode 100644
index 00000000..0314883f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_keypair.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_keypair
+short_description: Create a keypair for use with Rackspace Cloud Servers
+description:
+ - Create a keypair for use with Rackspace Cloud Servers
+options:
+ name:
+ type: str
+ description:
+ - Name of keypair
+ required: true
+ public_key:
+ type: str
+ description:
+ - Public Key string to upload. Can be a file path or string
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+notes:
+ - Keypairs cannot be manipulated, only created and deleted. To "update" a
+ keypair you must first delete and then recreate.
+ - The ability to specify a file path for the public key was added in 1.7
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ region: DFW
+ register: keypair
+ - name: Create local public key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.public_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
+ - name: Create local private key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.private_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
+
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
+'''
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_keypair(module, name, public_key, state):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ keypair = {}
+
+ if state == 'present':
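+        # public_key may be either a path to a key file or the key material itself.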
+ if public_key and os.path.isfile(public_key):
+ try:
+                with open(public_key) as f:
+                    public_key = f.read()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % public_key)
+
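+ # Idempotent create: reuse an existing keypair with this name and only create one when the lookup raises NotFound.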
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except cs.exceptions.NotFound:
+ try:
+ keypair = cs.keypairs.create(name, public_key)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except Exception:
+ pass
+
+ if keypair:
+ try:
+ keypair.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ public_key=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ public_key = module.params.get('public_key')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ rax_keypair(module, name, public_key, state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py
new file mode 100644
index 00000000..b7d172d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_meta.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_meta
+short_description: Manipulate metadata for Rackspace Cloud Servers
+description:
+ - Manipulate metadata for Rackspace Cloud Servers
+options:
+ address:
+ type: str
+ description:
+ - Server IP address to modify metadata for; this will match any IP address
+ assigned to the server
+ id:
+ type: str
+ description:
+ - Server ID to modify metadata for
+ name:
+ type: str
+ description:
+ - Server name to modify metadata for
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Set metadata for a server
+ hosts: all
+ gather_facts: False
+ tasks:
+ - name: Set metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ meta:
+ group: primary_group
+ groups:
+ - group_two
+ - group_three
+ app: my_app
+
+ - name: Clear metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+'''
+
+import json
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+from ansible.module_utils.six import string_types
+
+
+def rax_meta(module, address, name, server_id, meta):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
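+ # Resolve the target server from whichever selector was supplied: name (anchored regex match), IP address, or server ID.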
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+ except Exception:
+ pass
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif not servers:
+ module.fail_json(msg='Failed to find a server matching provided '
+ 'search parameters')
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ server = servers[0]
+ if server.metadata == meta:
+ changed = False
+ else:
+ changed = True
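+ # Remove keys present on the server but absent from the requested metadata, then apply the new set.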
+ removed = set(server.metadata.keys()).difference(meta.keys())
+ cs.servers.delete_meta(server, list(removed))
+ cs.servers.set_meta(server, meta)
+ server.get()
+
+ module.exit_json(changed=changed, meta=server.metadata)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ meta=dict(type='dict', default=dict()),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+ meta = module.params.get('meta')
+
+ setup_rax_module(module, pyrax)
+
+ rax_meta(module, address, name, server_id, meta)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
new file mode 100644
index 00000000..8de26609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ type: str
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a string
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ type: str
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ type: str
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ type: str
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ type: str
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of string
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ community.general.rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ # Coerce attributes.
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
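+ # A different check_id or notification_plan_id forces a delete and re-create; only criteria, disabled state, label and metadata are updated in place.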
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+ should_update = (disabled and disabled != alarm.disabled) or \
+ (metadata and metadata != alarm.metadata) or \
+ (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py
new file mode 100644
index 00000000..e04dfc74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_check.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ entity_id:
+ type: str
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ type: str
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ type: str
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ - |
+ Choices for this option are:
+ - C(remote.dns)
+ - C(remote.ftp-banner)
+ - C(remote.http)
+ - C(remote.imap-banner)
+ - C(remote.mssql-banner)
+ - C(remote.mysql-banner)
+ - C(remote.ping)
+ - C(remote.pop3-banner)
+ - C(remote.postgresql-banner)
+ - C(remote.smtp-banner)
+ - C(remote.smtp)
+ - C(remote.ssh)
+ - C(remote.tcp)
+ - C(remote.telnet-banner)
+ - C(agent.filesystem)
+ - C(agent.memory)
+ - C(agent.load_average)
+ - C(agent.cpu)
+ - C(agent.disk)
+ - C(agent.network)
+ - C(agent.plugin)
+ required: true
+ monitoring_zones_poll:
+ type: str
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ type: str
+ description:
+ - One of I(target_hostname) and I(target_alias) is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4 address, IPv6 address, or FQDN.
+ target_alias:
+ type: str
+ description:
+ - One of I(target_alias) and I(target_hostname) is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ C(ip_addresses) hash to resolve an IP address to target.
+ details:
+ type: dict
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ type: int
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ type: int
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ community.general.rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ meta:
+ hurf: durf
+ register: the_check
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.items():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
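+ # All other attributes are updated in place via check.update(); only flag an update when a supplied value differs from the existing check.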
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
new file mode 100644
index 00000000..69f49cd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ label:
+ type: str
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ type: str
+ description:
+ - Ensure that an entity with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ agent_id:
+ type: str
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ type: dict
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+ added to this entity. Must be a dictionary with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ type: dict
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ community.general.rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+ meta:
+ hurf: durf
+ register: the_entity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
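+ # A change to named_ip_addresses forces a delete and re-create; agent_id and metadata are updated in place.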
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+ should_update = (agent_id and agent_id != entity.agent_id) or \
+ (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
new file mode 100644
index 00000000..416d03ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ type: str
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ type: dict
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+ community.general.rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: omg
+ notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
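+ # A different notification type forces a delete and re-create; matching notifications only have their details updated.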
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+ if should_update and not should_delete:
+ notification.update(details=details)
+ changed = True
+
+ if should_delete:
+ notification.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
new file mode 100644
index 00000000..a4b8920d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ type: list
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ community.general.rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
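+ # Notification plans are never updated in place; any change to the state lists deletes the existing plan and re-creates it.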
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list'),
+ warning_state=dict(type='list'),
+ ok_state=dict(type='list')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py
new file mode 100644
index 00000000..27a793b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_network.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_network
+short_description: create / delete an isolated network in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud isolated network.
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ label:
+ type: str
+ description:
+ - Label (name) to give the network
+ required: yes
+ cidr:
+ type: str
+ description:
+ - cidr of the network being created
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Jesse Keating (@omgjlk)"
+extends_documentation_fragment:
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build an Isolated Network
+ hosts: localhost
+ gather_facts: False
+
+ tasks:
+ - name: Network create request
+ local_action:
+ module: rax_network
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_network(module, state, label, cidr):
+ changed = False
+ network = None
+ networks = []
+
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not cidr:
+ module.fail_json(msg='missing required arguments: cidr')
+
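+ # Idempotent create: reuse an existing network with this label and only create one when the lookup raises NetworkNotFound.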
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ except pyrax.exceptions.NetworkNotFound:
+ try:
+ network = pyrax.cloud_networks.create(label, cidr=cidr)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ network.delete()
+ changed = True
+ except pyrax.exceptions.NetworkNotFound:
+ pass
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if network:
+ instance = dict(id=network.id,
+ label=network.label,
+ cidr=network.cidr)
+ networks.append(instance)
+
+ module.exit_json(changed=changed, networks=networks)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present',
+ choices=['present', 'absent']),
+ label=dict(required=True),
+ cidr=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ cidr = module.params.get('cidr')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_network(module, state, label, cidr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py
new file mode 100644
index 00000000..dca006da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_queue.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_queue
+short_description: create / delete a queue in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud queue.
+options:
+ name:
+ type: str
+ description:
+ - Name to give the queue
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+- name: Build a Queue
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Queue create request
+ local_action:
+ module: rax_queue
+ credentials: ~/.raxpub
+ name: my-queue
+ region: DFW
+ state: present
+ register: my_queue
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_queue(module, state, name):
+ for arg_name, arg in (('state', state), ('name', name)):
+ if not arg:
+ module.fail_json(msg='%s is required for rax_queue' % arg_name)
+
+ changed = False
+ queues = []
+ instance = {}
+
+ cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
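+ # Collect existing queues whose name matches exactly; more than one match is treated as an error.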
+ for queue in cq.list():
+ if name != queue.name:
+ continue
+
+ queues.append(queue)
+
+ if len(queues) > 1:
+ module.fail_json(msg='Multiple Queues were matched by name')
+
+ if state == 'present':
+ if not queues:
+ try:
+ queue = cq.create(name)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ queue = queues[0]
+
+ instance = dict(name=queue.name)
+ result = dict(changed=changed, queue=instance)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if queues:
+ queue = queues[0]
+ try:
+ queue.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, queue=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_queue(module, state, name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
new file mode 100644
index 00000000..7b2b6ace
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
@@ -0,0 +1,438 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+options:
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: 'no'
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified, it will fallback to C(auto).
+ choices:
+ - auto
+ - manual
+ files:
+ type: dict
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ loadbalancers:
+ type: list
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ type: int
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ type: dict
+ description:
+ - A hash of metadata to associate with the instance
+ min_entities:
+ type: int
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ type: list
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ type: str
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the server's config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the scaling group to finish provisioning the minimum amount of
+ servers
+ type: bool
+ default: 'no'
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+ register: asg
+'''
+
+import base64
+import json
+import os
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six import string_types
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
+ image=None, key_name=None, loadbalancers=None, meta=None,
+ min_entities=0, max_entities=0, name=None, networks=None,
+ server_name=None, state='present', user_data=None,
+ config_drive=False, wait=True, wait_timeout=300):
+ files = {} if files is None else files
+ loadbalancers = [] if loadbalancers is None else loadbalancers
+ meta = {} if meta is None else meta
+ networks = [] if networks is None else networks
+
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(user_data):
+ try:
+ with open(user_data) as f:
+ user_data = f.read()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s: %s' % (user_data, e))
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ for nic in nics:
+ # pyrax is currently returning net-id, but we need uuid
+ # this check makes this forward compatible for a time when
+ # pyrax uses uuid instead
+ if nic.get('net-id'):
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = []
+ if files:
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ f = open(lpath, 'r')
+ personality.append({
+ 'path': rpath,
+ 'contents': f.read()
+ })
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s: %s' % (lpath, e))
+
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ try:
+ lb_id = int(lb.get('id'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer ID is not an integer: '
+ '%s' % lb.get('id'))
+ try:
+ port = int(lb.get('port'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer port is not an '
+ 'integer: %s' % lb.get('port'))
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
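+ # Reuse an existing scaling group with this name; NotFound triggers creation and NoUniqueMatch is fatal.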
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+ metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name, config_drive=config_drive,
+ user_data=user_data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['server_name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ disk_config = disk_config or 'AUTO'
+ if ((disk_config or lc.get('disk_config')) and
+ disk_config != lc.get('disk_config', 'AUTO')):
+ lc_args['disk_config'] = disk_config
+
+ if (meta or lc.get('meta')) and meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+ test_personality = []
+ for p in personality:
+ test_personality.append({
+ 'path': p['path'],
+ # b64encode() requires bytes on Python 3, so encode the text contents first
+ 'contents': base64.b64encode(p['contents'].encode('utf-8')).decode('ascii')
+ })
+ if ((test_personality or lc.get('personality')) and
+ test_personality != lc.get('personality')):
+ lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+ # Work around for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if config_drive != lc.get('config_drive', False):
+ lc_args['config_drive'] = config_drive
+
+ if (user_data and
+ base64.b64encode(user_data.encode('utf-8')).decode('ascii') != lc.get('user_data')):
+ lc_args['user_data'] = user_data
+
+ if lc_args:
+ # Work around for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
+ sg.get()
+
+ if wait:
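+ # Poll the group state until no servers are pending or until wait_timeout elapses (a wait_timeout of 0 waits indefinitely).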
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ state = sg.get_state()
+ if state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+ except pyrax.exceptions.NotFound:
+ sg = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ config_drive=dict(default=False, type='bool'),
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+ files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ config_drive = module.params.get('config_drive')
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+ module.fail_json(msg='min_entities and max_entities must be an '
+ 'integer between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+ files=files, flavor=flavor, image=image, meta=meta,
+ key_name=key_name, loadbalancers=loadbalancers,
+ min_entities=min_entities, max_entities=max_entities,
+ name=name, networks=networks, server_name=server_name,
+ state=state, config_drive=config_drive, user_data=user_data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
new file mode 100644
index 00000000..384825f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+options:
+ at:
+ type: str
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ type: int
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+ in the scaling group. If this is a percentage, you must set
+ I(is_percent) to C(true) also.
+ cron:
+ type: str
+ description:
+      - The time when the policy will be executed, as a cron entry. For
+        example, setting this parameter to C(1 0 * * *) causes the policy
+        to run at one minute past midnight every day.
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ desired_capacity:
+ type: int
+ description:
+      - The desired server capacity of the scaling group; that is, how
+        many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ type: bool
+ name:
+ type: str
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ type: str
+ description:
+      - The type of policy that will be executed.
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ type: str
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+- community.general.rackspace
+- community.general.rackspace.openstack
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asps_at
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asp_cron
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+ register: asp_webhook
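+
+    # Illustrative: with state=absent the same module removes the policy;
+    # name, policy_type and scaling_group are still required options.
+    - community.general.rax_scaling_policy:
+        credentials: ~/.raxpub
+        region: ORD
+        name: ASG Test Policy - webhook
+        policy_type: webhook
+        scaling_group: ASG Test
+        state: absent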
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+        policies = [p for p in sg.list_policies() if name == p.name]
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ else:
+ policy = policies[0]
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
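+            # The following blocks reconcile how the policy is sized: based on
+            # which attribute the existing policy exposes (change, changePercent
+            # or desiredCapacity), switch it to an absolute change, a percentage
+            # change, or a desired capacity as requested.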
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+ else:
+ try:
+            policies = [p for p in sg.list_policies() if name == p.name]
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+ module.fail_json(msg='policy_type=schedule is required for a time '
+ 'based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py
new file mode 100644
index 00000000..7100d378
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/read_csv.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: read_csv
+short_description: Read a CSV file
+description:
+- Read a CSV file and return a list or a dictionary, containing one dictionary per row.
+author:
+- Dag Wieers (@dagwieers)
+options:
+ path:
+ description:
+ - The CSV filename to read data from.
+ type: path
+ required: yes
+ aliases: [ filename ]
+ key:
+ description:
+ - The column name used as a key for the resulting dictionary.
+ - If C(key) is unset, the module returns a list of dictionaries,
+ where each dictionary is a row in the CSV file.
+ type: str
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ unique:
+ description:
+ - Whether the C(key) used is expected to be unique.
+ type: bool
+ default: yes
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+    - Whether to ignore any whitespace immediately following the delimiter.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by C(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+notes:
+- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ community.general.read_csv:
+ path: users.csv
+ key: name
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ fieldnames: name,uid,gid
+ delimiter: ';'
+ register: users
+ delegate_to: localhost
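+
+# Illustrative: parse a CSV file written in the 'unix' dialect (all fields
+# quoted, LF line endings) and fail on malformed rows via strict.
+- name: Read users from a unix-dialect CSV file
+  community.general.read_csv:
+    path: users.csv
+    dialect: unix
+    strict: true
+  register: users
+  delegate_to: localhost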
+'''
+
+RETURN = r'''
+dict:
+ description: The CSV content as a dictionary.
+ returned: success
+ type: dict
+ sample:
+ dag:
+ name: dag
+ uid: 500
+ gid: 500
+ jeroen:
+ name: jeroen
+ uid: 501
+ gid: 500
+list:
+ description: The CSV content as a list.
+ returned: success
+ type: list
+ sample:
+ - name: dag
+ uid: 500
+ gid: 500
+ - name: jeroen
+ uid: 501
+ gid: 500
+'''
+
+import csv
+from io import BytesIO, StringIO
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import PY3
+
+
+# Add Unix dialect from Python 3
+class unix_dialect(csv.Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+
+csv.register_dialect("unix", unix_dialect)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['filename']),
+ dialect=dict(type='str', default='excel'),
+ key=dict(type='str'),
+ fieldnames=dict(type='list', elements='str'),
+ unique=dict(type='bool', default=True),
+ delimiter=dict(type='str'),
+ skipinitialspace=dict(type='bool'),
+ strict=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ dialect = module.params['dialect']
+ key = module.params['key']
+ fieldnames = module.params['fieldnames']
+ unique = module.params['unique']
+
+ if dialect not in csv.list_dialects():
+ module.fail_json(msg="Dialect '%s' is not supported by your version of python." % dialect)
+
+ dialect_options = dict(
+ delimiter=module.params['delimiter'],
+ skipinitialspace=module.params['skipinitialspace'],
+ strict=module.params['strict'],
+ )
+
+ # Create a dictionary from only set options
+ dialect_params = dict((k, v) for k, v in dialect_options.items() if v is not None)
+ if dialect_params:
+ try:
+ csv.register_dialect('custom', dialect, **dialect_params)
+ except TypeError as e:
+ module.fail_json(msg="Unable to create custom dialect: %s" % to_text(e))
+ dialect = 'custom'
+
+ try:
+ with open(path, 'rb') as f:
+ data = f.read()
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unable to open file: %s" % to_text(e))
+
+ if PY3:
+ # Manually decode on Python3 so that we can use the surrogateescape error handler
+ data = to_text(data, errors='surrogate_or_strict')
+ fake_fh = StringIO(data)
+ else:
+ fake_fh = BytesIO(data)
+
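+
+    # If fieldnames is None, DictReader takes the first row of the file as the
+    # header instead of returning it as a data row.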
+ reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
+
+ if key and key not in reader.fieldnames:
+ module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+ data_dict = dict()
+ data_list = list()
+
+ if key is None:
+ try:
+ for row in reader:
+ data_list.append(row)
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+ else:
+ try:
+ for row in reader:
+ if unique and row[key] in data_dict:
+ module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+ data_dict[row[key]] = row
+ except csv.Error as e:
+ module.fail_json(msg="Unable to process file: %s" % to_text(e))
+
+ module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py
new file mode 100644
index 00000000..78007f1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_command.py
@@ -0,0 +1,756 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+    - Manages OOB controller operations such as reboot and log management.
+    - Manages OOB controller users, for example adding, deleting, and updating accounts.
+    - Manages system power, for example power on, power off, and graceful or forced reboot.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - Username for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ id:
+ required: false
+ aliases: [ account_id ]
+ description:
+ - ID of account to delete/modify
+ type: str
+ new_username:
+ required: false
+ aliases: [ account_username ]
+ description:
+ - Username of account to add/delete/modify
+ type: str
+ new_password:
+ required: false
+ aliases: [ account_password ]
+ description:
+ - New password of account to add/modify
+ type: str
+ roleid:
+ required: false
+ aliases: [ account_roleid ]
+ description:
+ - Role of account to add/modify
+ type: str
+ bootdevice:
+ required: false
+ description:
+ - bootdevice when setting boot configuration
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ uefi_target:
+ required: false
+ description:
+ - UEFI target when bootdevice is "UefiTarget"
+ type: str
+ boot_next:
+ required: false
+ description:
+ - BootNext target when bootdevice is "UefiBootNext"
+ type: str
+ update_username:
+ required: false
+ aliases: [ account_updatename ]
+ description:
+      - New user name to set for the account specified by I(account_username)
+ type: str
+ version_added: '0.2.0'
+ account_properties:
+ required: false
+ description:
+ - properties of account service to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ update_image_uri:
+ required: false
+ description:
+ - The URI of the image for the update
+ type: str
+ version_added: '0.2.0'
+ update_protocol:
+ required: false
+ description:
+ - The protocol for the update
+ type: str
+ version_added: '0.2.0'
+ update_targets:
+ required: false
+ description:
+ - The list of target resource URIs to apply the update to
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ update_creds:
+ required: false
+ description:
+ - The credentials for retrieving the update image
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ username:
+ required: false
+ description:
+ - The username for retrieving the update image
+ type: str
+ password:
+ required: false
+ description:
+ - The password for retrieving the update image
+ type: str
+ virtual_media:
+ required: false
+ description:
+ - The options for VirtualMedia commands
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ media_types:
+ required: false
+ description:
+ - The list of media types appropriate for the image
+ type: list
+ elements: str
+ image_url:
+ required: false
+ description:
+          - The URL of the image to insert or eject
+ type: str
+ inserted:
+ required: false
+ description:
+ - Indicates if the image is treated as inserted on command completion
+ type: bool
+ default: True
+ write_protected:
+ required: false
+ description:
+ - Indicates if the media is treated as write-protected
+ type: bool
+ default: True
+ username:
+ required: false
+ description:
+ - The username for accessing the image URL
+ type: str
+ password:
+ required: false
+ description:
+ - The password for accessing the image URL
+ type: str
+ transfer_protocol_type:
+ required: false
+ description:
+ - The network protocol to use with the image
+ type: str
+ transfer_method:
+ required: false
+ description:
+ - The transfer method to use with the image
+ type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Restart system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulRestart
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Turn system power off
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceOff
+ resource_id: 437XR1138R2
+
+ - name: Restart system power forcefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceRestart
+ resource_id: 437XR1138R2
+
+ - name: Shutdown system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulShutdown
+ resource_id: 437XR1138R2
+
+ - name: Turn system power on
+ community.general.redfish_command:
+ category: Systems
+ command: PowerOn
+ resource_id: 437XR1138R2
+
+ - name: Reboot system power
+ community.general.redfish_command:
+ category: Systems
+ command: PowerReboot
+ resource_id: 437XR1138R2
+
+ - name: Set one-time boot device to {{ bootdevice }}
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiTarget"
+ uefi_target: "/0x31/0x33/0x01/0x01"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to BootNext target of "Boot0001"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiBootNext"
+ boot_next: "Boot0001"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: EnableContinuousBootOverride
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: DisableBootOverride
+
+ - name: Set chassis indicator LED to blink
+ community.general.redfish_command:
+ category: Chassis
+ command: IndicatorLedBlink
+ resource_id: 1U
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Add user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Add user using new option aliases
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+ account_roleid: "{{ account_roleid }}"
+
+ - name: Delete user
+ community.general.redfish_command:
+ category: Accounts
+ command: DeleteUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Disable user
+ community.general.redfish_command:
+ category: Accounts
+ command: DisableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Add and enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser,EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user password
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserPassword
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+
+ - name: Update user role
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserRole
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_updatename: "{{ account_updatename }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ update_username: "{{ update_username }}"
+
+ - name: Update AccountService properties
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateAccountServiceProperties
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_properties:
+ AccountLockoutThreshold: 5
+ AccountLockoutDuration: 600
+
+ - name: Clear Manager Logs with a timeout of 20 seconds
+ community.general.redfish_command:
+ category: Manager
+ command: ClearLogs
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Clear Sessions
+ community.general.redfish_command:
+ category: Sessions
+ command: ClearSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Simple update
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: https://example.com/myupdate.img
+
+ - name: Simple update with additional options
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: //example.com/myupdate.img
+ update_protocol: FTP
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: BMC
+
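+  # Illustrative only: transfer_protocol_type values and their support vary by
+  # OOB controller; HTTP is shown here as a plausible value.
+  - name: Insert Virtual Media with an explicit transfer protocol
+    community.general.redfish_command:
+      category: Manager
+      command: VirtualMediaInsert
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+      virtual_media:
+        image_url: 'http://example.com/images/SomeLinux-current.iso'
+        media_types:
+          - CD
+        transfer_protocol_type: HTTP
+      resource_id: BMC
+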
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: BMC
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: GracefulRestart
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulRestart
+ resource_id: BMC
+
+ - name: Turn manager power off
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceOff
+ resource_id: BMC
+
+ - name: Restart manager power forcefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceRestart
+ resource_id: BMC
+
+ - name: Shutdown manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulShutdown
+ resource_id: BMC
+
+ - name: Turn manager power on
+ community.general.redfish_command:
+ category: Manager
+ command: PowerOn
+ resource_id: BMC
+
+ - name: Reboot manager power
+ community.general.redfish_command:
+ category: Manager
+ command: PowerReboot
+ resource_id: BMC
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
+ "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"],
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
+ "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
+ "UpdateUserRole", "UpdateUserPassword", "UpdateUserName",
+ "UpdateAccountServiceProperties"],
+ "Sessions": ["ClearSessions"],
+ "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
+ "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
+ "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
+ "Update": ["SimpleUpdate"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ id=dict(aliases=["account_id"]),
+ new_username=dict(aliases=["account_username"]),
+ new_password=dict(aliases=["account_password"], no_log=True),
+ roleid=dict(aliases=["account_roleid"]),
+ update_username=dict(type='str', aliases=["account_updatename"]),
+ account_properties=dict(type='dict', default={}),
+ bootdevice=dict(),
+ timeout=dict(type='int', default=10),
+ uefi_target=dict(),
+ boot_next=dict(),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ update_protocol=dict(),
+ update_targets=dict(type='list', elements='str', default=[]),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # user to add/modify/delete
+ user = {'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # update options
+ update_opts = {
+ 'update_image_uri': module.params['update_image_uri'],
+ 'update_protocol': module.params['update_protocol'],
+ 'update_targets': module.params['update_targets'],
+ 'update_creds': module.params['update_creds']
+ }
+
+ # Boot override options
+ boot_opts = {
+ 'bootdevice': module.params['bootdevice'],
+ 'uefi_target': module.params['uefi_target'],
+ 'boot_next': module.params['boot_next']
+ }
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Accounts":
+ ACCOUNTS_COMMANDS = {
+ "AddUser": rf_utils.add_user,
+ "EnableUser": rf_utils.enable_user,
+ "DeleteUser": rf_utils.delete_user,
+ "DisableUser": rf_utils.disable_user,
+ "UpdateUserRole": rf_utils.update_user_role,
+ "UpdateUserPassword": rf_utils.update_user_password,
+ "UpdateUserName": rf_utils.update_user_name,
+ "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+ }
+
+ # execute only if we find an Account service resource
+ result = rf_utils._find_accountservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ result = ACCOUNTS_COMMANDS[command](user)
+
+ elif category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command.startswith('Power'):
+ result = rf_utils.manage_system_power(command)
+ elif command == "SetOneTimeBoot":
+ boot_opts['override_enabled'] = 'Once'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "EnableContinuousBootOverride":
+ boot_opts['override_enabled'] = 'Continuous'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "DisableBootOverride":
+ boot_opts['override_enabled'] = 'Disabled'
+ result = rf_utils.set_boot_override(boot_opts)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+ # Check if more than one led_command is present
+ num_led_commands = sum([command in led_commands for command in command_list])
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command in led_commands:
+ result = rf_utils.manage_indicator_led(command)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ClearSessions":
+ result = rf_utils.clear_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+            # standardize on the Power* commands, but allow the legacy
+ # GracefulRestart command
+ if command == 'GracefulRestart':
+ command = 'PowerGracefulRestart'
+
+ if command.startswith('Power'):
+ result = rf_utils.manage_manager_power(command)
+ elif command == 'ClearLogs':
+ result = rf_utils.clear_logs()
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media)
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "SimpleUpdate":
+ result = rf_utils.simple_update(update_opts)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ changed = result.get('changed', True)
+ module.exit_json(changed=changed, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py
new file mode 100644
index 00000000..26b692a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_config.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_config
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ set or update a configuration attribute.
+ - Manages BIOS configuration settings.
+ - Manages OOB controller configuration settings.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ bios_attribute_name:
+ required: false
+ description:
+      - Name of the BIOS attribute to update (deprecated; use I(bios_attributes) instead)
+ default: 'null'
+ type: str
+ bios_attribute_value:
+ required: false
+ description:
+      - Value of the BIOS attribute to update (deprecated; use I(bios_attributes) instead)
+ default: 'null'
+ type: raw
+ bios_attributes:
+ required: false
+ description:
+ - dictionary of BIOS attributes to update
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ boot_order:
+ required: false
+ description:
+ - list of BootOptionReference strings specifying the BootOrder
+ default: []
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ network_protocols:
+ required: false
+ description:
+      - Dictionary of manager network protocol settings to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ nic_addr:
+ required: false
+ description:
+ - EthernetInterface Address string on OOB controller
+ default: 'null'
+ type: str
+ version_added: '0.2.0'
+ nic_config:
+ required: false
+ description:
+      - Dictionary of EthernetInterface settings to apply on the OOB controller
+ type: dict
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Set BootMode to UEFI
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Uefi"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+  - name: Set multiple BIOS attributes
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable PXE Boot for NIC1 using deprecated options
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attribute_name: PxeDev1EnDis
+ bios_attribute_value: Enabled
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set BIOS default settings with a timeout of 20 seconds
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosDefaultSettings
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Set boot order
+ community.general.redfish_config:
+ category: Systems
+ command: SetBootOrder
+ boot_order:
+ - Boot0002
+ - Boot0001
+ - Boot0000
+ - Boot0003
+ - Boot0004
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set boot order to the default
+ community.general.redfish_config:
+ category: Systems
+ command: SetDefaultBootOrder
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager Network Protocols
+ community.general.redfish_config:
+ category: Manager
+ command: SetNetworkProtocols
+ network_protocols:
+ SNMP:
+ ProtocolEnabled: True
+ Port: 161
+ HTTP:
+ ProtocolEnabled: False
+ Port: 8080
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager NIC
+ community.general.redfish_config:
+ category: Manager
+ command: SetManagerNic
+ nic_config:
+ DHCPv4:
+ DHCPEnabled: False
+ IPv4StaticAddresses:
+ Address: 192.168.1.3
+ Gateway: 192.168.1.1
+ SubnetMask: 255.255.255.0
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
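+
+  # Illustrative only: nic_addr selects which EthernetInterface to modify when
+  # the manager exposes more than one; the address below is a placeholder.
+  - name: Set a specific Manager NIC selected by nic_addr
+    community.general.redfish_config:
+      category: Manager
+      command: SetManagerNic
+      nic_addr: "aa:bb:cc:dd:ee:ff"
+      nic_config:
+        DHCPv4:
+          DHCPEnabled: True
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"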
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
+ "SetDefaultBootOrder"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ bios_attribute_name=dict(default='null'),
+ bios_attribute_value=dict(default='null', type='raw'),
+ bios_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ boot_order=dict(type='list', elements='str', default=[]),
+ network_protocols=dict(
+ type='dict',
+ default={}
+ ),
+ resource_id=dict(),
+ nic_addr=dict(default='null'),
+ nic_config=dict(
+ type='dict',
+ default={}
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # BIOS attributes to update
+ bios_attributes = module.params['bios_attributes']
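+    # Fold the deprecated single-attribute options into bios_attributes so that
+    # both interfaces go through the same update path.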
+ if module.params['bios_attribute_name'] != 'null':
+ bios_attributes[module.params['bios_attribute_name']] = module.params[
+ 'bios_attribute_value']
+ module.deprecate(msg='The bios_attribute_name/bios_attribute_value '
+ 'options are deprecated. Use bios_attributes instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ # boot order
+ boot_order = module.params['boot_order']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # manager nic
+ nic_addr = module.params['nic_addr']
+ nic_config = module.params['nic_config']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetBiosDefaultSettings":
+ result = rf_utils.set_bios_default_settings()
+ elif command == "SetBiosAttributes":
+ result = rf_utils.set_bios_attributes(bios_attributes)
+ elif command == "SetBootOrder":
+ result = rf_utils.set_boot_order(boot_order)
+ elif command == "SetDefaultBootOrder":
+ result = rf_utils.set_default_boot_order()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetNetworkProtocols":
+ result = rf_utils.set_network_protocols(module.params['network_protocols'])
+ elif command == "SetManagerNic":
+ result = rf_utils.set_manager_nic(nic_addr, nic_config)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_facts.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
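+
+  # GetLogs is another supported Manager command (see CATEGORY_COMMANDS_ALL).
+  - name: Get manager log entries
+    community.general.redfish_info:
+      category: Manager
+      command: GetLogs
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+    register: result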
+'''
+
+RETURN = '''
+result:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
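+# Default command run for a category when the 'command' option is not specified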
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+ for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
+ command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redfish_info.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+ description: Different results depending on the requested category and command
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+ for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
+ command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py
new file mode 100644
index 00000000..a4599588
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redhat_subscription.py
@@ -0,0 +1,930 @@
+#!/usr/bin/python
+
+# James Laska (jlaska@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redhat_subscription
+short_description: Manage registration and subscriptions to RHSM using the C(subscription-manager) command
+description:
+ - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command
+author: "Barnaby Court (@barnabycourt)"
+notes:
+ - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
+ I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
+ I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to None.
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ choices: [ "present", "absent" ]
+ default: "present"
+ type: str
+ username:
+ description:
+ - access.redhat.com or Sat6 username
+ type: str
+ password:
+ description:
+ - access.redhat.com or Sat6 password
+ type: str
+ server_hostname:
+ description:
+ - Specify an alternative Red Hat Subscription Management or Sat6 server
+ type: str
+ server_insecure:
+ description:
+ - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ type: str
+ rhsm_baseurl:
+ description:
+ - Specify CDN baseurl
+ type: str
+ rhsm_repo_ca_cert:
+ description:
+ - Specify an alternative location for a CA certificate for CDN
+ type: str
+ server_proxy_hostname:
+ description:
+ - Specify a HTTP proxy hostname
+ type: str
+ server_proxy_port:
+ description:
+ - Specify a HTTP proxy port
+ type: str
+ server_proxy_user:
+ description:
+ - Specify a user for HTTP proxy with basic authentication
+ type: str
+ server_proxy_password:
+ description:
+ - Specify a password for HTTP proxy with basic authentication
+ type: str
+ auto_attach:
+ description:
+ - Upon successful registration, auto-consume available subscriptions
+ - Added in favor of deprecated autosubscribe in 2.5.
+ type: bool
+ aliases: [autosubscribe]
+ activationkey:
+ description:
+ - supply an activation key for use with registration
+ type: str
+ org_id:
+ description:
+ - Organization ID to use in conjunction with activationkey
+ type: str
+ environment:
+ description:
+ - Register with a specific environment in the destination org. Used with Red Hat Satellite 6.x or Katello
+ type: str
+ pool:
+ description:
+ - |
+ Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
+ possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ default: '^$'
+ type: str
+ pool_ids:
+ description:
+ - |
+ Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
+ C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ default: []
+ type: list
+ consumer_type:
+ description:
+ - The type of unit to register, defaults to system
+ type: str
+ consumer_name:
+ description:
+ - Name of the system to register, defaults to the hostname
+ type: str
+ consumer_id:
+ description:
+ - |
+ References an existing consumer ID to resume using a previous registration
+ for this system. If the system's identity certificate is lost or corrupted,
+ this option allows it to resume using its previous identity and subscriptions.
+ The default is to not specify a consumer ID so a new ID is created.
+ type: str
+ force_register:
+ description:
+ - Register the system even if it is already registered
+ type: bool
+ default: no
+ release:
+ description:
+ - Set a release version
+ type: str
+ syspurpose:
+ description:
+ - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)
+ and synchronize these attributes with RHSM server. Syspurpose attributes help attach
+ the most appropriate subscriptions to the system automatically. When the C(syspurpose.json) file
+ already contains some attributes, the new attributes overwrite the existing ones.
+ Any existing attribute that is not listed in the new set of attributes is removed
+ from the C(syspurpose.json) file. Unknown attributes are ignored.
+ type: dict
+ default: {}
+ suboptions:
+ usage:
+ description: Syspurpose attribute usage
+ type: str
+ role:
+ description: Syspurpose attribute role
+ type: str
+ service_level_agreement:
+ description: Syspurpose attribute service_level_agreement
+ type: str
+ addons:
+ description: Syspurpose attribute addons
+ type: list
+ sync:
+ description:
+ - When this option is true, syspurpose attributes are synchronized with the
+ RHSM server immediately. When it is false, the syspurpose attributes will be
+ synchronized with the RHSM server later by the rhsmcertd daemon.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+
+- name: Same as above but subscribe to a specific pool by ID.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids: 0123456789abcdef0123456789abcdef
+
+- name: Register and subscribe to multiple pools.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef: 2
+ - 1123456789abcdef0123456789abcdef: 4
+
+- name: Register and pull existing system data.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+
+- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^Red Hat Enterprise Server$'
+
+- name: Register as user credentials into given environment (against Red Hat Satellite 6.x), and auto-subscribe.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ auto_attach: true
+
+- name: Register as user (joe_user) with password (somepass) and a specific release
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ release: 7.4
+
+- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+ syspurpose:
+ usage: "Production"
+ role: "Red Hat Enterprise Server"
+ service_level_agreement: "Premium"
+ addons:
+ - addon1
+ - addon2
+ sync: true
+'''
+
+RETURN = '''
+subscribed_pool_ids:
+ description: List of pool IDs to which the system is now subscribed
+ returned: success
+ type: complex
+ sample: {
+ "8a85f9815ab905d3015ab928c7005de4": "1"
+ }
+'''
+
+from os.path import isfile
+from os import unlink
+import re
+import shutil
+import tempfile
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.six.moves import configparser
+
+
+SUBMAN_CMD = None
+
+
+class RegistrationBase(object):
+
+ REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
+
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if isfile(plugin_conf):
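+ # edit a temporary copy and move it into place atomically so the plugin
+ # configuration is never left half-written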
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.module = module
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHSM
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ args = [SUBMAN_CMD, 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
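+ # Note that replace('_', '.', 1) only replaces the first underscore, so a
+ # key such as 'server_proxy_hostname' becomes '--server.proxy_hostname=<value>'.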
+ options = []
+ for k, v in sorted(kwargs.items()):
+ if re.search(r'^(server|rhsm)_', k) and v is not None:
+ options.append('--%s=%s' % (k.replace('_', '.', 1), v))
+
+ # When there is nothing to configure, it is not necessary to run the
+ # config command, because it would only return the current content of
+ # the configuration file
+ if len(options) == 0:
+ return
+
+ args.extend(options)
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered to RHSM.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHSM.
+ '''
+
+ args = [SUBMAN_CMD, 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register, environment,
+ rhsm_baseurl, server_insecure, server_hostname, server_proxy_hostname,
+ server_proxy_port, server_proxy_user, server_proxy_password, release):
+ '''
+ Register the current system to the provided RHSM or Sat6 server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'register']
+
+ # Generate command arguments
+ if force_register:
+ args.extend(['--force'])
+
+ if rhsm_baseurl:
+ args.extend(['--baseurl', rhsm_baseurl])
+
+ if server_insecure:
+ args.extend(['--insecure'])
+
+ if server_hostname:
+ args.extend(['--serverurl', server_hostname])
+
+ if org_id:
+ args.extend(['--org', org_id])
+
+ if server_proxy_hostname and server_proxy_port:
+ args.extend(['--proxy', server_proxy_hostname + ':' + server_proxy_port])
+
+ if server_proxy_user:
+ args.extend(['--proxyuser', server_proxy_user])
+
+ if server_proxy_password:
+ args.extend(['--proxypassword', server_proxy_password])
+
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if auto_attach:
+ args.append('--auto-attach')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+ if consumer_type:
+ args.extend(['--type', consumer_type])
+ if consumer_name:
+ args.extend(['--name', consumer_name])
+ if consumer_id:
+ args.extend(['--consumerid', consumer_id])
+ if environment:
+ args.extend(['--environment', environment])
+
+ if release:
+ args.extend(['--release', release])
+
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def unsubscribe(self, serials=None):
+ '''
+ Unsubscribe a system from subscribed channels
+ Args:
+ serials(list or None): list of serials to unsubscribe. If
+ serials is None, all subscribed channels will be
+ removed; an empty list removes nothing.
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ items = []
+ if serials is not None and serials:
+ items = ["--serial=%s" % s for s in serials]
+ if serials is None:
+ items = ["--all"]
+
+ if items:
+ args = [SUBMAN_CMD, 'unsubscribe'] + items
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return serials
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression. It matches regexp against available pool ids first.
+ If any pool ids match, subscribe to those pools and return.
+
+ If no pool ids match, then match regexp against available pool product
+ names. Note this can still easily match many pools. Then subscribe
+ to those pools.
+
+ Since a pool id is a more specific match, we only fall back to matching
+ against names if we didn't match pool ids.
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ # See https://github.com/ansible/ansible/issues/19466
+
+ # subscribe to pools whose pool id matches regexp (and only the pool id)
+ subscribed_pool_ids = self.subscribe_pool(regexp)
+
+ # If we found any matches, we are done
+ # Don't attempt to match pools by product name
+ if subscribed_pool_ids:
+ return subscribed_pool_ids
+
+ # We didn't match any pool ids.
+ # Now try subscribing to pools based on product name match
+ # Note: This can match lots of product names.
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+ if subscribed_by_product_pool_ids:
+ return subscribed_by_product_pool_ids
+
+ # no matches
+ return []
+
+ def subscribe_by_pool_ids(self, pool_ids):
+ """
+ Try to subscribe to the list of pool IDs
+ """
+ available_pools = RhsmPools(self.module)
+
+ available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if pool_id in available_pool_ids:
+ args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+ if quantity is not None:
+ args.extend(['--quantity', to_native(quantity)])
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ else:
+ self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+ return pool_ids
+
+ def subscribe_pool(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_pools(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def subscribe_product(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_products(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def update_subscriptions(self, regexp):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+ pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+ pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+ serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ subscribed_pool_ids = self.subscribe(regexp)
+
+ if subscribed_pool_ids or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+ 'unsubscribed_serials': serials}
+
+ def update_subscriptions_by_pool_ids(self, pool_ids):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+
+ existing_pools = {}
+ for p in consumed_pools:
+ existing_pools[p.get_pool_id()] = p.QuantityUsed
+
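+ # Remove entitlements whose consumed quantity differs from the requested one
+ # (including pools that are no longer requested); pools that are still requested
+ # are re-attached below with the requested quantity.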
+ serials_to_remove = [p.Serial for p in consumed_pools if pool_ids.get(p.get_pool_id(), 0) != p.QuantityUsed]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ missing_pools = {}
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if existing_pools.get(pool_id, 0) != quantity:
+ missing_pools[pool_id] = quantity
+
+ self.subscribe_by_pool_ids(missing_pools)
+
+ if missing_pools or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': missing_pools.keys(),
+ 'unsubscribed_serials': serials}
+
+ def sync_syspurpose(self):
+ """
+ Try to synchronize syspurpose attributes with server
+ """
+ args = [SUBMAN_CMD, 'status']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def get_pool_id(self):
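+ # depending on how subscription-manager capitalizes the field, the pool id
+ # may have been parsed as either 'PoolId' or 'PoolID', so check both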
+ return getattr(self, 'PoolId', getattr(self, 'PoolID'))
+
+ def subscribe(self):
+ args = "subscription-manager attach --pool %s" % self.get_pool_id()
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+
+ def __init__(self, module, consumed=False):
+ self.module = module
+ self.products = self._load_product_list(consumed)
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self, consumed=False):
+ """
+ Load the list of all available or consumed pools for the system into a data structure
+
+ Args:
+ consumed(bool): if True list consumed pools, else list available pools (default False)
+ """
+ args = "subscription-manager list"
+ if consumed:
+ args += " --consumed"
+ else:
+ args += " --available"
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # normalize key names, e.g. 'Pool ID' -> 'PoolID'
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter_pools(self, regexp='^$'):
+ '''
+ Yield RhsmPool objects whose pool id matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product.get_pool_id()):
+ yield product
+
+ def filter_products(self, regexp='^$'):
+ '''
+ Yield RhsmPool objects whose product name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
+
+
+class SysPurpose(object):
+ """
+ This class is used for reading and writing to syspurpose.json file
+ """
+
+ SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+ ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
+
+ def __init__(self, path=None):
+ """
+ Initialize class used for reading syspurpose json file
+ """
+ self.path = path or self.SYSPURPOSE_FILE_PATH
+
+ def update_syspurpose(self, new_syspurpose):
+ """
+ Try to update current syspurpose with new attributes from new_syspurpose
+ """
+ syspurpose = {}
+ syspurpose_changed = False
+ for key, value in new_syspurpose.items():
+ if key in self.ALLOWED_ATTRIBUTES:
+ if value is not None:
+ syspurpose[key] = value
+ elif key == 'sync':
+ pass
+ else:
+ raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+ (key, self.ALLOWED_ATTRIBUTES))
+ current_syspurpose = self._read_syspurpose()
+ if current_syspurpose != syspurpose:
+ syspurpose_changed = True
+ # Update current syspurpose with new values
+ current_syspurpose.update(syspurpose)
+ # When some key is not listed in new syspurpose, then delete it from current syspurpose
+ # and ignore custom attributes created by user (e.g. "foo": "bar")
+ for key in list(current_syspurpose):
+ if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+ del current_syspurpose[key]
+ self._write_syspurpose(current_syspurpose)
+ return syspurpose_changed
+
+ def _write_syspurpose(self, new_syspurpose):
+ """
+ Write the new_syspurpose attributes to the syspurpose JSON file.
+ """
+ with open(self.path, "w") as fp:
+ fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+ def _read_syspurpose(self):
+ """
+ Read the current syspurpose from the JSON file.
+ """
+ current_syspurpose = {}
+ try:
+ with open(self.path, "r") as fp:
+ content = fp.read()
+ except IOError:
+ pass
+ else:
+ current_syspurpose = json.loads(content)
+ return current_syspurpose
+
+
+def main():
+
+ # Create the Rhsm helper; the AnsibleModule object is attached to it below
+ rhsm = Rhsm(None)
+
+ # Note: the default values for parameters are:
+ # 'type': 'str', 'default': None, 'required': False
+ # So there is no need to repeat these values for each parameter.
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'server_hostname': {},
+ 'server_insecure': {},
+ 'rhsm_baseurl': {},
+ 'rhsm_repo_ca_cert': {},
+ 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'activationkey': {'no_log': True},
+ 'org_id': {},
+ 'environment': {},
+ 'pool': {'default': '^$'},
+ 'pool_ids': {'default': [], 'type': 'list'},
+ 'consumer_type': {},
+ 'consumer_name': {},
+ 'consumer_id': {},
+ 'force_register': {'default': False, 'type': 'bool'},
+ 'server_proxy_hostname': {},
+ 'server_proxy_port': {},
+ 'server_proxy_user': {},
+ 'server_proxy_password': {'no_log': True},
+ 'release': {},
+ 'syspurpose': {
+ 'type': 'dict',
+ 'options': {
+ 'role': {},
+ 'usage': {},
+ 'service_level_agreement': {},
+ 'addons': {'type': 'list'},
+ 'sync': {'type': 'bool', 'default': False}
+ }
+ }
+ },
+ required_together=[['username', 'password'],
+ ['server_proxy_hostname', 'server_proxy_port'],
+ ['server_proxy_user', 'server_proxy_password']],
+ mutually_exclusive=[['activationkey', 'username'],
+ ['activationkey', 'consumer_id'],
+ ['activationkey', 'environment'],
+ ['activationkey', 'autosubscribe'],
+ ['pool', 'pool_ids']],
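+ # the trailing True means at least one of username/activationkey is required when state=present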
+ required_if=[['state', 'present', ['username', 'activationkey'], True]],
+ )
+
+ rhsm.module = module
+ state = module.params['state']
+ username = module.params['username']
+ password = module.params['password']
+ server_hostname = module.params['server_hostname']
+ server_insecure = module.params['server_insecure']
+ rhsm_baseurl = module.params['rhsm_baseurl']
+ rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
+ auto_attach = module.params['auto_attach']
+ activationkey = module.params['activationkey']
+ org_id = module.params['org_id']
+ if activationkey and not org_id:
+ module.fail_json(msg='org_id is required when using activationkey')
+ environment = module.params['environment']
+ pool = module.params['pool']
+ pool_ids = {}
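+ # Normalize pool_ids: each entry may be a plain pool ID string or a single-key
+ # dict of {pool_id: quantity}; store everything as {pool_id: quantity or None}.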
+ for value in module.params['pool_ids']:
+ if isinstance(value, dict):
+ if len(value) != 1:
+ module.fail_json(msg='Unable to parse pool_ids option.')
+ pool_id, quantity = list(value.items())[0]
+ else:
+ pool_id, quantity = value, None
+ pool_ids[pool_id] = quantity
+ consumer_type = module.params["consumer_type"]
+ consumer_name = module.params["consumer_name"]
+ consumer_id = module.params["consumer_id"]
+ force_register = module.params["force_register"]
+ server_proxy_hostname = module.params['server_proxy_hostname']
+ server_proxy_port = module.params['server_proxy_port']
+ server_proxy_user = module.params['server_proxy_user']
+ server_proxy_password = module.params['server_proxy_password']
+ release = module.params['release']
+ syspurpose = module.params['syspurpose']
+
+ global SUBMAN_CMD
+ SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
+
+ syspurpose_changed = False
+ if syspurpose is not None:
+ try:
+ syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
+ except Exception as err:
+ module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Register system
+ if rhsm.is_registered and not force_register:
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ try:
+ rhsm.sync_syspurpose()
+ except Exception as e:
+ module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
+ if pool != '^$' or pool_ids:
+ try:
+ if pool_ids:
+ result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
+ else:
+ result = rhsm.update_subscriptions(pool)
+ except Exception as e:
+ module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(**result)
+ else:
+ if syspurpose_changed is True:
+ module.exit_json(changed=True, msg="Syspurpose attributes changed.")
+ else:
+ module.exit_json(changed=False, msg="System already registered.")
+ else:
+ try:
+ rhsm.enable()
+ rhsm.configure(**module.params)
+ rhsm.register(username, password, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register,
+ environment, rhsm_baseurl, server_insecure, server_hostname,
+ server_proxy_hostname, server_proxy_port, server_proxy_user, server_proxy_password, release)
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ rhsm.sync_syspurpose()
+ if pool_ids:
+ subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
+ elif pool != '^$':
+ subscribed_pool_ids = rhsm.subscribe(pool)
+ else:
+ subscribed_pool_ids = []
+ except Exception as e:
+ module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(changed=True,
+ msg="System successfully registered to '%s'." % server_hostname,
+ subscribed_pool_ids=subscribed_pool_ids)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhsm.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+ else:
+ try:
+ rhsm.unsubscribe()
+ rhsm.unregister()
+ except Exception as e:
+ module.fail_json(msg="Failed to unregister: %s" % to_native(e))
+ else:
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py
new file mode 100644
index 00000000..5ffbd7db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, slave and flush
+description:
+ - Unified utility to interact with redis instances.
+options:
+ command:
+ description:
+ - The selected redis command
+ - C(config) (new in 1.6) ensures a configuration setting on an instance.
+ - C(flush) flushes all of the instance's databases or a specified one.
+ - C(slave) sets a redis instance in slave or master mode.
+ choices: [ config, flush, slave ]
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with (usually not used)
+ type: str
+ login_host:
+ description:
+ - The host running the database
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - The port to connect to
+ default: 6379
+ type: int
+ master_host:
+ description:
+ - The host of the master instance [slave command]
+ type: str
+ master_port:
+ description:
+ - The port of the master instance [slave command]
+ type: int
+ slave_mode:
+ description:
+ - the mode of the redis instance [slave command]
+ default: slave
+ choices: [ master, slave ]
+ type: str
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ type: int
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ default: all
+ choices: [ all, db ]
+ type: str
+ name:
+ description:
+ - A redis config key.
+ type: str
+ value:
+ description:
+ - A redis config value. When memory size is needed, it is possible
+ to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
+ Units are case insensitive, i.e. 1m = 1mb = 1M = 1MB.
+ type: str
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+ install it with pip (pip install redis) or with a package manager.
+ https://github.com/andymccurdy/redis-py
+ - If the redis master instance that we are making a slave of is password protected,
+ this needs to be set in redis.conf via the masterauth variable
+
+seealso:
+ - module: community.general.redis_info
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be slave of melee.island on port 6377
+ community.general.redis:
+ command: slave
+ master_host: melee.island
+ master_port: 6377
+
+- name: Deactivate slave mode
+ community.general.redis:
+ command: slave
+ slave_mode: master
+
+- name: Flush all the redis db
+ community.general.redis:
+ command: flush
+ flush_mode: all
+
+- name: Flush only one db in a redis instance
+ community.general.redis:
+ command: flush
+ db: 1
+ flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+ community.general.redis:
+ command: config
+ name: maxclients
+ value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+ community.general.redis:
+ command: config
+ name: maxmemory
+ value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+ community.general.redis:
+ command: config
+ name: lua-time-limit
+ value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ import redis
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ redis_found = False
+else:
+ redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils._text import to_native
+import re
+
+
+# Redis module specific support methods.
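+# Each helper below returns False on any exception; main() turns that into a module failure.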
+def set_slave_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if not isinstance(db, int):
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(type='str', choices=['config', 'flush', 'slave']),
+ login_password=dict(type='str', no_log=True),
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ master_host=dict(type='str'),
+ master_port=dict(type='int'),
+ slave_mode=dict(type='str', default='slave', choices=['master', 'slave']),
+ db=dict(type='int'),
+ flush_mode=dict(type='str', default='all', choices=['all', 'db']),
+ name=dict(type='str'),
+ value=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ if not redis_found:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ command = module.params['command']
+
+ # Slave Command section -----------
+ if command == "slave":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['slave_mode']
+
+ # Check if we have all the data
+ if mode == "slave": # Only need data if we want to be slave
+ if not master_host:
+ module.fail_json(msg='In slave mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(msg='In slave mode master port must be provided')
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Check if we are already in the mode that we want
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "slave" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
+ status = dict(
+ status=mode,
+ master_host=master_host,
+ master_port=master_port,
+ )
+ module.exit_json(changed=False, mode=status)
+ else:
+ # Do the stuff
+ # (check check_mode before running commands so the commands are not
+ # executed unnecessarily)
+ if mode == "slave":
+ if module.check_mode or\
+ set_slave_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set slave mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ # Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(msg="In db mode the db number must be provided")
+
+ # Connect and check
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password, db=db)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Do the stuff
+ # (check check_mode before running commands so the commands are not
+ # executed unnecessarily)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+
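+ # values that look like a memory size (e.g. '4GB') are converted to a plain
+ # byte count with human_to_bytes, so they can be compared with the numeric
+ # value that CONFIG GET reports for settings such as maxmemory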
+ try: # try to parse the value as if it were the memory size
+ if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
+ value = str(human_to_bytes(module.params['value'].upper()))
+ else:
+ value = module.params['value']
+ except ValueError:
+ value = module.params['value']
+
+ r = redis.StrictRedis(host=login_host, port=login_port, password=login_password)
+
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py
new file mode 100644
index 00000000..b615addb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/redis_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+- Gathers information and statistics about Redis servers.
+options:
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to.
+ type: int
+ default: 6379
+ login_password:
+ description:
+ - The password used to authenticate with, when authentication is enabled for the Redis server.
+ type: str
+notes:
+- Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+seealso:
+- module: community.general.redis
+requirements: [ redis ]
+author: "Pavlo Bashynskyi (@levonet)"
+'''
+
+EXAMPLES = r'''
+- name: Get server information
+ community.general.redis_info:
+ register: result
+
+- name: Print server information
+ ansible.builtin.debug:
+ var: result.info
+'''
+
+RETURN = r'''
+info:
+ description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success
+ type: dict
+ sample: {
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 932409344,
+ "allocator_allocated": 932062792,
+ "allocator_frag_bytes": 346552,
+ "allocator_frag_ratio": 1.0,
+ "allocator_resident": 947253248,
+ "allocator_rss_bytes": 14843904,
+ "allocator_rss_ratio": 1.02,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_enabled": 0,
+ "aof_last_bgrewrite_status": "ok",
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_last_write_status": "ok",
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "atomicvar_api": "atomic-builtin",
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 4,
+ "client_recent_max_output_buffer": 0,
+ "cluster_enabled": 0,
+ "config_file": "",
+ "configured_hz": 10,
+ "connected_clients": 4,
+ "connected_slaves": 0,
+ "db0": {
+ "avg_ttl": 1945628530,
+ "expires": 16,
+ "keys": 3341411
+ },
+ "evicted_keys": 0,
+ "executable": "/data/redis-server",
+ "expired_keys": 9,
+ "expired_stale_perc": 1.72,
+ "expired_time_cap_reached_count": 0,
+ "gcc_version": "9.2.0",
+ "hz": 10,
+ "instantaneous_input_kbps": 0.0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0.0,
+ "keyspace_hits": 0,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 0,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 11603632,
+ "master_repl_offset": 118831417,
+ "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e",
+ "master_replid2": "0000000000000000000000000000000000000000",
+ "maxmemory": 0,
+ "maxmemory_human": "0B",
+ "maxmemory_policy": "noeviction",
+ "mem_allocator": "jemalloc-5.1.0",
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 49694,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 12355480,
+ "mem_fragmentation_ratio": 1.01,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 1048576,
+ "migrate_cached_sockets": 0,
+ "multiplexing_api": "epoll",
+ "number_of_cached_scripts": 0,
+ "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64",
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 671,
+ "rdb_current_bgsave_time_sec": -1,
+ "rdb_last_bgsave_status": "ok",
+ "rdb_last_bgsave_time_sec": -1,
+ "rdb_last_cow_size": 0,
+ "rdb_last_save_time": 1588702236,
+ "redis_build_id": "a31260535f820267",
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "redis_mode": "standalone",
+ "redis_version": "999.999.999",
+ "rejected_connections": 0,
+ "repl_backlog_active": 1,
+ "repl_backlog_first_byte_offset": 118707937,
+ "repl_backlog_histlen": 123481,
+ "repl_backlog_size": 1048576,
+ "role": "master",
+ "rss_overhead_bytes": -3051520,
+ "rss_overhead_ratio": 1.0,
+ "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4",
+ "second_repl_offset": 118830003,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 885,
+ "total_connections_received": 10,
+ "total_net_input_bytes": 802709255,
+ "total_net_output_bytes": 31754,
+ "total_system_memory": 135029538816,
+ "total_system_memory_human": "125.76G",
+ "uptime_in_days": 53,
+ "uptime_in_seconds": 4631778,
+ "used_cpu_sys": 4.668282,
+ "used_cpu_sys_children": 0.002191,
+ "used_cpu_user": 4.21088,
+ "used_cpu_user_children": 0.0,
+ "used_memory": 931908760,
+ "used_memory_dataset": 910774306,
+ "used_memory_dataset_perc": "97.82%",
+ "used_memory_human": "888.74M",
+ "used_memory_lua": 37888,
+ "used_memory_lua_human": "37.00K",
+ "used_memory_overhead": 21134454,
+ "used_memory_peak": 932015216,
+ "used_memory_peak_human": "888.84M",
+ "used_memory_peak_perc": "99.99%",
+ "used_memory_rss": 944201728,
+ "used_memory_rss_human": "900.46M",
+ "used_memory_scripts": 0,
+ "used_memory_scripts_human": "0B",
+ "used_memory_startup": 791264
+ }
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ from redis import StrictRedis
+ HAS_REDIS_PACKAGE = True
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def redis_client(**client_params):
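+    """Return a StrictRedis client for the given connection parameters."""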
+ return StrictRedis(**client_params)
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ login_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_REDIS_PACKAGE:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_password = module.params['login_password']
+
+ # Connect and check
+ client = redis_client(host=login_host, port=login_port, password=login_password)
+ try:
+ client.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ info = client.info()
+ module.exit_json(changed=False, info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py
new file mode 100644
index 00000000..2e5f080d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_sync.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_sync
+short_description: Sync Cobbler
+description:
+- Sync Cobbler to commit changes.
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+author:
+- Dag Wieers (@dagwieers)
+todo:
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On Python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+ community.general.cobbler_sync:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ run_once: yes
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
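+    # Derive the URL scheme and default port for the Cobbler XML-RPC endpoint from use_ssl.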
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=True,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
+ ssl._create_default_context = ssl._create_unverified_context
+ else: # Python 2.7.8 and older
+ ssl._create_default_https_context = ssl._create_unverified_https_context
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+ if not module.check_mode:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py
new file mode 100644
index 00000000..ecabcc8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/cobbler/cobbler_system.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+- Add, modify or remove systems in Cobbler
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(no), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: 'yes'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ name:
+ description:
+ - The system name to manage.
+ type: str
+ properties:
+ description:
+ - A dictionary with system properties.
+ type: dict
+ interfaces:
+ description:
+    - A dictionary of dictionaries containing interface options, keyed by interface name.
+ type: dict
+ sync:
+ description:
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
+ type: bool
+ default: no
+ state:
+ description:
+    - Whether the system should be present or absent, or whether a query should be made.
+ choices: [ absent, present, query ]
+ default: present
+ type: str
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On Python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ properties:
+ profile: CentOS6-x86_64
+ name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers_search: foo.com, bar.com
+ interfaces:
+ eth0:
+ macaddress: 00:01:02:03:04:05
+ ipaddress: 1.2.3.4
+ delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+ community.general.cobbler_system:
+ host: bdsol-aci-cobbler-01
+ username: cobbler
+ password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com
+ properties:
+ netboot_enabled: yes
+ state: present
+ delegate_to: localhost
+
+- name: Query all systems in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ state: query
+ register: cobbler_systems
+ delegate_to: localhost
+
+- name: Query a specific system in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: '{{ inventory_hostname }}'
+ state: query
+ register: cobbler_properties
+ delegate_to: localhost
+
+- name: Ensure the system does not exist in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+systems:
+ description: List of systems
+ returned: C(state=query) and C(name) is not provided
+ type: list
+system:
+ description: (Resulting) information about the system we are working with
+ returned: when C(name) is provided
+ type: dict
+'''
+
+import copy
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils._text import to_text
+
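+# Map the interface option names accepted by this module to the property names
+# expected by Cobbler's modify_system API.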
+IFPROPS_MAPPING = dict(
+ bondingopts='bonding_opts',
+ bridgeopts='bridge_opts',
+ connected_mode='connected_mode',
+ cnames='cnames',
+ dhcptag='dhcp_tag',
+ dnsname='dns_name',
+ ifgateway='if_gateway',
+ interfacetype='interface_type',
+ interfacemaster='interface_master',
+ ipaddress='ip_address',
+ ipv6address='ipv6_address',
+ ipv6defaultgateway='ipv6_default_gateway',
+ ipv6mtu='ipv6_mtu',
+ ipv6prefix='ipv6_prefix',
+    ipv6secondaries='ipv6_secondaries',
+ ipv6staticroutes='ipv6_static_routes',
+ macaddress='mac_address',
+ management='management',
+ mtu='mtu',
+ netmask='netmask',
+ static='static',
+ staticroutes='static_routes',
+ virtbridge='virt_bridge',
+)
+
+
+def getsystem(conn, name, token):
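+    """Look up a single system by name via find_system(); returns an empty dict if it does not exist."""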
+ system = dict()
+ if name:
+ # system = conn.get_system(name, token)
+ systems = conn.find_system(dict(name=name), token)
+ if systems:
+ system = systems[0]
+ return system
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ name=dict(type='str'),
+ interfaces=dict(type='dict'),
+ properties=dict(type='dict'),
+ sync=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ state = module.params['state']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=False,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+ except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
+ ssl._create_default_context = ssl._create_unverified_context
+ else: # Python 2.7.8 and older
+ ssl._create_default_https_context = ssl._create_unverified_https_context
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
+
+ system = getsystem(conn, name, token)
+ # result['system'] = system
+
+ if state == 'query':
+ if name:
+ result['system'] = system
+ else:
+ # Turn it into a dictionary of dictionaries
+ # all_systems = conn.get_systems()
+ # result['systems'] = { system['name']: system for system in all_systems }
+
+ # Return a list of dictionaries
+ result['systems'] = conn.get_systems()
+
+ elif state == 'present':
+
+ if system:
+ # Update existing entry
+ system_id = conn.get_system_handle(name, token)
+
+ for key, value in iteritems(module.params['properties']):
+                if key not in system:
+                    module.warn("Property '{0}' is not a valid system property.".format(key))
+                    continue
+                if system[key] != value:
+ try:
+ conn.modify_system(system_id, key, value, token)
+ result['changed'] = True
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ else:
+ # Create a new entry
+ system_id = conn.new_system(token)
+ conn.modify_system(system_id, 'name', name, token)
+ result['changed'] = True
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ try:
+ conn.modify_system(system_id, key, value, token)
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ # Add interface properties
+ interface_properties = dict()
+ if module.params['interfaces']:
+ for device, values in iteritems(module.params['interfaces']):
+ for key, value in iteritems(values):
+ if key == 'name':
+ continue
+                if key not in IFPROPS_MAPPING:
+                    module.warn("Property '{0}' is not a valid interface property.".format(key))
+                    continue
+                if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
+ result['changed'] = True
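+                    # Cobbler's modify_interface expects keys of the form "<property>-<interface>".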
+ interface_properties['{0}-{1}'.format(key, device)] = value
+
+ if result['changed'] is True:
+ conn.modify_system(system_id, "modify_interface", interface_properties, token)
+
+ # Only save when the entry was changed
+ if not module.check_mode and result['changed']:
+ conn.save_system(system_id, token)
+
+ elif state == 'absent':
+
+ if system:
+ if not module.check_mode:
+ conn.remove_system(name, token)
+ result['changed'] = True
+
+ if not module.check_mode and module.params['sync'] and result['changed']:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
+
+ if state in ('absent', 'present'):
+ result['system'] = getsystem(conn, name, token)
+
+ if module._diff:
+ result['diff'] = dict(before=system, after=result['system'])
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py
new file mode 100644
index 00000000..fa8ac66c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_firmware.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2018-2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_firmware
+short_description: Firmware update from a repository on a network share (CIFS, NFS).
+description:
+ - Update the Firmware by connecting to a network share (either CIFS or NFS) that contains a catalog of
+ available updates.
+ - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
+ - All applicable updates contained in the repository are applied to the system.
+ - This feature is available only with iDRAC Enterprise License.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ share_name:
+ description: CIFS or NFS Network share.
+ type: str
+ required: True
+ share_user:
+        description: Network share user in the format 'user@domain' or 'domain\\user' if the user is
+            part of a domain, else 'user'. This option is mandatory for CIFS network shares.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ share_mnt:
+    description: Local mount path of the network share with read-write permission for the ansible user.
+      This option is mandatory for network shares.
+ type: str
+ required: True
+ reboot:
+    description: Whether to reboot the server after applying the updates.
+ type: bool
+ default: false
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: true
+ catalog_file_name:
+ required: False
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: 'Catalog.xml'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+'''
+
+EXAMPLES = """
+---
+- name: Update firmware from repository on a Network Share
+ community.general.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.0:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_pwd"
+ share_mnt: "/mnt/share"
+ reboot: True
+ job_wait: True
+ catalog_file_name: "Catalog.xml"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+  description: Overall firmware update status.
+ returned: always
+ sample: "Successfully updated the firmware."
+update_status:
+ type: dict
+ description: Firmware Update job and progress details from the iDRAC.
+ returned: success
+ sample: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+"""
+
+
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+def _validate_catalog_file(catalog_file_name):
+    normalized_file_name = catalog_file_name.lower()
+    if not normalized_file_name:
+        raise ValueError('catalog_file_name should be a non-empty string.')
+    elif not normalized_file_name.endswith("xml"):
+ raise ValueError('catalog_file_name should be an XML file.')
+
+
+def update_firmware(idrac, module):
+ """Update firmware from a network share and return the job details."""
+ msg = {}
+ msg['changed'] = False
+ msg['update_status'] = {}
+
+ try:
+ upd_share = FileOnShare(remote=module.params['share_name'] + "/" + module.params['catalog_file_name'],
+ mount_point=module.params['share_mnt'],
+ isFolder=False,
+ creds=UserCredentials(
+ module.params['share_user'],
+ module.params['share_password'])
+ )
+
+ idrac.use_redfish = True
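+        # 12th and 13th generation servers are updated over the legacy (non-Redfish) interface, so disable Redfish for them.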
+ if '12' in idrac.ServerGeneration or '13' in idrac.ServerGeneration:
+ idrac.use_redfish = False
+
+ apply_update = True
+ msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share,
+ apply_update,
+ module.params['reboot'],
+ module.params['job_wait'])
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+
+ if "Status" in msg['update_status']:
+ if msg['update_status']['Status'] == "Success":
+ if module.params['job_wait']:
+ msg['changed'] = True
+ else:
+ module.fail_json(msg='Failed to update firmware.', update_status=msg['update_status'])
+ return msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"required": True, "type": 'str'},
+
+ "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
+ "reboot": {"required": False, "type": 'bool', "default": False},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ },
+
+ supports_check_mode=False)
+
+ try:
+ # Validate the catalog file
+ _validate_catalog_file(module.params['catalog_file_name'])
+ # Connect to iDRAC and update firmware
+ with iDRACConnection(module.params) as idrac:
+ update_status = update_firmware(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg='Successfully updated the firmware.', update_status=update_status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py
new file mode 100644
index 00000000..39857fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/idrac_server_config_profile.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_server_config_profile
+short_description: Export or Import iDRAC Server Configuration Profile (SCP).
+description:
+    - Export the Server Configuration Profile (SCP) from the iDRAC or import it from a network share or a local file.
+options:
+ idrac_ip:
+ description: iDRAC IP Address.
+ type: str
+ required: True
+ idrac_user:
+ description: iDRAC username.
+ type: str
+ required: True
+ idrac_password:
+ description: iDRAC user password.
+ type: str
+ required: True
+ aliases: ['idrac_pwd']
+ idrac_port:
+ description: iDRAC port.
+ type: int
+ default: 443
+ command:
+ description:
+ - If C(import), will perform SCP import operations.
+ - If C(export), will perform SCP export operations.
+ choices: ['import', 'export']
+ default: 'export'
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ required: True
+ share_name:
+ description: CIFS or NFS Network Share or a local path.
+ type: str
+ required: True
+ share_user:
+        description: Network share user in the format 'user@domain' or 'domain\\user' if the user is
+            part of a domain, else 'user'. This option is mandatory for CIFS network shares.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ scp_file:
+ description: Server Configuration Profile file name. This option is mandatory for C(import) command.
+ type: str
+ scp_components:
+ description:
+ - If C(ALL), this module will import all components configurations from SCP file.
+ - If C(IDRAC), this module will import iDRAC configuration from SCP file.
+ - If C(BIOS), this module will import BIOS configuration from SCP file.
+ - If C(NIC), this module will import NIC configuration from SCP file.
+ - If C(RAID), this module will import RAID configuration from SCP file.
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: 'ALL'
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), it gracefully shuts down the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), it does not reboot the server.
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+ choices: ['On' ,'Off']
+ default: 'On'
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ choices: ['JSON', 'XML']
+ default: 'XML'
+ export_use:
+ description: Specify the type of server configuration profile (SCP) to be exported.
+ This option is applicable for C(export) command.
+ choices: ['Default', 'Clone', 'Replace']
+ default: 'Default'
+
+requirements:
+ - "omsdk"
+ - "python >= 2.7.5"
+author: "Jagadeesh N V(@jagadeeshnv)"
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Import Server Configuration Profile from a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Import Server Configuration Profile from a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ scp_file: "scp_filename.xml"
+ scp_components: "ALL"
+ job_wait: True
+
+- name: Export Server Configuration Profile to a network share
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "192.168.0.2:/share"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+
+- name: Export Server Configuration Profile to a local path
+ community.general.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ share_name: "/scp_folder"
+ share_user: "share_user_name"
+ share_password: "share_user_password"
+ job_wait: False
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the import or export SCP job.
+ returned: always
+ sample: "Successfully imported the Server Configuration Profile"
+scp_status:
+ type: dict
+ description: SCP operation job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+'''
+
+import os
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+ from omdrivers.enums.iDRAC.iDRACEnums import (SCPTargetEnum, EndHostPowerStateEnum,
+ ShutdownTypeEnum, ExportFormatEnum, ExportUseEnum)
+except ImportError:
+ pass
+
+
+def run_import_server_config_profile(idrac, module):
+ """Import Server Configuration Profile from a network share."""
+ target = SCPTargetEnum[module.params['scp_components']]
+ job_wait = module.params['job_wait']
+ end_host_power_state = EndHostPowerStateEnum[module.params['end_host_power_state']]
+ shutdown_type = ShutdownTypeEnum[module.params['shutdown_type']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(
+ share_path="{0}{1}{2}".format(module.params['share_name'], os.sep, module.params['scp_file']),
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']), isFolder=False)
+ import_status = idrac.config_mgr.scp_import(myshare,
+ target=target, shutdown_type=shutdown_type,
+ end_host_power_state=end_host_power_state,
+ job_wait=job_wait)
+ if not import_status or import_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to import scp.', scp_status=import_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return import_status
+
+
+def run_export_server_config_profile(idrac, module):
+ """Export Server Configuration Profile to a network share."""
+ export_format = ExportFormatEnum[module.params['export_format']]
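+    # Export file name pattern; the %ip and timestamp placeholders are expanded by omsdk when the file is created on the share.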
+ scp_file_name_format = "%ip_%Y%m%d_%H%M%S_scp.{0}".format(module.params['export_format'].lower())
+ target = SCPTargetEnum[module.params['scp_components']]
+ export_use = ExportUseEnum[module.params['export_use']]
+ idrac.use_redfish = True
+
+ try:
+ myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+ creds=UserCredentials(module.params['share_user'],
+ module.params['share_password']),
+ isFolder=True)
+ scp_file_name = myshare.new_file(scp_file_name_format)
+ export_status = idrac.config_mgr.scp_export(scp_file_name,
+ target=target,
+ export_format=export_format,
+ export_use=export_use,
+ job_wait=module.params['job_wait'])
+ if not export_status or export_status.get('Status') != "Success":
+ module.fail_json(msg='Failed to export scp.', scp_status=export_status)
+ except RuntimeError as e:
+ module.fail_json(msg=str(e))
+ return export_status
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str',
+ "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"required": False,
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
+ "default": 'ALL'},
+
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+ },
+ required_if=[
+ ["command", "import", ["scp_file"]]
+ ],
+ supports_check_mode=False)
+
+ try:
+ changed = False
+ with iDRACConnection(module.params) as idrac:
+ command = module.params['command']
+ if command == 'import':
+ scp_status = run_import_server_config_profile(idrac, module)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ else:
+ scp_status = run_export_server_config_profile(idrac, module)
+ module.exit_json(changed=changed, msg="Successfully {0}ed the Server Configuration Profile.".format(command),
+ scp_status=scp_status)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py
new file mode 100644
index 00000000..68fbb1e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/dellemc/ome_device_info.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 1.2
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_device_info
+short_description: Retrieves the information about devices.
+description:
+    - This module retrieves the list of all devices along with the exhaustive inventory of each
+      device.
+options:
+ hostname:
+ description:
+ - Target IP Address or hostname.
+ type: str
+ required: True
+ username:
+ description:
+ - Target username.
+ type: str
+ required: True
+ password:
+ description:
+ - Target user password.
+ type: str
+ required: True
+ port:
+ description:
+ - Target HTTPS port.
+ type: int
+ default: 443
+ fact_subset:
+ description:
+ - C(basic_inventory) returns the list of the devices.
+ - C(detailed_inventory) returns the inventory details of specified devices.
+ - C(subsystem_health) returns the health status of specified devices.
+ type: str
+ choices: [basic_inventory, detailed_inventory, subsystem_health ]
+ default: basic_inventory
+ system_query_options:
+ description:
+            - I(system_query_options) is applicable for the choices of I(fact_subset). Either I(device_id) or I(device_service_tag)
+              is mandatory for C(detailed_inventory) and C(subsystem_health), or both can be provided.
+ type: dict
+ suboptions:
+ device_id:
+ description:
+                    - A list of unique device identifiers, applicable
+                      for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ device_service_tag:
+ description:
+ - A list of service tags are applicable for C(detailed_inventory)
+ and C(subsystem_health).
+ type: list
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 2.7.5"
+author: "Sajna Shetty(@Sajna-Shetty)"
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags.
+ community.general.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+  description: Overall device information status.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: Returns the information collected from the Device.
+ returned: success
+ sample: {
+ "value": [
+ {
+ "Actions": null,
+ "AssetTag": null,
+ "ChassisServiceTag": null,
+ "ConnectionState": true,
+ "DeviceManagement": [
+ {
+ "DnsName": "dnsname.host.com",
+ "InstrumentationName": "MX-12345",
+ "MacAddress": "11:10:11:10:11:10",
+ "ManagementId": 12345,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 12345,
+ "ManagementProfileId": 12345,
+ "ManagementURL": "https://192.168.0.1:443",
+ "Status": 1000,
+ "StatusDateTime": "2019-01-21 06:30:08.501"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "192.168.0.1"
+ }
+ ],
+ "DeviceName": "MX-0003I",
+ "DeviceServiceTag": "MXL1234",
+ "DeviceSubscription": null,
+ "LastInventoryTime": "2019-01-21 06:30:08.501",
+ "LastStatusTime": "2019-01-21 06:30:02.492",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX7000",
+ "PowerState": 17,
+ "SlotConfiguration": {},
+ "Status": 4000,
+ "SystemId": 2031,
+ "Type": 2000
+ }
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICES_INVENTORY_DETAILS = "detailed_inventory"
+DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
+DEVICES_INVENTORY_TYPE = "inventory_type"
+DEVICE_LIST = "basic_inventory"
+DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
+device_fact_error_report = {}
+
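+# REST resource paths for each fact_subset; {Id} and {InventoryType} are substituted per device before the request is made.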
+DEVICE_RESOURCE_COLLECTION = {
+ DEVICE_LIST: {"resource": "DeviceService/Devices"},
+ DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
+ DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
+ DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
+}
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj):
+ """
+    Get device ids from device service tags.
+    :arg service_tags: list of device service tags
+    :arg rest_obj: RestOME class object used to make the request within a session.
+    :returns: dict mapping device_id to service_tag, e.g. {1345: "MXL1245"}
+ """
+ try:
+ path = DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]
+ resp = rest_obj.invoke_request('GET', path)
+ if resp.success:
+ devices_list = resp.json_data["value"]
+ service_tag_dict = {}
+ for item in devices_list:
+ if item["DeviceServiceTag"] in service_tags:
+ service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+ available_service_tags = service_tag_dict.values()
+ not_available_service_tag = list(set(service_tags) - set(available_service_tags))
+ device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in not_available_service_tag))
+ else:
+ raise ValueError(resp.json_data)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return service_tag_dict
+
+
+def is_int(val):
+    """Check whether the given device_id value can be converted to an int."""
+ try:
+ int(val)
+ return True
+ except ValueError:
+ return False
+
+
+def _check_duplicate_device_id(device_id_list, service_tag_dict):
+    """If a service tag resolves to a device id that was also passed explicitly, record it as a duplicate report.
+    :arg device_id_list: list of device ids
+    :arg service_tag_dict: dictionary mapping device_id to service tag"""
+ if device_id_list:
+ device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
+ common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys()))
+ for device_id in common_val:
+ device_fact_error_report.update(
+ {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
+ del service_tag_dict[device_id]
+
+
+def _get_device_identifier_map(module_params, rest_obj):
+ """
+    Builds the identifiers mapping.
+    :returns: dict of identifier type to device_id/service_tag maps,
+    e.g. {"device_id": {1234: None}, "device_service_tag": {1345: "MXL1234"}}"""
+ system_query_options_param = module_params.get("system_query_options")
+ device_id_service_tag_dict = {}
+ if system_query_options_param is not None:
+ device_id_list = system_query_options_param.get("device_id")
+ device_service_tag_list = system_query_options_param.get("device_service_tag")
+ if device_id_list:
+ device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list)))
+ device_id_service_tag_dict["device_id"] = device_id_dict
+ if device_service_tag_list:
+ service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list,
+ rest_obj)
+
+ _check_duplicate_device_id(device_id_list, service_tag_dict)
+ device_id_service_tag_dict["device_service_tag"] = service_tag_dict
+ return device_id_service_tag_dict
+
+
+def _get_query_parameters(module_params):
+ """
+    Builds the query parameter.
+    :returns: dictionary in the OData query format,
+    e.g. {"$filter": "Type eq 2000"}
+ """
+ system_query_options_param = module_params.get("system_query_options")
+ query_parameter = None
+ if system_query_options_param:
+ filter_by_val = system_query_options_param.get("filter")
+ if filter_by_val:
+ query_parameter = {"$filter": filter_by_val}
+ return query_parameter
+
+
+def _get_resource_parameters(module_params, rest_obj):
+ """
+    Identifies the resource path for each requested identifier.
+    :returns: dictionary mapping each identifier to its resource path,
+    e.g. {"device_id": {1234: "DeviceService/Devices(1234)/InventoryDetails"},
+    "device_service_tag": {"MXL1234": "DeviceService/Devices(1345)/InventoryDetails"}}
+ """
+ fact_subset = module_params["fact_subset"]
+ path_dict = {}
+ if fact_subset != DEVICE_LIST:
+ inventory_type = None
+ device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj)
+ if fact_subset == DEVICES_INVENTORY_DETAILS:
+ system_query_options = module_params.get("system_query_options")
+ inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE)
+ path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
+ for identifier_type, identifier_dict in device_id_service_tag_dict.items():
+ path_dict[identifier_type] = {}
+ for device_id, service_tag in identifier_dict.items():
+ key_identifier = service_tag if identifier_type == "device_service_tag" else device_id
+ path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id,
+ InventoryType=inventory_type)
+ path_dict[identifier_type].update({key_identifier: path})
+ else:
+ path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]})
+ return path_dict
+
+
+def _check_mutually_inclusive_arguments(val, module_params, required_args):
+    """
+    Raises an error if detailed_inventory or subsystem_health is requested
+    without the qualifier device_id or device_service_tag"""
+ system_query_options_param = module_params.get("system_query_options")
+ if system_query_options_param is None or (system_query_options_param is not None and not any(
+ system_query_options_param.get(qualifier) for qualifier in required_args)):
+ raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
+
+
+def _validate_inputs(module_params):
+ """validates input parameters"""
+ fact_subset = module_params["fact_subset"]
+ if fact_subset != "basic_inventory":
+ _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"])
+
+
+def main():
+ system_query_options = {"type": 'dict', "required": False, "options": {
+ "device_id": {"type": 'list'},
+ "device_service_tag": {"type": 'list'},
+ "inventory_type": {"type": 'str'},
+ "filter": {"type": 'str', "required": False},
+ }}
+
+ module = AnsibleModule(
+ argument_spec={
+ "hostname": {"required": True, "type": 'str'},
+ "username": {"required": True, "type": 'str'},
+ "password": {"required": True, "type": 'str', "no_log": True},
+ "port": {"required": False, "default": 443, "type": 'int'},
+ "fact_subset": {"required": False, "default": "basic_inventory",
+ "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
+ "system_query_options": system_query_options,
+ },
+ required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
+ ['fact_subset', 'subsystem_health', ['system_query_options']], ],
+ supports_check_mode=False)
+
+ try:
+ _validate_inputs(module.params)
+ with RestOME(module.params, req_session=True) as rest_obj:
+ device_facts = _get_resource_parameters(module.params, rest_obj)
+ resp_status = []
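+            # basic_inventory is a single collection request; the other subsets are fetched per device identifier.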
+ if device_facts.get("basic_inventory"):
+ query_param = _get_query_parameters(module.params)
+ resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
+ device_facts = resp.json_data
+ resp_status.append(resp.status_code)
+ else:
+ for identifier_type, path_dict_map in device_facts.items():
+ for identifier, path in path_dict_map.items():
+ try:
+ resp = rest_obj.invoke_request('GET', path)
+ data = resp.json_data
+ resp_status.append(resp.status_code)
+ except HTTPError as err:
+ data = str(err)
+ path_dict_map[identifier] = data
+ if any(device_fact_error_report):
+ if "device_service_tag" in device_facts:
+ device_facts["device_service_tag"].update(device_fact_error_report)
+ else:
+ device_facts["device_service_tag"] = device_fact_error_report
+ if 200 in resp_status:
+ module.exit_json(device_info=device_facts)
+ else:
+ module.fail_json(msg="Failed to fetch the device information")
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py
new file mode 100644
index 00000000..b209b05a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/foreman.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: foreman
+short_description: Manage Foreman Resources
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Foreman resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ verify_ssl:
+ description:
+ - Whether to verify an SSL connection to Foreman server.
+ type: bool
+ default: False
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ required: true
+ params:
+ description:
+      - Parameters associated with the entity resource, to set or edit, in dictionary format (e.g. name, description).
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create CI Organization
+ community.general.foreman:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: organization
+ params:
+ name: My Cool New Organization
+ delegate_to: localhost
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+try:
+ from nailgun import entities
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module):
+ self._server = server
+ self._entities = entities
+ self._module = module
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+
+ return None
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+
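+# Illustrative usage sketch (placeholder values): main() below wires the pieces together roughly as
+#   server = ServerConfig(url='https://foreman.example.com', auth=('admin', 'changeme'), verify=False)
+#   ng = NailGun(server, entities, module)
+#   ng.organization({'name': 'My Cool New Organization'})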
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True),
+ verify_ssl=dict(type='bool', default=False),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module)
+
+    # Let's make a connection to the server with username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % to_native(e),
+ exception=traceback.format_exc())
+
+ if entity == 'organization':
+ ng.organization(params)
+ module.exit_json(changed=True, result="%s updated" % entity)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py
new file mode 100644
index 00000000..732c4723
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/foreman/katello.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Eric D Helms <ericdhelms@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: katello
+short_description: Manage Katello Resources
+deprecated:
+ removed_in: '2.0.0' # was Ansible 2.12
+ why: "Replaced by re-designed individual modules living at https://github.com/theforeman/foreman-ansible-modules"
+ alternative: https://github.com/theforeman/foreman-ansible-modules
+description:
+ - Allows the management of Katello resources inside your Foreman server.
+author:
+- Eric D Helms (@ehelms)
+requirements:
+ - nailgun >= 0.28.0
+ - python >= 2.6
+ - datetime
+options:
+ server_url:
+ description:
+ - URL of Foreman server.
+ required: true
+ username:
+ description:
+ - Username on Foreman server.
+ required: true
+ password:
+ description:
+ - Password for user accessing Foreman server.
+ required: true
+ entity:
+ description:
+ - The Foreman resource that the action will be performed on (e.g. organization, host).
+ choices:
+
+ - repository
+ - manifest
+ - repository_set
+ - sync_plan
+ - content_view
+ - lifecycle_environment
+ - activation_key
+ - product
+
+ required: true
+ action:
+ description:
+      - Action associated with the entity resource, to set or edit in dictionary format.
+      - Possible actions in relation to entities.
+ - "sync (available when entity=product or entity=repository)"
+ - "publish (available when entity=content_view)"
+ - "promote (available when entity=content_view)"
+ choices:
+ - sync
+ - publish
+ - promote
+ required: false
+ params:
+ description:
+      - Parameters associated with the entity resource and action, to set or edit in dictionary format.
+      - Each choice may only be available with specific entities and actions.
+      - "Possible choices are in the format of param_name ([entity,action,action,...],[entity,..],...)."
+ - The action "None" means no action specified.
+ - Possible Params in relation to entity and action.
+ - "name ([product,sync,None], [repository,sync], [repository_set,None], [sync_plan,None],"
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "organization ([product,sync,None] ,[repository,sync,None], [repository_set,None], [sync_plan,None], "
+ - "[content_view,promote,publish,None], [lifecycle_environment,None], [activation_key,None])"
+ - "content ([manifest,None])"
+ - "product ([repository,sync,None], [repository_set,None], [sync_plan,None])"
+ - "basearch ([repository_set,None])"
+      - "releasever ([repository_set,None])"
+ - "sync_date ([sync_plan,None])"
+ - "interval ([sync_plan,None])"
+ - "repositories ([content_view,None])"
+ - "from_environment ([content_view,promote])"
+ - "to_environment([content_view,promote])"
+ - "prior ([lifecycle_environment,None])"
+ - "content_view ([activation_key,None])"
+ - "lifecycle_environment ([activation_key,None])"
+ required: true
+ task_timeout:
+ description:
+ - The timeout in seconds to wait for the started Foreman action to finish.
+      - If the timeout is reached and the Foreman action did not complete, the Ansible task fails. However, the Foreman action does not get canceled.
+ default: 1000
+ required: false
+ verify_ssl:
+ description:
+      - Verify the SSL/HTTPS connection (e.g. for a valid certificate).
+ default: false
+ type: bool
+ required: false
+'''
+
+EXAMPLES = '''
+---
+# Simple Example:
+
+- name: Create Product
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: product
+ params:
+ name: Centos 7
+ delegate_to: localhost
+
+# Abstraction Example:
+# katello.yml
+---
+- name: "{{ name }}"
+ community.general.katello:
+ username: admin
+ password: admin
+ server_url: https://fakeserver.com
+ entity: "{{ entity }}"
+ params: "{{ params }}"
+ delegate_to: localhost
+
+# tasks.yml
+---
+- include: katello.yml
+ vars:
+ name: Create Dev Environment
+ entity: lifecycle_environment
+ params:
+ name: Dev
+ prior: Library
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create Centos Product
+ entity: product
+ params:
+ name: Centos 7
+ organization: Default Organization
+
+- include: katello.yml
+ vars:
+ name: Create 7.2 Repository
+ entity: repository
+ params:
+ name: Centos 7.2
+ product: Centos 7
+ organization: Default Organization
+ content_type: yum
+ url: http://mirror.centos.org/centos/7/os/x86_64/
+
+- include: katello.yml
+ vars:
+ name: Create Centos 7 View
+ entity: content_view
+ params:
+ name: Centos 7 View
+ organization: Default Organization
+ repositories:
+ - name: Centos 7.2
+ product: Centos 7
+
+- include: katello.yml
+ vars:
+ name: Enable RHEL Product
+ entity: repository_set
+ params:
+ name: Red Hat Enterprise Linux 7 Server (RPMs)
+ product: Red Hat Enterprise Linux Server
+ organization: Default Organization
+ basearch: x86_64
+ releasever: 7
+
+- include: katello.yml
+ vars:
+ name: Promote Contentview Environment with longer timeout
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+
+# Best Practices
+
+# In Foreman, things can be done in parallel.
+# When a conflicting action is already running,
+# the task will fail instantly instead of waiting for the already running action to complete.
+# So you should use an "until success" loop to catch this.
+
+- name: Promote Contentview Environment with increased Timeout
+ community.general.katello:
+ username: ansibleuser
+ password: supersecret
+ task_timeout: 10800
+ entity: content_view
+ action: promote
+ params:
+ name: MyContentView
+ organization: MyOrganisation
+ from_environment: Testing
+ to_environment: Production
+ register: task_result
+ until: task_result is success
+ retries: 9
+ delay: 120
+
+'''
+
+RETURN = '''# '''
+
+import datetime
+import os
+import traceback
+
+try:
+ from nailgun import entities, entity_fields, entity_mixins
+ from nailgun.config import ServerConfig
+ HAS_NAILGUN_PACKAGE = True
+except Exception:
+ HAS_NAILGUN_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class NailGun(object):
+ def __init__(self, server, entities, module, task_timeout):
+ self._server = server
+ self._entities = entities
+ self._module = module
+ entity_mixins.TASK_TIMEOUT = task_timeout
+
+ def find_organization(self, name, **params):
+ org = self._entities.Organization(self._server, name=name, **params)
+ response = org.search(set(), {'search': 'name={0}'.format(name)})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No organization found for %s" % name)
+
+ def find_lifecycle_environment(self, name, organization):
+ org = self.find_organization(organization)
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+            self._module.fail_json(msg="No Lifecycle Environment found for %s" % name)
+
+ def find_product(self, name, organization):
+ org = self.find_organization(organization)
+
+ product = self._entities.Product(self._server, name=name, organization=org)
+ response = product.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Product found for %s" % name)
+
+ def find_repository(self, name, product, organization):
+ product = self.find_product(product, organization)
+
+ repository = self._entities.Repository(self._server, name=name, product=product)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Repository found for %s" % name)
+
+ def find_content_view(self, name, organization):
+ org = self.find_organization(organization)
+
+ content_view = self._entities.ContentView(self._server, name=name, organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ return response[0]
+ else:
+ self._module.fail_json(msg="No Content View found for %s" % name)
+
+ def organization(self, params):
+ name = params['name']
+ del params['name']
+ org = self.find_organization(name, **params)
+
+ if org:
+ org = self._entities.Organization(self._server, name=name, id=org.id, **params)
+ org.update()
+ else:
+ org = self._entities.Organization(self._server, name=name, **params)
+ org.create()
+
+ return True
+
+ def manifest(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+        with open(os.getcwd() + params['content'], 'r') as content_file:
+            content = content_file.read()
+
+ manifest = self._entities.Subscription(self._server)
+
+ try:
+ manifest.upload(
+ data={'organization_id': org.id},
+ files={'content': content}
+ )
+ return True
+ except Exception as e:
+
+            if "Import is the same as existing data" in to_native(e):
+ return False
+ else:
+ self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ def product(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+
+ product = self._entities.Product(self._server, **params)
+ response = product.search()
+
+ if len(response) == 1:
+ product.id = response[0].id
+ product.update()
+ else:
+ product.create()
+
+ return True
+
+ def sync_product(self, params):
+ org = self.find_organization(params['organization'])
+ product = self.find_product(params['name'], org.name)
+
+ return product.sync()
+
+ def repository(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ params['product'] = product.id
+ del params['organization']
+
+ repository = self._entities.Repository(self._server, **params)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ response = repository.search()
+
+ if len(response) == 1:
+ repository.id = response[0].id
+ repository.update()
+ else:
+ repository.create()
+
+ return True
+
+ def sync_repository(self, params):
+ org = self.find_organization(params['organization'])
+ repository = self.find_repository(params['name'], params['product'], org.name)
+
+ return repository.sync()
+
+ def repository_set(self, params):
+ product = self.find_product(params['product'], params['organization'])
+ del params['product']
+ del params['organization']
+
+ if not product:
+ return False
+ else:
+ reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
+ reposet = reposet.search()[0]
+
+ formatted_name = [params['name'].replace('(', '').replace(')', '')]
+ formatted_name.append(params['basearch'])
+
+ if 'releasever' in params:
+ formatted_name.append(params['releasever'])
+
+ formatted_name = ' '.join(formatted_name)
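+            # e.g. name='Red Hat Enterprise Linux 7 Server (RPMs)', basearch='x86_64', releasever='7'
+            # yields the repository name 'Red Hat Enterprise Linux 7 Server RPMs x86_64 7'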
+
+ repository = self._entities.Repository(self._server, product=product, name=formatted_name)
+ repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
+ repository.organization = product.organization
+ repository = repository.search()
+
+ if len(repository) == 0:
+ if 'releasever' in params:
+ reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
+ else:
+ reposet.enable(data={'basearch': params['basearch']})
+
+ return True
+
+ def sync_plan(self, params):
+ org = self.find_organization(params['organization'])
+ params['organization'] = org.id
+ params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
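+        # sync_date is expected in 'HH:MM' format, e.g. '03:00' -> datetime.datetime(1900, 1, 1, 3, 0)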
+
+ products = params['products']
+ del params['products']
+
+ sync_plan = self._entities.SyncPlan(
+ self._server,
+ name=params['name'],
+ organization=org
+ )
+ response = sync_plan.search()
+
+ sync_plan.sync_date = params['sync_date']
+ sync_plan.interval = params['interval']
+
+ if len(response) == 1:
+ sync_plan.id = response[0].id
+ sync_plan.update()
+ else:
+ response = sync_plan.create()
+            sync_plan.id = response.id
+
+ if products:
+ ids = []
+
+ for name in products:
+ product = self.find_product(name, org.name)
+ ids.append(product.id)
+
+ sync_plan.add_products(data={'product_ids': ids})
+
+ return True
+
+ def content_view(self, params):
+ org = self.find_organization(params['organization'])
+
+ content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
+ response = content_view.search()
+
+ if len(response) == 1:
+ content_view.id = response[0].id
+ content_view.update()
+ else:
+ content_view = content_view.create()
+
+ if params['repositories']:
+ repos = []
+
+ for repository in params['repositories']:
+ repository = self.find_repository(repository['name'], repository['product'], org.name)
+ repos.append(repository)
+
+ content_view.repository = repos
+ content_view.update(['repository'])
+
+ def find_content_view_version(self, name, organization, environment):
+ env = self.find_lifecycle_environment(environment, organization)
+ content_view = self.find_content_view(name, organization)
+
+ content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
+ response = content_view_version.search(['content_view'], {'environment_id': env.id})
+
+ if len(response) == 1:
+ return response[0]
+ else:
+            self._module.fail_json(msg="No Content View version found for %s" % name)
+
+ def publish(self, params):
+ content_view = self.find_content_view(params['name'], params['organization'])
+
+ return content_view.publish()
+
+ def promote(self, params):
+ to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
+ version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
+
+ data = {'environment_id': to_environment.id}
+ return version.promote(data=data)
+
+ def lifecycle_environment(self, params):
+ org = self.find_organization(params['organization'])
+ prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
+
+ lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
+ response = lifecycle_env.search()
+
+ if len(response) == 1:
+ lifecycle_env.id = response[0].id
+ lifecycle_env.update()
+ else:
+ lifecycle_env.create()
+
+ return True
+
+ def activation_key(self, params):
+ org = self.find_organization(params['organization'])
+
+ activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
+ response = activation_key.search()
+
+ if len(response) == 1:
+ activation_key.id = response[0].id
+ activation_key.update()
+ else:
+ activation_key.create()
+
+ if params['content_view']:
+ content_view = self.find_content_view(params['content_view'], params['organization'])
+ lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
+
+ activation_key.content_view = content_view
+ activation_key.environment = lifecycle_environment
+ activation_key.update()
+
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_url=dict(type='str', required=True),
+ username=dict(type='str', required=True, no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ entity=dict(type='str', required=True,
+ choices=['repository', 'manifest', 'repository_set', 'sync_plan',
+ 'content_view', 'lifecycle_environment', 'activation_key', 'product']),
+ action=dict(type='str', choices=['sync', 'publish', 'promote']),
+ verify_ssl=dict(type='bool', default=False),
+ task_timeout=dict(type='int', default=1000),
+ params=dict(type='dict', required=True, no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_NAILGUN_PACKAGE:
+        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+ entity = module.params['entity']
+ action = module.params['action']
+ params = module.params['params']
+ verify_ssl = module.params['verify_ssl']
+ task_timeout = module.params['task_timeout']
+
+ server = ServerConfig(
+ url=server_url,
+ auth=(username, password),
+ verify=verify_ssl
+ )
+ ng = NailGun(server, entities, module, task_timeout)
+
+    # Let's make a connection to the server with username and password
+ try:
+ org = entities.Organization(server)
+ org.search()
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
+
+ result = False
+
+ if entity == 'product':
+ if action == 'sync':
+ result = ng.sync_product(params)
+ else:
+ result = ng.product(params)
+ elif entity == 'repository':
+ if action == 'sync':
+ result = ng.sync_repository(params)
+ else:
+ result = ng.repository(params)
+ elif entity == 'manifest':
+ result = ng.manifest(params)
+ elif entity == 'repository_set':
+ result = ng.repository_set(params)
+ elif entity == 'sync_plan':
+ result = ng.sync_plan(params)
+ elif entity == 'content_view':
+ if action == 'publish':
+ result = ng.publish(params)
+ elif action == 'promote':
+ result = ng.promote(params)
+ else:
+ result = ng.content_view(params)
+ elif entity == 'lifecycle_environment':
+ result = ng.lifecycle_environment(params)
+ elif entity == 'activation_key':
+ result = ng.activation_key(params)
+ else:
+ module.fail_json(changed=False, result="Unsupported entity supplied")
+
+ module.exit_json(changed=result, result="%s updated" % entity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py
new file mode 100644
index 00000000..1e37aee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_boot.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+- "This module boots a system through its HP iLO interface. The boot media
+ can be one of: cdrom, floppy, hdd, network or usb."
+- This module requires the hpilo python module.
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ media:
+ description:
+ - The boot media to boot the system from
+ choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
+ image:
+ description:
+ - The URL of a cdrom, floppy or usb boot media image.
+ protocol://username:password@hostname:port/filename
+ - protocol is either 'http' or 'https'
+ - username:password is optional
+ - port is optional
+ state:
+ description:
+ - The state of the boot media.
+ - "no_boot: Do not boot from the device"
+      - "boot_once: Boot from the device once and then not thereafter"
+      - "boot_always: Boot from the device each time the server is rebooted"
+      - "connect: Connect the virtual media device and set to boot_always"
+      - "disconnect: Disconnect the virtual media device and set to no_boot"
+ - "poweroff: Power off the server"
+ default: boot_once
+ choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
+ force:
+ description:
+ - Whether to force a reboot (even when the system is already booted).
+ - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ default: no
+ type: bool
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- python-hpilo
+notes:
+- To use a USB key image you need to specify floppy as boot media.
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ media: cdrom
+ image: http://some-web-server/iso/boot.iso
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+
+- name: Power off a server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_HOST
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ state: poweroff
+ delegate_to: localhost
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+ image=dict(type='str'),
+ state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+ force=dict(type='bool', default=False),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ )
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ media = module.params['media']
+ image = module.params['image']
+ state = module.params['state']
+ force = module.params['force']
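+    # Map the ssl_version option onto the matching constant, e.g. 'TLSv1' -> hpilo.ssl.PROTOCOL_TLSv1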
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+ changed = False
+ status = {}
+ power_status = 'UNKNOWN'
+
+ if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+ # Workaround for: Error communicating with iLO: Problem manipulating EV
+ try:
+ ilo.set_one_time_boot(media)
+ except hpilo.IloError:
+ time.sleep(60)
+ ilo.set_one_time_boot(media)
+
+ # TODO: Verify if image URL exists/works
+ if image:
+ ilo.insert_virtual_media(media, image)
+ changed = True
+
+ if media == 'cdrom':
+ ilo.set_vm_status('cdrom', state, True)
+ status = ilo.get_vm_status()
+ changed = True
+ elif media in ('floppy', 'usb'):
+ ilo.set_vf_status(state, True)
+ status = ilo.get_vf_status()
+ changed = True
+
+ # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+ if state in ('boot_once', 'boot_always') or force:
+
+ power_status = ilo.get_host_power_status()
+
+ if not force and power_status == 'ON':
+            module.fail_json(msg='HP iLO (%s) reports that the server is already powered on!' % host)
+
+ if power_status == 'ON':
+ ilo.warm_boot_server()
+# ilo.cold_boot_server()
+ changed = True
+ else:
+ ilo.press_pwr_btn()
+# ilo.reset_server()
+# ilo.set_host_power(host_power=True)
+ changed = True
+
+    elif state == 'poweroff':
+
+ power_status = ilo.get_host_power_status()
+
+ if not power_status == 'OFF':
+ ilo.hold_pwr_btn()
+# ilo.set_host_power(host_power=False)
+ changed = True
+
+ module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+  This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+      macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
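+# For example, parse_flat_interface({'Port': '1', 'MAC': '00-11-22-33-44-55'}) returns
+# ('hw_eth0', {'macaddress': '00:11:22:33:44:55', 'macaddress_dash': '00-11-22-33-44-55'}).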
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py
new file mode 100644
index 00000000..af43ca19
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hpilo_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+  This information includes hardware- and network-related details useful
+ for provisioning (e.g. macaddress, uuid).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+      macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+ is_old_facts = module._name in ('hpilo_facts', 'community.general.hpilo_facts')
+ if is_old_facts:
+ module.deprecate("The 'hpilo_facts' module has been renamed to 'hpilo_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # reformat into a text friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=info)
+ else:
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py
new file mode 100644
index 00000000..451e4b06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/hpilo/hponcfg.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+- This module configures the HP iLO interface using hponcfg.
+options:
+ path:
+ description:
+ - The XML file as accepted by hponcfg.
+ required: true
+ aliases: ['src']
+ minfw:
+ description:
+ - The minimum firmware level needed.
+ required: false
+ executable:
+ description:
+    - Path to the hponcfg executable (C(hponcfg) which uses $PATH).
+ default: hponcfg
+ verbose:
+ description:
+ - Run hponcfg in verbose mode (-v).
+ default: no
+ type: bool
+requirements:
+- hponcfg tool
+notes:
+- You need a working hponcfg on the target system.
+'''
+
+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+ ansible.builtin.copy:
+ content: |
+ <ribcl VERSION="2.0">
+ <login USER_LOGIN="user" PASSWORD="password">
+ <rib_info MODE="WRITE">
+ <mod_global_settings>
+ <session_timeout value="0"/>
+ <ssh_status value="Y"/>
+ <ssh_port value="22"/>
+ <serial_cli_status value="3"/>
+ <serial_cli_speed value="5"/>
+ </mod_global_settings>
+ </rib_info>
+ </login>
+ </ribcl>
+ dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+ executable: /opt/hp/tools/hponcfg
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ minfw=dict(type='str'),
+ executable=dict(default='hponcfg', type='str'),
+ verbose=dict(default=False, type='bool'),
+ )
+ )
+
+ # Consider every action a change (not idempotent yet!)
+ changed = True
+
+ src = module.params['src']
+ minfw = module.params['minfw']
+ executable = module.params['executable']
+ verbose = module.params['verbose']
+
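+    # Build the hponcfg command line: -f <xml file>, plus optional -v (verbose) and -m <minimum firmware level>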
+ options = ' -f %s' % src
+
+ if verbose:
+ options += ' -v'
+
+ if minfw:
+ options += ' -m %s' % minfw
+
+ rc, stdout, stderr = module.run_command('%s %s' % (executable, options))
+
+ if rc != 0:
+ module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py
new file mode 100644
index 00000000..ca318b4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/imc/imc_rest.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+- Provides direct access to the Cisco IMC REST API.
+- Perform any configuration changes and actions that the Cisco IMC supports.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- lxml
+- xmljson >= 0.1.8
+options:
+ hostname:
+ description:
+ - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+ required: true
+ aliases: [ host, ip ]
+ username:
+ description:
+    - Username used to login to the Cisco IMC.
+ default: admin
+ aliases: [ user ]
+ password:
+ description:
+ - The password to use for authentication.
+ default: password
+ path:
+ description:
+    - Name of the absolute path of the file that includes the body
+      of the HTTP request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+ aliases: [ 'src', 'config_file' ]
+ content:
+ description:
+ - When used instead of C(path), sets the content of the API requests directly.
+ - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+ protocol:
+ description:
+ - Connection protocol to use.
+ default: https
+ choices: [ http, https ]
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ - This is the time that every single connection (every fragment) can spend.
+ If this C(timeout) is reached, the module will fail with a
+ C(Connection failure) indicating that C(The read operation timed out).
+ default: 60
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated.
+    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+notes:
+- The XML fragments don't need an authentication cookie, this is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+ from the previous configuration. As a result, this module will always report a change on subsequent runs.
+ In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
+ parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <!-- Configure Serial-on-LAN -->
+ <configConfMo><inConfig>
+        <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
+ </inConfig></configConfMo>
+
+ <!-- Configure Console Redirection -->
+ <configConfMo><inConfig>
+ <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+ vpBaudRate="115200"
+ vpConsoleRedirection="com-0"
+ vpFlowControl="none"
+ vpTerminalType="vt100"
+ vpPuttyKeyPad="LINUX"
+ vpRedirectionAfterPOST="Always Enable"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <!-- Configure PXE boot -->
+ <configConfMo><inConfig>
+ <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+ </inConfig></configConfMo>
+
+ <!-- Power cycle server -->
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Add customer description to server
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: no
+ timeout: 120
+ content: |
+ <configConfMo><inConfig>
+ <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+ </inConfig></configConfMo>
+
+ <configConfMo><inConfig>
+ <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+aaaLogin:
+ description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+ "outPriv": "admin",
+ "outRefreshPeriod": "600",
+ "outSessionId": "114",
+ "outVersion": "2.0(13e)",
+ "response": "yes"
+ }
+configConfMo:
+ description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+elapsed:
+ description: Elapsed time in seconds
+ returned: always
+ type: int
+ sample: 31
+response:
+ description: HTTP response message, including content length
+ returned: always
+ type: str
+ sample: OK (729 bytes)
+status:
+ description: The HTTP response status code
+ returned: always
+  type: int
+ sample: 200
+error:
+ description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+ returned: failed
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "errorCode": "ERR-xml-parse-error",
+ "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+ "invocationResult": "594",
+ "response": "yes"
+ }
+error_code:
+ description: Cisco IMC error code
+ returned: failed
+ type: str
+ sample: ERR-xml-parse-error
+error_text:
+ description: Cisco IMC error message
+ returned: failed
+ type: str
+ sample: |
+ XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+ description: RAW XML input sent to the Cisco IMC, causing the error
+ returned: failed
+ type: str
+ sample: |
+ <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+ description: RAW XML output received from the Cisco IMC, with error details
+ returned: failed
+ type: str
+ sample: >
+ <error cookie=""
+ response="yes"
+ errorCode="ERR-xml-parse-error"
+ invocationResult="594"
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+'''
+
+import atexit
+import datetime
+import itertools
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+ import lxml.etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+ from xmljson import cobra
+ HAS_XMLJSON_COBRA = True
+except ImportError:
+ XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+ HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+ ''' Handle IMC returned data '''
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ result = cobra.data(xmloutput)
+
+ # Handle errors
+ if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+ if rawinput:
+ result['input'] = rawinput
+ result['output'] = rawoutput
+ result['error_code'] = xmloutput.get('errorCode')
+ result['error_text'] = xmloutput.get('errorDescr')
+ module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+ return result
+
+
+def logout(module, url, cookie, timeout):
+ ''' Perform a logout, if needed '''
+ data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+ resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
+def merge(one, two):
+ ''' Merge two complex nested datastructures into one'''
+ if isinstance(one, dict) and isinstance(two, dict):
+ copy = dict(one)
+ # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+ copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+ return copy
+
+ elif isinstance(one, list) and isinstance(two, list):
+ # itertools.izip_longest was renamed to zip_longest in Python 3
+ zip_longest = getattr(itertools, 'zip_longest', None) or itertools.izip_longest
+ return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+ return one if two is None else two
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+ username=dict(type='str', default='admin', aliases=['user']),
+ password=dict(type='str', default='password', no_log=True),
+ content=dict(type='str'),
+ path=dict(type='path', aliases=['config_file', 'src']),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ timeout=dict(type='int', default=60),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['content', 'path']],
+ required_one_of=[['content', 'path']],
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if not HAS_XMLJSON_COBRA:
+ module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ content = module.params['content']
+ path = module.params['path']
+
+ protocol = module.params['protocol']
+ timeout = module.params['timeout']
+
+ result = dict(
+ failed=False,
+ changed=False,
+ )
+
+ # Report missing file
+ file_exists = False
+ if path:
+ if os.path.isfile(path):
+ file_exists = True
+ else:
+ module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+ start = datetime.datetime.utcnow()
+
+ # Perform login first
+ url = '%s://%s/nuova' % (protocol, hostname)
+ data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+ resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or auth['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+ result.update(imc_response(module, resp.read()))
+
+ # Store cookie for future requests
+ try:
+ cookie = result['aaaLogin']['attributes']['outCookie']
+ except Exception:
+ module.fail_json(msg='Could not find cookie in output', **result)
+
+ # If we do not log out properly, we quickly run out of sessions
+ atexit.register(logout, module, url, cookie, timeout)
+
+ # Prepare request data
+ if content:
+ rawdata = content
+ elif file_exists:
+ with open(path, 'r') as config_object:
+ rawdata = config_object.read()
+
+ # Wrap the XML documents in a <root> element
+ xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
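+ # e.g. two top-level <configConfMo/> fragments in one file become children of a single <root>
+ # element, keeping the combined input parseable as one XML document (illustrative note)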
+
+ # Handle each XML document separately in the same session
+ for xmldoc in list(xmldata):
+ if xmldoc.tag is lxml.etree.Comment:
+ continue
+ # Add cookie to XML
+ xmldoc.set('cookie', cookie)
+ data = lxml.etree.tostring(xmldoc)
+
+ # Perform actual request
+ resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or info['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+ # Merge results with previous results
+ rawoutput = resp.read()
+ result = merge(result, imc_response(module, rawoutput, rawinput=data))
+ result['response'] = info['msg']
+ result['status'] = info['status']
+
+ # Check for any changes
+ # NOTE: Unfortunately the IMC API always reports status as 'modified'
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+ result['changed'] = ('modified' in results)
+
+ # Report success
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py
new file mode 100644
index 00000000..6509ca21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_boot.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of order of boot devices
+description:
+ - Use this module to manage the boot device order.
+options:
+ name:
+ description:
+ - Hostname or IP address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ bootdev:
+ description:
+ - Set boot device to use on next reboot
+ - "The choices for the device are:
+ - network -- Request network boot
+ - floppy -- Boot from floppy
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request"
+ required: true
+ choices:
+ - network
+ - floppy
+ - hd
+ - safe
+ - optical
+ - setup
+ - default
+ state:
+ description:
+ - Whether to ensure the given boot device is requested.
+ - "The choices for the state are:
+ - present -- Ensure the given boot device is requested for the next boot
+ - absent -- Ensure the given boot device is not requested (reset to default)"
+ default: present
+ choices: [ present, absent ]
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ type: bool
+ default: 'no'
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool
+ default: 'no'
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: str
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+- name: Ensure bootdevice is HD
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+- name: Ensure bootdevice is not Network
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ state: absent
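+
+# Illustrative example (values are placeholders): request a persistent UEFI network boot
+- name: Ensure bootdevice is Network persistently, using UEFI
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: network
+ persistent: yes
+ uefiboot: yes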
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+ # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
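+ # In check mode, report the device that would be set without contacting the BMC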
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py
new file mode 100644
index 00000000..47840154
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/ipmi/ipmi_power.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+options:
+ name:
+ description:
+ - Hostname or IP address of the BMC.
+ required: true
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ state:
+ description:
+ - Whether to ensure that the machine is in the desired state.
+ - "The choices for state are:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'"
+ choices: ['on', 'off', 'shutdown', 'reset', 'boot']
+ required: true
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ default: 300
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success
+ type: str
+ sample: 'on'
+'''
+
+EXAMPLES = '''
+- name: Ensure machine is powered on
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: on
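+
+# Illustrative example (hostname and timeout are placeholders): request a graceful OS shutdown
+- name: Request OS shutdown, waiting up to 600 seconds
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: shutdown
+ timeout: 600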
+'''
+
+import traceback
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(default=300, type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+ changed = False
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py
new file mode 100644
index 00000000..7bd7b9ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_cmms.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+ - This module returns/displays inventory details of CMMs.
+
+options:
+ uuid:
+ description:
+ UUID of the device; this is a string with length greater than 16.
+
+ command_options:
+ description:
+ Options to filter CMM information.
+ default: cmms
+ choices:
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
+
+ chassis:
+ description:
+ UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_chassis_uuid
+
+'''
+
+RETURN = r'''
+result:
+ description: cmms detail from lxca
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import cmms
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+ return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+ """
+ This function merges the argument spec and creates the Ansible module object
+ :return:
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'cmms': _cmms,
+ 'cmms_by_uuid': _cmms_by_uuid,
+ 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ uuid=dict(default=None),
+ chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+ This function invokes the selected command
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py
new file mode 100644
index 00000000..febe2fd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/lxca/lxca_nodes.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+ - This module returns/displays inventory details of nodes.
+
+options:
+ uuid:
+ description:
+ UUID of the device; this is a string with length greater than 16.
+
+ command_options:
+ description:
+ Options to filter node information.
+ default: nodes
+ choices:
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
+
+ chassis:
+ description:
+ UUID of the chassis; this is a string with length greater than 16.
+
+extends_documentation_fragment:
+- community.general.lxca_common
+
+'''
+
+EXAMPLES = '''
+# get all nodes info
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes
+
+# get specific nodes info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_uuid
+
+# get specific nodes info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_chassis_uuid
+
+# get managed nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_managed
+
+# get unmanaged nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_unmanaged
+
+'''
+
+RETURN = r'''
+result:
+ description: nodes detail from lxca
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple nodes details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import nodes
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _nodes(module, lxca_con):
+ return nodes(lxca_con)
+
+
+def _nodes_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return nodes(lxca_con, module.params['uuid'])
+
+
+def _nodes_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return nodes(lxca_con, chassis=module.params['chassis'])
+
+
+def _nodes_status_managed(module, lxca_con):
+ return nodes(lxca_con, status='managed')
+
+
+def _nodes_status_unmanaged(module, lxca_con):
+ return nodes(lxca_con, status='unmanaged')
+
+
+def setup_module_object():
+ """
+ This function merges the argument spec and creates the Ansible module object
+ :return:
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'nodes': _nodes,
+ 'nodes_by_uuid': _nodes_by_uuid,
+ 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
+ 'nodes_status_managed': _nodes_status_managed,
+ 'nodes_status_unmanaged': _nodes_status_unmanaged,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ uuid=dict(default=None), chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+ This function invokes the selected command
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py
new file mode 100644
index 00000000..d40a8ca0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alert_profiles
+
+short_description: Configuration of alert profiles for ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert profile should not exist,
+ - present - alert profile should exist,
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The unique alert profile name in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The resource type for the alert profile in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ alerts:
+ type: list
+ description:
+ - List of alert descriptions to assign to this profile.
+ - Required if state is "present"
+ notes:
+ type: str
+ description:
+ - Optional notes for this profile
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert profile to ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 02
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert profile from ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: absent
+ name: Test profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlertProfiles(object):
+ """ Object to execute alert profile management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
+
+ def get_profiles(self):
+ """ Get all alert profiles from ManageIQ
+ """
+ try:
+ response = self.client.get(self.url + '?expand=alert_definitions,resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
+ return response.get('resources') or []
+
+ def get_alerts(self, alert_descriptions):
+ """ Get a list of alert hrefs from a list of alert descriptions
+ """
+ alerts = []
+ for alert_description in alert_descriptions:
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
+ description=alert_description)
+ alerts.append(alert['href'])
+
+ return alerts
+
+ def add_profile(self, profile):
+ """ Add a new alert profile to ManageIQ
+ """
+ # find all alerts to add to the profile
+ # we do this first to fail early if one is missing.
+ alerts = self.get_alerts(profile['alerts'])
+
+ # build the profile dict to send to the server
+
+ profile_dict = dict(name=profile['name'],
+ description=profile['name'],
+ mode=profile['resource_type'])
+ if profile['notes']:
+ profile_dict['set_data'] = dict(notes=profile['notes'])
+
+ # send it to the server
+ try:
+ result = self.client.post(self.url, resource=profile_dict, action="create")
+ except Exception as e:
+ self.module.fail_json(msg="Creating profile failed: {error}".format(error=e))
+
+ # now that it has been created, we can assign the alerts
+ self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+ msg = "Profile {name} created successfully"
+ msg = msg.format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def delete_profile(self, profile):
+ """ Delete an alert profile from ManageIQ
+ """
+ try:
+ self.client.post(profile['href'], action="delete")
+ except Exception as e:
+ self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+ msg = "Successfully deleted profile {name}".format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def get_alert_href(self, alert):
+ """ Get an absolute href for an alert
+ """
+ return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+ def assign_or_unassign(self, profile, resources, action):
+ """ Assign or unassign alerts to profile, and validate the result.
+ """
+ alerts = [dict(href=href) for href in resources]
+
+ subcollection_url = profile['href'] + '/alert_definitions'
+ try:
+ result = self.client.post(subcollection_url, resources=alerts, action=action)
+ if len(result['results']) != len(alerts):
+ msg = "Failed to {action} alerts to profile '{name}', " +\
+ "expected {expected} alerts to be {action}ed, " +\
+ "but only {changed} were {action}ed"
+ msg = msg.format(action=action,
+ name=profile['name'],
+ expected=len(alerts),
+ changed=len(result['results']))
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to {action} alerts to profile '{name}': {error}"
+ msg = msg.format(action=action, name=profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ return result['results']
+
+ def update_profile(self, old_profile, desired_profile):
+ """ Update alert profile in ManageIQ
+ """
+ changed = False
+ # we need to use client.get to query the alert definitions
+ old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+ # figure out which alerts we need to assign / unassign
+ # alerts listed by the user:
+ desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+ # alert which currently exist in the profile
+ if 'alert_definitions' in old_profile:
+ # we use get_alert_href to have a direct href to the alert
+ existing_alerts = set([self.get_alert_href(alert) for alert in old_profile['alert_definitions']])
+ else:
+ # no alerts in this profile
+ existing_alerts = set()
+
+ to_add = list(desired_alerts - existing_alerts)
+ to_remove = list(existing_alerts - desired_alerts)
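+ # e.g. desired = {a1, a2}, existing = {a2, a3} -> to_add = [a1], to_remove = [a3] (illustrative)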
+
+ # assign / unassign the alerts, if needed
+
+ if to_remove:
+ self.assign_or_unassign(old_profile, to_remove, "unassign")
+ changed = True
+ if to_add:
+ self.assign_or_unassign(old_profile, to_add, "assign")
+ changed = True
+
+ # update other properties
+ profile_dict = dict()
+
+ if old_profile['mode'] != desired_profile['resource_type']:
+ # mode needs to be updated
+ profile_dict['mode'] = desired_profile['resource_type']
+
+ # check if notes need to be updated
+ old_notes = old_profile.get('set_data', {}).get('notes')
+
+ if desired_profile['notes'] != old_notes:
+ profile_dict['set_data'] = dict(notes=desired_profile['notes'])
+
+ if profile_dict:
+ # if we have any updated values
+ changed = True
+ try:
+ result = self.client.post(old_profile['href'],
+ resource=profile_dict,
+ action="edit")
+ except Exception as e:
+ msg = "Updating profile '{name}' failed: {error}"
+ msg = msg.format(name=old_profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ if changed:
+ msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
+ else:
+ msg = "No update needed for profile {name}".format(name=desired_profile['name'])
+ return dict(changed=changed, msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ alerts=dict(type='list'),
+ notes=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['name', 'resource_type']),
+ ('state', 'absent', ['name'])])
+
+ state = module.params['state']
+ name = module.params['name']
+
+ manageiq = ManageIQ(module)
+ manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
+ name=name)
+
+ # we need to add or update the alert profile
+ if state == "present":
+ if not existing_profile:
+ # a profile with this name doesn't exist yet, let's create it
+ res_args = manageiq_alert_profiles.add_profile(module.params)
+ else:
+ # a profile with this name exists, we might need to update it
+ res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+ # this alert profile should not exist
+ if state == "absent":
+ # if we have an alert profile with this name, delete it
+ if existing_profile:
+ res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+ else:
+ # This alert profile does not exist in ManageIQ, and that's okay
+ msg = "Alert profile '{name}' does not exist in ManageIQ"
+ msg = msg.format(name=name)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
new file mode 100644
index 00000000..4f818a3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_alerts.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert should not exist,
+ - present - alert should exist,
+ required: False
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The unique alert description in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The entity type for the alert in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ expression_type:
+ type: str
+ description:
+ - Expression type.
+ default: hash
+ choices: ["hash", "miq"]
+ expression:
+ type: dict
+ description:
+ - The alert expression for ManageIQ.
+ - Can either be in the "Miq Expression" format or the "Hash Expression format".
+ - Required if state is "present".
+ enabled:
+ description:
+ - Enable or disable the alert. Required if state is "present".
+ type: bool
+ options:
+ type: dict
+ description:
+ - Additional alert options, such as notification type and frequency
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 01
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: ContainerNode
+ expression:
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Add an alert with a "miq expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 02
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: Vm
+ expression_type: miq
+ expression:
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete an alert from ManageIQ
+ community.general.manageiq_alerts:
+ state: absent
+ description: Test Alert 01
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+ """ Represent a ManageIQ alert. Can be initialized with both the format
+ we receive from the server and the format we get from the user.
+ """
+ def __init__(self, alert):
+ self.description = alert['description']
+ self.db = alert['db']
+ self.enabled = alert['enabled']
+ self.options = alert['options']
+ self.hash_expression = None
+ self.miq_expression = None
+
+ if 'hash_expression' in alert:
+ self.hash_expression = alert['hash_expression']
+ if 'miq_expression' in alert:
+ self.miq_expression = alert['miq_expression']
+ if 'exp' in self.miq_expression:
+ # miq_expression is a field that needs a special case, because
+ # it's returned surrounded by a dict named exp even though we don't
+ # send it with that dict.
+ self.miq_expression = self.miq_expression['exp']
+
+ def __eq__(self, other):
+ """ Compare two ManageIQAlert objects
+ """
+ return self.__dict__ == other.__dict__
+
+
+class ManageIQAlerts(object):
+ """ Object to execute alert management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+ def get_alerts(self):
+ """ Get all alerts from ManageIQ
+ """
+ try:
+ response = self.client.get(self.alerts_url + '?expand=resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+ return response.get('resources', [])
+
+ def validate_hash_expression(self, expression):
+ """ Validate a 'hash expression' alert definition
+ """
+ # hash expressions must have the following fields
+ for key in ['options', 'eval_method', 'mode']:
+ if key not in expression:
+ msg = "Hash expression is missing required field {key}".format(key=key)
+ self.module.fail_json(msg=msg)
+
+ def create_alert_dict(self, params):
+ """ Create a dict representing an alert
+ """
+ if params['expression_type'] == 'hash':
+ # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+ self.validate_hash_expression(params['expression'])
+ expression_type = 'hash_expression'
+ else:
+ # actually miq_expression, but we call it "expression" for backwards-compatibility
+ expression_type = 'expression'
+
+ # build the alert
+ alert = dict(description=params['description'],
+ db=params['resource_type'],
+ options=params['options'],
+ enabled=params['enabled'])
+
+ # add the actual expression.
+ alert.update({expression_type: params['expression']})
+
+ return alert
+
+ def add_alert(self, alert):
+ """ Add a new alert to ManageIQ
+ """
+ try:
+ result = self.client.post(self.alerts_url, action='create', resource=alert)
+
+ msg = "Alert {description} created successfully: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Creating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to create a hash expression
+ msg = msg.format(description=alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def delete_alert(self, alert):
+ """ Delete an alert
+ """
+ try:
+ result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
+ id=alert['id']),
+ action="delete")
+ msg = "Alert {description} deleted: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Deleting alert {description} failed: {error}"
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def update_alert(self, existing_alert, new_alert):
+ """ Update an existing alert with the values from `new_alert`
+ """
+ new_alert_obj = ManageIQAlert(new_alert)
+ if new_alert_obj == ManageIQAlert(existing_alert):
+ # no change needed - alerts are identical
+ return dict(changed=False, msg="No update needed")
+ else:
+ try:
+ url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
+ result = self.client.post(url, action="edit", resource=new_alert)
+
+ # make sure that the update was indeed successful by comparing
+ # the result to the expected result.
+ if new_alert_obj == ManageIQAlert(result):
+ # success!
+ msg = "Alert {description} updated successfully: {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ return dict(changed=True, msg=msg)
+ else:
+ # unexpected result
+ msg = "Updating alert {description} failed, unexpected result {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = "Updating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to update a hash expression
+ msg = msg.format(description=existing_alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=existing_alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ description=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
+ expression=dict(type='dict'),
+ options=dict(type='dict'),
+ enabled=dict(type='bool'),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['description',
+ 'resource_type',
+ 'expression',
+ 'enabled',
+ 'options']),
+ ('state', 'absent', ['description'])])
+
+ state = module.params['state']
+ description = module.params['description']
+
+ manageiq = ManageIQ(module)
+ manageiq_alerts = ManageIQAlerts(manageiq)
+
+ existing_alert = manageiq.find_collection_resource_by("alert_definitions",
+ description=description)
+
+ # we need to add or update the alert
+ if state == "present":
+ alert = manageiq_alerts.create_alert_dict(module.params)
+
+ if not existing_alert:
+ # an alert with this description doesn't exist yet, let's create it
+ res_args = manageiq_alerts.add_alert(alert)
+ else:
+ # an alert with this description exists, we might need to update it
+ res_args = manageiq_alerts.update_alert(existing_alert, alert)
+
+ # this alert should not exist
+ elif state == "absent":
+ # if we have an alert with this description, delete it
+ if existing_alert:
+ res_args = manageiq_alerts.delete_alert(existing_alert)
+ else:
+ # it doesn't exist, and that's okay
+ msg = "Alert '{description}' does not exist in ManageIQ"
+ msg = msg.format(description=description)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py
new file mode 100644
index 00000000..2050eb63
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_group.py
@@ -0,0 +1,648 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder <evertmulder@gmail.com> (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+- manageiq-client
+
+options:
+ state:
+ type: str
+ description:
+ - absent - group should not exist, present - group should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The group description.
+ required: true
+ default: null
+ role_id:
+ type: int
+ description:
+ - The group role id
+ required: false
+ default: null
+ role:
+ type: str
+ description:
+ - The group role name
+ - The C(role_id) has precedence over the C(role) when supplied.
+ required: false
+ default: null
+ tenant_id:
+ type: int
+ description:
+ - The tenant for the group identified by the tenant id.
+ required: false
+ default: null
+ tenant:
+ type: str
+ description:
+ - The tenant for the group identified by the tenant name.
+ - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - Tenant names are case sensitive.
+ required: false
+ default: null
+ managed_filters:
+ description: The tag values per category
+ type: dict
+ required: false
+ default: null
+ managed_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing categories are kept or updated, new categories are added.
+ - In replace mode all categories will be replaced with the supplied C(managed_filters).
+ choices: [ merge, replace ]
+ default: replace
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ type: list
+ elements: str
+ required: false
+ default: null
+ belongsto_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing settings are merged with the supplied C(belongsto_filters).
+ - In replace mode current values are replaced with the supplied C(belongsto_filters).
+ choices: [ merge, replace ]
+ default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: 'my_tenant'
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant_id: 4
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name:
+ - Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
+ - Apply 3 prov_max_cpu and 2 department tags to the group.
+ - Limit access to a cluster for the group.
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: my_tenant
+ managed_filters:
+ prov_max_cpu:
+ - '1'
+ - '2'
+ - '4'
+ department:
+ - defense
+ - engineering
+ managed_filters_merge_mode: replace
+ belongsto_filters:
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ belongsto_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a group in ManageIQ
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+'''
+
+RETURN = '''
+group:
+ description: The group.
+ returned: success
+ type: complex
+ contains:
+ description:
+ description: The group description
+ returned: success
+ type: str
+ id:
+ description: The group id
+ returned: success
+ type: int
+ group_type:
+ description: The group type, system or user
+ returned: success
+ type: str
+ role:
+ description: The group role name
+ returned: success
+ type: str
+ tenant:
+ description: The group tenant name
+ returned: success
+ type: str
+ managed_filters:
+ description: The tag values per category
+ returned: success
+ type: dict
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ returned: success
+ type: list
+ created_on:
+ description: Group creation date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+ updated_on:
+ description: Group update date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+ """
+ Object to execute group management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group(self, description):
+ """ Search for group object by description.
+ Returns:
+ the group, or None if group was not found.
+ """
+ groups = self.client.collections.groups.find_by(description=description)
+ if len(groups) == 0:
+ return None
+ else:
+ return groups[0]
+
+ def tenant(self, tenant_id, tenant_name):
+ """ Search for tenant entity by name or id
+ Returns:
+ the tenant entity, or None if no id or name was supplied
+ """
+
+ if tenant_id:
+ tenant = self.client.get_entity('tenants', tenant_id)
+ if not tenant:
+ self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
+ return tenant
+ else:
+ if tenant_name:
+ tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
+ if not tenant_res:
+ self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
+ if len(tenant_res) > 1:
+ self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s'" % tenant_name)
+ tenant = tenant_res[0]
+ return tenant
+ else:
+ # No tenant name or tenant id supplied
+ return None
+
+ def role(self, role_id, role_name):
+ """ Search for a role object by name or id.
+ Returns:
+ the role entity, or None if no id or name was supplied;
+ fails the module if the role was not found.
+ """
+ if role_id:
+ role = self.client.get_entity('roles', role_id)
+ if not role:
+ self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
+ return role
+ else:
+ if role_name:
+ role_res = self.client.collections.roles.find_by(name=role_name)
+ if not role_res:
+ self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
+ if len(role_res) > 1:
+ self.module.fail_json(msg="Multiple roles found in manageiq with name '%s'" % role_name)
+ return role_res[0]
+ else:
+ # No role name or role id supplied
+ return None
+
+ @staticmethod
+ def merge_dict_values(norm_current_values, norm_updated_values):
+ """ Create a merged update object for manageiq group filters.
+
+ The input dict contain the tag values per category.
+ If the new values contain the category, all tags for that category are replaced
+ If the new values do not contain the category, the existing tags are kept
+
+ Returns:
+ the nested array with the merged values, used in the update post body
+ """
+
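+ # Illustrative example (hypothetical tag values):
+ # current = {'department': ['defense']}, updated = {'department': ['engineering'], 'env': ['prod']}
+ # -> {'department': ['engineering'], 'env': ['prod']}; categories only present in the current
+ # values would be kept unchanged.
+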
+ # If no updated values are supplied, in merge mode, the original values must be returned
+ # otherwise the existing tag filters will be removed.
+ if norm_current_values and (not norm_updated_values):
+ return norm_current_values
+
+ # If no existing tag filters exist, use the user supplied values
+ if (not norm_current_values) and norm_updated_values:
+ return norm_updated_values
+
+ # start with norm_current_values's keys and values
+ res = norm_current_values.copy()
+ # replace res with norm_updated_values's keys and values
+ res.update(norm_updated_values)
+ return res
+
+ def delete_group(self, group):
+ """ Deletes a group from manageiq.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+ try:
+ url = '%s/groups/%s' % (self.api_url, group['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(
+ changed=True,
+ msg="deleted group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+
+ if role or norm_managed_filters or belongsto_filters:
+ group.reload(attributes=['miq_user_role_name', 'entitlement'])
+
+ try:
+ current_role = group['miq_user_role_name']
+ except AttributeError:
+ current_role = None
+
+ changed = False
+ resource = {}
+
+ if description and group['description'] != description:
+ resource['description'] = description
+ changed = True
+
+ if tenant and group['tenant_id'] != tenant['id']:
+ resource['tenant'] = dict(id=tenant['id'])
+ changed = True
+
+ if role and current_role != role['name']:
+ resource['role'] = dict(id=role['id'])
+ changed = True
+
+ if norm_managed_filters or belongsto_filters:
+
+ # Only compare if filters are supplied
+ entitlement = group['entitlement']
+
+ if 'filters' not in entitlement:
+ # No existing filters exist, use supplied filters
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+ changed = True
+ else:
+ current_filters = entitlement['filters']
+ new_filters = self.edit_group_edit_filters(current_filters,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+ if new_filters:
+ resource['filters'] = new_filters
+ changed = True
+
+ if not changed:
+ return dict(
+ changed=False,
+ msg="group %s is not changed." % group['description'])
+
+ # try to update group
+ try:
+ self.client.post(group['href'], action='edit', resource=resource)
+ changed = True
+ except Exception as e:
+ self.module.fail_json(msg="failed to update group %s: %s" % (group['description'], str(e)))
+
+ return dict(
+ changed=changed,
+ msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group filters.
+
+ Returns:
+            None if the group filters were not updated.
+            If they were updated, the 'filters' part of the post body used to update the group.
+ """
+ filters_updated = False
+ new_filters_resource = {}
+
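+        # belongsto filters are plain lists of resource paths, so they are compared as sets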
+ current_belongsto_set = current_filters.get('belongsto', set())
+
+ if belongsto_filters:
+ new_belongsto_set = set(belongsto_filters)
+ else:
+ new_belongsto_set = set()
+
+ if current_belongsto_set == new_belongsto_set:
+ new_filters_resource['belongsto'] = current_filters['belongsto']
+ else:
+ if belongsto_filters_merge_mode == 'merge':
+ current_belongsto_set.update(new_belongsto_set)
+ new_filters_resource['belongsto'] = list(current_belongsto_set)
+ else:
+ new_filters_resource['belongsto'] = list(new_belongsto_set)
+ filters_updated = True
+
+        # Process the managed filter tags.
+        # The user input is a dict whose keys are categories and whose values are lists of tag names.
+        # ManageIQ stores the current managed filters as an array of arrays, one array per category.
+        # Both the user input and the current ManageIQ filters are normalized to dicts of
+        # sorted arrays so that they can be compared.
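+        # e.g. (illustrative) user input {'environment': ['prod']} normalizes to
+        # {'environment': ['/managed/environment/prod']}, and the ManageIQ value
+        # [['/managed/environment/prod']] normalizes to the same dict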
+ norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+ if norm_current_filters == norm_managed_filters:
+ if 'managed' in current_filters:
+ new_filters_resource['managed'] = current_filters['managed']
+ else:
+ if managed_filters_merge_mode == 'merge':
+ merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+ else:
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ filters_updated = True
+
+ if not filters_updated:
+ return None
+
+ return new_filters_resource
+
+ def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+ """ Creates the group in manageiq.
+
+ Returns:
+            a dict with changed, a short message describing the operation
+            and the created group id.
+ """
+ # check for required arguments
+ for key, value in dict(description=description).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/groups' % self.api_url
+
+ resource = {'description': description}
+
+ if role is not None:
+ resource['role'] = dict(id=role['id'])
+
+ if tenant is not None:
+ resource['tenant'] = dict(id=tenant['id'])
+
+ if norm_managed_filters or belongsto_filters:
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created group %s" % description,
+ group_id=result['results'][0]['id']
+ )
+
+ @staticmethod
+ def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+ if not norm_managed_filters:
+ return None
+
+ return list(norm_managed_filters.values())
+
+ @staticmethod
+ def manageiq_filters_to_sorted_dict(current_filters):
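+        # Convert ManageIQ's 'managed' filters, an array of tag-path arrays (one array
+        # per category), into a dict keyed by category name, e.g. (illustrative):
+        #   [['/managed/environment/prod']] -> {'environment': ['/managed/environment/prod']}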
+ current_managed_filters = current_filters.get('managed')
+ if not current_managed_filters:
+ return None
+
+ res = {}
+ for tag_list in current_managed_filters:
+ tag_list.sort()
+ key = tag_list[0].split('/')[2]
+ res[key] = tag_list
+
+ return res
+
+ @staticmethod
+ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
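+        # Convert the user supplied managed_filters dict into the normalized form used
+        # for comparison, e.g. (illustrative):
+        #   {'environment': ['prod', 'dev']} ->
+        #   {'environment': ['/managed/environment/dev', '/managed/environment/prod']}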
+ if not managed_filters:
+ return None
+
+ res = {}
+ for cat_key in managed_filters:
+ cat_array = []
+ if not isinstance(managed_filters[cat_key], list):
+ module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+ for tags in managed_filters[cat_key]:
+ miq_managed_tag = "/managed/" + cat_key + "/" + tags
+ cat_array.append(miq_managed_tag)
+ # Do not add empty categories. ManageIQ will remove all categories that are not supplied
+ if cat_array:
+ cat_array.sort()
+ res[cat_key] = cat_array
+ return res
+
+ @staticmethod
+ def create_result_group(group):
+ """ Creates the ansible result object from a manageiq group entity
+
+ Returns:
+ a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
+ """
+ try:
+ role_name = group['miq_user_role_name']
+ except AttributeError:
+ role_name = None
+
+ managed_filters = None
+ belongsto_filters = None
+ if 'filters' in group['entitlement']:
+ filters = group['entitlement']['filters']
+ belongsto_filters = filters.get('belongsto')
+ group_managed_filters = filters.get('managed')
+ if group_managed_filters:
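+                # convert the ManageIQ tag paths back into a dict of category -> tag names,
+                # e.g. (illustrative): ['/managed/environment/prod'] -> {'environment': ['prod']}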
+ managed_filters = {}
+ for tag_list in group_managed_filters:
+ key = tag_list[0].split('/')[2]
+ tags = []
+ for t in tag_list:
+ tags.append(t.split('/')[3])
+ managed_filters[key] = tags
+
+ return dict(
+ id=group['id'],
+ description=group['description'],
+ role=role_name,
+ tenant=group['tenant']['name'],
+ managed_filters=managed_filters,
+ belongsto_filters=belongsto_filters,
+ group_type=group['group_type'],
+ created_on=group['created_on'],
+ updated_on=group['updated_on'],
+ )
+
+
+def main():
+ argument_spec = dict(
+ description=dict(required=True, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ role_id=dict(required=False, type='int'),
+ role=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='int'),
+ tenant=dict(required=False, type='str'),
+ managed_filters=dict(required=False, type='dict'),
+ managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(required=False, type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ description = module.params['description']
+ state = module.params['state']
+ role_id = module.params['role_id']
+ role_name = module.params['role']
+ tenant_id = module.params['tenant_id']
+ tenant_name = module.params['tenant']
+ managed_filters = module.params['managed_filters']
+ managed_filters_merge_mode = module.params['managed_filters_merge_mode']
+ belongsto_filters = module.params['belongsto_filters']
+ belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
+
+ manageiq = ManageIQ(module)
+ manageiq_group = ManageIQgroup(manageiq)
+
+ group = manageiq_group.group(description)
+
+ # group should not exist
+ if state == "absent":
+ # if we have a group, delete it
+ if group:
+ res_args = manageiq_group.delete_group(group)
+ # if we do not have a group, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="group '%s' does not exist in manageiq" % description)
+
+ # group should exist
+ if state == "present":
+
+ tenant = manageiq_group.tenant(tenant_id, tenant_name)
+ role = manageiq_group.role(role_id, role_name)
+ norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module)
+ # if we have a group, edit it
+ if group:
+ res_args = manageiq_group.edit_group(group, description, role, tenant,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+
+ # if we do not have a group, create it
+ else:
+ res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
+ group = manageiq.client.get_entity('groups', res_args['group_id'])
+
+ group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
+ res_args['group'] = manageiq_group.create_result_group(group)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py
new file mode 100644
index 00000000..600c0bff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_policies.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies
+
+short_description: Management of resource policy_profiles in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - policy_profiles should not exist,
+ - present - policy_profiles should exist,
+ - list - list current policy_profiles and policies.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ policy_profiles:
+ type: list
+ description:
+ - list of dictionaries, each includes the policy_profile 'name' key.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the type of the resource to which the profile should be [un]assigned
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the name of the resource to which the profile should be [un]assigned
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Assign new policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Unassign a policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+manageiq_policies:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ
+ returned: always
+ type: dict
+ sample: '{
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
+ {
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
+ }
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+class ManageIQPolicies(object):
+ """
+    Object to execute policy profile management operations on manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def query_profile_href(self, profile):
+ """ Add or Update the policy_profile href field
+
+ Example:
+ {name: STR, ...} => {name: STR, href: STR}
+ """
+ resource = self.manageiq.find_collection_resource_or_fail(
+ "policy_profiles", **profile)
+ return dict(name=profile['name'], href=resource['href'])
+
+ def query_resource_profiles(self):
+        """ Returns a list of the profile objects assigned to the resource
+ """
+ url = '{resource_url}/policy_profiles?expand=resources'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api profile object to look like:
+ # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
+ profiles = [self.clean_profile_object(profile) for profile in resources]
+
+ return profiles
+
+ def query_profile_policies(self, profile_id):
+        """ Returns a list of the policy objects assigned to the resource
+ """
+ url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
+ try:
+ response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('policies', [])
+
+ # clean the returned rest api policy object to look like:
+ # {name: STR, description: STR, active: BOOL}
+ policies = [self.clean_policy_object(policy) for policy in resources]
+
+ return policies
+
+ def clean_policy_object(self, policy):
+ """ Clean a policy object to have human readable form of:
+ {
+ name: STR,
+ description: STR,
+ active: BOOL
+ }
+ """
+ name = policy.get('name')
+ description = policy.get('description')
+ active = policy.get('active')
+
+ return dict(
+ name=name,
+ description=description,
+ active=active)
+
+ def clean_profile_object(self, profile):
+ """ Clean a profile object to have human readable form of:
+ {
+ profile_name: STR,
+ profile_description: STR,
+ policies: ARR<POLICIES>
+ }
+ """
+ profile_id = profile['id']
+ name = profile.get('name')
+ description = profile.get('description')
+ policies = self.query_profile_policies(profile_id)
+
+ return dict(
+ profile_name=name,
+ profile_description=description,
+ policies=policies)
+
+ def profiles_to_update(self, profiles, action):
+        """ Create a list of policy profiles that need to be updated in ManageIQ.
+
+        Returns:
+            the list of policy profiles that need to be posted for the
+            requested action.
+ """
+ profiles_to_post = []
+ assigned_profiles = self.query_resource_profiles()
+
+        # make a set of the assigned profile name strings
+ # e.g. ['openscap profile', ...]
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
+
+ for profile in profiles:
+ assigned = profile.get('name') in assigned_profiles_set
+
+ if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+ # add/update the policy profile href field
+ # {name: STR, ...} => {name: STR, href: STR}
+ profile = self.query_profile_href(profile)
+ profiles_to_post.append(profile)
+
+ return profiles_to_post
+
+ def assign_or_unassign_profiles(self, profiles, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of profiles needed to be changed
+ profiles_to_post = self.profiles_to_update(profiles, action)
+ if not profiles_to_post:
+ return dict(
+ changed=False,
+ msg="Profiles {profiles} already {action}ed, nothing to do".format(
+ action=action,
+ profiles=profiles))
+
+ # try to assign or unassign profiles to resource
+ url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=profiles_to_post)
+ except Exception as e:
+ msg = "Failed to {action} profile: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed profiles
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed profiles: {profiles}".format(
+ action=action,
+ profiles=profiles))
+
+
+def main():
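+    # map the module 'state' parameter onto the ManageIQ API action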
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ policy_profiles=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['policy_profiles']),
+ ('state', 'absent', ['policy_profiles'])
+ ],
+ )
+
+ policy_profiles = module.params['policy_profiles']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+ manageiq_policies = ManageIQPolicies(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+ else:
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py
new file mode 100644
index 00000000..7f55b55b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_provider.py
@@ -0,0 +1,928 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: manageiq_provider
+short_description: Management of providers in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+  - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
+ choices: ['absent', 'present', 'refresh']
+ default: 'present'
+ name:
+ type: str
+ description: The provider's name.
+ required: true
+ type:
+ type: str
+ description: The provider's type.
+ choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+ zone:
+ type: str
+ description: The ManageIQ zone name that will manage the provider.
+ default: 'default'
+ provider_region:
+ type: str
+ description: The provider region name to connect to (e.g. AWS region for Amazon).
+ host_default_vnc_port_start:
+ type: str
+ description: The first port in the host VNC range. defaults to None.
+ host_default_vnc_port_end:
+ type: str
+ description: The last port in the host VNC range. defaults to None.
+ subscription:
+ type: str
+ description: Microsoft Azure subscription ID. defaults to None.
+ project:
+ type: str
+ description: Google Compute Engine Project ID. defaults to None.
+ azure_tenant_id:
+ type: str
+ description: Tenant ID. defaults to None.
+ aliases: [ keystone_v3_domain_id ]
+ tenant_mapping_enabled:
+ type: bool
+ default: 'no'
+ description: Whether to enable mapping of existing tenants. defaults to False.
+ api_version:
+ type: str
+ description: The OpenStack Keystone API version. defaults to None.
+ choices: ['v2', 'v3']
+
+ provider:
+    description: Default endpoint connection information, required if state is present.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ metrics:
+ description: Metrics endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ type: bool
+ default: 'yes'
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+ path:
+ type: str
+ description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+
+ alerts:
+ description: Alerts endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's api hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's api port.
+ userid:
+ type: str
+ description: Provider's api endpoint authentication userid. defaults to None.
+ password:
+ type: str
+ description: Provider's api endpoint authentication password. defaults to None.
+ auth_key:
+ type: str
+ description: Provider's api endpoint authentication bearer token. defaults to None.
+ validate_certs:
+ type: bool
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ default: true
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. defaults to None.
+
+ ssh_keypair:
+ description: SSH key pair used for SSH connections to all hosts in this provider.
+ suboptions:
+ hostname:
+ type: str
+ description: Director hostname.
+ required: true
+ userid:
+ type: str
+ description: SSH username.
+ auth_key:
+ type: str
+ description: SSH private key.
+ validate_certs:
+ description:
+ - Whether certificates should be verified for connections.
+ type: bool
+ default: yes
+ aliases: [ verify_ssl ]
+'''
+
+EXAMPLES = '''
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+    type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ role: 'hawkular'
+ hostname: 'example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1:80'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'absent'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+ community.general.manageiq_provider:
+ name: 'EngAmazon'
+ type: 'Amazon'
+ state: 'present'
+ provider:
+ hostname: 'amazon.example.com'
+ userid: 'hello'
+ password: 'world'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'RHEV'
+ type: 'oVirt'
+ state: 'present'
+ provider:
+ hostname: 'rhev01.example.com'
+ userid: 'admin@internal'
+ password: 'password'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ hostname: 'metrics.example.com'
+ path: 'ovirt_engine_history'
+ userid: 'user_id_metrics'
+ password: 'password_metrics'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+- name: Create a new VMware provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngVMware'
+ type: 'VMware'
+ state: 'present'
+ provider:
+ hostname: 'vcenter.example.com'
+ host_default_vnc_port_start: 5800
+ host_default_vnc_port_end: 5801
+ userid: 'root'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+- name: Create a new Azure provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngAzure'
+ type: 'Azure'
+ provider_region: 'northeurope'
+ subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
+ azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
+ state: 'present'
+ provider:
+ hostname: 'azure.example.com'
+ userid: 'e272bd74-f661-484f-b223-88dd128a4049'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://cf-6af0.rhpds.opentlc.com'
+ username: 'admin'
+ password: 'password'
+ validate_certs: false
+
+- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
+ community.general.manageiq_provider:
+ name: 'EngDirector'
+ type: 'Director'
+ api_version: 'v3'
+ state: 'present'
+ provider:
+ hostname: 'director.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ ssh_keypair:
+ hostname: director.example.com
+ userid: heat-admin
+ auth_key: 'SecretSSHPrivateKey'
+
+- name: Create a new OpenStack provider in ManageIQ with amqp metrics
+ community.general.manageiq_provider:
+ name: 'EngOpenStack'
+ type: 'OpenStack'
+ api_version: 'v3'
+ state: 'present'
+ provider_region: 'europe'
+ tenant_mapping_enabled: 'False'
+ keystone_v3_domain_id: 'mydomain'
+ provider:
+ hostname: 'openstack.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ role: amqp
+ hostname: 'amqp.example.com'
+ security_protocol: 'non-ssl'
+ port: 5666
+ userid: admin
+ password: password
+
+
+- name: Create a new GCE provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngGoogle'
+ type: 'GCE'
+ provider_region: 'europe-west1'
+ project: 'project1'
+ state: 'present'
+ provider:
+ hostname: 'gce.example.com'
+ auth_key: 'google_json_key'
+ validate_certs: 'false'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+def supported_providers():
+ return dict(
+ Openshift=dict(
+ class_name='ManageIQ::Providers::Openshift::ContainerManager',
+ authtype='bearer',
+ default_role='default',
+ metrics_role='prometheus',
+ alerts_role='prometheus_alerts',
+ ),
+ Amazon=dict(
+ class_name='ManageIQ::Providers::Amazon::CloudManager',
+ ),
+ oVirt=dict(
+ class_name='ManageIQ::Providers::Redhat::InfraManager',
+ default_role='default',
+ metrics_role='metrics',
+ ),
+ VMware=dict(
+ class_name='ManageIQ::Providers::Vmware::InfraManager',
+ ),
+ Azure=dict(
+ class_name='ManageIQ::Providers::Azure::CloudManager',
+ ),
+ Director=dict(
+ class_name='ManageIQ::Providers::Openstack::InfraManager',
+ ssh_keypair_role="ssh_keypair"
+ ),
+ OpenStack=dict(
+ class_name='ManageIQ::Providers::Openstack::CloudManager',
+ ),
+ GCE=dict(
+ class_name='ManageIQ::Providers::Google::CloudManager',
+ ),
+ )
+
+
+def endpoint_list_spec():
+ return dict(
+ provider=dict(type='dict', options=endpoint_argument_spec()),
+ metrics=dict(type='dict', options=endpoint_argument_spec()),
+ alerts=dict(type='dict', options=endpoint_argument_spec()),
+ ssh_keypair=dict(type='dict', options=endpoint_argument_spec()),
+ )
+
+
+def endpoint_argument_spec():
+ return dict(
+ role=dict(),
+ hostname=dict(required=True),
+ port=dict(type='int'),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ certificate_authority=dict(),
+ security_protocol=dict(
+ choices=[
+ 'ssl-with-validation',
+ 'ssl-with-validation-custom-ca',
+ 'ssl-without-validation',
+ 'non-ssl',
+ ],
+ ),
+ userid=dict(),
+ password=dict(no_log=True),
+ auth_key=dict(no_log=True),
+ subscription=dict(no_log=True),
+ project=dict(),
+ uid_ems=dict(),
+ path=dict(),
+ )
+
+
+def delete_nulls(h):
+    """ Remove None entries from a dict, recursing into nested dicts and lists
+
+    Returns:
+        the structure without None dict values
+ """
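+    # e.g. (illustrative): {'a': 1, 'b': None, 'c': {'d': None}} -> {'a': 1, 'c': {}}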
+ if isinstance(h, list):
+ return [delete_nulls(i) for i in h]
+ if isinstance(h, dict):
+ return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
+
+ return h
+
+
+class ManageIQProvider(object):
+ """
+ Object to execute provider management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def class_name_to_type(self, class_name):
+ """ Convert class_name to type
+
+ Returns:
+ the type
+ """
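+        # e.g. 'ManageIQ::Providers::Amazon::CloudManager' -> 'Amazon'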
+ out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
+ if len(out) == 1:
+ return out[0]
+
+ return None
+
+ def zone_id(self, name):
+ """ Search for zone id by zone name.
+
+ Returns:
+            the zone id, or fails the module if the zone is not found.
+ """
+ zone = self.manageiq.find_collection_resource_by('zones', name=name)
+ if not zone: # zone doesn't exist
+ self.module.fail_json(
+ msg="zone %s does not exist in manageiq" % (name))
+
+ return zone['id']
+
+ def provider(self, name):
+ """ Search for provider object by name.
+
+ Returns:
+ the provider, or None if provider not found.
+ """
+ return self.manageiq.find_collection_resource_by('providers', name=name)
+
+ def build_connection_configurations(self, provider_type, endpoints):
+ """ Build "connection_configurations" objects from
+ requested endpoints provided by user
+
+ Returns:
+ the user requested provider endpoints list
+ """
+ connection_configurations = []
+ endpoint_keys = endpoint_list_spec().keys()
+ provider_defaults = supported_providers().get(provider_type, {})
+
+ # get endpoint defaults
+ endpoint = endpoints.get('provider')
+ default_auth_key = endpoint.get('auth_key')
+
+ # build a connection_configuration object for each endpoint
+ for endpoint_key in endpoint_keys:
+ endpoint = endpoints.get(endpoint_key)
+ if endpoint:
+ # get role and authtype
+ role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
+ if role == 'default':
+ authtype = provider_defaults.get('authtype') or role
+ else:
+ authtype = role
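+                # e.g. for an Openshift 'metrics' endpoint the default role is
+                # 'prometheus', which is then also used as the authtype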
+
+ # set a connection_configuration
+ connection_configurations.append({
+ 'endpoint': {
+ 'role': role,
+ 'hostname': endpoint.get('hostname'),
+ 'port': endpoint.get('port'),
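+                        # validate_certs is converted to the 0/1 verify_ssl flag sent in the resource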
+ 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)],
+ 'security_protocol': endpoint.get('security_protocol'),
+ 'certificate_authority': endpoint.get('certificate_authority'),
+ 'path': endpoint.get('path'),
+ },
+ 'authentication': {
+ 'authtype': authtype,
+ 'userid': endpoint.get('userid'),
+ 'password': endpoint.get('password'),
+ 'auth_key': endpoint.get('auth_key') or default_auth_key,
+ }
+ })
+
+ return connection_configurations
+
+ def delete_provider(self, provider):
+ """ Deletes a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Edit a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ connection_configurations=endpoints,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ )
+
+        # NOTE: we do not diff the requested provider against the current one.
+        # Endpoints are always submitted with their password or auth_key, since
+        # the current password or auth_key cannot be read back for comparison,
+        # so every edit request is sent to the ManageIQ API without comparing
+        # it to the current state.
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to update provider
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the provider %s: %s" % (provider['name'], result))
+
+ def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Creates the provider in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ connection_configurations=endpoints,
+ )
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to create a new provider
+ try:
+ url = '%s/providers' % (self.api_url)
+ result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the provider %s: %s" % (name, result['results']))
+
+ def refresh(self, provider, name):
+ """ Trigger provider refresh.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='refresh')
+ except Exception as e:
+ self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="refreshing provider %s" % name)
+
+
+def main():
+ zone_id = None
+ endpoints = []
+ argument_spec = dict(
+ state=dict(choices=['absent', 'present', 'refresh'], default='present'),
+ name=dict(required=True),
+ zone=dict(default='default'),
+ provider_region=dict(),
+ host_default_vnc_port_start=dict(),
+ host_default_vnc_port_end=dict(),
+ subscription=dict(),
+ project=dict(),
+ azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
+ tenant_mapping_enabled=dict(default=False, type='bool'),
+ api_version=dict(choices=['v2', 'v3']),
+ type=dict(choices=supported_providers().keys()),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+ # add the endpoint arguments to the arguments
+ argument_spec.update(endpoint_list_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['provider']),
+ ('state', 'refresh', ['name'])],
+ required_together=[
+ ['host_default_vnc_port_start', 'host_default_vnc_port_end']
+ ],
+ )
+
+ name = module.params['name']
+ zone_name = module.params['zone']
+ provider_type = module.params['type']
+ raw_endpoints = module.params
+ provider_region = module.params['provider_region']
+ host_default_vnc_port_start = module.params['host_default_vnc_port_start']
+ host_default_vnc_port_end = module.params['host_default_vnc_port_end']
+ subscription = module.params['subscription']
+ uid_ems = module.params['azure_tenant_id']
+ project = module.params['project']
+ tenant_mapping_enabled = module.params['tenant_mapping_enabled']
+ api_version = module.params['api_version']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_provider = ManageIQProvider(manageiq)
+
+ provider = manageiq_provider.provider(name)
+
+ # provider should not exist
+ if state == "absent":
+ # if we have a provider, delete it
+ if provider:
+ res_args = manageiq_provider.delete_provider(provider)
+ # if we do not have a provider, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+            msg="provider %s does not exist in manageiq" % (name))
+
+ # provider should exist
+ if state == "present":
+        # fill in data the user did not explicitly give
+ if zone_name:
+ zone_id = manageiq_provider.zone_id(zone_name)
+
+ # if we do not have a provider_type, use the current provider_type
+ if provider and not provider_type:
+ provider_type = manageiq_provider.class_name_to_type(provider['type'])
+
+        # a provider type is required
+ if not provider_type:
+ manageiq_provider.module.fail_json(
+ msg="missing required argument: provider_type")
+
+ # check supported_providers types
+ if provider_type not in supported_providers().keys():
+ manageiq_provider.module.fail_json(
+ msg="provider_type %s is not supported" % (provider_type))
+
+ # build "connection_configurations" objects from user requested endpoints
+ # "provider" is a required endpoint, if we have it, we have endpoints
+ if raw_endpoints.get("provider"):
+ endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
+
+ # if we have a provider, edit it
+ if provider:
+ res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+ # if we do not have a provider, create it
+ else:
+ res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+
+ # refresh provider (trigger sync)
+ if state == "refresh":
+ if provider:
+ res_args = manageiq_provider.refresh(provider, name)
+ else:
+ res_args = dict(
+ changed=False,
+            msg="provider %s does not exist in manageiq" % (name))
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py
new file mode 100644
index 00000000..68de2324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tags.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags
+
+short_description: Management of resource tags in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - tags should not exist,
+ - present - tags should exist,
+ - list - list current tags.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ tags:
+ type: list
+ description:
+ - tags - list of dictionaries, each includes 'name' and 'category' keys.
+ - required if state is present or absent.
+ resource_type:
+ type: str
+ description:
+ - the relevant resource type in manageiq
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - the relevant resource name in manageiq
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create new tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Remove tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: List current tags for a provider in ManageIQ
+ community.general.manageiq_tags:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def query_resource_id(manageiq, resource_type, resource_name):
+ """ Query the resource name in ManageIQ.
+
+ Returns:
+        the resource id if it exists in manageiq, fails the module otherwise.
+ """
+ resource = manageiq.find_collection_resource_by(resource_type, name=resource_name)
+ if resource:
+ return resource["id"]
+ else:
+ msg = "{resource_name} {resource_type} does not exist in manageiq".format(
+ resource_name=resource_name, resource_type=resource_type)
+ manageiq.module.fail_json(msg=msg)
+
+
+class ManageIQTags(object):
+ """
+    Object to execute tag management operations on manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def full_tag_name(self, tag):
+ """ Returns the full tag name in manageiq
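+
+        Example:
+            {'category': 'environment', 'name': 'prod'} => '/managed/environment/prod'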
+ """
+ return '/managed/{tag_category}/{tag_name}'.format(
+ tag_category=tag['category'],
+ tag_name=tag['name'])
+
+ def clean_tag_object(self, tag):
+ """ Clean a tag object to have human readable form of:
+ {
+ full_name: STR,
+ name: STR,
+ display_name: STR,
+ category: STR
+ }
+ """
+ full_name = tag.get('name')
+ categorization = tag.get('categorization', {})
+
+ return dict(
+ full_name=full_name,
+ name=categorization.get('name'),
+ display_name=categorization.get('display_name'),
+ category=categorization.get('category', {}).get('name'))
+
+ def query_resource_tags(self):
+        """ Returns a list of the tag objects assigned to the resource
+ """
+ url = '{resource_url}/tags?expand=resources&attributes=categorization'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} tags: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api tag object to look like:
+ # {full_name: STR, name: STR, display_name: STR, category: STR}
+ tags = [self.clean_tag_object(tag) for tag in resources]
+
+ return tags
+
+ def tags_to_update(self, tags, action):
+        """ Create a list of tags that need to be updated in ManageIQ.
+
+        Returns:
+            the list of tags that need to be posted for the requested
+            action.
+ """
+ tags_to_post = []
+ assigned_tags = self.query_resource_tags()
+
+        # make a set of the assigned full tag name strings
+ # e.g. ['/managed/environment/prod', ...]
+ assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+ for tag in tags:
+ assigned = self.full_tag_name(tag) in assigned_tags_set
+
+ if assigned and action == 'unassign':
+ tags_to_post.append(tag)
+ elif (not assigned) and action == 'assign':
+ tags_to_post.append(tag)
+
+ return tags_to_post
+
+ def assign_or_unassign_tags(self, tags, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of tags needed to be changed
+ tags_to_post = self.tags_to_update(tags, action)
+ if not tags_to_post:
+ return dict(
+ changed=False,
+ msg="Tags already {action}ed, nothing to do".format(action=action))
+
+ # try to assign or unassign tags to resource
+ url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=tags)
+ except Exception as e:
+ msg = "Failed to {action} tag: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed tags
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed tags".format(action=action))
+
+
+def main():
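+ # map the module state to the corresponding ManageIQ tags API action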
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ tags=dict(type='list'),
+ resource_name=dict(required=True, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['tags']),
+ ('state', 'absent', ['tags'])
+ ],
+ )
+
+ tags = module.params['tags']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ resource_id = query_resource_id(manageiq, resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+ else:
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py
new file mode 100644
index 00000000..3ec174cf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_tenant.py
@@ -0,0 +1,557 @@
+#!/usr/bin/python
+#
+# (c) 2018, Evert Mulder (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+- manageiq-client
+options:
+ state:
+ type: str
+ description:
+ - absent - tenant should not exist, present - tenant should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The tenant name.
+ required: true
+ default: null
+ description:
+ type: str
+ description:
+ - The tenant description.
+ required: true
+ default: null
+ parent_id:
+ type: int
+ description:
+ - The id of the parent tenant. If not supplied the root tenant is used.
+ - The C(parent_id) takes precedence over C(parent) when both are supplied.
+ required: false
+ default: null
+ parent:
+ type: str
+ description:
+ - The name of the parent tenant. If not supplied and no C(parent_id) is supplied the root tenant is used.
+ required: false
+ default: null
+ quotas:
+ type: dict
+ description:
+ - The tenant quotas.
+ - All parameters are case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(cpu_allocated) (int): use null to remove the quota.'
+ - ' - C(mem_allocated) (GB): use null to remove the quota.'
+ - ' - C(storage_allocated) (GB): use null to remove the quota.'
+ - ' - C(vms_allocated) (int): use null to remove the quota.'
+ - ' - C(templates_allocated) (int): use null to remove the quota.'
+ required: false
+ default: null
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'My Company'
+ description: 'My company name'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ description: 'Manufacturing department'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ parent_id: 1
+ quotas:
+ cpu_allocated: 100
+ mem_allocated: 50
+ vms_allocated: null
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+
+- name: Delete a tenant in ManageIQ using a token
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+'''
+
+RETURN = '''
+tenant:
+ description: The tenant.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The tenant id
+ returned: success
+ type: int
+ name:
+ description: The tenant name
+ returned: success
+ type: str
+ description:
+ description: The tenant description
+ returned: success
+ type: str
+ parent_id:
+ description: The id of the parent tenant
+ returned: success
+ type: int
+ quotas:
+ description: List of tenant quotas
+ returned: success
+ type: list
+ sample:
+ cpu_allocated: 100
+ mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+ """
+ Object to execute tenant management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def tenant(self, name, parent_id, parent):
+ """ Search for tenant object by name and parent_id or parent
+ or the root tenant if no parent or parent_id is supplied.
+ Returns:
+ the parent tenant (None when the root tenant is used) and
+ the tenant, or None if the tenant was not found.
+ """
+
+ if parent_id:
+ parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+ parent_tenant = parent_tenant_res[0]
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
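+ # ancestry is a '/'-separated chain of ancestor ids (for example '1/2/5');
+ # the last element is the id of the direct parent tenant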
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if int(tenant_parent_id) == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ if parent:
+ parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+ if len(parent_tenant_res) > 1:
+ self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
+
+ parent_tenant = parent_tenant_res[0]
+ parent_id = int(parent_tenant['id'])
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ # No parent or parent_id supplied, so select the root tenant
+ return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+ def compare_tenant(self, tenant, name, description):
+ """ Compare tenant fields with new field values.
+
+ Returns:
+ False if the tenant fields differ from the new field values, True otherwise.
+ """
+ found_difference = (
+ (name and tenant['name'] != name) or
+ (description and tenant['description'] != description)
+ )
+
+ return not found_difference
+
+ def delete_tenant(self, tenant):
+ """ Deletes a tenant from manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ try:
+ url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_tenant(self, tenant, name, description):
+ """ Edit a manageiq tenant.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+ # check if we need to update (compare_tenant returns True when no difference is found)
+ if self.compare_tenant(tenant, name, description):
+ return dict(
+ changed=False,
+ msg="tenant %s is not changed." % tenant['name'],
+ tenant=tenant['_data'])
+
+ # try to update tenant
+ try:
+ result = self.client.post(tenant['href'], action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+ def create_tenant(self, name, description, parent_tenant):
+ """ Creates the tenant in manageiq.
+
+ Returns:
+ dict with `msg`, `changed` and `tenant_id`
+ """
+ parent_id = parent_tenant['id']
+ # check for required arguments
+ for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/tenants' % self.api_url
+
+ resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ tenant_id = result['results'][0]['id']
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+ tenant_id=tenant_id)
+
+ def tenant_quota(self, tenant, quota_key):
+ """ Search for tenant quota object by tenant and quota_key.
+ Returns:
+ the matching quota resources for the tenant (an empty list if none exist).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+ return tenant_quotas['resources']
+
+ def tenant_quotas(self, tenant):
+ """ Search for tenant quotas object by tenant.
+ Returns:
+ the quota resources for the tenant (an empty list if none exist).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+ return tenant_quotas['resources']
+
+ def update_tenant_quotas(self, tenant, quotas):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+
+ changed = False
+ messages = []
+ for quota_key, quota_value in quotas.items():
+ current_quota_filtered = self.tenant_quota(tenant, quota_key)
+ if current_quota_filtered:
+ current_quota = current_quota_filtered[0]
+ else:
+ current_quota = None
+
+ if quota_value:
+ # Convert GB values supplied by the user to bytes for the API
+ if quota_key in ['storage_allocated', 'mem_allocated']:
+ quota_value_int = int(quota_value) * 1024 * 1024 * 1024
+ else:
+ quota_value_int = int(quota_value)
+ if current_quota:
+ res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+ else:
+ res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+ else:
+ if current_quota:
+ res = self.delete_tenant_quota(tenant, current_quota)
+ else:
+ res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+ if res['changed']:
+ changed = True
+
+ messages.append(res['msg'])
+
+ return dict(
+ changed=changed,
+ msg=', '.join(messages))
+
+ def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+ """ Update the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+
+ if current_quota['value'] == quota_value:
+ return dict(
+ changed=False,
+ msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+ else:
+
+ url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+ resource = {'value': quota_value}
+ try:
+ self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated tenant quota %s" % quota_key)
+
+ def create_tenant_quota(self, tenant, quota_key, quota_value):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ url = '%s/quotas' % (tenant['href'])
+ resource = {'name': quota_key, 'value': quota_value}
+ try:
+ self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant quota %s" % quota_key)
+
+ def delete_tenant_quota(self, tenant, quota):
+ """ deletes the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ try:
+ result = self.client.post(quota['href'], action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def create_tenant_response(self, tenant, parent_tenant):
+ """ Creates the ansible result object from a manageiq tenant entity
+
+ Returns:
+ a dict with the tenant id, name, description, parent id
+ and quotas
+ """
+ tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+ try:
+ ancestry = tenant['ancestry']
+ tenant_parent_id = ancestry.split("/")[-1]
+ except AttributeError:
+ # The root tenant does not return the ancestry attribute
+ tenant_parent_id = None
+
+ return dict(
+ id=tenant['id'],
+ name=tenant['name'],
+ description=tenant['description'],
+ parent_id=tenant_parent_id,
+ quotas=tenant_quotas
+ )
+
+ @staticmethod
+ def create_tenant_quotas_response(tenant_quotas):
+ """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+ Returns:
+ a dict with the applied quotas, name and value
+ """
+
+ if not tenant_quotas:
+ return {}
+
+ result = {}
+ for quota in tenant_quotas:
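+ # quotas with unit 'bytes' are reported in bytes by the API; convert back to GB
+ # so the result mirrors the values supplied to the module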
+ if quota['unit'] == 'bytes':
+ value = float(quota['value']) / (1024 * 1024 * 1024)
+ else:
+ value = quota['value']
+ result[quota['name']] = value
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ description=dict(required=True, type='str'),
+ parent_id=dict(required=False, type='int'),
+ parent=dict(required=False, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ quotas=dict(type='dict', default={})
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ name = module.params['name']
+ description = module.params['description']
+ parent_id = module.params['parent_id']
+ parent = module.params['parent']
+ state = module.params['state']
+ quotas = module.params['quotas']
+
+ manageiq = ManageIQ(module)
+ manageiq_tenant = ManageIQTenant(manageiq)
+
+ parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+ # tenant should not exist
+ if state == "absent":
+ # if we have a tenant, delete it
+ if tenant:
+ res_args = manageiq_tenant.delete_tenant(tenant)
+ # if we do not have a tenant, nothing to do
+ else:
+ if parent_id:
+ msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+ else:
+ msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+ res_args = dict(
+ changed=False,
+ msg=msg)
+
+ # tenant should exist
+ if state == "present":
+ # if we have a tenant, edit it
+ if tenant:
+ res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+ # if we do not have a tenant, create it
+ else:
+ res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+ tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+ # quotas were supplied and we have a tenant to apply them to
+ if quotas:
+ tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+ if tenant_quotas_res['changed']:
+ res_args['changed'] = True
+ res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
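+ # reload the tenant so the returned facts include the current tenant_quotas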
+ tenant.reload(expand='resources', attributes=['tenant_quotas'])
+ res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py
new file mode 100644
index 00000000..8905dde2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/manageiq/manageiq_user.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+#
+# (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ.
+extends_documentation_fragment:
+- community.general.manageiq
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+options:
+ state:
+ type: str
+ description:
+ - absent - user should not exist, present - user should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ userid:
+ type: str
+ description:
+ - The unique userid in manageiq, often referred to as the username.
+ required: true
+ name:
+ type: str
+ description:
+ - The user's full name.
+ password:
+ type: str
+ description:
+ - The user's password.
+ group:
+ type: str
+ description:
+ - The name of the group to which the user belongs.
+ email:
+ type: str
+ description:
+ - The user's e-mail address.
+ update_password:
+ type: str
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Create a new user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Delete a user in ManageIQ using a token
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False
+
+- name: Update email of user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: False
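+
+# A hedged, illustrative example (not in the original module docs): with the
+# update_password option documented above set to 'on_create', the password is
+# only set when the user is first created and left untouched on later runs.
+- name: Ensure a user exists without overwriting an existing password
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ update_password: 'on_create'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: False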
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+ """
+ Object to execute user management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group_id(self, description):
+ """ Search for group id by group description.
+
+ Returns:
+ the group id, or fails the module if the group is not found.
+ """
+ group = self.manageiq.find_collection_resource_by('groups', description=description)
+ if not group: # group doesn't exist
+ self.module.fail_json(
+ msg="group %s does not exist in manageiq" % (description))
+
+ return group['id']
+
+ def user(self, userid):
+ """ Search for user object by userid.
+
+ Returns:
+ the user, or None if user not found.
+ """
+ return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+ def compare_user(self, user, name, group_id, password, email):
+ """ Compare user fields with new field values.
+
+ Returns:
+ False if the user fields differ from the new field values, True otherwise.
+ """
+ found_difference = (
+ (name and user['name'] != name) or
+ (password is not None) or
+ (email and user['email'] != email) or
+ (group_id and user['current_group_id'] != group_id)
+ )
+
+ return not found_difference
+
+ def delete_user(self, user):
+ """ Deletes a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/users/%s' % (self.api_url, user['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_user(self, user, name, group, password, email):
+ """ Edit a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ group_id = None
+ url = '%s/users/%s' % (self.api_url, user['id'])
+
+ resource = dict(userid=user['userid'])
+ if group is not None:
+ group_id = self.group_id(group)
+ resource['group'] = dict(id=group_id)
+ if name is not None:
+ resource['name'] = name
+ if email is not None:
+ resource['email'] = email
+
+ # if there is a password param, but 'update_password' is 'on_create'
+ # then discard the password (since we're editing an existing user)
+ if self.module.params['update_password'] == 'on_create':
+ password = None
+ if password is not None:
+ resource['password'] = password
+
+ # check if we need to update (compare_user returns True when no difference is found)
+ if self.compare_user(user, name, group_id, password, email):
+ return dict(
+ changed=False,
+ msg="user %s is not changed." % (user['userid']))
+
+ # try to update user
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+ def create_user(self, userid, name, group, password, email):
+ """ Creates the user in manageiq.
+
+ Returns:
+ the created user id, name, created_on timestamp,
+ updated_on timestamp, userid and current_group_id.
+ """
+ # check for required arguments
+ for key, value in dict(name=name, group=group, password=password).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % (key))
+
+ group_id = self.group_id(group)
+ url = '%s/users' % (self.api_url)
+
+ resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+ if email is not None:
+ resource['email'] = email
+
+ # try to create a new user
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+ argument_spec = dict(
+ userid=dict(required=True, type='str'),
+ name=dict(),
+ password=dict(no_log=True),
+ group=dict(),
+ email=dict(),
+ state=dict(choices=['absent', 'present'], default='present'),
+ update_password=dict(choices=['always', 'on_create'],
+ default='always'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ userid = module.params['userid']
+ name = module.params['name']
+ password = module.params['password']
+ group = module.params['group']
+ email = module.params['email']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_user = ManageIQUser(manageiq)
+
+ user = manageiq_user.user(userid)
+
+ # user should not exist
+ if state == "absent":
+ # if we have a user, delete it
+ if user:
+ res_args = manageiq_user.delete_user(user)
+ # if we do not have a user, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="user %s: does not exist in manageiq" % (userid))
+
+ # user should exist
+ if state == "present":
+ # if we have a user, edit it
+ if user:
+ res_args = manageiq_user.edit_user(user, name, group, password, email)
+ # if we do not have a user, create it
+ else:
+ res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_facts.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py
new file mode 100644
index 00000000..19aa7a27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_datacenter_info.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+options:
+ name:
+ description:
+ - Data Center name.
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+- ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_datacenter_facts', 'community.general.oneview_datacenter_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_datacenter_facts' module has been renamed to 'oneview_datacenter_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_facts.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py
new file mode 100644
index 00000000..7963de74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_enclosure_info.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Enclosure name.
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_enclosure_facts', 'community.general.oneview_enclosure_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_enclosure_facts' module has been renamed to 'oneview_enclosure_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py
new file mode 100644
index 00000000..a81e144a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network
+short_description: Manage OneView Ethernet Network resources
+description:
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ - C(default_bandwidth_reset) will reset the network connection template to the default.
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - Dictionary with Ethernet Network properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Ethernet Network is present using the default configuration
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ vlanId: '201'
+ delegate_to: localhost
+
+- name: Update the Ethernet Network changing bandwidth and purpose
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ newName: 'Renamed Ethernet Network'
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is absent
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: 'New Ethernet Network'
+ delegate_to: localhost
+
+- name: Create Ethernet networks in bulk
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ vlanIdRange: '1-10,15,17'
+ purpose: General
+ namePrefix: TestNetwork
+ smartLink: false
+ privateNetwork: false
+ bandwidth:
+ maximumBandwidth: 10000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Reset to the default network connection template
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: default_bandwidth_reset
+ data:
+ name: 'Test Ethernet Network'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ethernet_network:
+ description: Has the facts about the Ethernet Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+
+ethernet_network_bulk:
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When 'vlanIdRange' attribute is in data argument. Can be null.
+ type: dict
+
+ethernet_network_connection_template:
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On state 'default_bandwidth_reset'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class EthernetNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'Ethernet Network created successfully.'
+ MSG_UPDATED = 'Ethernet Network updated successfully.'
+ MSG_DELETED = 'Ethernet Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
+ MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
+
+ MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
+ MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
+ MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
+ MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
+
+ RESOURCE_FACT_NAME = 'ethernet_network'
+
+ def __init__(self):
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
+ data=dict(type='dict', required=True),
+ )
+
+ super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
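+ # validate_etag_support=True exposes the validate_etag option described in the
+ # community.general.oneview.validateetag documentation fragment.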
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+
+ changed, msg, ansible_facts, resource = False, '', {}, None
+
+ if self.data.get('name'):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ if self.data.get('vlanIdRange'):
+ return self._bulk_present()
+ else:
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+ elif self.state == 'default_bandwidth_reset':
+ changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
+
+ def _present(self, resource):
+
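+ # bandwidth and scopeUris are applied through separate OneView calls below,
+ # so they are removed from the payload before the generic presence check.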
+ bandwidth = self.data.pop('bandwidth', None)
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if bandwidth:
+ if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
+ result['changed'] = True
+ result['msg'] = self.MSG_UPDATED
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
+
+ return result
+
+ def _bulk_present(self):
+ vlan_id_range = self.data['vlanIdRange']
+ result = dict(ansible_facts={})
+ ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ if not ethernet_networks:
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_BULK_CREATED
+
+ else:
+ vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
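+ # Keep only the VLAN IDs that do not have a network yet; those are created below.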
+ for net in ethernet_networks[:]:
+ vlan_ids.remove(net['vlanId'])
+
+ if len(vlan_ids) == 0:
+ result['msg'] = self.MSG_BULK_ALREADY_EXIST
+ result['changed'] = False
+ else:
+ if len(vlan_ids) == 1:
+ self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
+ else:
+ self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
+
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_MISSING_BULK_CREATED
+ result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ return result
+
+ def _update_connection_template(self, ethernet_network, bandwidth):
+
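+ # The bandwidth settings live on the network's connection template; it is
+ # updated only when the requested values differ from the current ones.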
+ if 'connectionTemplateUri' not in ethernet_network:
+ return False, None
+
+ connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
+
+ merged_data = connection_template.copy()
+ merged_data.update({'bandwidth': bandwidth})
+
+ if not self.compare(connection_template, merged_data):
+ connection_template = self.oneview_client.connection_templates.update(merged_data)
+ return True, connection_template
+ else:
+ return False, None
+
+ def _default_bandwidth_reset(self, resource):
+
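+ # Reset the network's connection template bandwidth back to the appliance
+ # default values, reusing _update_connection_template.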
+ if not resource:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
+
+ default_connection_template = self.oneview_client.connection_templates.get_default()
+
+ changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
+
+ return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
+ ethernet_network_connection_template=connection_template)
+
+
+def main():
+ EthernetNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_facts.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
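+ # When invoked under the legacy *_facts name, results are wrapped in
+ # ansible_facts and a deprecation warning is issued.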
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
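+ # With a name, look the network up and optionally gather the associated
+ # profiles/uplink groups; otherwise return all networks using the facts params.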
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py
new file mode 100644
index 00000000..b1790932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated and filtered information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+- ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_ethernet_network_facts', 'community.general.oneview_ethernet_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_ethernet_network_facts' module has been renamed to 'oneview_ethernet_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=info)
+ else:
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py
new file mode 100644
index 00000000..45fa035c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network
+short_description: Manage OneView Fibre Channel Network resources
+description:
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+requirements:
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - Dictionary with the Fibre Channel Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Fibre Channel Network is present using the default configuration
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+
+- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ fabricType: 'DirectAttach'
+
+- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+
+- name: Ensure that the Fibre Channel Network is absent
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: absent
+ data:
+ name: 'New FC Network'
+'''
+
+RETURN = '''
+fc_network:
+ description: Has the facts about the managed OneView FC Network.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FC Network created successfully.'
+ MSG_UPDATED = 'FC Network updated successfully.'
+ MSG_DELETED = 'FC Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FC Network is already present.'
+ MSG_ALREADY_ABSENT = 'FC Network is already absent.'
+ RESOURCE_FACT_NAME = 'fc_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent']))
+
+ super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fc_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self._present(resource)
+ else:
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fc_network', scope_uris)
+ return result
+
+
+def main():
+ FcNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py
new file mode 100644
index 00000000..2fad241a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fc_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fc_network_facts', 'community.general.oneview_fc_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fc_network_facts' module has been renamed to 'oneview_fc_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(fc_networks=fc_networks))
+ else:
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py
new file mode 100644
index 00000000..79d8ae21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network
+short_description: Manage OneView FCoE Network resources
+description:
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+requirements:
+ - "python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+options:
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with FCoE Network properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that FCoE Network is present using the default configuration
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: Test FCoE Network
+ vlanId: 201
+ delegate_to: localhost
+
+- name: Update the FCoE Network scopes
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: New FCoE Network
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+ delegate_to: localhost
+
+- name: Ensure that FCoE Network is absent
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: New FCoE Network
+ delegate_to: localhost
+'''
+
+RETURN = '''
+fcoe_network:
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FCoE Network created successfully.'
+ MSG_UPDATED = 'FCoE Network updated successfully.'
+ MSG_DELETED = 'FCoE Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
+ MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
+ RESOURCE_FACT_NAME = 'fcoe_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent']))
+
+ super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fcoe_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
+ return result
+
+
+def main():
+ FcoeNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about an FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py
new file mode 100644
index 00000000..8c1980df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - FCoE Network name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about an FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_fcoe_network_facts', 'community.general.oneview_fcoe_network_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_fcoe_network_facts' module has been renamed to 'oneview_fcoe_network_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(fcoe_networks=fcoe_networks))
+ else:
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py
new file mode 100644
index 00000000..8ca49e21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group
+short_description: Manage OneView Logical Interconnect Group resources
+description:
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ C(absent) will remove the resource from OneView, if it exists.
+ C(present) will ensure data properties are compliant with OneView.
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+ - Dictionary with the Logical Interconnect Group properties.
+ required: true
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Logical Interconnect Group is present
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ uplinkSets: []
+ enclosureType: C7000
+ interconnectMapTemplate:
+ interconnectMapEntryTemplates:
+ - logicalDownlinkUri: ~
+ logicalLocation:
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
+ permittedInterconnectTypeName: HP VC Flex-10/10D Module
+ # Alternatively you can inform permittedInterconnectTypeUri
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group has the specified scopes
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ scopeUris:
+ - /rest/scopes/00SC123456
+ - /rest/scopes/01SC123456
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is present with name 'Test'
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: New Logical Interconnect Group
+ newName: Test
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is absent
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: New Logical Interconnect Group
+ delegate_to: localhost
+'''
+
+RETURN = '''
+logical_interconnect_group:
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class LogicalInterconnectGroupModule(OneViewModuleBase):
+ MSG_CREATED = 'Logical Interconnect Group created successfully.'
+ MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
+ MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
+ MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
+ MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'
+
+ RESOURCE_FACT_NAME = 'logical_interconnect_group'
+
+ def __init__(self):
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict')
+ )
+
+ super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.logical_interconnect_groups
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+
+ self.__replace_name_by_uris(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris)
+
+ return result
+
+ def __replace_name_by_uris(self, data):
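+ # Translate permittedInterconnectTypeName entries in the interconnect map into
+ # permittedInterconnectTypeUri values, which is what the OneView API expects.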
+ map_template = data.get('interconnectMapTemplate')
+
+ if map_template:
+ map_entry_templates = map_template.get('interconnectMapEntryTemplates')
+ if map_entry_templates:
+ for value in map_entry_templates:
+ permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None)
+ if permitted_interconnect_type_name:
+ value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name(
+ permitted_interconnect_type_name).get('uri')
+
+ def __get_interconnect_type_by_name(self, name):
+ i_type = self.oneview_client.interconnect_types.get_by('name', name)
+ if i_type:
+ return i_type[0]
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+
+def main():
+ LogicalInterconnectGroupModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py
new file mode 100644
index 00000000..16a78309
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(additional_arg_spec=argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_logical_interconnect_group_facts', 'community.general.oneview_logical_interconnect_group_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_logical_interconnect_group_facts' module has been renamed to 'oneview_logical_interconnect_group_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
+ else:
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py
new file mode 100644
index 00000000..cc70d5e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set
+short_description: Manage HPE OneView Network Set resources
+description:
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - Dictionary with the Network Set properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Create a Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ networkUris:
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ delegate_to: localhost
+
+- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ newName: OneViewSDK Test Network Set - Renamed
+ networkUris:
+ - Test Ethernet Network_1
+ delegate_to: localhost
+
+- name: Delete the Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: OneViewSDK Test Network Set - Renamed
+ delegate_to: localhost
+
+- name: Update the Network set with two scopes
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ scopeUris:
+ - /rest/scopes/01SC123456
+ - /rest/scopes/02SC123456
+ delegate_to: localhost
+'''
+
+RETURN = '''
+network_set:
+ description: Has the facts about the Network Set.
+ returned: On state 'present', but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class NetworkSetModule(OneViewModuleBase):
+ MSG_CREATED = 'Network Set created successfully.'
+ MSG_UPDATED = 'Network Set updated successfully.'
+ MSG_DELETED = 'Network Set deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Network Set is already present.'
+ MSG_ALREADY_ABSENT = 'Network Set is already absent.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
+ RESOURCE_FACT_NAME = 'network_set'
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict'))
+
+ def __init__(self):
+ super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.network_sets
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
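+        # scopeUris is removed before the idempotent create/update and then applied
+        # afterwards through the separate scope-assignment helper.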
+ scope_uris = self.data.pop('scopeUris', None)
+ self._replace_network_name_by_uri(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris)
+ return result
+
+ def _get_ethernet_network_by_name(self, name):
+ result = self.oneview_client.ethernet_networks.get_by('name', name)
+ return result[0] if result else None
+
+ def _get_network_uri(self, network_name_or_uri):
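+        # Accepts either an Ethernet network URI or a name; names are resolved to URIs,
+        # and unknown names raise a resource-not-found error.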
+ if network_name_or_uri.startswith('/rest/ethernet-networks'):
+ return network_name_or_uri
+ else:
+ enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
+ if enet_network:
+ return enet_network['uri']
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
+
+ def _replace_network_name_by_uri(self, data):
+ if 'networkUris' in data:
+ data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']]
+
+
+def main():
+ NetworkSetModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_facts.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
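+        # The 'withoutEthernet' option uses a dedicated endpoint that only supports an
+        # optional name filter; the generic 'params' are applied only on the plain get_all() path.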
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py
new file mode 100644
index 00000000..68c18db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_network_set_info.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ name:
+ description:
+ - Network Set name.
+
+ options:
+ description:
+ - "List with options to gather information about Network Set.
+ Option allowed: C(withoutEthernet).
+ The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.factsparams
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered, and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.is_old_facts = self.module._name in ('oneview_network_set_facts', 'community.general.oneview_network_set_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_network_set_facts' module has been renamed to 'oneview_network_set_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
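+        # The 'withoutEthernet' option uses a dedicated endpoint that only supports an
+        # optional name filter; the generic 'params' are applied only on the plain get_all() path.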
+ if 'withoutEthernet' in self.options:
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False,
+ ansible_facts=dict(network_sets=network_sets))
+ else:
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py
new file mode 100644
index 00000000..57e93475
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ state:
+ description:
+      - Indicates the desired state for the SAN Manager resource.
+ - C(present) ensures data properties are compliant with OneView.
+ - C(absent) removes the resource from OneView, if it exists.
+ - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+      - Dictionary with the SAN Manager properties.
+ required: true
+
+extends_documentation_fragment:
+- community.general.oneview
+- community.general.oneview.validateetag
+
+'''
+
+EXAMPLES = '''
+- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ providerDisplayName: Brocade Network Advisor
+ connectionInfo:
+ - name: Host
+ value: 172.18.15.1
+ - name: Port
+ value: 5989
+ - name: Username
+ value: username
+ - name: Password
+ value: password
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Ensure a Device Manager for the Cisco SAN Provider is present
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.20.1
+ providerDisplayName: Cisco
+ connectionInfo:
+ - name: Host
+ value: 172.18.20.1
+ - name: SnmpPort
+ value: 161
+ - name: SnmpUserName
+ value: admin
+ - name: SnmpAuthLevel
+ value: authnopriv
+ - name: SnmpAuthProtocol
+ value: sha
+ - name: SnmpAuthString
+ value: password
+ delegate_to: localhost
+
+- name: Sets the SAN Manager connection information
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: connection_information_set
+ data:
+ connectionInfo:
+ - name: Host
+ value: '172.18.15.1'
+ - name: Port
+ value: '5989'
+ - name: Username
+ value: 'username'
+ - name: Password
+ value: 'password'
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Refreshes the SAN Manager
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.15.1
+ refreshState: RefreshPending
+ delegate_to: localhost
+
+- name: Delete the SAN Manager recently created
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: '172.18.15.1'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+san_manager:
+ description: Has the OneView facts about the SAN Manager.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
+
+
+class SanManagerModule(OneViewModuleBase):
+ MSG_CREATED = 'SAN Manager created successfully.'
+ MSG_UPDATED = 'SAN Manager updated successfully.'
+ MSG_DELETED = 'SAN Manager deleted successfully.'
+ MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
+ MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
+ MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
+ data=dict(type='dict', required=True)
+ )
+
+ def __init__(self):
+ super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
+ self.resource_client = self.oneview_client.san_managers
+
+ def execute_module(self):
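+        # The SAN Manager is identified by the 'Host' entry of connectionInfo when given,
+        # otherwise by data['name']; without either, the request is rejected.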
+ if self.data.get('connectionInfo'):
+ for connection_hash in self.data.get('connectionInfo'):
+ if connection_hash.get('name') == 'Host':
+ resource_name = connection_hash.get('value')
+ elif self.data.get('name'):
+ resource_name = self.data.get('name')
+ else:
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+            msg += 'If "connectionInfo" is provided, the value of its "Host" entry is used as the "name" of the resource.'
+            raise OneViewModuleValueError(msg)
+
+ resource = self.resource_client.get_by_name(resource_name)
+
+ if self.state == 'present':
+ changed, msg, san_manager = self._present(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ elif self.state == 'absent':
+ return self.resource_absent(resource, method='remove')
+
+ elif self.state == 'connection_information_set':
+ changed, msg, san_manager = self._connection_information_set(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ def _present(self, resource):
+ if not resource:
+ provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+ return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+ resource.pop('connectionInfo', None)
+ merged_data.pop('connectionInfo', None)
+
+ if self.compare(resource, merged_data):
+ return False, self.MSG_ALREADY_PRESENT, resource
+ else:
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _connection_information_set(self, resource):
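+        # Creates the SAN Manager when it does not exist yet; otherwise always pushes the
+        # merged data (minus refreshState), which is why this state is not idempotent.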
+ if not resource:
+ return self._present(resource)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+ merged_data.pop('refreshState', None)
+ if not self.data.get('connectionInfo', None):
+ raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _get_provider_uri_by_display_name(self, data):
+ display_name = data.get('providerDisplayName')
+ provider_uri = self.resource_client.get_provider_uri(display_name)
+
+ if not provider_uri:
+ raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+ return provider_uri
+
+
+def main():
+ SanManagerModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_facts.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+  - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
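+        # A lookup by provider display name yields at most one SAN Manager; it is wrapped
+        # in a list so the result shape matches get_all().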
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
new file mode 100644
index 00000000..c4a6b7a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/oneview/oneview_san_manager_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+  - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+extends_documentation_fragment:
+- community.general.oneview
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(additional_arg_spec=self.argument_spec)
+ self.resource_client = self.oneview_client.san_managers
+ self.is_old_facts = self.module._name in ('oneview_san_manager_facts', 'community.general.oneview_san_manager_facts')
+ if self.is_old_facts:
+ self.module.deprecate("The 'oneview_san_manager_facts' module has been renamed to 'oneview_san_manager_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ def execute_module(self):
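+        # A lookup by provider display name yields at most one SAN Manager; it is wrapped
+        # in a list so the result shape matches get_all().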
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ if self.is_old_facts:
+ return dict(changed=False, ansible_facts=dict(san_managers=resources))
+ else:
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py
new file mode 100644
index 00000000..ea97ecdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_command.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_command
+short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+  - For use with Dell iDRAC operations that require Redfish OEM extensions.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Create BIOS configuration job (schedule BIOS setting update)
+ community.general.idrac_redfish_command:
+ category: Systems
+ command: CreateBiosConfigJob
+ resource_id: System.Embedded.1
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def create_bios_config_job(self):
+ result = {}
+ key = "Bios"
+ jobs = "Jobs"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uris[0])
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
+ "@odata.id"]
+
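+        # Schedule the job by POSTing the pending BIOS settings URI to the Manager's Jobs collection.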
+ payload = {"TargetSettingsURI": set_bios_attr_uri}
+ response = self.post_request(
+ self.root_uri + self.manager_uri + "/" + jobs, payload)
+ if response['ret'] is False:
+ return response
+
+ response_output = response['resp'].__dict__
+ job_id = response_output["headers"]["Location"]
+ job_id = re.search("JID_.+", job_id).group()
+ # Currently not passing job_id back to user but patch is coming
+ return {'ret': True, 'msg': "Config job %s created" % job_id}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["CreateBiosConfigJob"],
+ "Accounts": [],
+ "Manager": []
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
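+    # CreateBiosConfigJob reads the BIOS settings URI from the System resource and then posts
+    # the job to the Manager's Jobs collection, so both resources are located before the command runs.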
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "CreateBiosConfigJob":
+ # execute only if we find a Managers resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ result = rf_utils.create_bios_config_job()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(changed=True, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py
new file mode 100644
index 00000000..485d54cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_config.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_config
+short_description: Manages servers through iDRAC using Dell Redfish APIs
+description:
+  - For use with Dell iDRAC operations that require Redfish OEM extensions.
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ set or update a configuration attribute.
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Category to execute on iDRAC
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC
+ - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+ I(SetSystemAttributes) are mutually exclusive commands when C(category)
+ is I(Manager)
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC
+ type: str
+ manager_attribute_name:
+ required: false
+ description:
+ - (deprecated) name of iDRAC attribute to update
+ type: str
+ manager_attribute_value:
+ required: false
+ description:
+ - (deprecated) value of iDRAC attribute to update
+ type: str
+ manager_attributes:
+ required: false
+ description:
+ - dictionary of iDRAC attribute name and value pairs to update
+ default: {}
+ type: 'dict'
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to iDRAC controller
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ NTPConfigGroup.1.NTPEnable: "Enabled"
+ NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+ Time.1.Timezone: "{{ timezone }}"
+ baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable Syslog and set Syslog servers in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SysLog.1.SysLogEnable: "Enabled"
+ SysLog.1.Server1: "{{ syslog_server1 }}"
+ SysLog.1.Server2: "{{ syslog_server2 }}"
+ baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Configure SNMP community string, port, protocol and trap format
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SNMP.1.AgentEnable: "Enabled"
+ SNMP.1.AgentCommunity: "public_community_string"
+ SNMP.1.TrapFormat: "SNMPv1"
+ SNMP.1.SNMPProtocol: "All"
+ SNMP.1.DiscoveryPort: 161
+ SNMP.1.AlertPort: 162
+ baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable CSIOR
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetLifecycleControllerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+ baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetSystemAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+ baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments
+)
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def set_manager_attributes(self, command):
+
+ result = {}
+ required_arg_spec = {'manager_attributes': {'required': True}}
+
+ try:
+ check_required_arguments(required_arg_spec, self.module.params)
+
+ except TypeError as e:
+ msg = to_native(e)
+ self.module.fail_json(msg=msg)
+
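+        # Each Set*Attributes command targets a different Dell OEM manager resource;
+        # unrecognized commands fall back to the discovered manager URI.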
+ key = "Attributes"
+ command_manager_attributes_uri_map = {
+ "SetManagerAttributes": self.manager_uri,
+ "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
+ "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
+ }
+ manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)
+
+ attributes = self.module.params['manager_attributes']
+ manager_attr_name = self.module.params.get('manager_attribute_name')
+ manager_attr_value = self.module.params.get('manager_attribute_value')
+
+ # manager attributes to update
+ if manager_attr_name:
+ attributes.update({manager_attr_name: manager_attr_value})
+
+ attrs_to_patch = {}
+ attrs_skipped = {}
+
+ # Search for key entry and extract URI from it
+ response = self.get_request(self.root_uri + manager_uri + "/" + key)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False,
+ 'msg': "%s: Key %s not found" % (command, key)}
+
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ return {'ret': False,
+ 'msg': "%s: Manager attribute %s not found" % (command, attr_name)}
+
+ # Find out if value is already set to what we want. If yes, exclude
+ # those attributes
+ if data[u'Attributes'][attr_name] == attr_value:
+ attrs_skipped.update({attr_name: attr_value})
+ else:
+ attrs_to_patch.update({attr_name: attr_value})
+
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "Manager attributes already set"}
+
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch)}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]
+}
+
+# list of mutually exclusive commands for a category
+CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
+ "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ manager_attribute_name=dict(default=None),
+ manager_attribute_value=dict(default=None),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # check for mutually exclusive commands
+ try:
+ # check_mutually_exclusive accepts a single list or list of lists that
+ # are groups of terms that should be mutually exclusive with one another
+ # and checks that against a dictionary
+ check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
+ dict.fromkeys(command_list, True))
+
+ except TypeError as e:
+ module.fail_json(msg=to_native(e))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
+ result = rf_utils.set_manager_attributes(command)
+
+ if any((module.params['manager_attribute_name'], module.params['manager_attribute_value'])):
+ module.deprecate(msg='Arguments `manager_attribute_name` and '
+ '`manager_attribute_value` are deprecated. '
+ 'Use `manager_attributes` instead for passing in '
+ 'the manager attribute name and value pairs',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_facts.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+  - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+  - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py
new file mode 100644
index 00000000..f5b7fe1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/idrac_redfish_info.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+  - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC controller
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with iDRAC controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with iDRAC controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+  - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
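+ # Each member of Links.Oem.Dell.DellAttributes points to one attribute
+ # collection (iDRAC, LifecycleController or System); fetch each one and
+ # keep only the properties listed above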
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
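+ # Detect whether the module was invoked through its deprecated
+ # 'idrac_redfish_facts' alias so the legacy ansible_facts return
+ # format can still be produced below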
+ is_old_facts = module._name in ('idrac_redfish_facts', 'community.general.idrac_redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'idrac_redfish_facts' module has been renamed to 'idrac_redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
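+ # Drop the internal status flag so only the gathered data is returned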
+ del result['ret']
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py
new file mode 100644
index 00000000..78007f1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_command.py
@@ -0,0 +1,756 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - Manages OOB controller, for example reboot, log management.
+ - Manages OOB controller users, for example add, remove, update.
+ - Manages system power, for example on, off, graceful and forced reboot.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - Username for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ id:
+ required: false
+ aliases: [ account_id ]
+ description:
+ - ID of account to delete/modify
+ type: str
+ new_username:
+ required: false
+ aliases: [ account_username ]
+ description:
+ - Username of account to add/delete/modify
+ type: str
+ new_password:
+ required: false
+ aliases: [ account_password ]
+ description:
+ - New password of account to add/modify
+ type: str
+ roleid:
+ required: false
+ aliases: [ account_roleid ]
+ description:
+ - Role of account to add/modify
+ type: str
+ bootdevice:
+ required: false
+ description:
+ - bootdevice when setting boot configuration
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ uefi_target:
+ required: false
+ description:
+ - UEFI target when bootdevice is "UefiTarget"
+ type: str
+ boot_next:
+ required: false
+ description:
+ - BootNext target when bootdevice is "UefiBootNext"
+ type: str
+ update_username:
+ required: false
+ aliases: [ account_updatename ]
+ description:
+ - New username to assign to the account specified by account_username
+ type: str
+ version_added: '0.2.0'
+ account_properties:
+ required: false
+ description:
+ - properties of account service to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ update_image_uri:
+ required: false
+ description:
+ - The URI of the image for the update
+ type: str
+ version_added: '0.2.0'
+ update_protocol:
+ required: false
+ description:
+ - The protocol for the update
+ type: str
+ version_added: '0.2.0'
+ update_targets:
+ required: false
+ description:
+ - The list of target resource URIs to apply the update to
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ update_creds:
+ required: false
+ description:
+ - The credentials for retrieving the update image
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ username:
+ required: false
+ description:
+ - The username for retrieving the update image
+ type: str
+ password:
+ required: false
+ description:
+ - The password for retrieving the update image
+ type: str
+ virtual_media:
+ required: false
+ description:
+ - The options for VirtualMedia commands
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ media_types:
+ required: false
+ description:
+ - The list of media types appropriate for the image
+ type: list
+ elements: str
+ image_url:
+ required: false
+ description:
+ - The URL of the image to insert or eject
+ type: str
+ inserted:
+ required: false
+ description:
+ - Indicates if the image is treated as inserted on command completion
+ type: bool
+ default: True
+ write_protected:
+ required: false
+ description:
+ - Indicates if the media is treated as write-protected
+ type: bool
+ default: True
+ username:
+ required: false
+ description:
+ - The username for accessing the image URL
+ type: str
+ password:
+ required: false
+ description:
+ - The password for accessing the image URL
+ type: str
+ transfer_protocol_type:
+ required: false
+ description:
+ - The network protocol to use with the image
+ type: str
+ transfer_method:
+ required: false
+ description:
+ - The transfer method to use with the image
+ type: str
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Restart system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulRestart
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Turn system power off
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceOff
+ resource_id: 437XR1138R2
+
+ - name: Restart system power forcefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceRestart
+ resource_id: 437XR1138R2
+
+ - name: Shutdown system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulShutdown
+ resource_id: 437XR1138R2
+
+ - name: Turn system power on
+ community.general.redfish_command:
+ category: Systems
+ command: PowerOn
+ resource_id: 437XR1138R2
+
+ - name: Reboot system power
+ community.general.redfish_command:
+ category: Systems
+ command: PowerReboot
+ resource_id: 437XR1138R2
+
+ - name: Set one-time boot device to {{ bootdevice }}
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiTarget"
+ uefi_target: "/0x31/0x33/0x01/0x01"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to BootNext target of "Boot0001"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiBootNext"
+ boot_next: "Boot0001"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: EnableContinuousBootOverride
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: DisableBootOverride
+
+ - name: Set chassis indicator LED to blink
+ community.general.redfish_command:
+ category: Chassis
+ command: IndicatorLedBlink
+ resource_id: 1U
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Add user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Add user using new option aliases
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+ account_roleid: "{{ account_roleid }}"
+
+ - name: Delete user
+ community.general.redfish_command:
+ category: Accounts
+ command: DeleteUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Disable user
+ community.general.redfish_command:
+ category: Accounts
+ command: DisableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Add and enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser,EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user password
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserPassword
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+
+ - name: Update user role
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserRole
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_updatename: "{{ account_updatename }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ update_username: "{{ update_username }}"
+
+ - name: Update AccountService properties
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateAccountServiceProperties
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_properties:
+ AccountLockoutThreshold: 5
+ AccountLockoutDuration: 600
+
+ - name: Clear Manager Logs with a timeout of 20 seconds
+ community.general.redfish_command:
+ category: Manager
+ command: ClearLogs
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Clear Sessions
+ community.general.redfish_command:
+ category: Sessions
+ command: ClearSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Simple update
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: https://example.com/myupdate.img
+
+ - name: Simple update with additional options
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: //example.com/myupdate.img
+ update_protocol: FTP
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: BMC
+
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: BMC
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: GracefulRestart
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulRestart
+ resource_id: BMC
+
+ - name: Turn manager power off
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceOff
+ resource_id: BMC
+
+ - name: Restart manager power forcefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceRestart
+ resource_id: BMC
+
+ - name: Shutdown manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulShutdown
+ resource_id: BMC
+
+ - name: Turn manager power on
+ community.general.redfish_command:
+ category: Manager
+ command: PowerOn
+ resource_id: BMC
+
+ - name: Reboot manager power
+ community.general.redfish_command:
+ category: Manager
+ command: PowerReboot
+ resource_id: BMC
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
+ "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride"],
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
+ "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
+ "UpdateUserRole", "UpdateUserPassword", "UpdateUserName",
+ "UpdateAccountServiceProperties"],
+ "Sessions": ["ClearSessions"],
+ "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
+ "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
+ "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
+ "Update": ["SimpleUpdate"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ id=dict(aliases=["account_id"]),
+ new_username=dict(aliases=["account_username"]),
+ new_password=dict(aliases=["account_password"], no_log=True),
+ roleid=dict(aliases=["account_roleid"]),
+ update_username=dict(type='str', aliases=["account_updatename"]),
+ account_properties=dict(type='dict', default={}),
+ bootdevice=dict(),
+ timeout=dict(type='int', default=10),
+ uefi_target=dict(),
+ boot_next=dict(),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ update_protocol=dict(),
+ update_targets=dict(type='list', elements='str', default=[]),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # user to add/modify/delete
+ user = {'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # update options
+ update_opts = {
+ 'update_image_uri': module.params['update_image_uri'],
+ 'update_protocol': module.params['update_protocol'],
+ 'update_targets': module.params['update_targets'],
+ 'update_creds': module.params['update_creds']
+ }
+
+ # Boot override options
+ boot_opts = {
+ 'bootdevice': module.params['bootdevice'],
+ 'uefi_target': module.params['uefi_target'],
+ 'boot_next': module.params['boot_next']
+ }
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
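+ # This module performs write operations, so the shared Redfish helper
+ # is constructed with data_modification=True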
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Accounts":
+ ACCOUNTS_COMMANDS = {
+ "AddUser": rf_utils.add_user,
+ "EnableUser": rf_utils.enable_user,
+ "DeleteUser": rf_utils.delete_user,
+ "DisableUser": rf_utils.disable_user,
+ "UpdateUserRole": rf_utils.update_user_role,
+ "UpdateUserPassword": rf_utils.update_user_password,
+ "UpdateUserName": rf_utils.update_user_name,
+ "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+ }
+
+ # execute only if we find an Account service resource
+ result = rf_utils._find_accountservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ result = ACCOUNTS_COMMANDS[command](user)
+
+ elif category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
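+ # Power* commands map to system power actions; the three boot override
+ # commands all call set_boot_override() and differ only in the
+ # override_enabled value ('Once', 'Continuous' or 'Disabled')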
+ for command in command_list:
+ if command.startswith('Power'):
+ result = rf_utils.manage_system_power(command)
+ elif command == "SetOneTimeBoot":
+ boot_opts['override_enabled'] = 'Once'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "EnableContinuousBootOverride":
+ boot_opts['override_enabled'] = 'Continuous'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "DisableBootOverride":
+ boot_opts['override_enabled'] = 'Disabled'
+ result = rf_utils.set_boot_override(boot_opts)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+ # Check if more than one led_command is present
+ num_led_commands = sum([command in led_commands for command in command_list])
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command in led_commands:
+ result = rf_utils.manage_indicator_led(command)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ClearSessions":
+ result = rf_utils.clear_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ # standardize on the Power* commands, but allow the legacy
+ # GracefulRestart command
+ if command == 'GracefulRestart':
+ command = 'PowerGracefulRestart'
+
+ if command.startswith('Power'):
+ result = rf_utils.manage_manager_power(command)
+ elif command == 'ClearLogs':
+ result = rf_utils.clear_logs()
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media)
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "SimpleUpdate":
+ result = rf_utils.simple_update(update_opts)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
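+ # Honour a 'changed' value reported by the command, defaulting to True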
+ changed = result.get('changed', True)
+ module.exit_json(changed=changed, msg='Action was successful')
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py
new file mode 100644
index 00000000..26b692a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_config.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_config
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ set or update a configuration attribute.
+ - Manages BIOS configuration settings.
+ - Manages OOB controller configuration settings.
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ bios_attribute_name:
+ required: false
+ description:
+ - name of BIOS attribute to update (deprecated - use bios_attributes instead)
+ default: 'null'
+ type: str
+ bios_attribute_value:
+ required: false
+ description:
+ - value of BIOS attribute to update (deprecated - use bios_attributes instead)
+ default: 'null'
+ type: raw
+ bios_attributes:
+ required: false
+ description:
+ - dictionary of BIOS attributes to update
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+ boot_order:
+ required: false
+ description:
+ - list of BootOptionReference strings specifying the BootOrder
+ default: []
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ network_protocols:
+ required: false
+ description:
+ - dict of manager network protocol services and their settings to update
+ type: dict
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify
+ type: str
+ version_added: '0.2.0'
+ nic_addr:
+ required: false
+ description:
+ - EthernetInterface Address string on OOB controller
+ default: 'null'
+ type: str
+ version_added: '0.2.0'
+ nic_config:
+ required: false
+ description:
+ - dict of EthernetInterface settings to apply on the OOB controller
+ type: dict
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Set BootMode to UEFI
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Uefi"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set multiple BIOS attributes
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable PXE Boot for NIC1 using deprecated options
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attribute_name: PxeDev1EnDis
+ bios_attribute_value: Enabled
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set BIOS default settings with a timeout of 20 seconds
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosDefaultSettings
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Set boot order
+ community.general.redfish_config:
+ category: Systems
+ command: SetBootOrder
+ boot_order:
+ - Boot0002
+ - Boot0001
+ - Boot0000
+ - Boot0003
+ - Boot0004
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set boot order to the default
+ community.general.redfish_config:
+ category: Systems
+ command: SetDefaultBootOrder
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager Network Protocols
+ community.general.redfish_config:
+ category: Manager
+ command: SetNetworkProtocols
+ network_protocols:
+ SNMP:
+ ProtocolEnabled: True
+ Port: 161
+ HTTP:
+ ProtocolEnabled: False
+ Port: 8080
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager NIC
+ community.general.redfish_config:
+ category: Manager
+ command: SetManagerNic
+ nic_config:
+ DHCPv4:
+ DHCPEnabled: False
+ IPv4StaticAddresses:
+ Address: 192.168.1.3
+ Gateway: 192.168.1.1
+ SubnetMask: 255.255.255.0
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils._text import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
+ "SetDefaultBootOrder"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ bios_attribute_name=dict(default='null'),
+ bios_attribute_value=dict(default='null', type='raw'),
+ bios_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ boot_order=dict(type='list', elements='str', default=[]),
+ network_protocols=dict(
+ type='dict',
+ default={}
+ ),
+ resource_id=dict(),
+ nic_addr=dict(default='null'),
+ nic_config=dict(
+ type='dict',
+ default={}
+ )
+ ),
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # BIOS attributes to update
+ bios_attributes = module.params['bios_attributes']
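+ # Fold the deprecated single name/value options into the bios_attributes dict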
+ if module.params['bios_attribute_name'] != 'null':
+ bios_attributes[module.params['bios_attribute_name']] = module.params[
+ 'bios_attribute_value']
+ module.deprecate(msg='The bios_attribute_name/bios_attribute_value '
+ 'options are deprecated. Use bios_attributes instead',
+ version='3.0.0', collection_name='community.general') # was Ansible 2.14
+
+ # boot order
+ boot_order = module.params['boot_order']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # manager nic
+ nic_addr = module.params['nic_addr']
+ nic_config = module.params['nic_config']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetBiosDefaultSettings":
+ result = rf_utils.set_bios_default_settings()
+ elif command == "SetBiosAttributes":
+ result = rf_utils.set_bios_attributes(bios_attributes)
+ elif command == "SetBootOrder":
+ result = rf_utils.set_boot_order(boot_order)
+ elif command == "SetDefaultBootOrder":
+ result = rf_utils.set_default_boot_order()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetNetworkProtocols":
+ result = rf_utils.set_network_protocols(module.params['network_protocols'])
+ elif command == "SetManagerNic":
+ result = rf_utils.set_manager_nic(nic_addr, nic_config)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_facts.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+ description: different results depending on task
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+ for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
+ command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py
new file mode 100644
index 00000000..cfdb1aef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/redfish/redfish_info.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller
+ default: ['Systems']
+ type: list
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller
+ type: list
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller
+ type: str
+ username:
+ required: true
+ description:
+ - User for authentication with OOB controller
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authentication with OOB controller
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+    description: Different results depending on the task.
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
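+# All commands supported for each category; specifying command 'all' runs every one of them.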
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport"],
+}
+
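+# Command that is run for a category when the user does not specify one.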
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', default=['Systems']),
+ command=dict(type='list'),
+ baseuri=dict(required=True),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=False
+ )
+ is_old_facts = module._name in ('redfish_facts', 'community.general.redfish_facts')
+ if is_old_facts:
+ module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+                command_list = list(CATEGORY_COMMANDS_ALL[category])
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia()
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+
+ # Return data back
+ if is_old_facts:
+ module.exit_json(ansible_facts=dict(redfish_facts=result))
+ else:
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py
new file mode 100644
index 00000000..372ba2df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/stacki/stacki_host.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host on a Stacki front-end
+description:
+  - Use this module to add or remove hosts on a Stacki front-end via its API.
+ - U(https://github.com/StackIQ/stacki)
+options:
+ name:
+ description:
+ - Name of the host to be added to Stacki.
+ required: True
+ type: str
+ stacki_user:
+ description:
+      - Username for authenticating with the Stacki API. If not specified,
+        the environment variable C(stacki_user) is used instead.
+ required: True
+ type: str
+ stacki_password:
+ description:
+      - Password for authenticating with the Stacki API. If not specified,
+        the environment variable C(stacki_password) is used instead.
+ required: True
+ type: str
+ stacki_endpoint:
+ description:
+ - URL for the Stacki API Endpoint.
+ required: True
+ type: str
+ prim_intf_mac:
+ description:
+ - MAC Address for the primary PXE boot network interface.
+ type: str
+ prim_intf_ip:
+ description:
+ - IP Address for the primary network interface.
+ type: str
+ prim_intf:
+ description:
+ - Name of the primary network interface.
+ type: str
+ force_install:
+ description:
+      - Set value to C(true) to force the node into the install state if it already exists in Stacki.
+ type: bool
+ state:
+ description:
+ - Set value to the desired state for the specified host.
+ type: str
+ choices: [ absent, present ]
+author:
+- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ prim_intf_mac: mac_addr
+ prim_intf_ip: x.x.x.x
+ prim_intf: eth0
+
+- name: Remove a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: Whether or not the API call completed successfully.
+ returned: always
+ type: bool
+ sample: true
+
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: the value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.hostname = module.params['name']
+ self.rack = module.params['rack']
+ self.rank = module.params['rank']
+ self.appliance = module.params['appliance']
+ self.prim_intf = module.params['prim_intf']
+ self.prim_intf_ip = module.params['prim_intf_ip']
+ self.network = module.params['network']
+ self.prim_intf_mac = module.params['prim_intf_mac']
+ self.endpoint = module.params['stacki_endpoint']
+
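+        # Authentication is a two-step handshake: an unauthenticated GET on the
+        # endpoint yields an initial CSRF token, which is then posted together
+        # with the credentials to /login to obtain the final CSRF token and
+        # session cookie used for every subsequent API request.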
+ auth_creds = {'USERNAME': module.params['stacki_user'],
+ 'PASSWORD': module.params['stacki_password']}
+
+ # Get Initial CSRF
+ cred_a = self.do_request(self.module, self.endpoint, method="GET")
+ cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+ init_csrftoken = None
+ for c in cookie_a:
+ if "csrftoken" in c:
+ init_csrftoken = c.replace("csrftoken=", "")
+ init_csrftoken = init_csrftoken.rstrip("\r\n")
+ break
+
+ # Make Header Dictionary with initial CSRF
+ header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+ 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+ # Endpoint to get final authentication header
+ login_endpoint = self.endpoint + "/login"
+
+ # Get Final CSRF and Session ID
+ login_req = self.do_request(self.module, login_endpoint, headers=header,
+ payload=urlencode(auth_creds), method='POST')
+
+ cookie_f = login_req.headers.get('Set-Cookie').split(';')
+        csrftoken = None
+        sessionid = None
+        for f in cookie_f:
+            if "csrftoken" in f:
+                csrftoken = f.replace("csrftoken=", "")
+            if "sessionid" in f:
+                sessionid = f.split("sessionid=", 1)[-1]
+                sessionid = sessionid.rstrip("\r\n")
+
+ self.header = {'csrftoken': csrftoken,
+ 'X-CSRFToken': csrftoken,
+ 'sessionid': sessionid,
+ 'Content-type': 'application/json',
+ 'Cookie': login_req.headers.get('Set-Cookie')}
+
+ def do_request(self, module, url, payload=None, headers=None, method=None):
+ res, info = fetch_url(module, url, data=payload, headers=headers, method=method)
+
+ if info['status'] != 200:
+ self.module.fail_json(changed=False, msg=info['msg'])
+
+ return res
+
+ def stack_check_host(self):
+ res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+
+ if self.hostname in res.read():
+ return True
+ else:
+ return False
+
+ def stack_sync(self):
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+ def stack_force_install(self, result):
+ data = dict()
+ changed = False
+
+ data['cmd'] = "set host boot {0} action=install" \
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+ changed = True
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_remove(self, result):
+ data = dict()
+
+ data['cmd'] = "remove host {0}"\
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ rack=dict(type='int', default=0),
+ rank=dict(type='int', default=0),
+ appliance=dict(type='str', default='backend'),
+ prim_intf=dict(type='str'),
+ prim_intf_ip=dict(type='str'),
+ network=dict(type='str', default='private'),
+ prim_intf_mac=dict(type='str'),
+            stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])),
+            stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True),
+            stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])),
+ force_install=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+ missing_params = list()
+
+ stacki = StackiHost(module)
+ host_exists = stacki.stack_check_host()
+
+ # If state is present, but host exists, need force_install flag to put host back into install state
+ if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+ stacki.stack_force_install(result)
+    # If state is present, but host exists, and force_install is false, do nothing
+ elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+ result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+ .format(module.params['name'])
+    # Otherwise, state is present, but host doesn't exist, so require more params to add the host
+ elif module.params['state'] == 'present' and not host_exists:
+ for param in ['appliance', 'prim_intf',
+ 'prim_intf_ip', 'network', 'prim_intf_mac']:
+ if not module.params[param]:
+ missing_params.append(param)
+ if len(missing_params) > 0: # @FIXME replace with required_if
+ module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+ stacki.stack_add(result)
+    # If state is absent, and host exists, remove it.
+ elif module.params['state'] == 'absent' and host_exists:
+ stacki.stack_remove(result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py
new file mode 100644
index 00000000..2f097fcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/remote_management/wakeonlan.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for.
+ required: true
+ type: str
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+ default: 255.255.255.255
+ type: str
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet.
+ default: 7
+ type: int
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+  - Add SecureOn password support
+notes:
+  - This module sends a magic packet; it cannot verify whether the target system actually powered on.
+ - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS)
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
+seealso:
+- module: community.windows.win_wakeonlan
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ community.general.wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- community.general.wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+import socket
+import struct
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+ # Remove possible separator from MAC address
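+    # A MAC written with a separator (for example 00:00:5E:00:53:66 or
+    # 00-00-5E-00-53-66) is 17 characters long; the character at index 2 is
+    # that separator, so removing every occurrence of it leaves the bare
+    # 12 hex digits.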
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
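+    # A magic packet is 6 bytes of 0xFF followed by the target MAC address
+    # repeated at least 16 times; the hex string built here repeats it 20
+    # times, which listeners still accept since they only scan for the
+    # required pattern.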
+ data = b''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
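+    # In check mode the socket is prepared but no packet is actually sent.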
+ if not module.check_mode:
+
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error as e:
+ sock.close()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ sock.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mac=dict(type='str', required=True),
+ broadcast=dict(type='str', default='255.255.255.255'),
+ port=dict(type='int', default=7),
+ ),
+ supports_check_mode=True,
+ )
+
+ mac = module.params['mac']
+ broadcast = module.params['broadcast']
+ port = module.params['port']
+
+ wakeonlan(module, mac, broadcast, port)
+
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py
new file mode 100644
index 00000000..2aebc346
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhevm.py
@@ -0,0 +1,1516 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhevm
+short_description: RHEV/oVirt automation
+description:
+ - This module only supports oVirt/RHEV version 3.
+ - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+  - Allows you to create, remove, update, or power-manage virtual machines on a RHEV/oVirt platform.
+requirements:
+ - ovirtsdk
+author:
+- Timothy Vandenbrande (@TimothyVandenbrande)
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ default: admin@internal
+ password:
+ description:
+ - The password for user authentication.
+ type: str
+ required: true
+ server:
+ description:
+ - The name/IP of your RHEV-m/oVirt instance.
+ type: str
+ default: 127.0.0.1
+ port:
+ description:
+ - The port on which the API is reachable.
+ type: int
+ default: 443
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the VM.
+ type: str
+ cluster:
+ description:
+      - The RHEV/oVirt cluster in which you want your VM to start.
+ type: str
+ datacenter:
+ description:
+      - The RHEV/oVirt datacenter in which you want your VM to start.
+ type: str
+ default: Default
+ state:
+ description:
+      - This serves to create, remove, update, or power-manage your VM.
+ type: str
+ choices: [ absent, cd, down, info, ping, present, restarted, up ]
+ default: present
+ image:
+ description:
+ - The template to use for the VM.
+ type: str
+ type:
+ description:
+ - To define if the VM is a server or desktop.
+ type: str
+ choices: [ desktop, host, server ]
+ default: server
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ type: str
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ type: int
+ default: 2
+ cpu_share:
+ description:
+ - This parameter is used to configure the CPU share.
+ type: int
+ default: 0
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ type: int
+ default: 1
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ type: str
+ default: rhel_6x64
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ type: int
+ default: 1
+ vm_ha:
+ description:
+      - To make your VM highly available.
+ type: bool
+ default: yes
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ type: list
+ elements: str
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ type: list
+ elements: str
+ aliases: [ interfaces, nics ]
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the bootorder.
+ type: list
+ elements: str
+ default: [ hd, network ]
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ type: bool
+ default: yes
+ cd_drive:
+ description:
+      - The CD you wish to have mounted on the VM when I(state = 'cd').
+ type: str
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up').
+ - When I(state = 'down').
+ - When I(state = 'restarted').
+ type: int
+'''
+
+RETURN = r'''
+vm:
+ description: Returns all of the VMs variables and execution.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = r'''
+- name: Basic get info from VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: info
+
+- name: Basic create example from image
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ cluster: centos
+ image: centos7_x64
+ state: present
+
+- name: Power management
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: uptime_server
+ image: centos7_x64
+ state: down
+
+- name: Multi disk, multi nic create example
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: server007
+ type: server
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: eth0
+ vlan: vlan2202
+ - name: eth1
+ vlan: vlan36
+ - name: eth2
+ vlan: vlan38
+ - name: eth3
+ vlan: vlan2202
+ disks:
+ - name: root
+ size: 10
+ domain: ssd-san
+ - name: swap
+ size: 10
+ domain: 15kiscsi-san
+ - name: opt
+ size: 10
+ domain: 15kiscsi-san
+ - name: var
+ size: 10
+ domain: 10kiscsi-san
+ - name: home
+ size: 10
+ domain: sata-san
+ boot_order:
+ - network
+ - hd
+ state: present
+
+- name: Add a CD to the disk cd_drive
+ community.general.rhevm:
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: server007
+ cd_drive: rhev-tools-setup.iso
+ state: cd
+
+- name: New host deployment + host network configuration
+ community.general.rhevm:
+ password: '{{ rhevm.admin.pass }}'
+ name: ovirt_node007
+ type: host
+ cluster: rhevm01
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: 172.31.224.200
+ netmask: 255.255.254.0
+ - name: p3p2
+ ip: 172.31.225.200
+ netmask: 255.255.254.0
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: rhevm
+ ip: 172.31.222.200
+ netmask: 255.255.255.0
+ management: yes
+ - name: bond0.36
+ network: vlan36
+ ip: 10.2.36.200
+ netmask: 255.255.254.0
+ gateway: 10.2.36.254
+ - name: bond0.2202
+ network: vlan2202
+ - name: bond0.38
+ network: vlan38
+ state: present
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+            setMsg("Error attaching " + diskname + " disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
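+        # Adding a new host happens in stages: the requested interfaces (and
+        # bonds) are first translated into HostNIC definitions, the host is
+        # registered via its management IP using public-key SSH, the code then
+        # waits for it to come up, puts it into maintenance to push the network
+        # configuration, commits that configuration and activates the host.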
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface:
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface:
+ if 'ip' in iface:
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface:
+ iface['ip'] = ''
+ if 'netmask' not in iface:
+ iface['netmask'] = ''
+ if 'gateway' not in iface:
+ iface['gateway'] = ''
+
+ if 'network' in iface:
+ if 'bond' in iface:
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
+ try:
+ tmpiface = params.Bonding(
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ),
+ override_configuration=True,
+ bonding=tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+                                setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+        return self.conn.createVMimage(name, cluster, template)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
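+        # Each disk is addressed by a derived name of the form
+        # <vmname>_Disk<index>_<name>; an existing disk with that name is
+        # updated in place, otherwise it is created. If no disk is explicitly
+        # marked bootable, the first one becomes the boot disk.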
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
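+        # Existing NICs are reconciled positionally against the requested
+        # ifaces: a name mismatch deletes all NICs and rebuilds them in order,
+        # surplus NICs are removed, and any ifaces beyond the existing count
+        # are created afterwards.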
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ # pylint: disable=unreachable
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+    global msg
+    msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if memory_policy == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == memory_policy:
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == memory:
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
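+                # Both values need updating; the guaranteed (policy) memory may
+                # never exceed the configured memory, so grow memory first when
+                # the new policy is larger than the current memory, otherwise
+                # lower the policy first.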
+ if mem_nok and mem_pol_nok:
+ if memory_policy > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == cpu:
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == cpu_share:
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost:
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
+ user=dict(type='str', default='admin@internal'),
+ password=dict(type='str', required=True, no_log=True),
+ server=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int', default=443),
+ insecure_api=dict(type='bool', default=False),
+ name=dict(type='str'),
+ image=dict(type='str'),
+ datacenter=dict(type='str', default="Default"),
+ type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
+ cluster=dict(type='str', default=''),
+ vmhost=dict(type='str'),
+ vmcpu=dict(type='int', default=2),
+ vmmem=dict(type='int', default=1),
+ disks=dict(type='list', elements='str'),
+ osver=dict(type='str', default="rhel_6x64"),
+ ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
+ timeout=dict(type='int'),
+ mempol=dict(type='int', default=1),
+ vm_ha=dict(type='bool', default=True),
+ cpu_share=dict(type='int', default=0),
+ boot_order=dict(type='list', elements='str', default=['hd', 'network']),
+ del_prot=dict(type='bool', default=True),
+ cd_drive=dict(type='str'),
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong, emit the msg
+ module.fail_json(rc=rc, msg=result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py
new file mode 100644
index 00000000..63be0323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_channel.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+
+# Copyright: (c) Vincent Van de Kussen
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhn_channel
+short_description: Adds or removes Red Hat software channels
+description:
+ - Adds or removes Red Hat software channels.
+author:
+- Vincent Van der Kussen (@vincentvdk)
+notes:
+ - This module fetches the system id from RHN.
+ - This module doesn't support I(check_mode).
+options:
+ name:
+ description:
+ - Name of the software channel.
+ required: true
+ type: str
+ sysname:
+ description:
+ - Name of the system as it is known in RHN/Satellite.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the channel should be present or not, taking action if the state is different from what is stated.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ url:
+ description:
+ - The full URL to the RHN/Satellite API.
+ required: true
+ type: str
+ user:
+ description:
+ - RHN/Satellite login.
+ required: true
+ type: str
+ password:
+ description:
+ - RHN/Satellite password.
+ aliases: [pwd]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - If C(False), SSL certificates will not be validated.
+ - This should only be set to C(False) when used on self-controlled sites
+ using self-signed certificates, and you are absolutely sure that nobody
+ can modify traffic between the module and the site.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Add a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ delegate_to: localhost
+'''
+
+import ssl
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+def get_systemid(client, session, sysname):
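+ # look up the numeric system ID for the given profile name via the RHN/Satellite API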
+ systems = client.system.listUserSystems(session)
+ for system in systems:
+ if system.get('name') == sysname:
+ idres = system.get('id')
+ idd = int(idres)
+ return idd
+
+
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def base_channels(client, session, sys_id):
+ basechan = client.channel.software.listSystemChannels(session, sys_id)
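+ # the key holding the channel label differs between Satellite/Spacewalk versions, hence the fallback below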
+ try:
+ chans = [item['label'] for item in basechan]
+ except KeyError:
+ chans = [item['channel_label'] for item in basechan]
+ return chans
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ sysname=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=True, aliases=['pwd'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ )
+ )
+
+ state = module.params['state']
+ channelname = module.params['name']
+ systname = module.params['sysname']
+ saturl = module.params['url']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+
+ ssl_context = None
+ if not validate_certs:
+ try: # Python 2.7.9 and newer
+ ssl_context = ssl.create_unverified_context()
+ except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
+ ssl_context = ssl._create_unverified_context()
+ else: # Python 2.7.8 and older
+ ssl._create_default_https_context = ssl._create_unverified_https_context
+
+ # initialize connection
+ if ssl_context:
+ client = xmlrpc_client.ServerProxy(saturl, context=ssl_context)
+ else:
+ client = xmlrpc_client.Server(saturl)
+
+ try:
+ session = client.auth.login(user, password)
+ except Exception as e:
+ module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e))
+
+ if not session:
+ module.fail_json(msg="Failed to establish session with Satellite server.")
+
+ # get systemid
+ try:
+ sys_id = get_systemid(client, session, systname)
+ except Exception as e:
+ module.fail_json(msg="Unable to get system id: %s " % to_text(e))
+
+ if not sys_id:
+ module.fail_json(msg="Failed to get system id.")
+
+ # get channels for system
+ try:
+ chans = base_channels(client, session, sys_id)
+ except Exception as e:
+ module.fail_json(msg="Unable to get channel information: %s " % to_text(e))
+
+ try:
+ if state == 'present':
+ if channelname in chans:
+ module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
+ else:
+ subscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s added" % channelname)
+
+ if state == 'absent':
+ if channelname not in chans:
+ module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
+ else:
+ unsubscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s removed" % channelname)
+ except Exception as e:
+ module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e)))
+ finally:
+ client.auth.logout(session)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py
new file mode 100644
index 00000000..dfc408a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhn_register.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) James Laska
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhn_register
+short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
+description:
+ - Manage registration to the Red Hat Network.
+author:
+- James Laska (@jlaska)
+notes:
+ - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead.
+ - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
+requirements:
+ - rhnreg_ks
+ - either libxml2 or lxml
+options:
+ state:
+ description:
+ - Whether to register (C(present)), or unregister (C(absent)) a system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ username:
+ description:
+ - Red Hat Network username.
+ type: str
+ password:
+ description:
+ - Red Hat Network password.
+ type: str
+ server_url:
+ description:
+ - Specify an alternative Red Hat Network server URL.
+ - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ type: str
+ activationkey:
+ description:
+ - Supply an activation key for use with registration.
+ type: str
+ profilename:
+ description:
+ - Supply a profilename for use with registration.
+ type: str
+ ca_cert:
+ description:
+ - Supply a custom ssl CA certificate file for use with registration.
+ type: path
+ aliases: [ sslcacert ]
+ systemorgid:
+ description:
+ - Supply an organizational id for use with registration.
+ type: str
+ channels:
+ description:
+ - Optionally specify a list of channels to subscribe to upon successful registration.
+ type: list
+ elements: str
+ default: []
+ enable_eus:
+ description:
+ - If C(yes), extended update support will be requested.
+ type: bool
+ default: no
+ nopackages:
+ description:
+ - If C(yes), the registered node will not upload its installed packages information to Satellite server.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Unregister system from RHN
+ community.general.rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
+
+- name: Register as user with password and auto-subscribe to available content
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+
+- name: Register with activationkey and enable extended update support
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: yes
+
+- name: Register with activationkey and set a profilename which may differ from the hostname
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
+
+- name: Register as user with password against a satellite server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+
+- name: Register as user with password and enable channels
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import os
+import sys
+
+# Attempt to import rhn client tools
+sys.path.insert(0, '/usr/share/rhn')
+try:
+ import up2date_client
+ import up2date_client.config
+ HAS_UP2DATE_CLIENT = True
+except ImportError:
+ HAS_UP2DATE_CLIENT = False
+
+# INSERT REDHAT SNIPPETS
+from ansible_collections.community.general.plugins.module_utils import redhat
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import urllib, xmlrpc_client
+
+
+class Rhn(redhat.RegistrationBase):
+
+ def __init__(self, module=None, username=None, password=None):
+ redhat.RegistrationBase.__init__(self, module, username, password)
+ self.config = self.load_config()
+ self.server = None
+ self.session = None
+
+ def logout(self):
+ if self.session is not None:
+ self.server.auth.logout(self.session)
+
+ def load_config(self):
+ '''
+ Read configuration from /etc/sysconfig/rhn/up2date
+ '''
+ if not HAS_UP2DATE_CLIENT:
+ return None
+
+ config = up2date_client.config.initUp2dateConfig()
+
+ return config
+
+ @property
+ def server_url(self):
+ return self.config['serverURL']
+
+ @property
+ def hostname(self):
+ '''
+ Return the non-xmlrpc RHN hostname. This is a convenience method
+ used for displaying a more readable RHN hostname.
+
+ Returns: str
+ '''
+ url = urllib.parse.urlparse(self.server_url)
+ return url[1].replace('xmlrpc.', '')
+
+ @property
+ def systemid(self):
+ systemid = None
+ xpath_str = "//member[name='system_id']/value/string"
+
+ if os.path.isfile(self.config['systemIdPath']):
+ fd = open(self.config['systemIdPath'], 'r')
+ xml_data = fd.read()
+ fd.close()
+
+ # Ugh, xml parsing time ...
+ # First, try parsing with libxml2 ...
+ if systemid is None:
+ try:
+ import libxml2
+ doc = libxml2.parseDoc(xml_data)
+ ctxt = doc.xpathNewContext()
+ systemid = ctxt.xpathEval(xpath_str)[0].content
+ doc.freeDoc()
+ ctxt.xpathFreeContext()
+ except ImportError:
+ pass
+
+ # m-kay, let's try with lxml now ...
+ if systemid is None:
+ try:
+ from lxml import etree
+ root = etree.fromstring(xml_data)
+ systemid = root.xpath(xpath_str)[0].text
+ except ImportError:
+ raise Exception('"libxml2" or "lxml" is required for this module.')
+
+ # Strip the 'ID-' prefix
+ if systemid is not None and systemid.startswith('ID-'):
+ systemid = systemid[3:]
+
+ return int(systemid)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+
+ Returns: True|False
+ '''
+ return os.path.isfile(self.config['systemIdPath'])
+
+ def configure_server_url(self, server_url):
+ '''
+ Configure server_url for registration
+ '''
+
+ self.config.set('serverURL', server_url)
+ self.config.save()
+
+ def enable(self):
+ '''
+ Prepare the system for RHN registration. This includes ...
+ * enabling the rhnplugin yum plugin
+ * disabling the subscription-manager yum plugin
+ '''
+ redhat.RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', True)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
+ '''
+ Register system to RHN. If enable_eus=True, extended update
+ support will be requested.
+ '''
+ register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
+ if self.username:
+ register_cmd.extend(['--username', self.username, '--password', self.password])
+ if self.server_url:
+ register_cmd.extend(['--serverUrl', self.server_url])
+ if enable_eus:
+ register_cmd.append('--use-eus-channel')
+ if nopackages:
+ register_cmd.append('--nopackages')
+ if activationkey is not None:
+ register_cmd.extend(['--activationkey', activationkey])
+ if profilename is not None:
+ register_cmd.extend(['--profilename', profilename])
+ if sslcacert is not None:
+ register_cmd.extend(['--sslCACert', sslcacert])
+ if systemorgid is not None:
+ register_cmd.extend(['--systemorgid', systemorgid])
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
+
+ def api(self, method, *args):
+ '''
+ Convenience RPC wrapper
+ '''
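+ # open the XML-RPC connection and authenticate lazily on first use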
+ if self.server is None:
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ self.server = xmlrpc_client.ServerProxy(url)
+ self.session = self.server.auth.login(self.username, self.password)
+
+ func = getattr(self.server, method)
+ return func(self.session, *args)
+
+ def unregister(self):
+ '''
+ Unregister a previously registered system
+ '''
+
+ # Initiate RPC connection
+ self.api('system.deleteSystems', [self.systemid])
+
+ # Remove systemid file
+ os.unlink(self.config['systemIdPath'])
+
+ def subscribe(self, channels):
+ if not channels:
+ return
+
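+ # Hosted RHN takes the whole child channel list in one call; Satellite/Spacewalk needs base and child channels set separately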
+ if self._is_hosted():
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ new_channels = [item['channel_label'] for item in current_channels]
+ new_channels.extend(channels)
+ return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
+
+ else:
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ current_channels = [item['label'] for item in current_channels]
+ new_base = None
+ new_childs = []
+ for ch in channels:
+ if ch in current_channels:
+ continue
+ if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
+ new_base = ch
+ else:
+ if ch not in new_childs:
+ new_childs.append(ch)
+ out_base = 0
+ out_childs = 0
+
+ if new_base:
+ out_base = self.api('system.setBaseChannel', self.systemid, new_base)
+
+ if new_childs:
+ out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
+
+ return out_base and out_childs
+
+ def _is_hosted(self):
+ '''
+ Return True if we are running against Hosted (rhn.redhat.com) or
+ False otherwise (when running against Satellite or Spacewalk)
+ '''
+ return 'rhn.redhat.com' in self.hostname
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ server_url=dict(type='str'),
+ activationkey=dict(type='str', no_log=True),
+ profilename=dict(type='str'),
+ ca_cert=dict(type='path', aliases=['sslcacert']),
+ systemorgid=dict(type='str'),
+ enable_eus=dict(type='bool', default=False),
+ nopackages=dict(type='bool', default=False),
+ channels=dict(type='list', elements='str', default=[]),
+ ),
+ # username/password is required for state=absent, or if channels is not empty
+ # (basically anything that uses self.api requires username/password) but it doesn't
+ # look like we can express that with required_if/required_together/mutually_exclusive
+
+ # only username+password can be used for unregister
+ required_if=[['state', 'absent', ['username', 'password']]],
+ )
+
+ if not HAS_UP2DATE_CLIENT:
+ module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+
+ state = module.params['state']
+ activationkey = module.params['activationkey']
+ profilename = module.params['profilename']
+ sslcacert = module.params['ca_cert']
+ systemorgid = module.params['systemorgid']
+ channels = module.params['channels']
+ enable_eus = module.params['enable_eus']
+ nopackages = module.params['nopackages']
+
+ rhn = Rhn(module=module, username=username, password=password)
+
+ # use the provided server url and persist it to the rhn config.
+ if server_url:
+ rhn.configure_server_url(server_url)
+
+ if not rhn.server_url:
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Check for missing parameters ...
+ if not (activationkey or rhn.username or rhn.password):
+ module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
+ rhn.password))
+ if not activationkey and not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
+
+ # Register system
+ if rhn.is_registered:
+ module.exit_json(changed=False, msg="System already registered.")
+
+ try:
+ rhn.enable()
+ rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
+ rhn.subscribe(channels)
+ except Exception as exc:
+ module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhn.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+
+ if not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
+
+ try:
+ rhn.unregister()
+ except Exception as exc:
+ module.fail_json(msg="Failed to unregister: %s" % exc)
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py
new file mode 100644
index 00000000..22b280f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_release.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+
+# (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or Unset RHSM Release version
+description:
+ - Sets or unsets the release version used by RHSM repositories.
+notes:
+ - This module will fail on an unregistered system.
+ Use the C(redhat_subscription) module to register a system
+ prior to setting the RHSM release.
+requirements:
+ - Red Hat Enterprise Linux 6+ with subscription-manager installed
+options:
+ release:
+ description:
+ - RHSM release version to use (use null to unset)
+ required: true
+ type: str
+author:
+ - Sean Myers (@seandst)
+'''
+
+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+ community.general.rhsm_release:
+ release: null
+'''
+
+RETURN = '''
+current_release:
+ description: The current RHSM release version value
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import re
+
+# Matches release-like values such as 7.2, 6.10, 10Server,
+# but rejects unlikely values, like 100Server, 100.0, 1.100, etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server)\b')
+
+
+def _sm_release(module, *args):
+ # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
+ # "subscription-manager release --set 0.1"
+ sm_bin = module.get_bin_path('subscription-manager', required=True)
+ cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ # delegate nonzero rc handling to run_command
+ return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+ # Get the current release version, or None if release unset
+ rc, out, err = _sm_release(module, '--show')
+ try:
+ match = release_matcher.findall(out)[0]
+ except IndexError:
+ # 0'th index did not exist; no matches
+ match = None
+
+ return match
+
+
+def set_release(module, release):
+ # Set current release version, or unset if release is None
+ if release is None:
+ args = ('--unset',)
+ else:
+ args = ('--set', release)
+
+ return _sm_release(module, *args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ release=dict(type='str', required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ target_release = module.params['release']
+
+ # sanity check: the target release at least looks like a valid release
+ if target_release and not release_matcher.findall(target_release):
+ module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+ # Will fail with useful error from s-m if system not subscribed
+ current_release = get_release(module)
+
+ changed = (target_release != current_release)
+ if not module.check_mode and changed:
+ set_release(module, target_release)
+ # If setting the release fails, then a fail_json would have exited with
+ # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+ # current release is now set to the target release (job's done)
+ current_release = target_release
+
+ module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py
new file mode 100644
index 00000000..7317be66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rhsm_repository.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+ - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
+ Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+ - In order to manage RHSM repositories the system must be already registered
+ to RHSM manually or using the Ansible C(redhat_subscription) module.
+
+requirements:
+ - subscription-manager
+options:
+ state:
+ description:
+ - If state is equal to present or disabled, indicates the desired
+ repository state.
+ choices: [present, enabled, absent, disabled]
+ default: "enabled"
+ type: str
+ name:
+ description:
+ - The ID of repositories to enable.
+ - To operate on several repositories this can accept a comma separated
+ list or a YAML list.
+ required: True
+ type: list
+ elements: str
+ purge:
+ description:
+ - Disable all currently enabled repositories that are not specified in C(name).
+ Only set this to C(True) if passing in a list of repositories to the C(name) field.
+ Using this with C(loop) will most likely not have the desired result.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Enable a RHSM repository
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+ community.general.rhsm_repository:
+ name: '*'
+ state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+ community.general.rhsm_repository:
+ name: rhel-6-server*
+ state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+ purge: True
+'''
+
+RETURN = '''
+repositories:
+ description:
+ - The list of RHSM repositories with their states.
+ - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success
+ type: list
+'''
+
+import re
+import os
+from fnmatch import fnmatch
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_subscription_manager(module, arguments):
+ # Execute subscription-manager with arguments and manage common errors
+ rhsm_bin = module.get_bin_path('subscription-manager')
+ if not rhsm_bin:
+ module.fail_json(msg='The executable file subscription-manager was not found in PATH')
+
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
+
+ if rc == 1 and (err == 'The password you typed is invalid.\nPlease try again.\n' or os.getuid() != 0):
+ module.fail_json(msg='The executable file subscription-manager must be run using root privileges')
+ elif rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+
+def get_repository_list(module, list_parameter):
+ # Generate RHSM repository list and return a list of dict
+ if list_parameter == 'list_enabled':
+ rhsm_arguments = ['repos', '--list-enabled']
+ elif list_parameter == 'list_disabled':
+ rhsm_arguments = ['repos', '--list-disabled']
+ elif list_parameter == 'list':
+ rhsm_arguments = ['repos', '--list']
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+
+ skip_lines = [
+ '+----------------------------------------------------------+',
+ ' Available Repositories in /etc/yum.repos.d/redhat.repo'
+ ]
+ repo_id_re = re.compile(r'Repo ID:\s+(.*)')
+ repo_name_re = re.compile(r'Repo Name:\s+(.*)')
+ repo_url_re = re.compile(r'Repo URL:\s+(.*)')
+ repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ if line == '' or line in skip_lines:
+ continue
+
+ repo_id_match = repo_id_re.match(line)
+ if repo_id_match:
+ repo_id = repo_id_match.group(1)
+ continue
+
+ repo_name_match = repo_name_re.match(line)
+ if repo_name_match:
+ repo_name = repo_name_match.group(1)
+ continue
+
+ repo_url_match = repo_url_re.match(line)
+ if repo_url_match:
+ repo_url = repo_url_match.group(1)
+ continue
+
+ repo_enabled_match = repo_enabled_re.match(line)
+ if repo_enabled_match:
+ repo_enabled = repo_enabled_match.group(1)
+
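+ # 'Enabled:' is the last field printed for each repository block, so the record is assembled here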
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+ "enabled": True if repo_enabled == '1' else False
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
+
+
+def repository_modify(module, state, name, purge=False):
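+ # work out which repositories need to change and build a single subscription-manager invocation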
+ name = set(name)
+ current_repo_list = get_repository_list(module, 'list')
+ updated_repo_list = deepcopy(current_repo_list)
+ matched_existing_repo = {}
+ for repoid in name:
+ matched_existing_repo[repoid] = []
+ for idx, repo in enumerate(current_repo_list):
+ if fnmatch(repo['id'], repoid):
+ matched_existing_repo[repoid].append(repo)
+ # Update current_repo_list to return it as result variable
+ updated_repo_list[idx]['enabled'] = True if state == 'enabled' else False
+
+ changed = False
+ results = []
+ diff_before = ""
+ diff_after = ""
+ rhsm_arguments = ['repos']
+
+ for repoid in matched_existing_repo:
+ if len(matched_existing_repo[repoid]) == 0:
+ results.append("%s is not a valid repository ID" % repoid)
+ module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+ for repo in matched_existing_repo[repoid]:
+ if state in ['disabled', 'absent']:
+ if repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+ results.append("Repository '%s' is disabled for this system" % repo['id'])
+ rhsm_arguments += ['--disable', repo['id']]
+ elif state in ['enabled', 'present']:
+ if not repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+ results.append("Repository '%s' is enabled for this system" % repo['id'])
+ rhsm_arguments += ['--enable', repo['id']]
+
+ # Disable all enabled repos on the system that are not in the task and not
+ # marked as disabled by the task
+ if purge:
+ enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+ matched_repoids_set = set(matched_existing_repo.keys())
+ difference = enabled_repo_ids.difference(matched_repoids_set)
+ if len(difference) > 0:
+ for repoid in difference:
+ changed = True
+ diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+ diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+ results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+ rhsm_arguments.extend(['--disable', repoid])
+
+ diff = {'before': diff_before,
+ 'after': diff_after,
+ 'before_header': "RHSM repositories",
+ 'after_header': "RHSM repositories"}
+
+ if not module.check_mode and changed:
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ results = out.splitlines()
+ module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+ purge=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ purge = module.params['purge']
+
+ repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py
new file mode 100644
index 00000000..848a5e3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/riak.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+ - This module can be used to join nodes to a cluster and check
+ the status of the cluster.
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ config_dir:
+ description:
+ - The path to the riak configuration directory
+ default: /etc/riak
+ http_conn:
+ description:
+ - The ip address and port that is listening for Riak HTTP queries
+ default: 127.0.0.1:8098
+ target_node:
+ description:
+ - The target node for certain operations (join, ping)
+ default: riak@127.0.0.1
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: "Join's a Riak node to another node"
+ community.general.riak:
+ command: join
+ target_node: riak@10.1.1.1
+
+- name: Wait for handoffs to finish. Use with async and poll.
+ community.general.riak:
+ wait_for_handoffs: yes
+
+- name: Wait for riak_kv service to startup
+ community.general.riak:
+ wait_for_service: kv
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
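+ # 'riak-admin ringready' reports whether all cluster members agree on the ring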
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+ wait_for_handoffs=dict(default=False, type='int'),
+ wait_for_ring=dict(default=False, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+ validate_certs=dict(default=True, type='bool'))
+ )
+
+ command = module.params.get('command')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+
+ # make sure riak commands are on the path
+ riak_bin = module.get_bin_path('riak')
+ riak_admin_bin = module.get_bin_path('riak-admin')
+
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+ # here we attempt to load those stats,
+ try:
+ stats = json.loads(stats_raw)
+ except Exception:
+ module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+ rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % (riak_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+# this could take a while, recommend to run in async mode
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py
new file mode 100644
index 00000000..13a93dd8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rocketchat.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+ - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+ - The domain for your environment without protocol. (for example
+ C(example.com) or C(chat.example.com))
+ required: true
+ token:
+ type: str
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ type: str
+ description:
+ - Specify the protocol used to send notification messages before the webhook URL. (for example C(http) or C(https))
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ type: str
+ description:
+ - Message to be sent.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+ specified during the creation of webhook.
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: "https://www.ansible.com/favicon.ico"
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. The representation for the available emojis can be
+ obtained from Rocket Chat (for example C(:thumbsup:)). If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments.
+'''
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Rocket Chat all options
+ community.general.rocketchat:
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+ delegate_to: localhost
+
+- name: Use the attachments API
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: 'load average: 0,74, 0,66, 0,63'
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+ delegate_to: localhost
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: bool
+ sample: false
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
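+ # with the default 'normal' color the text is sent as-is; any other color wraps it in a colored attachment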
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ if attachments is not None:
+ for attachment in attachments:
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+ payload['attachments'].append(attachment)
+
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
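+ # incoming webhook tokens are expected to contain a '/' separator, so reject anything else early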
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False),
+ channel=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', required=False)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
new file mode 100644
index 00000000..161361b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ type: str
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ type: str
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ type: str
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ type: str
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ type: str
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ type: str
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+'''
+
+EXAMPLES = '''
+ - name: Rollbar deployment notification
+ community.general.rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
+
+ - name: Notify rollbar about current git revision deployment by current user
+ community.general.rollbar_deployment:
+ token: "{{ rollbar_access_token }}"
+ environment: production
+ revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}"
+ user: "{{ lookup('env', 'USER') }}"
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
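+ # build the form-encoded payload for Rollbar's deploy API; access_token, environment and revision are always sent, the rest only when provided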
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data, method='POST')
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
new file mode 100644
index 00000000..1caa159b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies.
+description:
+ - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ project:
+ type: str
+ description:
+ - Sets the project which receive the ACL policy.
+ - If unset, it's a system ACL policy.
+ policy:
+ type: str
+ description:
+ - Sets the ACL policy content.
+ - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+ - It can be a YAML string or a pure Ansible inventory YAML object.
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+ project: "Ansible"
+ policy:
+ description: "my policy"
+ context:
+ application: rundeck
+ for:
+ project:
+ - allow: read
+ by:
+ group: "build"
+
+- name: Remove a rundeck system policy
+ community.general.rundeck_acl_policy:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+  description: Dictionary containing ACL policy information before modification.
+ returned: success
+ type: dict
+after:
+  description: Dictionary containing ACL policy information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils._text import to_text
+import json
+import re
+
+
+class RundeckACLManager:
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
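+        # fetch_url returns a None body on HTTP errors (4xx/5xx); only read and decode it when present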
+ if resp is not None:
+ resp = resp.read()
+ if resp != b"":
+ try:
+ json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (str(e), resp))
+ return resp, info
+
+ def get_acl(self):
+ resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+ return resp
+
+ def create_or_update_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="POST",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 409:
+ self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_acl())
+ else:
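+            # The ACL already exists: only issue an update when the stored contents differ from the wished policy.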
+ if facts["contents"] == self.module.params["policy"]:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before=facts, after=facts)
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="PUT",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 200:
+ self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 404:
+            self.module.fail_json(msg="ACL %s doesn't exist. Cannot update." % self.module.params["name"])
+
+ def remove_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ policy=dict(type='str'),
+ project=dict(type='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'present', ['policy']],
+ ],
+ supports_check_mode=True
+ )
+
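+    # re.match() only anchors at the start of the string, so this check rejects names that do not
+    # begin with an allowed character; it does not scan the full name for forbidden characters.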
+ if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])):
+        module.fail_json(msg="Name contains forbidden characters. The policy name can only contain the characters: a-zA-Z0-9,.+_-")
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckACLManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_acl()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py
new file mode 100644
index 00000000..5c846482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/rundeck_project.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage rundeck projects
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_project
+
+short_description: Manage Rundeck projects.
+description:
+ - Create and remove Rundeck projects through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: True
+ url:
+ type: str
+ description:
+      - Sets the Rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create a rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+
+- name: Remove a rundeck project
+ community.general.rundeck_project:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs
+ returned: failed
+ type: str
+before:
+ description: dictionary containing project information before modification
+ returned: success
+ type: dict
+after:
+ description: dictionary containing project information after modification
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+import json
+
+
+class RundeckProjectManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+            # fetch_url bodies are bytes on Python 3; skip JSON decoding for empty responses
+            if resp not in ("", b""):
+                try:
+                    json_resp = json.loads(to_native(resp))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (to_native(e), resp))
+ return resp, info
+
+ def get_project_facts(self):
+ resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
+ return resp
+
+ def create_or_update_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
+
+ resp, info = self.request_rundeck_api("projects", method="POST", data={
+ "name": self.module.params["name"],
+ "config": {}
+ })
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_project_facts())
+ else:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ def remove_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckProjectManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_project()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_project()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py
new file mode 100644
index 00000000..b80ed8cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/runit.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+- James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+ - Controls runit services on remote hosts using the sv utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: yes
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+ C(reloaded) will send a HUP (sv reload).
+ C(once) will run a normally downed sv once (sv once), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+      - Whether the service is enabled or not; if disabled, it also implies stopped.
+ type: bool
+ service_dir:
+ description:
+      - Directory runsv watches for services.
+ type: str
+ default: /var/service
+ service_src:
+ description:
+      - Directory where services are defined, the source of symlinks to I(service_dir).
+ type: str
+ default: /etc/sv
+'''
+
+EXAMPLES = r'''
+- name: Start sv dnscache, if not running
+ community.general.runit:
+ name: dnscache
+ state: started
+
+- name: Stop sv dnscache, if running
+ community.general.runit:
+ name: dnscache
+ state: stopped
+
+- name: Kill sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: killed
+
+- name: Restart sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: restarted
+
+- name: Reload sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+
+- name: Use alternative sv directory location
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+ service_dir: /run/service
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs): # @FIXME remove unused function?
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Sv(object):
+ """
+    Main class that handles runit services via sv; it can be subclassed and overridden in case
+    we want to use a 'derivative' like encore, s6, etc.
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = []
+ self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+ self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.get_status()
+ else:
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
+
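+        # 'sv status' reports problems (for example an unknown service) on stderr; surface that text as the state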
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+ # full_state *may* contain information about the logger:
+ # "down: /etc/service/service-without-logger: 1s, normally up\n"
+ # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
+ full_state_no_logger = self.full_state.split("; ")[0]
+
+ m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r' (\d+)s', full_state_no_logger)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(r'^run:', full_state_no_logger):
+ self.state = 'started'
+ elif re.search(r'^down:', full_state_no_logger):
+ self.state = 'stopped'
+ else:
+ self.state = 'unknown'
+ return
+
+ def started(self):
+ return self.start()
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, 'start', self.svc_full])
+
+ def stopped(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, 'once', self.svc_full])
+
+ def reloaded(self):
+ return self.reload()
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
+
+ def restarted(self):
+ return self.restart()
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
+
+ def killed(self):
+ return self.kill()
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e))
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ dist=dict(type='str', default='runit'), # @FIXME unused param?
+ service_dir=dict(type='str', default='/var/service'),
+ service_src=dict(type='str', default='/etc/sv'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+
+ sv = Sv(module)
+ changed = False
+ orig_state = sv.report()
+
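+    # The enable/disable symlink and the run state are handled as two independent changes below.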
+ if enabled is not None and enabled != sv.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ sv.enable()
+ else:
+ sv.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != sv.state:
+ changed = True
+ if not module.check_mode:
+ getattr(sv, state)()
+
+ module.exit_json(changed=changed, sv=sv.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py
new file mode 100644
index 00000000..1c66adf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/say.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Michael DeHaan <michael@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak.
+description:
+  - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+ - If you like this module, you may also be interested in the osx_say callback plugin.
+  - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+options:
+ msg:
+ type: str
+ description:
+ What to say
+ required: true
+ voice:
+ type: str
+ description:
+ What voice to use
+ required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+ community.general.say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
+'''
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
+ cmd = [executable, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+ possibles = ('say', 'espeak', 'espeak-ng')
+
+ if platform.system() != 'Darwin':
+        # A 'say' binary found on a non-macOS system is likely the GNUstep tool, which doesn't support the 'voice' parameter
+ voice = None
+
+ for possible in possibles:
+ executable = module.get_bin_path(possible)
+ if executable:
+ break
+ else:
+        module.fail_json(msg='Unable to find any of %s' % ', '.join(possibles))
+
+ if module.check_mode:
+ module.exit_json(msg=msg, changed=False)
+
+ say(module, executable, msg, voice)
+
+ module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py
new file mode 100644
index 00000000..8df9a5e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_compute.py
@@ -0,0 +1,671 @@
+#!/usr/bin/python
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ public_ip:
+ type: str
+ description:
+      - Manage public IP on a Scaleway server.
+      - Could be a Scaleway IP address UUID.
+      - C(dynamic) means that the IP is destroyed at the same time the host is destroyed.
+      - C(absent) means no public IP at all.
+ default: absent
+
+ enable_ipv6:
+ description:
+ - Enable public IPv6 connectivity on the instance
+ default: false
+ type: bool
+
+ image:
+ type: str
+ description:
+ - Image identifier used to start the instance with
+ required: true
+
+ name:
+ type: str
+ description:
+ - Name of the instance
+
+ organization:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+ - running
+ - restarted
+ - stopped
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the instance (5 max)
+ required: false
+ default: []
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ commercial_type:
+ type: str
+ description:
+ - Commercial name of the compute node
+ required: true
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the server to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the server
+ required: false
+ default: 3
+
+ security_group:
+ type: str
+ description:
+ - Security group unique identifier
+      - If no value is provided, the default security group or current security group will be used.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ tags:
+ - test
+ - www
+
+- name: Create a server attached to a security group
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
+ tags:
+ - test
+ - www
+
+- name: Destroy it right after
+ community.general.scaleway_compute:
+ name: foobar
+ state: absent
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ organization: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+'''
+
+RETURN = '''
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+SCALEWAY_SERVER_STATES = (
+ 'stopped',
+ 'stopping',
+ 'starting',
+ 'running',
+ 'locked'
+)
+
+SCALEWAY_TRANSITIONS_STATES = (
+ "stopping",
+ "starting",
+ "pending"
+)
+
+
+def check_image_id(compute_api, image_id):
+ response = compute_api.get(path="images/%s" % image_id)
+
+ if not response.ok:
+ msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
+ compute_api.module.fail_json(msg=msg)
+
+
+def fetch_state(compute_api, server):
+ compute_api.module.debug("fetch_state of server: %s" % server["id"])
+ response = compute_api.get(path="servers/%s" % server["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
+ return response.json["server"]["state"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(compute_api, server, wait=None):
+ if wait is None:
+ wait = compute_api.module.params["wait"]
+ if not wait:
+ return
+
+ wait_timeout = compute_api.module.params["wait_timeout"]
+ wait_sleep_time = compute_api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ compute_api.module.debug("We are going to wait for the server to finish its transition")
+ if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
+ compute_api.module.debug("It seems that the server is not in transition anymore.")
+ compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
+ break
+ time.sleep(wait_sleep_time)
+ else:
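+        # while/else: this branch runs only when the loop finished without a break, i.e. the timeout elapsed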
+ compute_api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def public_ip_payload(compute_api, public_ip):
+ # We don't want a public ip
+ if public_ip in ("absent",):
+ return {"dynamic_ip_required": False}
+
+ # IP is only attached to the instance and is released as soon as the instance terminates
+ if public_ip in ("dynamic", "allocated"):
+ return {"dynamic_ip_required": True}
+
+ # We check that the IP we want to attach exists, if so its ID is returned
+ response = compute_api.get("ips")
+ if not response.ok:
+ msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ ip_list = []
+ try:
+ ip_list = response.json["ips"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
+
+ lookup = [ip["id"] for ip in ip_list]
+ if public_ip in lookup:
+ return {"public_ip": public_ip}
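+    # If the requested IP UUID is not found in the account, the function falls through and returns None.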
+
+
+def create_server(compute_api, server):
+ compute_api.module.debug("Starting a create_server")
+ target_server = None
+ data = {"enable_ipv6": server["enable_ipv6"],
+ "tags": server["tags"],
+ "commercial_type": server["commercial_type"],
+ "image": server["image"],
+ "dynamic_ip_required": server["dynamic_ip_required"],
+ "name": server["name"],
+ "organization": server["organization"]
+ }
+
+ if server["security_group"]:
+ data["security_group"] = server["security_group"]
+
+ response = compute_api.post(path="servers", data=data)
+
+ if not response.ok:
+ msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def restart_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="reboot")
+
+
+def stop_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweroff")
+
+
+def start_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweron")
+
+
+def perform_action(compute_api, server, action):
+ response = compute_api.post(path="servers/%s/action" % server["id"],
+ data={"action": action})
+ if not response.ok:
+ msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def remove_server(compute_api, server):
+ compute_api.module.debug("Starting remove server strategy")
+ response = compute_api.delete(path="servers/%s" % server["id"])
+ if not response.ok:
+ msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def present_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting present strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ return changed, target_server
+
+
+def absent_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting absent strategy")
+ changed = False
+ target_server = None
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ return changed, {"status": "Server already absent."}
+ else:
+ target_server = query_results[0]
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be made absent." % target_server["id"]}
+
+ # A server MUST be stopped to be deleted.
+ while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+ response = stop_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+
+ response = remove_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ return changed, {"status": "Server %s deleted" % target_server["id"]}
+
+
+def running_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting running strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being run."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("running", "starting"):
+ compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ response = start_server(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def stop_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting stop strategy")
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ changed = False
+
+ if not query_results:
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being stopped."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ changed = True
+ else:
+ target_server = query_results[0]
+
+ compute_api.module.debug("stop_strategy: Servers are found.")
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("stopped",):
+ compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be stopped." % target_server["id"]}
+
+ response = stop_server(compute_api=compute_api, server=target_server)
+ compute_api.module.debug(response.json)
+ compute_api.module.debug(response.ok)
+
+ if not response.ok:
+ msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def restart_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting restart strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being rebooted."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api,
+ target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
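+    # A restart always reports a change, even when the server attributes did not need patching.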
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+state_strategy = {
+ "present": present_strategy,
+ "restarted": restart_strategy,
+ "stopped": stop_strategy,
+ "running": running_strategy,
+ "absent": absent_strategy
+}
+
+
+def find(compute_api, wished_server, per_page=1):
+ compute_api.module.debug("Getting inside find")
+ # Only the name attribute is accepted in the Compute query API
+ response = compute_api.get("servers", params={"name": wished_server["name"],
+ "per_page": per_page})
+
+ if not response.ok:
+ msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ search_results = response.json["servers"]
+
+ return search_results
+
+
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+ "ipv6",
+ "tags",
+ "name",
+ "dynamic_ip_required",
+ "security_group",
+)
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+ compute_api.module.debug("Checking if server attributes should be changed")
+ compute_api.module.debug("Current Server: %s" % target_server)
+ compute_api.module.debug("Wished Server: %s" % wished_server)
+ debug_dict = dict((x, (target_server[x], wished_server[x]))
+ for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+ if x in target_server and x in wished_server)
+ compute_api.module.debug("Debug dict %s" % debug_dict)
+ try:
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+ # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys(
+ ) and target_server[key]["id"] != wished_server[key]:
+ return True
+ # Handling other structure compare simply the two objects content
+ elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+ return True
+ return False
+ except AttributeError:
+ compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+ compute_api.module.debug("Starting patching server attributes")
+ patch_payload = dict()
+
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+ # When you are working with dict, only ID matter as we ask user to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+ # Setting all key to current value except ID
+ key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
+ # Setting ID to the user specified ID
+ key_dict["id"] = wished_server[key]
+ patch_payload[key] = key_dict
+ elif not isinstance(target_server[key], dict):
+ patch_payload[key] = wished_server[key]
+
+ response = compute_api.patch(path="servers/%s" % target_server["id"],
+ data=patch_payload)
+ if not response.ok:
+ msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def core(module):
+ region = module.params["region"]
+ wished_server = {
+ "state": module.params["state"],
+ "image": module.params["image"],
+ "name": module.params["name"],
+ "commercial_type": module.params["commercial_type"],
+ "enable_ipv6": module.params["enable_ipv6"],
+ "tags": module.params["tags"],
+ "organization": module.params["organization"],
+ "security_group": module.params["security_group"]
+ }
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ compute_api = Scaleway(module=module)
+
+ check_image_id(compute_api, wished_server["image"])
+
+ # IP parameters of the wished server depends on the configuration
+ ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+ wished_server.update(ip_payload)
+
+ changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+ module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ image=dict(required=True),
+ name=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ commercial_type=dict(required=True),
+ enable_ipv6=dict(default=False, type="bool"),
+ public_ip=dict(default="absent"),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ security_group=dict(),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
new file mode 100644
index 00000000..57803245
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+#
+# Scaleway database backups management module
+#
+# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_database_backup
+short_description: Scaleway database backups management module
+version_added: 1.2.0
+author: Guillaume Rodriguez (@guillaume_ro_fr)
+description:
+  - This module manages database backups on a Scaleway account U(https://developer.scaleway.com).
+extends_documentation_fragment:
+ - community.general.scaleway
+options:
+ state:
+ description:
+ - Indicate desired state of the database backup.
+ - C(present) creates a backup.
+ - C(absent) deletes the backup.
+ - C(exported) creates a download link for the backup.
+ - C(restored) restores the backup to a new database.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - exported
+ - restored
+
+ region:
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ type: str
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ id:
+ description:
+ - UUID used to identify the database backup.
+ - Required for C(absent), C(exported) and C(restored) states.
+ type: str
+
+ name:
+ description:
+ - Name used to identify the database backup.
+ - Required for C(present) state.
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ database_name:
+ description:
+ - Name used to identify the database.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ instance_id:
+ description:
+ - UUID of the instance associated to the database backup.
+ - Required for C(present) and C(restored) states.
+ - Ignored when C(state=absent) or C(state=exported).
+ type: str
+ required: false
+
+ expires_at:
+ description:
+ - Expiration datetime of the database backup (ISO 8601 format).
+ - Ignored when C(state=absent), C(state=exported) or C(state=restored).
+ type: str
+ required: false
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Time to wait for the backup to reach the expected state.
+ type: int
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ description:
+ - Time to wait before every attempt to check the state of the backup.
+ type: int
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+ - name: Create a backup
+ community.general.scaleway_database_backup:
+ name: 'my_backup'
+ state: present
+ region: 'fr-par'
+ database_name: 'my-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Export a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: exported
+ region: 'fr-par'
+
+ - name: Restore a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: restored
+ region: 'fr-par'
+ database_name: 'my-new-database'
+ instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+ - name: Remove a backup
+ community.general.scaleway_database_backup:
+ id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+ state: absent
+ region: 'fr-par'
+'''
+
+RETURN = '''
+metadata:
+ description: Backup metadata.
+ returned: when C(state=present), C(state=exported) or C(state=restored)
+ type: dict
+ sample: {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
+ }
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ scaleway_argument_spec,
+ SCALEWAY_REGIONS,
+)
+
+stable_states = (
+ 'ready',
+ 'deleting',
+)
+
+
+def wait_to_complete_state_transition(module, account_api, backup=None):
+ wait_timeout = module.params['wait_timeout']
+ wait_sleep_time = module.params['wait_sleep_time']
+
+ if backup is None or backup['status'] in stable_states:
+ return backup
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ module.debug('We are going to wait for the backup to finish its transition')
+
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if not response.ok:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
+ break
+ response_json = response.json
+
+ if response_json['status'] in stable_states:
+ module.debug('It seems that the backup is not in transition anymore.')
+ module.debug('Backup in state: %s' % response_json['status'])
+ return response_json
+ time.sleep(wait_sleep_time)
+ else:
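+        # while/else: reached only when the loop ran to the timeout without the backup entering a stable state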
+ module.fail_json(msg='Backup takes too long to finish its transition')
+
+
+def present_strategy(module, account_api, backup):
+ name = module.params['name']
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+ expiration_date = module.params['expires_at']
+
+ if backup is not None:
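+        # An existing backup is left untouched unless the requested name or expiration date differs.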
+ if (backup['name'] == name or name is None) and (
+ backup['expires_at'] == expiration_date or expiration_date is None):
+ wait_to_complete_state_transition(module, account_api, backup)
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {}
+ if name is not None:
+ payload['name'] = name
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
+ payload)
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def absent_strategy(module, account_api, backup):
+ if backup is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def exported_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ if backup['download_url'] is not None:
+ module.exit_json(changed=False, metadata=backup)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+ response = account_api.post(
+ '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def restored_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+
+ payload = {'database_name': database_name, 'instance_id': instance_id}
+ response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
+ payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+state_strategy = {
+ 'present': present_strategy,
+ 'absent': absent_strategy,
+ 'exported': exported_strategy,
+ 'restored': restored_strategy,
+}
+
+
+def core(module):
+ state = module.params['state']
+ backup_id = module.params['id']
+
+ account_api = Scaleway(module)
+
+ if backup_id is None:
+ backup_by_id = None
+ else:
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
+ status_code = response.status_code
+ backup_json = response.json
+ backup_by_id = None
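+        # A 404 from the API means the backup does not exist; treat it as absent instead of failing.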
+ if status_code == 404:
+ backup_by_id = None
+ elif response.ok:
+ backup_by_id = backup_json
+ else:
+ module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
+
+ state_strategy[state](module, account_api, backup_by_id)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ id=dict(),
+ name=dict(type='str'),
+ database_name=dict(required=False),
+ instance_id=dict(required=False),
+ expires_at=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ wait_sleep_time=dict(type='int', default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['database_name', 'instance_id'],
+ ],
+ required_if=[
+ ['state', 'present', ['name', 'database_name', 'instance_id']],
+ ['state', 'absent', ['id']],
+ ['state', 'exported', ['id']],
+ ['state', 'restored', ['id', 'database_name', 'instance_id']],
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py
new file mode 100644
index 00000000..31bbfa76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_facts.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_image_info) instead.
+short_description: Gather facts about the Scaleway images available.
+description:
+ - Gather facts about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images facts
+ community.general.scaleway_image_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_image_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_facts": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageFacts, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
new file mode 100644
index 00000000..3fad216e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_info
+short_description: Gather information about the Scaleway images available.
+description:
+ - Gather information about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images information
+ community.general.scaleway_image_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_image_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_image_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_image_info": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageInfo, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_image_info=ScalewayImageInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py
new file mode 100644
index 00000000..26da122e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+#
+# Scaleway IP management module
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@sieben)
+description:
+    - This module manages IPs on a Scaleway account
+      U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the IP.
+ default: present
+ choices:
+ - present
+ - absent
+
+ organization:
+ type: str
+ description:
+ - Scaleway organization identifier
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ id:
+ type: str
+ description:
+      - ID of the Scaleway IP (UUID).
+
+ server:
+ type: str
+ description:
+      - ID of the server you want to attach the IP to.
+      - To detach the IP, do not specify this option.
+
+ reverse:
+ type: str
+ description:
+      - Reverse DNS to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+ community.general.scaleway_ip:
+ organization: '{{ scw_org }}'
+ state: present
+ region: par1
+ register: ip_creation_task
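+
+# A minimal illustrative sketch, not taken from the upstream examples: attach the
+# created IP to a server and set its reverse, using the documented server and
+# reverse options. The server UUID below is a placeholder.
+- name: Attach the IP to a server and set its reverse
+  community.general.scaleway_ip:
+    id: '{{ ip_creation_task.scaleway_ip.id }}'
+    server: 00000000-0000-0000-0000-000000000000
+    reverse: my-server.example.com
+    organization: '{{ scw_org }}'
+    state: present
+    region: par1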
+
+- name: Make sure IP deleted
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ips": [
+ {
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
+ }
+ ]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def ip_attributes_should_be_changed(api, target_ip, wished_ip):
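+    # Build a patch payload containing only the attributes (reverse, server) whose
+    # current value on the API side differs from the wished state.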
+ patch_payload = {}
+
+ if target_ip["reverse"] != wished_ip["reverse"]:
+ patch_payload["reverse"] = wished_ip["reverse"]
+
+ # IP is assigned to a server
+ if target_ip["server"] is None and wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+
+    # IP is being detached from its server
+ try:
+ if target_ip["server"]["id"] and wished_ip["server"] is None:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ # IP is migrated between 2 different servers
+ try:
+ if target_ip["server"]["id"] != wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ return patch_payload
+
+
+def payload_from_wished_ip(wished_ip):
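+    # Drop the 'id' key and any unset attributes so only settable fields are sent to the API.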
+ return dict(
+ (k, v)
+ for k, v in wished_ip.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, wished_ip):
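+    # Idempotent handling of state=present: create the IP when its id is not found,
+    # otherwise patch only the attributes that differ from the wished state.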
+ changed = False
+
+ response = api.get('ips')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ ips_list = response.json["ips"]
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+
+ if wished_ip["id"] not in ip_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "An IP would be created."}
+
+ # Create IP
+ creation_response = api.post('/ips',
+ data=payload_from_wished_ip(wished_ip))
+
+ if not creation_response.ok:
+ msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+ return changed, creation_response.json["ip"]
+
+ target_ip = ip_lookup[wished_ip["id"]]
+ patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
+
+ if not patch_payload:
+ return changed, target_ip
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP attributes would be changed."}
+
+ ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
+ data=patch_payload)
+
+ if not ip_patch_response.ok:
+ api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
+ ip_patch_response.status_code, ip_patch_response.json['message']))
+
+ return changed, ip_patch_response.json["ip"]
+
+
+def absent_strategy(api, wished_ip):
+ response = api.get('ips')
+ changed = False
+
+ status_code = response.status_code
+ ips_json = response.json
+ ips_list = ips_json["ips"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+ if wished_ip["id"] not in ip_lookup.keys():
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP would be destroyed"}
+
+ response = api.delete('/ips/' + wished_ip["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+ wished_ip = {
+ "organization": module.params['organization'],
+ "reverse": module.params["reverse"],
+ "id": module.params["id"],
+ "server": module.params["server"]
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
+ else:
+ changed, summary = present_strategy(api=api, wished_ip=wished_ip)
+ module.exit_json(changed=changed, scaleway_ip=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ organization=dict(required=True),
+ server=dict(),
+ reverse=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ id=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py
new file mode 100644
index 00000000..4227f360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_ip_info) instead.
+short_description: Gather facts about the Scaleway ips available.
+description:
+ - Gather facts about the Scaleway ips available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips facts
+ community.general.scaleway_ip_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_ip_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_facts": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpFacts, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_ip_facts': ScalewayIpFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
new file mode 100644
index 00000000..145fb203
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_info
+short_description: Gather information about the Scaleway ips available.
+description:
+ - Gather information about the Scaleway ips available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway ips information
+ community.general.scaleway_ip_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_ip_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_ip_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_ip_info": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpInfo, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_ip_info=ScalewayIpInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py
new file mode 100644
index 00000000..a9358188
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_lb.py
@@ -0,0 +1,356 @@
+#!/usr/bin/python
+#
+# Scaleway Load-balancer management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_lb
+short_description: Scaleway load-balancer management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages load-balancers on Scaleway."
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ name:
+ type: str
+ description:
+ - Name of the load-balancer
+ required: true
+
+ description:
+ type: str
+ description:
+ - Description of the load-balancer
+ required: true
+
+ organization_id:
+ type: str
+ description:
+ - Organization identifier
+ required: true
+
+ state:
+ type: str
+ description:
+      - Indicate desired state of the load-balancer.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+      - Scaleway region.
+ required: true
+ choices:
+ - nl-ams
+ - fr-par
+ - pl-waw
+
+ tags:
+ type: list
+ description:
+ - List of tags to apply to the load-balancer
+
+ wait:
+ description:
+ - Wait for the load-balancer to reach its desired state before returning.
+ type: bool
+ default: 'no'
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the load-balancer to reach the expected state
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the load-balancer
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ tags:
+ - hello
+
+- name: Delete a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: absent
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
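+
+# A minimal sketch, not an upstream example: create a load-balancer and block until it
+# reaches a stable state, using the documented wait, wait_timeout and wait_sleep_time
+# options. The description value is illustrative.
+- name: Create a load-balancer and wait for it to be ready
+  community.general.scaleway_lb:
+    name: foobar
+    description: Load-balancer used by the example playbook
+    state: present
+    organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+    region: fr-par
+    wait: true
+    wait_timeout: 600
+    wait_sleep_time: 5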
+'''
+
+RETURNS = '''
+{
+ "scaleway_lb": {
+ "backend_count": 0,
+ "frontend_count": 0,
+ "description": "Description of my load-balancer",
+ "id": "00000000-0000-0000-0000-000000000000",
+ "instances": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.1",
+ "region": "fr-par",
+ "status": "ready"
+ },
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.2",
+ "region": "fr-par",
+ "status": "ready"
+ }
+ ],
+ "ip": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "192.168.0.1",
+ "lb_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "reverse": ""
+ }
+ ],
+ "name": "lb_ansible_test",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "status": "ready",
+ "tags": [
+ "first_tag",
+ "second_tag"
+ ]
+ }
+}
+'''
+
+import datetime
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "name",
+ "description"
+)
+
+
+def payload_from_wished_lb(wished_lb):
+ return {
+ "organization_id": wished_lb["organization_id"],
+ "name": wished_lb["name"],
+ "tags": wished_lb["tags"],
+ "description": wished_lb["description"]
+ }
+
+
+def fetch_state(api, lb):
+ api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
+ response = api.get(path=api.api_path + "/%s" % lb["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ api.module.fail_json(msg=msg)
+
+ try:
+ api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(api, lb, force_wait=False):
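+    # Poll the load-balancer state every wait_sleep_time seconds until it reaches a
+    # stable state ("ready" or "absent") or wait_timeout expires; skipped unless
+    # wait=true or force_wait is set.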
+ wait = api.module.params["wait"]
+ if not (wait or force_wait):
+ return
+ wait_timeout = api.module.params["wait_timeout"]
+ wait_sleep_time = api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ api.module.debug("We are going to wait for the load-balancer to finish its transition")
+ state = fetch_state(api, lb)
+ if state in STABLE_STATES:
+ api.module.debug("It seems that the load-balancer is not in transition anymore.")
+ api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def lb_attributes_should_be_changed(target_lb, wished_lb):
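+    # Return a payload containing every mutable attribute (name, description) when at
+    # least one of them differs, or an empty dict when nothing needs to change.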
+ diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
+
+ if diff:
+ return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
+ else:
+ return diff
+
+
+def present_strategy(api, wished_lb):
+ changed = False
+
+ response = api.get(path=api.api_path)
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ lbs_list = response.json["lbs"]
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+
+ if wished_lb["name"] not in lb_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A load-balancer would be created."}
+
+ # Create Load-balancer
+ api.warn(payload_from_wished_lb(wished_lb))
+ creation_response = api.post(path=api.api_path,
+ data=payload_from_wished_lb(wished_lb))
+
+ if not creation_response.ok:
+ msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(api=api, lb=creation_response.json)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
+ wished_lb=wished_lb)
+
+ if not patch_payload:
+ return changed, target_lb
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer attributes would be changed."}
+
+ lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
+ data=patch_payload)
+
+ if not lb_patch_response.ok:
+ api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
+ lb_patch_response.status_code, lb_patch_response.json['message']))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, lb_patch_response.json
+
+
+def absent_strategy(api, wished_lb):
+ response = api.get(path=api.api_path)
+ changed = False
+
+ status_code = response.status_code
+ lbs_json = response.json
+ lbs_list = lbs_json["lbs"]
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+ if wished_lb["name"] not in lb_lookup.keys():
+ return changed, {}
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer would be destroyed"}
+
+ wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_load_balancer = {
+ "state": module.params["state"],
+ "name": module.params["name"],
+ "description": module.params["description"],
+ "tags": module.params["tags"],
+ "organization_id": module.params["organization_id"]
+ }
+ module.params['api_url'] = SCALEWAY_ENDPOINT
+ api = Scaleway(module=module)
+ api.api_path = "lb/v1/regions/%s/lbs" % region
+
+ changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
+ wished_lb=wished_load_balancer)
+ module.exit_json(changed=changed, scaleway_lb=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ description=dict(required=True),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", default=[]),
+ organization_id=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py
new file mode 100644
index 00000000..ee571cdc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_organization_info) instead.
+short_description: Gather facts about the Scaleway organizations available.
+description:
+ - Gather facts about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations facts
+ community.general.scaleway_organization_facts:
+'''
+
+RETURN = r'''
+---
+scaleway_organization_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_facts": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationFacts, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_organization_facts': ScalewayOrganizationFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
new file mode 100644
index 00000000..f530dcb8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_info
+short_description: Gather information about the Scaleway organizations available.
+description:
+ - Gather information about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ api_url:
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations information
+ community.general.scaleway_organization_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_organization_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_organization_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_organization_info": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationInfo, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
new file mode 100644
index 00000000..9303e06e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group
+short_description: Scaleway Security Group management module
+author: Antoine Barbare (@abarbare)
+description:
+    - This module manages Security Groups on a Scaleway account
+ U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ description:
+ - Indicate desired state of the Security Group.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+ organization:
+ description:
+ - Organization identifier.
+ type: str
+ required: true
+
+ region:
+ description:
+ - Scaleway region to use (for example C(par1)).
+ type: str
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ description:
+ - Name of the Security Group.
+ type: str
+ required: true
+
+ description:
+ description:
+ - Description of the Security Group.
+ type: str
+
+ stateful:
+ description:
+ - Create a stateful security group which allows established connections in and out.
+ type: bool
+ required: true
+
+ inbound_default_policy:
+ description:
+ - Default policy for incoming traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ outbound_default_policy:
+ description:
+      - Default policy for outgoing traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ organization_default:
+ description:
+ - Create security group to be the default one.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group
+ community.general.scaleway_security_group:
+ state: present
+ region: par1
+ name: security_group
+ description: "my security group description"
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation_task
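+
+# A minimal sketch, not an upstream example: remove the same Security Group again.
+# state, region, name, organization and stateful are the options required by this module.
+- name: Delete the Security Group
+  community.general.scaleway_security_group:
+    state: absent
+    region: par1
+    name: security_group
+    organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+    stateful: false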
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+ return dict(
+ (k, v)
+ for k, v in security_group.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, security_group):
+ ret = {'changed': False}
+
+ response = api.get('security_groups')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+
+ if security_group['name'] not in security_group_lookup.keys():
+ ret['changed'] = True
+ if api.module.check_mode:
+ # Help user when check mode is enabled by defining id key
+ ret['scaleway_security_group'] = {'id': str(uuid4())}
+ return ret
+
+ # Create Security Group
+ response = api.post('/security_groups',
+ data=payload_from_security_group(security_group))
+
+ if not response.ok:
+ msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+ ret['scaleway_security_group'] = response.json['security_group']
+
+ else:
+ ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+ return ret
+
+
+def absent_strategy(api, security_group):
+ response = api.get('security_groups')
+ ret = {'changed': False}
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+ if security_group['name'] not in security_group_lookup.keys():
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ security_group = {
+ 'organization': module.params['organization'],
+ 'name': module.params['name'],
+ 'description': module.params['description'],
+ 'stateful': module.params['stateful'],
+ 'inbound_default_policy': module.params['inbound_default_policy'],
+ 'outbound_default_policy': module.params['outbound_default_policy'],
+ 'organization_default': module.params['organization_default'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ api = Scaleway(module=module)
+ if module.params['state'] == 'present':
+ summary = present_strategy(api=api, security_group=security_group)
+ else:
+ summary = absent_strategy(api=api, security_group=security_group)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ organization=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ stateful=dict(type='bool', required=True),
+ inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ organization_default=dict(type='bool'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py
new file mode 100644
index 00000000..a43bfedb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_facts.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_security_group_info) instead.
+short_description: Gather facts about the Scaleway security groups available.
+description:
+ - Gather facts about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups facts
+ community.general.scaleway_security_group_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_facts": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupFacts, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_security_group_facts': ScalewaySecurityGroupFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
new file mode 100644
index 00000000..d3488f0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_info
+short_description: Gather information about the Scaleway security groups available.
+description:
+ - Gather information about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+- community.general.scaleway
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups information
+ community.general.scaleway_security_group_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_security_group_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_security_group_info": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupInfo, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
new file mode 100644
index 00000000..054a4d47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+#
+# Scaleway Security Group Rule management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group_rule
+short_description: Scaleway Security Group Rule management module
+author: Antoine Barbare (@abarbare)
+description:
+    - This module manages Security Group Rules on a Scaleway account
+      U(https://developer.scaleway.com).
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the Security Group Rule.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ protocol:
+ type: str
+ description:
+ - Network protocol to use
+ choices:
+ - TCP
+ - UDP
+ - ICMP
+ required: true
+
+ port:
+ description:
+      - Port related to the rule; use a null value to match all ports.
+ required: true
+ type: int
+
+ ip_range:
+ type: str
+ description:
+      - IPv4 CIDR notation to apply to the rule.
+ default: 0.0.0.0/0
+
+ direction:
+ type: str
+ description:
+ - Rule direction
+ choices:
+ - inbound
+ - outbound
+ required: true
+
+ action:
+ type: str
+ description:
+ - Rule action
+ choices:
+ - accept
+ - drop
+ required: true
+
+ security_group:
+ type: str
+ description:
+ - Security Group unique identifier
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a Security Group Rule
+ community.general.scaleway_security_group_rule:
+ state: present
+ region: par1
+ protocol: TCP
+ port: 80
+ ip_range: 0.0.0.0/0
+ direction: inbound
+ action: accept
+ security_group: b57210ee-1281-4820-a6db-329f78596ecb
+ register: security_group_rule_creation_task
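+
+  # A minimal sketch, not an upstream example: remove the same rule again by
+  # describing it with identical parameters and setting state to absent.
+  - name: Delete the Security Group Rule
+    community.general.scaleway_security_group_rule:
+      state: absent
+      region: par1
+      protocol: TCP
+      port: 80
+      ip_range: 0.0.0.0/0
+      direction: inbound
+      action: accept
+      security_group: b57210ee-1281-4820-a6db-329f78596ecb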
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible_collections.community.general.plugins.module_utils.compat.ipaddress import ip_network
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_sgr_from_api(security_group_rules, security_group_rule):
+ """ Check if a security_group_rule specs are present in security_group_rules
+ Return None if no rules match the specs
+ Return the rule if found
+ """
+ for sgr in security_group_rules:
+ if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
+ sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
+ sgr['protocol'] == security_group_rule['protocol']):
+ return sgr
+
+ return None
+
+
+def present_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ # Create Security Group Rule
+ response = api.post('/security_groups/%s/rules' % security_group_id,
+ data=payload_from_object(security_group_rule))
+
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error during security group rule creation: "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+ ret['scaleway_security_group_rule'] = response.json['rule']
+
+ else:
+ ret['scaleway_security_group_rule'] = existing_rule
+
+ return ret
+
+
+def absent_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete(
+ '/security_groups/%s/rules/%s' %
+ (security_group_id, existing_rule['id']))
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error deleting security group rule "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ api = Scaleway(module=module)
+
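+    # The module's 'port' option maps onto the API's 'dest_port_from' field; the other
+    # options are passed through under their own names.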
+ security_group_rule = {
+ 'protocol': module.params['protocol'],
+ 'dest_port_from': module.params['port'],
+ 'ip_range': module.params['ip_range'],
+ 'direction': module.params['direction'],
+ 'action': module.params['action'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ if module.params['state'] == 'present':
+ summary = present_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ else:
+ summary = absent_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
+ port=dict(type='int', required=True),
+ ip_range=dict(type='str', default='0.0.0.0/0'),
+ direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
+ action=dict(type='str', required=True, choices=['accept', 'drop']),
+ security_group=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py
new file mode 100644
index 00000000..d3e73669
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_facts.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_server_info) instead.
+short_description: Gather facts about the Scaleway servers available.
+description:
+ - Gather facts about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example par1).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers facts
+ community.general.scaleway_server_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_server_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_facts": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerFacts, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_server_facts': ScalewayServerFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
new file mode 100644
index 00000000..43b0badc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_info
+short_description: Gather information about the Scaleway servers available.
+description:
+ - Gather information about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers information
+ community.general.scaleway_server_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_server_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_server_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_server_info": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerInfo, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_server_info=ScalewayServerInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py
new file mode 100644
index 00000000..25f99e72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_facts.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_snapshot_info) instead.
+short_description: Gather facts about the Scaleway snapshots available.
+description:
+ - Gather facts about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots facts
+ community.general.scaleway_snapshot_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_facts": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotFacts, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_snapshot_facts': ScalewaySnapshotFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
new file mode 100644
index 00000000..f31b74b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_info
+short_description: Gather information about the Scaleway snapshots available.
+description:
+ - Gather information about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots information
+ community.general.scaleway_snapshot_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_snapshot_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_snapshot_info": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotInfo, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
new file mode 100644
index 00000000..08555b23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+#
+# Scaleway SSH keys management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_sshkey
+short_description: Scaleway SSH keys management module
+author: Remy Leone (@sieben)
+description:
+ - This module manages SSH keys on a Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the SSH key.
+ default: present
+ choices:
+ - present
+ - absent
+ ssh_pub_key:
+ type: str
+ description:
+ - The public SSH key as a string to add.
+ required: true
+ api_url:
+ type: str
+ description:
+ - Scaleway API URL
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+'''
+
+EXAMPLES = '''
+- name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+- name: "Delete SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "absent"
+
+- name: "Add SSH key with explicit token"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+ oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "ssh_public_keys": [
+ {"key": "ssh-rsa AAAA...."}
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+ ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+ ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+ return ssh_key_lookup
+
+
+def extract_user_id(raw_organization_dict):
+ return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+ ssh_list = {"ssh_public_keys": [{"key": key}
+ for key in ssh_lookup]}
+ return ssh_list
+
+
+def core(module):
+ ssh_pub_key = module.params['ssh_pub_key']
+ state = module.params["state"]
+ account_api = Scaleway(module)
+ response = account_api.get('organizations')
+
+ status_code = response.status_code
+ organization_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
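+ # The SSH keys live on the Scaleway user object, so resolve the user ID and the currently present keys before patching.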
+ user_id = extract_user_id(organization_json)
+ present_sshkeys = []
+ try:
+ present_sshkeys = extract_present_sshkeys(organization_json)
+ except (KeyError, IndexError) as e:
+ module.fail_json(msg="Error while extracting present SSH keys from API")
+
+ if state in ('present',):
+ if ssh_pub_key in present_sshkeys:
+ module.exit_json(changed=False)
+
+ # If key not found create it!
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.append(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if ssh_pub_key not in present_sshkeys:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.remove(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ ssh_pub_key=dict(required=True),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
new file mode 100644
index 00000000..4a38e76d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# Scaleway user data management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@sieben)
+description:
+ - "This module manages user_data on compute instances on Scaleway."
+ - "It can be used to configure cloud-init for instance"
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+
+ server_id:
+ type: str
+ description:
+ - Scaleway Compute instance ID of the server
+ required: true
+
+ user_data:
+ type: dict
+ description:
+ - User-defined data. Typically used with C(cloud-init).
+ - Pass your cloud-init script here as a string.
+ required: false
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+ community.general.scaleway_user_data:
+ server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+ region: ams1
+ user_data:
+ cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.patch(path=path, data=value, headers={"Content-type": "text/plain"})
+ if not response.ok:
+ msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def delete_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+ response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+ if not response.ok:
+ msg = 'Error during user_data deletion: (%s) %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def get_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.get(path=path)
+ if not response.ok:
+ msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ server_id = module.params["server_id"]
+ user_data = module.params["user_data"]
+ changed = False
+
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+ compute_api = Scaleway(module=module)
+
+ user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+ if not user_data_list.ok:
+ msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+ compute_api.module.fail_json(msg=msg)
+
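+ # Fetch the current value of every existing user_data key so it can be compared against the desired state below.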
+ present_user_data_keys = user_data_list.json["user_data"]
+ present_user_data = dict(
+ (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+ for key in present_user_data_keys
+ )
+
+ if present_user_data == user_data:
+ module.exit_json(changed=changed, msg=user_data_list.json)
+
+ # First we remove keys that are not defined in the wished user_data
+ for key in present_user_data:
+ if key not in user_data:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+ # Then we patch keys that are different
+ for key, value in user_data.items():
+ if key not in present_user_data or user_data[key] != present_user_data[key]:
+
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
+
+ module.exit_json(changed=changed, msg=user_data)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ user_data=dict(type="dict"),
+ server_id=dict(required=True),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py
new file mode 100644
index 00000000..e879d3c9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+#
+# Scaleway volumes management module
+#
+# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_volume
+short_description: Scaleway volumes management module
+author: Henryk Konsek (@hekonsek)
+description:
+ - This module manages volumes on a Scaleway account
+ U(https://developer.scaleway.com)
+extends_documentation_fragment:
+- community.general.scaleway
+
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the volume.
+ default: present
+ choices:
+ - present
+ - absent
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+ name:
+ type: str
+ description:
+ - Name used to identify the volume.
+ required: true
+ organization:
+ type: str
+ description:
+ - Scaleway organization ID to which the volume belongs.
+ size:
+ type: int
+ description:
+ - Size of the volume in bytes.
+ volume_type:
+ type: str
+ description:
+ - Type of the volume (for example C(l_ssd)).
+'''
+
+EXAMPLES = '''
+- name: Create 10GB volume
+ community.general.scaleway_volume:
+ name: my-volume
+ state: present
+ region: par1
+ organization: "{{ scw_org }}"
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- name: Make sure volume deleted
+ community.general.scaleway_volume:
+ name: my-volume
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when C(state=present)
+ returned: when C(state=present)
+ type: dict
+ sample: {
+ "volume": {
+ "export_uri": null,
+ "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+ "name": "volume-0-3",
+ "organization": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+ "server": null,
+ "size": 10000000000,
+ "volume_type": "l_ssd"
+ }
+}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+ state = module.params['state']
+ name = module.params['name']
+ organization = module.params['organization']
+ size = module.params['size']
+ volume_type = module.params['volume_type']
+
+ account_api = Scaleway(module)
+ response = account_api.get('volumes')
+ status_code = response.status_code
+ volumes_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
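+ # Look for an existing volume that matches both the requested organization and name.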
+ volumeByName = None
+ for volume in volumes_json['volumes']:
+ if volume['organization'] == organization and volume['name'] == name:
+ volumeByName = volume
+
+ if state in ('present',):
+ if volumeByName is not None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'organization': organization, 'size': size, 'volume_type': volume_type}
+
+ response = account_api.post('/volumes', payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if volumeByName is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/volumes/' + volumeByName['id'])
+ if response.status_code == 204:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ size=dict(type='int'),
+ organization=dict(),
+ volume_type=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py
new file mode 100644
index 00000000..e894f965
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_facts.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.scaleway_volume_info) instead.
+short_description: Gather facts about the Scaleway volumes available.
+description:
+ - Gather facts about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes facts
+ community.general.scaleway_volume_facts:
+ region: par1
+'''
+
+RETURN = r'''
+---
+scaleway_volume_facts:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_facts": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeFacts(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeFacts, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ ansible_facts={'scaleway_volume_facts': ScalewayVolumeFacts(module).get_resources()}
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
new file mode 100644
index 00000000..ff6093e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_info
+short_description: Gather information about the Scaleway volumes available.
+description:
+ - Gather information about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@sieben)"
+extends_documentation_fragment:
+- community.general.scaleway
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes information
+ community.general.scaleway_volume_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_volume_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_volume_info:
+ description: Response from Scaleway API
+ returned: success
+ type: complex
+ sample:
+ "scaleway_volume_info": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeInfo, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py
new file mode 100644
index 00000000..457e2e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sefcontext.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+- Manages SELinux file context mapping definitions.
+- Similar to the C(semanage fcontext) command.
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: yes
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ type: str
+ required: yes
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Useful for scenarios (for example chrooted environments) in which the real SELinux state cannot be determined.
+ type: bool
+ default: no
+notes:
+- The changes are persistent across reboots.
+- The M(community.general.sefcontext) module does not modify existing files to the new
+ SELinux context(s), so it is advisable to first create the SELinux
+ file contexts before creating files, or run C(restorecon) manually
+ for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+ decision, as it would be unclear what the reported changes would
+ entail, and there is no guarantee that applying the SELinux fcontexts
+ does not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ community.general.sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
+
+- name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -irv /srv/git_repos
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+ # Beware that records consist of a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion from semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py
new file mode 100644
index 00000000..0d1f9f59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selinux_permissive.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+ - Add and remove a domain from the list of permissive domains.
+options:
+ domain:
+ description:
+ - The domain that will be added or removed from the list of permissive domains.
+ type: str
+ required: true
+ aliases: [ name ]
+ permissive:
+ description:
+ - Indicate if the domain should or should not be set as permissive.
+ type: bool
+ required: true
+ no_reload:
+ description:
+ - Disable reloading of the SELinux policy after making a change to a domain's permissive setting.
+ - The default is C(no), which causes the policy to be reloaded when a domain changes state.
+ - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
+ type: bool
+ default: no
+ store:
+ description:
+ - Name of the SELinux policy store to use.
+ type: str
+notes:
+ - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
+requirements: [ policycoreutils-python ]
+author:
+- Michael Scherer (@mscherer) <misc@zarb.org>
+'''
+
+EXAMPLES = r'''
+- name: Change the httpd_t domain to permissive
+ community.general.selinux_permissive:
+ name: httpd_t
+ permissive: true
+'''
+
+import traceback
+
+HAVE_SEOBJECT = False
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True, aliases=['name']),
+ store=dict(type='str', default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
+ exception=SEOBJECT_IMP_ERR)
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py
new file mode 100644
index 00000000..7036dad9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/selogin.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+ - Manages Linux user to SELinux user mapping.
+options:
+ login:
+ type: str
+ description:
+ - A Linux user.
+ required: true
+ seuser:
+ type: str
+ description:
+ - SELinux user name
+ selevel:
+ type: str
+ aliases: [ serange ]
+ description:
+ - MLS/MCS Security Range (MLS/MCS systems only). SELinux Range for the SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ type: str
+ description:
+ - Desired mapping value.
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+ - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+ - The changes are persistent across reboots.
+ - Not tested on any Debian-based system.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+- name: Modify the default user on the system to the guest_u user
+ community.general.selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+- name: Assign gijoe user on an MLS machine a range and to the staff_u user
+ community.general.selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+- name: Assign all users in the engineering group to the staff_u user
+ community.general.selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ # for local_login in all_logins:
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py
new file mode 100644
index 00000000..67132771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sendgrid.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
+requirements:
+ - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
+options:
+ username:
+ type: str
+ description:
+ - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ password:
+ type: str
+ description:
+ - Password that corresponds to the username.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ from_address:
+ type: str
+ description:
+ - The address in the "from" field for the email.
+ required: true
+ to_addresses:
+ type: list
+ description:
+ - A list with one or more recipient email addresses.
+ required: true
+ subject:
+ type: str
+ description:
+ - The desired subject for the email.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Sendgrid API key to use instead of username/password.
+ cc:
+ type: list
+ description:
+ - A list of email addresses to cc.
+ bcc:
+ type: list
+ description:
+ - A list of email addresses to bcc.
+ attachments:
+ type: list
+ description:
+ - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
+ from_name:
+ type: str
+ description:
+      - The name you want to appear in the from field, for example 'John Doe'.
+ html_body:
+ description:
+ - Whether the body is html content that should be rendered.
+ type: bool
+ default: 'no'
+ headers:
+ type: dict
+ description:
+ - A dict to pass on as headers.
+ body:
+ type: str
+ description:
+ - The e-mail body content.
+ required: yes
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = r'''
+- name: Send an email to a single recipient that the deployment was successful
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+- name: Send an email to more than one recipient that the build failed
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
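+
+# Illustrative only: an API-key based send with an HTML body and an attachment.
+# These options require the sendgrid Python library; the values are placeholders.
+- name: Send an HTML report using an API key
+  community.general.sendgrid:
+    api_key: "{{ sendgrid_api_key }}"
+    from_address: "reports@mycompany.com"
+    to_addresses:
+      - "ops@mycompany.com"
+    subject: "Nightly report"
+    body: "<h1>All systems nominal</h1>"
+    html_body: yes
+    attachments:
+      - /tmp/report.pdf
+  delegate_to: localhost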
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+from distutils.version import LooseVersion
+
+SENDGRID_IMP_ERR = None
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ SENDGRID_IMP_ERR = traceback.format_exc()
+ HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
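+    # Two paths: without the sendgrid library, POST directly to the legacy
+    # V2 mail.send.json endpoint via fetch_url (username/password only);
+    # with the library, build a sendgrid.Mail and send it via SendGridClient,
+    # which also supports api_key, cc, bcc, attachments, headers and HTML bodies.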
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
+ encoded_data = urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ recipient = to_bytes(recipient, errors='surrogate_or_strict')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+ # Remove this check when adding Sendgrid API v3 support
+ if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
+ module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.")
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+            message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
+# =======================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list'),
+ cc=dict(required=False, type='list'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together=[['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+ if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ reason = 'when using any of the following arguments: ' \
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments'
+ module.fail_json(msg=missing_required_lib('sendgrid', reason=reason),
+ exception=SENDGRID_IMP_ERR)
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py
new file mode 100644
index 00000000..9ebe2765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_check.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+  - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module;
+    they are simply specified for your convenience.
+options:
+ name:
+ type: str
+ description:
+ - The name of the check
+ - This is the key that is used to determine whether a check exists
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so
+        you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ handlers:
+ type: list
+ description:
+ - List of handlers to notify when the check fails
+ default: []
+ subscribers:
+ type: list
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ default: []
+ interval:
+ type: int
+ description:
+ - Check interval in seconds
+ timeout:
+ type: int
+ description:
+ - Timeout for the check
+ - If not specified, it defaults to 10.
+ ttl:
+ type: int
+ description:
+ - Time to live in seconds until the check is considered stale
+ handle:
+ description:
+ - Whether the check should be handled or not
+ - Default is C(false).
+ type: bool
+ subdue_begin:
+ type: str
+ description:
+ - When to disable handling of check failures
+ subdue_end:
+ type: str
+ description:
+ - When to enable handling of check failures
+ dependencies:
+ type: list
+ description:
+      - Other checks this check depends on. If dependencies fail,
+        handling of this check will be disabled.
+ default: []
+ metric:
+ description:
+ - Whether the check is a metric
+ type: bool
+ default: 'no'
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ - Default is C(false).
+ type: bool
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the sensu api
+ - Default is C(false).
+ type: bool
+ occurrences:
+ type: int
+ description:
+ - Number of event occurrences before the handler should take action
+ - If not specified, defaults to 1.
+ refresh:
+ type: int
+ description:
+ - Number of seconds handlers should wait before taking second action
+ aggregate:
+ description:
+      - Classifies the check as an aggregate check,
+        making it available via the aggregate API.
+ - Default is C(false).
+ type: bool
+ low_flap_threshold:
+ type: int
+ description:
+ - The low threshold for flap detection
+ high_flap_threshold:
+ type: int
+ description:
+ - The high threshold for flap detection
+ custom:
+ type: dict
+ description:
+      - A hash/dictionary of custom parameters for mixing into the configuration.
+      - You cannot override other module parameters using this.
+ default: {}
+ source:
+ type: str
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: Get cpu metrics
+ community.general.sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: yes
+ handlers: relay
+ subscribers: common
+ interval: 60
+
+# Check whether nginx is running
+- name: Check nginx process
+ community.general.sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: Check disk
+ community.general.sensu_check:
+ name: check_disk_capacity
+ state: absent
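+
+# Illustrative only: values below are placeholders for your environment.
+- name: Check disk capacity, subdued overnight, with an extra custom attribute
+  community.general.sensu_check:
+    name: check_disk_capacity
+    command: /etc/sensu/plugins/system/check-disk.rb
+    subscribers: common
+    interval: 300
+    subdue_begin: '22:00'
+    subdue_end: '06:00'
+    custom:
+      remediation_url: https://wiki.example.com/runbooks/disk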
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
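+
+        # Scalar/list options listed below are synced verbatim into the check
+        # definition; options left unset in the task are removed from it.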
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'ttl',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+            # Custom parameters are merged into the check definition, but they
+            # must not overwrite standard module options.
+            custom_params = module.params['custom']
+            overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+            if overwritten_fields:
+                msg = 'You can\'t overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+ module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list'},
+ 'subscribers': {'type': 'list'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list'},
+ 'metric': {'type': 'bool', 'default': 'no'},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py
new file mode 100644
index 00000000..35444f60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_client.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_client
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu client configuration
+description:
+ - Manages Sensu client configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the client should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the client. The name cannot contain special characters or spaces.
+ - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
+ address:
+ type: str
+ description:
+ - An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
+      - If not specified, it defaults to a non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
+ subscriptions:
+ type: list
+ description:
+ - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
+ - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
+ - The subscriptions array items must be strings.
+ safe_mode:
+ description:
+ - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
+ type: bool
+ default: 'no'
+ redact:
+ type: list
+ description:
+ - Client definition attributes to redact (values) when logging and sending client keepalives.
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the Sensu client socket.
+ keepalives:
+ description:
+ - If Sensu should monitor keepalives for this client.
+ type: bool
+ default: 'yes'
+ keepalive:
+ type: dict
+ description:
+ - The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc).
+ registration:
+ type: dict
+ description:
+ - The registration definition scope, used to configure Sensu registration event handlers.
+ deregister:
+ description:
+ - If a deregistration event should be created upon Sensu client process stop.
+ - Default is C(false).
+ type: bool
+ deregistration:
+ type: dict
+ description:
+ - The deregistration definition scope, used to configure automated Sensu client de-registration.
+ ec2:
+ type: dict
+ description:
+ - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
+ chef:
+ type: dict
+ description:
+ - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
+ puppet:
+ type: dict
+ description:
+ - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
+ servicenow:
+ type: dict
+ description:
+ - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Minimum possible configuration
+- name: Configure Sensu client
+ community.general.sensu_client:
+ subscriptions:
+ - default
+
+# With customization
+- name: Configure Sensu client
+ community.general.sensu_client:
+ name: "{{ ansible_fqdn }}"
+ address: "{{ ansible_default_ipv4['address'] }}"
+ subscriptions:
+ - default
+ - webserver
+ redact:
+ - password
+ socket:
+ bind: 127.0.0.1
+ port: 3030
+ keepalive:
+ thresholds:
+ warning: 180
+ critical: 300
+ handlers:
+ - email
+ custom:
+ - broadcast: irc
+ occurrences: 3
+ register: client
+ notify:
+ - Restart sensu-client
+
+- name: Secure Sensu client configuration file
+ ansible.builtin.file:
+ path: "{{ client['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+
+- name: Delete the Sensu client configuration
+ community.general.sensu_client:
+ state: "absent"
+'''
+
+RETURN = '''
+config:
+ description: Effective client configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'client', 'subscriptions': ['default']}
+file:
+ description: Path to the client configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/client.json"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=False),
+ address=dict(type='str', required=False),
+ subscriptions=dict(type='list', required=False),
+ safe_mode=dict(type='bool', required=False, default=False),
+ redact=dict(type='list', required=False),
+ socket=dict(type='dict', required=False),
+ keepalives=dict(type='bool', required=False, default=True),
+ keepalive=dict(type='dict', required=False),
+ registration=dict(type='dict', required=False),
+ deregister=dict(type='bool', required=False),
+ deregistration=dict(type='dict', required=False),
+ ec2=dict(type='dict', required=False),
+ chef=dict(type='dict', required=False),
+ puppet=dict(type='dict', required=False),
+ servicenow=dict(type='dict', required=False)
+ ),
+ required_if=[
+ ['state', 'present', ['subscriptions']]
+ ]
+ )
+
+ state = module.params['state']
+ path = "/etc/sensu/conf.d/client.json"
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build client configuration from module arguments
+ config = {'client': {}}
+ args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
+ 'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
+ 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['client'][arg] = module.params[arg]
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Client configuration is already up to date',
+ config=config['client'],
+ file=path)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Client configuration would have been updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+
+ try:
+ with open(path, 'w') as client:
+ client.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Client configuration updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py
new file mode 100644
index 00000000..53152edc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_handler.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+
+# (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_handler
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu handler configuration
+description:
+ - Manages Sensu handler configuration
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
+options:
+ state:
+ type: str
+ description:
+ - Whether the handler should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the handler. The name cannot contain special characters or spaces.
+ required: True
+ type:
+ type: str
+ description:
+ - The handler type
+ choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
+ filter:
+ type: str
+ description:
+ - The Sensu event filter (name) to use when filtering events for the handler.
+ filters:
+ type: list
+ description:
+ - An array of Sensu event filters (names) to use when filtering events for the handler.
+ - Each array item must be a string.
+ severities:
+ type: list
+ description:
+ - An array of check result severities the handler will handle.
+ - 'NOTE: event resolution bypasses this filtering.'
+ - "Example: [ 'warning', 'critical', 'unknown' ]."
+ mutator:
+ type: str
+ description:
+ - The Sensu event mutator (name) to use to mutate event data for the handler.
+ timeout:
+ type: int
+ description:
+ - The handler execution duration timeout in seconds (hard stop).
+ - Only used by pipe and tcp handler types.
+ default: 10
+ handle_silenced:
+ description:
+ - If events matching one or more silence entries should be handled.
+ type: bool
+ default: 'no'
+ handle_flapping:
+ description:
+ - If events in the flapping state should be handled.
+ type: bool
+ default: 'no'
+ command:
+ type: str
+ description:
+ - The handler command to be executed.
+ - The event data is passed to the process via STDIN.
+ - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the TCP/UDP handler socket.
+ - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
+ pipe:
+ type: dict
+ description:
+ - The pipe definition scope, used to configure the Sensu transport pipe.
+ - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
+ handlers:
+ type: list
+ description:
+ - An array of Sensu event handlers (names) to use for events using the handler set.
+ - Each array item must be a string.
+ - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Configure a handler that sends event data as STDIN (pipe)
+- name: Configure IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ type: "pipe"
+ command: "/usr/local/bin/notify-irc.sh"
+ severities:
+ - "ok"
+ - "critical"
+ - "warning"
+ - "unknown"
+ timeout: 15
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+# Delete a handler
+- name: Delete IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ state: "absent"
+
+# Example of a TCP handler
+- name: Configure TCP Sensu handler
+ community.general.sensu_handler:
+ name: "tcp_handler"
+ type: "tcp"
+ timeout: 30
+ socket:
+ host: "10.0.1.99"
+ port: 4444
+ register: handler
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+- name: Secure Sensu handler configuration file
+ ansible.builtin.file:
+ path: "{{ handler['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+'''
+
+RETURN = '''
+config:
+ description: Effective handler configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+file:
+ description: Path to the handler configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/handlers/irc.json"
+name:
+ description: Name of the handler
+ returned: success
+ type: str
+ sample: "irc"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=False, choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
+ filter=dict(type='str', required=False),
+ filters=dict(type='list', required=False),
+ severities=dict(type='list', required=False),
+ mutator=dict(type='str', required=False),
+ timeout=dict(type='int', required=False, default=10),
+ handle_silenced=dict(type='bool', required=False, default=False),
+ handle_flapping=dict(type='bool', required=False, default=False),
+ command=dict(type='str', required=False),
+ socket=dict(type='dict', required=False),
+ pipe=dict(type='dict', required=False),
+ handlers=dict(type='list', required=False),
+ ),
+ required_if=[
+ ['state', 'present', ['type']],
+ ['type', 'pipe', ['command']],
+ ['type', 'tcp', ['socket']],
+ ['type', 'udp', ['socket']],
+ ['type', 'transport', ['pipe']],
+ ['type', 'set', ['handlers']]
+ ]
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build handler configuration from module arguments
+ config = {'handlers': {name: {}}}
+ args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+ 'handle_silenced', 'handle_flapping', 'command', 'socket',
+ 'pipe', 'handlers']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['handlers'][name][arg] = module.params[arg]
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+ current_config = json.load(open(path, 'r'))
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Handler configuration is already up to date',
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Handler configuration would have been updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ try:
+ with open(path, 'w') as handler:
+ handler.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Handler configuration updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py
new file mode 100644
index 00000000..12dc5d20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_silence.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Steven Bambling <smbambling@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+  - Create and clear (delete) silence entries via the Sensu API
+ for subscriptions and checks.
+options:
+ check:
+ type: str
+ description:
+ - Specifies the check which the silence entry applies to.
+ creator:
+ type: str
+ description:
+ - Specifies the entity responsible for this entry.
+ expire:
+ type: int
+ description:
+ - If specified, the silence entry will be automatically cleared
+ after this number of seconds.
+ expire_on_resolve:
+ description:
+ - If specified as true, the silence entry will be automatically
+ cleared once the condition it is silencing is resolved.
+ type: bool
+ reason:
+ type: str
+ description:
+ - If specified, this free-form string is used to provide context or
+ rationale for the reason this silence entry was created.
+ state:
+ type: str
+ description:
+      - Specifies whether to create or clear (delete) a silence entry via the Sensu API.
+ default: present
+ choices: ['present', 'absent']
+ subscription:
+ type: str
+ description:
+ - Specifies the subscription which the silence entry applies to.
+ - To create a silence entry for a client prepend C(client:) to client name.
+ Example - C(client:server1.example.dev)
+ required: true
+ url:
+ type: str
+ description:
+ - Specifies the URL of the Sensu monitoring host server.
+ required: false
+    default: http://127.0.0.1:4567
+'''
+
+EXAMPLES = '''
+# Silence ALL checks for a given client
+- name: Silence server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ creator: "{{ ansible_user_id }}"
+ reason: Performing maintenance
+
+# Silence specific check for a client
+- name: Silence CPU_Usage check for server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ check: CPU_Usage
+ creator: "{{ ansible_user_id }}"
+ reason: Investigation alert issue
+
+# Silence multiple clients from a dict
+ silence:
+ server1.example.dev:
+ reason: 'Deployment in progress'
+ server2.example.dev:
+ reason: 'Deployment in progress'
+
+- name: Silence several clients from a dict
+ community.general.sensu_silence:
+ subscription: "client:{{ item.key }}"
+ reason: "{{ item.value.reason }}"
+ creator: "{{ ansible_user_id }}"
+ with_dict: "{{ silence }}"
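+
+# Clear (delete) an existing silence entry
+- name: Unsilence server1.example.dev
+  community.general.sensu_silence:
+    subscription: client:server1.example.dev
+    state: absent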
+'''
+
+RETURN = '''
+'''
+
+import json
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def query(module, url, check, subscription):
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='GET',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] == 500:
+ module.fail_json(
+ msg="Failed to query silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, False
+
+
+def clear(module, url, check, subscription):
+ # Test if silence exists before clearing
+ (rc, out, changed) = query(module, url, check, subscription)
+
+ d = dict((i['subscription'], i['check']) for i in out)
+ subscription_exists = subscription in d
+ if check and subscription_exists:
+ exists = (check == d[subscription])
+ else:
+ exists = subscription_exists
+
+ # If check/subscription doesn't exist
+ # exit with changed state of False
+ if not exists:
+ return False, out, changed
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced/clear'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 204:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def create(
+ module, url, check, creator, expire,
+ expire_on_resolve, reason, subscription):
+ (rc, out, changed) = query(module, url, check, subscription)
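+    # Idempotency: if an entry with matching subscription, check, creator,
+    # reason, expire and expire_on_resolve already exists, report no change.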
+ for i in out:
+ if (i['subscription'] == subscription):
+ if (
+ (check is None or check == i['check']) and
+ (
+ creator == '' or
+ creator == i['creator']) and
+ (
+ reason == '' or
+ reason == i['reason']) and
+ (
+ expire is None or expire == i['expire']) and
+ (
+ expire_on_resolve is None or
+ expire_on_resolve == i['expire_on_resolve']
+ )
+ ):
+ return False, out, False
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'creator': creator,
+ 'expire': expire,
+ 'expire_on_resolve': expire_on_resolve,
+ 'reason': reason,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 201:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" %
+ (subscription, info['msg'])
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ check=dict(required=False),
+ creator=dict(required=False),
+ expire=dict(type='int', required=False),
+ expire_on_resolve=dict(type='bool', required=False),
+ reason=dict(required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ subscription=dict(required=True),
+            url=dict(required=False, default='http://127.0.0.1:4567'),
+ ),
+ supports_check_mode=True
+ )
+
+ url = module.params['url']
+ check = module.params['check']
+ creator = module.params['creator']
+ expire = module.params['expire']
+ expire_on_resolve = module.params['expire_on_resolve']
+ reason = module.params['reason']
+ subscription = module.params['subscription']
+ state = module.params['state']
+
+ if state == 'present':
+ (rc, out, changed) = create(
+ module, url, check, creator,
+ expire, expire_on_resolve, reason, subscription
+ )
+
+ if state == 'absent':
+ (rc, out, changed) = clear(module, url, check, subscription)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py
new file mode 100644
index 00000000..6316254d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sensu_subscription.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+ - Manage which I(sensu channels) a machine should subscribe to
+options:
+ name:
+ type: str
+ description:
+ - The name of the channel
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the machine should subscribe or unsubscribe from the channel
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the subscriptions json file
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so you
+        can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ required: false
+ default: no
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: Subscribe to nginx checks
+ community.general.sensu_subscription: name=nginx
+
+# Unsubscribe from the common checks channel
+- name: Unsubscribe from common checks
+ community.general.sensu_subscription: name=common state=absent
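+
+# Keep a timestamped backup of the previous subscriptions file (illustrative)
+- name: Subscribe to the webserver channel and back up the old file
+  community.general.sensu_subscription: name=webserver backup=yes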
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+ config = json.load(open(path))
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ open(path, 'w').write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': 'no'},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py
new file mode 100644
index 00000000..71df8d6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/seport.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+options:
+ ports:
+ description:
+ - Ports or port ranges.
+ - Can be a list (since 2.6) or comma separated string.
+ type: list
+ elements: str
+ required: true
+ proto:
+ description:
+ - Protocol for the specified port.
+ type: str
+ required: true
+ choices: [ tcp, udp ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ type: str
+ required: true
+ state:
+ description:
+      - Desired state of the port type definition.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the current SELinux runtime state.
+ type: bool
+ default: no
+notes:
+ - The changes are persistent across reboots.
+  - Not tested on any Debian-based system.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dan Keder (@dankeder)
+'''
+
+EXAMPLES = r'''
+- name: Allow Apache to listen on tcp port 8888
+ community.general.seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
+- name: Allow sshd to listen on tcp port 8991
+ community.general.seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
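+
+# Illustrative only: remove a previously added port type mapping.
+- name: Stop allowing Apache to listen on tcp port 8888
+  community.general.seport:
+    ports: 8888
+    proto: tcp
+    setype: http_port_t
+    state: absent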
+'''
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+    :type seport: seobject.portRecords
+    :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+    :type seport: seobject.portRecords
+    :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
+ if isinstance(port, str):
+ ports = port.split('-', 1)
+ if len(ports) == 1:
+ ports.extend(ports)
+ else:
+ ports = (port, port)
+
+ key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ ports=dict(type='list', elements='str', required=True),
+ proto=dict(type='str', required=True, choices=['tcp', 'udp']),
+ setype=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = module.params['ports']
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py
new file mode 100644
index 00000000..912d4226
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/serverless.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+options:
+ state:
+ description:
+ - Goal state of given stage/project.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ serverless_bin_path:
+ description:
+      - The path of a serverless framework binary relative to the I(service_path), for example C(node_modules/.bin/serverless).
+ type: path
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ type: path
+ required: true
+ stage:
+ description:
+ - The name of the serverless framework project stage to deploy to.
+      - If not provided, the serverless framework default of C(dev) is used.
+ type: str
+ functions:
+ description:
+ - A list of specific functions to deploy.
+ - If this is not provided, all functions in the service will be deployed.
+ type: list
+ elements: str
+ default: []
+ region:
+ description:
+ - AWS region to deploy the service to.
+      - If not provided, the Serverless Framework default of C(us-east-1) is used.
+ type: str
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them.
+ - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+ - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ type: bool
+ default: yes
+ force:
+ description:
+ - Whether or not to force full deployment, equivalent to serverless C(--force) option.
+ type: bool
+ default: no
+ verbose:
+ description:
+      - Shows all stack events during deployment, and displays any Stack Output.
+ type: bool
+ default: no
+notes:
+  - Currently, the C(serverless) command must be available on the PATH of the node executing the task.
+ In the future this may be a flag.
+requirements:
+- serverless
+- yaml
+author:
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = r'''
+- name: Basic deploy of a service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+- name: Deploy specific functions
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ functions:
+ - my_func_one
+ - my_func_two
+
+- name: Deploy a project, then pull its resource list back into Ansible
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_info module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_info:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+
+- name: Deploy a project using a locally installed serverless binary
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ serverless_bin_path: node_modules/.bin/serverless
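+
+# Illustrative: build the deployment artifacts without pushing a stack update
+- name: Package a project without deploying it
+  community.general.serverless:
+    service_path: '{{ project_dir }}'
+    deploy: false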
+'''
+
+RETURN = r'''
+service_name:
+ type: str
+ description: The service name specified in the serverless.yml that was just deployed.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: str
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: str
+  description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+'''
+
+import os
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def read_serverless_config(module):
+ path = module.params.get('service_path')
+
+ try:
+ with open(os.path.join(path, 'serverless.yml')) as sls_config:
+ config = yaml.safe_load(sls_config.read())
+ return config
+ except IOError as e:
+ module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(path, str(e)))
+
+ module.fail_json(msg="Failed to open serverless config at {0}".format(
+ os.path.join(path, 'serverless.yml')))
+
+
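+# Illustrative note: the resolved name is "<service>-<stage>"; for example a service named
+# "my-fancy-service" deployed to the "dev" stage resolves to "my-fancy-service-dev".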
+def get_service_name(module, stage):
+ config = read_serverless_config(module)
+ if config.get('service') is None:
+ module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+ if stage:
+ return "{0}-{1}".format(config['service'], stage)
+
+ return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ service_path=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ functions=dict(type='list', elements='str'),
+ region=dict(type='str', default=''),
+ stage=dict(type='str', default=''),
+ deploy=dict(type='bool', default=True),
+ serverless_bin_path=dict(type='path'),
+ force=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ ),
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg='yaml is required for this module')
+
+ service_path = module.params.get('service_path')
+ state = module.params.get('state')
+ functions = module.params.get('functions')
+ region = module.params.get('region')
+ stage = module.params.get('stage')
+ deploy = module.params.get('deploy', True)
+ force = module.params.get('force', False)
+ verbose = module.params.get('verbose', False)
+ serverless_bin_path = module.params.get('serverless_bin_path')
+
+ if serverless_bin_path is not None:
+ command = serverless_bin_path + " "
+ else:
+ command = "serverless "
+
+ if state == 'present':
+ command += 'deploy '
+ elif state == 'absent':
+ command += 'remove '
+ else:
+ module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
+
+ if state == 'present':
+ if not deploy:
+ command += '--noDeploy '
+ elif force:
+ command += '--force '
+
+ if region:
+ command += '--region {0} '.format(region)
+ if stage:
+ command += '--stage {0} '.format(stage)
+ if verbose:
+ command += '--verbose '
+
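+    # Illustrative example of a fully assembled command (state=present, force=true,
+    # stage=prod, verbose=true): "serverless deploy --force --stage prod --verbose "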
+ rc, out, err = module.run_command(command, cwd=service_path)
+ if rc != 0:
+ if state == 'absent' and "-{0}' does not exist".format(stage) in out:
+ module.exit_json(changed=False, state='absent', command=command,
+ out=out, service_name=get_service_name(module, stage))
+
+ module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
+
+ # gather some facts about the deployment
+ module.exit_json(changed=True, state='present', out=out, command=command,
+ service_name=get_service_name(module, stage))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py
new file mode 100644
index 00000000..58c6962b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_account_manager.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_account_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_account)
+short_description: Manage SolidFire accounts
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update accounts on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+
+ new_name:
+ description:
+ - New name for the user account.
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ account_id:
+ description:
+ - The ID of the account to manage or update.
+
+ status:
+ description:
+ - Status of the account.
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+
+- name: Modify Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+ new_name: TenantA-Renamed
+
+- name: Delete Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: TenantA-Renamed
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireAccount(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=False, type='int', default=None),
+
+ new_name=dict(required=False, type='str', default=None),
+ initiator_secret=dict(required=False, type='str'),
+ target_secret=dict(required=False, type='str'),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+
+ self.new_name = p['new_name']
+ self.initiator_secret = p['initiator_secret']
+ self.target_secret = p['target_secret']
+ self.attributes = p['attributes']
+ self.status = p['status']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_account(self):
+ """
+ Return account object if found
+
+ :return: Details about the account. None if not found.
+ :rtype: dict
+ """
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ if account.username == self.name:
+ # Update self.account_id:
+ if self.account_id is not None:
+ if account.account_id == self.account_id:
+ return account
+ else:
+ self.account_id = account.account_id
+ return account
+ return None
+
+ def create_account(self):
+ try:
+ self.sfe.add_account(username=self.name,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+            self.module.fail_json(msg='Error creating account %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.new_name,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ account_exists = False
+ update_account = False
+ account_detail = self.get_account()
+
+ if account_detail:
+ account_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the account
+
+ if account_detail.username is not None and self.new_name is not None and \
+ account_detail.username != self.new_name:
+ update_account = True
+ changed = True
+
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not account_exists:
+ self.create_account()
+ elif update_account:
+ self.update_account()
+
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireAccount()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py
new file mode 100644
index 00000000..cfe24832
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_check_connections.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_check_connections
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_check_connections)
+short_description: Check connectivity to MVIP and SVIP.
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+
+ mvip:
+ description:
+    - Optionally, use to test the connection to a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+
+ svip:
+ description:
+    - Optionally, use to test the connection to a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ community.general.sf_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.skip = p['skip']
+ self.mvip = p['mvip']
+ self.svip = p['svip']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_mvip(mvip=self.mvip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_svip(svip=self.svip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check(self):
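+        # Depending on the 'skip' option, test MVIP only, SVIP only, or both,
+        # and fail with a message for the first unreachable endpoint.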
+
+ failed = True
+ msg = ''
+
+ if self.skip is None:
+ mvip_connection_established = self.check_mvip_connection()
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ elif not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'mvip':
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'svip':
+ mvip_connection_established = self.check_mvip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ else:
+ failed = False
+
+ if failed:
+ self.module.fail_json(msg=msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ v = SolidFireConnection()
+ v.check()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py
new file mode 100644
index 00000000..296e50bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_snapshot_schedule_manager.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_snapshot_schedule_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_snapshot_schedule)
+short_description: Manage SolidFire snapshot schedules
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ required: false
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ required: false
+
+ time_interval_days:
+ description: Time interval in days.
+ required: false
+ default: 1
+
+ time_interval_hours:
+ description: Time interval in hours.
+ required: false
+ default: 0
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ required: false
+ default: 0
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+ required: true
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ required: false
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+ - At least 1 volume ID is required for creating a new schedule.
+    - Required when C(state=present).
+ required: false
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ required: false
+
+ schedule_id:
+ description:
+ - The schedule ID for the schedule that you want to update or delete.
+ required: false
+
+ starting_date:
+ description:
+ - Starting date for the schedule.
+ - Required when C(state=present).
+    - "Format: C(2016--12--01T00:00:00Z)"
+    - Use two dashes between the date components as shown above, otherwise you may see an error such as "TypeError ... is not JSON serializable".
+ required: false
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: Schedule_A
+ time_interval_days: 1
+ starting_date: 2016--12--01T00:00:00Z
+ volumes: 7
+
+ - name: Update Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ schedule_id: 6
+ recurring: True
+ snapshot_name: AnsibleSnapshots
+
+ - name: Delete Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ schedule_id: 6
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireSnapShotSchedule(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ time_interval_days=dict(required=False, type='int', default=1),
+ time_interval_hours=dict(required=False, type='int', default=0),
+ time_interval_minutes=dict(required=False, type='int', default=0),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list'),
+ retention=dict(required=False, type='str'),
+
+ schedule_id=dict(type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['starting_date', 'volumes'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ # self.interval = p['interval']
+
+ self.time_interval_days = p['time_interval_days']
+ self.time_interval_hours = p['time_interval_hours']
+ self.time_interval_minutes = p['time_interval_minutes']
+
+ self.paused = p['paused']
+ self.recurring = p['recurring']
+
+ self.starting_date = p['starting_date']
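+        # The documented format uses double dashes (for example 2016--12--01T00:00:00Z);
+        # normalize it back to standard ISO 8601 before it is sent to the API.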
+ if self.starting_date is not None:
+ self.starting_date = self.starting_date.replace("--", "-")
+
+ self.snapshot_name = p['snapshot_name']
+ self.volumes = p['volumes']
+ self.retention = p['retention']
+
+ self.schedule_id = p['schedule_id']
+
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_schedule(self):
+ schedule_list = self.sfe.list_schedules()
+ for schedule in schedule_list.schedules:
+ if schedule.name == self.name:
+ # Update self.schedule_id:
+ if self.schedule_id is not None:
+ if schedule.schedule_id == self.schedule_id:
+ return schedule
+ else:
+ self.schedule_id = schedule.schedule_id
+ return schedule
+
+ return None
+
+ def create_schedule(self):
+
+ try:
+ sched = netapp_utils.Schedule()
+ # if self.interval == 'time_interval':
+ sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ # Create schedule
+ sched.name = self.name
+ sched.schedule_info = netapp_utils.ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+
+ # Update schedule properties
+
+ # if self.interval == 'time_interval':
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if sched.frequency.days != temp_frequency.days or \
+ sched.frequency.hours != temp_frequency.hours \
+ or sched.frequency.minutes != temp_frequency.minutes:
+ sched.frequency = temp_frequency
+
+ sched.name = self.name
+ if self.volumes is not None:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ schedule_exists = False
+ update_schedule = False
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail:
+ schedule_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+            # Check if we need to update the schedule
+
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+
+ elif schedule_detail.name != self.name:
+ update_schedule = True
+ changed = True
+
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+
+ elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
+ update_schedule = True
+ changed = True
+
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+
+ elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
+ or self.time_interval_days is not None:
+
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours \
+ or schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ pass
+ else:
+ if self.state == 'present':
+ if not schedule_exists:
+ self.create_schedule()
+ elif update_schedule:
+ self.update_schedule()
+
+ elif self.state == 'absent':
+ self.delete_schedule()
+
+ if self.create_schedule_result is not None:
+ self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
+ else:
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireSnapShotSchedule()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py
new file mode 100644
index 00000000..78e3097d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_access_group_manager.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_access_group_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_access_group)
+short_description: Manage SolidFire Volume Access Groups
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volume access groups on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume access group should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Name of the volume access group. It is not required to be unique, but recommended.
+ required: true
+
+ initiators:
+ description:
+ - List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+
+ virtual_network_id:
+ description:
+ - The ID of the SolidFire Virtual Network ID to associate the volume access group with.
+
+ virtual_network_tags:
+ description:
+ - The ID of the VLAN Virtual Network Tag to associate the volume access group with.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ volume_access_group_id:
+ description:
+ - The ID of the volume access group to modify or delete.
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVolumeAccessGroup
+ volumes: [7,8]
+
+ - name: Modify Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ volume_access_group_id: 1
+ name: AnsibleVolumeAccessGroup-Renamed
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Delete Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ volume_access_group_id: 1
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolumeAccessGroup(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ volume_access_group_id=dict(required=False, type='int', default=None),
+
+ initiators=dict(required=False, type='list', default=None),
+ volumes=dict(required=False, type='list', default=None),
+ virtual_network_id=dict(required=False, type='list', default=None),
+ virtual_network_tags=dict(required=False, type='list', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.volume_access_group_id = p['volume_access_group_id']
+
+ self.initiators = p['initiators']
+ self.volumes = p['volumes']
+ self.virtual_network_id = p['virtual_network_id']
+ self.virtual_network_tags = p['virtual_network_tags']
+ self.attributes = p['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume_access_group(self):
+ access_groups_list = self.sfe.list_volume_access_groups()
+
+ for group in access_groups_list.volume_access_groups:
+ if group.name == self.name:
+ # Update self.volume_access_group_id:
+ if self.volume_access_group_id is not None:
+ if group.volume_access_group_id == self.volume_access_group_id:
+ return group
+ else:
+ self.volume_access_group_id = group.volume_access_group_id
+ return group
+ return None
+
+ def create_volume_access_group(self):
+ try:
+ self.sfe.create_volume_access_group(name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_volume_access_group(self):
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_volume_access_group(self):
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ group_exists = False
+ update_group = False
+ group_detail = self.get_volume_access_group()
+
+ if group_detail:
+ group_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the group
+ if self.volumes is not None and group_detail.volumes != self.volumes:
+ update_group = True
+ changed = True
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ update_group = True
+ changed = True
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
+ self.attributes is not None:
+ update_group = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not group_exists:
+ self.create_volume_access_group()
+ elif update_group:
+ self.update_volume_access_group()
+
+ elif self.state == 'absent':
+ self.delete_volume_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireVolumeAccessGroup()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py
new file mode 100644
index 00000000..9d5378a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sf_volume_manager.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+  why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_volume)
+short_description: Manage SolidFire volumes
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volumes on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ required: true
+
+ 512emulation:
+ description:
+ - Should the volume provide 512-byte sector emulation?
+ - Required when C(state=present)
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+
+ volume_id:
+ description:
+ - The ID of the volume to manage or update.
+ - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
+ parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
+ volume (since it's an auto-generated property).
+
+ size:
+ description:
+ - The size of the volume in (size_unit).
+ - Required when C(state = present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ access:
+ description:
+ - "Access allowed for the volume."
+ - "readOnly: Only read operations are allowed."
+ - "readWrite: Reads and writes are allowed."
+ - "locked: No reads or writes are allowed."
+ - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
+ - "If unspecified, the access settings of the clone will be the same as the source."
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
+ - name: Update Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True, type='int'),
+
+ enable512e=dict(type='bool', aliases=['512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+
+ volume_id=dict(type='int', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+ self.enable512e = p['enable512e']
+ self.qos = p['qos']
+ self.attributes = p['attributes']
+
+ self.volume_id = p['volume_id']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = p['access']
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume(self):
+ """
+ Return volume object if found
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
+ for volume in volume_list.volumes:
+ if volume.name == self.name:
+ # Update self.volume_id
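+                # Volumes with a non-empty delete_time are pending deletion and are skipped.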
+ if self.volume_id is not None:
+ if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
+ return volume
+ else:
+ if str(volume.delete_time) == "":
+ self.volume_id = volume.volume_id
+ return volume
+ return None
+
+ def create_volume(self):
+ try:
+ self.sfe.create_volume(name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ qos=self.qos,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self):
+ try:
+ self.sfe.delete_volume(volume_id=self.volume_id)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
+ exception=to_native(err))
+
+ def update_volume(self):
+ try:
+ self.sfe.modify_volume(self.volume_id,
+ account_id=self.account_id,
+ access=self.access,
+ qos=self.qos,
+ total_size=self.size,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error updating volume %s" % self.name,
+ exception=to_native(err))
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ update_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.state == 'present':
+ if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.account_id is not None and self.account_id is not None \
+ and volume_detail.account_id != self.account_id:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+                # Resize only if the relative size difference exceeds 0.1% (0.001)
+ if size_difference / self.size > 0.001:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.attributes is not None and self.attributes is not None and \
+ volume_detail.attributes != self.attributes:
+ update_volume = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+ result_message = "Volume created"
+ elif update_volume:
+ self.update_volume()
+ result_message = "Volume updated"
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ v = SolidFireVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py
new file mode 100644
index 00000000..ccb02a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/shutdown.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: shutdown
+short_description: Shut down a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+  - Shuts down a machine.
+version_added: "1.1.0"
+options:
+ delay:
+ description:
+ - Seconds to wait before shutdown. Passed as a parameter to the shutdown command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ msg:
+ description:
+ - Message to display to users before shutdown.
+ type: str
+ default: Shut down initiated by Ansible
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+      - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: path
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+
+seealso:
+- module: ansible.builtin.reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally shut down the machine with all defaults
+ community.general.shutdown:
+
+- name: Delay shutting down the remote node
+ community.general.shutdown:
+ delay: 60
+
+- name: Shut down a machine with shutdown command in unusual place
+ community.general.shutdown:
+ search_paths:
+ - '/lib/molly-guard'
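+
+# Illustrative: on Linux, macOS and OpenBSD the delay is converted to whole minutes,
+# so 90 seconds becomes a 1 minute delay; Solaris and FreeBSD use the full 90 seconds.
+- name: Shut down with a 90 second delay
+  community.general.shutdown:
+    delay: 90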
+'''
+
+RETURN = r'''
+shutdown:
+ description: C(true) if the machine has been shut down.
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py
new file mode 100644
index 00000000..22556d91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sl_vm.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: Create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances.
+ - When created, optionally waits for it to be 'running'.
+options:
+ instance_id:
+ description:
+      - Instance ID of the virtual instance to perform the action on.
+ type: str
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance.
+ type: str
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance.
+ type: str
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed.
+ type: str
+ choices:
+ - ams01
+ - ams03
+ - che01
+ - dal01
+ - dal05
+ - dal06
+ - dal09
+ - dal10
+ - dal12
+ - dal13
+ - fra02
+ - fra04
+ - fra05
+ - hkg02
+ - hou02
+ - lon02
+ - lon04
+ - lon06
+ - mel01
+ - mex01
+ - mil01
+ - mon01
+ - osl01
+ - par01
+ - sao01
+ - sea01
+ - seo01
+ - sjc01
+ - sjc03
+ - sjc04
+ - sng01
+ - syd01
+ - syd04
+ - tok02
+ - tor01
+ - wdc01
+ - wdc04
+ - wdc06
+ - wdc07
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance.
+ type: str
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed.
+ type: bool
+ default: 'yes'
+ private:
+ description:
+ - Flag to determine if the instance should be private only.
+ type: bool
+ default: 'no'
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool
+ default: 'no'
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance.
+ type: bool
+ default: 'yes'
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance.
+ type: int
+ choices: [1, 2, 4, 8, 16, 32, 56]
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance.
+ type: int
+ choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+ flavor:
+ description:
+ - Specify which SoftLayer flavor template to use instead of cpus and memory.
+ version_added: '0.2.0'
+ type: str
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance.
+ default: [ 25 ]
+ type: list
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance.
+ type: str
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance.
+ type: str
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance.
+ choices: [10, 100, 1000]
+ type: int
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC.
+ type: str
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC.
+ type: str
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance.
+ type: list
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance.
+ type: str
+ state:
+ description:
+ - Create, or cancel a virtual instance.
+ - Specify C(present) for create, C(absent) to cancel.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ wait:
+ description:
+ - Flag used to wait for active status before returning.
+ type: bool
+ default: 'yes'
+ wait_time:
+ description:
+ - Time in seconds before wait returns.
+ default: 600
+ type: int
+requirements:
+ - python >= 2.6
+ - softlayer >= 4.1.1
+author:
+- Matt Colton (@mcltn)
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instance request
+ community.general.sl_vm:
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: no
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Build instances request
+ community.general.sl_vm:
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - hostname: instance-2
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: True
+ - hostname: instance-3
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-slaves
+ hourly: yes
+ private: no
+ dedicated: no
+ local_disk: yes
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: yes
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Cancel by tag
+ community.general.sl_vm:
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import json
+import time
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+# TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
+ 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
+ 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ datacenter=module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+ # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+ if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ module.params['image_id'] = ''
+ elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since it will use the template
+ else:
+ return False, None
+
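+    # The create_instance() call below takes tags as a single comma-separated
+    # string, so flatten a list value into that form first.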
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
+
+ instance = vsManager.create_instance(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ cpus=module.params.get('cpus'),
+ memory=module.params.get('memory'),
+ flavor=module.params.get('flavor'),
+ hourly=module.params.get('hourly'),
+ datacenter=module.params.get('datacenter'),
+ os_code=module.params.get('os_code'),
+ image_id=module.params.get('image_id'),
+ local_disk=module.params.get('local_disk'),
+ disks=module.params.get('disks'),
+ ssh_keys=module.params.get('ssh_keys'),
+ nic_speed=module.params.get('nic_speed'),
+ private=module.params.get('private'),
+ public_vlan=module.params.get('public_vlan'),
+ private_vlan=module.params.get('private_vlan'),
+ dedicated=module.params.get('dedicated'),
+ post_uri=module.params.get('post_uri'),
+ tags=tags,
+ )
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module, id):
+ instance = None
+ completed = False
+ wait_timeout = time.time() + module.params.get('wait_time')
+ while not completed and wait_timeout > time.time():
+ try:
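+            # Poll readiness in short windows until the overall wait_time expires; the
+            # positional arguments are assumed to map to wait_for_ready(instance_id, limit=10, delay=2).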
+ completed = vsManager.wait_for_ready(id, 10, 2)
+ if completed:
+ instance = vsManager.get_instance(id)
+ except Exception:
+ completed = False
+
+ return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, string_types):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except Exception:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+            vsManager.cancel_instance(module.params.get('instance_id'))
+ except Exception:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str'),
+ hostname=dict(type='str'),
+ domain=dict(type='str'),
+ datacenter=dict(type='str', choices=DATACENTERS),
+ tags=dict(type='str'),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ flavor=dict(type='str'),
+ disks=dict(type='list', default=[25]),
+ os_code=dict(type='str'),
+ image_id=dict(type='str'),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(type='str'),
+ private_vlan=dict(type='str'),
+ ssh_keys=dict(type='list', default=[]),
+ post_uri=dict(type='str'),
+ state=dict(type='str', default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_time=dict(type='int', default=600),
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') is True and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py
new file mode 100644
index 00000000..946fc9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slack.py
@@ -0,0 +1,487 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2020, Lee Goolsbee <lgoolsbee@atlassian.com>
+# (c) 2020, Michal Middleton <mm.404@icloud.com>
+# (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# (c) 2016, René Moser <mail@renemoser.net>
+# (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+ - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+author: "Ramon de la Fuente (@ramondelafuente)"
+options:
+ domain:
+ type: str
+ description:
+      - Slack (sub)domain for your environment without protocol. (For example,
+        C(example.slack.com).) In 1.8 and beyond, this is deprecated and may
+        be ignored. See the token documentation for more information.
+ token:
+ type: str
+ description:
+      - Slack integration token. This authenticates you to the Slack service.
+        Make sure to use the correct type of token, depending on what method you use.
+      - "Webhook token:
+         Prior to 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+         1.8 and above, Ansible adapts to the new Slack API where tokens look
+         like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If the token
+         is in the new format, Slack ignores any value of I(domain); if it is
+         in the old format, I(domain) is required. Ansible has no control over
+         when Slack will retire the old API, and when it does the old format
+         will stop working. Please keep in mind that these are webhook tokens,
+         not API tokens. In Slack they appear at the end of the webhook URL,
+         which is obtained under apps and integrations, where incoming webhooks
+         can be added. In some cases this area may be locked by your Slack
+         admin and you must request access."
+      - "WebAPI token:
+         Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+         or C(xoxa-), e.g. C(xoxb-1234-56789abcdefghijklmnop). A WebAPI token is required if you intend to receive I(thread_id).
+         See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+ required: true
+ msg:
+ type: str
+ description:
+ - Message to send. Note that the module does not handle escaping characters.
+ Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+ See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ thread_id:
+ description:
+ - Optional. Timestamp of parent message to thread this message. https://api.slack.com/docs/message-threading
+ type: str
+ message_id:
+ description:
+ - Optional. Message ID to edit, instead of posting a new message.
+ Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+ type: str
+ version_added: 1.2.0
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+      - URL for the message sender's icon (default C(https://www.ansible.com/favicon.ico)).
+ default: https://www.ansible.com/favicon.ico
+ icon_emoji:
+ type: str
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+        (If I(icon_emoji) is set, I(icon_url) will not be used.)
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ type: str
+ description:
+ - Setting for the message parser at Slack
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: 'yes'
+ color:
+ type: str
+ description:
+ - Allow text to use default colors - use the default of 'normal' to not send a custom color bar at the start of the message.
+ - Allowed values for color can be one of 'normal', 'good', 'warning', 'danger', any valid 3 digit or 6 digit hex color value.
+ - Specifying value in hex is supported since Ansible 2.8.
+ default: 'normal'
+ attachments:
+ type: list
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/docs/attachments).
+ blocks:
+ description:
+ - Define a list of blocks. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/block-kit).
+ type: list
+ elements: dict
+ version_added: 1.0.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Slack all options
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ thread_id: '1539917263.000100'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ parse: 'none'
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+
+- name: Insert a color bar in front of the message with valid hex color value
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: 'This message uses color in hex value'
+ color: '#00aacc'
+ username: ''
+ icon_url: ''
+
+- name: Use the attachments API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: True
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: True
+
+- name: Use the blocks API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ blocks:
+ - type: section
+ text:
+ type: mrkdwn
+ text: |-
+ *System load*
+ Display my system load on host A and B
+ - type: context
+ elements:
+ - type: mrkdwn
+ text: |-
+ *System A*
+ load average: 0,74, 0,66, 0,63
+ - type: mrkdwn
+ text: |-
+ *System B*
+ load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: We sent this message using <https://www.ansible.com|Ansible>!
+
+- name: Send a message with angle brackets and ampersands
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
+
+- name: Initial Threaded Slack message
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ msg: 'Starting a thread with my initial post.'
+ register: slack_response
+- name: Add more info to thread
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ thread_id: "{{ slack_response['ts'] }}"
+ color: good
+ msg: 'And this is my threaded response!'
+
+- name: Send a message to be edited later on
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: '#ansible'
+ msg: Deploying something...
+ register: slack_response
+- name: Edit message
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: "{{ slack_response.channel }}"
+ msg: Deployment complete!
+ message_id: "{{ slack_response.ts }}"
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+
+# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
+# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
+escape_table = {
+ '"': "\"",
+ "'": "\'",
+}
+
+
+def is_valid_hex_color(color_choice):
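+    # Accepts both shorthand 3-digit and full 6-digit hex colors, e.g. '#fff' or '#00aacc'.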
+ if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
+ return True
+ return False
+
+
+def escape_quotes(text):
+ '''Backslash any quotes within text.'''
+ return "".join(escape_table.get(c, c) for c in text)
+
+
+def recursive_escape_quotes(obj, keys):
+ '''Recursively escape quotes inside supplied keys inside block kit objects'''
+ if isinstance(obj, dict):
+ escaped = {}
+ for k, v in obj.items():
+ if isinstance(v, str) and k in keys:
+ escaped[k] = escape_quotes(v)
+ else:
+ escaped[k] = recursive_escape_quotes(v, keys)
+ elif isinstance(obj, list):
+ escaped = [recursive_escape_quotes(v, keys) for v in obj]
+ else:
+ escaped = obj
+ return escaped
+
+
+def build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=escape_quotes(text))
+ elif text is not None:
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])])
+ if channel is not None:
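+        # Pass through names already prefixed with '#' or '@' and raw channel IDs
+        # (which start with 'C0'); otherwise treat the value as a channel name and prepend '#'.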
+ if channel.startswith(('#', '@', 'C0')):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if thread_id is not None:
+ payload['thread_ts'] = thread_id
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+ if message_id is not None:
+ payload['ts'] = message_id
+
+ if attachments is not None:
+ if 'attachments' not in payload:
+ payload['attachments'] = []
+
+ if attachments is not None:
+ attachment_keys_to_escape = [
+ 'title',
+ 'text',
+ 'author_name',
+ 'pretext',
+ 'fallback',
+ ]
+ for attachment in attachments:
+ for key in attachment_keys_to_escape:
+ if key in attachment:
+ attachment[key] = escape_quotes(attachment[key])
+
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+
+ payload['attachments'].append(attachment)
+
+ if blocks is not None:
+ block_keys_to_escape = [
+ 'text',
+ 'alt_text'
+ ]
+ payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape)
+
+ return payload
+
+
+def get_slack_message(module, domain, token, channel, ts):
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + token
+ }
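+    # Look up the existing message by channel and timestamp so the caller can
+    # compare it against the requested update before deciding whether to post.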
+ qs = urlencode({
+ 'channel': channel,
+ 'ts': ts,
+ 'limit': 1,
+ 'inclusive': 'true',
+ })
+ url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to get slack message")
+ data = module.from_json(response.read())
+ if len(data['messages']) < 1:
+ module.fail_json(msg="no messages matching ts: %s" % ts)
+ if len(data['messages']) > 1:
+ module.fail_json(msg="more than 1 message matching ts: %s" % ts)
+ return data['messages'][0]
+
+
+def do_notify_slack(module, domain, token, payload):
+ use_webapi = False
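+    # Three token styles are handled: new-style webhook tokens (two or more
+    # '/'-separated segments, e.g. XXXX/YYYY/ZZZZ), WebAPI tokens (xoxa-/xoxb-/xoxp-
+    # prefixes), and legacy webhook tokens, which still require the I(domain) option.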
+ if token.count('/') >= 2:
+ # New style webhook token
+ slack_uri = SLACK_INCOMING_WEBHOOK % (token)
+ elif re.match(r'^xox[abp]-\S+$', token):
+ slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ use_webapi = True
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form "
+ "XXXX/YYYY/ZZZZ in your playbook")
+ slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+ if use_webapi:
+ headers['Authorization'] = 'Bearer ' + token
+
+ data = module.jsonify(payload)
+ response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data)
+
+ if info['status'] != 200:
+ if use_webapi:
+ obscured_incoming_webhook = slack_uri
+ else:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % ('[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
+
+ # each API requires different handling
+ if use_webapi:
+ return module.from_json(response.read())
+ else:
+ return {'webhook': 'ok'}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=False, default=None),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=False, default=None),
+ channel=dict(type='str', default=None),
+ thread_id=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://www.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str', default=None),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', default=None, choices=['none', 'full']),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal'),
+ attachments=dict(type='list', required=False, default=None),
+ blocks=dict(type='list', elements='dict'),
+ message_id=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ thread_id = module.params['thread_id']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+ blocks = module.params['blocks']
+ message_id = module.params['message_id']
+
+ color_choices = ['normal', 'good', 'warning', 'danger']
+ if color not in color_choices and not is_valid_hex_color(color):
+ module.fail_json(msg="Color value specified should be either one of %r "
+ "or any valid hex value with length 3 or 6." % color_choices)
+
+ changed = True
+
+ # if updating an existing message, we can check if there's anything to update
+ if message_id is not None:
+ changed = False
+ msg = get_slack_message(module, domain, token, channel, message_id)
+ for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
+ if msg.get(key) != module.params.get(key):
+ changed = True
+ break
+ # if check mode is active, we shouldn't do anything regardless.
+ # if changed=False, we don't need to do anything, so don't do it.
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+ elif module.check_mode:
+ module.exit_json(changed=changed)
+
+ payload = build_payload_for_slack(module, text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id)
+ slack_response = do_notify_slack(module, domain, token, payload)
+
+ if 'ok' in slack_response:
+ # Evaluate WebAPI response
+ if slack_response['ok']:
+ # return payload as a string for backwards compatibility
+ payload_json = module.jsonify(payload)
+ module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+ api=slack_response, payload=payload_json)
+ else:
+ module.fail_json(msg="Slack API error", error=slack_response['error'])
+ else:
+ # Exit with plain OK from WebHook, since we don't have more information
+ # If we get 200 from webhook, the only answer is OK
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py
new file mode 100644
index 00000000..424f5b1b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+ - Manage binary packages for Slackware using 'slackpkg' which
+ is available in versions after 12.2.
+options:
+ name:
+ description:
+ - name of package to install/remove
+ required: true
+ type: list
+ elements: str
+ aliases: [pkg]
+
+ state:
+ description:
+      - State of the package. You can use C(installed) as an alias for C(present) and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
+ required: false
+ default: present
+ type: str
+
+ update_cache:
+ description:
+ - update the package database first
+ required: false
+ default: false
+ type: bool
+ aliases: [update-cache]
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.slackpkg:
+ name: foo
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.slackpkg:
+ name: foo,bar
+ state: absent
+
+- name: Make sure that it is the most updated package
+ community.general.slackpkg:
+ name: foo
+ state: latest
+'''
+
+import os
+import platform
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, slackpkg_path, name):
+
+ machine = platform.machine()
+ # Exception for kernel-headers package on x86_64
+ if name == 'kernel-headers' and machine == 'x86_64':
+ machine = 'x86'
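+    # Installed Slackware packages are recorded in /var/log/packages as files
+    # named <name>-<version>-<arch>-<build>, e.g. 'foo-1.2.3-x86_64-1'.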
+ pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine)))
+ packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)]
+
+ if len(packages) > 0:
+ return True
+
+ return False
+
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+    # Use a for loop so that, if one package fails, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ remove %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ install %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
+ for package in packages:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -default_answer=y -batch=on \
+ upgrade %s" % (slackpkg_path,
+ package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ update_cache=dict(default=False, aliases=["update-cache"],
+ type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_facts.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+options:
+ filters:
+ description:
+      - Criteria for selecting an image. Can be any value from the image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py
new file mode 100644
index 00000000..17761af8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/smartos_image_info.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details.
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+options:
+ filters:
+ description:
+      - Criteria for selecting an image. Can be any value from the image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+# When the module is called as smartos_image_facts, return values are published
+# in ansible_facts['smartos_images'] and can be used as follows.
+# Note that this is deprecated and will stop working in community.general 3.0.0.
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm')]
+
+ cmd.append('list')
+ cmd.append('-j')
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filters=dict(default=None),
+ ),
+ supports_check_mode=False,
+ )
+ is_old_facts = module._name in ('smartos_image_facts', 'community.general.smartos_image_facts')
+ if is_old_facts:
+ module.deprecate("The 'smartos_image_facts' module has been renamed to 'smartos_image_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=data)
+ else:
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py
new file mode 100644
index 00000000..9776b4e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snap.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
+# Copyright: (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+
+short_description: Manages snaps
+
+
+description:
+    - "Manages snap packages."
+
+options:
+ name:
+ description:
+ - Name of the snap to install or remove. Can be a list of snaps.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: present
+ choices: [ absent, present ]
+ type: str
+ classic:
+ description:
+ - Confinement policy. The classic confinement allows a snap to have
+ the same level of access to the system as "classic" packages,
+ like those managed by APT. This option corresponds to the --classic argument.
+ This option can only be specified if there is a single snap in the task.
+ type: bool
+ required: false
+ default: no
+ channel:
+ description:
+ - Define which release of a snap is installed and tracked for updates.
+ This option can only be specified if there is a single snap in the task.
+ type: str
+ required: false
+ default: stable
+
+author:
+ - Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
+ - Stanislas Lange (@angristan) <angristan@pm.me>
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snaps
+- name: Install foo
+ community.general.snap:
+ name:
+ - foo
+ - bar
+
+# Remove "foo" snap
+- name: Remove foo
+ community.general.snap:
+ name: foo
+ state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+ community.general.snap:
+ name: foo
+ classic: yes
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+ community.general.snap:
+ name: foo
+ channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+ description: Whether or not the snaps were installed with the classic confinement
+ type: bool
+ returned: When snaps are installed
+channel:
+ description: The channel the snaps were installed from
+ type: str
+ returned: When snaps are installed
+cmd:
+ description: The command that was executed on the host
+ type: str
+ returned: When changed is true
+snaps_installed:
+ description: The list of actually installed snaps
+ type: list
+ returned: When any snaps have been installed
+snaps_removed:
+ description: The list of actually removed snaps
+ type: list
+ returned: When any snaps have been removed
+'''
+
+import operator
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def validate_input_snaps(module):
+    """Ensure that all requested snaps exist."""
+ for snap_name in module.params['name']:
+ if not snap_exists(module, snap_name):
+ module.fail_json(msg="No snap matching '%s' available." % snap_name)
+
+
+def snap_exists(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'info', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def is_snap_installed(module, snap_name):
+ snap_path = module.get_bin_path("snap", True)
+ cmd_parts = [snap_path, 'list', snap_name]
+ cmd = ' '.join(cmd_parts)
+ rc, out, err = module.run_command(cmd, check_rc=False)
+
+ return rc == 0
+
+
+def get_snap_for_action(module):
+ """Construct a list of snaps to use for current action."""
+ snaps = module.params['name']
+
+ is_present_state = module.params['state'] == 'present'
+ negation_predicate = operator.not_ if is_present_state else bool
+
+ def predicate(s):
+ return negation_predicate(is_snap_installed(module, s))
+
+ return [s for s in snaps if predicate(s)]
+
+
+def get_base_cmd_parts(module):
+ action_map = {
+ 'present': 'install',
+ 'absent': 'remove',
+ }
+
+ state = module.params['state']
+
+ classic = ['--classic'] if module.params['classic'] else []
+ channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else []
+
+ snap_path = module.get_bin_path("snap", True)
+ snap_action = action_map[state]
+
+ cmd_parts = [snap_path, snap_action]
+ if snap_action == 'install':
+ cmd_parts += classic + channel
+
+ return cmd_parts
+
+
+def get_cmd_parts(module, snap_names):
+ """Return list of cmds to run in exec format."""
+ is_install_mode = module.params['state'] == 'present'
+ has_multiple_snaps = len(snap_names) > 1
+
+ cmd_parts = get_base_cmd_parts(module)
+ has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts
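+    # --classic and --channel apply to a single snap only, so installs using them
+    # are split into one command per snap; everything else runs as one batched command.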
+
+ if not (is_install_mode and has_one_pkg_params and has_multiple_snaps):
+ return [cmd_parts + snap_names]
+
+ return [cmd_parts + [s] for s in snap_names]
+
+
+def run_cmd_for(module, snap_names):
+ cmds_parts = get_cmd_parts(module, snap_names)
+ cmd = '; '.join(' '.join(c) for c in cmds_parts)
+ cmd = 'sh -c "{0}"'.format(cmd)
+
+ # Actually execute the snap command
+ return (cmd, ) + module.run_command(cmd, check_rc=False)
+
+
+def execute_action(module):
+ is_install_mode = module.params['state'] == 'present'
+ exit_kwargs = {
+ 'classic': module.params['classic'],
+ 'channel': module.params['channel'],
+ } if is_install_mode else {}
+
+ actionable_snaps = get_snap_for_action(module)
+ if not actionable_snaps:
+ module.exit_json(changed=False, **exit_kwargs)
+
+    changed_def_args = {
+        'changed': True,
+        'snaps_{result}'.format(
+            result='installed' if is_install_mode else 'removed'
+        ): actionable_snaps,
+    }
+
+ if module.check_mode:
+ module.exit_json(**dict(changed_def_args, **exit_kwargs))
+
+ cmd, rc, out, err = run_cmd_for(module, actionable_snaps)
+ cmd_out_args = {
+ 'cmd': cmd,
+ 'rc': rc,
+ 'stdout': out,
+ 'stderr': err,
+ }
+
+ if rc == 0:
+ module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))
+ else:
+        msg = "Oops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.".format(cmd=cmd)
+ if is_install_mode:
+ m = re.match(r'^error: This revision of snap "(?P<package_name>\w+)" was published using classic confinement', err)
+ if m is not None:
+ err_pkg = m.group('package_name')
+ msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+ module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))
+
+
+def main():
+ module_args = {
+ 'name': dict(type='list', elements='str', required=True),
+ 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),
+ 'classic': dict(type='bool', required=False, default=False),
+ 'channel': dict(type='str', required=False, default='stable'),
+ }
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ validate_input_snaps(module)
+
+ # Apply changes to the snaps
+ execute_action(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py
new file mode 100644
index 00000000..661db460
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/snmp_facts.py
@@ -0,0 +1,459 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+    - Retrieve facts for a device using SNMP. The facts will be
+      inserted into the C(ansible_facts) key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+    - Set to the target SNMP server (normally C({{ inventory_hostname }})).
+ type: str
+ required: true
+ version:
+ description:
+ - SNMP Version to use, C(v2), C(v2c) or C(v3).
+ type: str
+ required: true
+ choices: [ v2, v2c, v3 ]
+ community:
+ description:
+ - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ type: str
+ level:
+ description:
+ - Authentication level.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ authNoPriv, authPriv ]
+ username:
+ description:
+ - Username for SNMPv3.
+ - Required if I(version) is C(v3).
+ type: str
+ integrity:
+ description:
+ - Hashing algorithm.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ md5, sha ]
+ authkey:
+ description:
+ - Authentication key.
+    - Required if I(version) is C(v3).
+ type: str
+ privacy:
+ description:
+ - Encryption algorithm.
+ - Required if I(level) is C(authPriv).
+ type: str
+ choices: [ aes, des ]
+ privkey:
+ description:
+ - Encryption key.
+ - Required if I(level) is C(authPriv).
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v2c
+ community: public
+  delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+ description: A textual description of the entity.
+ returned: success
+ type: str
+ sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+ description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+ returned: success
+ type: str
+ sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+ description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+ returned: success
+ type: int
+ sample: 42388
+ansible_syscontact:
+ description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success
+ type: str
+ sample: Me <me@example.org>
+ansible_sysname:
+ description: An administratively-assigned name for this managed node.
+ returned: success
+ type: str
+ sample: ubuntu-user
+ansible_syslocation:
+ description: The physical location of this node (e.g., `telephone closet, 3rd floor').
+ returned: success
+ type: str
+ sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+ description: List of all IPv4 addresses.
+ returned: success
+ type: list
+ sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+ description: Dictionary of each network interface and its metadata.
+ returned: success
+ type: dict
+ sample: {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
+ }
+'''
+
+import binascii
+import traceback
+from collections import defaultdict
+
+PYSNMP_IMP_ERR = None
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pysnmp.proto.rfc1905 import EndOfMibView
+ HAS_PYSNMP = True
+except Exception:
+ PYSNMP_IMP_ERR = traceback.format_exc()
+ HAS_PYSNMP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_text
+
+
+class DefineOid(object):
+
+ def __init__(self, dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
+def decode_hex(hexstring):
+
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return to_text(binascii.unhexlify(hexstring[2:]))
+ return hexstring
+
+
+def decode_mac(hexstring):
+
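+    # ifPhysAddress values arrive as a '0x'-prefixed string of 12 hex digits
+    # (14 characters in total); strip the prefix, otherwise return the value unchanged.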
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ return hexstring
+
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options:
+ return adminstatus_options[int_adminstatus]
+ return ""
+
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options:
+ return operstatus_options[int_operstatus]
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(type='str'),
+ username=dict(type='str'),
+ level=dict(type='str', choices=['authNoPriv', 'authPriv']),
+ integrity=dict(type='str', choices=['md5', 'sha']),
+ privacy=dict(type='str', choices=['aes', 'des']),
+ authkey=dict(type='str', no_log=True),
+ privkey=dict(type='str', no_log=True),
+ ),
+ required_together=(
+ ['username', 'level', 'integrity', 'authkey'],
+ ['privacy', 'privkey'],
+ ),
+ supports_check_mode=False,
+ )
+
+ m_args = module.params
+
+ if not HAS_PYSNMP:
+ module.fail_json(msg=missing_required_lib('pysnmp'), exception=PYSNMP_IMP_ERR)
+
+ cmdGen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] in ("v2", "v2c"):
+ if m_args['community'] is None:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] in ("v2", "v2c"):
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ def Tree():
+ return defaultdict(Tree)
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ if isinstance(val, EndOfMibView):
+ continue
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+ if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
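+            # For the IP-MIB tables the last four OID components encode the IPv4
+            # address itself; recover it to key the per-address results.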
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+ interface_to_ipv4 = {}
+ for ipv4_network in ipv4_networks:
+ current_interface = ipv4_networks[ipv4_network]['interface']
+ current_network = {
+ 'address': ipv4_networks[ipv4_network]['address'],
+ 'netmask': ipv4_networks[ipv4_network]['netmask']
+ }
+ if current_interface not in interface_to_ipv4:
+ interface_to_ipv4[current_interface] = []
+ interface_to_ipv4[current_interface].append(current_network)
+ else:
+ interface_to_ipv4[current_interface].append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py
new file mode 100644
index 00000000..8ecdeb8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/solaris_zone.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones.
+  - This module does not currently allow changing of options for a zone that has already been created.
+author:
+- Paul Markham (@pmarkham)
+requirements:
+ - Solaris 10 or 11
+options:
+ state:
+ description:
+ - C(present), configure and install the zone.
+ - C(installed), synonym for C(present).
+ - C(running), if the zone already exists, boot it, otherwise, configure and install
+ the zone first, then boot it.
+ - C(started), synonym for C(running).
+ - C(stopped), shutdown a zone.
+ - C(absent), destroy the zone.
+      - C(configured), configure the zone so that it is ready to be attached.
+ - C(attached), attach a zone, but do not boot it.
+      - C(detached), shutdown and detach a zone.
+ type: str
+ choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+ default: present
+ name:
+ description:
+ - Zone name.
+      - A zone name must be a unique name.
+ - A zone name must begin with an alpha-numeric character.
+ - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+ - The name cannot be longer than 64 characters.
+ type: str
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ type: str
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ type: bool
+ default: no
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ type: str
+ config:
+ description:
+ - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+ and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+ "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ type: str
+ default: ''
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ type: str
+ default: ''
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ type: str
+ default: ''
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ type: str
+ default: ''
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ type: int
+ default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: True
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+
+- name: Stop a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: stopped
+
+- name: Destroy a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: absent
+
+- name: Detach a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: detached
+
+- name: Configure a zone, ready to be attached
+ community.general.solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+ community.general.solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: -u
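+
+# The following task is an illustrative sketch added for clarity and is not part
+# of the original module examples. The profile file sc_profile.xml and the
+# timeout value are placeholders; see the install_options description above.
+- name: Create and boot a Solaris 11 zone using a system configuration profile
+  community.general.solaris_zone:
+    name: zone2
+    state: running
+    path: /zones/zone2
+    install_options: '-c sc_profile.xml'
+    timeout: 1200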
+'''
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+ if not match:
+ self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+                                      "Please refer to the documentation for correct zone name specifications.")
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
+ id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+ id.write('1 # System previously configured?\n')
+ id.write('1 # Bootparams succeeded?\n')
+ id.write('1 # System is on a network?\n')
+ id.write('1 # Extended network information gathered?\n')
+ id.write('0 # Autobinder succeeded?\n')
+ id.write('1 # Network has subnets?\n')
+ id.write('1 # root password prompted for?\n')
+ id.write('1 # locale and term prompted for?\n')
+ id.write('1 # security policy in place\n')
+ id.write('1 # NFSv4 domain configured\n')
+ id.write('0 # Auto Registration Configured\n')
+ id.write('vt100')
+ id.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+ def exists(self):
+ cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+ self.msg.append('zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present',
+ choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
+ path=dict(type='str'),
+ sparse=dict(type='bool', default=False),
+ root_password=dict(type='str', no_log=True),
+ timeout=dict(type='int', default=600),
+ config=dict(type='str', default=''),
+ create_options=dict(type='str', default=''),
+ install_options=dict(type='str', default=''),
+ attach_options=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py
new file mode 100644
index 00000000..347413fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sorcery.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+    - Manages "spells" on Source Mage GNU/Linux using the I(sorcery) toolchain.
+author: "Vlad Glagolev (@vaygr)"
+notes:
+ - When all three components are selected, the update goes by the sequence --
+ Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+ - grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+ yet supported.
+requirements:
+ - bash
+options:
+ name:
+ description:
+ - Name of the spell
+ - multiple names can be given, separated by commas
+ - special value '*' in conjunction with states C(latest) or
+ C(rebuild) will update or rebuild the whole system respectively
+ aliases: ["spell"]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to cast, dispel or rebuild a package
+ - state C(cast) is an equivalent of C(present), not C(latest)
+ - state C(latest) always triggers C(update_cache=yes)
+ - state C(rebuild) implies cast of all specified spells, not only
+        those that existed before
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+ default: "present"
+ type: str
+
+ depends:
+ description:
+ - Comma-separated list of _optional_ dependencies to build a spell
+ (or make sure it is built) with; use +/- in front of dependency
+ to turn it on/off ('+' is optional though)
+ - this option is ignored if C(name) parameter is equal to '*' or
+ contains more than one spell
+ - providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)'
+ type: str
+
+ update:
+ description:
+ - Whether or not to update sorcery scripts at the very first stage
+ type: bool
+ default: no
+
+ update_cache:
+ description:
+ - Whether or not to update grimoire collection before casting spells
+ type: bool
+ default: no
+ aliases: ["update_codex"]
+
+ cache_valid_time:
+ description:
+ - Time in seconds to invalidate grimoire collection on update
+ - especially useful for SCM and rsync grimoires
+ - makes sense only in pair with C(update_cache)
+ type: int
+'''
+
+
+EXAMPLES = '''
+- name: Make sure spell foo is installed
+ community.general.sorcery:
+ spell: foo
+ state: present
+
+- name: Make sure spells foo, bar and baz are removed
+ community.general.sorcery:
+ spell: foo,bar,baz
+ state: absent
+
+- name: Make sure spell foo with dependencies bar and baz is installed
+ community.general.sorcery:
+ spell: foo
+ depends: bar,baz
+ state: present
+
+- name: Make sure spell foo with bar and without baz dependencies is installed
+ community.general.sorcery:
+ spell: foo
+ depends: +bar,-baz
+ state: present
+
+- name: Make sure spell foo with libressl (providing SSL) dependency is installed
+ community.general.sorcery:
+ spell: foo
+ depends: libressl(SSL)
+ state: present
+
+- name: Make sure spells with/without required dependencies (if any) are installed
+ community.general.sorcery:
+ name: "{{ item.spell }}"
+ depends: "{{ item.depends | default(None) }}"
+ state: present
+ loop:
+ - { spell: 'vifm', depends: '+file,-gtk+2' }
+ - { spell: 'fwknop', depends: 'gpgme' }
+ - { spell: 'pv,tnftp,tor' }
+
+- name: Install the latest version of spell foo using regular glossary
+ community.general.sorcery:
+ name: foo
+ state: latest
+
+- name: Rebuild spell foo
+ community.general.sorcery:
+ spell: foo
+ state: rebuild
+
+- name: Rebuild the whole system, but update Sorcery and Codex first
+ community.general.sorcery:
+ spell: '*'
+ state: rebuild
+ update: yes
+ update_cache: yes
+
+- name: Refresh the grimoire collection if it is 1 day old using native sorcerous alias
+ community.general.sorcery:
+ update_codex: yes
+ cache_valid_time: 86400
+
+- name: Update only Sorcery itself
+ community.general.sorcery:
+ update: yes
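+
+# Illustrative sketch, not part of the original examples: update every spell on
+# the system to the latest version, refreshing the grimoire collection only if
+# it is older than one hour. The 3600 value is a placeholder.
+- name: Update the whole system, skipping a Codex refresh if it is recent
+  community.general.sorcery:
+    name: '*'
+    state: latest
+    update_cache: yes
+    cache_valid_time: 3600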
+'''
+
+
+RETURN = '''
+'''
+
+
+import datetime
+import fileinput
+import os
+import re
+import shutil
+import sys
+
+
+# auto-filled at module init
+SORCERY = {
+ 'sorcery': None,
+ 'scribe': None,
+ 'cast': None,
+ 'dispel': None,
+ 'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+
+def get_sorcery_ver(module):
+ """ Get Sorcery version. """
+
+ cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0 or not stdout:
+ module.fail_json(msg="unable to get Sorcery version")
+
+ return stdout.strip()
+
+
+def codex_fresh(codex, module):
+ """ Check if grimoire collection is fresh enough. """
+
+ if not module.params['cache_valid_time']:
+ return False
+
+ timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+ for grimoire in codex:
+ lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+ grimoire + ".lastupdate")
+
+ try:
+ mtime = os.stat(lastupdate_path).st_mtime
+ except Exception:
+ return False
+
+ lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+ # if any grimoire is not fresh, we invalidate the Codex
+ if lastupdate_ts + timedelta < datetime.datetime.now():
+ return False
+
+ return True
+
+
+def codex_list(module):
+ """ List valid grimoire collection. """
+
+ codex = {}
+
+ cmd_scribe = "%s index" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+ rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+ # drop 4-line header and empty trailing line
+ for line in stdout.splitlines()[4:-1]:
+ match = rex.match(line)
+
+ if match:
+ codex[match.group('grim')] = match.group('ver')
+
+ if not codex:
+ module.fail_json(msg="no grimoires to operate on; add at least one")
+
+ return codex
+
+
+def update_sorcery(module):
+ """ Update sorcery scripts.
+
+ This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+ positive change value.
+
+ """
+
+ changed = False
+
+ if module.check_mode:
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=True, msg="would have updated Sorcery")
+ else:
+ sorcery_ver = get_sorcery_ver(module)
+
+ cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+ if sorcery_ver != get_sorcery_ver(module):
+ changed = True
+
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+ """ Update grimoire collections.
+
+ This runs 'scribe update'. Check mode always returns a positive change
+ value when 'cache_valid_time' is used.
+
+ """
+
+ params = module.params
+
+ changed = False
+
+ codex = codex_list(module)
+ fresh = codex_fresh(codex, module)
+
+ if module.check_mode:
+ if not params['name']:
+ if not fresh:
+ changed = True
+
+ module.exit_json(changed=changed, msg="would have updated Codex")
+ elif not fresh or params['name'] and params['state'] == 'latest':
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_scribe = "%s update" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
+
+ if codex != codex_list(module):
+ changed = True
+
+ if not params['name']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Codex")
+
+
+def match_depends(module):
+ """ Check for matching dependencies.
+
+ This inspects spell's dependencies with the desired states and returns
+ 'False' if a recast is needed to match them. It also adds required lines
+ to the system-wide depends file for proper recast procedure.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ depends = {}
+
+ depends_ok = True
+
+ if len(spells) > 1 or not params['depends']:
+ return depends_ok
+
+ spell = spells[0]
+
+ if module.check_mode:
+ sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+ try:
+ shutil.copy2(sorcery_depends_orig, sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to copy depends.check file")
+ else:
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+ rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+ for d in params['depends'].split(','):
+ match = rex.match(d)
+
+ if not match:
+ module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+ # normalize status
+ if not match.group('status') or match.group('status') == '+':
+ status = 'on'
+ else:
+ status = 'off'
+
+ depends[match.group('depend')] = status
+
+ # drop providers spec
+ depends_list = [s.split('(')[0] for s in depends]
+
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ if rc != 0:
+ module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+ fi = fileinput.input(sorcery_depends, inplace=True)
+
+ try:
+ try:
+ for line in fi:
+ if line.startswith(spell + ':'):
+ match = None
+
+ for d in depends:
+ # when local status is 'off' and dependency is provider,
+ # use only provider value
+ d_offset = d.find('(')
+
+ if d_offset == -1:
+ d_p = ''
+ else:
+ d_p = re.escape(d[d_offset:])
+
+ # .escape() is needed mostly for the spells like 'libsigc++'
+ rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+ (re.escape(spell), re.escape(d), d_p))
+
+ match = rex.match(line)
+
+ # we matched the line "spell:dependency:on|off:optional:"
+ if match:
+ # if we also matched the local status, mark dependency
+ # as empty and put it back into depends file
+ if match.group('lstatus') == depends[d]:
+ depends[d] = None
+
+ sys.stdout.write(line)
+
+ # status is not that we need, so keep this dependency
+ # in the list for further reverse switching;
+ # stop and process the next line in both cases
+ break
+
+ if not match:
+ sys.stdout.write(line)
+ else:
+ sys.stdout.write(line)
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fi.close()
+
+ depends_new = [v for v in depends if depends[v]]
+
+ if depends_new:
+ try:
+ try:
+ fl = open(sorcery_depends, 'a')
+
+ for k in depends_new:
+ fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fl.close()
+
+ depends_ok = False
+
+ if module.check_mode:
+ try:
+ os.remove(sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to clean up depends.backup file")
+
+ return depends_ok
+
+
+def manage_spells(module):
+ """ Cast or dispel spells.
+
+    This manages the whole system ('*'), a list of spells or a single spell. The
+    'cast' command is used to install or rebuild spells, while 'dispel' takes
+    care of their removal from the system.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+ if spells == '*':
+ if params['state'] == 'latest':
+ # back up original queue
+ try:
+ os.rename(sorcery_queue, sorcery_queue + ".backup")
+ except IOError:
+ module.fail_json(msg="failed to backup the update queue")
+
+ # see update_codex()
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_sorcery = "%s queue"
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to generate the update queue")
+
+ try:
+ queue_size = os.stat(sorcery_queue).st_size
+ except Exception:
+ module.fail_json(msg="failed to read the update queue")
+
+ if queue_size != 0:
+ if module.check_mode:
+ try:
+ os.rename(sorcery_queue + ".backup", sorcery_queue)
+ except IOError:
+ module.fail_json(msg="failed to restore the update queue")
+
+ module.exit_json(changed=True, msg="would have updated the system")
+
+ cmd_cast = "%s --queue" % SORCERY['cast']
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to update the system")
+
+ module.exit_json(changed=True, msg="successfully updated the system")
+ else:
+ module.exit_json(changed=False, msg="the system is already up to date")
+ elif params['state'] == 'rebuild':
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have rebuilt the system")
+
+ cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+ module.exit_json(changed=True, msg="successfully rebuilt the system")
+ else:
+ module.fail_json(msg="unsupported operation on '*' name value")
+ else:
+ if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+ # extract versions from the 'gaze' command
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ # fail if any of spells cannot be found
+ if rc != 0:
+ module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+ ', '.join(spells))
+
+ cast_queue = []
+ dispel_queue = []
+
+ rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+ # drop 2-line header and empty trailing line
+ for line in stdout.splitlines()[2:-1]:
+ match = rex.match(line)
+
+ cast = False
+
+ if params['state'] == 'present':
+ # spell is not installed..
+ if match.group('inst_ver') == '-':
+ # ..so set up depends reqs for it
+ match_depends(module)
+
+ cast = True
+ # spell is installed..
+ else:
+ # ..but does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'latest':
+ # grimoire and installed versions do not match..
+ if match.group('grim_ver') != match.group('inst_ver'):
+ # ..so check for depends reqs first and set them up
+ match_depends(module)
+
+ cast = True
+ # grimoire and installed versions match..
+ else:
+ # ..but the spell does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'rebuild':
+ cast = True
+ # 'absent'
+ else:
+ if match.group('inst_ver') != '-':
+ dispel_queue.append(match.group('spell'))
+
+ if cast:
+ cast_queue.append(match.group('spell'))
+
+ if cast_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have cast spell(s)")
+
+ cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to cast spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully cast spell(s)")
+ elif params['state'] != 'absent':
+ module.exit_json(changed=False, msg="spell(s) are already cast")
+
+ if dispel_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+ cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_dispel)
+
+ if rc != 0:
+ module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ else:
+ module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
+ ),
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
+ )
+
+ if os.geteuid() != 0:
+ module.fail_json(msg="root privileges are required for this operation")
+
+ for c in SORCERY:
+ SORCERY[c] = module.get_bin_path(c, True)
+
+ # prepare environment: run sorcery commands without asking questions
+ module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+ params = module.params
+
+ # normalize 'state' parameter
+ if params['state'] in ('present', 'cast'):
+ params['state'] = 'present'
+ elif params['state'] in ('absent', 'dispelled'):
+ params['state'] = 'absent'
+
+ if params['update']:
+ update_sorcery(module)
+
+ if params['update_cache'] or params['state'] == 'latest':
+ update_codex(module)
+
+ if params['name']:
+ manage_spells(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py
new file mode 100644
index 00000000..80c1c493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_access_key.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_access_key
+short_description: Manages Bitbucket repository access keys
+description:
+ - Manages Bitbucket repository access keys (also called deploy keys).
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public key.
+ type: str
+ label:
+ description:
+ - The key label.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the access key.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Bitbucket OAuth consumer should have permissions to read and administrate account repositories.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create access key
+ community.general.bitbucket_access_key:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ label: 'Bitbucket'
+ state: present
+
+- name: Delete access key
+ community.general.bitbucket_access_key:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ label: Bitbucket
+ state: absent
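+
+# Illustrative sketch, not part of the original examples: the client_id and
+# client_secret values are placeholders. They may also be supplied through the
+# BITBUCKET_CLIENT_ID and BITBUCKET_CLIENT_SECRET environment variables.
+- name: Create access key using explicit OAuth consumer credentials
+  community.general.bitbucket_access_key:
+    client_id: ABC123
+    client_secret: verysecret
+    repository: bitbucket-repo
+    username: bitbucket_username
+    key: '{{ lookup("file", "bitbucket.pub") }}'
+    label: Deployment key
+    state: present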
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_key': '`key` is required when the `state` is `present`',
+ 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
+ 'invalid_username_or_repo': 'Invalid `repository` or `username`',
+ 'invalid_key': 'Invalid SSH key or key is already in use',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'deploy-key-list': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'deploy-key-detail': '%s/2.0/repositories/{username}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_deploy_key(module, bitbucket):
+ """
+ Search for an existing deploy key on Bitbucket
+ with the label specified in module param `label`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing deploy key or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+    # Look through all response pages in search of the deploy key we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
+
+ res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None)
+
+ if res is not None:
+ return res
+
+ return None
+
+
+def create_deploy_key(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['key'],
+ 'label': module.params['label'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] == 400:
+ module.fail_json(msg=error_messages['invalid_key'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def delete_deploy_key(module, bitbucket, key_id):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ key_id=key_id,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_username_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ key=dict(type='str'),
+ label=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ key = module.params['key']
+ state = module.params['state']
+
+ # Check parameters
+ if (key is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_key'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing deploy key (if any)
+ existing_deploy_key = get_existing_deploy_key(module, bitbucket)
+ changed = False
+
+    # Create a new deploy key in case it doesn't exist
+ if not existing_deploy_key and (state == 'present'):
+ if not module.check_mode:
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Update deploy key if the old value does not match the new one
+ elif existing_deploy_key and (state == 'present'):
+ if not key.startswith(existing_deploy_key.get('key')):
+ if not module.check_mode:
+            # Bitbucket doesn't support updating a key for the same label,
+ # so we need to delete the old one first
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Delete deploy key
+ elif existing_deploy_key and (state == 'absent'):
+ if not module.check_mode:
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py
new file mode 100644
index 00000000..ab3b7ec4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_key_pair
+short_description: Manages Bitbucket pipeline SSH key pair
+description:
+ - Manages Bitbucket pipeline SSH key pair.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ public_key:
+ description:
+ - The public key.
+ type: str
+ private_key:
+ description:
+ - The private key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the key pair.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ public_key: '{{lookup("file", "bitbucket.pub") }}'
+ private_key: '{{lookup("file", "bitbucket") }}'
+ state: present
+
+- name: Remove SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ state: absent
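+
+# Illustrative sketch, not part of the original examples: the key material is
+# read from placeholder (for example vaulted) variables instead of files.
+- name: Create or update SSH key pair from vaulted variables
+  community.general.bitbucket_pipeline_key_pair:
+    repository: bitbucket-repo
+    username: bitbucket_username
+    public_key: '{{ pipeline_public_key }}'
+    private_key: '{{ vaulted_pipeline_private_key }}'
+    state: present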
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account, repository or SSH key pair was not found',
+ 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'ssh-key-pair': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+ """
+ Retrieves an existing ssh key pair from repository
+ specified in module param `repository`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing key pair or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+ "type": "pipeline_ssh_key_pair"
+ }
+ """
+ api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+
+ info, content = bitbucket.request(
+ api_url=api_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ # Account, repository or SSH key pair was not found.
+ return None
+
+ return content
+
+
+def update_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='PUT',
+ data={
+ 'private_key': module.params['private_key'],
+ 'public_key': module.params['public_key'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 200:
+        module.fail_json(msg='Failed to create or update pipeline ssh key pair: {0}'.format(info))
+
+
+def delete_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline ssh key pair: {0}'.format(info))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ public_key=dict(type='str'),
+ private_key=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ state = module.params['state']
+ public_key = module.params['public_key']
+ private_key = module.params['private_key']
+
+ # Check parameters
+ if ((public_key is None) or (private_key is None)) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_keys'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing ssh key
+ key_pair = get_existing_ssh_key_pair(module, bitbucket)
+ changed = False
+
+ # Create or update key pair
+ if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
+ if not module.check_mode:
+ update_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ # Delete key pair
+ elif key_pair and (state == 'absent'):
+ if not module.check_mode:
+ delete_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py
new file mode 100644
index 00000000..dba9f9aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_known_host
+short_description: Manages Bitbucket pipeline known hosts
+description:
+ - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
+  - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
+author:
+ - Evgeniy Krysanov (@catcombo)
+requirements:
+ - paramiko
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The FQDN of the known host.
+ type: str
+ required: true
+ key:
+ description:
+ - The public key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the record.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create known hosts from the list
+ community.general.bitbucket_pipeline_known_host:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - bitbucket.org
+ - example.com
+
+- name: Remove known host
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ state: absent
+
+- name: Specify public key file
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: bitbucket.org
+ key: '{{lookup("file", "bitbucket.pub") }}'
+ state: absent
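+
+# Illustrative sketch, not part of the original examples: supplying the key
+# explicitly skips the automatic fingerprint lookup (and the paramiko
+# requirement). The key value is a truncated placeholder.
+- name: Add a known host with an explicitly provided public key
+  community.general.bitbucket_pipeline_known_host:
+    repository: bitbucket-repo
+    username: bitbucket_username
+    name: example.com
+    key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB...'
+    state: present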
+'''
+
+RETURN = r''' # '''
+
+import socket
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account or repository was not found',
+ 'unknown_key_type': 'Public key type is unknown',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'known-host-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'known-host-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_known_host(module, bitbucket):
+ """
+ Search for a host in Bitbucket pipelines known hosts
+ with the name specified in module param `name`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing host or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}'
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+    # Look through all response pages in search of the hostname we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches public key for specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+            hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing known host
+ existing_host = get_existing_known_host(module, bitbucket)
+ state = module.params['state']
+ changed = False
+
+    # Create a new host in case it doesn't exist
+ if not existing_host and (state == 'present'):
+ if not module.check_mode:
+ create_known_host(module, bitbucket)
+ changed = True
+
+ # Delete host
+ elif existing_host and (state == 'absent'):
+ if not module.check_mode:
+ delete_known_host(module, bitbucket, existing_host['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
new file mode 100644
index 00000000..33457fca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_variable
+short_description: Manages Bitbucket pipeline variables
+description:
+ - Manages Bitbucket pipeline variables.
+author:
+ - Evgeniy Krysanov (@catcombo)
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ username:
+ description:
+ - The repository owner.
+ type: str
+ required: true
+ name:
+ description:
+ - The pipeline variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - The pipeline variable value.
+ type: str
+ secured:
+ description:
+ - Whether to encrypt the variable value.
+ type: bool
+ default: no
+ state:
+ description:
+ - Indicates desired state of the variable.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Check mode is supported.
+ - For secured values return parameter C(changed) is always C(True).
+'''
+
+EXAMPLES = r'''
+- name: Create or update pipeline variables from the list
+ community.general.bitbucket_pipeline_variable:
+ repository: 'bitbucket-repo'
+ username: bitbucket_username
+ name: '{{ item.name }}'
+ value: '{{ item.value }}'
+ secured: '{{ item.secured }}'
+ state: present
+ with_items:
+ - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: False }
+ - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: True }
+
+- name: Remove pipeline variable
+ community.general.bitbucket_pipeline_variable:
+ repository: bitbucket-repo
+ username: bitbucket_username
+ name: AWS_ACCESS_KEY
+ state: absent
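+
+# Illustrative sketch (not part of the upstream examples): create a secured
+# variable while passing the OAuth consumer credentials explicitly; all values
+# below are placeholders.
+- name: Create a secured pipeline variable
+  community.general.bitbucket_pipeline_variable:
+    client_id: '{{ bitbucket_client_id }}'
+    client_secret: '{{ bitbucket_client_secret }}'
+    repository: bitbucket-repo
+    username: bitbucket_username
+    name: DEPLOY_TOKEN
+    value: '{{ vault_deploy_token }}'
+    secured: yes
+    state: present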
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, _load_params
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_value': '`value` is required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'pipeline-variable-list': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'pipeline-variable-detail': '%s/2.0/repositories/{username}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_pipeline_variable(module, bitbucket):
+ """
+ Search for a pipeline variable
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing variable or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'name': 'AWS_ACCESS_OBKEY_ID',
+ 'value': 'x7HU80-a2',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
+ }
+
+ The `value` key in dict is absent in case of secured variable.
+ """
+ variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ )
+    # Look through all of the response pages in search of the variable we need
+ page = 1
+ while True:
+ next_url = "%s?page=%s" % (variables_base_url, page)
+ info, content = bitbucket.request(
+ api_url=next_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `username`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
+
+        # We are at the end of the list
+ if 'pagelen' in content and content['pagelen'] == 0:
+ return None
+
+ page += 1
+ var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None)
+
+ if var is not None:
+ var['name'] = var.pop('key')
+ return var
+
+ return None
+
+
+def create_pipeline_variable(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['name'],
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def update_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='PUT',
+ data={
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ username=module.params['username'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+class BitBucketPipelineVariable(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ params = _load_params() or {}
+ if params.get('secured'):
+ kwargs['argument_spec']['value'].update({'no_log': True})
+ super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ secured=dict(type='bool', default=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = BitBucketPipelineVariable(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ value = module.params['value']
+ state = module.params['state']
+ secured = module.params['secured']
+
+ # Check parameters
+ if (value is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_value'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing pipeline variable (if any)
+ existing_variable = get_existing_pipeline_variable(module, bitbucket)
+ changed = False
+
+    # Create a new variable if it does not already exist
+ if not existing_variable and (state == 'present'):
+ if not module.check_mode:
+ create_pipeline_variable(module, bitbucket)
+ changed = True
+
+ # Update variable if it is secured or the old value does not match the new one
+ elif existing_variable and (state == 'present'):
+ if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
+ if not module.check_mode:
+ update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ # Delete variable
+ elif existing_variable and (state == 'absent'):
+ if not module.check_mode:
+ delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py
new file mode 100644
index 00000000..7af3f279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/bzr.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: bzr
+author:
+- André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+options:
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [ parent ]
+ required: yes
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: yes
+ version:
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ default: head
+ force:
+ description:
+ - If C(yes), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was C(yes).
+ type: bool
+ default: 'no'
+ executable:
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+'''
+
+EXAMPLES = '''
+- name: Checkout
+ community.general.bzr:
+ name: bzr+ssh://foosball.example.org/path/to/branch
+ dest: /srv/checkout
+ version: 22
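+
+# Illustrative sketch (not in the upstream examples): update an existing
+# checkout to the latest revision, discarding any local modifications.
+- name: Update checkout, discarding local changes
+  community.general.bzr:
+    name: bzr+ssh://foosball.example.org/path/to/branch
+    dest: /srv/checkout
+    version: head
+    force: yes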
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''samples the version of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
+        # filter() returns an iterator on Python 3, so materialize it before taking len()
+        lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
+        return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Resets the index and working tree to head.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=no).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', required=True),
+ name=dict(type='str', required=True, aliases=['parent']),
+ version=dict(type='str', default='head'),
+ force=dict(type='bool', default=False),
+ executable=dict(type='str'),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err = (0, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py
new file mode 100644
index 00000000..66ef45f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/git_config.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Marius Gedminas <marius@pov.lt>
+# (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - Matthew Gamble (@djmattyg007)
+ - Marius Gedminas (@mgedmin)
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The C(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you don't want to use M(ansible.builtin.template) for the entire git
+ config file (e.g. because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
+ don't work correctly in check mode.
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope))
+ type: bool
+ default: 'no'
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ scope:
+ description:
+ - Specify which scope to read/set values from. This is required
+ when setting config values. If this is set to local, you must
+ also specify the repo parameter. It defaults to system only when
+ not using I(list_all)=yes.
+ choices: [ "local", "global", "system" ]
+ state:
+ description:
+ - "Indicates the setting should be set/unset.
+        This parameter has higher precedence than the I(value) parameter:
+ when I(state)=absent and I(value) is defined, I(value) is discarded."
+ choices: [ 'present', 'absent' ]
+ default: 'present'
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+- name: Remove a setting from ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ state: absent
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+- name: Add a setting system-wide
+ community.general.git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: color.ui
+ value: auto
+
+- name: Make etckeeper not complain when it is invoked by cron
+ community.general.git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=yes
+ community.general.git_config:
+ name: alias.diffc
+
+- name: Read all values from git config
+ community.general.git_config:
+ list_all: yes
+ scope: global
+
+- name: When list_all is yes and no scope is specified, you get configuration from all scopes
+ community.general.git_config:
+ list_all: yes
+
+- name: Specify a repository to include local settings
+ community.general.git_config:
+ list_all: yes
+ repo: /path/to/repo.git
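+
+# Illustrative sketch (not in the upstream examples): read a single value and
+# reuse it later through register; the variable name is arbitrary.
+- name: Read user.email from the global scope
+  community.general.git_config:
+    name: user.email
+    scope: global
+  register: git_email
+
+- name: Show the value that was read
+  ansible.builtin.debug:
+    msg: "Configured email is {{ git_email.config_value }}"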
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When list_all=no and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: str
+ sample: "vim"
+
+config_values:
+ description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dict
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ value=dict(required=False)
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+ required_if=[('scope', 'local', ['repo'])],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git', True)
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['state'] == 'absent':
+ unset = 'unset'
+ params['value'] = None
+ else:
+ unset = None
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(' '.join(args), cwd=dir)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value and not unset:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ elif unset and not out:
+ module.exit_json(changed=False, msg='no setting to unset')
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ if unset:
+ args.insert(len(args) - 1, "--" + unset)
+ cmd = ' '.join(args)
+ else:
+ new_value_quoted = shlex_quote(new_value)
+ cmd = ' '.join(args + [new_value_quoted])
+ try: # try using extra parameter from ansible-base 2.10.4 onwards
+ (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False)
+ except TypeError:
+ # @TODO remove try/except when community.general drop support for 2.10.x
+ if not os.path.isdir(dir):
+ module.fail_json(msg="Cannot find directory '{0}'".format(dir))
+ (rc, out, err) = module.run_command(cmd, cwd=dir)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=cmd)
+
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=(new_value or '') + "\n"
+ ),
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py
new file mode 100644
index 00000000..8836454e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_deploy_key.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_deploy_key
+author: "Ali (@bincyber)"
+short_description: Manages deploy keys for GitHub repositories.
+description:
+ - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
+ username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
+ rights on the repository are required."
+options:
+ github_url:
+ description:
+ - The base URL of the GitHub API
+ required: false
+ type: str
+ version_added: '0.2.0'
+ default: https://api.github.com
+ owner:
+ description:
+ - The name of the individual account or organization that owns the GitHub repository.
+ required: true
+ aliases: [ 'account', 'organization' ]
+ repo:
+ description:
+ - The name of the GitHub repository.
+ required: true
+ aliases: [ 'repository' ]
+ name:
+ description:
+ - The name for the deploy key.
+ required: true
+ aliases: [ 'title', 'label' ]
+ key:
+ description:
+ - The SSH public key to add to the repository as a deploy key.
+ required: true
+ read_only:
+ description:
+ - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ type: bool
+ default: 'yes'
+ state:
+ description:
+ - The state of the deploy key.
+ default: "present"
+ choices: [ "present", "absent" ]
+ force:
+ description:
+ - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ type: bool
+ default: 'no'
+ username:
+ description:
+      - The username to authenticate with. Should not be set when using a personal access token.
+ password:
+ description:
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ token:
+ description:
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ otp:
+ description:
+ - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
+ aliases: ['2fa_token']
+notes:
+ - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
+'''
+
+EXAMPLES = '''
+- name: Add a new read-only deploy key to a GitHub repository using basic authentication
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Remove an existing deploy key from a GitHub repository
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ force: yes
+ username: "johndoe"
+ password: "supersecretpassword"
+ state: absent
+
+- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ force: yes
+ token: "ABAQDAwXxn7kIMNWzcDfo..."
+
+- name: Re-add a deploy key to a GitHub repository but with a different name
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "replace-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Add a new deploy key to a GitHub repository using 2FA
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key-2"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ username: "johndoe"
+ password: "supersecretpassword"
+ otp: 123456
+
+- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
+ community.general.github_deploy_key:
+ github_url: "https://api.example.com"
+ owner: "janedoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: yes
+ username: "janedoe"
+ password: "supersecretpassword"
+'''
+
+RETURN = '''
+msg:
+ description: the status message describing what occurred
+ returned: always
+ type: str
+ sample: "Deploy key added successfully"
+
+http_status_code:
+ description: the HTTP status code returned by the GitHub API
+ returned: failed
+ type: int
+ sample: 400
+
+error:
+ description: the error message returned by the GitHub API
+ returned: failed
+ type: str
+ sample: "key is already in use"
+
+id:
+ description: the key identifier assigned by GitHub for the deploy key
+ returned: changed
+ type: int
+ sample: 24381901
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from re import findall
+
+
+class GithubDeployKey(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.github_url = self.module.params['github_url']
+ self.name = module.params['name']
+ self.key = module.params['key']
+ self.state = module.params['state']
+ self.read_only = module.params.get('read_only', True)
+ self.force = module.params.get('force', False)
+ self.username = module.params.get('username', None)
+ self.password = module.params.get('password', None)
+ self.token = module.params.get('token', None)
+ self.otp = module.params.get('otp', None)
+
+ @property
+ def url(self):
+ owner = self.module.params['owner']
+ repo = self.module.params['repo']
+ return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)
+
+ @property
+ def headers(self):
+ if self.username is not None and self.password is not None:
+ self.module.params['url_username'] = self.username
+ self.module.params['url_password'] = self.password
+ self.module.params['force_basic_auth'] = True
+ if self.otp is not None:
+ return {"X-GitHub-OTP": self.otp}
+ elif self.token is not None:
+ return {"Authorization": "token {0}".format(self.token)}
+ else:
+ return None
+
+ def paginate(self, url):
+ while url:
+ resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")
+
+ if info["status"] == 200:
+ yield self.module.from_json(resp.read())
+
+ links = {}
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+ links[y] = x
+
+ url = links.get('next')
+ else:
+ self.handle_error(method="GET", info=info)
+
+ def get_existing_key(self):
+ for keys in self.paginate(self.url):
+ if keys:
+ for i in keys:
+ existing_key_id = str(i["id"])
+ if i["key"].split() == self.key.split()[:2]:
+ return existing_key_id
+ elif i['title'] == self.name and self.force:
+ return existing_key_id
+ else:
+ return None
+
+ def add_new_key(self):
+ request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}
+
+ resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+
+ status_code = info["status"]
+
+ if status_code == 201:
+ response_body = self.module.from_json(resp.read())
+ key_id = response_body["id"]
+ self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
+ elif status_code == 422:
+ self.module.exit_json(changed=False, msg="Deploy key already exists")
+ else:
+ self.handle_error(method="POST", info=info)
+
+ def remove_existing_key(self, key_id):
+ resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")
+
+ status_code = info["status"]
+
+ if status_code == 204:
+ if self.state == 'absent':
+ self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
+ else:
+ self.handle_error(method="DELETE", info=info, key_id=key_id)
+
+ def handle_error(self, method, info, key_id=None):
+ status_code = info['status']
+ body = info.get('body')
+ if body:
+ err = self.module.from_json(body)['message']
+
+ if status_code == 401:
+ self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
+ elif status_code == 404:
+ self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
+ else:
+ if method == "GET":
+ self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+ elif method == "POST":
+ self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
+ elif method == "DELETE":
+ self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ github_url=dict(required=False, type='str', default="https://api.github.com"),
+ owner=dict(required=True, type='str', aliases=['account', 'organization']),
+ repo=dict(required=True, type='str', aliases=['repository']),
+ name=dict(required=True, type='str', aliases=['title', 'label']),
+ key=dict(required=True, type='str'),
+ read_only=dict(required=False, type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ force=dict(required=False, type='bool', default=False),
+ username=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ otp=dict(required=False, type='int', aliases=['2fa_token'], no_log=True),
+ token=dict(required=False, type='str', no_log=True)
+ ),
+ mutually_exclusive=[
+ ['password', 'token']
+ ],
+ required_together=[
+ ['username', 'password'],
+ ['otp', 'username', 'password']
+ ],
+ required_one_of=[
+ ['username', 'token']
+ ],
+ supports_check_mode=True,
+ )
+
+ deploy_key = GithubDeployKey(module)
+
+ if module.check_mode:
+ key_id = deploy_key.get_existing_key()
+ if deploy_key.state == "present" and key_id is None:
+ module.exit_json(changed=True)
+ elif deploy_key.state == "present" and key_id is not None:
+ module.exit_json(changed=False)
+
+ # to forcefully modify an existing key, the existing key must be deleted first
+ if deploy_key.state == 'absent' or deploy_key.force:
+ key_id = deploy_key.get_existing_key()
+
+ if key_id is not None:
+ deploy_key.remove_existing_key(key_id)
+ elif deploy_key.state == 'absent':
+ module.exit_json(changed=False, msg="Deploy key does not exist")
+
+ if deploy_key.state == "present":
+ deploy_key.add_new_key()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py
new file mode 100644
index 00000000..e326711d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_hooks.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_hooks
+short_description: Manages GitHub service hooks.
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.12
+ why: Replaced by more granular modules
+ alternative: Use M(community.general.github_webhook) and M(community.general.github_webhook_info) instead.
+description:
+ - Adds service hooks and removes service hooks that have an error status.
+options:
+ user:
+ description:
+ - GitHub username.
+ required: true
+ oauthkey:
+ description:
+ - The oauth key provided by GitHub. It can be found/generated on GitHub under "Edit Your Profile" >> "Developer settings" >> "Personal Access Tokens"
+ required: true
+ repo:
+ description:
+ - >
+ This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:.
+ Note this is different than the normal repo url.
+ required: true
+ hookurl:
+ description:
+ - When creating a new hook, this is the url that you want GitHub to post to. It is only required when creating a new hook.
+ required: false
+ action:
+ description:
+      - This tells the github_hooks module what you want it to do.
+ required: true
+ choices: [ "create", "cleanall", "list", "clean504" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ type: bool
+ content_type:
+ description:
+ - Content type to use for requests made to the webhook
+ required: false
+ default: 'json'
+ choices: ['json', 'form']
+
+author: "Phillip Gentry, CX Inc (@pcgentry)"
+'''
+
+EXAMPLES = '''
+- name: Create a new service hook ignoring duplicates
+ community.general.github_hooks:
+ action: create
+ hookurl: http://11.111.111.111:2222
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+
+# Cleaning all hooks for this repo that had an error on the last update.
+# Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+- name: Clean all hooks
+ community.general.github_hooks:
+ action: cleanall
+ user: '{{ gituser }}'
+ oauthkey: '{{ oauthkey }}'
+ repo: '{{ repo }}'
+ delegate_to: localhost
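+
+# Illustrative sketch (not in the upstream examples): list the existing hooks
+# and inspect the raw API response.
+- name: List hooks for this repo
+  community.general.github_hooks:
+    action: list
+    user: '{{ gituser }}'
+    oauthkey: '{{ oauthkey }}'
+    repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
+  register: hooks_result
+  delegate_to: localhost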
+'''
+
+import json
+import base64
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_bytes, to_text
+
+
+def request(module, url, user, oauthkey, data='', method='GET'):
+    # Strip stray newlines from the credentials before base64-encoding them, and
+    # decode the result back to text so it can be embedded in the header string.
+    auth = base64.b64encode(to_bytes(('%s:%s' % (user, oauthkey)).replace('\n', '')))
+    headers = {
+        'Authorization': 'Basic %s' % to_text(auth),
+    }
+ response, info = fetch_url(module, url, headers=headers, data=data, method=method)
+ return response, info
+
+
+def _list(module, oauthkey, repo, user):
+ url = "%s/hooks" % repo
+ response, info = request(module, url, user, oauthkey)
+ if info['status'] != 200:
+ return False, ''
+ else:
+ return False, response.read()
+
+
+def _clean504(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] == 504:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _cleanall(module, oauthkey, repo, user):
+ current_hooks = _list(module, oauthkey, repo, user)[1]
+ decoded = json.loads(current_hooks)
+
+ for hook in decoded:
+ if hook['last_response']['code'] != 200:
+ _delete(module, oauthkey, repo, user, hook['id'])
+
+ return 0, current_hooks
+
+
+def _create(module, hookurl, oauthkey, repo, user, content_type):
+ url = "%s/hooks" % repo
+ values = {
+ "active": True,
+ "name": "web",
+ "config": {
+ "url": "%s" % hookurl,
+ "content_type": "%s" % content_type
+ }
+ }
+ data = json.dumps(values)
+ response, info = request(module, url, user, oauthkey, data=data, method='POST')
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+
+def _delete(module, oauthkey, repo, user, hookid):
+ url = "%s/hooks/%s" % (repo, hookid)
+ response, info = request(module, url, user, oauthkey, method='DELETE')
+ return response.read()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(required=True, choices=['list', 'clean504', 'cleanall', 'create']),
+ hookurl=dict(required=False),
+ oauthkey=dict(required=True, no_log=True),
+ repo=dict(required=True),
+ user=dict(required=True),
+ validate_certs=dict(default=True, type='bool'),
+ content_type=dict(default='json', choices=['json', 'form']),
+ )
+ )
+
+ action = module.params['action']
+ hookurl = module.params['hookurl']
+ oauthkey = module.params['oauthkey']
+ repo = module.params['repo']
+ user = module.params['user']
+ content_type = module.params['content_type']
+
+ if action == "list":
+ (rc, out) = _list(module, oauthkey, repo, user)
+
+ if action == "clean504":
+ (rc, out) = _clean504(module, oauthkey, repo, user)
+
+ if action == "cleanall":
+ (rc, out) = _cleanall(module, oauthkey, repo, user)
+
+ if action == "create":
+ (rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py
new file mode 100644
index 00000000..9c4b558b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_issue.py
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue.
+description:
+ - View GitHub issue for a given repository and organization.
+options:
+ repo:
+ description:
+ - Name of repository from which issue needs to be retrieved.
+ required: true
+ organization:
+ description:
+ - Name of the GitHub organization in which the repository is hosted.
+ required: true
+ issue:
+ description:
+ - Issue number for which information is required.
+ required: true
+ action:
+ description:
+ - Get various details about issue depending upon action specified.
+ default: 'get_status'
+ choices:
+ - 'get_status'
+author:
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+RETURN = '''
+get_status:
+ description: State of the GitHub issue
+ type: str
+ returned: success
+ sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+ community.general.github_issue:
+ organization: ansible
+ repo: ansible
+ issue: 23642
+ action: get_status
+ register: r
+
+- name: Take action depending upon issue status
+ ansible.builtin.debug:
+ msg: Do something when issue 23642 is open
+ when: r.issue_status == 'open'
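+
+# Illustrative sketch (not in the upstream examples): abort the play when the
+# issue has already been closed.
+- name: Stop if issue 23642 has been closed
+  ansible.builtin.fail:
+    msg: Issue 23642 is closed, aborting
+  when: r.issue_status == 'closed'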
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ organization=dict(required=True),
+ repo=dict(required=True),
+ issue=dict(type='int', required=True),
+ action=dict(choices=['get_status'], default='get_status'),
+ ),
+ supports_check_mode=True,
+ )
+
+ organization = module.params['organization']
+ repo = module.params['repo']
+ issue = module.params['issue']
+ action = module.params['action']
+
+ result = dict()
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+
+ url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+ response, info = fetch_url(module, url, headers=headers)
+ if not (200 <= info['status'] < 400):
+ if info['status'] == 404:
+ module.fail_json(msg="Failed to find issue %s" % issue)
+ module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+ gh_obj = json.loads(response.read())
+
+ if action == 'get_status' or action is None:
+ if module.check_mode:
+ result.update(changed=True)
+ else:
+ result.update(changed=True, issue_status=gh_obj['state'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py
new file mode 100644
index 00000000..415065f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_key.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys.
+description:
+ - Creates, removes, or updates GitHub access keys.
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ name:
+ description:
+ - SSH key name
+ required: true
+ pubkey:
+ description:
+ - SSH public key value. Required when C(state=present).
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ force:
+ description:
+ - The default is C(yes), which will replace the existing remote key
+ if it's different than C(pubkey). If C(no), the key will only be
+ set if no key with the given C(name) exists.
+ type: bool
+ default: 'yes'
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': False}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
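+
+# Illustrative sketch (not in the upstream examples): remove a previously
+# authorized key by its name.
+- name: Remove key from GitHub
+  local_action:
+    module: github_key
+    name: Access Key for Some Machine
+    token: '{{ github_access_token }}'
+    state: absent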
+'''
+
+
+import json
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+ msg=(" failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ result = []
+ while url:
+ r = session.request('GET', url)
+ result.extend(r.json())
+ url = r.links().get('next')
+ return result
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(module, session, name, pubkey, force, check_mode):
+ all_keys = get_all_keys(session)
+ matching_keys = [k for k in all_keys if k['title'] == name]
+ deleted_keys = []
+
+ new_signature = pubkey.split(' ')[1]
+ for key in all_keys:
+ existing_signature = key['key'].split(' ')[1]
+ if new_signature == existing_signature and key['title'] != name:
+ module.fail_json(msg=(
+ "another key with the same content is already registered "
+ "under the name |{0}|").format(key['title']))
+
+ if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(module, session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py
new file mode 100644
index 00000000..5372d6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_release.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Team
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+ - Fetch metadata about GitHub Releases
+options:
+ token:
+ description:
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ user:
+ description:
+ - The GitHub account that owns the repository
+ required: true
+ password:
+ description:
+ - The GitHub account password for the user. Mutually exclusive with C(token).
+ repo:
+ description:
+ - Repository name
+ required: true
+ action:
+ description:
+ - Action to perform
+ required: true
+ choices: [ 'latest_release', 'create_release' ]
+ tag:
+ description:
+      - Tag name when creating a release. Required when I(action) is set to C(create_release).
+ target:
+ description:
+ - Target of release when creating a release
+ name:
+ description:
+ - Name of release when creating a release
+ body:
+ description:
+ - Description of the release when creating a release
+ draft:
+ description:
+ - Sets if the release is a draft or not. (boolean)
+ type: 'bool'
+ default: 'no'
+ prerelease:
+ description:
+ - Sets if the release is a prerelease or not. (boolean)
+ type: bool
+ default: 'no'
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+ community.general.github_release:
+ user: ansible
+ repo: ansible
+ action: latest_release
+
+- name: Get latest release of testuser/testrepo
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+
+- name: Get latest release of test repo using username and password. Ansible 2.4.
+ community.general.github_release:
+ user: testuser
+ password: secret123
+ repo: testrepo
+ action: latest_release
+
+- name: Create a new release
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: create_release
+ tag: test
+ target: master
+ name: My Release
+ body: Some description
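+
+# Illustrative sketch (not in the upstream examples): publish a draft
+# prerelease using the documented draft and prerelease flags.
+- name: Create a draft prerelease
+  community.general.github_release:
+    token: tokenabc1234567890
+    user: testuser
+    repo: testrepo
+    action: create_release
+    tag: v2.0.0-rc1
+    target: master
+    name: Release candidate
+    body: Candidate build for testing
+    draft: yes
+    prerelease: yes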
+
+'''
+
+RETURN = '''
+create_release:
+ description:
+ - Version of the created release
+ - "For Ansible version 2.5 and later, if specified release version already exists, then State is unchanged"
+ - "For Ansible versions prior to 2.5, if specified release version already exists, then State is skipped"
+ type: str
+ returned: success
+ sample: 1.1.0
+
+latest_release:
+ description: Version of the latest release
+ type: str
+ returned: success
+ sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ action=dict(
+ required=True, choices=['latest_release', 'create_release']),
+ tag=dict(type='str'),
+ target=dict(type='str'),
+ name=dict(type='str'),
+ body=dict(type='str'),
+ draft=dict(type='bool', default=False),
+ prerelease=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('password', 'token'),),
+ required_if=[('action', 'create_release', ['tag']),
+ ('action', 'create_release', ['password', 'token'], True)],
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+ exception=GITHUB_IMP_ERR)
+
+ repo = module.params['repo']
+ user = module.params['user']
+ password = module.params['password']
+ login_token = module.params['token']
+ action = module.params['action']
+ tag = module.params.get('tag')
+ target = module.params.get('target')
+ name = module.params.get('name')
+ body = module.params.get('body')
+ draft = module.params.get('draft')
+ prerelease = module.params.get('prerelease')
+
+ # login to github
+ try:
+ if password:
+ gh_obj = github3.login(user, password=password)
+ elif login_token:
+ gh_obj = github3.login(token=login_token)
+ else:
+ gh_obj = github3.GitHub()
+
+ # test if we're actually logged in
+ if password or login_token:
+ gh_obj.me()
+ except github3.exceptions.AuthenticationFailed as e:
+ module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
+
+ repository = gh_obj.repository(user, repo)
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+ if action == 'create_release':
+ release_exists = repository.release_from_tag(tag)
+ if release_exists:
+ module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
+
+ release = repository.create_release(
+ tag, target, name, body, draft, prerelease)
+ if release:
+ module.exit_json(changed=True, tag=release.tag_name)
+ else:
+ module.exit_json(changed=False, tag=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py
new file mode 100644
index 00000000..ac153689
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook
+short_description: Manage GitHub webhooks
+description:
+ - "Create and delete GitHub webhooks"
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ required: true
+ aliases:
+ - repo
+ url:
+ description:
+ - URL to which payloads will be delivered
+ required: true
+ content_type:
+ description:
+ - The media type used to serialize the payloads
+ required: false
+ choices: [ form, json ]
+ default: form
+ secret:
+ description:
+ - The shared secret between GitHub and the payload URL.
+ required: false
+ insecure_ssl:
+ description:
+ - >
+ Flag to indicate that GitHub should skip SSL verification when calling
+ the hook.
+ required: false
+ type: bool
+ default: false
+ events:
+ description:
+ - >
+ A list of GitHub events the hook is triggered for. Events are listed at
+ U(https://developer.github.com/v3/activity/events/types/). Required
+ unless C(state) is C(absent)
+ required: false
+ type: list
+ elements: str
+ active:
+ description:
+ - Whether or not the hook is active
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether the hook should be present or absent
+ required: false
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: Create a new webhook that triggers on push (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ events:
+ - push
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+
+- name: Create a new webhook in a GitHub Enterprise installation with multiple event triggers (token auth)
+ community.general.github_webhook:
+ repository: myorg/myrepo
+ url: https://jenkins.example.com/ghprbhook/
+ content_type: json
+ secret: "{{ github_shared_secret }}"
+ insecure_ssl: True
+ events:
+ - issue_comment
+ - pull_request
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com
+
+- name: Delete a webhook (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ state: absent
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+'''
+
+RETURN = '''
+---
+hook_id:
+ description: The GitHub ID of the hook created/updated
+ returned: when state is 'present'
+ type: int
+ sample: 6206
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
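+# Build the webhook "config" payload from the module parameters; insecure_ssl is
+# passed to GitHub as the string "1" or "0" rather than a boolean.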
+def _create_hook_config(module):
+ return {
+ "url": module.params["url"],
+ "content_type": module.params["content_type"],
+ "secret": module.params.get("secret"),
+ "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
+ }
+
+
+def create_hook(repo, module):
+ config = _create_hook_config(module)
+ try:
+ hook = repo.create_hook(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to create hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return True, data
+
+
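+# PyGithub's Hook.update() refreshes the object from the API and returns whether its
+# data changed; calling it again after edit() is how a modification is detected here.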
+def update_hook(repo, hook, module):
+ config = _create_hook_config(module)
+ try:
+ hook.update()
+ hook.edit(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+
+ changed = hook.update()
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return changed, data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=['repo']),
+ url=dict(type='str', required=True),
+ content_type=dict(
+ type='str',
+ choices=('json', 'form'),
+ required=False,
+ default='form'),
+ secret=dict(type='str', required=False, no_log=True),
+ insecure_ssl=dict(type='bool', required=False, default=False),
+ events=dict(type='list', elements='str', required=False),
+ active=dict(type='bool', required=False, default=True),
+ state=dict(
+ type='str',
+ required=False,
+ choices=('absent', 'present'),
+ default='present'),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'),),
+ required_one_of=(("password", "token"),),
+ required_if=(("state", "present", ("events",)),),
+ )
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
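+ # Look for an existing hook with the same payload URL; the for/else leaves
+ # hook as None when no matching hook is found.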
+ hook = None
+ try:
+ for hook in repo.get_hooks():
+ if hook.config.get("url") == module.params["url"]:
+ break
+ else:
+ hook = None
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
+ module.params["repository"], to_native(err)))
+
+ changed = False
+ data = {}
+ if hook is None and module.params["state"] == "present":
+ changed, data = create_hook(repo, module)
+ elif hook is not None and module.params["state"] == "absent":
+ try:
+ hook.delete()
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to delete hook from repository %s: %s" % (
+ repo.full_name, to_native(err)))
+ else:
+ changed = True
+ elif hook is not None and module.params["state"] == "present":
+ changed, data = update_hook(repo, hook, module)
+ # else, there is no hook and we want there to be no hook
+
+ module.exit_json(changed=changed, **data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py
new file mode 100644
index 00000000..f99a0a03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/github/github_webhook_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+#
+# Copyright: (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook_info
+short_description: Query information about GitHub webhooks
+description:
+ - "Query information about GitHub webhooks"
+ - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "PyGithub >= 1.3.5"
+options:
+ repository:
+ description:
+ - Full name of the repository to list hooks for
+ required: true
+ aliases:
+ - repo
+ user:
+ description:
+ - User to authenticate to GitHub as
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: List hooks for a repository (password auth)
+ community.general.github_webhook_info:
+ repository: ansible/ansible
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+ register: ansible_webhooks
+
+- name: List hooks for a repository on GitHub Enterprise (token auth)
+ community.general.github_webhook_info:
+ repository: myorg/myrepo
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com/api/v3/
+ register: myrepo_webhooks
+'''
+
+RETURN = '''
+---
+hooks:
+ description: A list of hooks that exist for the repo
+ returned: always
+ type: list
+ sample: >
+ [{"has_shared_secret": true,
+ "url": "https://jenkins.example.com/ghprbhook/",
+ "events": ["issue_comment", "pull_request"],
+ "insecure_ssl": "1",
+ "content_type": "json",
+ "active": true,
+ "id": 6206,
+ "last_response": {"status": "active", "message": "OK", "code": 200}}]
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
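+# Flatten a PyGithub Hook object into a plain dict for Ansible, replacing the shared
+# secret (if any) with a has_shared_secret flag so the secret itself is never returned.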
+def _munge_hook(hook_obj):
+ retval = {
+ "active": hook_obj.active,
+ "events": hook_obj.events,
+ "id": hook_obj.id,
+ "url": hook_obj.url,
+ }
+ retval.update(hook_obj.config)
+ retval["has_shared_secret"] = "secret" in retval
+ if "secret" in retval:
+ del retval["secret"]
+
+ retval["last_response"] = hook_obj.last_response.raw_data
+ return retval
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=["repo"]),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'), ),
+ required_one_of=(("password", "token"), ),
+ supports_check_mode=True)
+ if module._name in ('github_webhook_facts', 'community.general.github_webhook_facts'):
+ module.deprecate("The 'github_webhook_facts' module has been renamed to 'github_webhook_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ try:
+ hooks = [_munge_hook(h) for h in repo.get_hooks()]
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to get hooks from repository %s: %s" %
+ (module.params["repository"], to_native(err)),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, hooks=hooks)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
new file mode 100644
index 00000000..c66a6f9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_deploy_key.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys.
+description:
+ - Adds, updates and removes project deploy keys
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - Id or Full path of project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+ - Deploy key
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: no
+ state:
+ description:
+ - When C(present), the deploy key is added to the project if it doesn't exist.
+ - When C(absent) it will be removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Update the above deploy key to add push access"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ can_push: yes
+
+- name: "Remove the previous deploy key from the project"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ state: absent
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: key is already in use"
+
+deploy_key:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabDeployKey(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.deployKeyObject = None
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ @param key_key String of the key
+ @param options Deploy key options (currently only can_push)
+ '''
+ def createOrUpdateDeployKey(self, project, key_title, key_key, options):
+ changed = False
+
+ # existsDeployKey() in main() has already populated self.deployKeyObject when the key exists
+ if self.deployKeyObject is None:
+ deployKey = self.createDeployKey(project, {
+ 'title': key_title,
+ 'key': key_key,
+ 'can_push': options['can_push']})
+ changed = True
+ else:
+ changed, deployKey = self.updateDeployKey(self.deployKeyObject, {
+ 'can_push': options['can_push']})
+
+ self.deployKeyObject = deployKey
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+ try:
+ deployKey.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the deployKey
+ '''
+ def createDeployKey(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ deployKey = project.keys.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+ return deployKey
+
+ '''
+ @param deployKey Deploy Key Object
+ @param arguments Attributes of the deployKey
+ '''
+ def updateDeployKey(self, deployKey, arguments):
+ changed = False
+
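+ # Only flag a change when an attribute actually differs; the save() happens in createOrUpdateDeployKey().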
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(deployKey, arg_key) != arguments[arg_key]:
+ setattr(deployKey, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, deployKey)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def findDeployKey(self, project, key_title):
+ deployKeys = project.keys.list()
+ for deployKey in deployKeys:
+ if (deployKey.title == key_title):
+ return deployKey
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def existsDeployKey(self, project, key_title):
+ # When the deploy key exists, the object is stored in self.deployKeyObject.
+ deployKey = self.findDeployKey(project, key_title)
+ if deployKey:
+ self.deployKeyObject = deployKey
+ return True
+ return False
+
+ def deleteDeployKey(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deployKeyObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
+
+ deployKey_exists = gitlab_deploy_key.existsDeployKey(project, key_title)
+
+ if state == 'absent':
+ if deployKey_exists:
+ gitlab_deploy_key.deleteDeployKey()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_deploy_key.createOrUpdateDeployKey(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deployKeyObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py
new file mode 100644
index 00000000..0c612733
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes GitLab Groups
+description:
+ - When the group does not exist in GitLab, it will be created.
+ - When the group does exist and state=absent, the group will be deleted.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the group you want to create; this will be api_url/group_path.
+ - If not supplied, the group_name will be used.
+ type: str
+ description:
+ description:
+ - A description for the group.
+ type: str
+ state:
+ description:
+ - Create or delete group.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ parent:
+ description:
+ - Allows you to create subgroups.
+ - Id or full path of parent group in the form of group/name.
+ type: str
+ visibility:
+ description:
+ - Default visibility of the group
+ choices: ["private", "internal", "public"]
+ default: private
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_group
+ state: absent
+
+- name: "Create GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+
+# The group will be created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group
+- name: "Create GitLab SubGroup"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+ parent: "super_parent/parent"
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+group:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.groupObject = None
+
+ '''
+ @param group Group object
+ '''
+ def getGroupId(self, group):
+ if group is not None:
+ return group.id
+ return None
+
+ '''
+ @param name Name of the group
+ @param parent Parent group full path
+ @param options Group options
+ '''
+ def createOrUpdateGroup(self, name, parent, options):
+ changed = False
+
+ # existsGroup() in main() has already populated self.groupObject when the group exists
+ if self.groupObject is None:
+ parent_id = self.getGroupId(parent)
+
+ payload = {
+ 'name': name,
+ 'path': options['path'],
+ 'parent_id': parent_id,
+ 'visibility': options['visibility']
+ }
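+ # Only include a description at creation time when one was supplied.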
+ if options.get('description'):
+ payload['description'] = options['description']
+ group = self.createGroup(payload)
+ changed = True
+ else:
+ changed, group = self.updateGroup(self.groupObject, {
+ 'name': name,
+ 'description': options['description'],
+ 'visibility': options['visibility']})
+
+ self.groupObject = group
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+ try:
+ group.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update group: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the group
+ '''
+ def createGroup(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ group = self._gitlab.groups.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+ return group
+
+ '''
+ @param group Group Object
+ @param arguments Attributes of the group
+ '''
+ def updateGroup(self, group, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(group, arg_key) != arguments[arg_key]:
+ setattr(group, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, group)
+
+ def deleteGroup(self):
+ group = self.groupObject
+
+ if len(group.projects.list()) >= 1:
+ self._module.fail_json(
+ msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+ else:
+ if self._module.check_mode:
+ return True
+
+ try:
+ group.delete()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+ '''
+ @param project_identifier Id or complete path of the group, including any parent group path (<parent_path>/<group_path>)
+ '''
+ def existsGroup(self, project_identifier):
+ # When the group exists, the object is stored in self.groupObject.
+ group = findGroup(self._gitlab, project_identifier)
+ if group:
+ self.groupObject = group
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ parent=dict(type='str'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_name = module.params['name']
+ group_path = module.params['path']
+ description = module.params['description']
+ state = module.params['state']
+ parent_identifier = module.params['parent']
+ group_visibility = module.params['visibility']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Define default group_path based on group_name
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ gitlab_group = GitLabGroup(module, gitlab_instance)
+
+ parent_group = None
+ if parent_identifier:
+ parent_group = findGroup(gitlab_instance, parent_identifier)
+ if not parent_group:
+ module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists")
+
+ group_exists = gitlab_group.existsGroup(parent_group.full_path + '/' + group_path)
+ else:
+ group_exists = gitlab_group.existsGroup(group_path)
+
+ if state == 'absent':
+ if group_exists:
+ gitlab_group.deleteGroup()
+ module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+ else:
+ module.exit_json(changed=False, msg="Group deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_group.createOrUpdateGroup(group_name, parent_group, {
+ "path": group_path,
+ "description": description,
+ "visibility": group_visibility}):
+ module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.groupObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py
new file mode 100644
index 00000000..8a3da2a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_members.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_group_members
+short_description: Manage group members on GitLab Server
+description:
+ - This module allows adding and removing members to/from a group, or changing a member's access level in a group on GitLab.
+version_added: '1.2.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - administrator rights on the GitLab server
+extends_documentation_fragment: community.general.auth_basic
+options:
+ api_token:
+ description:
+ - A personal access token to authenticate with the GitLab API.
+ required: true
+ type: str
+ gitlab_group:
+ description:
+ - The name of the GitLab group the member is added to/removed from.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - The username of the member to add to/remove from the GitLab group.
+ required: true
+ type: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ state:
+ description:
+ - State of the member in the group.
+ - On C(present), it adds a user to a GitLab group.
+ - On C(absent), it removes a user from a GitLab group.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - Supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+import traceback
+
+try:
+ import gitlab
+ HAS_PY_GITLAB = True
+except ImportError:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_PY_GITLAB = False
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ # get user id if the user exists
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user)
+ if user_exists:
+ return user_exists[0].id
+
+ # get group id if group exists
+ def get_group_id(self, gitlab_group):
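+ # groups.list(search=...) performs a search; the first matching group's id is used.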
+ group_exists = self._gitlab.groups.list(search=gitlab_group)
+ if group_exists:
+ return group_exists[0].id
+
+ # get all members in a group
+ def get_members_in_a_group(self, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ return group.members.list()
+
+ # check if the user is a member of the group
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a group
+ def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ add_member = group.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ if add_member:
+ return add_member.username
+
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to add member to the Group, Group ID %s: %s" % (gitlab_group_id, e))
+
+ # remove user from a group
+ def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
+ try:
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.delete(gitlab_user_id)
+
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ self._module.fail_json(
+ msg="Failed to remove member from GitLab group, ID %s: %s" % (gitlab_group_id, e))
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a group
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ try:
+ member.access_level = access_level
+ member.save()
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(
+ msg="Failed to update the access level for the member, %s: %s" % (gitlab_user_id, e))
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', required=True, no_log=True),
+ gitlab_group=dict(type='str', required=True),
+ gitlab_user=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', required=False, choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'])
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level']],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PY_GITLAB:
+ module.fail_json(msg=missing_required_lib('python-gitlab', url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR)
+
+ gitlab_group = module.params['gitlab_group']
+ gitlab_user = module.params['gitlab_user']
+ state = module.params['state']
+ access_level = module.params['access_level']
+
+ # convert access level string input to int
+ if access_level:
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS
+ }
+
+ access_level = access_level_int[access_level]
+
+ # connect to gitlab server
+ gl = gitlabAuthentication(module)
+
+ group = GitLabGroup(module, gl)
+
+ gitlab_user_id = group.get_user_id(gitlab_user)
+ gitlab_group_id = group.get_group_id(gitlab_group)
+
+ # group doesn't exist
+ if not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ module.exit_json(changed=False, result="user '%s' not found, and thus also not part of the group" % gitlab_user)
+ else:
+ module.fail_json(msg="user '%s' not found." % gitlab_user)
+
+ members = group.get_members_in_a_group(gitlab_group_id)
+ is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the group
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the group
+ if not module.check_mode:
+ group.add_member_to_group(gitlab_user_id, gitlab_group_id, access_level)
+ module.exit_json(changed=True, result="Successfully added user '%s' to the group." % gitlab_user)
+ # state as absent
+ else:
+ module.exit_json(changed=False, result="User, '%s', is not a member in the group. No change to report" % gitlab_user)
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = group.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == access_level:
+ module.exit_json(changed=False, result="User, '%s', is already a member in the group. No change to report" % gitlab_user)
+ else:
+ # update the access level for the user
+ if not module.check_mode:
+ group.update_user_access_level(members, gitlab_user_id, access_level)
+ module.exit_json(changed=True, result="Successfully updated the access level for the user, '%s'" % gitlab_user)
+ else:
+ # remove the user from the group
+ if not module.check_mode:
+ group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+ module.exit_json(changed=True, result="Successfully removed user, '%s', from the group" % gitlab_user)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
new file mode 100644
index 00000000..dd20a0b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_group_variable.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab groups variables
+version_added: 1.2.0
+description:
+ - Creates a group variable if it does not exist.
+ - When a group variable does exist, its value will be updated when the values are different.
+ - Variables which exist in the GitLab group but are not mentioned in the playbook either
+ stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+ - Florent Madiot (@scodeman)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete group variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ group:
+ description:
+ - The path and name of the group.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), delete all variables which are not mentioned in I(vars).
+ default: false
+ type: bool
+ vars:
+ description:
+ - When an entry is a simple key-value pair, masked and protected are set to false.
+ - When an entry is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control over whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+notes:
+- Supports I(check_mode).
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = r'''
+group_variable:
+ description: Four lists of the variable names which were added, updated, removed, or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabGroupVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.group = self.get_group(module.params['group'])
+ self._module = module
+
+ def get_group(self, group_name):
+ return self.repo.groups.get(group_name)
+
+ def list_all_group_variables(self):
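+ # python-gitlab paginates results; keep fetching pages until an empty page is returned.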
+ page_nb = 1
+ variables = []
+ vars_page = self.group.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.group.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.group.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
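+ # masked, protected or variable_type differ from the desired state, so the
+ # variable is deleted and recreated rather than edited in place.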
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.group.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
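+ # Walk the desired variables: create missing ones, update or delete existing ones.
+ # Anything left in existing_variables afterwards was not mentioned in the task and is
+ # either purged or reported as untouched.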
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+ if not isinstance(var_list[key], (string_types, integer_types, float, dict)):
+ module.fail_json(msg="Value of %s variable must be of type string, integer, float or dict, passed %s" % (key, var_list[key].__class__.__name__))
+
+ for key in var_list:
+
+ if isinstance(var_list[key], (string_types, integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ group=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py
new file mode 100644
index 00000000..bc4b6ecb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_hook.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright: (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks.
+description:
+ - Adds, updates and removes project hooks
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ project:
+ description:
+ - Id or Full path of the project in the form of group/name.
+ required: true
+ type: str
+ hook_url:
+ description:
+ - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+ required: true
+ type: str
+ state:
+ description:
+ - When C(present) the hook will be updated to match the input or created if it doesn't exist.
+ - When C(absent) hook will be deleted if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ push_events:
+ description:
+ - Trigger hook on push events.
+ type: bool
+ default: yes
+ push_events_branch_filter:
+ description:
+ - Branch name or wildcard to trigger hook on push events
+ type: str
+ version_added: '0.2.0'
+ issues_events:
+ description:
+ - Trigger hook on issues events.
+ type: bool
+ default: no
+ merge_requests_events:
+ description:
+ - Trigger hook on merge requests events.
+ type: bool
+ default: no
+ tag_push_events:
+ description:
+ - Trigger hook on tag push events.
+ type: bool
+ default: no
+ note_events:
+ description:
+ - Trigger hook on note events or when someone adds a comment.
+ type: bool
+ default: no
+ job_events:
+ description:
+ - Trigger hook on job events.
+ type: bool
+ default: no
+ pipeline_events:
+ description:
+ - Trigger hook on pipeline events.
+ type: bool
+ default: no
+ wiki_page_events:
+ description:
+ - Trigger hook on wiki events.
+ type: bool
+ default: no
+ hook_validate_certs:
+ description:
+ - Whether GitLab will do SSL verification when triggering the hook.
+ type: bool
+ default: no
+ aliases: [ enable_ssl_verification ]
+ token:
+ description:
+ - Secret token to validate hook messages at the receiver.
+ - If this is present it will always result in a change as it cannot be retrieved from GitLab.
+ - Will show up in the X-GitLab-Token HTTP request header.
+ required: false
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: yes
+ tag_push_events: yes
+ hook_validate_certs: no
+ token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+
+- name: "Delete a hook by numeric project id"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: 10
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+hook:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import re
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findProject, gitlabAuthentication
+
+
+class GitLabHook(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.hookObject = None
+
+ '''
+ @param project Project Object
+ @param hook_url Url to call on event
+ @param options Hook options
+ '''
+ def createOrUpdateHook(self, project, hook_url, options):
+ changed = False
+
+ # existsHook() in main() has already populated self.hookObject when the hook exists
+ if self.hookObject is None:
+ hook = self.createHook(project, {
+ 'url': hook_url,
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+ changed = True
+ else:
+ changed, hook = self.updateHook(self.hookObject, {
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token']})
+
+ self.hookObject = hook
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+ try:
+ hook.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update hook: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the hook
+ '''
+ def createHook(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ hook = project.hooks.create(arguments)
+
+ return hook
+
+ '''
+ @param hook Hook Object
+ @param arguments Attributes of the hook
+ '''
+ def updateHook(self, hook, arguments):
+ changed = False
+
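+ # Flag a change only when an attribute actually differs; the save() happens in createOrUpdateHook().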
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(hook, arg_key) != arguments[arg_key]:
+ setattr(hook, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, hook)
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def findHook(self, project, hook_url):
+ hooks = project.hooks.list()
+ for hook in hooks:
+ if (hook.url == hook_url):
+ return hook
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def existsHook(self, project, hook_url):
+ # When the hook exists, the object is stored in self.hookObject.
+ hook = self.findHook(project, hook_url)
+ if hook:
+ self.hookObject = hook
+ return True
+ return False
+
+ def deleteHook(self):
+ if self._module.check_mode:
+ return True
+
+ return self.hookObject.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ hook_url=dict(type='str', required=True),
+ push_events=dict(type='bool', default=True),
+ push_events_branch_filter=dict(type='str', default=''),
+ issues_events=dict(type='bool', default=False),
+ merge_requests_events=dict(type='bool', default=False),
+ tag_push_events=dict(type='bool', default=False),
+ note_events=dict(type='bool', default=False),
+ job_events=dict(type='bool', default=False),
+ pipeline_events=dict(type='bool', default=False),
+ wiki_page_events=dict(type='bool', default=False),
+ hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
+ token=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token']
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ hook_url = module.params['hook_url']
+ push_events = module.params['push_events']
+ push_events_branch_filter = module.params['push_events_branch_filter']
+ issues_events = module.params['issues_events']
+ merge_requests_events = module.params['merge_requests_events']
+ tag_push_events = module.params['tag_push_events']
+ note_events = module.params['note_events']
+ job_events = module.params['job_events']
+ pipeline_events = module.params['pipeline_events']
+ wiki_page_events = module.params['wiki_page_events']
+ enable_ssl_verification = module.params['hook_validate_certs']
+ hook_token = module.params['token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_hook = GitLabHook(module, gitlab_instance)
+
+ project = findProject(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)
+
+ hook_exists = gitlab_hook.existsHook(project, hook_url)
+
+ if state == 'absent':
+ if hook_exists:
+ gitlab_hook.deleteHook()
+ module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+ else:
+ module.exit_json(changed=False, msg="Hook deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_hook.createOrUpdateHook(project, hook_url, {
+ "push_events": push_events,
+ "push_events_branch_filter": push_events_branch_filter,
+ "issues_events": issues_events,
+ "merge_requests_events": merge_requests_events,
+ "tag_push_events": tag_push_events,
+ "note_events": note_events,
+ "job_events": job_events,
+ "pipeline_events": pipeline_events,
+ "wiki_page_events": wiki_page_events,
+ "enable_ssl_verification": enable_ssl_verification,
+ "token": hook_token}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hookObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
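The attribute-diff pattern used by updateHook above (and repeated in the other GitLab modules in this diff) reduces to the minimal sketch below; `obj` is a hypothetical stand-in for any python-gitlab REST object, and callers push the staged changes afterwards with obj.save():

def apply_attributes(obj, arguments):
    # Compare each desired attribute with the current value and only flag a
    # change when something actually differs; None means "leave as-is".
    changed = False
    for key, value in arguments.items():
        if value is None:
            continue
        if getattr(obj, key) != value:
            setattr(obj, key, value)   # staged locally until obj.save()
            changed = True
    return changed, obj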
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py
new file mode 100644
index 00000000..98631c74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+ - When the project does not exist in GitLab, it will be created.
+ - When the project exists and state=absent, the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ group:
+ description:
+ - Id or the full path of the group to which this project belongs.
+ type: str
+ name:
+ description:
+ - The name of the project.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, name will be used.
+ type: str
+ description:
+ description:
+ - A description for the project.
+ type: str
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ merge_requests_enabled:
+ description:
+ - If merge requests can be made or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ wiki_enabled:
+ description:
+ - If a wiki for this project should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ snippets_enabled:
+ description:
+ - If creating snippets should be available or not.
+ - Possible values are true and false.
+ type: bool
+ default: yes
+ visibility:
+ description:
+ - Private. Project access must be granted explicitly for each user.
+ - Internal. The project can be cloned by any logged in user.
+ - Public. The project can be cloned without any authentication.
+ default: private
+ type: str
+ choices: ["private", "internal", "public"]
+ aliases:
+ - visibility_level
+ import_url:
+ description:
+ - Git repository which will be imported into gitlab.
+ - GitLab server needs read access to this git repository.
+ required: false
+ type: str
+ state:
+ description:
+ - Create or delete project.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ merge_method:
+ description:
+ - What requirements are placed upon merges.
+ - Possible values are C(merge) (merge commit), C(rebase_merge) (merge commit with semi-linear history), and C(ff) (fast-forward merges only).
+ type: str
+ choices: ["ff", "merge", "rebase_merge"]
+ default: merge
+ version_added: "1.0.0"
+'''
+
+EXAMPLES = r'''
+- name: Delete GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ name: my_first_project
+ state: absent
+ delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_project
+ group: ansible
+ issues_enabled: False
+ merge_method: rebase_merge
+ wiki_enabled: True
+ snippets_enabled: True
+ import_url: http://git.example.com/example/lab.git
+ state: present
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API.
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+project:
+ description: API object.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, findProject, gitlabAuthentication
+
+
+class GitLabProject(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.projectObject = None
+
+ '''
+ @param project_name Name of the project
+ @param namespace Namespace Object (User or Group)
+ @param options Options of the project
+ '''
+ def createOrUpdateProject(self, project_name, namespace, options):
+ changed = False
+
+ # Because existsProject() has already been called in main()
+ if self.projectObject is None:
+ project = self.createProject(namespace, {
+ 'name': project_name,
+ 'path': options['path'],
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility'],
+ 'import_url': options['import_url']})
+ changed = True
+ else:
+ changed, project = self.updateProject(self.projectObject, {
+ 'name': project_name,
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility']})
+
+ self.projectObject = project
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+ try:
+ project.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed update project: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param namespace Namespace Object (User or Group)
+ @param arguments Attributes of the project
+ '''
+ def createProject(self, namespace, arguments):
+ if self._module.check_mode:
+ return True
+
+ arguments['namespace_id'] = namespace.id
+ try:
+ project = self._gitlab.projects.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+ return project
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the project
+ '''
+ def updateProject(self, project, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(project, arg_key) != arguments[arg_key]:
+ setattr(project, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, project)
+
+ def deleteProject(self):
+ if self._module.check_mode:
+ return True
+
+ project = self.projectObject
+
+ return project.delete()
+
+ '''
+ @param namespace User/Group object
+ @param path Path of the project
+ '''
+ def existsProject(self, namespace, path):
+ # When project exists, object will be stored in self.projectObject.
+ project = findProject(self._gitlab, namespace.full_path + '/' + path)
+ if project:
+ self.projectObject = project
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ group=dict(type='str'),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ issues_enabled=dict(type='bool', default=True),
+ merge_requests_enabled=dict(type='bool', default=True),
+ merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]),
+ wiki_enabled=dict(type='bool', default=True),
+ snippets_enabled=dict(default=True, type='bool'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
+ import_url=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ )
+
+ group_identifier = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ project_description = module.params['description']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ merge_method = module.params['merge_method']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ visibility = module.params['visibility']
+ import_url = module.params['import_url']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ gitlab_project = GitLabProject(module, gitlab_instance)
+
+ if group_identifier:
+ group = findGroup(gitlab_instance, group_identifier)
+ if group is None:
+ module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier)
+
+ namespace = gitlab_instance.namespaces.get(group.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+ else:
+ user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
+ namespace = gitlab_instance.namespaces.get(user.id)
+ project_exists = gitlab_project.existsProject(namespace, project_path)
+
+ if state == 'absent':
+ if project_exists:
+ gitlab_project.deleteProject()
+ module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+ else:
+ module.exit_json(changed=False, msg="Project deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_project.createOrUpdateProject(project_name, namespace, {
+ "path": project_path,
+ "description": project_description,
+ "issues_enabled": issues_enabled,
+ "merge_requests_enabled": merge_requests_enabled,
+ "merge_method": merge_method,
+ "wiki_enabled": wiki_enabled,
+ "snippets_enabled": snippets_enabled,
+ "visibility": visibility,
+ "import_url": import_url}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
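A condensed sketch of how main() above resolves the namespace a project is created in, assuming an authenticated gitlab.Gitlab instance `gl` and the findGroup() helper from the collection's module_utils; a group identifier wins over the personal namespace of the authenticated user:

from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup

def resolve_namespace(gl, group_identifier):
    if group_identifier:
        group = findGroup(gl, group_identifier)
        if group is None:
            raise ValueError("group %s does not exist" % group_identifier)
        return gl.namespaces.get(group.id)
    # fall back to the personal namespace of the authenticated user
    user = gl.users.list(username=gl.user.username)[0]
    return gl.namespaces.get(user.id)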
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py
new file mode 100644
index 00000000..9803f76b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_project_variable.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab Project Variables
+description:
+ - When a project variable does not exist, it will be created.
+ - When a project variable does exist, its value will be updated when the values are different.
+ - Variables which exist in the GitLab project but are not mentioned in the playbook are left
+ untouched when I(purge) is C(false), or deleted when I(purge) is C(true).
+author:
+ - "Markus Bergholz (@markuman)"
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ state:
+ description:
+ - Create or delete project variable.
+ - Possible values are present and absent.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ required: true
+ type: str
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), all variables which are not mentioned in the task will be deleted.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control about whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
+ default: {}
+ type: dict
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+
+- name: Delete one variable
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = '''
+project_variable:
+ description: Four lists of the variable names which were added, updated, removed, or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ untouched:
+ description: A list of variables which already existed and were left untouched.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: "['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']"
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+
+class GitlabProjectVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.project = self.get_project(module.params['project'])
+ self._module = module
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def list_all_project_variables(self):
+ page_nb = 1
+ variables = []
+ vars_page = self.project.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.project.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, key, value, masked, protected, variable_type):
+ if self._module.check_mode:
+ return
+ return self.project.variables.create({"key": key, "value": value,
+ "masked": masked, "protected": protected,
+ "variable_type": variable_type})
+
+ def update_variable(self, key, var, value, masked, protected, variable_type):
+ if var.value == value and var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ return False
+
+ if self._module.check_mode:
+ return True
+
+ if var.protected == protected and var.masked == masked and var.variable_type == variable_type:
+ var.value = value
+ var.save()
+ return True
+
+ self.delete_variable(key)
+ self.create_variable(key, value, masked, protected, variable_type)
+ return True
+
+ def delete_variable(self, key):
+ if self._module.check_mode:
+ return
+ return self.project.variables.delete(key)
+
+
+def native_python_main(this_gitlab, purge, var_list, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = [x.get_id() for x in gitlab_keys]
+
+ for key in var_list:
+
+ if isinstance(var_list[key], string_types) or isinstance(var_list[key], (integer_types, float)):
+ value = var_list[key]
+ masked = False
+ protected = False
+ variable_type = 'env_var'
+ elif isinstance(var_list[key], dict):
+ value = var_list[key].get('value')
+ masked = var_list[key].get('masked', False)
+ protected = var_list[key].get('protected', False)
+ variable_type = var_list[key].get('variable_type', 'env_var')
+ else:
+ module.fail_json(msg="value must be of type string, integer or dict")
+
+ if key in existing_variables:
+ index = existing_variables.index(key)
+ existing_variables[index] = None
+
+ if state == 'present':
+ single_change = this_gitlab.update_variable(key,
+ gitlab_keys[index],
+ value, masked,
+ protected,
+ variable_type)
+ change = single_change or change
+ if single_change:
+ return_value['updated'].append(key)
+ else:
+ return_value['untouched'].append(key)
+
+ elif state == 'absent':
+ this_gitlab.delete_variable(key)
+ change = True
+ return_value['removed'].append(key)
+
+ elif key not in existing_variables and state == 'present':
+ this_gitlab.create_variable(key, value, masked, protected, variable_type)
+ change = True
+ return_value['added'].append(key)
+
+ existing_variables = list(filter(None, existing_variables))
+ if purge:
+ for item in existing_variables:
+ this_gitlab.delete_variable(item)
+ change = True
+ return_value['removed'].append(item)
+ else:
+ return_value['untouched'].extend(existing_variables)
+
+ return change, return_value
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(
+ api_token=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"])
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True
+ )
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
+
+ change, return_value = native_python_main(this_gitlab, purge, var_list, state, module)
+
+ module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
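The purge bookkeeping in native_python_main() above boils down to the sketch below: keys handled in the loop are blanked out of the list of existing variables, and whatever is left over is either deleted (purge=true) or reported as untouched (purge=false). The helper name is illustrative only:

def split_leftovers(existing_keys, handled_keys, purge):
    # Variables present in GitLab but not mentioned in the task.
    leftovers = [k for k in existing_keys if k not in handled_keys]
    if purge:
        return leftovers, []   # (removed, untouched)
    return [], leftovers

# e.g. split_leftovers(['A', 'B', 'C'], ['B'], purge=True) -> (['A', 'C'], [])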
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
new file mode 100644
index 00000000..70384914
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_runner.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2018, Samy Coenen <samy.coenen@nubera.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners.
+description:
+ - Register, update and delete runners with the GitLab API.
+ - All operations are performed using the GitLab API v4.
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+ U(https://$GITLAB_URL/profile/personal_access_tokens).
+ - A valid registration token is required for registering a new runner.
+ To create shared runners, you need to ask your administrator to give you this token.
+ It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+ - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required.
+ - Runners need to have unique descriptions.
+author:
+ - Samy Coenen (@SamyCoenen)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - Your private token to interact with the GitLab API.
+ type: str
+ description:
+ description:
+ - The unique name of the runner.
+ required: True
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - Make sure that the runner with the same name exists with the same configuration, or delete the runner with the same name.
+ required: False
+ default: present
+ choices: ["present", "absent"]
+ type: str
+ registration_token:
+ description:
+ - The registration token is used to register new runners.
+ required: True
+ type: str
+ active:
+ description:
+ - Define if the runner is immediately active after creation.
+ required: False
+ default: yes
+ type: bool
+ locked:
+ description:
+ - Determines if the runner is locked or not.
+ required: False
+ default: False
+ type: bool
+ access_level:
+ description:
+ - Determines if a runner can pick up jobs from protected branches.
+ required: False
+ default: ref_protected
+ choices: ["ref_protected", "not_protected"]
+ type: str
+ maximum_timeout:
+ description:
+ - The maximum timeout that a runner has to pick up a specific job.
+ required: False
+ default: 3600
+ type: int
+ run_untagged:
+ description:
+ - Run untagged jobs or not.
+ required: False
+ default: yes
+ type: bool
+ tag_list:
+ description: The tags that apply to the runner.
+ required: False
+ default: []
+ type: list
+'''
+
+EXAMPLES = '''
+- name: "Register runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: Docker Machine t1
+ state: present
+ active: True
+ tag_list: ['docker']
+ run_untagged: False
+ locked: False
+
+- name: "Delete runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+runner:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import gitlabAuthentication
+
+try:
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+
+class GitLabRunner(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.runnerObject = None
+
+ def createOrUpdateRunner(self, description, options):
+ changed = False
+
+ # Because existsRunner() has already been called in main()
+ if self.runnerObject is None:
+ runner = self.createRunner({
+ 'description': description,
+ 'active': options['active'],
+ 'token': options['registration_token'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'tag_list': options['tag_list']})
+ changed = True
+ else:
+ changed, runner = self.updateRunner(self.runnerObject, {
+ 'active': options['active'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'access_level': options['access_level'],
+ 'tag_list': options['tag_list']})
+
+ self.runnerObject = runner
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the runner
+ '''
+ def createRunner(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ runner = self._gitlab.runners.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
+
+ return runner
+
+ '''
+ @param runner Runner object
+ @param arguments Attributes of the runner
+ '''
+ def updateRunner(self, runner, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if isinstance(arguments[arg_key], list):
+ list1 = getattr(runner, arg_key)
+ list1.sort()
+ list2 = arguments[arg_key]
+ list2.sort()
+ if cmp(list1, list2):
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+ else:
+ if getattr(runner, arg_key) != arguments[arg_key]:
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, runner)
+
+ '''
+ @param description Description of the runner
+ '''
+ def findRunner(self, description):
+ runners = self._gitlab.runners.all(as_list=False)
+ for runner in runners:
+ if (runner['description'] == description):
+ return self._gitlab.runners.get(runner['id'])
+
+ '''
+ @param description Description of the runner
+ '''
+ def existsRunner(self, description):
+ # When runner exists, object will be stored in self.runnerObject.
+ runner = self.findRunner(description)
+
+ if runner:
+ self.runnerObject = runner
+ return True
+ return False
+
+ def deleteRunner(self):
+ if self._module.check_mode:
+ return True
+
+ runner = self.runnerObject
+
+ return runner.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ description=dict(type='str', required=True, aliases=["name"]),
+ active=dict(type='bool', default=True),
+ tag_list=dict(type='list', default=[]),
+ run_untagged=dict(type='bool', default=True),
+ locked=dict(type='bool', default=False),
+ access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]),
+ maximum_timeout=dict(type='int', default=3600),
+ registration_token=dict(type='str', required=True, no_log=True),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token'],
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ runner_description = module.params['description']
+ runner_active = module.params['active']
+ tag_list = module.params['tag_list']
+ run_untagged = module.params['run_untagged']
+ runner_locked = module.params['locked']
+ access_level = module.params['access_level']
+ maximum_timeout = module.params['maximum_timeout']
+ registration_token = module.params['registration_token']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_runner = GitLabRunner(module, gitlab_instance)
+ runner_exists = gitlab_runner.existsRunner(runner_description)
+
+ if state == 'absent':
+ if runner_exists:
+ gitlab_runner.deleteRunner()
+ module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, msg="Runner deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_runner.createOrUpdateRunner(runner_description, {
+ "active": runner_active,
+ "tag_list": tag_list,
+ "run_untagged": run_untagged,
+ "locked": runner_locked,
+ "access_level": access_level,
+ "maximum_timeout": maximum_timeout,
+ "registration_token": registration_token}):
+ module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs,
+ msg="Successfully created or updated the runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs,
+ msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+ main()
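A small sketch of how updateRunner() above treats list-valued options such as tag_list: both sides are sorted before being compared via the cmp() shim, so a mere ordering difference in the playbook does not register as a change. The function name is illustrative only:

def lists_differ(current, desired):
    a, b = sorted(current), sorted(desired)
    return (a > b) - (a < b) != 0   # same comparison the cmp() shim performs

# lists_differ(['docker', 'linux'], ['linux', 'docker']) -> False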
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py
new file mode 100644
index 00000000..1e8ee65a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/gitlab/gitlab_user.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright: (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+ - When the user does not exist in GitLab, it will be created.
+ - When the user exists and state=absent, the user will be deleted.
+ - When the user exists and state=blocked, the user will be blocked.
+ - When changes are made to user, the user will be updated.
+notes:
+ - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+- community.general.auth_basic
+
+options:
+ api_token:
+ description:
+ - GitLab token for logging in.
+ type: str
+ name:
+ description:
+ - Name of the user you want to create.
+ - Required only if C(state) is set to C(present).
+ type: str
+ username:
+ description:
+ - The username of the user.
+ required: true
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - GitLab server enforces a minimum password length of 8, so set this value to 8 or more characters.
+ - Required only if C(state) is set to C(present).
+ type: str
+ email:
+ description:
+ - The email that belongs to the user.
+ - Required only if C(state) is set to C(present).
+ type: str
+ sshkey_name:
+ description:
+ - The name of the SSH key.
+ type: str
+ sshkey_file:
+ description:
+ - The ssh key itself.
+ type: str
+ group:
+ description:
+ - Id or full path of the parent group in the form of group/name.
+ - Add the user as a member of this group.
+ type: str
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master (alias for maintainer)
+ - maintainer
+ - owner
+ default: guest
+ type: str
+ choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
+ state:
+ description:
+ - Create, delete or block a user.
+ default: present
+ type: str
+ choices: ["present", "absent", "blocked", "unblocked"]
+ confirm:
+ description:
+ - Require confirmation.
+ type: bool
+ default: yes
+ isadmin:
+ description:
+ - Grant admin privileges to the user.
+ type: bool
+ default: no
+ external:
+ description:
+ - Define external parameter for this user.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: absent
+
+- name: "Create GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: True
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Block GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: blocked
+
+- name: "Unblock GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: False
+ username: myusername
+ state: unblocked
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+user:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import findGroup, gitlabAuthentication
+
+
+class GitLabUser(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.userObject = None
+ self.ACCESS_LEVEL = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'master': gitlab.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS}
+
+ '''
+ @param username Username of the user
+ @param options User options
+ '''
+ def createOrUpdateUser(self, username, options):
+ changed = False
+ potentionally_changed = False
+
+ # Because existsUser() has already been called in main()
+ if self.userObject is None:
+ user = self.createUser({
+ 'name': options['name'],
+ 'username': username,
+ 'password': options['password'],
+ 'email': options['email'],
+ 'skip_confirmation': not options['confirm'],
+ 'admin': options['isadmin'],
+ 'external': options['external']})
+ changed = True
+ else:
+ changed, user = self.updateUser(
+ self.userObject, {
+ # add "normal" parameters here, put uncheckable
+ # params in the dict below
+ 'name': {'value': options['name']},
+ 'email': {'value': options['email']},
+
+ # note: for some attributes like this one the key
+ # from reading back from server is unfortunately
+ # different to the one needed for pushing/writing,
+ # in that case use the optional setter key
+ 'is_admin': {
+ 'value': options['isadmin'], 'setter': 'admin'
+ },
+ 'external': {'value': options['external']},
+ },
+ {
+ # put "uncheckable" params here, this means params
+ # which the gitlab does accept for setting but does
+ # not return any information about it
+ 'skip_reconfirmation': {'value': not options['confirm']},
+ 'password': {'value': options['password']},
+ }
+ )
+
+ # note: as we unfortunately have some uncheckable parameters
+ # where it is not possible to determine if the update
+ # changed something or not, we must assume here that a
+ # change happened and that a user object update is needed
+ potentionally_changed = True
+
+ # Assign ssh keys
+ if options['sshkey_name'] and options['sshkey_file']:
+ key_changed = self.addSshKeyToUser(user, {
+ 'name': options['sshkey_name'],
+ 'file': options['sshkey_file']})
+ changed = changed or key_changed
+
+ # Assign group
+ if options['group_path']:
+ group_changed = self.assignUserToGroup(user, options['group_path'], options['access_level'])
+ changed = changed or group_changed
+
+ self.userObject = user
+ if (changed or potentionally_changed) and not self._module.check_mode:
+ try:
+ user.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
+
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
+ return True
+ else:
+ return False
+
+ '''
+ @param user User object
+ '''
+ def getUserId(self, user):
+ if user is not None:
+ return user.id
+ return None
+
+ '''
+ @param user User object
+ @param sshkey_name Name of the ssh key
+ '''
+ def sshKeyExists(self, user, sshkey_name):
+ keyList = map(lambda k: k.title, user.keys.list())
+
+ return sshkey_name in keyList
+
+ '''
+ @param user User object
+ @param sshkey Dict containing sshkey infos {"name": "", "file": ""}
+ '''
+ def addSshKeyToUser(self, user, sshkey):
+ if not self.sshKeyExists(user, sshkey['name']):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user.keys.create({
+ 'title': sshkey['name'],
+ 'key': sshkey['file']})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to find
+ '''
+ def findMember(self, group, user_id):
+ try:
+ member = group.members.get(user_id)
+ except gitlab.exceptions.GitlabGetError:
+ return None
+ return member
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ '''
+ def memberExists(self, group, user_id):
+ member = self.findMember(group, user_id)
+
+ return member is not None
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ @param access_level GitLab access_level to check
+ '''
+ def memberAsGoodAccessLevel(self, group, user_id, access_level):
+ member = self.findMember(group, user_id)
+
+ return member.access_level == access_level
+
+ '''
+ @param user User object
+ @param group_path Complete path of the Group including parent group path. <parent_path>/<group_path>
+ @param access_level GitLab access_level to assign
+ '''
+ def assignUserToGroup(self, user, group_identifier, access_level):
+ group = findGroup(self._gitlab, group_identifier)
+
+ if self._module.check_mode:
+ return True
+
+ if group is None:
+ return False
+
+ if self.memberExists(group, self.getUserId(user)):
+ member = self.findMember(group, self.getUserId(user))
+ if not self.memberAsGoodAccessLevel(group, member.id, self.ACCESS_LEVEL[access_level]):
+ member.access_level = self.ACCESS_LEVEL[access_level]
+ member.save()
+ return True
+ else:
+ try:
+ group.members.create({
+ 'user_id': self.getUserId(user),
+ 'access_level': self.ACCESS_LEVEL[access_level]})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param user User object
+ @param arguments User attributes
+ '''
+ def updateUser(self, user, arguments, uncheckable_args):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ av = arg_value['value']
+
+ if av is not None:
+ if getattr(user, arg_key) != av:
+ setattr(user, arg_value.get('setter', arg_key), av)
+ changed = True
+
+ for arg_key, arg_value in uncheckable_args.items():
+ av = arg_value['value']
+
+ if av is not None:
+ setattr(user, arg_value.get('setter', arg_key), av)
+
+ return (changed, user)
+
+ '''
+ @param arguments User attributes
+ '''
+ def createUser(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ user = self._gitlab.users.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
+
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def findUser(self, username):
+ users = self._gitlab.users.list(search=username)
+ for user in users:
+ if (user.username == username):
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def existsUser(self, username):
+ # When user exists, object will be stored in self.userObject.
+ user = self.findUser(username)
+ if user:
+ self.userObject = user
+ return True
+ return False
+
+ '''
+ @param username Username of the user
+ '''
+ def isActive(self, username):
+ user = self.findUser(username)
+ return user.attributes['state'] == 'active'
+
+ def deleteUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.delete()
+
+ def blockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.block()
+
+ def unblockUser(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.userObject
+
+ return user.unblock()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(dict(
+ api_token=dict(type='str', no_log=True),
+ name=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
+ username=dict(type='str', required=True),
+ password=dict(type='str', no_log=True),
+ email=dict(type='str'),
+ sshkey_name=dict(type='str'),
+ sshkey_file=dict(type='str'),
+ group=dict(type='str'),
+ access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
+ confirm=dict(type='bool', default=True),
+ isadmin=dict(type='bool', default=False),
+ external=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_password', 'api_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token']
+ ],
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['name', 'email', 'password']),
+ )
+ )
+
+ user_name = module.params['name']
+ state = module.params['state']
+ user_username = module.params['username'].lower()
+ user_password = module.params['password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ group_path = module.params['group']
+ access_level = module.params['access_level']
+ confirm = module.params['confirm']
+ user_isadmin = module.params['isadmin']
+ user_external = module.params['external']
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ gitlab_instance = gitlabAuthentication(module)
+
+ gitlab_user = GitLabUser(module, gitlab_instance)
+ user_exists = gitlab_user.existsUser(user_username)
+ if user_exists:
+ user_is_active = gitlab_user.isActive(user_username)
+ else:
+ user_is_active = False
+
+ if state == 'absent':
+ if user_exists:
+ gitlab_user.deleteUser()
+ module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User deleted or does not exists")
+
+ if state == 'blocked':
+ if user_exists and user_is_active:
+ gitlab_user.blockUser()
+ module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already blocked or does not exists")
+
+ if state == 'unblocked':
+ if user_exists and not user_is_active:
+ gitlab_user.unblockUser()
+ module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User is not blocked or does not exists")
+
+ if state == 'present':
+ if gitlab_user.createOrUpdateUser(user_username, {
+ "name": user_name,
+ "password": user_password,
+ "email": user_email,
+ "sshkey_name": user_sshkey_name,
+ "sshkey_file": user_sshkey_file,
+ "group_path": group_path,
+ "access_level": access_level,
+ "confirm": confirm,
+ "isadmin": user_isadmin,
+ "external": user_external}):
+ module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.userObject._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.userObject._attrs)
+
+
+if __name__ == '__main__':
+ main()
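The split between checkable and uncheckable parameters in updateUser() above follows roughly this sketch: checkable attributes are compared against what the API reports, while uncheckable ones (password, skip_reconfirmation) are always pushed because GitLab never returns them. `user` is a hypothetical python-gitlab user object:

def update_user_attrs(user, checkable, uncheckable):
    changed = False
    for key, spec in checkable.items():
        if spec['value'] is not None and getattr(user, key) != spec['value']:
            # an optional 'setter' key maps a read-only name to its writable one
            setattr(user, spec.get('setter', key), spec['value'])
            changed = True
    for key, spec in uncheckable.items():
        if spec['value'] is not None:
            setattr(user, spec.get('setter', key), spec['value'])
    return changed, user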
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py
new file mode 100644
index 00000000..5c084d3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/source_control/hg.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Yeukhon Wong <yeukhon@acm.org>
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: hg
+short_description: Manages Mercurial (hg) repositories
+description:
+ - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
+author: "Yeukhon Wong (@yeukhon)"
+options:
+ repo:
+ description:
+ - The repository address.
+ required: yes
+ aliases: [ name ]
+ dest:
+ description:
+ - Absolute path of where the repository should be cloned to.
+ This parameter is required, unless C(clone) and C(update) are set to C(no).
+ revision:
+ description:
+ - Equivalent C(-r) option in hg command which could be the changeset, revision number,
+ branch name or even tag.
+ aliases: [ version ]
+ force:
+ description:
+ - Discards uncommitted changes. Runs C(hg update -C). Prior to
+ 1.9, the default was `yes`.
+ type: bool
+ default: 'no'
+ purge:
+ description:
+ - Deletes untracked files. Runs C(hg purge).
+ type: bool
+ default: 'no'
+ update:
+ description:
+ - If C(no), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: 'yes'
+ clone:
+ description:
+ - If C(no), do not clone the repository if it does not exist locally.
+ type: bool
+ default: 'yes'
+ executable:
+ description:
+ - Path to hg executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+notes:
+ - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+ - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
+ if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
+ bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+'''
+
+EXAMPLES = '''
+- name: Ensure the current working copy is inside the stable branch and deletes untracked files if any.
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: yes
+
+- name: Get information about the repository whether or not it has already been cloned locally.
+ community.general.hg:
+ repo: git://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: no
+ update: no
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class Hg(object):
+ def __init__(self, module, dest, repo, revision, hg_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.hg_path = hg_path
+
+ def _command(self, args_list):
+ (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+ return (rc, out, err)
+
+ def _list_untracked(self):
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+ return self._command(args)
+
+ def get_revision(self):
+ """
+ hg id -b -i -t returns a string in the format:
+ "<changeset>[+] <branch_name> <tag>"
+ This format lists the state of the current working copy,
+ and indicates whether there are uncommitted changes by the
+ plus sign. Otherwise, the sign is omitted.
+
+ Read the full description via hg id --help
+ """
+ (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def has_local_mods(self):
+ now = self.get_revision()
+ if '+' in now:
+ return True
+ else:
+ return False
+
+ def discard(self):
+ before = self.has_local_mods()
+ if not before:
+ return False
+
+ args = ['update', '-C', '-R', self.dest, '-r', '.']
+ (rc, out, err) = self._command(args)
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ after = self.has_local_mods()
+ if before != after and not after: # no more local modification
+ return True
+
+ def purge(self):
+ # before purge, find out if there are any untracked files
+ (rc1, out1, err1) = self._list_untracked()
+ if rc1 != 0:
+ self.module.fail_json(msg=err1)
+
+ # there are some untracked files
+ if out1 != '':
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
+ (rc2, out2, err2) = self._command(args)
+ if rc2 != 0:
+ self.module.fail_json(msg=err2)
+ return True
+ else:
+ return False
+
+ def cleanup(self, force, purge):
+ discarded = False
+ purged = False
+
+ if force:
+ discarded = self.discard()
+ if purge:
+ purged = self.purge()
+ if discarded or purged:
+ return True
+ else:
+ return False
+
+ def pull(self):
+ return self._command(
+ ['pull', '-R', self.dest, self.repo])
+
+ def update(self):
+ if self.revision is not None:
+ return self._command(['update', '-r', self.revision, '-R', self.dest])
+ return self._command(['update', '-R', self.dest])
+
+ def clone(self):
+ if self.revision is not None:
+ return self._command(['clone', self.repo, self.dest, '-r', self.revision])
+ return self._command(['clone', self.repo, self.dest])
+
+ @property
+ def at_revision(self):
+ """
+ There is no point in pulling from a potentially down/slow remote site
+ if the desired changeset is already the current changeset.
+ """
+ if self.revision is None or len(self.revision) < 7:
+ # Assume it's a rev number, tag, or branch
+ return False
+ (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ if out.startswith(self.revision):
+ return True
+ return False
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True, aliases=['name']),
+ dest=dict(type='path'),
+ revision=dict(type='str', default=None, aliases=['version']),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ update=dict(type='bool', default=True),
+ clone=dict(type='bool', default=True),
+ executable=dict(type='str', default=None),
+ ),
+ )
+ repo = module.params['repo']
+ dest = module.params['dest']
+ revision = module.params['revision']
+ force = module.params['force']
+ purge = module.params['purge']
+ update = module.params['update']
+ clone = module.params['clone']
+ hg_path = module.params['executable'] or module.get_bin_path('hg', True)
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
+
+ # initial states
+ before = ''
+ changed = False
+ cleaned = False
+
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
+
+ hg = Hg(module, dest, repo, revision, hg_path)
+
+ # If there is no hgrc file, then assume repo is absent
+ # and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
+ if not os.path.exists(hgrc):
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ before = hg.get_revision()
+ elif hg.at_revision:
+ # no update needed, don't pull
+ before = hg.get_revision()
+
+ # but force and purge if desired
+ cleaned = hg.cleanup(force, purge)
+ else:
+ # get the current state before pulling
+ before = hg.get_revision()
+
+ # can perform force and purge
+ cleaned = hg.cleanup(force, purge)
+
+ (rc, out, err) = hg.pull()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ (rc, out, err) = hg.update()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ after = hg.get_revision()
+ if before != after or cleaned:
+ changed = True
+
+ module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py
new file mode 100644
index 00000000..77e3b153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spectrum_device.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Renato Orgito <orgito@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum.
+description:
+ - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+ - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1
+author: "Renato Orgito (@orgito)"
+options:
+ device:
+ type: str
+ aliases: [ host, name ]
+ required: true
+ description:
+ - IP address of the device.
+ - If a hostname is given, it will be resolved to the IP address.
+ community:
+ type: str
+ description:
+ - SNMP community used for device discovery.
+ - Required when C(state=present).
+ required: true
+ landscape:
+ type: str
+ required: true
+ description:
+ - Landscape handle of the SpectroServer to which add or remove the device.
+ state:
+ type: str
+ required: false
+ description:
+ - On C(present) creates the device when it does not exist.
+ - On C(absent) removes the device when it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ url:
+ type: str
+ aliases: [ oneclick_url ]
+ required: true
+ description:
+ - HTTP, HTTPS URL of the Oneclick server in the form (http|https)://host.domain[:port]
+ url_username:
+ type: str
+ aliases: [ oneclick_user ]
+ required: true
+ description:
+ - Oneclick user name.
+ url_password:
+ type: str
+ aliases: [ oneclick_password ]
+ required: true
+ description:
+ - Oneclick user password.
+ use_proxy:
+ required: false
+ description:
+      - If C(no), it will not use a proxy, even if one is defined in an environment
+ variable on the target hosts.
+ default: 'yes'
+ type: bool
+ validate_certs:
+ required: false
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: 'yes'
+ type: bool
+ agentport:
+ type: int
+ required: false
+ description:
+ - UDP port used for SNMP discovery.
+ default: 161
+notes:
+ - The devices will be created inside the I(Universe) container of the specified landscape.
+ - All the operations will be performed only on the specified landscape.
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present
+
+
+- name: Remove device from CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ landscape: '{{ landscape_handle }}'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ use_proxy: no
+ state: absent
+'''
+
+RETURN = '''
+device:
+ description: device data when state = present
+ returned: success
+ type: dict
+ sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
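+    """Call the OneClick REST API and return the raw XML response body, failing the module on auth or HTTP errors."""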
+ headers = {
+ "Content-Type": "application/xml",
+ "Accept": "application/xml"
+ }
+
+ url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+ response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+ if info['status'] == 401:
+ module.fail_json(msg="failed to authenticate to Oneclick server")
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ return response.read()
+
+
+def post(resource, xml=None):
+ return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+ return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+ try:
+ device_ip = gethostbyname(module.params.get('device'))
+ except gaierror:
+ module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+ return device_ip
+
+
+def get_device(device_ip):
+ """Query OneClick for the device using the IP Address"""
+ resource = '/models'
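+    # limit the search to model handles inside the requested landscape (each landscape spans a 0x100000 handle range)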
+ landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+ landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <rs:model-request throttlesize="5"
+ xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ <action-models>
+ <filtered-models>
+ <and>
+ <equals>
+ <model-type>SearchManager</model-type>
+ </equals>
+ <greater-than>
+ <attribute id="0x129fa">
+ <value>{mh_min}</value>
+ </attribute>
+ </greater-than>
+ <less-than>
+ <attribute id="0x129fa">
+ <value>{mh_max}</value>
+ </attribute>
+ </less-than>
+ </and>
+ </filtered-models>
+ <action>FIND_DEV_MODELS_BY_IP</action>
+ <attribute id="AttributeID.NETWORK_ADDRESS">
+ <value>{search_ip}</value>
+ </attribute>
+ </action-models>
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ <rs:requested-attribute id="0x12d7f" /> <!--Network Address-->
+ </rs:model-request>
+ """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+ result = post(resource, xml=xml)
+
+ root = ET.fromstring(result)
+
+ if root.get('total-models') == '0':
+ return None
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+ # get the first device
+ model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+ if model.get('error'):
+ module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+ # get the attributes
+ model_handle = model.get('mh')
+
+ model_address = model.find('./*[@id="0x12d7f"]').text
+
+    # derive the landscape handle from the model handle of the device
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=model_address,
+ landscape=model_landscape)
+
+ return device
+
+
+def add_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device:
+ module.exit_json(changed=False, device=device)
+
+ if module.check_mode:
+ device = dict(
+ model_handle=None,
+ address=device_ip,
+ landscape="0x%x" % int(module.params.get('landscape'), 16))
+ module.exit_json(changed=True, device=device)
+
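+    # create the device model through the OneClick REST API, passing the SNMP community string, landscape and agent port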
+ resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+ resource += '&landscapeid=' + module.params.get('landscape')
+
+ if module.params.get('agentport', None):
+ resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+ result = post(resource)
+ root = ET.fromstring(result)
+
+ if root.get('error') != 'Success':
+ module.fail_json(msg=root.get('error-message'))
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ model = root.find('ca:model', namespace)
+
+ model_handle = model.get('mh')
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=device_ip,
+ landscape=model_landscape,
+ )
+
+ module.exit_json(changed=True, device=device)
+
+
+def remove_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ resource = '/model/' + device['model_handle']
+ result = delete(resource)
+
+ root = ET.fromstring(result)
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ error = root.find('ca:error', namespace).text
+
+ if error != 'Success':
+ error_message = root.find('ca:error-message', namespace).text
+ module.fail_json(msg="%s %s" % (error, error_message))
+
+ module.exit_json(changed=True)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(required=True, aliases=['host', 'name']),
+ landscape=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ?
+ agentport=dict(type='int', default=161),
+ url=dict(required=True, aliases=['oneclick_url']),
+ url_username=dict(required=True, aliases=['oneclick_user']),
+ url_password=dict(required=True, no_log=True, aliases=['oneclick_password']),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ required_if=[('state', 'present', ['community'])],
+ supports_check_mode=True
+ )
+
+ if module.params.get('state') == 'present':
+ add_device()
+ else:
+ remove_device()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
new file mode 100644
index 00000000..8f05da7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
@@ -0,0 +1,1543 @@
+#!/usr/bin/python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+  - Can create, update, or delete Spotinst AWS Elastigroups.
+    Launch configuration is part of the elastigroup configuration,
+    so no additional modules are necessary for handling the launch configuration.
+    You must have a credentials file in this location - <home>/.spotinst/credentials
+ The credentials file must contain a row that looks like this
+ token = <YOUR TOKEN>
+ Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
+requirements:
+ - python >= 2.7
+ - spotinst_sdk >= 1.0.38
+options:
+
+ credentials_path:
+ description:
+      - (Path) Optional parameter that allows setting a non-default credentials path.
+ default: ~/.spotinst/credentials
+ type: path
+
+ account_id:
+ description:
+      - (String) Optional parameter that allows setting an account-id inside the module configuration.
+ By default this is retrieved from the credentials path
+ type: str
+
+ availability_vs_cost:
+ description:
+ - (String) The strategy orientation.
+ - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ name (String),
+ subnet_id (String),
+ placement_group_name (String),
+ required: true
+ type: list
+
+ block_device_mappings:
+ description:
+ - (List of Objects) a list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+ You can specify virtual devices and EBS volumes.;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ device_name (List of Strings),
+ virtual_name (String),
+ no_device (String),
+ ebs (Object, expects the following keys-
+ delete_on_termination(Boolean),
+ encrypted(Boolean),
+ iops (Integer),
+ snapshot_id(Integer),
+ volume_type(String),
+ volume_size(Integer))
+ type: list
+
+ chef:
+ description:
+ - (Object) The Chef integration configuration.;
+ Expects the following keys - chef_server (String),
+ organization (String),
+ user (String),
+ pem_key (String),
+ chef_version (String)
+ type: dict
+
+ draining_timeout:
+ description:
+ - (Integer) Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - (Boolean) Enable EBS optimization for supported instances which are not enabled by default.;
+ Note - additional charges will be applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - (List of Objects) a list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ volume_ids (List of Strings),
+ device_name (String)
+ type: list
+
+ ecs:
+ description:
+ - (Object) The ECS integration configuration.;
+ Expects the following key -
+ cluster_name (String)
+ type: dict
+
+ elastic_ips:
+ description:
+ - (List of Strings) List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
+ type: list
+
+ fallback_to_od:
+ description:
+ - (Boolean) In case of no spots available, Elastigroup will launch an On-demand instance instead
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - (Integer) The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to C(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+      - (Integer) Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
+ type: int
+
+ health_check_type:
+ description:
+ - (String) The service to use for the health check.
+ - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ type: str
+
+ iam_role_name:
+ description:
+ - (String) The instance profile iamRole name
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ iam_role_arn:
+ description:
+ - (String) The instance profile iamRole arn
+ - Only use iam_role_arn, or iam_role_name
+ type: str
+
+ id:
+ description:
+ - (String) The group id if it already exists and you want to update, or delete it.
+ This will not work unless the uniqueness_by field is set to id.
+ When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+ type: str
+
+ image_id:
+ description:
+ - (String) The image Id used to launch the instance.;
+ In case of conflict between Instance type and image type, an error will be returned
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - (String) Specify a Key Pair to attach to the instances
+ type: str
+
+ kubernetes:
+ description:
+ - (Object) The Kubernetes integration configuration.
+ Expects the following keys -
+ api_server (String),
+ token (String)
+ type: dict
+
+ lifetime_period:
+ description:
+ - (Integer) lifetime period
+ type: int
+
+ load_balancers:
+ description:
+ - (List of Strings) List of classic ELB names
+ type: list
+
+ max_size:
+ description:
+ - (Integer) The upper limit number of instances that you can scale up to
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - (Object) The Mesosphere integration configuration.
+ Expects the following key -
+ api_server (String)
+ type: dict
+
+ min_size:
+ description:
+ - (Integer) The lower limit number of instances that you can scale down to
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - (String) Describes whether instance Enhanced Monitoring is enabled
+ type: str
+
+ name:
+ description:
+ - (String) Unique name for elastigroup to be created, updated or deleted
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - (List of Objects) a list of hash/dictionaries of network interfaces to add to the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ description (String),
+ device_index (Integer),
+ secondary_private_ip_address_count (Integer),
+ associate_public_ip_address (Boolean),
+ delete_on_termination (Boolean),
+ groups (List of Strings),
+ network_interface_id (String),
+ private_ip_address (String),
+ subnet_id (String),
+ associate_ipv6_address (Boolean),
+ private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+ type: list
+
+ on_demand_count:
+ description:
+ - (Integer) Required if risk is not set
+ - Number of on demand instances to launch. All other instances will be spot instances.;
+ Either set this parameter or the risk parameter
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - (String) On-demand instance type that will be provisioned
+ type: str
+
+ opsworks:
+ description:
+      - (Object) The elastigroup OpsWorks integration configuration.;
+ Expects the following key -
+ layer_id (String)
+ type: dict
+
+ persistence:
+ description:
+      - (Object) The Stateful elastigroup configuration.;
+ Accepts the following keys -
+ should_persist_root_device (Boolean),
+ should_persist_block_devices (Boolean),
+ should_persist_private_ip (Boolean)
+ type: dict
+
+ product:
+ description:
+      - (String) Operating system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - (Object) The Rancher integration configuration.;
+ Expects the following keys -
+ version (String),
+ access_key (String),
+ secret_key (String),
+ master_host (String)
+ type: dict
+
+ right_scale:
+ description:
+ - (Object) The Rightscale integration configuration.;
+ Expects the following keys -
+ account_id (String),
+ refresh_token (String)
+ type: dict
+
+ risk:
+ description:
+ - (Integer) required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - (Object) Roll configuration.;
+ If you would like the group to roll after updating, please use this feature.
+ Accepts the following keys -
+ batch_size_percentage(Integer, Required),
+ grace_period - (Integer, Required),
+ health_check_type(String, Optional)
+ type: dict
+
+ scheduled_tasks:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ adjustment (Integer),
+ scale_target_capacity (Integer),
+ scale_min_capacity (Integer),
+ scale_max_capacity (Integer),
+ adjustment_percentage (Integer),
+ batch_size_percentage (Integer),
+ cron_expression (String),
+ frequency (String),
+ grace_period (Integer),
+ task_type (String, required),
+ is_enabled (Boolean)
+ type: list
+
+ security_group_ids:
+ description:
+ - (List of Strings) One or more security group IDs. ;
+ In case of update it will override the existing Security Group with the new given array
+ required: true
+ type: list
+
+ shutdown_script:
+ description:
+ - (String) The Base64-encoded shutdown script that executes prior to instance termination.
+ Encode before setting.
+ type: str
+
+ signals:
+ description:
+ - (List of Objects) a list of hash/dictionaries of signals to configure in the elastigroup;
+ keys allowed are -
+ name (String, required),
+ timeout (Integer)
+ type: list
+
+ spin_up_time:
+ description:
+ - (Integer) spin up time, in seconds, for the instance
+ type: int
+
+ spot_instance_types:
+ description:
+ - (List of Strings) Spot instance type that will be provisioned.
+ required: true
+ type: list
+
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - (String) create or delete the elastigroup
+ default: present
+ type: str
+
+ tags:
+ description:
+ - (List of tagKey:tagValue pairs) a list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
+ type: list
+
+ target:
+ description:
+ - (Integer) The number of instances to launch
+ required: true
+ type: int
+
+ target_group_arns:
+ description:
+ - (List of Strings) List of target group arns instances should be registered to
+ type: list
+
+ tenancy:
+ description:
+ - (String) dedicated vs shared tenancy.
+ - "The available choices are: C(default), C(dedicated)."
+ type: str
+
+ terminate_at_end_of_billing_hour:
+ description:
+ - (Boolean) terminate at the end of billing hour
+ type: bool
+
+ unit:
+ description:
+ - (String) The capacity unit to launch instances by.
+ - "The available choices are: C(instance), C(weight)."
+ type: str
+
+ up_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+        statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ min_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ down_scaling_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+        dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ max_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+
+ target_tracking_policies:
+ description:
+ - (List of Objects) a list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ source (String, required),
+ metric_name (String, required),
+ statistic (String, required),
+ unit (String, required),
+ cooldown (String, required),
+ target (String, required)
+ type: list
+
+ uniqueness_by:
+ choices:
+ - id
+ - name
+ description:
+ - (String) If your group names are not unique, you may use this feature to update or delete a specific group.
+ Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
+ default: name
+ type: str
+
+ user_data:
+ description:
+ - (String) Base64-encoded MIME user data. Encode before setting the value.
+ type: str
+
+ utilize_reserved_instances:
+ description:
+ - (Boolean) In case of any available Reserved Instances,
+ Elastigroup will utilize your reservations before purchasing Spot instances.
+ type: bool
+
+ wait_for_instances:
+ description:
+      - (Boolean) Whether or not the elastigroup creation / update actions should wait for the instances to spin up
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - (Integer) How long the module should wait for instances before failing the action.;
+ Only works if wait_for_instances is True.
+ type: int
+
+'''
+EXAMPLES = '''
+# Basic configuration YAML example
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/sda1'
+ ebs:
+ volume_size: 100
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
+# In organizations with more than one account, it is required to specify an account_id
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ ebs:
+ volume_size: 60
+ volume_type: gp2
+ - device_name: '/dev/xvdb'
+ ebs:
+ volume_size: 120
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: True
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example we have set up block device mapping with ephemeral devices
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ virtual_name: ephemeral0
+ - device_name: '/dev/xvdb/'
+ virtual_name: ephemeral1
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example we create a basic group configuration with a network interface defined.
+# Each network interface must have a device index
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ network_interfaces:
+ - associate_public_ip_address: true
+ device_index: 0
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: True
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+
+# In this example we create a basic group configuration with a target tracking scaling policy defined
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ account_id: act-92d45673
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-79da021e
+ image_id: ami-f173cc91
+ fallback_to_od: true
+ tags:
+ - Creator: ValueOfCreatorTag
+ - Environment: ValueOfEnvironmentTag
+ key_pair: spotinst-labs-oregon
+ max_size: 10
+ min_size: 0
+ target: 2
+ unit: instance
+ monitoring: True
+ name: ansible-group-1
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-46cdc13d
+ spot_instance_types:
+ - c3.large
+ target_tracking_policies:
+ - policy_name: target-tracking-1
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ unit: percent
+ target: 50
+ cooldown: 120
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result
+'''
+
+RETURN = '''
+---
+instances:
+ description: List of active elastigroup instances and their details.
+ returned: success
+ type: dict
+ sample: [
+ {
+ "spotInstanceRequestId": "sir-regs25zp",
+ "instanceId": "i-09640ad8678234c",
+ "instanceType": "m4.large",
+ "product": "Linux/UNIX",
+ "availabilityZone": "us-west-2b",
+ "privateIp": "180.0.2.244",
+ "createdAt": "2017-07-17T12:46:18.000Z",
+ "status": "fulfilled"
+ }
+ ]
+group_id:
+ description: Created / Updated group's ID.
+ returned: success
+ type: str
+ sample: "sig-12345"
+
+'''
+
+HAS_SPOTINST_SDK = False
+__metaclass__ = type
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import spotinst_sdk as spotinst
+ from spotinst_sdk import SpotinstClientException
+
+ HAS_SPOTINST_SDK = True
+
+except ImportError:
+ pass
+
+eni_fields = ('description',
+ 'device_index',
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address',
+ 'delete_on_termination',
+ 'groups',
+ 'network_interface_id',
+ 'private_ip_address',
+ 'subnet_id',
+ 'associate_ipv6_address')
+
+private_ip_fields = ('private_ip_address',
+ 'primary')
+
+capacity_fields = (dict(ansible_field_name='min_size',
+ spotinst_field_name='minimum'),
+ dict(ansible_field_name='max_size',
+ spotinst_field_name='maximum'),
+ 'target',
+ 'unit')
+
+lspec_fields = ('user_data',
+ 'key_pair',
+ 'tenancy',
+ 'shutdown_script',
+ 'monitoring',
+ 'ebs_optimized',
+ 'image_id',
+ 'health_check_type',
+ 'health_check_grace_period',
+ 'health_check_unhealthy_duration_before_replacement',
+ 'security_group_ids')
+
+iam_fields = (dict(ansible_field_name='iam_role_name',
+ spotinst_field_name='name'),
+ dict(ansible_field_name='iam_role_arn',
+ spotinst_field_name='arn'))
+
+scheduled_task_fields = ('adjustment',
+ 'adjustment_percentage',
+ 'batch_size_percentage',
+ 'cron_expression',
+ 'frequency',
+ 'grace_period',
+ 'task_type',
+ 'is_enabled',
+ 'scale_target_capacity',
+ 'scale_min_capacity',
+ 'scale_max_capacity')
+
+scaling_policy_fields = ('policy_name',
+ 'namespace',
+ 'metric_name',
+ 'dimensions',
+ 'statistic',
+ 'evaluation_periods',
+ 'period',
+ 'threshold',
+ 'cooldown',
+ 'unit',
+ 'operator')
+
+tracking_policy_fields = ('policy_name',
+ 'namespace',
+ 'source',
+ 'metric_name',
+ 'statistic',
+ 'unit',
+ 'cooldown',
+ 'target',
+ 'threshold')
+
+action_fields = (dict(ansible_field_name='action_type',
+ spotinst_field_name='type'),
+ 'adjustment',
+ 'min_target_capacity',
+ 'max_target_capacity',
+ 'target',
+ 'minimum',
+ 'maximum')
+
+signal_fields = ('name',
+ 'timeout')
+
+multai_lb_fields = ('balancer_id',
+ 'project_id',
+ 'target_set_id',
+ 'az_awareness',
+ 'auto_weight')
+
+persistence_fields = ('should_persist_root_device',
+ 'should_persist_block_devices',
+ 'should_persist_private_ip')
+
+strategy_fields = ('risk',
+ 'utilize_reserved_instances',
+ 'fallback_to_od',
+ 'on_demand_count',
+ 'availability_vs_cost',
+ 'draining_timeout',
+ 'spin_up_time',
+ 'lifetime_period')
+
+ebs_fields = ('delete_on_termination',
+ 'encrypted',
+ 'iops',
+ 'snapshot_id',
+ 'volume_type',
+ 'volume_size')
+
+bdm_fields = ('device_name',
+ 'virtual_name',
+ 'no_device')
+
+kubernetes_fields = ('api_server',
+ 'token')
+
+right_scale_fields = ('account_id',
+ 'refresh_token')
+
+rancher_fields = ('access_key',
+ 'secret_key',
+ 'master_host',
+ 'version')
+
+chef_fields = ('chef_server',
+ 'organization',
+ 'user',
+ 'pem_key',
+ 'chef_version')
+
+az_fields = ('name',
+ 'subnet_id',
+ 'placement_group_name')
+
+opsworks_fields = ('layer_id',)
+
+scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
+
+mesosphere_fields = ('api_server',)
+
+ecs_fields = ('cluster_name',)
+
+multai_fields = ('multai_token',)
+
+
+def handle_elastigroup(client, module):
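+    # Decide whether the group should be created, updated or deleted: look it up either by the
+    # explicitly supplied id (uniqueness_by=id) or by matching an existing group with the same name.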
+ has_changed = False
+ group_id = None
+ message = 'None'
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ uniqueness_by = module.params.get('uniqueness_by')
+ external_group_id = module.params.get('id')
+
+ if uniqueness_by == 'id':
+ if external_group_id is None:
+ should_create = True
+ else:
+ should_create = False
+ group_id = external_group_id
+ else:
+ groups = client.get_elastigroups()
+ should_create, group_id = find_group_with_same_name(groups, name)
+
+ if should_create is True:
+ if state == 'present':
+ eg = expand_elastigroup(module, is_update=False)
+            group = client.create_elastigroup(group=eg)
+            group_id = group['id']
+            message = 'Created group successfully.'
+            module.debug(str(" [INFO] " + message + "\n"))
+            has_changed = True
+
+ elif state == 'absent':
+ message = 'Cannot delete non-existent group.'
+ has_changed = False
+ else:
+ eg = expand_elastigroup(module, is_update=True)
+
+ if state == 'present':
+ group = client.update_elastigroup(group_update=eg, group_id=group_id)
+ message = 'Updated group successfully.'
+
+ try:
+ roll_config = module.params.get('roll_config')
+ if roll_config:
+ eg_roll = spotinst.aws_elastigroup.Roll(
+ batch_size_percentage=roll_config.get('batch_size_percentage'),
+ grace_period=roll_config.get('grace_period'),
+ health_check_type=roll_config.get('health_check_type')
+ )
+ roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
+ message = 'Updated and started rolling the group successfully.'
+
+ except SpotinstClientException as exc:
+            message = 'Updated group successfully, but failed to perform roll. Error: ' + str(exc)
+ has_changed = True
+
+ elif state == 'absent':
+ try:
+ client.delete_elastigroup(group_id=group_id)
+ except SpotinstClientException as exc:
+ if "GROUP_DOESNT_EXIST" in exc.message:
+ pass
+ else:
+ module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+ message = 'Deleted group successfully.'
+ has_changed = True
+
+ return group_id, message, has_changed
+
+
+def retrieve_group_instances(client, module, group_id):
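+    # When wait_for_instances is enabled, poll the group every 10 seconds until the target number
+    # of healthy (or active) instances is reached or wait_timeout (default 300 seconds) expires.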
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+
+ health_check_type = module.params.get('health_check_type')
+
+ if wait_timeout is None:
+ wait_timeout = 300
+
+ wait_timeout = time.time() + wait_timeout
+ target = module.params.get('target')
+ state = module.params.get('state')
+ instances = list()
+
+ if state == 'present' and group_id is not None and wait_for_instances is True:
+
+ is_amount_fulfilled = False
+ while is_amount_fulfilled is False and wait_timeout > time.time():
+ instances = list()
+ amount_of_fulfilled_instances = 0
+
+ if health_check_type is not None:
+ healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+ for healthy_instance in healthy_instances:
+ if healthy_instance.get('healthStatus') == 'HEALTHY':
+ amount_of_fulfilled_instances += 1
+ instances.append(healthy_instance)
+
+ else:
+ active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+ for active_instance in active_instances:
+ if active_instance.get('private_ip') is not None:
+ amount_of_fulfilled_instances += 1
+ instances.append(active_instance)
+
+ if amount_of_fulfilled_instances >= target:
+ is_amount_fulfilled = True
+
+ time.sleep(10)
+
+ return instances
+
+
+def find_group_with_same_name(groups, name):
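+    # Returns a (should_create, group_id) tuple: (True, None) when no existing group matches the name.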
+ for group in groups:
+ if group['name'] == name:
+ return False, group.get('id')
+
+ return True, None
+
+
+def expand_elastigroup(module, is_update):
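+    # Build the full Elastigroup request object section by section from the module parameters.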
+ do_not_update = module.params['do_not_update']
+ name = module.params.get('name')
+
+ eg = spotinst.aws_elastigroup.Elastigroup()
+ description = module.params.get('description')
+
+ if name is not None:
+ eg.name = name
+ if description is not None:
+ eg.description = description
+
+ # Capacity
+ expand_capacity(eg, module, is_update, do_not_update)
+ # Strategy
+ expand_strategy(eg, module)
+ # Scaling
+ expand_scaling(eg, module)
+ # Third party integrations
+ expand_integrations(eg, module)
+ # Compute
+ expand_compute(eg, module, is_update, do_not_update)
+ # Multai
+ expand_multai(eg, module)
+ # Scheduling
+ expand_scheduled_tasks(eg, module)
+
+ return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+ elastic_ips = module.params['elastic_ips']
+ on_demand_instance_type = module.params.get('on_demand_instance_type')
+ spot_instance_types = module.params['spot_instance_types']
+ ebs_volume_pool = module.params['ebs_volume_pool']
+ availability_zones_list = module.params['availability_zones']
+ product = module.params.get('product')
+
+ eg_compute = spotinst.aws_elastigroup.Compute()
+
+ if product is not None:
+ # Only put product on group creation
+ if is_update is not True:
+ eg_compute.product = product
+
+ if elastic_ips is not None:
+ eg_compute.elastic_ips = elastic_ips
+
+ if on_demand_instance_type or spot_instance_types is not None:
+ eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+        if on_demand_instance_type is not None:
+            eg_instance_types.ondemand = on_demand_instance_type
+        if spot_instance_types is not None:
+            eg_instance_types.spot = spot_instance_types
+
+ if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+ eg_compute.instance_types = eg_instance_types
+
+ expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+ eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+ expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+ eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+ if ebs_volumes_list is not None:
+ eg_volumes = []
+
+ for volume in ebs_volumes_list:
+ eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+ if volume.get('device_name') is not None:
+ eg_volume.device_name = volume.get('device_name')
+ if volume.get('volume_ids') is not None:
+ eg_volume.volume_ids = volume.get('volume_ids')
+
+ if eg_volume.device_name is not None:
+ eg_volumes.append(eg_volume)
+
+ if len(eg_volumes) > 0:
+ eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+ eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+ if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+ eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+ tags = module.params['tags']
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ block_device_mappings = module.params['block_device_mappings']
+ network_interfaces = module.params['network_interfaces']
+
+ if is_update is True:
+ if 'image_id' in do_not_update:
+ delattr(eg_launch_spec, 'image_id')
+
+ expand_tags(eg_launch_spec, tags)
+
+ expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+ expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+ expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+ eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+ rancher = module.params.get('rancher')
+ mesosphere = module.params.get('mesosphere')
+ ecs = module.params.get('ecs')
+ kubernetes = module.params.get('kubernetes')
+ right_scale = module.params.get('right_scale')
+ opsworks = module.params.get('opsworks')
+ chef = module.params.get('chef')
+
+ integration_exists = False
+
+ eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+ if mesosphere is not None:
+ eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+ integration_exists = True
+
+ if ecs is not None:
+ eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+ integration_exists = True
+
+ if kubernetes is not None:
+ eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+ integration_exists = True
+
+ if right_scale is not None:
+ eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+ integration_exists = True
+
+ if opsworks is not None:
+ eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+ integration_exists = True
+
+ if rancher is not None:
+ eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+ integration_exists = True
+
+ if chef is not None:
+ eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+ integration_exists = True
+
+ if integration_exists:
+ eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+ eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+ if is_update is True:
+ delattr(eg_capacity, 'unit')
+
+ if 'target' in do_not_update:
+ delattr(eg_capacity, 'target')
+
+ eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+ persistence = module.params.get('persistence')
+ signals = module.params.get('signals')
+
+ eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+ terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+ if terminate_at_end_of_billing_hour is not None:
+ eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+ module.params, 'ScalingStrategy')
+
+ if persistence is not None:
+ eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+ if signals is not None:
+ eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+ if len(eg_signals) > 0:
+ eg_strategy.signals = eg_signals
+
+ eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+ multai_load_balancers = module.params.get('multai_load_balancers')
+
+ eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+ if multai_load_balancers is not None:
+ eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+ if len(eg_multai_load_balancers) > 0:
+ eg_multai.balancers = eg_multai_load_balancers
+ eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+ scheduled_tasks = module.params.get('scheduled_tasks')
+
+ if scheduled_tasks is not None:
+ eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+ eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+ if len(eg_tasks) > 0:
+ eg_scheduling.tasks = eg_tasks
+ eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
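+    # Classic ELBs and target groups are both expressed as LoadBalancer objects, distinguished by their type field.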
+ if load_balancers is not None or target_group_arns is not None:
+ eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+ eg_total_lbs = []
+
+ if load_balancers is not None:
+ for elb_name in load_balancers:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if elb_name is not None:
+ eg_elb.name = elb_name
+ eg_elb.type = 'CLASSIC'
+ eg_total_lbs.append(eg_elb)
+
+ if target_group_arns is not None:
+ for target_arn in target_group_arns:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if target_arn is not None:
+ eg_elb.arn = target_arn
+ eg_elb.type = 'TARGET_GROUP'
+ eg_total_lbs.append(eg_elb)
+
+ if len(eg_total_lbs) > 0:
+ eg_load_balancers_config.load_balancers = eg_total_lbs
+ eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
+ if tags is not None:
+ eg_tags = []
+
+ for tag in tags:
+ eg_tag = spotinst.aws_elastigroup.Tag()
+            if tag.keys():
+                eg_tag.tag_key = list(tag.keys())[0]
+            if tag.values():
+                eg_tag.tag_value = list(tag.values())[0]
+
+ eg_tags.append(eg_tag)
+
+ if len(eg_tags) > 0:
+ eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+ if bdms is not None:
+ eg_bdms = []
+
+ for bdm in bdms:
+ eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+ if bdm.get('ebs') is not None:
+ eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+ eg_bdms.append(eg_bdm)
+
+ if len(eg_bdms) > 0:
+ eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+ if enis is not None:
+ eg_enis = []
+
+ for eni in enis:
+ eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+ eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+ if eg_pias is not None:
+ eg_eni.private_ip_addresses = eg_pias
+
+ eg_enis.append(eg_eni)
+
+ if len(eg_enis) > 0:
+ eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+ up_scaling_policies = module.params['up_scaling_policies']
+ down_scaling_policies = module.params['down_scaling_policies']
+ target_tracking_policies = module.params['target_tracking_policies']
+
+ eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+ if up_scaling_policies is not None:
+ eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+ if len(eg_up_scaling_policies) > 0:
+ eg_scaling.up = eg_up_scaling_policies
+
+ if down_scaling_policies is not None:
+ eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+ if len(eg_down_scaling_policies) > 0:
+ eg_scaling.down = eg_down_scaling_policies
+
+ if target_tracking_policies is not None:
+ eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+ if len(eg_target_tracking_policies) > 0:
+ eg_scaling.target = eg_target_tracking_policies
+
+ if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+ eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+ if items is not None:
+ new_objects_list = []
+ for item in items:
+ new_obj = expand_fields(fields, item, class_name)
+ new_objects_list.append(new_obj)
+
+ return new_objects_list
+
+
+def expand_fields(fields, item, class_name):
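+    # Generic helper: instantiate the named spotinst_sdk.aws_elastigroup class and copy every
+    # non-empty value from the Ansible parameters, honouring ansible/spotinst field name mappings.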
+ class_ = getattr(spotinst.aws_elastigroup, class_name)
+ new_obj = class_()
+
+ # Handle primitive fields
+ if item is not None:
+ for field in fields:
+ if isinstance(field, dict):
+ ansible_field_name = field['ansible_field_name']
+ spotinst_field_name = field['spotinst_field_name']
+ else:
+ ansible_field_name = field
+ spotinst_field_name = field
+ if item.get(ansible_field_name) is not None:
+ setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+ return new_obj
+
+
+def expand_scaling_policies(scaling_policies):
+ eg_scaling_policies = []
+
+ for policy in scaling_policies:
+ eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
+ eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+ eg_scaling_policies.append(eg_policy)
+
+ return eg_scaling_policies
+
+
+def expand_target_tracking_policies(tracking_policies):
+ eg_tracking_policies = []
+
+ for policy in tracking_policies:
+ eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+ eg_tracking_policies.append(eg_policy)
+
+ return eg_tracking_policies
+
+
+def main():
+ fields = dict(
+ account_id=dict(type='str'),
+ availability_vs_cost=dict(type='str', required=True),
+ availability_zones=dict(type='list', required=True),
+ block_device_mappings=dict(type='list'),
+ chef=dict(type='dict'),
+ credentials_path=dict(type='path', default="~/.spotinst/credentials"),
+ do_not_update=dict(default=[], type='list'),
+ down_scaling_policies=dict(type='list'),
+ draining_timeout=dict(type='int'),
+ ebs_optimized=dict(type='bool'),
+ ebs_volume_pool=dict(type='list'),
+ ecs=dict(type='dict'),
+ elastic_beanstalk=dict(type='dict'),
+ elastic_ips=dict(type='list'),
+ fallback_to_od=dict(type='bool'),
+ id=dict(type='str'),
+ health_check_grace_period=dict(type='int'),
+ health_check_type=dict(type='str'),
+ health_check_unhealthy_duration_before_replacement=dict(type='int'),
+ iam_role_arn=dict(type='str'),
+ iam_role_name=dict(type='str'),
+ image_id=dict(type='str', required=True),
+ key_pair=dict(type='str'),
+ kubernetes=dict(type='dict'),
+ lifetime_period=dict(type='int'),
+ load_balancers=dict(type='list'),
+ max_size=dict(type='int', required=True),
+ mesosphere=dict(type='dict'),
+ min_size=dict(type='int', required=True),
+ monitoring=dict(type='str'),
+ multai_load_balancers=dict(type='list'),
+ multai_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ network_interfaces=dict(type='list'),
+ on_demand_count=dict(type='int'),
+ on_demand_instance_type=dict(type='str'),
+ opsworks=dict(type='dict'),
+ persistence=dict(type='dict'),
+ product=dict(type='str', required=True),
+ rancher=dict(type='dict'),
+ right_scale=dict(type='dict'),
+ risk=dict(type='int'),
+ roll_config=dict(type='dict'),
+ scheduled_tasks=dict(type='list'),
+ security_group_ids=dict(type='list', required=True),
+ shutdown_script=dict(type='str'),
+ signals=dict(type='list'),
+ spin_up_time=dict(type='int'),
+ spot_instance_types=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list'),
+ target=dict(type='int', required=True),
+ target_group_arns=dict(type='list'),
+ tenancy=dict(type='str'),
+ terminate_at_end_of_billing_hour=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ unit=dict(type='str'),
+ user_data=dict(type='str'),
+ utilize_reserved_instances=dict(type='bool'),
+ uniqueness_by=dict(default='name', choices=['name', 'id']),
+ up_scaling_policies=dict(type='list'),
+ target_tracking_policies=dict(type='list'),
+ wait_for_instances=dict(type='bool', default=False),
+ wait_timeout=dict(type='int')
+ )
+
+ module = AnsibleModule(argument_spec=fields)
+
+ if not HAS_SPOTINST_SDK:
+ module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
+
+ # Retrieve creds file variables
+ creds_file_loaded_vars = dict()
+
+ credentials_path = module.params.get('credentials_path')
+
+ try:
+ with open(credentials_path, "r") as creds:
+ for line in creds:
+ eq_index = line.find('=')
+ var_name = line[:eq_index].strip()
+ string_value = line[eq_index + 1:].strip()
+ creds_file_loaded_vars[var_name] = string_value
+ except IOError:
+ pass
+ # End of creds file retrieval
+
+ token = module.params.get('token')
+ if not token:
+ token = os.environ.get('SPOTINST_TOKEN')
+ if not token:
+ token = creds_file_loaded_vars.get("token")
+
+ account = module.params.get('account_id')
+ if not account:
+ account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+ if not account:
+ account = creds_file_loaded_vars.get("account")
+
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+ if account is not None:
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+ group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+ instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+ module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
new file mode 100644
index 00000000..04604c09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+ - Farhan Nomani (@farhan7500)
+ - Gautham P Hegde (@gautamphegde)
+description:
+ - Create and delete CPG on HPE 3PAR.
+module: ss_3par_cpg
+options:
+ cpg_name:
+ description:
+ - Name of the CPG.
+ type: str
+ required: true
+ disk_type:
+ choices:
+ - FC
+ - NL
+ - SSD
+ description:
+ - Specifies that physical disks must have the specified device type.
+ type: str
+ domain:
+ description:
+ - Specifies the name of the domain in which the object will reside.
+ type: str
+ growth_increment:
+ description:
+      - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+ created on each auto-grow operation.
+ type: str
+ growth_limit:
+ description:
+ - Specifies that the autogrow operation is limited to the specified
+        storage amount that sets the growth limit (in MiB, GiB or TiB).
+ type: str
+ growth_warning:
+ description:
+      - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space which,
+        when exceeded, results in a warning alert.
+ type: str
+ high_availability:
+ choices:
+ - PORT
+ - CAGE
+ - MAG
+ description:
+ - Specifies that the layout must support the failure of one port pair,
+ one cage, or one magazine.
+ type: str
+ raid_type:
+ choices:
+ - R0
+ - R1
+ - R5
+ - R6
+ description:
+ - Specifies the RAID type for the logical disk.
+ type: str
+ set_size:
+ description:
+ - Specifies the set size in the number of chunklets.
+ type: int
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the specified CPG should exist or not.
+ required: true
+ type: str
+ secure:
+ description:
+ - Specifies whether the certificate needs to be validated while communicating.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.hpe3par
+
+'''
+
+
+EXAMPLES = r'''
+- name: Create CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: present
+ cpg_name: sample_cpg
+ domain: sample_domain
+ growth_increment: 32000 MiB
+ growth_limit: 64000 MiB
+ growth_warning: 48000 MiB
+ raid_type: R6
+ set_size: 8
+ high_availability: MAG
+ disk_type: FC
+ secure: no
+
+- name: Delete CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: absent
+ cpg_name: sample_cpg
+ secure: no
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+try:
+ from hpe3par_sdk import client
+ from hpe3parclient import exceptions
+ HAS_3PARCLIENT = True
+except ImportError:
+ HAS_3PARCLIENT = False
+
+
+def validate_set_size(raid_type, set_size):
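+    # A set size is only considered valid if the SDK's RAID_MAP lists it for the chosen RAID type.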
+ if raid_type:
+ set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+ if set_size in set_size_array:
+ return True
+ return False
+
+
+def cpg_ldlayout_map(ldlayout_dict):
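+    # Translate the symbolic RAID type and HA level into the numeric constants expected by the WSAPI client.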
+ if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']:
+ ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
+ ldlayout_dict['RAIDType']]['raid_value']
+ if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']:
+ ldlayout_dict['HA'] = getattr(
+ client.HPE3ParClient, ldlayout_dict['HA'])
+ return ldlayout_dict
+
+
+def create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type):
+ try:
+ if not validate_set_size(raid_type, set_size):
+ return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
+ if not client_obj.cpgExists(cpg_name):
+
+ disk_patterns = []
+ if disk_type:
+ disk_type = getattr(client.HPE3ParClient, disk_type)
+ disk_patterns = [{'diskType': disk_type}]
+ ld_layout = {
+ 'RAIDType': raid_type,
+ 'setSize': set_size,
+ 'HA': high_availability,
+ 'diskPatterns': disk_patterns}
+ ld_layout = cpg_ldlayout_map(ld_layout)
+ if growth_increment is not None:
+ growth_increment = hpe3par.convert_to_binary_multiple(
+ growth_increment)
+ if growth_limit is not None:
+ growth_limit = hpe3par.convert_to_binary_multiple(
+ growth_limit)
+ if growth_warning is not None:
+ growth_warning = hpe3par.convert_to_binary_multiple(
+ growth_warning)
+ optional = {
+ 'domain': domain,
+ 'growthIncrementMiB': growth_increment,
+ 'growthLimitMiB': growth_limit,
+ 'usedLDWarningAlertMiB': growth_warning,
+ 'LDLayout': ld_layout}
+ client_obj.createCPG(cpg_name, optional)
+ else:
+ return (True, False, "CPG already present")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG creation failed | %s" % (e))
+ return (True, True, "Created CPG %s successfully." % cpg_name)
+
+
+def delete_cpg(
+ client_obj,
+ cpg_name):
+ try:
+ if client_obj.cpgExists(cpg_name):
+ client_obj.deleteCPG(cpg_name)
+ else:
+ return (True, False, "CPG does not exist")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG delete failed | %s" % e)
+ return (True, True, "Deleted CPG %s successfully." % cpg_name)
+
+
+def main():
+ module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+ if not HAS_3PARCLIENT:
+ module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+ if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+ module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+ storage_system_ip = module.params["storage_system_ip"]
+ storage_system_username = module.params["storage_system_username"]
+ storage_system_password = module.params["storage_system_password"]
+ cpg_name = module.params["cpg_name"]
+ domain = module.params["domain"]
+ growth_increment = module.params["growth_increment"]
+ growth_limit = module.params["growth_limit"]
+ growth_warning = module.params["growth_warning"]
+ raid_type = module.params["raid_type"]
+ set_size = module.params["set_size"]
+ high_availability = module.params["high_availability"]
+ disk_type = module.params["disk_type"]
+ secure = module.params["secure"]
+
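+ # The array's WSAPI REST service is reached over HTTPS on port 8080 of the
+ # management IP; 'secure' decides whether the SSL certificate is validated.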
+ wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+ try:
+ client_obj = client.HPE3ParClient(wsapi_url, secure)
+ except exceptions.SSLCertFailed:
+ module.fail_json(msg="SSL Certificate Failed")
+ except exceptions.ConnectionError:
+ module.fail_json(msg="Connection Error")
+ except exceptions.UnsupportedVersion:
+ module.fail_json(msg="Unsupported WSAPI version")
+ except Exception as e:
+ module.fail_json(msg="Initializing client failed. %s" % e)
+
+ if storage_system_username is None or storage_system_password is None:
+ module.fail_json(msg="Storage system username or password is None")
+ if cpg_name is None:
+ module.fail_json(msg="CPG Name is None")
+
+ # States
+ if module.params["state"] == "present":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ elif module.params["state"] == "absent":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = delete_cpg(
+ client_obj,
+ cpg_name
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ if return_status:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py
new file mode 100644
index 00000000..8e2d19a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stackdriver.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to Stackdriver
+description:
+ - Send code deploy and annotation events to Stackdriver
+author: "Ben Whaley (@bwhaley)"
+options:
+ key:
+ type: str
+ description:
+ - API key.
+ required: true
+ event:
+ type: str
+ description:
+ - The type of event to send, either annotation or deploy
+ choices: ['annotation', 'deploy']
+ required: true
+ revision_id:
+ type: str
+ description:
+ - The revision of the code that was deployed. Required for deploy events
+ deployed_by:
+ type: str
+ description:
+ - The person or robot responsible for deploying the code
+ default: "Ansible"
+ deployed_to:
+ type: str
+ description:
+ - "The environment code was deployed to. (ie: development, staging, production)"
+ repository:
+ type: str
+ description:
+ - The repository (or project) deployed
+ msg:
+ type: str
+ description:
+ - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation events.
+ annotated_by:
+ type: str
+ description:
+ - The person or robot who the annotation should be attributed to.
+ default: "Ansible"
+ level:
+ type: str
+ description:
+ - One of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ default: 'INFO'
+ instance_id:
+ type: str
+ description:
+ - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown.
+ event_epoch:
+ type: str
+ description:
+ - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- name: Send a code deploy event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- name: Send an annotation event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
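+ """POST the JSON-encoded params to the given Stackdriver endpoint using the
+ API key header, and fail the module on any non-200 response."""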
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict( # @TODO add types
+ key=dict(required=True, no_log=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(), # @TODO int?
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception as e:
+ module.fail_json(msg="unable to sent deploy event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception as e:
+ module.fail_json(msg="unable to sent annotation event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py
new file mode 100644
index 00000000..372ba2df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/stacki_host.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host to/from a Stacki front-end
+description:
+ - Use this module to add or remove hosts to/from a Stacki front-end via its API.
+ - U(https://github.com/StackIQ/stacki)
+options:
+ name:
+ description:
+ - Name of the host to be added to Stacki.
+ required: True
+ type: str
+ stacki_user:
+ description:
+ - Username for authenticating with the Stacki API. If not
+ specified, the environment variable C(stacki_user) is used instead.
+ required: True
+ type: str
+ stacki_password:
+ description:
+ - Password for authenticating with the Stacki API. If not
+ specified, the environment variable C(stacki_password) is used instead.
+ required: True
+ type: str
+ stacki_endpoint:
+ description:
+ - URL for the Stacki API Endpoint.
+ required: True
+ type: str
+ prim_intf_mac:
+ description:
+ - MAC Address for the primary PXE boot network interface.
+ type: str
+ prim_intf_ip:
+ description:
+ - IP Address for the primary network interface.
+ type: str
+ prim_intf:
+ description:
+ - Name of the primary network interface.
+ type: str
+ force_install:
+ description:
+ - Set value to true to force the node into the install state if it already exists in Stacki.
+ type: bool
+ state:
+ description:
+ - Set value to the desired state for the specified host.
+ type: str
+ choices: [ absent, present ]
+author:
+- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ prim_intf_mac: mac_addr
+ prim_intf_ip: x.x.x.x
+ prim_intf: eth0
+
+- name: Remove a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ state: absent
+'''
+
+RETURN = '''
+changed:
+ description: Whether or not the API call completed successfully.
+ returned: always
+ type: bool
+ sample: true
+
+stdout:
+ description: the set of responses from the commands
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+ description: the value of stdout split into a list
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.hostname = module.params['name']
+ self.rack = module.params['rack']
+ self.rank = module.params['rank']
+ self.appliance = module.params['appliance']
+ self.prim_intf = module.params['prim_intf']
+ self.prim_intf_ip = module.params['prim_intf_ip']
+ self.network = module.params['network']
+ self.prim_intf_mac = module.params['prim_intf_mac']
+ self.endpoint = module.params['stacki_endpoint']
+
+ auth_creds = {'USERNAME': module.params['stacki_user'],
+ 'PASSWORD': module.params['stacki_password']}
+
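+ # Authenticating against the Stacki REST API is a two-step handshake: first
+ # GET the endpoint to pick up an initial CSRF token from the Set-Cookie
+ # header, then POST the credentials to <endpoint>/login to obtain the final
+ # CSRF token and session id used on every subsequent request.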
+ # Get Initial CSRF
+ cred_a = self.do_request(self.module, self.endpoint, method="GET")
+ cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+ init_csrftoken = None
+ for c in cookie_a:
+ if "csrftoken" in c:
+ init_csrftoken = c.replace("csrftoken=", "")
+ init_csrftoken = init_csrftoken.rstrip("\r\n")
+ break
+
+ # Make Header Dictionary with initial CSRF
+ header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+ 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+ # Endpoint to get final authentication header
+ login_endpoint = self.endpoint + "/login"
+
+ # Get Final CSRF and Session ID
+ login_req = self.do_request(self.module, login_endpoint, headers=header,
+ payload=urlencode(auth_creds), method='POST')
+
+ cookie_f = login_req.headers.get('Set-Cookie').split(';')
+ csrftoken = None
+ for f in cookie_f:
+ if "csrftoken" in f:
+ csrftoken = f.replace("csrftoken=", "")
+ if "sessionid" in f:
+ sessionid = f.split("sessionid=", 1)[-1]
+ sessionid = sessionid.rstrip("\r\n")
+
+ self.header = {'csrftoken': csrftoken,
+ 'X-CSRFToken': csrftoken,
+ 'sessionid': sessionid,
+ 'Content-type': 'application/json',
+ 'Cookie': login_req.headers.get('Set-Cookie')}
+
+ def do_request(self, module, url, payload=None, headers=None, method=None):
+ res, info = fetch_url(module, url, data=payload, headers=headers, method=method)
+
+ if info['status'] != 200:
+ self.module.fail_json(changed=False, msg=info['msg'])
+
+ return res
+
+ def stack_check_host(self):
+ res = self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+
+ if self.hostname in res.read():
+ return True
+ else:
+ return False
+
+ def stack_sync(self):
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+ self.do_request(self.module, self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+ def stack_force_install(self, result):
+ data = dict()
+ changed = False
+
+ data['cmd'] = "set host boot {0} action=install" \
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+ changed = True
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_add(self, result):
+ data = dict()
+ changed = False
+
+ data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+ .format(self.hostname, self.rack, self.rank, self.appliance)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_remove(self, result):
+ data = dict()
+
+ data['cmd'] = "remove host {0}"\
+ .format(self.hostname)
+ self.do_request(self.module, self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ rack=dict(type='int', default=0),
+ rank=dict(type='int', default=0),
+ appliance=dict(type='str', default='backend'),
+ prim_intf=dict(type='str'),
+ prim_intf_ip=dict(type='str'),
+ network=dict(type='str', default='private'),
+ prim_intf_mac=dict(type='str'),
+ stacki_user=dict(type='str', required=True, default=os.environ.get('stacki_user')),
+ stacki_password=dict(type='str', required=True, default=os.environ.get('stacki_password'), no_log=True),
+ stacki_endpoint=dict(type='str', required=True, default=os.environ.get('stacki_endpoint')),
+ force_install=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+ missing_params = list()
+
+ stacki = StackiHost(module)
+ host_exists = stacki.stack_check_host()
+
+ # If state is present, but host exists, need force_install flag to put host back into install state
+ if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+ stacki.stack_force_install(result)
+ # If state is present, but host exists, and force_install is false, do nothing
+ elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+ result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+ .format(module.params['name'])
+ # Otherwise, state is present, but host doesn't exist; more params are required to add the host
+ elif module.params['state'] == 'present' and not host_exists:
+ for param in ['appliance', 'prim_intf',
+ 'prim_intf_ip', 'network', 'prim_intf_mac']:
+ if not module.params[param]:
+ missing_params.append(param)
+ if len(missing_params) > 0: # @FIXME replace with required_if
+ module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+ stacki.stack_add(result)
+ # If state is absent, and host exists, let's remove it.
+ elif module.params['state'] == 'absent' and host_exists:
+ stacki.stack_remove(result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
new file mode 100644
index 00000000..0414f6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
@@ -0,0 +1,465 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+ - Creates a maintenance window for status.io
+ - Deletes a maintenance window for status.io
+notes:
+ - You can use the apiary API url (http://docs.statusio.apiary.io/) to
+ capture API traffic
+ - Use start_date and start_time with minutes to set a future maintenance window
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+options:
+ title:
+ type: str
+ description:
+ - A descriptive title for the maintenance window
+ default: "A new maintenance window"
+ desc:
+ type: str
+ description:
+ - Message describing the maintenance window
+ default: "Created by Ansible"
+ state:
+ type: str
+ description:
+ - Desired state of the maintenance window.
+ default: "present"
+ choices: ["present", "absent"]
+ api_id:
+ type: str
+ description:
+ - Your unique API ID from status.io
+ required: true
+ api_key:
+ type: str
+ description:
+ - Your unique API Key from status.io
+ required: true
+ statuspage:
+ type: str
+ description:
+ - Your unique StatusPage ID from status.io
+ required: true
+ url:
+ type: str
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ default: "https://api.status.io"
+ components:
+ type: list
+ description:
+ - The given name of your component (server name)
+ aliases: ['component']
+ containers:
+ type: list
+ description:
+ - The given name of your container (data center)
+ aliases: ['container']
+ all_infrastructure_affected:
+ description:
+ - If it affects all components and containers
+ type: bool
+ default: 'no'
+ automation:
+ description:
+ - Automatically start and end the maintenance window
+ type: bool
+ default: 'no'
+ maintenance_notify_now:
+ description:
+ - Notify subscribers now
+ type: bool
+ default: 'no'
+ maintenance_notify_72_hr:
+ description:
+ - Notify subscribers 72 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_24_hr:
+ description:
+ - Notify subscribers 24 hours before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_notify_1_hr:
+ description:
+ - Notify subscribers 1 hour before maintenance start time
+ type: bool
+ default: 'no'
+ maintenance_id:
+ type: str
+ description:
+ - The maintenance id number when deleting a maintenance window
+ minutes:
+ type: int
+ description:
+ - The length of time (in minutes) that the maintenance window will run
+ (starting from playbook runtime).
+ default: 10
+ start_date:
+ type: str
+ description:
+ - Date maintenance is expected to start (Month/Day/Year) (UTC)
+ - End Date is worked out from start_date + minutes
+ start_time:
+ type: str
+ description:
+ - Time maintenance is expected to start (Hour:Minutes) (UTC)
+ - End Time is worked out from start_time + minutes
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+ community.general.statusio_maintenance:
+ title: Router Upgrade from ansible
+ desc: Performing a Router Upgrade
+ components: server1.example.com
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+ community.general.statusio_maintenance:
+ title: Routine maintenance
+ desc: Some security updates
+ components:
+ - server1.example.com
+ - server2.example.com
+ minutes: 60
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: True
+ automation: True
+ delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+ community.general.statusio_maintenance:
+ title: Data center downtime
+ desc: Performing an upgrade to our data center
+ components: Primary Data Center
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ start_date: 01/01/2016
+ start_time: 12:00
+ minutes: 1440
+
+- name: Delete a maintenance window
+ community.general.statusio_maintenance:
+ title: Remove a maintenance window
+ maintenance_id: 561f90faf74bc94a4700087b
+ statuspage: statuspage_id
+ api_id: api_id
+ api_key: api_key
+ state: absent
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
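+ # Validate the credentials by listing the status page's components; on
+ # success the same headers are reused for all later API calls and the
+ # component list is returned so component/container ids can be resolved
+ # locally.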
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception as e:
+ return 1, None, None, to_native(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
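+ # Returns (rc, [start_date, start_time, end_date, end_time], error). With an
+ # explicit start_date/start_time the end is computed as start + minutes;
+ # otherwise the window starts now (UTC) and ends minutes later.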
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+ if minutes or start_time and start_date:
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, _, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py
new file mode 100644
index 00000000..dfac03ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/emc/emc_vnx_sg_member.py
@@ -0,0 +1,170 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: emc_vnx_sg_member
+
+short_description: Manage storage group member on EMC VNX
+
+
+description:
+ - "This module manages the members of an existing storage group."
+
+extends_documentation_fragment:
+- community.general.emc.emc_vnx
+
+
+options:
+ name:
+ description:
+ - Name of the Storage group to manage.
+ required: true
+ lunid:
+ description:
+ - LUN id to be added to or removed from the storage group.
+ required: true
+ state:
+ description:
+ - Indicates the desired lunid state.
+ - C(present) ensures specified lunid is present in the Storage Group.
+ - C(absent) ensures specified lunid is absent from Storage Group.
+ default: present
+ choices: [ "present", "absent"]
+
+
+author:
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+'''
+
+EXAMPLES = '''
+- name: Add lun to storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: present
+
+- name: Remove lun from storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: absent
+'''
+
+RETURN = '''
+hluid:
+ description: LUNID that hosts attached to the storage group will see.
+ type: int
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
+
+LIB_IMP_ERR = None
+try:
+ from storops import VNXSystem
+ from storops.exception import VNXCredentialError, VNXStorageGroupError, \
+ VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
+ HAS_LIB = True
+except Exception:
+ LIB_IMP_ERR = traceback.format_exc()
+ HAS_LIB = False
+
+
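+# The storops import above is attempted at module load time; any failure is
+# recorded and only reported from inside run_module() via
+# missing_required_lib(), so the module returns a clean JSON error instead of
+# a raw traceback.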
+def run_module():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ lunid=dict(type='int', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_args.update(emc_vnx_argument_spec)
+
+ result = dict(
+ changed=False,
+ hluid=None
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
+ exception=LIB_IMP_ERR)
+
+ sp_user = module.params['sp_user']
+ sp_address = module.params['sp_address']
+ sp_password = module.params['sp_password']
+ alu = module.params['lunid']
+
+ # if the user is working with this module in only check mode we do not
+ # want to make any changes to the environment, just return the current
+ # state with no modifications
+ if module.check_mode:
+ module.exit_json(**result)
+
+ try:
+ vnx = VNXSystem(sp_address, sp_user, sp_password)
+ sg = vnx.get_sg(module.params['name'])
+ if sg.existed:
+ if module.params['state'] == 'present':
+ if not sg.has_alu(alu):
+ try:
+ result['hluid'] = sg.attach_alu(alu)
+ result['changed'] = True
+ except VNXAluAlreadyAttachedError:
+ result['hluid'] = sg.get_hlu(alu)
+ except (VNXAttachAluError, VNXStorageGroupError) as e:
+ module.fail_json(msg='Error attaching {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ result['hluid'] = sg.get_hlu(alu)
+ if module.params['state'] == 'absent' and sg.has_alu(alu):
+ try:
+ sg.detach_alu(alu)
+ result['changed'] = True
+ except VNXDetachAluNotFoundError:
+ # being not attached when using absent is OK
+ pass
+ except VNXStorageGroupError as e:
+ module.fail_json(msg='Error detaching alu {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ module.fail_json(msg='No such storage group named '
+ '{0}'.format(module.params['name']),
+ **result)
+ except VNXCredentialError as e:
+ module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py
new file mode 100644
index 00000000..46306585
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_heal_info.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_heal_info
+short_description: Gather information on self-heal or rebalance status
+author: "Devyani Kota (@devyanikota)"
+description:
+ - Gather facts about either self-heal or rebalance status.
+ - This module was called C(gluster_heal_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.gluster_heal_info) module no longer returns C(ansible_facts)!
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ status_filter:
+ default: "self-heal"
+ choices: ["self-heal", "rebalance"]
+ description:
+ - Determines which facts are to be returned.
+ - If the C(status_filter) is C(self-heal), status of self-heal, along with the number of files still in process are returned.
+ - If the C(status_filter) is C(rebalance), rebalance status is returned.
+requirements:
+ - GlusterFS > 3.2
+'''
+
+EXAMPLES = '''
+- name: Gather self-heal facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: self-heal
+ register: self_heal_status
+- ansible.builtin.debug:
+ var: self_heal_status
+
+- name: Gather rebalance facts about all gluster hosts in the cluster
+ community.general.gluster_heal_info:
+ name: test_volume
+ status_filter: rebalance
+ register: rebalance_status
+- ansible.builtin.debug:
+ var: rebalance_status
+'''
+
+RETURN = '''
+name:
+ description: GlusterFS volume name
+ returned: always
+ type: str
+status_filter:
+ description: Whether self-heal or rebalance status is to be returned
+ returned: always
+ type: str
+heal_info:
+ description: List of files that still need healing process
+ returned: On success
+ type: list
+rebalance_status:
+ description: Status of rebalance operation
+ returned: On success
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from distutils.version import LooseVersion
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def get_self_heal_status(name):
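+ # Parse the output of `gluster volume heal <name> info`, which is expected
+ # to look roughly like this (illustrative sample, not verbatim output):
+ #   Brick node1:/bricks/brick1/b1
+ #   Status: Connected
+ #   Number of entries: 0
+ # and return one dict per brick with 'brick', 'status' and 'no_of_entries'.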
+ out = run_gluster(['volume', 'heal', name, 'info'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ heal_info = []
+ # return files that still need healing.
+ for line in raw_out:
+ if 'Brick' in line:
+ br_dict = {}
+ br_dict['brick'] = line.strip().strip("Brick")
+ elif 'Status' in line:
+ br_dict['status'] = line.split(":")[1].strip()
+ elif 'Number' in line:
+ br_dict['no_of_entries'] = line.split(":")[1].strip()
+ elif line.startswith('/') or line.startswith('<') or '\n' in line:
+ continue
+ else:
+ br_dict and heal_info.append(br_dict)
+ br_dict = {}
+ return heal_info
+
+
+def get_rebalance_status(name):
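+ # Parse the tabular output of `gluster volume rebalance <name> status` and
+ # return, per node, the node name, the number of rebalanced files, the
+ # failure count and the 'in progress'/'completed' status; header and
+ # separator rows are skipped.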
+ out = run_gluster(['volume', 'rebalance', name, 'status'], environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
+ raw_out = out.split("\n")
+ rebalance_status = []
+ # return the files that are either still 'in progress' state or 'completed'.
+ for line in raw_out:
+ line = " ".join(line.split())
+ line_vals = line.split(" ")
+ if line_vals[0].startswith('-') or line_vals[0].startswith('Node'):
+ continue
+ node_dict = {}
+ if len(line_vals) == 1 or len(line_vals) == 4:
+ continue
+ node_dict['node'] = line_vals[0]
+ node_dict['rebalanced_files'] = line_vals[1]
+ node_dict['failures'] = line_vals[4]
+ if 'in progress' in line:
+ node_dict['status'] = line_vals[5] + line_vals[6]
+ rebalance_status.append(node_dict)
+ elif 'completed' in line:
+ node_dict['status'] = line_vals[5]
+ rebalance_status.append(node_dict)
+ return rebalance_status
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+def main():
+ global module
+ global glusterbin
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ status_filter=dict(type='str', default='self-heal', choices=['self-heal', 'rebalance']),
+ ),
+ )
+ is_old_facts = module._name in ('gluster_heal_facts', 'community.general.gluster_heal_facts')
+ if is_old_facts:
+ module.deprecate("The 'gluster_heal_facts' module has been renamed to 'gluster_heal_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ glusterbin = module.get_bin_path('gluster', True)
+ required_version = "3.2"
+ status_filter = module.params['status_filter']
+ volume_name = module.params['name']
+ heal_info = ''
+ rebalance_status = ''
+
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+
+ try:
+ if status_filter == "self-heal":
+ heal_info = get_self_heal_status(volume_name)
+ elif status_filter == "rebalance":
+ rebalance_status = get_rebalance_status(volume_name)
+ except Exception as e:
+ module.fail_json(msg='Error retrieving status: %s' % e, exception=traceback.format_exc())
+
+ facts = {}
+ facts['glusterfs'] = {'volume': volume_name, 'status_filter': status_filter, 'heal_info': heal_info, 'rebalance': rebalance_status}
+
+ if is_old_facts:
+ module.exit_json(ansible_facts=facts)
+ else:
+ module.exit_json(**facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py
new file mode 100644
index 00000000..e9e6fd71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_peer.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
+# Copyright 2018 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gluster_peer
+short_description: Attach/Detach peers to/from the cluster
+description:
+ - Create or shrink a GlusterFS trusted storage pool. A set of nodes can be
+ added into an existing trusted storage pool or a new storage pool can be
+ formed, or nodes can be removed from an existing trusted storage pool.
+author: Sachidananda Urs (@sac)
+options:
+ state:
+ choices: ["present", "absent"]
+ default: "present"
+ description:
+ - Determines whether the nodes should be attached to the pool or
+ removed from the pool. If the state is present, nodes will be
+ attached to the pool. If state is absent, nodes will be detached
+ from the pool.
+ type: str
+ nodes:
+ description:
+ - List of nodes that have to be probed into the pool.
+ required: true
+ type: list
+ force:
+ type: bool
+ default: false
+ description:
+ - Applicable only while removing nodes from the pool. Gluster
+ will refuse to detach a node from the pool if any one of the nodes
+ is down; in such cases force can be used.
+requirements:
+ - GlusterFS > 3.2
+notes:
+ - This module does not support check mode.
+'''
+
+EXAMPLES = '''
+- name: Create a trusted storage pool
+ community.general.gluster_peer:
+ state: present
+ nodes:
+ - 10.0.1.5
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.1.10
+
+- name: Delete a node from the trusted storage pool by force
+ community.general.gluster_peer:
+ state: absent
+ nodes:
+ - 10.0.0.1
+ force: true
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from distutils.version import LooseVersion
+
+
+class Peer(object):
+ def __init__(self, module):
+ self.module = module
+ self.state = self.module.params['state']
+ self.nodes = self.module.params['nodes']
+ self.glustercmd = self.module.get_bin_path('gluster', True)
+ self.lang = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ self.action = ''
+ self.force = ''
+
+ def gluster_peer_ops(self):
+ if not self.nodes:
+ self.module.fail_json(msg="nodes list cannot be empty")
+ self.force = 'force' if self.module.params.get('force') else ''
+ if self.state == 'present':
+ self.nodes = self.get_to_be_probed_hosts(self.nodes)
+ self.action = 'probe'
+ # In case of peer probe, we do not need `force'
+ self.force = ''
+ else:
+ self.action = 'detach'
+ self.call_peer_commands()
+
+ def get_to_be_probed_hosts(self, hosts):
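+ # Ask `gluster pool list` which peers are already in the cluster and filter
+ # them (and localhost) out, so `peer probe` only runs for hosts that are not
+ # yet part of the pool.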
+ peercmd = [self.glustercmd, 'pool', 'list', '--mode=script']
+ rc, output, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ peers_in_cluster = [line.split('\t')[1].strip() for
+ line in filter(None, output.split('\n')[1:])]
+ try:
+ peers_in_cluster.remove('localhost')
+ except ValueError:
+ # It is ok not to have localhost in list
+ pass
+ hosts_to_be_probed = [host for host in hosts if host not in
+ peers_in_cluster]
+ return hosts_to_be_probed
+
+ def call_peer_commands(self):
+ result = {}
+ result['msg'] = ''
+ result['changed'] = False
+
+ for node in self.nodes:
+ peercmd = [self.glustercmd, 'peer', self.action, node, '--mode=script']
+ if self.force:
+ peercmd.append(self.force)
+ rc, out, err = self.module.run_command(peercmd,
+ environ_update=self.lang)
+ if rc:
+ result['rc'] = rc
+ result['msg'] = err
+ # Fail early, do not wait for the loop to finish
+ self.module.fail_json(**result)
+ else:
+ if 'already in peer' in out or \
+ 'localhost not needed' in out:
+ result['changed'] |= False
+ else:
+ result['changed'] = True
+ self.module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', required=False, default=False),
+ nodes=dict(type='list', required=True),
+ state=dict(type='str', choices=['absent', 'present'],
+ default='present'),
+ ),
+ supports_check_mode=False
+ )
+ pops = Peer(module)
+ required_version = "3.2"
+ # Verify if required GlusterFS version is installed
+ if is_invalid_gluster_version(module, required_version):
+ module.fail_json(msg="GlusterFS version > %s is required" %
+ required_version)
+ pops.gluster_peer_ops()
+
+
+def is_invalid_gluster_version(module, required_version):
+ cmd = module.get_bin_path('gluster', True) + ' --version'
+ result = module.run_command(cmd)
+ ver_line = result[1].split('\n')[0]
+ version = ver_line.split(' ')[1]
+ # If the installed version is less than 3.2, it is an invalid version
+ # return True
+ return LooseVersion(version) < LooseVersion(required_version)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py
new file mode 100644
index 00000000..d6444ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/glusterfs/gluster_volume.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Taneli Leppä <taneli@crasman.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gluster_volume
+short_description: Manage GlusterFS volumes
+description:
+ - Create, remove, start, stop and tune GlusterFS volumes
+options:
+ name:
+ description:
+ - The volume name.
+ required: true
+ aliases: ['volume']
+ state:
+ description:
+ - Use present/absent to ensure whether a volume exists or not.
+ Use started/stopped to control its availability.
+ required: true
+ choices: ['absent', 'present', 'started', 'stopped']
+ cluster:
+ description:
+ - List of hosts to use for probing and brick setup.
+ host:
+ description:
+ - Override local hostname (for peer probing purposes).
+ replicas:
+ description:
+ - Replica count for volume.
+ arbiters:
+ description:
+ - Arbiter count for volume.
+ stripes:
+ description:
+ - Stripe count for volume.
+ disperses:
+ description:
+ - Disperse count for volume.
+ redundancies:
+ description:
+ - Redundancy count for volume.
+ transport:
+ description:
+ - Transport type for volume.
+ default: tcp
+ choices: [ tcp, rdma, 'tcp,rdma' ]
+ bricks:
+ description:
+ - Brick paths on servers. Multiple brick paths can be separated by commas.
+ aliases: [ brick ]
+ start_on_create:
+ description:
+ - Controls whether the volume is started after creation or not.
+ type: bool
+ default: 'yes'
+ rebalance:
+ description:
+ - Controls whether the cluster is rebalanced after changes.
+ type: bool
+ default: 'no'
+ directory:
+ description:
+ - Directory for limit-usage.
+ options:
+ description:
+ - A dictionary/hash with options/settings for the volume.
+ quota:
+ description:
+ - Quota value for limit-usage (be sure to use 10.0MB instead of 10MB, see quota list).
+ force:
+ description:
+ - If the brick is being created in the root partition, the module will fail.
+ Set force to true to override this behaviour.
+ type: bool
+ default: false
+notes:
+ - Requires cli tools for GlusterFS on servers.
+ - Will add new bricks, but not remove them.
+author:
+- Taneli Leppä (@rosmo)
+'''
+
+EXAMPLES = """
+- name: Create gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ bricks: /bricks/brick1/g1
+ rebalance: yes
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Tune
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ performance.cache-size: 256MB
+
+- name: Set multiple options on GlusterFS volume
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ options:
+ { performance.cache-size: 128MB,
+ write-behind: 'off',
+ quick-read: 'on'
+ }
+
+- name: Start gluster volume
+ community.general.gluster_volume:
+ state: started
+ name: test1
+
+- name: Limit usage
+ community.general.gluster_volume:
+ state: present
+ name: test1
+ directory: /foo
+ quota: 20.0MB
+
+- name: Stop gluster volume
+ community.general.gluster_volume:
+ state: stopped
+ name: test1
+
+- name: Remove gluster volume
+ community.general.gluster_volume:
+ state: absent
+ name: test1
+
+- name: Create gluster volume with multiple bricks
+ community.general.gluster_volume:
+ state: present
+ name: test2
+ bricks: /bricks/brick1/g2,/bricks/brick2/g2
+ cluster:
+ - 192.0.2.10
+ - 192.0.2.11
+ run_once: true
+
+- name: Remove the bricks from gluster volume
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick1/b1,/bricks/brick2/b2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+
+- name: Reduce cluster configuration
+ community.general.gluster_volume:
+ state: present
+ name: testvol
+ bricks: /bricks/brick3/b1,/bricks/brick4/b2
+ replicas: 2
+ cluster:
+ - 10.70.42.85
+ force: true
+ run_once: true
+"""
+
+import re
+import socket
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+glusterbin = ''
+
+
+def run_gluster(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin, '--mode=script']
+ args.extend(gargs)
+ try:
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ module.fail_json(msg='error running gluster (%s) command (rc=%d): %s' %
+ (' '.join(args), rc, out or err), exception=traceback.format_exc())
+ except Exception as e:
+ module.fail_json(msg='error running gluster (%s) command: %s' % (' '.join(args),
+ to_native(e)), exception=traceback.format_exc())
+ return out
+
+
+def run_gluster_nofail(gargs, **kwargs):
+ global glusterbin
+ global module
+ args = [glusterbin]
+ args.extend(gargs)
+ rc, out, err = module.run_command(args, **kwargs)
+ if rc != 0:
+ return None
+ return out
+
+
+def get_peers():
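+ # Parse `gluster peer status` into a dict mapping each hostname (including
+ # any aliases listed under 'Other names:') to a [uuid, state] pair.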
+ out = run_gluster(['peer', 'status'])
+ peers = {}
+ hostname = None
+ uuid = None
+ state = None
+ shortNames = False
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'hostname':
+ hostname = value
+ shortNames = False
+ if key.lower() == 'uuid':
+ uuid = value
+ if key.lower() == 'state':
+ state = value
+ peers[hostname] = [uuid, state]
+ elif row.lower() == 'other names:':
+ shortNames = True
+ elif row != '' and shortNames is True:
+ peers[row] = [uuid, state]
+ elif row == '':
+ shortNames = False
+ return peers
+
+
+def get_volumes():
+ out = run_gluster(['volume', 'info'])
+
+ volumes = {}
+ volume = {}
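+ # Parse 'gluster volume info': 'key: value' lines describe the current volume,
+ # 'Brick<N>:' lines collect its bricks, keys containing a dot are volume
+ # options, and a separator line ends the block for that volume.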
+ for row in out.split('\n'):
+ if ': ' in row:
+ key, value = row.split(': ')
+ if key.lower() == 'volume name':
+ volume['name'] = value
+ volume['options'] = {}
+ volume['quota'] = False
+ if key.lower() == 'volume id':
+ volume['id'] = value
+ if key.lower() == 'status':
+ volume['status'] = value
+ if key.lower() == 'transport-type':
+ volume['transport'] = value
+ if value.lower().endswith(' (arbiter)'):
+ if 'arbiters' not in volume:
+ volume['arbiters'] = []
+ value = value[:-10]
+ volume['arbiters'].append(value)
+ elif key.lower() == 'number of bricks':
+ volume['replicas'] = value[-1:]
+ if key.lower() != 'bricks' and key.lower()[:5] == 'brick':
+ if 'bricks' not in volume:
+ volume['bricks'] = []
+ volume['bricks'].append(value)
+ # Volume options
+ if '.' in key:
+ if 'options' not in volume:
+ volume['options'] = {}
+ volume['options'][key] = value
+ if key == 'features.quota' and value == 'on':
+ volume['quota'] = True
+ else:
+ if row.lower() != 'bricks:' and row.lower() != 'options reconfigured:':
+ if len(volume) > 0:
+ volumes[volume['name']] = volume
+ volume = {}
+ return volumes
+
+
+def get_quotas(name, nofail):
+ quotas = {}
+ if nofail:
+ out = run_gluster_nofail(['volume', 'quota', name, 'list'])
+ if not out:
+ return quotas
+ else:
+ out = run_gluster(['volume', 'quota', name, 'list'])
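+ # In 'gluster volume quota <vol> list' output, rows starting with '/' are
+ # per-directory entries; the first column is the path and the second the
+ # configured limit.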
+ for row in out.split('\n'):
+ if row[:1] == '/':
+ q = re.split(r'\s+', row)
+ quotas[q[0]] = q[1]
+ return quotas
+
+
+def wait_for_peer(host):
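+ # Poll 'peer status' a few times (roughly four seconds in total) until the
+ # probed host reports 'Peer in Cluster'; otherwise give up and return False.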
+ for x in range(0, 4):
+ peers = get_peers()
+ if host in peers and peers[host][1].lower().find('peer in cluster') != -1:
+ return True
+ time.sleep(1)
+ return False
+
+
+def probe(host, myhostname):
+ global module
+ out = run_gluster(['peer', 'probe', host])
+ if out.find('localhost') == -1 and not wait_for_peer(host):
+ module.fail_json(msg='failed to probe peer %s on %s' % (host, myhostname))
+
+
+def probe_all_peers(hosts, peers, myhostname):
+ for host in hosts:
+ host = host.strip() # Clean up any extra space for exact comparison
+ if host not in peers:
+ probe(host, myhostname)
+
+
+def create_volume(name, stripe, replica, arbiter, disperse, redundancy, transport, hosts, bricks, force):
+ args = ['volume', 'create']
+ args.append(name)
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ if arbiter:
+ args.append('arbiter')
+ args.append(str(arbiter))
+ if disperse:
+ args.append('disperse')
+ args.append(str(disperse))
+ if redundancy:
+ args.append('redundancy')
+ args.append(str(redundancy))
+ args.append('transport')
+ args.append(transport)
+ for brick in bricks:
+ for host in hosts:
+ args.append(('%s:%s' % (host, brick)))
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def start_volume(name):
+ run_gluster(['volume', 'start', name])
+
+
+def stop_volume(name):
+ run_gluster(['volume', 'stop', name])
+
+
+def set_volume_option(name, option, parameter):
+ run_gluster(['volume', 'set', name, option, parameter])
+
+
+def add_bricks(name, new_bricks, stripe, replica, force):
+ args = ['volume', 'add-brick', name]
+ if stripe:
+ args.append('stripe')
+ args.append(str(stripe))
+ if replica:
+ args.append('replica')
+ args.append(str(replica))
+ args.extend(new_bricks)
+ if force:
+ args.append('force')
+ run_gluster(args)
+
+
+def remove_bricks(name, removed_bricks, force):
+ # max-tries=12 with default_interval=10 secs
+ max_tries = 12
+ retries = 0
+ success = False
+ args = ['volume', 'remove-brick', name]
+ args.extend(removed_bricks)
+ # create a copy of args to use for commit operation
+ args_c = args[:]
+ args.append('start')
+ run_gluster(args)
+ # remove-brick operation needs to be followed by commit operation.
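+ # The 'start' phase above migrates data off the bricks; the loop below polls
+ # 'remove-brick ... status' until it reports 'completed' and then issues the
+ # 'commit' to finalise the removal.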
+ if not force:
+ module.fail_json(msg="Force option is mandatory.")
+ else:
+ while retries < max_tries:
+ last_brick = removed_bricks[-1]
+ out = run_gluster(['volume', 'remove-brick', name, last_brick, 'status'])
+ for row in out.split('\n')[1:]:
+ if 'completed' in row:
+ # remove-brick successful, call commit operation.
+ args_c.append('commit')
+ out = run_gluster(args_c)
+ success = True
+ break
+ else:
+ time.sleep(10)
+ if success:
+ break
+ retries += 1
+ if not success:
+ # remove-brick still in process, needs to be committed after completion.
+ module.fail_json(msg="Exceeded number of tries, check remove-brick status.\n"
+ "Commit operation still needs to be performed once it completes.")
+
+
+def reduce_config(name, removed_bricks, replicas, force):
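+ # Refuse to shrink the replica count while self-heal entries are still pending,
+ # then run 'remove-brick ... replica <n> ... force' with the reduced count.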
+ out = run_gluster(['volume', 'heal', name, 'info'])
+ summary = out.split("\n")
+ for line in summary:
+ if 'Number' in line and int(line.split(":")[1].strip()) != 0:
+ module.fail_json(msg="Operation aborted, self-heal in progress.")
+ args = ['volume', 'remove-brick', name, 'replica', replicas]
+ args.extend(removed_bricks)
+ if force:
+ args.append('force')
+ else:
+ module.fail_json(msg="Force option is mandatory")
+ run_gluster(args)
+
+
+def do_rebalance(name):
+ run_gluster(['volume', 'rebalance', name, 'start'])
+
+
+def enable_quota(name):
+ run_gluster(['volume', 'quota', name, 'enable'])
+
+
+def set_quota(name, directory, value):
+ run_gluster(['volume', 'quota', name, 'limit-usage', directory, value])
+
+
+def main():
+ # MAIN
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['volume']),
+ state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
+ cluster=dict(type='list'),
+ host=dict(type='str'),
+ stripes=dict(type='int'),
+ replicas=dict(type='int'),
+ arbiters=dict(type='int'),
+ disperses=dict(type='int'),
+ redundancies=dict(type='int'),
+ transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
+ bricks=dict(type='str', aliases=['brick']),
+ start_on_create=dict(type='bool', default=True),
+ rebalance=dict(type='bool', default=False),
+ options=dict(type='dict', default={}),
+ quota=dict(type='str'),
+ directory=dict(type='str'),
+ force=dict(type='bool', default=False),
+ ),
+ )
+
+ global glusterbin
+ glusterbin = module.get_bin_path('gluster', True)
+
+ changed = False
+
+ action = module.params['state']
+ volume_name = module.params['name']
+ cluster = module.params['cluster']
+ brick_paths = module.params['bricks']
+ stripes = module.params['stripes']
+ replicas = module.params['replicas']
+ arbiters = module.params['arbiters']
+ disperses = module.params['disperses']
+ redundancies = module.params['redundancies']
+ transport = module.params['transport']
+ myhostname = module.params['host']
+ start_on_create = module.boolean(module.params['start_on_create'])
+ rebalance = module.boolean(module.params['rebalance'])
+ force = module.boolean(module.params['force'])
+
+ if not myhostname:
+ myhostname = socket.gethostname()
+
+ # Clean up if last element is empty. Consider that yml can look like this:
+ # cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
+ if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
+ cluster = cluster[0:-1]
+
+ if cluster is None:
+ cluster = []
+
+ if brick_paths is not None and "," in brick_paths:
+ brick_paths = brick_paths.split(",")
+ else:
+ brick_paths = [brick_paths]
+
+ options = module.params['options']
+ quota = module.params['quota']
+ directory = module.params['directory']
+
+ # get current state info
+ peers = get_peers()
+ volumes = get_volumes()
+ quotas = {}
+ if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
+ quotas = get_quotas(volume_name, True)
+
+ # do the work!
+ if action == 'absent':
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ run_gluster(['volume', 'delete', volume_name])
+ changed = True
+
+ if action == 'present':
+ probe_all_peers(cluster, peers, myhostname)
+
+ # create if it doesn't exist
+ if volume_name not in volumes:
+ create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
+ volumes = get_volumes()
+ changed = True
+
+ if volume_name in volumes:
+ if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
+ start_volume(volume_name)
+ changed = True
+
+ # switch bricks
+ new_bricks = []
+ removed_bricks = []
+ all_bricks = []
+ bricks_in_volume = volumes[volume_name]['bricks']
+
+ for node in cluster:
+ for brick_path in brick_paths:
+ brick = '%s:%s' % (node, brick_path)
+ all_bricks.append(brick)
+ if brick not in bricks_in_volume:
+ new_bricks.append(brick)
+
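+ # Only consider removing bricks when nothing new was requested and the
+ # requested brick set is smaller than what the volume currently has.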
+ if not new_bricks and len(all_bricks) > 0 and \
+ len(all_bricks) < len(bricks_in_volume):
+ for brick in bricks_in_volume:
+ if brick not in all_bricks:
+ removed_bricks.append(brick)
+
+ if new_bricks:
+ add_bricks(volume_name, new_bricks, stripes, replicas, force)
+ changed = True
+
+ if removed_bricks:
+ if replicas and int(replicas) < int(volumes[volume_name]['replicas']):
+ reduce_config(volume_name, removed_bricks, str(replicas), force)
+ else:
+ remove_bricks(volume_name, removed_bricks, force)
+ changed = True
+
+ # handle quotas
+ if quota:
+ if not volumes[volume_name]['quota']:
+ enable_quota(volume_name)
+ quotas = get_quotas(volume_name, False)
+ if directory not in quotas or quotas[directory] != quota:
+ set_quota(volume_name, directory, quota)
+ changed = True
+
+ # set options
+ for option in options.keys():
+ if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
+ set_volume_option(volume_name, option, options[option])
+ changed = True
+
+ else:
+ module.fail_json(msg='failed to create volume %s' % volume_name)
+
+ if action != 'absent' and volume_name not in volumes:
+ module.fail_json(msg='volume not found %s' % volume_name)
+
+ if action == 'started':
+ if volumes[volume_name]['status'].lower() != 'started':
+ start_volume(volume_name)
+ changed = True
+
+ if action == 'stopped':
+ if volumes[volume_name]['status'].lower() != 'stopped':
+ stop_volume(volume_name)
+ changed = True
+
+ if changed:
+ volumes = get_volumes()
+ if rebalance:
+ do_rebalance(volume_name)
+
+ facts = {}
+ facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}
+
+ module.exit_json(changed=changed, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py
new file mode 100644
index 00000000..04604c09
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/hpe3par/ss_3par_cpg.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+ - Farhan Nomani (@farhan7500)
+ - Gautham P Hegde (@gautamphegde)
+description:
+ - Create and delete CPGs on HPE 3PAR.
+module: ss_3par_cpg
+options:
+ cpg_name:
+ description:
+ - Name of the CPG.
+ type: str
+ required: true
+ disk_type:
+ choices:
+ - FC
+ - NL
+ - SSD
+ description:
+ - Specifies that physical disks must have the specified device type.
+ type: str
+ domain:
+ description:
+ - Specifies the name of the domain in which the object will reside.
+ type: str
+ growth_increment:
+ description:
+ - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+ created on each auto-grow operation.
+ type: str
+ growth_limit:
+ description:
+ - Specifies that the auto-grow operation is limited to the specified
+ storage amount, which sets the growth limit (in MiB, GiB or TiB).
+ type: str
+ growth_warning:
+ description:
+ - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when exceeded,
+ results in a warning alert.
+ type: str
+ high_availability:
+ choices:
+ - PORT
+ - CAGE
+ - MAG
+ description:
+ - Specifies that the layout must support the failure of one port pair,
+ one cage, or one magazine.
+ type: str
+ raid_type:
+ choices:
+ - R0
+ - R1
+ - R5
+ - R6
+ description:
+ - Specifies the RAID type for the logical disk.
+ type: str
+ set_size:
+ description:
+ - Specifies the set size in the number of chunklets.
+ type: int
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the specified CPG should exist or not.
+ required: true
+ type: str
+ secure:
+ description:
+ - Specifies whether the certificate needs to be validated while communicating.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.hpe3par
+
+'''
+
+
+EXAMPLES = r'''
+- name: Create CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: present
+ cpg_name: sample_cpg
+ domain: sample_domain
+ growth_increment: 32000 MiB
+ growth_limit: 64000 MiB
+ growth_warning: 48000 MiB
+ raid_type: R6
+ set_size: 8
+ high_availability: MAG
+ disk_type: FC
+ secure: no
+
+- name: Delete CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: absent
+ cpg_name: sample_cpg
+ secure: no
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+try:
+ from hpe3par_sdk import client
+ from hpe3parclient import exceptions
+ HAS_3PARCLIENT = True
+except ImportError:
+ HAS_3PARCLIENT = False
+
+
+def validate_set_size(raid_type, set_size):
+ if raid_type:
+ set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+ if set_size in set_size_array:
+ return True
+ return False
+
+
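+# Translate the symbolic RAID type (e.g. 'R6') and HA level (e.g. 'MAG') from the
+# module parameters into the numeric constants the hpe3par_sdk client expects,
+# using its RAID_MAP table and class attributes.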
+def cpg_ldlayout_map(ldlayout_dict):
+ if ldlayout_dict['RAIDType'] is not None and ldlayout_dict['RAIDType']:
+ ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
+ ldlayout_dict['RAIDType']]['raid_value']
+ if ldlayout_dict['HA'] is not None and ldlayout_dict['HA']:
+ ldlayout_dict['HA'] = getattr(
+ client.HPE3ParClient, ldlayout_dict['HA'])
+ return ldlayout_dict
+
+
+def create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type):
+ try:
+ if not validate_set_size(raid_type, set_size):
+ return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
+ if not client_obj.cpgExists(cpg_name):
+
+ disk_patterns = []
+ if disk_type:
+ disk_type = getattr(client.HPE3ParClient, disk_type)
+ disk_patterns = [{'diskType': disk_type}]
+ ld_layout = {
+ 'RAIDType': raid_type,
+ 'setSize': set_size,
+ 'HA': high_availability,
+ 'diskPatterns': disk_patterns}
+ ld_layout = cpg_ldlayout_map(ld_layout)
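+ # Assumption: hpe3par.convert_to_binary_multiple() turns the human-readable
+ # size strings (e.g. '32000 MiB') into the MiB integers the WSAPI expects;
+ # see the community.general hpe3par module_utils for the exact behaviour.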
+ if growth_increment is not None:
+ growth_increment = hpe3par.convert_to_binary_multiple(
+ growth_increment)
+ if growth_limit is not None:
+ growth_limit = hpe3par.convert_to_binary_multiple(
+ growth_limit)
+ if growth_warning is not None:
+ growth_warning = hpe3par.convert_to_binary_multiple(
+ growth_warning)
+ optional = {
+ 'domain': domain,
+ 'growthIncrementMiB': growth_increment,
+ 'growthLimitMiB': growth_limit,
+ 'usedLDWarningAlertMiB': growth_warning,
+ 'LDLayout': ld_layout}
+ client_obj.createCPG(cpg_name, optional)
+ else:
+ return (True, False, "CPG already present")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG creation failed | %s" % (e))
+ return (True, True, "Created CPG %s successfully." % cpg_name)
+
+
+def delete_cpg(
+ client_obj,
+ cpg_name):
+ try:
+ if client_obj.cpgExists(cpg_name):
+ client_obj.deleteCPG(cpg_name)
+ else:
+ return (True, False, "CPG does not exist")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG delete failed | %s" % e)
+ return (True, True, "Deleted CPG %s successfully." % cpg_name)
+
+
+def main():
+ module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+ if not HAS_3PARCLIENT:
+ module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+ if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+ module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+ storage_system_ip = module.params["storage_system_ip"]
+ storage_system_username = module.params["storage_system_username"]
+ storage_system_password = module.params["storage_system_password"]
+ cpg_name = module.params["cpg_name"]
+ domain = module.params["domain"]
+ growth_increment = module.params["growth_increment"]
+ growth_limit = module.params["growth_limit"]
+ growth_warning = module.params["growth_warning"]
+ raid_type = module.params["raid_type"]
+ set_size = module.params["set_size"]
+ high_availability = module.params["high_availability"]
+ disk_type = module.params["disk_type"]
+ secure = module.params["secure"]
+
+ wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+ try:
+ client_obj = client.HPE3ParClient(wsapi_url, secure)
+ except exceptions.SSLCertFailed:
+ module.fail_json(msg="SSL Certificate Failed")
+ except exceptions.ConnectionError:
+ module.fail_json(msg="Connection Error")
+ except exceptions.UnsupportedVersion:
+ module.fail_json(msg="Unsupported WSAPI version")
+ except Exception as e:
+ module.fail_json(msg="Initializing client failed. %s" % e)
+
+ if storage_system_username is None or storage_system_password is None:
+ module.fail_json(msg="Storage system username or password is None")
+ if cpg_name is None:
+ module.fail_json(msg="CPG Name is None")
+
+ # States
+ if module.params["state"] == "present":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ elif module.params["state"] == "absent":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = delete_cpg(
+ client_obj,
+ cpg_name
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG delete failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ if return_status:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py
new file mode 100644
index 00000000..29690497
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_domain.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module can be used to add domains to or remove them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ domain:
+ description:
+ - Name of the domain to be managed.
+ required: true
+ state:
+ description:
+ - The desired state of the domain.
+ default: "present"
+ choices: [ "present", "absent" ]
+ ldap_id:
+ description:
+ - LDAP ID to add to the domain.
+ required: false
+ size:
+ description:
+ - Size of the domain.
+ required: false
+ hard_capacity:
+ description:
+ - Hard capacity of the domain.
+ required: false
+ soft_capacity:
+ description:
+ - Soft capacity of the domain.
+ required: false
+ max_cgs:
+ description:
+ - Number of max cgs.
+ required: false
+ max_dms:
+ description:
+ - Number of max dms.
+ required: false
+ max_mirrors:
+ description:
+ - Number of max_mirrors.
+ required: false
+ max_pools:
+ description:
+ - Number of max_pools.
+ required: false
+ max_volumes:
+ description:
+ - Number of max_volumes.
+ required: false
+ perf_class:
+ description:
+ - Add the domain to a performance class.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ size: domain_size
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+msg:
+ description: module return status.
+ returned: as needed
+ type: str
+ sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ domain=dict(required=True),
+ size=dict(),
+ max_dms=dict(),
+ max_cgs=dict(),
+ ldap_id=dict(),
+ max_mirrors=dict(),
+ max_pools=dict(),
+ max_volumes=dict(),
+ perf_class=dict(),
+ hard_capacity=dict(),
+ soft_capacity=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ domain = xcli_client.cmd.domain_list(
+ domain=module.params['domain']).as_single_element
+ state = module.params['state']
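+ # Common pattern for the ibm_sa_* modules: look the object up with pyxcli,
+ # then let execute_pyxcli_command() build and run the XCLI command
+ # (here domain_create/domain_delete) from the remaining module parameters.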
+
+ state_changed = False
+ msg = 'Domain \'{0}\''.format(module.params['domain'])
+ if state == 'present' and not domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_create', xcli_client)
+ msg += " created successfully."
+ elif state == 'absent' and domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_delete', xcli_client)
+ msg += " deleted successfully."
+ else:
+ msg += " state unchanged."
+
+ module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py
new file mode 100644
index 00000000..5ce12992
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ cluster:
+ description:
+ - The name of the cluster to include the host.
+ required: false
+ domain:
+ description:
+ - The domains the host will be attached to.
+ To include more than one domain,
+ separate domain names with commas.
+ To include all existing domains, use an asterisk ("*").
+ required: false
+ iscsi_chap_name:
+ description:
+ - The host's CHAP name identifier.
+ required: false
+ iscsi_chap_secret:
+ description:
+ - The password of the initiator used to
+ authenticate to the system when CHAP is enabled.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ cluster=dict(),
+ domain=dict(),
+ iscsi_chap_name=dict(),
+ iscsi_chap_secret=dict(no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ host = xcli_client.cmd.host_list(
+ host=module.params['host']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_define', xcli_client)
+ elif state == 'absent' and host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py
new file mode 100644
index 00000000..981bc553
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_host_ports.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host_ports
+short_description: Add host ports on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module adds ports to or removes them from the hosts
+ on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ state:
+ description:
+ - Host ports state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ iscsi_name:
+ description:
+ - iSCSI initiator name.
+ required: false
+ fcaddress:
+ description:
+ - Fibre Channel address.
+ required: false
+ num_of_visible_targets:
+ description:
+ - Number of visible targets.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Add ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Remove ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+ spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ iscsi_name=dict(),
+ fcaddress=dict(),
+ num_of_visible_targets=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ ports = []
+ try:
+ ports = xcli_client.cmd.host_list_ports(
+ host=module.params.get('host')).as_list
+ except Exception:
+ pass
+ state = module.params['state']
+ port_exists = False
+ ports = [port.get('port_name') for port in ports]
+
+ fc_ports = (module.params.get('fcaddress')
+ if module.params.get('fcaddress') else [])
+ iscsi_ports = (module.params.get('iscsi_name')
+ if module.params.get('iscsi_name') else [])
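+ # Note: fcaddress/iscsi_name are single strings, so the membership test below
+ # is effectively a substring match of each existing port name against the
+ # requested identifier.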
+ for port in ports:
+ if port in iscsi_ports or port in fc_ports:
+ port_exists = True
+ break
+ state_changed = False
+ if state == 'present' and not port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_add_port', xcli_client)
+ if state == 'absent' and port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_remove_port', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py
new file mode 100644
index 00000000..812904eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_pool.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ pool:
+ description:
+ - Pool name.
+ required: true
+ state:
+ description:
+ - Pool state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Pool size in GB.
+ required: false
+ snapshot_size:
+ description:
+ - Pool snapshot size in GB.
+ required: false
+ domain:
+ description:
+ - Adds the pool to the specified domain.
+ required: false
+ perf_class:
+ description:
+ - Assigns a perf_class to the pool.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ size: 300
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete pool.
+ community.general.ibm_sa_pool:
+ name: pool_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+ snapshot_size=dict(),
+ domain=dict(),
+ perf_class=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ pool = xcli_client.cmd.pool_list(
+ pool=module.params['pool']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_create', xcli_client)
+ if state == 'absent' and pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py
new file mode 100644
index 00000000..bf578cee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ pool:
+ description:
+ - Volume pool.
+ required: false
+ state:
+ description:
+ - Volume state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ size:
+ description:
+ - Volume size.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ pool: pool_name
+ size: 17
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete an existing volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ pool=dict(),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ volume = xcli_client.cmd.vol_list(
+ vol=module.params.get('vol')).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_create', xcli_client)
+ elif state == 'absent' and volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py
new file mode 100644
index 00000000..f1f5a807
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/ibm/ibm_sa_vol_map.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol_map
+short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems.
+
+description:
+ - "This module maps volumes to or unmaps them from the hosts on
+ IBM Spectrum Accelerate Family storage systems."
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ state:
+ default: "present"
+ choices: [ "present", "absent" ]
+ description:
+ - When the state is present, the volume is mapped.
+ When the state is absent, the volume is unmapped.
+
+ cluster:
+ description:
+ - Maps the volume to a cluster.
+ required: false
+ host:
+ description:
+ - Maps the volume to a host.
+ required: false
+ lun:
+ description:
+ - The LUN identifier.
+ required: false
+ override:
+ description:
+ - Overrides the existing volume mapping.
+ required: false
+
+extends_documentation_fragment:
+- community.general.ibm_storage
+
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Map volume to host.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Map volume to cluster.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ cluster: cluster_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Unmap volume.
+ community.general.ibm_sa_vol_map:
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ lun=dict(),
+ cluster=dict(),
+ host=dict(),
+ override=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # required args
+ mapping = False
+ try:
+ mapped_hosts = xcli_client.cmd.vol_mapping_list(
+ vol=module.params.get('vol')).as_list
+ for host in mapped_hosts:
+ if host['host'] == module.params.get("host", ""):
+ mapping = True
+ except Exception:
+ pass
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not mapping:
+ state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+ if state == 'absent' and mapping:
+ state_changed = execute_pyxcli_command(
+ module, 'unmap_vol', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py
new file mode 100644
index 00000000..f82bd7ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_aggregate.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_aggregate
+
+short_description: Manage NetApp cDOT aggregates.
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_aggregate) instead.
+
+description:
+- Create or destroy aggregates on NetApp cDOT.
+
+options:
+
+ state:
+ required: true
+ description:
+ - Whether the specified aggregate should exist or not.
+ choices: ['present', 'absent']
+
+ name:
+ required: true
+ description:
+ - The name of the aggregate to manage.
+
+ disk_count:
+ description:
+ - Number of disks to place into the aggregate, including parity disks.
+ - The disks in this newly-created aggregate come from the spare disk pool.
+ - The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
+ - Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
+ - Required when C(state=present).
+
+'''
+
+EXAMPLES = """
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ disk_count: 1
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Manage Aggregates
+ community.general.na_cdot_aggregate:
+ state: present
+ name: ansibleAggr
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTAggregate(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ disk_count=dict(required=False, type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['disk_count'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.disk_count = p['disk_count']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_aggr(self):
+ """
+ Checks if aggregate exists.
+
+ :return:
+ True if aggregate found
+ False if aggregate is not found
+ :rtype: bool
+ """
+
+ aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-attributes', **{'aggregate-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ aggr_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(aggr_get_iter,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 13040 denotes an aggregate not being found.
+ if to_native(e.code) == "13040":
+ return False
+ else:
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_aggr(self):
+ aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-create', **{'aggregate': self.name,
+ 'disk-count': str(self.disk_count)})
+
+ try:
+ self.server.invoke_successfully(aggr_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_aggr(self):
+ aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-destroy', **{'aggregate': self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_destroy,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_aggregate(self):
+ aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'aggr-rename', **{'aggregate': self.name,
+ 'new-aggregate-name':
+ self.name})
+
+ try:
+ self.server.invoke_successfully(aggr_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ aggregate_exists = self.get_aggr()
+ rename_aggregate = False
+
+ # check if anything needs to be changed (add/delete/update)
+
+ if aggregate_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
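+ # Note: the comparison below checks self.name against itself and can never
+ # be true, so rename_aggregate is effectively never triggered; the module
+ # exposes no separate parameter for a new aggregate name.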
+ if self.name is not None and not self.name == self.name:
+ rename_aggregate = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ # Aggregate does not exist, but requested state is present.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not aggregate_exists:
+ self.create_aggr()
+
+ else:
+ if rename_aggregate:
+ self.rename_aggregate()
+
+ elif self.state == 'absent':
+ self.delete_aggr()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTAggregate()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py
new file mode 100644
index 00000000..36c5416a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_license.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_license
+
+short_description: Manage NetApp cDOT protocol and feature licenses
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_license) instead.
+
+description:
+- Add or remove licenses on NetApp ONTAP.
+
+options:
+
+ remove_unused:
+ description:
+ - Remove licenses that have no controller affiliation in the cluster.
+ type: bool
+ default: false
+
+ remove_expired:
+ description:
+ - Remove licenses that have expired in the cluster.
+ type: bool
+ default: false
+
+ serial_number:
+ description:
+ - Serial number of the node associated with the license.
+ - This parameter is used primarily when removing a license for a specific service.
+ - If this parameter is not provided, the cluster serial number is used by default.
+
+ licenses:
+ description:
+ - List of licenses to add or remove.
+ - Please note that trying to remove a non-existent license will throw an error.
+ suboptions:
+ base:
+ description:
+ - Cluster Base License
+ nfs:
+ description:
+ - NFS License
+ cifs:
+ description:
+ - CIFS License
+ iscsi:
+ description:
+ - iSCSI License
+ fcp:
+ description:
+ - FCP License
+ cdmi:
+ description:
+ - CDMI License
+ snaprestore:
+ description:
+ - SnapRestore License
+ snapmirror:
+ description:
+ - SnapMirror License
+ flexclone:
+ description:
+ - FlexClone License
+ snapvault:
+ description:
+ - SnapVault License
+ snaplock:
+ description:
+ - SnapLock License
+ snapmanagersuite:
+ description:
+ - SnapManagerSuite License
+ snapprotectapps:
+ description:
+ - SnapProtectApp License
+ v_storageattach:
+ description:
+ - Virtual Attached Storage License
+
+'''
+
+
+EXAMPLES = """
+- name: Add licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ serial_number: #################
+ licenses:
+ nfs: #################
+ cifs: #################
+ iscsi: #################
+ fcp: #################
+ snaprestore: #################
+ flexclone: #################
+
+- name: Remove licenses
+ community.general.na_cdot_license:
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ remove_unused: false
+ remove_expired: true
+ serial_number: #################
+ licenses:
+ nfs: remove
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLicense(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ serial_number=dict(required=False, type='str', default=None),
+ remove_unused=dict(default=False, type='bool'),
+ remove_expired=dict(default=False, type='bool'),
+ licenses=dict(default=False, type='dict'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=False
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.serial_number = p['serial_number']
+ self.remove_unused = p['remove_unused']
+ self.remove_expired = p['remove_expired']
+ self.licenses = p['licenses']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_licensing_status(self):
+ """
+ Check licensing status
+
+ :return: package (key) and licensing status (value)
+ :rtype: dict
+ """
+ license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
+ result = None
+ try:
+ result = self.server.invoke_successfully(license_status,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error checking license status: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ return_dictionary = {}
+ license_v2_status = result.get_child_by_name('license-v2-status')
+ if license_v2_status:
+ for license_v2_status_info in license_v2_status.get_children():
+ package = license_v2_status_info.get_child_content('package')
+ status = license_v2_status_info.get_child_content('method')
+ return_dictionary[package] = status
+
+ return return_dictionary
+
+ def remove_licenses(self, remove_list):
+ """
+ Remove requested licenses
+ :param:
+ remove_list : List of packages to remove
+
+ """
+ license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
+ for package in remove_list:
+ license_delete.add_new_child('package', package)
+
+ if self.serial_number is not None:
+ license_delete.add_new_child('serial-number', self.serial_number)
+
+ try:
+ self.server.invoke_successfully(license_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing license %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_unused_licenses(self):
+ """
+ Remove unused licenses
+ """
+ remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
+ try:
+ self.server.invoke_successfully(remove_unused,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing unused licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def remove_expired_licenses(self):
+ """
+ Remove expired licenses
+ """
+ remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
+ try:
+ self.server.invoke_successfully(remove_expired,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error removing expired licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def update_licenses(self):
+ """
+ Update licenses
+ """
+ # Remove unused and expired licenses, if requested.
+ if self.remove_unused:
+ self.remove_unused_licenses()
+
+ if self.remove_expired:
+ self.remove_expired_licenses()
+
+ # Next, add/remove specific requested licenses.
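+ # In the 'licenses' dict a value of 'remove' marks that package for deletion,
+ # while any other non-empty value is treated as a license code to install.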
+ license_add = netapp_utils.zapi.NaElement('license-v2-add')
+ codes = netapp_utils.zapi.NaElement('codes')
+ remove_list = []
+ for key, value in self.licenses.items():
+ str_value = str(value)
+ # Make sure license is not an empty string.
+ if str_value and str_value.strip():
+ if str_value.lower() == 'remove':
+ remove_list.append(str(key).lower())
+ else:
+ codes.add_new_child('license-code-v2', str_value)
+
+ # Remove requested licenses.
+ if len(remove_list) != 0:
+ self.remove_licenses(remove_list)
+
+ # Add requested licenses
+ if len(codes.get_children()) != 0:
+ license_add.add_child_elem(codes)
+ try:
+ self.server.invoke_successfully(license_add,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error adding licenses: %s" %
+ to_native(e), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ # Add / Update licenses.
+ license_status = self.get_licensing_status()
+ self.update_licenses()
+ new_license_status = self.get_licensing_status()
+
+ if license_status != new_license_status:
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLicense()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py
new file mode 100644
index 00000000..3236dbee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_lun.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_lun
+
+short_description: Manage NetApp cDOT luns
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_lun) instead.
+
+description:
+- Create, destroy, resize luns on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified lun should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the lun to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the lun should exist on.
+ - Required when C(state=present).
+
+ size:
+ description:
+ - The size of the lun in C(size_unit).
+ - Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ force_resize:
+ description:
+ - Forcibly reduce the size. This is required when reducing the size of the LUN, to avoid accidentally shrinking it.
+ default: false
+
+ force_remove:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed if it is online and mapped.
+ - If "false", destroying an online and mapped LUN will fail.
+ default: false
+
+ force_remove_fenced:
+ description:
+ - If "true", override checks that prevent a LUN from being destroyed while it is fenced.
+ - If "false", attempting to destroy a fenced LUN will fail.
+ - The default if not specified is "false". This field is available in Data ONTAP 8.2 and later.
+ default: false
+
+ vserver:
+ required: true
+ description:
+ - The name of the vserver to use.
+
+'''
+
+EXAMPLES = """
+- name: Create LUN
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: mb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Resize Lun
+ community.general.na_cdot_lun:
+ state: present
+ name: ansibleLUN
+ force_resize: True
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ size: 5
+ size_unit: gb
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTLUN(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ force_resize=dict(default=False, type='bool'),
+ force_remove=dict(default=False, type='bool'),
+ force_remove_fenced=dict(default=False, type='bool'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.size_unit = p['size_unit']
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.force_resize = p['force_resize']
+ self.force_remove = p['force_remove']
+ self.force_remove_fenced = p['force_remove_fenced']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_lun(self):
+ """
+ Return details about the LUN
+
+ :return: Details about the lun
+ :rtype: dict
+ """
+
+ luns = []
+ tag = None
+ while True:
+ lun_info = netapp_utils.zapi.NaElement('lun-get-iter')
+ if tag:
+ lun_info.add_new_child('tag', tag, True)
+
+ query_details = netapp_utils.zapi.NaElement('lun-info')
+ query_details.add_new_child('vserver', self.vserver)
+ query_details.add_new_child('volume', self.flexvol_name)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+
+ lun_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(lun_info, True)
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ attr_list = result.get_child_by_name('attributes-list')
+ luns.extend(attr_list.get_children())
+
+ tag = result.get_child_content('next-tag')
+
+ if tag is None:
+ break
+
+ # The LUNs have been extracted.
+ # Find the specified lun and extract details.
+ return_value = None
+ for lun in luns:
+ path = lun.get_child_content('path')
+ _rest, _splitter, found_name = path.rpartition('/')
+
+ if found_name == self.name:
+ size = lun.get_child_content('size')
+
+ # Find out if the lun is attached
+ attached_to = None
+ lun_id = None
+ if lun.get_child_content('mapped') == 'true':
+ lun_map_list = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-map-list-info', **{'path': path})
+
+ result = self.server.invoke_successfully(
+ lun_map_list, enable_tunneling=True)
+
+ igroups = result.get_child_by_name('initiator-groups')
+ if igroups:
+ for igroup_info in igroups.get_children():
+ igroup = igroup_info.get_child_content(
+ 'initiator-group-name')
+ attached_to = igroup
+ lun_id = igroup_info.get_child_content('lun-id')
+
+ return_value = {
+ 'name': found_name,
+ 'size': size,
+ 'attached_to': attached_to,
+ 'lun_id': lun_id
+ }
+ else:
+ continue
+
+ return return_value
+
+ def create_lun(self):
+ """
+ Create LUN with requested name and size
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ lun_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-create-by-size', **{'path': path,
+ 'size': str(self.size),
+ 'ostype': 'linux'})
+
+ try:
+ self.server.invoke_successfully(lun_create, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning lun %s of size %s: %s" % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_lun(self):
+ """
+ Delete requested LUN
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-destroy', **{'path': path,
+ 'force': str(self.force_remove),
+ 'destroy-fenced-lun':
+ str(self.force_remove_fenced)})
+
+ try:
+ self.server.invoke_successfully(lun_delete, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_lun(self):
+ """
+ Resize requested LUN.
+
+ :return: True if the LUN was actually resized, False otherwise.
+ :rtype: bool
+ """
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+
+ lun_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'lun-resize', **{'path': path,
+ 'size': str(self.size),
+ 'force': str(self.force_resize)})
+ try:
+ self.server.invoke_successfully(lun_resize, enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == "9042":
+ # Error 9042 denotes the new LUN size being the same as the
+ # old LUN size. This happens when there's barely any difference
+ # in the two sizes. For example, from 8388608 bytes to
+ # 8194304 bytes. This should go away if/when the default size
+ # requested/reported to/from the controller is changed to a
+ # larger unit (MB/GB/TB).
+ return False
+ else:
+ self.module.fail_json(msg="Error resizing lun %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return True
+
+ def apply(self):
+ property_changed = False
+ multiple_properties_changed = False
+ size_changed = False
+ lun_exists = False
+ lun_detail = self.get_lun()
+
+ if lun_detail:
+ lun_exists = True
+ current_size = lun_detail['size']
+
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if not int(current_size) == self.size:
+ size_changed = True
+ property_changed = True
+
+ else:
+ if self.state == 'present':
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not lun_exists:
+ self.create_lun()
+
+ else:
+ if size_changed:
+ # Ensure that size was actually changed. Please
+ # read notes in 'resize_lun' function for details.
+ size_changed = self.resize_lun()
+ if not size_changed and not \
+ multiple_properties_changed:
+ property_changed = False
+
+ elif self.state == 'absent':
+ self.delete_lun()
+
+ changed = property_changed or size_changed
+ # TODO: include other details about the lun (size, etc.)
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTLUN()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
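As a side note on the size handling in na_cdot_lun above: the module converts C(size)/C(size_unit) into bytes before comparing against the size reported by the controller. A minimal standalone sketch of that arithmetic (illustrative only; the map mirrors _size_unit_map above and is not part of the patch):

# Illustrative sketch of the byte conversion used by na_cdot_lun (mirrors
# _size_unit_map and the size comparison in apply(); not part of the patch).
SIZE_UNIT_MAP = {
    'bytes': 1, 'b': 1, 'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3,
    'tb': 1024 ** 4, 'pb': 1024 ** 5, 'eb': 1024 ** 6, 'zb': 1024 ** 7, 'yb': 1024 ** 8,
}

def requested_bytes(size, size_unit='gb'):
    """Convert the module's size/size_unit pair into bytes."""
    return size * SIZE_UNIT_MAP[size_unit]

def needs_resize(current_size, size, size_unit='gb'):
    """True when the controller-reported size differs from the requested size."""
    return int(current_size) != requested_bytes(size, size_unit)

# Example: a 5 mb request against a LUN currently reported as 5242880 bytes.
assert requested_bytes(5, 'mb') == 5242880
assert needs_resize('5242880', 5, 'mb') is False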
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py
new file mode 100644
index 00000000..9f7ce60d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_qtree.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_qtree
+
+short_description: Manage qtrees
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_qtree) instead.
+
+description:
+- Create or destroy Qtrees.
+
+options:
+
+ state:
+ description:
+ - Whether the specified Qtree should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the Qtree to manage.
+ required: true
+
+ flexvol_name:
+ description:
+ - The name of the FlexVol the Qtree should exist on. Required when C(state=present).
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+- name: Create QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+- name: Rename QTree
+ community.general.na_cdot_qtree:
+ state: present
+ name: ansibleQTree
+ flexvol_name: ansibleVolume
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTQTree(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ flexvol_name=dict(type='str'),
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['flexvol_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.flexvol_name = p['flexvol_name']
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_qtree(self):
+ """
+ Checks if the qtree exists.
+
+ :return:
+ True if qtree found
+ False if qtree is not found
+ :rtype: bool
+ """
+
+ qtree_list_iter = netapp_utils.zapi.NaElement('qtree-list-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-info', **{'vserver': self.vserver,
+ 'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ qtree_list_iter.add_child_elem(query)
+
+ result = self.server.invoke_successfully(qtree_list_iter,
+ enable_tunneling=True)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_qtree(self):
+ qtree_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-create', **{'volume': self.flexvol_name,
+ 'qtree': self.name})
+
+ try:
+ self.server.invoke_successfully(qtree_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error provisioning qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-delete', **{'qtree': path})
+
+ try:
+ self.server.invoke_successfully(qtree_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error deleting qtree %s: %s" % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_qtree(self):
+ path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ new_path = '/vol/%s/%s' % (self.flexvol_name, self.name)
+ qtree_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'qtree-rename', **{'qtree': path,
+ 'new-qtree-name': new_path})
+
+ try:
+ self.server.invoke_successfully(qtree_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg="Error renaming qtree %s: %s" % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ qtree_exists = False
+ rename_qtree = False
+ qtree_detail = self.get_qtree()
+
+ if qtree_detail:
+ qtree_exists = True
+
+ if self.state == 'absent':
+ # Qtree exists, but requested state is 'absent'.
+ changed = True
+
+ elif self.state == 'present':
+ if self.name is not None and not self.name == \
+ self.name:
+ changed = True
+ rename_qtree = True
+
+ else:
+ if self.state == 'present':
+ # Qtree does not exist, but requested state is 'present'.
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not qtree_exists:
+ self.create_qtree()
+
+ else:
+ if rename_qtree:
+ self.rename_qtree()
+
+ elif self.state == 'absent':
+ self.delete_qtree()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTQTree()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
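All of the cDOT modules in this patch follow the same check-mode aware present/absent flow in their apply() methods: probe the current state, decide whether a change is needed, act only outside check mode, and report C(changed). A compressed, illustrative distillation of that pattern (simplified; not actual module code):

# Illustrative distillation of the present/absent idempotency pattern used by
# the apply() methods above (simplified; not actual module code).
def apply_state(state, exists, check_mode, create, delete):
    """Return True when a change is (or would be) made for the requested state."""
    changed = (state == 'present') != exists
    if changed and not check_mode:
        if state == 'present':
            create()
        else:
            delete()
    return changed

# Example: resource missing, state=present, not in check mode -> create() runs.
created = []
print(apply_state('present', False, False, lambda: created.append('x'), lambda: None))  # True
print(created)  # ['x']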
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py
new file mode 100644
index 00000000..0227a014
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_svm.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_svm
+
+short_description: Manage NetApp cDOT SVMs
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_svm) instead.
+
+description:
+- Create or destroy SVMs on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified SVM should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the SVM to manage.
+ required: true
+
+ root_volume:
+ description:
+ - Root volume of the SVM. Required when C(state=present).
+
+ root_volume_aggregate:
+ description:
+ - The aggregate on which the root volume will be created.
+ - Required when C(state=present).
+
+ root_volume_security_style:
+ description:
+ - Security style of the root volume.
+ - When specified as part of the vserver-create call, this field represents the security style for the Vserver root volume.
+ - When specified as part of the vserver-get-iter call, this will return the list of matching Vservers.
+ - Valid options are C(unix) for NFS, C(ntfs) for CIFS, C(mixed) for Mixed, and C(unified) for Unified.
+ - The C(unified) security style, which applies only to Infinite Volumes, cannot be applied to a Vserver's root volume.
+ - Required when C(state=present).
+ choices: ['unix', 'ntfs', 'mixed', 'unified']
+
+'''
+
+EXAMPLES = """
+
+ - name: Create SVM
+ community.general.na_cdot_svm:
+ state: present
+ name: ansibleVServer
+ root_volume: vol1
+ root_volume_aggregate: aggr1
+ root_volume_security_style: mixed
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTSVM(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ root_volume=dict(type='str'),
+ root_volume_aggregate=dict(type='str'),
+ root_volume_security_style=dict(type='str', choices=['unix',
+ 'ntfs',
+ 'mixed',
+ 'unified'
+ ]),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['root_volume',
+ 'root_volume_aggregate',
+ 'root_volume_security_style'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.root_volume = p['root_volume']
+ self.root_volume_aggregate = p['root_volume_aggregate']
+ self.root_volume_security_style = p['root_volume_security_style']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_vserver(self):
+ """
+ Checks if vserver exists.
+
+ :return:
+ True if vserver found
+ False if vserver is not found
+ :rtype: bool
+ """
+
+ vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-info', **{'vserver-name': self.name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ vserver_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(vserver_info,
+ enable_tunneling=False)
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+
+ """
+ TODO:
+ Return more relevant parameters about vserver that can
+ be updated by the playbook.
+ """
+ return True
+ else:
+ return False
+
+ def create_vserver(self):
+ vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-create', **{'vserver-name': self.name,
+ 'root-volume': self.root_volume,
+ 'root-volume-aggregate':
+ self.root_volume_aggregate,
+ 'root-volume-security-style':
+ self.root_volume_security_style
+ })
+
+ try:
+ self.server.invoke_successfully(vserver_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_vserver(self):
+ vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-destroy', **{'vserver-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s: %s'
+ % (self.name, self.root_volume, self.root_volume_aggregate, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_vserver(self):
+ vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'vserver-rename', **{'vserver-name': self.name,
+ 'new-name': self.name})
+
+ try:
+ self.server.invoke_successfully(vserver_rename,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming SVM %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ vserver_exists = self.get_vserver()
+ rename_vserver = False
+ if vserver_exists:
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Update properties
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not vserver_exists:
+ self.create_vserver()
+
+ else:
+ if rename_vserver:
+ self.rename_vserver()
+
+ elif self.state == 'absent':
+ self.delete_vserver()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTSVM()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py
new file mode 100644
index 00000000..626e0aa0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user
+
+short_description: Manage NetApp cDOT user accounts (useradmin configuration and management)
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user) instead.
+
+description:
+- Create or destroy users.
+
+options:
+
+ state:
+ description:
+ - Whether the specified user should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the user to manage.
+ required: true
+
+ application:
+ description:
+ - Applications to grant access to.
+ required: true
+ choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
+
+ authentication_method:
+ description:
+ - Authentication method for the application.
+ - Not all authentication methods are valid for an application.
+ - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
+ - password for console application.
+ - password, domain, nsswitch, cert for http application.
+ - password, domain, nsswitch, cert for ontapi application.
+ - community for snmp application (when creating SNMPv1 and SNMPv2 users).
+ - usm and community for snmp application (when creating SNMPv3 users).
+ - password for sp application.
+ - password for rsh application.
+ - password for telnet application.
+ - password, publickey, domain, nsswitch for ssh application.
+ required: true
+ choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
+
+ set_password:
+ description:
+ - Password for the user account.
+ - It is ignored for creating snmp users, but is required for creating non-snmp users.
+ - For an existing user, this value will be used as the new password.
+
+ role_name:
+ description:
+ - The name of the role. Required when C(state=present).
+
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User
+ community.general.na_cdot_user:
+ state: present
+ name: SampleUser
+ application: ssh
+ authentication_method: password
+ set_password: apn1242183u1298u41
+ role_name: vsadmin
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUser(object):
+ """
+ Common operations to manage users and roles.
+ """
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ application=dict(required=True, type='str', choices=[
+ 'console', 'http', 'ontapi', 'rsh',
+ 'snmp', 'sp', 'ssh', 'telnet']),
+ authentication_method=dict(required=True, type='str',
+ choices=['community', 'password',
+ 'publickey', 'domain',
+ 'nsswitch', 'usm']),
+ set_password=dict(required=False, type='str', default=None),
+ role_name=dict(required=False, type='str'),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['role_name'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.application = p['application']
+ self.authentication_method = p['authentication_method']
+ self.set_password = p['set_password']
+ self.role_name = p['role_name']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_user(self):
+ """
+ Checks if the user exists.
+
+ :return:
+ True if user found
+ False if user is not found
+ :rtype: bool
+ """
+
+ security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-account-info', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(security_login_get_iter,
+ enable_tunneling=False)
+
+ if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
+ return True
+ else:
+ return False
+
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16034 denotes a user not being found.
+ if to_native(e.code) == "16034":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def create_user(self):
+ user_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-create', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method,
+ 'role-name': self.role_name})
+ if self.set_password is not None:
+ user_create.add_new_child('password', self.set_password)
+
+ try:
+ self.server.invoke_successfully(user_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_user(self):
+ user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-delete', **{'vserver': self.vserver,
+ 'user-name': self.name,
+ 'application': self.application,
+ 'authentication-method':
+ self.authentication_method})
+
+ try:
+ self.server.invoke_successfully(user_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_password(self):
+ """
+ Changes the password
+
+ :return:
+ True if password updated
+ False if password is not updated
+ :rtype: bool
+ """
+ self.server.set_vserver(self.vserver)
+ modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-modify-password', **{
+ 'new-password': str(self.set_password),
+ 'user-name': self.name})
+ try:
+ self.server.invoke_successfully(modify_password,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ if to_native(e.code) == '13114':
+ return False
+ else:
+ self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ self.server.set_vserver(None)
+ return True
+
+ def apply(self):
+ property_changed = False
+ password_changed = False
+ user_exists = self.get_user()
+
+ if user_exists:
+ if self.state == 'absent':
+ property_changed = True
+
+ elif self.state == 'present':
+ if self.set_password is not None:
+ password_changed = self.change_password()
+ else:
+ if self.state == 'present':
+ # Check if anything needs to be updated
+ property_changed = True
+
+ if property_changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not user_exists:
+ self.create_user()
+
+ # Add ability to update parameters.
+
+ elif self.state == 'absent':
+ self.delete_user()
+
+ changed = property_changed or password_changed
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUser()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
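The valid application/authentication_method pairings enumerated in the na_cdot_user documentation above can be summarised as a small lookup table; a hypothetical pre-validation helper built from those option descriptions (illustrative only, not part of the module):

# Valid authentication methods per application, as enumerated in the
# na_cdot_user documentation above (illustrative helper, not part of the module).
VALID_METHODS = {
    'console': {'password'},
    'http': {'password', 'domain', 'nsswitch', 'cert'},
    'ontapi': {'password', 'domain', 'nsswitch', 'cert'},
    'snmp': {'community', 'usm'},
    'sp': {'password'},
    'rsh': {'password'},
    'telnet': {'password'},
    'ssh': {'password', 'publickey', 'domain', 'nsswitch'},
}

def method_is_valid(application, authentication_method):
    """True when the method is one the documentation lists for the application."""
    return authentication_method in VALID_METHODS.get(application, set())

print(method_is_valid('ssh', 'password'))  # True
print(method_is_valid('console', 'cert'))  # False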
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py
new file mode 100644
index 00000000..88133200
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_user_role.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_user_role
+
+short_description: Manage NetApp cDOT user roles
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_user_role) instead.
+
+description:
+- Create or destroy user roles.
+
+options:
+
+ state:
+ description:
+ - Whether the specified role should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the role to manage.
+ required: true
+
+ command_directory_name:
+ description:
+ - The command or command directory to which the role has access.
+ required: true
+
+ access_level:
+ description:
+ - The access level of the role for the given command or command directory.
+ choices: ['none', 'readonly', 'all']
+ default: 'all'
+
+ vserver:
+ description:
+ - The name of the vserver to use.
+ required: true
+
+'''
+
+EXAMPLES = """
+
+ - name: Create User Role
+ community.general.na_cdot_user_role:
+ state: present
+ name: ansibleRole
+ command_directory_name: DEFAULT
+ access_level: none
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTUserRole(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ command_directory_name=dict(required=True, type='str'),
+ access_level=dict(required=False, type='str', default='all',
+ choices=['none', 'readonly', 'all']),
+
+ vserver=dict(required=True, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ self.command_directory_name = p['command_directory_name']
+ self.access_level = p['access_level']
+
+ self.vserver = p['vserver']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module)
+
+ def get_role(self):
+ """
+ Checks if the role exists for specific command-directory-name.
+
+ :return:
+ True if role found
+ False if role is not found
+ :rtype: bool
+ """
+
+ security_login_role_get_iter = netapp_utils.zapi.NaElement(
+ 'security-login-role-get-iter')
+ query_details = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-info', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(query_details)
+ security_login_role_get_iter.add_child_elem(query)
+
+ try:
+ result = self.server.invoke_successfully(
+ security_login_role_get_iter, enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ # Error 16031 denotes a role not being found.
+ if to_native(e.code) == "16031":
+ return False
+ else:
+ self.module.fail_json(msg='Error getting role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ if (result.get_child_by_name('num-records') and
+ int(result.get_child_content('num-records')) >= 1):
+ return True
+ else:
+ return False
+
+ def create_role(self):
+ role_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-create', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name,
+ 'access-level':
+ self.access_level})
+
+ try:
+ self.server.invoke_successfully(role_create,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error creating role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_role(self):
+ role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'security-login-role-delete', **{'vserver': self.vserver,
+ 'role-name': self.name,
+ 'command-directory-name':
+ self.command_directory_name})
+
+ try:
+ self.server.invoke_successfully(role_delete,
+ enable_tunneling=False)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error removing role %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ role_exists = self.get_role()
+
+ if role_exists:
+ if self.state == 'absent':
+ changed = True
+
+ # Check if properties need to be updated
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not role_exists:
+ self.create_role()
+
+ # Update properties
+
+ elif self.state == 'absent':
+ self.delete_role()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTUserRole()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py
new file mode 100644
index 00000000..c10911d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_cdot_volume.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: na_cdot_volume
+
+short_description: Manage NetApp cDOT volumes
+extends_documentation_fragment:
+- community.general._netapp.ontap
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: Updated modules released with increased functionality
+ alternative: Use M(netapp.ontap.na_ontap_volume) instead.
+
+description:
+- Create or destroy volumes on NetApp cDOT.
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ infinite:
+ description:
+ - Set True if the volume is an Infinite Volume.
+ type: bool
+ default: 'no'
+
+ online:
+ description:
+ - Whether the specified volume is online or not.
+ type: bool
+ default: 'yes'
+
+ aggregate_name:
+ description:
+ - The name of the aggregate the FlexVol should exist on. Required when C(state=present).
+
+ size:
+ description:
+ - The size of the volume in C(size_unit). Required when C(state=present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ vserver:
+ description:
+ - Name of the vserver to use.
+ required: true
+
+ junction_path:
+ description:
+ - Junction path at which to mount the volume.
+ required: false
+
+ export_policy:
+ description:
+ - Export policy to set for the specified junction path.
+ required: false
+ default: default
+
+ snapshot_policy:
+ description:
+ - Snapshot policy to set for the specified volume.
+ required: false
+ default: default
+
+'''
+
+EXAMPLES = """
+
+ - name: Create FlexVol
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ aggregate_name: aggr1
+ size: 20
+ size_unit: mb
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+ junction_path: /ansibleVolume
+ export_policy: all_nfs_networks
+ snapshot_policy: daily
+
+ - name: Make FlexVol offline
+ community.general.na_cdot_volume:
+ state: present
+ name: ansibleVolume
+ infinite: False
+ online: False
+ vserver: ansibleVServer
+ hostname: "{{ netapp_hostname }}"
+ username: "{{ netapp_username }}"
+ password: "{{ netapp_password }}"
+
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppCDOTVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = dict(
+ bytes=1,
+ b=1,
+ kb=1024,
+ mb=1024 ** 2,
+ gb=1024 ** 3,
+ tb=1024 ** 4,
+ pb=1024 ** 5,
+ eb=1024 ** 6,
+ zb=1024 ** 7,
+ yb=1024 ** 8
+ )
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
+ is_online=dict(required=False, type='bool', default=True, aliases=['online']),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+ aggregate_name=dict(type='str'),
+ vserver=dict(required=True, type='str', default=None),
+ junction_path=dict(required=False, type='str', default=None),
+ export_policy=dict(required=False, type='str', default='default'),
+ snapshot_policy=dict(required=False, type='str', default='default'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['aggregate_name', 'size'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.is_infinite = p['is_infinite']
+ self.is_online = p['is_online']
+ self.size_unit = p['size_unit']
+ self.vserver = p['vserver']
+ self.junction_path = p['junction_path']
+ self.export_policy = p['export_policy']
+ self.snapshot_policy = p['snapshot_policy']
+
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.aggregate_name = p['aggregate_name']
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
+
+ def get_volume(self):
+ """
+ Return details about the volume
+ :param:
+ name : Name of the volume
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
+ volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
+ volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
+ volume_id_attributes.add_new_child('name', self.name)
+ volume_attributes.add_child_elem(volume_id_attributes)
+
+ query = netapp_utils.zapi.NaElement('query')
+ query.add_child_elem(volume_attributes)
+
+ volume_info.add_child_elem(query)
+
+ result = self.server.invoke_successfully(volume_info, True)
+
+ return_value = None
+
+ if result.get_child_by_name('num-records') and \
+ int(result.get_child_content('num-records')) >= 1:
+
+ volume_attributes = result.get_child_by_name(
+ 'attributes-list').get_child_by_name(
+ 'volume-attributes')
+ # Get volume's current size
+ volume_space_attributes = volume_attributes.get_child_by_name(
+ 'volume-space-attributes')
+ current_size = volume_space_attributes.get_child_content('size')
+
+ # Get volume's state (online/offline)
+ volume_state_attributes = volume_attributes.get_child_by_name(
+ 'volume-state-attributes')
+ current_state = volume_state_attributes.get_child_content('state')
+ is_online = None
+ if current_state == "online":
+ is_online = True
+ elif current_state == "offline":
+ is_online = False
+ return_value = {
+ 'name': self.name,
+ 'size': current_size,
+ 'is_online': is_online,
+ }
+
+ return return_value
+
+ def create_volume(self):
+ create_parameters = {'volume': self.name,
+ 'containing-aggr-name': self.aggregate_name,
+ 'size': str(self.size),
+ }
+ if self.junction_path:
+ create_parameters['junction-path'] = str(self.junction_path)
+ if self.export_policy != 'default':
+ create_parameters['export-policy'] = str(self.export_policy)
+ if self.snapshot_policy != 'default':
+ create_parameters['snapshot-policy'] = str(self.snapshot_policy)
+
+ volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-create', **create_parameters)
+
+ try:
+ self.server.invoke_successfully(volume_create,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_volume(self):
+ if self.is_infinite:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy-async', **{'volume-name': self.name})
+ else:
+ volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-destroy', **{'name': self.name, 'unmount-and-offline':
+ 'true'})
+
+ try:
+ self.server.invoke_successfully(volume_delete,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def rename_volume(self):
+ """
+ Rename the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename-async',
+ **{'volume-name': self.name, 'new-volume-name': str(
+ self.name)})
+ else:
+ volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-rename', **{'volume': self.name, 'new-volume-name': str(
+ self.name)})
+ try:
+ self.server.invoke_successfully(volume_rename,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def resize_volume(self):
+ """
+ Re-size the volume.
+
+ Note: 'is_infinite' needs to be set to True in order to rename an
+ Infinite Volume.
+ """
+ if self.is_infinite:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size-async',
+ **{'volume-name': self.name, 'new-size': str(
+ self.size)})
+ else:
+ volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-size', **{'volume': self.name, 'new-size': str(
+ self.size)})
+ try:
+ self.server.invoke_successfully(volume_resize,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def change_volume_state(self):
+ """
+ Change volume's state (offline/online).
+
+ Note: 'is_infinite' needs to be set to True in order to change the
+ state of an Infinite Volume.
+ """
+ state_requested = None
+ if self.is_online:
+ # Requested state is 'online'.
+ state_requested = "online"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-online',
+ **{'name': self.name})
+ else:
+ # Requested state is 'offline'.
+ state_requested = "offline"
+ if self.is_infinite:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline-async',
+ **{'volume-name': self.name})
+ else:
+ volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
+ 'volume-offline',
+ **{'name': self.name})
+ try:
+ self.server.invoke_successfully(volume_change_state,
+ enable_tunneling=True)
+ except netapp_utils.zapi.NaApiError as e:
+ self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
+ (self.name, state_requested, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ rename_volume = False
+ resize_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ if str(volume_detail['size']) != str(self.size):
+ resize_volume = True
+ changed = True
+ if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
+ changed = True
+ if self.is_online is False:
+ # Volume is online, but requested state is offline
+ pass
+ else:
+ # Volume is offline but requested state is online
+ pass
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+
+ else:
+ if resize_volume:
+ self.resize_volume()
+ if volume_detail['is_online'] is not \
+ None and volume_detail['is_online'] != \
+ self.is_online:
+ self.change_volume_state()
+ # Ensure re-naming is the last change made.
+ if rename_volume:
+ self.rename_volume()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = NetAppCDOTVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
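change_volume_state() in na_cdot_volume above selects one of four ZAPI calls depending on the requested state and whether the volume is an Infinite Volume; a small illustrative helper capturing just that selection, using the call names shown in the module (not part of the patch):

# Illustrative selection of the ZAPI call names used by change_volume_state()
# above, based on the requested state and the Infinite Volume flag.
def volume_state_zapi(is_online, is_infinite):
    """Return the ZAPI call name used to bring a volume online or offline."""
    base = 'volume-online' if is_online else 'volume-offline'
    return base + ('-async' if is_infinite else '')

print(volume_state_zapi(True, False))   # volume-online
print(volume_state_zapi(False, True))   # volume-offline-async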
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py
new file mode 100644
index 00000000..0fc61afb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/na_ontap_gather_facts.py
@@ -0,0 +1,610 @@
+#!/usr/bin/python
+
+# (c) 2018 Piotr Olczak <piotr.olczak@redhat.com>
+# (c) 2018-2019, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: na_ontap_gather_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(netapp.ontap.na_ontap_info) instead.
+author: Piotr Olczak (@dprts) <polczak@redhat.com>
+extends_documentation_fragment:
+- community.general._netapp.na_ontap
+
+short_description: NetApp information gatherer
+description:
+ - This module allows you to gather various information about the ONTAP configuration.
+requirements:
+ - netapp_lib
+options:
+ state:
+ description:
+ - Returns "info"
+ default: "info"
+ choices: ['info']
+ gather_subset:
+ description:
+ - When supplied, this argument will restrict the facts collected
+ to a given subset. Possible values for this argument include
+ "aggregate_info", "cluster_node_info", "igroup_info", "lun_info", "net_dns_info",
+ "net_ifgrp_info",
+ "net_interface_info", "net_port_info", "nvme_info", "nvme_interface_info",
+ "nvme_namespace_info", "nvme_subsystem_info", "ontap_version",
+ "qos_adaptive_policy_info", "qos_policy_info", "security_key_manager_key_info",
+ "security_login_account_info", "storage_failover_info", "volume_info",
+ "vserver_info", "vserver_login_banner_info", "vserver_motd_info", "vserver_nfs_info"
+ Can specify a list of values to include a larger subset. Values can also be used
+ with an initial C(M(!)) to specify that a specific subset should
+ not be collected.
+ - nvme is supported with ONTAP 9.4 onwards.
+ - use "help" to get a list of supported facts for your system.
+ default: "all"
+'''
+
+EXAMPLES = '''
+- name: Get NetApp info (Password Authentication)
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+- ansible.builtin.debug:
+ var: ontap_facts
+- name: Limit Fact Gathering to Aggregate Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset: "aggregate_info"
+- name: Limit Fact Gathering to Volume and Lun Information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - volume_info
+ - lun_info
+- name: Gather all facts except for volume and lun information
+ community.general.na_ontap_gather_facts:
+ state: info
+ hostname: "na-vsim"
+ username: "admin"
+ password: "admins_password"
+ gather_subset:
+ - "!volume_info"
+ - "!lun_info"
+'''
+
+RETURN = '''
+ontap_facts:
+ description: Returns various information about NetApp cluster configuration
+ returned: always
+ type: dict
+ sample: '{
+ "ontap_facts": {
+ "aggregate_info": {...},
+ "cluster_node_info": {...},
+ "net_dns_info": {...},
+ "net_ifgrp_info": {...},
+ "net_interface_info": {...},
+ "net_port_info": {...},
+ "security_key_manager_key_info": {...},
+ "security_login_account_info": {...},
+ "volume_info": {...},
+ "lun_info": {...},
+ "storage_failover_info": {...},
+ "vserver_login_banner_info": {...},
+ "vserver_motd_info": {...},
+ "vserver_info": {...},
+ "vserver_nfs_info": {...},
+ "ontap_version": {...},
+ "igroup_info": {...},
+ "qos_policy_info": {...},
+ "qos_adaptive_policy_info": {...}
+ }
+ }'
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+try:
+ import xmltodict
+ HAS_XMLTODICT = True
+except ImportError:
+ HAS_XMLTODICT = False
+
+try:
+ import json
+ HAS_JSON = True
+except ImportError:
+ HAS_JSON = False
+
+HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
+
+
+class NetAppONTAPGatherFacts(object):
+ '''Class with gather facts methods'''
+
+ def __init__(self, module):
+ self.module = module
+ self.netapp_info = dict()
+
+ # thanks to coreywan (https://github.com/ansible/ansible/pull/47016)
+ # for starting this
+ # min_version identifies the ontapi version which supports this ZAPI
+ # use 0 if it is supported since 9.1
+ self.fact_subsets = {
+ 'net_dns_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-dns-get-iter',
+ 'attribute': 'net-dns-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-interface-get-iter',
+ 'attribute': 'net-interface-info',
+ 'field': 'interface-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_port_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'net-port-get-iter',
+ 'attribute': 'net-port-info',
+ 'field': ('node', 'port'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'cluster_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cluster-node-get-iter',
+ 'attribute': 'cluster-node-info',
+ 'field': 'node-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_login_account_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-login-get-iter',
+ 'attribute': 'security-login-account-info',
+ 'field': ('vserver', 'user-name', 'application', 'authentication-method'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'aggregate_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'aggr-get-iter',
+ 'attribute': 'aggr-attributes',
+ 'field': 'aggregate-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'volume_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'volume-get-iter',
+ 'attribute': 'volume-attributes',
+ 'field': ('name', 'owning-vserver-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'lun_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'lun-get-iter',
+ 'attribute': 'lun-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'storage_failover_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'cf-get-iter',
+ 'attribute': 'storage-failover-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_motd_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-motd-get-iter',
+ 'attribute': 'vserver-motd-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_login_banner_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-login-banner-get-iter',
+ 'attribute': 'vserver-login-banner-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'security_key_manager_key_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'security-key-manager-key-get-iter',
+ 'attribute': 'security-key-manager-key-info',
+ 'field': ('node', 'key-id'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'vserver-get-iter',
+ 'attribute': 'vserver-info',
+ 'field': 'vserver-name',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'vserver_nfs_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nfs-service-get-iter',
+ 'attribute': 'nfs-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'net_ifgrp_info': {
+ 'method': self.get_ifgrp_info,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'ontap_version': {
+ 'method': self.ontapi,
+ 'kwargs': {},
+ 'min_version': '0',
+ },
+ 'system_node_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'system-node-get-iter',
+ 'attribute': 'node-details-info',
+ 'field': 'node',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'igroup_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'igroup-get-iter',
+ 'attribute': 'initiator-group-info',
+ 'field': ('vserver', 'initiator-group-name'),
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ 'qos_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-policy-group-get-iter',
+ 'attribute': 'qos-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '0',
+ },
+ # supported in ONTAP 9.3 and onwards
+ 'qos_adaptive_policy_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'qos-adaptive-policy-group-get-iter',
+ 'attribute': 'qos-adaptive-policy-group-info',
+ 'field': 'policy-group',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '130',
+ },
+ # supported in ONTAP 9.4 and onwards
+ 'nvme_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-get-iter',
+ 'attribute': 'nvme-target-service-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_interface_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-interface-get-iter',
+ 'attribute': 'nvme-interface-info',
+ 'field': 'vserver',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_subsystem_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-subsystem-get-iter',
+ 'attribute': 'nvme-subsystem-info',
+ 'field': 'subsystem',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ 'nvme_namespace_info': {
+ 'method': self.get_generic_get_iter,
+ 'kwargs': {
+ 'call': 'nvme-namespace-get-iter',
+ 'attribute': 'nvme-namespace-info',
+ 'field': 'path',
+ 'query': {'max-records': '1024'},
+ },
+ 'min_version': '140',
+ },
+ }
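+        # 'min_version' is compared against the ONTAPI minor version string returned
+        # by self.ontapi(): '0' means available on any version, while '130' and '140'
+        # gate the subsets that, per the comments above, need ONTAP 9.3 and 9.4.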
+
+ if HAS_NETAPP_LIB is False:
+ self.module.fail_json(msg="the python NetApp-Lib module is required")
+ else:
+ self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
+
+ def ontapi(self):
+ '''Method to get ontapi version'''
+
+ api = 'system-get-ontapi-version'
+ api_call = netapp_utils.zapi.NaElement(api)
+ try:
+ results = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ ontapi_version = results.get_child_content('minor-version')
+ return ontapi_version if ontapi_version is not None else '0'
+ except netapp_utils.zapi.NaApiError as error:
+ self.module.fail_json(msg="Error calling API %s: %s" %
+ (api, to_native(error)), exception=traceback.format_exc())
+
+ def call_api(self, call, query=None):
+ '''Main method to run an API call'''
+
+ api_call = netapp_utils.zapi.NaElement(call)
+ result = None
+
+ if query:
+ for key, val in query.items():
+ # Can val be nested?
+ api_call.add_new_child(key, val)
+ try:
+ result = self.server.invoke_successfully(api_call, enable_tunneling=False)
+ return result
+ except netapp_utils.zapi.NaApiError as error:
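+            # For this particular call an API error is tolerated: 'result' is still
+            # None at this point, so the caller sees "no data" instead of a module
+            # failure.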
+ if call in ['security-key-manager-key-get-iter']:
+ return result
+ else:
+ self.module.fail_json(msg="Error calling API %s: %s"
+ % (call, to_native(error)), exception=traceback.format_exc())
+
+ def get_ifgrp_info(self):
+ '''Method to get network port ifgroups info'''
+
+ try:
+ net_port_info = self.netapp_info['net_port_info']
+ except KeyError:
+ net_port_info_calls = self.fact_subsets['net_port_info']
+ net_port_info = net_port_info_calls['method'](**net_port_info_calls['kwargs'])
+ interfaces = net_port_info.keys()
+
+ ifgrps = []
+ for ifn in interfaces:
+ if net_port_info[ifn]['port_type'] == 'if_group':
+ ifgrps.append(ifn)
+
+ net_ifgrp_info = dict()
+ for ifgrp in ifgrps:
+ query = dict()
+ query['node'], query['ifgrp-name'] = ifgrp.split(':')
+
+ tmp = self.get_generic_get_iter('net-port-ifgrp-get', field=('node', 'ifgrp-name'),
+ attribute='net-ifgrp-info', query=query)
+ net_ifgrp_info = net_ifgrp_info.copy()
+ net_ifgrp_info.update(tmp)
+ return net_ifgrp_info
+
+ def get_generic_get_iter(self, call, attribute=None, field=None, query=None):
+ '''Method to run a generic get-iter call'''
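+        # Returns a dict keyed by the value(s) of 'field' (tuple fields are joined
+        # with ':'), or a plain list when no 'field' is given; hyphenated keys in each
+        # record are converted to underscores via convert_keys(). Returns None when
+        # the call returns nothing or yields no attributes.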
+
+ generic_call = self.call_api(call, query)
+
+ if call == 'net-port-ifgrp-get':
+ children = 'attributes'
+ else:
+ children = 'attributes-list'
+
+ if generic_call is None:
+ return None
+
+ if field is None:
+ out = []
+ else:
+ out = {}
+
+ attributes_list = generic_call.get_child_by_name(children)
+
+ if attributes_list is None:
+ return None
+
+ for child in attributes_list.get_children():
+ dic = xmltodict.parse(child.to_string(), xml_attribs=False)
+
+ if attribute is not None:
+ dic = dic[attribute]
+
+ if isinstance(field, str):
+ unique_key = _finditem(dic, field)
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ elif isinstance(field, tuple):
+ unique_key = ':'.join([_finditem(dic, el) for el in field])
+ out = out.copy()
+ out.update({unique_key: convert_keys(json.loads(json.dumps(dic)))})
+ else:
+ out.append(convert_keys(json.loads(json.dumps(dic))))
+
+ return out
+
+ def get_all(self, gather_subset):
+ '''Method to get all subsets'''
+
+ results = netapp_utils.get_cserver(self.server)
+ cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
+ netapp_utils.ems_log_event("na_ontap_gather_facts", cserver)
+
+ self.netapp_info['ontap_version'] = self.ontapi()
+
+ run_subset = self.get_subset(gather_subset, self.netapp_info['ontap_version'])
+ if 'help' in gather_subset:
+ self.netapp_info['help'] = sorted(run_subset)
+ else:
+ for subset in run_subset:
+ call = self.fact_subsets[subset]
+ self.netapp_info[subset] = call['method'](**call['kwargs'])
+
+ return self.netapp_info
+
+ def get_subset(self, gather_subset, version):
+        '''Method to compute the set of subsets to run'''
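+        # gather_subset semantics: 'all' selects every subset usable at this ONTAPI
+        # version, '!name' excludes a subset, '!all' selects nothing, and 'help'
+        # returns the list of supported subsets instead of collecting facts.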
+
+ runable_subsets = set()
+ exclude_subsets = set()
+ usable_subsets = [key for key in self.fact_subsets.keys() if version >= self.fact_subsets[key]['min_version']]
+ if 'help' in gather_subset:
+ return usable_subsets
+ for subset in gather_subset:
+ if subset == 'all':
+ runable_subsets.update(usable_subsets)
+ return runable_subsets
+ if subset.startswith('!'):
+ subset = subset[1:]
+ if subset == 'all':
+ return set()
+ exclude = True
+ else:
+ exclude = False
+
+ if subset not in usable_subsets:
+ if subset not in self.fact_subsets.keys():
+ self.module.fail_json(msg='Bad subset: %s' % subset)
+ self.module.fail_json(msg='Remote system at version %s does not support %s' %
+ (version, subset))
+
+ if exclude:
+ exclude_subsets.add(subset)
+ else:
+ runable_subsets.add(subset)
+
+ if not runable_subsets:
+ runable_subsets.update(usable_subsets)
+
+ runable_subsets.difference_update(exclude_subsets)
+
+ return runable_subsets
+
+
+# https://stackoverflow.com/questions/14962485/finding-a-key-recursively-in-a-dictionary
+def __finditem(obj, key):
+
+ if key in obj:
+ return obj[key]
+ for dummy, val in obj.items():
+ if isinstance(val, dict):
+ item = __finditem(val, key)
+ if item is not None:
+ return item
+ return None
+
+
+def _finditem(obj, key):
+
+ value = __finditem(obj, key)
+ if value is not None:
+ return value
+ raise KeyError(key)
+
+
+def convert_keys(d_param):
+ '''Method to convert hyphen to underscore'''
+
+ out = {}
+ if isinstance(d_param, dict):
+ for key, val in d_param.items():
+ val = convert_keys(val)
+ out[key.replace('-', '_')] = val
+ else:
+ return d_param
+ return out
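+# For example, convert_keys({'node-name': {'is-node-healthy': 'true'}})
+# returns {'node_name': {'is_node_healthy': 'true'}}.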
+
+
+def main():
+ '''Execute action'''
+
+ argument_spec = netapp_utils.na_ontap_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='info', choices=['info']),
+ gather_subset=dict(default=['all'], type='list'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if not HAS_XMLTODICT:
+ module.fail_json(msg="xmltodict missing")
+
+ if not HAS_JSON:
+ module.fail_json(msg="json missing")
+
+ state = module.params['state']
+ gather_subset = module.params['gather_subset']
+ if gather_subset is None:
+ gather_subset = ['all']
+ gf_obj = NetAppONTAPGatherFacts(module)
+ gf_all = gf_obj.get_all(gather_subset)
+ result = {'state': state, 'changed': False}
+ module.exit_json(ansible_facts={'ontap_facts': gf_all}, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py
new file mode 100644
index 00000000..58c6962b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_account_manager.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_account_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_account)
+short_description: Manage SolidFire accounts
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update accounts on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified account should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - Unique username for this account. (May be 1 to 64 characters in length).
+ required: true
+
+ new_name:
+ description:
+ - New name for the user account.
+
+ initiator_secret:
+ description:
+ - CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
+ - The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
+ - If not specified, a random secret is created.
+
+ target_secret:
+ description:
+ - CHAP secret to use for the target (mutual CHAP authentication).
+ - Should be 12-16 characters long and impenetrable.
+ - The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
+ - If not specified, a random secret is created.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ account_id:
+ description:
+ - The ID of the account to manage or update.
+
+ status:
+ description:
+ - Status of the account.
+
+'''
+
+EXAMPLES = """
+- name: Create Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+
+- name: Modify Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: TenantA
+ new_name: TenantA-Renamed
+
+- name: Delete Account
+ community.general.sf_account_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: TenantA-Renamed
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireAccount(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=False, type='int', default=None),
+
+ new_name=dict(required=False, type='str', default=None),
+ initiator_secret=dict(required=False, type='str'),
+ target_secret=dict(required=False, type='str'),
+ attributes=dict(required=False, type='dict'),
+ status=dict(required=False, type='str'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+
+ self.new_name = p['new_name']
+ self.initiator_secret = p['initiator_secret']
+ self.target_secret = p['target_secret']
+ self.attributes = p['attributes']
+ self.status = p['status']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_account(self):
+ """
+ Return account object if found
+
+ :return: Details about the account. None if not found.
+ :rtype: dict
+ """
+ account_list = self.sfe.list_accounts()
+
+ for account in account_list.accounts:
+ if account.username == self.name:
+ # Update self.account_id:
+ if self.account_id is not None:
+ if account.account_id == self.account_id:
+ return account
+ else:
+ self.account_id = account.account_id
+ return account
+ return None
+
+ def create_account(self):
+ try:
+ self.sfe.add_account(username=self.name,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+ except Exception as e:
+            self.module.fail_json(msg='Error creating account %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_account(self):
+ try:
+ self.sfe.remove_account(account_id=self.account_id)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_account(self):
+ try:
+ self.sfe.modify_account(account_id=self.account_id,
+ username=self.new_name,
+ status=self.status,
+ initiator_secret=self.initiator_secret,
+ target_secret=self.target_secret,
+ attributes=self.attributes)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ account_exists = False
+ update_account = False
+ account_detail = self.get_account()
+
+ if account_detail:
+ account_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the account
+
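+                # The comparisons below are chained with 'elif', so the first
+                # difference found is enough to flag the account for an update.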
+ if account_detail.username is not None and self.new_name is not None and \
+ account_detail.username != self.new_name:
+ update_account = True
+ changed = True
+
+ elif account_detail.status is not None and self.status is not None \
+ and account_detail.status != self.status:
+ update_account = True
+ changed = True
+
+ elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
+ and account_detail.initiator_secret != self.initiator_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.target_secret is not None and self.target_secret is not None \
+ and account_detail.target_secret != self.target_secret:
+ update_account = True
+ changed = True
+
+ elif account_detail.attributes is not None and self.attributes is not None \
+ and account_detail.attributes != self.attributes:
+ update_account = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not account_exists:
+ self.create_account()
+ elif update_account:
+ self.update_account()
+
+ elif self.state == 'absent':
+ self.delete_account()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireAccount()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py
new file mode 100644
index 00000000..cfe24832
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_check_connections.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_check_connections
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_check_connections)
+short_description: Check connectivity to MVIP and SVIP.
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Used to test the management connection to the cluster.
+- The test pings the MVIP and SVIP, and executes a simple API method to verify connectivity.
+
+options:
+
+ skip:
+ description:
+ - Skip checking connection to SVIP or MVIP.
+ choices: ['svip', 'mvip']
+
+ mvip:
+ description:
+  - Optionally, use to test the connection to a different MVIP.
+ - This is not needed to test the connection to the target cluster.
+
+ svip:
+ description:
+  - Optionally, use to test the connection to a different SVIP.
+ - This is not needed to test the connection to the target cluster.
+
+'''
+
+
+EXAMPLES = """
+ - name: Check connections to MVIP and SVIP
+ community.general.sf_check_connections:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+"""
+
+RETURN = """
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireConnection(object):
+
+ def __init__(self):
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ skip=dict(required=False, type='str', default=None, choices=['mvip', 'svip']),
+ mvip=dict(required=False, type='str', default=None),
+ svip=dict(required=False, type='str', default=None)
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.skip = p['skip']
+ self.mvip = p['mvip']
+ self.svip = p['svip']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.ElementFactory.create(p['hostname'], p['username'], p['password'], port=442)
+
+ def check_mvip_connection(self):
+ """
+ Check connection to MVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_mvip(mvip=self.mvip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to MVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check_svip_connection(self):
+ """
+ Check connection to SVIP
+
+ :return: true if connection was successful, false otherwise.
+ :rtype: bool
+ """
+ try:
+ test = self.sfe.test_connect_svip(svip=self.svip)
+ result = test.details.connected
+ # Todo - Log details about the test
+ return result
+
+ except Exception as e:
+ self.module.fail_json(msg='Error checking connection to SVIP: %s' % to_native(e), exception=traceback.format_exc())
+ return False
+
+ def check(self):
+
+ failed = True
+ msg = ''
+
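+        # With no 'skip' option both endpoints are tested; 'skip: mvip' checks only
+        # the SVIP and 'skip: svip' checks only the MVIP.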
+ if self.skip is None:
+ mvip_connection_established = self.check_mvip_connection()
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ elif not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'mvip':
+ svip_connection_established = self.check_svip_connection()
+
+ # Set failed and msg
+ if not svip_connection_established:
+ failed = True
+ msg = 'Connection to SVIP failed.'
+ else:
+ failed = False
+
+ elif self.skip == 'svip':
+ mvip_connection_established = self.check_mvip_connection()
+
+ # Set failed and msg
+ if not mvip_connection_established:
+ failed = True
+ msg = 'Connection to MVIP failed.'
+ else:
+ failed = False
+
+ if failed:
+ self.module.fail_json(msg=msg)
+ else:
+ self.module.exit_json()
+
+
+def main():
+ v = SolidFireConnection()
+ v.check()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
new file mode 100644
index 00000000..296e50bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_snapshot_schedule_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_snapshot_schedule)
+short_description: Manage SolidFire snapshot schedules
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update snapshot schedules on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified schedule should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ paused:
+ description:
+ - Pause / Resume a schedule.
+ required: false
+
+ recurring:
+ description:
+ - Should the schedule recur?
+ required: false
+
+ time_interval_days:
+ description: Time interval in days.
+ required: false
+ default: 1
+
+ time_interval_hours:
+ description: Time interval in hours.
+ required: false
+ default: 0
+
+ time_interval_minutes:
+ description: Time interval in minutes.
+ required: false
+ default: 0
+
+ name:
+ description:
+ - Name for the snapshot schedule.
+ required: true
+
+ snapshot_name:
+ description:
+ - Name for the created snapshots.
+ required: false
+
+ volumes:
+ description:
+ - Volume IDs that you want to set the snapshot schedule for.
+ - At least 1 volume ID is required for creating a new schedule.
+    - Required when C(state=present).
+ required: false
+
+ retention:
+ description:
+ - Retention period for the snapshot.
+ - Format is 'HH:mm:ss'.
+ required: false
+
+ schedule_id:
+ description:
+ - The schedule ID for the schedule that you want to update or delete.
+ required: false
+
+ starting_date:
+ description:
+ - Starting date for the schedule.
+ - Required when C(state=present).
+    - Please use two '-' in the format shown below, or you may see an error (TypeError, is not JSON serializable).
+ - "Format: C(2016--12--01T00:00:00Z)"
+ required: false
+'''
+
+EXAMPLES = """
+ - name: Create Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: Schedule_A
+ time_interval_days: 1
+ starting_date: 2016--12--01T00:00:00Z
+ volumes: 7
+
+ - name: Update Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ schedule_id: 6
+ recurring: True
+ snapshot_name: AnsibleSnapshots
+
+ - name: Delete Snapshot schedule
+ community.general.sf_snapshot_schedule_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ schedule_id: 6
+"""
+
+RETURN = """
+
+schedule_id:
+ description: Schedule ID of the newly created schedule
+ returned: success
+ type: str
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireSnapShotSchedule(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+
+ time_interval_days=dict(required=False, type='int', default=1),
+ time_interval_hours=dict(required=False, type='int', default=0),
+ time_interval_minutes=dict(required=False, type='int', default=0),
+
+ paused=dict(required=False, type='bool'),
+ recurring=dict(required=False, type='bool'),
+
+ starting_date=dict(type='str'),
+
+ snapshot_name=dict(required=False, type='str'),
+ volumes=dict(required=False, type='list'),
+ retention=dict(required=False, type='str'),
+
+ schedule_id=dict(type='int'),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['starting_date', 'volumes'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+
+ # self.interval = p['interval']
+
+ self.time_interval_days = p['time_interval_days']
+ self.time_interval_hours = p['time_interval_hours']
+ self.time_interval_minutes = p['time_interval_minutes']
+
+ self.paused = p['paused']
+ self.recurring = p['recurring']
+
+ self.starting_date = p['starting_date']
+ if self.starting_date is not None:
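+            # The documented format doubles the dashes (for example
+            # 2016--12--01T00:00:00Z); normalize to single dashes before sending
+            # the date to the API.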
+ self.starting_date = self.starting_date.replace("--", "-")
+
+ self.snapshot_name = p['snapshot_name']
+ self.volumes = p['volumes']
+ self.retention = p['retention']
+
+ self.schedule_id = p['schedule_id']
+
+ self.create_schedule_result = None
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_schedule(self):
+ schedule_list = self.sfe.list_schedules()
+ for schedule in schedule_list.schedules:
+ if schedule.name == self.name:
+ # Update self.schedule_id:
+ if self.schedule_id is not None:
+ if schedule.schedule_id == self.schedule_id:
+ return schedule
+ else:
+ self.schedule_id = schedule.schedule_id
+ return schedule
+
+ return None
+
+ def create_schedule(self):
+
+ try:
+ sched = netapp_utils.Schedule()
+ # if self.interval == 'time_interval':
+ sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ # Create schedule
+ sched.name = self.name
+ sched.schedule_info = netapp_utils.ScheduleInfo(
+ volume_ids=self.volumes,
+ snapshot_name=self.snapshot_name,
+ retention=self.retention
+ )
+ sched.paused = self.paused
+ sched.recurring = self.recurring
+ sched.starting_date = self.starting_date
+
+ self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def delete_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+ sched.to_be_deleted = True
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_schedule(self):
+
+ try:
+ get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
+ sched = get_schedule_result.schedule
+
+ # Update schedule properties
+
+ # if self.interval == 'time_interval':
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if sched.frequency.days != temp_frequency.days or \
+ sched.frequency.hours != temp_frequency.hours \
+ or sched.frequency.minutes != temp_frequency.minutes:
+ sched.frequency = temp_frequency
+
+ sched.name = self.name
+ if self.volumes is not None:
+ sched.schedule_info.volume_ids = self.volumes
+ if self.retention is not None:
+ sched.schedule_info.retention = self.retention
+ if self.snapshot_name is not None:
+ sched.schedule_info.snapshot_name = self.snapshot_name
+ if self.paused is not None:
+ sched.paused = self.paused
+ if self.recurring is not None:
+ sched.recurring = self.recurring
+ if self.starting_date is not None:
+ sched.starting_date = self.starting_date
+
+ # Make API call
+ self.sfe.modify_schedule(schedule=sched)
+
+ except Exception as e:
+ self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
+ exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ schedule_exists = False
+ update_schedule = False
+ schedule_detail = self.get_schedule()
+
+ if schedule_detail:
+ schedule_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+                # Check if we need to update the schedule
+
+ if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
+ update_schedule = True
+ changed = True
+
+ elif schedule_detail.name != self.name:
+ update_schedule = True
+ changed = True
+
+ elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
+ update_schedule = True
+ changed = True
+
+ elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
+ update_schedule = True
+ changed = True
+
+ elif self.paused is not None and schedule_detail.paused != self.paused:
+ update_schedule = True
+ changed = True
+
+ elif self.recurring is not None and schedule_detail.recurring != self.recurring:
+ update_schedule = True
+ changed = True
+
+ elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
+ update_schedule = True
+ changed = True
+
+ elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
+ or self.time_interval_days is not None:
+
+ temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
+ hours=self.time_interval_hours,
+ minutes=self.time_interval_minutes)
+
+ if schedule_detail.frequency.days != temp_frequency.days or \
+ schedule_detail.frequency.hours != temp_frequency.hours \
+ or schedule_detail.frequency.minutes != temp_frequency.minutes:
+ update_schedule = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ # Skip changes
+ pass
+ else:
+ if self.state == 'present':
+ if not schedule_exists:
+ self.create_schedule()
+ elif update_schedule:
+ self.update_schedule()
+
+ elif self.state == 'absent':
+ self.delete_schedule()
+
+ if self.create_schedule_result is not None:
+ self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
+ else:
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireSnapShotSchedule()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py
new file mode 100644
index 00000000..78e3097d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_access_group_manager.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_access_group_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_access_group)
+short_description: Manage SolidFire Volume Access Groups
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volume access groups on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume access group should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+    - Name of the volume access group. It is not required to be unique, but a unique name is recommended.
+ required: true
+
+ initiators:
+ description:
+ - List of initiators to include in the volume access group. If unspecified, the access group will start out without configured initiators.
+
+ volumes:
+ description:
+ - List of volumes to initially include in the volume access group. If unspecified, the access group will start without any volumes.
+
+ virtual_network_id:
+ description:
+    - The ID of the SolidFire virtual network to associate the volume access group with.
+
+ virtual_network_tags:
+ description:
+ - The ID of the VLAN Virtual Network Tag to associate the volume access group with.
+
+ attributes:
+ description: List of Name/Value pairs in JSON object format.
+
+ volume_access_group_id:
+ description:
+ - The ID of the volume access group to modify or delete.
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVolumeAccessGroup
+ volumes: [7,8]
+
+ - name: Modify Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ volume_access_group_id: 1
+ name: AnsibleVolumeAccessGroup-Renamed
+ attributes: {"volumes": [1,2,3], "virtual_network_id": 12345}
+
+ - name: Delete Volume Access Group
+ community.general.sf_volume_access_group_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ volume_access_group_id: 1
+"""
+
+RETURN = """
+
+
+"""
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolumeAccessGroup(object):
+
+ def __init__(self):
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ volume_access_group_id=dict(required=False, type='int', default=None),
+
+ initiators=dict(required=False, type='list', default=None),
+ volumes=dict(required=False, type='list', default=None),
+ virtual_network_id=dict(required=False, type='list', default=None),
+ virtual_network_tags=dict(required=False, type='list', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.volume_access_group_id = p['volume_access_group_id']
+
+ self.initiators = p['initiators']
+ self.volumes = p['volumes']
+ self.virtual_network_id = p['virtual_network_id']
+ self.virtual_network_tags = p['virtual_network_tags']
+ self.attributes = p['attributes']
+
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume_access_group(self):
+ access_groups_list = self.sfe.list_volume_access_groups()
+
+ for group in access_groups_list.volume_access_groups:
+ if group.name == self.name:
+ # Update self.volume_access_group_id:
+ if self.volume_access_group_id is not None:
+ if group.volume_access_group_id == self.volume_access_group_id:
+ return group
+ else:
+ self.volume_access_group_id = group.volume_access_group_id
+ return group
+ return None
+
+ def create_volume_access_group(self):
+ try:
+ self.sfe.create_volume_access_group(name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error creating volume access group %s: %s" %
+ (self.name, to_native(e)), exception=traceback.format_exc())
+
+ def delete_volume_access_group(self):
+ try:
+ self.sfe.delete_volume_access_group(volume_access_group_id=self.volume_access_group_id)
+
+ except Exception as e:
+ self.module.fail_json(msg="Error deleting volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)),
+ exception=traceback.format_exc())
+
+ def update_volume_access_group(self):
+ try:
+ self.sfe.modify_volume_access_group(volume_access_group_id=self.volume_access_group_id,
+ virtual_network_id=self.virtual_network_id,
+ virtual_network_tags=self.virtual_network_tags,
+ name=self.name,
+ initiators=self.initiators,
+ volumes=self.volumes,
+ attributes=self.attributes)
+ except Exception as e:
+ self.module.fail_json(msg="Error updating volume access group %s: %s" %
+ (self.volume_access_group_id, to_native(e)), exception=traceback.format_exc())
+
+ def apply(self):
+ changed = False
+ group_exists = False
+ update_group = False
+ group_detail = self.get_volume_access_group()
+
+ if group_detail:
+ group_exists = True
+
+ if self.state == 'absent':
+ changed = True
+
+ elif self.state == 'present':
+ # Check if we need to update the group
+ if self.volumes is not None and group_detail.volumes != self.volumes:
+ update_group = True
+ changed = True
+ elif self.initiators is not None and group_detail.initiators != self.initiators:
+ update_group = True
+ changed = True
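+                # virtual_network_id, virtual_network_tags and attributes are not
+                # compared against the existing group; supplying any of them always
+                # triggers an update.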
+ elif self.virtual_network_id is not None or self.virtual_network_tags is not None or \
+ self.attributes is not None:
+ update_group = True
+ changed = True
+
+ else:
+ if self.state == 'present':
+ changed = True
+
+ if changed:
+ if self.module.check_mode:
+ pass
+ else:
+ if self.state == 'present':
+ if not group_exists:
+ self.create_volume_access_group()
+ elif update_group:
+ self.update_volume_access_group()
+
+ elif self.state == 'absent':
+ self.delete_volume_access_group()
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ v = SolidFireVolumeAccessGroup()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py
new file mode 100644
index 00000000..9d5378a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/netapp/sf_volume_manager.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+
+# (c) 2017, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: sf_volume_manager
+deprecated:
+ removed_in: 2.0.0 # was Ansible 2.11
+ why: This module has been replaced
+ alternative: please use M(netapp.elementsw.na_elementsw_volume)
+short_description: Manage SolidFire volumes
+extends_documentation_fragment:
+- community.general._netapp.solidfire
+
+author: Sumit Kumar (@timuster) <sumit4@netapp.com>
+description:
+- Create, destroy, or update volumes on SolidFire
+
+options:
+
+ state:
+ description:
+ - Whether the specified volume should exist or not.
+ required: true
+ choices: ['present', 'absent']
+
+ name:
+ description:
+ - The name of the volume to manage.
+ required: true
+
+ account_id:
+ description:
+ - Account ID for the owner of this volume.
+ required: true
+
+ 512emulation:
+ description:
+ - Should the volume provide 512-byte sector emulation?
+ - Required when C(state=present)
+
+ qos:
+ description: Initial quality of service settings for this volume. Configure as dict in playbooks.
+
+ attributes:
+ description: A YAML dictionary of attributes that you would like to apply on this volume.
+
+ volume_id:
+ description:
+ - The ID of the volume to manage or update.
+ - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
+ parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
+ volume (since it's an auto-generated property).
+
+ size:
+ description:
+ - The size of the volume in (size_unit).
+ - Required when C(state = present).
+
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter.
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: 'gb'
+
+ access:
+ description:
+ - "Access allowed for the volume."
+ - "readOnly: Only read operations are allowed."
+ - "readWrite: Reads and writes are allowed."
+ - "locked: No reads or writes are allowed."
+ - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
+ - "If unspecified, the access settings of the clone will be the same as the source."
+ choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
+
+'''
+
+EXAMPLES = """
+ - name: Create Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ qos: {minIOPS: 1000, maxIOPS: 20000, burstIOPS: 50000}
+ account_id: 3
+ enable512e: False
+ size: 1
+ size_unit: gb
+
+ - name: Update Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: present
+ name: AnsibleVol
+ account_id: 3
+ access: readWrite
+
+ - name: Delete Volume
+ community.general.sf_volume_manager:
+ hostname: "{{ solidfire_hostname }}"
+ username: "{{ solidfire_username }}"
+ password: "{{ solidfire_password }}"
+ state: absent
+ name: AnsibleVol
+ account_id: 2
+"""
+
+RETURN = """
+
+msg:
+ description: Success message
+ returned: success
+ type: str
+
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+import ansible_collections.community.general.plugins.module_utils._netapp as netapp_utils
+
+HAS_SF_SDK = netapp_utils.has_sf_sdk()
+
+
+class SolidFireVolume(object):
+
+ def __init__(self):
+
+ self._size_unit_map = netapp_utils.SF_BYTE_MAP
+
+ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
+ self.argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ name=dict(required=True, type='str'),
+ account_id=dict(required=True, type='int'),
+
+ enable512e=dict(type='bool', aliases=['512emulation']),
+ qos=dict(required=False, type='dict', default=None),
+ attributes=dict(required=False, type='dict', default=None),
+
+ volume_id=dict(type='int', default=None),
+ size=dict(type='int'),
+ size_unit=dict(default='gb',
+ choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
+ 'pb', 'eb', 'zb', 'yb'], type='str'),
+
+ access=dict(required=False, type='str', default=None, choices=['readOnly', 'readWrite',
+ 'locked', 'replicationTarget']),
+
+ ))
+
+ self.module = AnsibleModule(
+ argument_spec=self.argument_spec,
+ required_if=[
+ ('state', 'present', ['size', 'enable512e'])
+ ],
+ supports_check_mode=True
+ )
+
+ p = self.module.params
+
+ # set up state variables
+ self.state = p['state']
+ self.name = p['name']
+ self.account_id = p['account_id']
+ self.enable512e = p['enable512e']
+ self.qos = p['qos']
+ self.attributes = p['attributes']
+
+ self.volume_id = p['volume_id']
+ self.size_unit = p['size_unit']
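+        # Convert the requested size to bytes using the unit multiplier table from
+        # module_utils (for example, size=1 with size_unit='gb' is scaled by the
+        # 'gb' entry of netapp_utils.SF_BYTE_MAP).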
+ if p['size'] is not None:
+ self.size = p['size'] * self._size_unit_map[self.size_unit]
+ else:
+ self.size = None
+ self.access = p['access']
+ if HAS_SF_SDK is False:
+ self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
+ else:
+ self.sfe = netapp_utils.create_sf_connection(module=self.module)
+
+ def get_volume(self):
+ """
+ Return volume object if found
+
+ :return: Details about the volume. None if not found.
+ :rtype: dict
+ """
+ volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
+ for volume in volume_list.volumes:
+ if volume.name == self.name:
+ # Update self.volume_id
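+                # A volume with a non-empty delete_time is pending deletion and is
+                # ignored here.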
+ if self.volume_id is not None:
+ if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
+ return volume
+ else:
+ if str(volume.delete_time) == "":
+ self.volume_id = volume.volume_id
+ return volume
+ return None
+
+ def create_volume(self):
+ try:
+ self.sfe.create_volume(name=self.name,
+ account_id=self.account_id,
+ total_size=self.size,
+ enable512e=self.enable512e,
+ qos=self.qos,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
+ exception=to_native(err))
+
+ def delete_volume(self):
+ try:
+ self.sfe.delete_volume(volume_id=self.volume_id)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
+ exception=to_native(err))
+
+ def update_volume(self):
+ try:
+ self.sfe.modify_volume(self.volume_id,
+ account_id=self.account_id,
+ access=self.access,
+ qos=self.qos,
+ total_size=self.size,
+ attributes=self.attributes)
+
+ except Exception as err:
+ self.module.fail_json(msg="Error updating volume %s" % self.name,
+ exception=to_native(err))
+
+ def apply(self):
+ changed = False
+ volume_exists = False
+ update_volume = False
+ volume_detail = self.get_volume()
+
+ if volume_detail:
+ volume_exists = True
+
+ if self.state == 'absent':
+ # Checking for state change(s) here, and applying it later in the code allows us to support
+ # check_mode
+ changed = True
+
+ elif self.state == 'present':
+ if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.account_id is not None and self.account_id is not None \
+ and volume_detail.account_id != self.account_id:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
+ size_difference = abs(float(volume_detail.total_size - self.size))
+ # Change size only if difference is bigger than 0.001
+ if size_difference / self.size > 0.001:
+ update_volume = True
+ changed = True
+
+ elif volume_detail.attributes is not None and self.attributes is not None and \
+ volume_detail.attributes != self.attributes:
+ update_volume = True
+ changed = True
+ else:
+ if self.state == 'present':
+ changed = True
+
+ result_message = ""
+
+ if changed:
+ if self.module.check_mode:
+ result_message = "Check mode, skipping changes"
+ else:
+ if self.state == 'present':
+ if not volume_exists:
+ self.create_volume()
+ result_message = "Volume created"
+ elif update_volume:
+ self.update_volume()
+ result_message = "Volume updated"
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ result_message = "Volume deleted"
+
+ self.module.exit_json(changed=changed, msg=result_message)
+
+
+def main():
+ v = SolidFireVolume()
+ v.apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py
new file mode 100644
index 00000000..5e8b5932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefa_facts.py
@@ -0,0 +1,858 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefa_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flasharray.purefa_info) instead.
+short_description: Collect facts from Pure Storage FlashArray
+description:
+  - Collect fact information from a Pure Storage FlashArray running the
+    Purity//FA operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+  - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
+ admins, volumes, snapshots, pods, vgroups, offload, apps and arrays.
+ type: list
+ required: false
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fa
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefa_facts:
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect configuration and capacity facts
+ community.general.purefa_facts:
+ gather_subset:
+ - config
+ - capacity
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+
+- name: Collect all facts
+ community.general.purefa_facts:
+ gather_subset:
+ - all
+ fa_url: 10.10.10.2
+ api_token: e31060a7-21fc-e277-6240-25983c6c4592
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashArray
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {},
+ "config": {
+ "directory_service": {
+ "array_admin_group": null,
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "check_peer": false,
+ "enabled": false,
+ "group_base": null,
+ "readonly_group": null,
+ "storage_admin_group": null,
+ "uri": []
+ },
+ "dns": {
+ "domain": "domain.com",
+ "nameservers": [
+ "8.8.8.8",
+ "8.8.4.4"
+ ]
+ },
+ "ntp": [
+ "0.ntp.pool.org",
+ "1.ntp.pool.org",
+ "2.ntp.pool.org",
+ "3.ntp.pool.org"
+ ],
+ "smtp": [
+ {
+ "enabled": true,
+ "name": "alerts@acme.com"
+ },
+ {
+ "enabled": true,
+ "name": "user@acme.com"
+ }
+ ],
+ "snmp": [
+ {
+ "auth_passphrase": null,
+ "auth_protocol": null,
+ "community": null,
+ "host": "localhost",
+ "name": "localhost",
+ "privacy_passphrase": null,
+ "privacy_protocol": null,
+ "user": null,
+ "version": "v2c"
+ }
+ ],
+ "ssl_certs": {
+ "country": null,
+ "email": null,
+ "issued_by": "",
+ "issued_to": "",
+ "key_size": 2048,
+ "locality": null,
+ "organization": "Acme Storage, Inc.",
+ "organizational_unit": "Acme Storage, Inc.",
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "2017-08-11T23:09:06Z",
+ "valid_to": "2027-08-09T23:09:06Z"
+ },
+ "syslog": []
+ },
+ "default": {
+ "array_name": "flasharray1",
+ "connected_arrays": 1,
+ "hostgroups": 0,
+ "hosts": 10,
+ "pods": 3,
+ "protection_groups": 1,
+ "purity_version": "5.0.4",
+ "snapshots": 1,
+ "volume_groups": 2
+ },
+ "hgroups": {},
+ "hosts": {
+ "host1": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:2f6f5715a533"
+ ],
+ "wwn": []
+ },
+ "host2": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:d17fb13fe0b"
+ ],
+ "wwn": []
+ },
+ "host3": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:97b1351bfb2"
+ ],
+ "wwn": []
+ },
+ "host4": {
+ "hgroup": null,
+ "iqn": [
+ "iqn.1994-05.com.redhat:dd84e9a7b2cb"
+ ],
+ "wwn": [
+ "10000000C96C48D1",
+ "10000000C96C48D2"
+ ]
+ }
+ },
+ "interfaces": {
+ "CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
+ "CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
+ },
+ "network": {
+ "ct0.eth0": {
+ "address": "10.10.10.10",
+ "gateway": "10.10.10.1",
+ "hwaddr": "ec:f4:bb:c8:8a:04",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ },
+ "ct0.eth2": {
+ "address": "10.10.10.11",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:00",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth3": {
+ "address": "10.10.10.12",
+ "gateway": null,
+ "hwaddr": "ec:f4:bb:c8:8a:02",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "replication"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth4": {
+ "address": "10.10.10.13",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0c",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "ct0.eth5": {
+ "address": "10.10.10.14",
+ "gateway": null,
+ "hwaddr": "90:e2:ba:83:79:0d",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "iscsi"
+ ],
+ "speed": 10000000000
+ },
+ "vir0": {
+ "address": "10.10.10.20",
+ "gateway": "10.10.10.1",
+ "hwaddr": "fe:ba:e9:e7:6b:0f",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "speed": 1000000000
+ }
+ },
+ "offload": {
+ "nfstarget": {
+ "address": "10.0.2.53",
+ "mount_options": null,
+ "mount_point": "/offload",
+ "protocol": "nfs",
+ "status": "scanning"
+ }
+ },
+ "performance": {
+ "input_per_sec": 8191,
+ "output_per_sec": 0,
+ "queue_depth": 1,
+ "reads_per_sec": 0,
+ "san_usec_per_write_op": 15,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 642,
+ "writes_per_sec": 2
+ },
+ "pgroups": {
+ "consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
+ "hgroups": null,
+ "hosts": null,
+ "source": "host1",
+ "targets": null,
+ "volumes": [
+ "volume-1"
+ ]
+ }
+ },
+ "pods": {
+ "srm-pod": {
+ "arrays": [
+ {
+ "array_id": "52595f7e-b460-4b46-8851-a5defd2ac192",
+ "mediator_status": "online",
+ "name": "sn1-405-c09-37",
+ "status": "online"
+ },
+ {
+ "array_id": "a2c32301-f8a0-4382-949b-e69b552ce8ca",
+ "mediator_status": "online",
+ "name": "sn1-420-c11-31",
+ "status": "online"
+ }
+ ],
+ "source": null
+ }
+ },
+ "snapshots": {
+ "consisgroup.cgsnapshot": {
+ "created": "2018-03-28T09:34:02Z",
+ "size": 13958643712,
+ "source": "volume-1"
+ }
+ },
+ "subnet": {},
+ "vgroups": {
+ "vvol--vSphere-HA-0ffc7dd1-vg": {
+ "volumes": [
+ "vvol--vSphere-HA-0ffc7dd1-vg/Config-aad5d7c6"
+ ]
+ }
+ },
+ "volumes": {
+ "ansible_data": {
+ "bandwidth": null,
+ "hosts": [
+ [
+ "host1",
+ 1
+ ]
+ ],
+ "serial": "43BE47C12334399B000114A6",
+ "size": 1099511627776,
+ "source": null
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_system, purefa_argument_spec
+
+
+ADMIN_API_VERSION = '1.14'
+S3_REQUIRED_API_VERSION = '1.16'
+LATENCY_REQUIRED_API_VERSION = '1.16'
+AC_REQUIRED_API_VERSION = '1.14'
+CAP_REQUIRED_API_VERSION = '1.6'
+SAN_REQUIRED_API_VERSION = '1.10'
+NVME_API_VERSION = '1.16'
+PREFERRED_API_VERSION = '1.15'
+CONN_STATUS_API_VERSION = '1.17'
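+# Each *_API_VERSION constant is checked against array._list_available_rest_versions()
+# below to decide whether the corresponding facts can be collected from the target
+# Purity//FA release.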
+
+
+def generate_default_dict(array):
+ default_facts = {}
+ defaults = array.get()
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ default_facts['volume_groups'] = len(array.list_vgroups())
+ default_facts['connected_arrays'] = len(array.list_array_connections())
+ default_facts['pods'] = len(array.list_pods())
+ default_facts['connection_key'] = array.get(connection_key=True)['connection_key']
+ hosts = array.list_hosts()
+ admins = array.list_admins()
+ snaps = array.list_volumes(snap=True, pending=True)
+ pgroups = array.list_pgroups(pending=True)
+ hgroups = array.list_hgroups()
+ # Old FA arrays only report model from the primary controller
+ ct0_model = array.get_hardware('CT0')['model']
+ if ct0_model:
+ model = ct0_model
+ else:
+ ct1_model = array.get_hardware('CT1')['model']
+ model = ct1_model
+ default_facts['array_model'] = model
+ default_facts['array_name'] = defaults['array_name']
+ default_facts['purity_version'] = defaults['version']
+ default_facts['hosts'] = len(hosts)
+ default_facts['snapshots'] = len(snaps)
+ default_facts['protection_groups'] = len(pgroups)
+ default_facts['hostgroups'] = len(hgroups)
+ default_facts['admins'] = len(admins)
+ return default_facts
+
+
+def generate_perf_dict(array):
+ perf_facts = {}
+ api_version = array._list_available_rest_versions()
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ latency_info = array.get(action='monitor', latency=True)[0]
+ perf_info = array.get(action='monitor')[0]
+ # IOPS
+ perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
+ perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
+
+ # Bandwidth
+ perf_facts['input_per_sec'] = perf_info['input_per_sec']
+ perf_facts['output_per_sec'] = perf_info['output_per_sec']
+
+ # Latency
+ if LATENCY_REQUIRED_API_VERSION in api_version:
+ perf_facts['san_usec_per_read_op'] = latency_info['san_usec_per_read_op']
+ perf_facts['san_usec_per_write_op'] = latency_info['san_usec_per_write_op']
+ perf_facts['queue_usec_per_read_op'] = latency_info['queue_usec_per_read_op']
+ perf_facts['queue_usec_per_write_op'] = latency_info['queue_usec_per_write_op']
+ perf_facts['qos_rate_limit_usec_per_read_op'] = latency_info['qos_rate_limit_usec_per_read_op']
+ perf_facts['qos_rate_limit_usec_per_write_op'] = latency_info['qos_rate_limit_usec_per_write_op']
+ perf_facts['local_queue_usec_per_op'] = perf_info['local_queue_usec_per_op']
+ perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
+ perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
+ perf_facts['queue_depth'] = perf_info['queue_depth']
+ return perf_facts
+
+
+def generate_config_dict(array):
+ config_facts = {}
+ api_version = array._list_available_rest_versions()
+ # DNS
+ config_facts['dns'] = array.get_dns()
+ # SMTP
+ config_facts['smtp'] = array.list_alert_recipients()
+ # SNMP
+ config_facts['snmp'] = array.list_snmp_managers()
+ config_facts['snmp_v3_engine_id'] = array.get_snmp_engine_id()['engine_id']
+ # DS
+ config_facts['directory_service'] = array.get_directory_service()
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['directory_service_roles'] = {}
+ roles = array.list_directory_service_roles()
+ for role in range(0, len(roles)):
+ role_name = roles[role]['name']
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles[role]['group'],
+ 'group_base': roles[role]['group_base'],
+ }
+ else:
+ config_facts['directory_service'].update(array.get_directory_service(groups=True))
+ # NTP
+ config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
+ # SYSLOG
+ config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
+ # Phonehome
+ config_facts['phonehome'] = array.get(phonehome=True)['phonehome']
+ # Proxy
+ config_facts['proxy'] = array.get(proxy=True)['proxy']
+ # Relay Host
+ config_facts['relayhost'] = array.get(relayhost=True)['relayhost']
+ # Sender Domain
+ config_facts['senderdomain'] = array.get(senderdomain=True)['senderdomain']
+ # Idle Timeout
+ config_facts['idle_timeout'] = array.get(idle_timeout=True)['idle_timeout']
+ # SCSI Timeout
+ config_facts['scsi_timeout'] = array.get(scsi_timeout=True)['scsi_timeout']
+ # SSL
+ config_facts['ssl_certs'] = array.get_certificate()
+ # Global Admin settings
+ if S3_REQUIRED_API_VERSION in api_version:
+ config_facts['global_admin'] = array.get_global_admin_attributes()
+ return config_facts
+
+
+def generate_admin_dict(array):
+ api_version = array._list_available_rest_versions()
+ admin_facts = {}
+ if ADMIN_API_VERSION in api_version:
+ admins = array.list_admins()
+ for admin in range(0, len(admins)):
+ admin_name = admins[admin]['name']
+ admin_facts[admin_name] = {
+ 'type': admins[admin]['type'],
+ 'role': admins[admin]['role'],
+ }
+ return admin_facts
+
+
+def generate_subnet_dict(array):
+ sub_facts = {}
+ subnets = array.list_subnets()
+ for sub in range(0, len(subnets)):
+ sub_name = subnets[sub]['name']
+ if subnets[sub]['enabled']:
+ sub_facts[sub_name] = {
+ 'gateway': subnets[sub]['gateway'],
+ 'mtu': subnets[sub]['mtu'],
+ 'vlan': subnets[sub]['vlan'],
+ 'prefix': subnets[sub]['prefix'],
+ 'interfaces': subnets[sub]['interfaces'],
+ 'services': subnets[sub]['services'],
+ }
+ return sub_facts
+
+
+def generate_network_dict(array):
+ net_facts = {}
+ ports = array.list_network_interfaces()
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ net_facts[int_name] = {
+ 'hwaddr': ports[port]['hwaddr'],
+ 'mtu': ports[port]['mtu'],
+ 'enabled': ports[port]['enabled'],
+ 'speed': ports[port]['speed'],
+ 'address': ports[port]['address'],
+ 'slaves': ports[port]['slaves'],
+ 'services': ports[port]['services'],
+ 'gateway': ports[port]['gateway'],
+ 'netmask': ports[port]['netmask'],
+ }
+ if ports[port]['subnet']:
+ subnets = array.get_subnet(ports[port]['subnet'])
+ if subnets['enabled']:
+ net_facts[int_name]['subnet'] = {
+ 'name': subnets['name'],
+ 'prefix': subnets['prefix'],
+ 'vlan': subnets['vlan'],
+ }
+ return net_facts
+
+
+def generate_capacity_dict(array):
+ capacity_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CAP_REQUIRED_API_VERSION in api_version:
+ volumes = array.list_volumes(pending=True)
+ capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
+ capacity = array.get(space=True)
+ total_capacity = capacity[0]['capacity']
+ used_space = capacity[0]["total"]
+ capacity_facts['free_space'] = total_capacity - used_space
+ capacity_facts['total_capacity'] = total_capacity
+ capacity_facts['data_reduction'] = capacity[0]['data_reduction']
+ capacity_facts['system_space'] = capacity[0]['system']
+ capacity_facts['volume_space'] = capacity[0]['volumes']
+ capacity_facts['shared_space'] = capacity[0]['shared_space']
+ capacity_facts['snapshot_space'] = capacity[0]['snapshots']
+ capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
+ capacity_facts['total_reduction'] = capacity[0]['total_reduction']
+
+ return capacity_facts
+
+
+def generate_snap_dict(array):
+ snap_facts = {}
+ snaps = array.list_volumes(snap=True)
+ for snap in range(0, len(snaps)):
+ snapshot = snaps[snap]['name']
+ snap_facts[snapshot] = {
+ 'size': snaps[snap]['size'],
+ 'source': snaps[snap]['source'],
+ 'created': snaps[snap]['created'],
+ }
+ return snap_facts
+
+
+def generate_vol_dict(array):
+ volume_facts = {}
+ vols = array.list_volumes()
+ for vol in range(0, len(vols)):
+ volume = vols[vol]['name']
+ volume_facts[volume] = {
+ 'source': vols[vol]['source'],
+ 'size': vols[vol]['size'],
+ 'serial': vols[vol]['serial'],
+ 'hosts': [],
+ 'bandwidth': ""
+ }
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ qvols = array.list_volumes(qos=True)
+ for qvol in range(0, len(qvols)):
+ volume = qvols[qvol]['name']
+ qos = qvols[qvol]['bandwidth_limit']
+ volume_facts[volume]['bandwidth'] = qos
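+        # Protocol endpoints (used by VMware VVols) are listed separately and
+        # recorded with only their source, serial and host connections.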
+ vvols = array.list_volumes(protocol_endpoint=True)
+ for vvol in range(0, len(vvols)):
+ volume = vvols[vvol]['name']
+ volume_facts[volume] = {
+ 'source': vvols[vvol]['source'],
+ 'serial': vvols[vvol]['serial'],
+ 'hosts': []
+ }
+ cvols = array.list_volumes(connect=True)
+ for cvol in range(0, len(cvols)):
+ volume = cvols[cvol]['name']
+ voldict = [cvols[cvol]['host'], cvols[cvol]['lun']]
+ volume_facts[volume]['hosts'].append(voldict)
+ return volume_facts
+
+
+def generate_host_dict(array):
+ api_version = array._list_available_rest_versions()
+ host_facts = {}
+ hosts = array.list_hosts()
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ tports = []
+ host_all_info = array.get_host(hostname, all=True)
+ if host_all_info:
+ tports = host_all_info[0]['target_port']
+ host_facts[hostname] = {
+ 'hgroup': hosts[host]['hgroup'],
+ 'iqn': hosts[host]['iqn'],
+ 'wwn': hosts[host]['wwn'],
+ 'personality': array.get_host(hostname,
+ personality=True)['personality'],
+ 'target_port': tports
+ }
+ if NVME_API_VERSION in api_version:
+ host_facts[hostname]['nqn'] = hosts[host]['nqn']
+ if PREFERRED_API_VERSION in api_version:
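+        # Preferred-array settings are only returned when requested explicitly,
+        # so list the hosts a second time with preferred_array=True.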
+ hosts = array.list_hosts(preferred_array=True)
+ for host in range(0, len(hosts)):
+ hostname = hosts[host]['name']
+ host_facts[hostname]['preferred_array'] = hosts[host]['preferred_array']
+ return host_facts
+
+
+def generate_pgroups_dict(array):
+ pgroups_facts = {}
+ pgroups = array.list_pgroups()
+ for pgroup in range(0, len(pgroups)):
+ protgroup = pgroups[pgroup]['name']
+ pgroups_facts[protgroup] = {
+ 'hgroups': pgroups[pgroup]['hgroups'],
+ 'hosts': pgroups[pgroup]['hosts'],
+ 'source': pgroups[pgroup]['source'],
+ 'targets': pgroups[pgroup]['targets'],
+ 'volumes': pgroups[pgroup]['volumes'],
+ }
+ prot_sched = array.get_pgroup(protgroup, schedule=True)
+ prot_reten = array.get_pgroup(protgroup, retention=True)
+ if prot_sched['snap_enabled'] or prot_sched['replicate_enabled']:
+            pgroups_facts[protgroup]['snap_frequency'] = prot_sched['snap_frequency']
+            pgroups_facts[protgroup]['replicate_frequency'] = prot_sched['replicate_frequency']
+ pgroups_facts[protgroup]['snap_enabled'] = prot_sched['snap_enabled']
+ pgroups_facts[protgroup]['replicate_enabled'] = prot_sched['replicate_enabled']
+ pgroups_facts[protgroup]['snap_at'] = prot_sched['snap_at']
+ pgroups_facts[protgroup]['replicate_at'] = prot_sched['replicate_at']
+ pgroups_facts[protgroup]['replicate_blackout'] = prot_sched['replicate_blackout']
+ pgroups_facts[protgroup]['per_day'] = prot_reten['per_day']
+ pgroups_facts[protgroup]['target_per_day'] = prot_reten['target_per_day']
+ pgroups_facts[protgroup]['target_days'] = prot_reten['target_days']
+ pgroups_facts[protgroup]['days'] = prot_reten['days']
+ pgroups_facts[protgroup]['all_for'] = prot_reten['all_for']
+ pgroups_facts[protgroup]['target_all_for'] = prot_reten['target_all_for']
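+        # Protection groups replicated from a remote array are reported as
+        # "<source-array>:<pgroup>"; only those carry snapshot transfer history.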
+ if ":" in protgroup:
+ snap_transfers = array.get_pgroup(protgroup, snap=True, transfer=True)
+ pgroups_facts[protgroup]['snaps'] = {}
+ for snap_transfer in range(0, len(snap_transfers)):
+ snap = snap_transfers[snap_transfer]['name']
+ pgroups_facts[protgroup]['snaps'][snap] = {
+ 'created': snap_transfers[snap_transfer]['created'],
+ 'started': snap_transfers[snap_transfer]['started'],
+ 'completed': snap_transfers[snap_transfer]['completed'],
+ 'physical_bytes_written': snap_transfers[snap_transfer]['physical_bytes_written'],
+ 'data_transferred': snap_transfers[snap_transfer]['data_transferred'],
+ 'progress': snap_transfers[snap_transfer]['progress'],
+ }
+ return pgroups_facts
+
+
+def generate_pods_dict(array):
+ pods_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ pods = array.list_pods()
+ for pod in range(0, len(pods)):
+ acpod = pods[pod]['name']
+ pods_facts[acpod] = {
+ 'source': pods[pod]['source'],
+ 'arrays': pods[pod]['arrays'],
+ }
+ return pods_facts
+
+
+def generate_conn_array_dict(array):
+ conn_array_facts = {}
+ api_version = array._list_available_rest_versions()
+ if CONN_STATUS_API_VERSION in api_version:
+ carrays = array.list_connected_arrays()
+ for carray in range(0, len(carrays)):
+ arrayname = carrays[carray]['array_name']
+ conn_array_facts[arrayname] = {
+ 'array_id': carrays[carray]['id'],
+ 'throtled': carrays[carray]['throtled'],
+ 'version': carrays[carray]['version'],
+ 'type': carrays[carray]['type'],
+ 'mgmt_ip': carrays[carray]['management_address'],
+ 'repl_ip': carrays[carray]['replication_address'],
+ }
+ if CONN_STATUS_API_VERSION in api_version:
+ conn_array_facts[arrayname]['status'] = carrays[carray]['status']
+ return conn_array_facts
+
+
+def generate_apps_dict(array):
+ apps_facts = {}
+ api_version = array._list_available_rest_versions()
+ if SAN_REQUIRED_API_VERSION in api_version:
+ apps = array.list_apps()
+ for app in range(0, len(apps)):
+ appname = apps[app]['name']
+ apps_facts[appname] = {
+ 'version': apps[app]['version'],
+ 'status': apps[app]['status'],
+ 'description': apps[app]['description'],
+ }
+ return apps_facts
+
+
+def generate_vgroups_dict(array):
+ vgroups_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ vgroups = array.list_vgroups()
+ for vgroup in range(0, len(vgroups)):
+ virtgroup = vgroups[vgroup]['name']
+ vgroups_facts[virtgroup] = {
+ 'volumes': vgroups[vgroup]['volumes'],
+ }
+ return vgroups_facts
+
+
+def generate_nfs_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if AC_REQUIRED_API_VERSION in api_version:
+ offload = array.list_nfs_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'mount_point': offload[target]['mount_point'],
+ 'protocol': offload[target]['protocol'],
+ 'mount_options': offload[target]['mount_options'],
+ 'address': offload[target]['address'],
+ }
+ return offload_facts
+
+
+def generate_s3_offload_dict(array):
+ offload_facts = {}
+ api_version = array._list_available_rest_versions()
+ if S3_REQUIRED_API_VERSION in api_version:
+ offload = array.list_s3_offload()
+ for target in range(0, len(offload)):
+ offloadt = offload[target]['name']
+ offload_facts[offloadt] = {
+ 'status': offload[target]['status'],
+ 'bucket': offload[target]['bucket'],
+ 'protocol': offload[target]['protocol'],
+ 'access_key_id': offload[target]['access_key_id'],
+ }
+ return offload_facts
+
+
+def generate_hgroups_dict(array):
+ hgroups_facts = {}
+ hgroups = array.list_hgroups()
+ for hgroup in range(0, len(hgroups)):
+ hostgroup = hgroups[hgroup]['name']
+ hgroups_facts[hostgroup] = {
+ 'hosts': hgroups[hgroup]['hosts'],
+ 'pgs': [],
+ 'vols': [],
+ }
+ pghgroups = array.list_hgroups(protect=True)
+ for pghg in range(0, len(pghgroups)):
+ pgname = pghgroups[pghg]['name']
+ hgroups_facts[pgname]['pgs'].append(pghgroups[pghg]['protection_group'])
+ volhgroups = array.list_hgroups(connect=True)
+ for pgvol in range(0, len(volhgroups)):
+ pgname = volhgroups[pgvol]['name']
+ volpgdict = [volhgroups[pgvol]['vol'], volhgroups[pgvol]['lun']]
+ hgroups_facts[pgname]['vols'].append(volpgdict)
+ return hgroups_facts
+
+
+def generate_interfaces_dict(array):
+ api_version = array._list_available_rest_versions()
+ int_facts = {}
+ ports = array.list_ports()
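+    # Each port is keyed by the identifier it exposes: a WWN for Fibre Channel,
+    # an IQN for iSCSI or, on Purity versions with NVMe support, an NQN.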
+ for port in range(0, len(ports)):
+ int_name = ports[port]['name']
+ if ports[port]['wwn']:
+ int_facts[int_name] = ports[port]['wwn']
+ if ports[port]['iqn']:
+ int_facts[int_name] = ports[port]['iqn']
+ if NVME_API_VERSION in api_version:
+ if ports[port]['nqn']:
+ int_facts[int_name] = ports[port]['nqn']
+ return int_facts
+
+
+def main():
+ argument_spec = purefa_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=False)
+
+ array = get_system(module)
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
+ 'hosts', 'admins', 'volumes', 'snapshots', 'pods',
+ 'vgroups', 'offload', 'apps', 'arrays')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(array)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(array)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(array)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(array)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(array)
+ if 'subnet' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(array)
+ if 'interfaces' in subset or 'all' in subset:
+ facts['interfaces'] = generate_interfaces_dict(array)
+ if 'hosts' in subset or 'all' in subset:
+ facts['hosts'] = generate_host_dict(array)
+ if 'volumes' in subset or 'all' in subset:
+ facts['volumes'] = generate_vol_dict(array)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(array)
+ if 'hgroups' in subset or 'all' in subset:
+ facts['hgroups'] = generate_hgroups_dict(array)
+ if 'pgroups' in subset or 'all' in subset:
+ facts['pgroups'] = generate_pgroups_dict(array)
+ if 'pods' in subset or 'all' in subset:
+ facts['pods'] = generate_pods_dict(array)
+ if 'admins' in subset or 'all' in subset:
+ facts['admins'] = generate_admin_dict(array)
+ if 'vgroups' in subset or 'all' in subset:
+ facts['vgroups'] = generate_vgroups_dict(array)
+ if 'offload' in subset or 'all' in subset:
+ facts['nfs_offload'] = generate_nfs_offload_dict(array)
+ facts['s3_offload'] = generate_s3_offload_dict(array)
+ if 'apps' in subset or 'all' in subset:
+ facts['apps'] = generate_apps_dict(array)
+ if 'arrays' in subset or 'all' in subset:
+ facts['arrays'] = generate_conn_array_dict(array)
+
+ module.exit_json(ansible_facts={'ansible_purefa_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py
new file mode 100644
index 00000000..8c5a40c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/purestorage/purefb_facts.py
@@ -0,0 +1,652 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2018, Simon Dodsley (simon@purestorage.com)
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: purefb_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favor of C(_info) module.
+ alternative: Use M(purestorage.flashblade.purefb_info) instead.
+short_description: Collect facts from Pure Storage FlashBlade
+description:
+ - Collect facts information from a Pure Storage FlashBlade running the
+ Purity//FB operating system. By default, the module will collect basic
+ fact information including hosts, host groups, protection
+ groups and volume counts. Additional fact information can be collected
+ based on the configured set of arguments.
+author:
+ - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com>
+options:
+ gather_subset:
+ description:
+ - When supplied, this argument will define the facts to be collected.
+ Possible values for this include all, minimum, config, performance,
+ capacity, network, subnets, lags, filesystems and snapshots.
+ required: false
+ type: list
+ default: minimum
+extends_documentation_fragment:
+- community.general.purestorage.fb
+
+'''
+
+EXAMPLES = r'''
+- name: Collect default set of facts
+ community.general.purefb_facts:
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect configuration and capacity facts
+ community.general.purefb_facts:
+ gather_subset:
+ - config
+ - capacity
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+
+- name: Collect all facts
+ community.general.purefb_facts:
+ gather_subset:
+ - all
+ fb_url: 10.10.10.2
+ api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Returns the facts collected from the FlashBlade
+ returned: always
+ type: complex
+ sample: {
+ "capacity": {
+ "aggregate": {
+ "data_reduction": 1.1179228,
+ "snapshots": 0,
+ "total_physical": 17519748439,
+ "unique": 17519748439,
+ "virtual": 19585726464
+ },
+ "file-system": {
+ "data_reduction": 1.3642412,
+ "snapshots": 0,
+ "total_physical": 4748219708,
+ "unique": 4748219708,
+ "virtual": 6477716992
+ },
+ "object-store": {
+ "data_reduction": 1.0263462,
+ "snapshots": 0,
+ "total_physical": 12771528731,
+ "unique": 12771528731,
+ "virtual": 6477716992
+ },
+ "total": 83359896948925
+ },
+ "config": {
+ "alert_watchers": {
+ "enabled": true,
+ "name": "notify@acmestorage.com"
+ },
+ "array_management": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "management",
+ "services": [
+ "management"
+ ],
+ "uris": []
+ },
+ "directory_service_roles": {
+ "array_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "ops_admin": {
+ "group": null,
+ "group_base": null
+ },
+ "readonly": {
+ "group": null,
+ "group_base": null
+ },
+ "storage_admin": {
+ "group": null,
+ "group_base": null
+ }
+ },
+ "dns": {
+ "domain": "demo.acmestorage.com",
+ "name": "demo-fb-1",
+ "nameservers": [
+ "8.8.8.8"
+ ],
+ "search": [
+ "demo.acmestorage.com"
+ ]
+ },
+ "nfs_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "nfs",
+ "services": [
+ "nfs"
+ ],
+ "uris": []
+ },
+ "ntp": [
+ "0.ntp.pool.org"
+ ],
+ "smb_directory_service": {
+ "base_dn": null,
+ "bind_password": null,
+ "bind_user": null,
+ "enabled": false,
+ "name": "smb",
+ "services": [
+ "smb"
+ ],
+ "uris": []
+ },
+ "smtp": {
+ "name": "demo-fb-1",
+ "relay_host": null,
+ "sender_domain": "acmestorage.com"
+ },
+ "ssl_certs": {
+ "certificate": "-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----",
+ "common_name": "Acme Storage",
+ "country": "US",
+ "email": null,
+ "intermediate_certificate": null,
+ "issued_by": "Acme Storage",
+ "issued_to": "Acme Storage",
+ "key_size": 4096,
+ "locality": null,
+ "name": "global",
+ "organization": "Acme Storage",
+ "organizational_unit": "Acme Storage",
+ "passphrase": null,
+ "private_key": null,
+ "state": null,
+ "status": "self-signed",
+ "valid_from": "1508433967000",
+ "valid_to": "2458833967000"
+ }
+ },
+ "default": {
+ "blades": 15,
+ "buckets": 7,
+ "filesystems": 2,
+ "flashblade_name": "demo-fb-1",
+ "object_store_accounts": 1,
+ "object_store_users": 1,
+ "purity_version": "2.2.0",
+ "snapshots": 1,
+ "total_capacity": 83359896948925
+ },
+ "filesystems": {
+ "k8s-pvc-d24b1357-579e-11e8-811f-ecf4bbc88f54": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": true,
+ "nfs_rules": "*(rw,no_root_squash)",
+ "provisioned": 21474836480,
+ "snapshot_enabled": false
+ },
+ "z": {
+ "destroyed": false,
+ "fast_remove": false,
+ "hard_limit": false,
+ "provisioned": 1073741824,
+ "snapshot_enabled": false
+ }
+ },
+ "lag": {
+ "uplink": {
+ "lag_speed": 0,
+ "port_speed": 40000000000,
+ "ports": [
+ {
+ "name": "CH1.FM1.ETH1.1"
+ },
+ {
+ "name": "CH1.FM1.ETH1.2"
+                    }
+ ],
+ "status": "healthy"
+ }
+ },
+ "network": {
+ "fm1.admin0": {
+ "address": "10.10.100.6",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "fm2.admin0": {
+ "address": "10.10.100.7",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "support"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "nfs1": {
+ "address": "10.10.100.4",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "data"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ },
+ "vir0": {
+ "address": "10.10.100.5",
+ "gateway": "10.10.100.1",
+ "mtu": 1500,
+ "netmask": "255.255.255.0",
+ "services": [
+ "management"
+ ],
+ "type": "vip",
+ "vlan": 2200
+ }
+ },
+ "performance": {
+ "aggregate": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "http": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "nfs": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ },
+ "s3": {
+ "bytes_per_op": 0,
+ "bytes_per_read": 0,
+ "bytes_per_write": 0,
+ "read_bytes_per_sec": 0,
+ "reads_per_sec": 0,
+ "usec_per_other_op": 0,
+ "usec_per_read_op": 0,
+ "usec_per_write_op": 0,
+ "write_bytes_per_sec": 0,
+ "writes_per_sec": 0
+ }
+ },
+ "snapshots": {
+ "z.188": {
+ "destroyed": false,
+ "source": "z",
+ "source_destroyed": false,
+ "suffix": "188"
+ }
+ },
+ "subnet": {
+ "new-mgmt": {
+ "gateway": "10.10.100.1",
+ "interfaces": [
+ {
+ "name": "fm1.admin0"
+ },
+ {
+ "name": "fm2.admin0"
+ },
+ {
+ "name": "nfs1"
+ },
+ {
+ "name": "vir0"
+ }
+ ],
+ "lag": "uplink",
+ "mtu": 1500,
+ "prefix": "10.10.100.0/24",
+ "services": [
+ "data",
+ "management",
+ "support"
+ ],
+ "vlan": 2200
+ }
+ }
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.pure import get_blade, purefb_argument_spec
+
+
+MIN_REQUIRED_API_VERSION = '1.3'
+HARD_LIMIT_API_VERSION = '1.4'
+
+
+def generate_default_dict(blade):
+ default_facts = {}
+ defaults = blade.arrays.list_arrays().items[0]
+ default_facts['flashblade_name'] = defaults.name
+ default_facts['purity_version'] = defaults.version
+ default_facts['filesystems'] = \
+ len(blade.file_systems.list_file_systems().items)
+ default_facts['snapshots'] = \
+ len(blade.file_system_snapshots.list_file_system_snapshots().items)
+ default_facts['buckets'] = len(blade.buckets.list_buckets().items)
+ default_facts['object_store_users'] = \
+ len(blade.object_store_users.list_object_store_users().items)
+ default_facts['object_store_accounts'] = \
+ len(blade.object_store_accounts.list_object_store_accounts().items)
+ default_facts['blades'] = len(blade.blade.list_blades().items)
+ default_facts['total_capacity'] = \
+ blade.arrays.list_arrays_space().items[0].capacity
+ return default_facts
+
+
+def generate_perf_dict(blade):
+ perf_facts = {}
+ total_perf = blade.arrays.list_arrays_performance()
+ http_perf = blade.arrays.list_arrays_performance(protocol='http')
+ s3_perf = blade.arrays.list_arrays_performance(protocol='s3')
+ nfs_perf = blade.arrays.list_arrays_performance(protocol='nfs')
+ perf_facts['aggregate'] = {
+ 'bytes_per_op': total_perf.items[0].bytes_per_op,
+ 'bytes_per_read': total_perf.items[0].bytes_per_read,
+ 'bytes_per_write': total_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': total_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': total_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': total_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': total_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': total_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': total_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': total_perf.items[0].writes_per_sec,
+ }
+ perf_facts['http'] = {
+ 'bytes_per_op': http_perf.items[0].bytes_per_op,
+ 'bytes_per_read': http_perf.items[0].bytes_per_read,
+ 'bytes_per_write': http_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': http_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': http_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': http_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': http_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': http_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': http_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': http_perf.items[0].writes_per_sec,
+ }
+ perf_facts['s3'] = {
+ 'bytes_per_op': s3_perf.items[0].bytes_per_op,
+ 'bytes_per_read': s3_perf.items[0].bytes_per_read,
+ 'bytes_per_write': s3_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': s3_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': s3_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': s3_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': s3_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': s3_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': s3_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': s3_perf.items[0].writes_per_sec,
+ }
+ perf_facts['nfs'] = {
+ 'bytes_per_op': nfs_perf.items[0].bytes_per_op,
+ 'bytes_per_read': nfs_perf.items[0].bytes_per_read,
+ 'bytes_per_write': nfs_perf.items[0].bytes_per_write,
+ 'read_bytes_per_sec': nfs_perf.items[0].read_bytes_per_sec,
+ 'reads_per_sec': nfs_perf.items[0].reads_per_sec,
+ 'usec_per_other_op': nfs_perf.items[0].usec_per_other_op,
+ 'usec_per_read_op': nfs_perf.items[0].usec_per_read_op,
+ 'usec_per_write_op': nfs_perf.items[0].usec_per_write_op,
+ 'write_bytes_per_sec': nfs_perf.items[0].write_bytes_per_sec,
+ 'writes_per_sec': nfs_perf.items[0].writes_per_sec,
+ }
+
+ return perf_facts
+
+
+def generate_config_dict(blade):
+ config_facts = {}
+ config_facts['dns'] = blade.dns.list_dns().items[0].to_dict()
+ config_facts['smtp'] = blade.smtp.list_smtp().items[0].to_dict()
+ config_facts['alert_watchers'] = \
+ blade.alert_watchers.list_alert_watchers().items[0].to_dict()
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ config_facts['array_management'] = \
+ blade.directory_services.list_directory_services(names=['management']).items[0].to_dict()
+ config_facts['directory_service_roles'] = {}
+ roles = blade.directory_services.list_directory_services_roles()
+ for role in range(0, len(roles.items)):
+ role_name = roles.items[role].name
+ config_facts['directory_service_roles'][role_name] = {
+ 'group': roles.items[role].group,
+ 'group_base': roles.items[role].group_base
+ }
+ config_facts['nfs_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['nfs']).items[0].to_dict()
+ config_facts['smb_directory_service'] = \
+ blade.directory_services.list_directory_services(names=['smb']).items[0].to_dict()
+ config_facts['ntp'] = blade.arrays.list_arrays().items[0].ntp_servers
+ config_facts['ssl_certs'] = \
+ blade.certificates.list_certificates().items[0].to_dict()
+ return config_facts
+
+
+def generate_subnet_dict(blade):
+ sub_facts = {}
+ subnets = blade.subnets.list_subnets()
+ for sub in range(0, len(subnets.items)):
+ sub_name = subnets.items[sub].name
+ if subnets.items[sub].enabled:
+ sub_facts[sub_name] = {
+ 'gateway': subnets.items[sub].gateway,
+ 'mtu': subnets.items[sub].mtu,
+ 'vlan': subnets.items[sub].vlan,
+ 'prefix': subnets.items[sub].prefix,
+ 'services': subnets.items[sub].services,
+ }
+ sub_facts[sub_name]['lag'] = subnets.items[sub].link_aggregation_group.name
+ sub_facts[sub_name]['interfaces'] = []
+ for iface in range(0, len(subnets.items[sub].interfaces)):
+ sub_facts[sub_name]['interfaces'].append({'name': subnets.items[sub].interfaces[iface].name})
+ return sub_facts
+
+
+def generate_lag_dict(blade):
+ lag_facts = {}
+ groups = blade.link_aggregation_groups.list_link_aggregation_groups()
+ for groupcnt in range(0, len(groups.items)):
+ lag_name = groups.items[groupcnt].name
+ lag_facts[lag_name] = {
+ 'lag_speed': groups.items[groupcnt].lag_speed,
+ 'port_speed': groups.items[groupcnt].port_speed,
+ 'status': groups.items[groupcnt].status,
+ }
+ lag_facts[lag_name]['ports'] = []
+ for port in range(0, len(groups.items[groupcnt].ports)):
+ lag_facts[lag_name]['ports'].append({'name': groups.items[groupcnt].ports[port].name})
+ return lag_facts
+
+
+def generate_network_dict(blade):
+ net_facts = {}
+ ports = blade.network_interfaces.list_network_interfaces()
+ for portcnt in range(0, len(ports.items)):
+ int_name = ports.items[portcnt].name
+ if ports.items[portcnt].enabled:
+ net_facts[int_name] = {
+ 'type': ports.items[portcnt].type,
+ 'mtu': ports.items[portcnt].mtu,
+ 'vlan': ports.items[portcnt].vlan,
+ 'address': ports.items[portcnt].address,
+ 'services': ports.items[portcnt].services,
+ 'gateway': ports.items[portcnt].gateway,
+ 'netmask': ports.items[portcnt].netmask,
+ }
+ return net_facts
+
+
+def generate_capacity_dict(blade):
+ capacity_facts = {}
+ total_cap = blade.arrays.list_arrays_space()
+ file_cap = blade.arrays.list_arrays_space(type='file-system')
+ object_cap = blade.arrays.list_arrays_space(type='object-store')
+ capacity_facts['total'] = total_cap.items[0].capacity
+ capacity_facts['aggregate'] = {
+ 'data_reduction': total_cap.items[0].space.data_reduction,
+ 'snapshots': total_cap.items[0].space.snapshots,
+ 'total_physical': total_cap.items[0].space.total_physical,
+ 'unique': total_cap.items[0].space.unique,
+ 'virtual': total_cap.items[0].space.virtual,
+ }
+ capacity_facts['file-system'] = {
+ 'data_reduction': file_cap.items[0].space.data_reduction,
+ 'snapshots': file_cap.items[0].space.snapshots,
+ 'total_physical': file_cap.items[0].space.total_physical,
+ 'unique': file_cap.items[0].space.unique,
+ 'virtual': file_cap.items[0].space.virtual,
+ }
+ capacity_facts['object-store'] = {
+ 'data_reduction': object_cap.items[0].space.data_reduction,
+ 'snapshots': object_cap.items[0].space.snapshots,
+ 'total_physical': object_cap.items[0].space.total_physical,
+ 'unique': object_cap.items[0].space.unique,
+        'virtual': object_cap.items[0].space.virtual,
+ }
+
+ return capacity_facts
+
+
+def generate_snap_dict(blade):
+ snap_facts = {}
+ snaps = blade.file_system_snapshots.list_file_system_snapshots()
+ for snap in range(0, len(snaps.items)):
+ snapshot = snaps.items[snap].name
+ snap_facts[snapshot] = {
+ 'destroyed': snaps.items[snap].destroyed,
+ 'source': snaps.items[snap].source,
+ 'suffix': snaps.items[snap].suffix,
+ 'source_destroyed': snaps.items[snap].source_destroyed,
+ }
+ return snap_facts
+
+
+def generate_fs_dict(blade):
+ fs_facts = {}
+ fsys = blade.file_systems.list_file_systems()
+ for fsystem in range(0, len(fsys.items)):
+ share = fsys.items[fsystem].name
+ fs_facts[share] = {
+ 'fast_remove': fsys.items[fsystem].fast_remove_directory_enabled,
+ 'snapshot_enabled': fsys.items[fsystem].snapshot_directory_enabled,
+ 'provisioned': fsys.items[fsystem].provisioned,
+ 'destroyed': fsys.items[fsystem].destroyed,
+ }
+ if fsys.items[fsystem].http.enabled:
+ fs_facts[share]['http'] = fsys.items[fsystem].http.enabled
+ if fsys.items[fsystem].smb.enabled:
+ fs_facts[share]['smb_mode'] = fsys.items[fsystem].smb.acl_mode
+ if fsys.items[fsystem].nfs.enabled:
+ fs_facts[share]['nfs_rules'] = fsys.items[fsystem].nfs.rules
+ api_version = blade.api_version.list_versions().versions
+ if HARD_LIMIT_API_VERSION in api_version:
+ fs_facts[share]['hard_limit'] = fsys.items[fsystem].hard_limit_enabled
+
+ return fs_facts
+
+
+def main():
+ argument_spec = purefb_argument_spec()
+ argument_spec.update(dict(
+ gather_subset=dict(default='minimum', type='list',)
+ ))
+
+ module = AnsibleModule(argument_spec, supports_check_mode=True)
+
+ blade = get_blade(module)
+ versions = blade.api_version.list_versions().versions
+
+ if MIN_REQUIRED_API_VERSION not in versions:
+ module.fail_json(msg='FlashBlade REST version not supported. Minimum version required: {0}'.format(MIN_REQUIRED_API_VERSION))
+
+ subset = [test.lower() for test in module.params['gather_subset']]
+ valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
+ 'network', 'subnets', 'lags',
+ 'filesystems', 'snapshots')
+ subset_test = (test in valid_subsets for test in subset)
+ if not all(subset_test):
+ module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
+ % (",".join(valid_subsets), ",".join(subset)))
+
+ facts = {}
+
+ if 'minimum' in subset or 'all' in subset:
+ facts['default'] = generate_default_dict(blade)
+ if 'performance' in subset or 'all' in subset:
+ facts['performance'] = generate_perf_dict(blade)
+ if 'config' in subset or 'all' in subset:
+ facts['config'] = generate_config_dict(blade)
+ if 'capacity' in subset or 'all' in subset:
+ facts['capacity'] = generate_capacity_dict(blade)
+ if 'lags' in subset or 'all' in subset:
+ facts['lag'] = generate_lag_dict(blade)
+ if 'network' in subset or 'all' in subset:
+ facts['network'] = generate_network_dict(blade)
+ if 'subnets' in subset or 'all' in subset:
+ facts['subnet'] = generate_subnet_dict(blade)
+ if 'filesystems' in subset or 'all' in subset:
+ facts['filesystems'] = generate_fs_dict(blade)
+ if 'snapshots' in subset or 'all' in subset:
+ facts['snapshots'] = generate_snap_dict(blade)
+
+ module.exit_json(ansible_facts={'ansible_purefb_facts': facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py
new file mode 100644
index 00000000..54bb8c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_eg.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+ - Create or delete export groups on a Vexata VX100 array.
+ - An export group is a tuple of a volume group, initiator group and port
+ group that allows a set of volumes to be exposed to one or more hosts
+ through specific array ports.
+author:
+ - Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Export group name.
+ required: true
+ type: str
+ state:
+ description:
+    - Creates export group when present or deletes when absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ vg:
+ description:
+ - Volume group name.
+ type: str
+ ig:
+ description:
+ - Initiator group name.
+ type: str
+ pg:
+ description:
+ - Port group name.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export.
+ community.general.vexata_eg:
+ name: db_export
+ vg: dbvols
+ ig: dbhosts
+ pg: pg1
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete export group named db_export
+ community.general.vexata_eg:
+ name: db_export
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+ """Retrieve a named vg if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ egs = array.list_egs()
+        # filter() returns a lazy iterator on Python 3, so wrap it in a list
+        eg = list(filter(lambda eg: eg['name'] == name, egs))
+ if len(eg) == 1:
+ return eg[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+ """Retrieve a named vg's id if it exists, error if absent."""
+ name = module.params['vg']
+ try:
+ vgs = array.list_vgs()
+        vg = list(filter(lambda vg: vg['name'] == name, vgs))
+ if len(vg) == 1:
+ return vg[0]['id']
+ else:
+ module.fail_json(msg='Volume group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+ """Retrieve a named ig's id if it exists, error if absent."""
+ name = module.params['ig']
+ try:
+ igs = array.list_igs()
+        ig = list(filter(lambda ig: ig['name'] == name, igs))
+ if len(ig) == 1:
+ return ig[0]['id']
+ else:
+ module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+ """Retrieve a named pg's id if it exists, error if absent."""
+ name = module.params['pg']
+ try:
+ pgs = array.list_pgs()
+        pg = list(filter(lambda pg: pg['name'] == name, pgs))
+ if len(pg) == 1:
+ return pg[0]['id']
+ else:
+ module.fail_json(msg='Port group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+ """"Create a new export group."""
+ changed = False
+ eg_name = module.params['name']
+ vg_id = get_vg_id(module, array)
+ ig_id = get_ig_id(module, array)
+ pg_id = get_pg_id(module, array)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ eg = array.create_eg(
+ eg_name,
+ 'Ansible export group',
+ (vg_id, ig_id, pg_id))
+ if eg:
+ module.log(msg='Created export group {0}'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+ changed = False
+ eg_name = eg['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_eg(
+ eg['id'])
+ if ok:
+ module.log(msg='Export group {0} deleted.'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ vg=dict(type='str'),
+ ig=dict(type='str'),
+ pg=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ eg = get_eg(module, array)
+
+ if state == 'present' and not eg:
+ create_eg(module, array)
+ elif state == 'absent' and eg:
+ delete_eg(module, array, eg)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py
new file mode 100644
index 00000000..1cf4cd7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/vexata/vexata_volume.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+ - Create, delete or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates/Modifies volume when present or removes when absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ size:
+ description:
+ - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+ community.general.vexata_volume:
+ name: foo
+ size: 2T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Expand volume named foo to 4 TiB
+ community.general.vexata_volume:
+ name: foo
+ size: 4T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete volume named foo
+ community.general.vexata_volume:
+ name: foo
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+ """Retrieve a named volume if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ vols = array.list_volumes()
+        # filter() returns a lazy iterator on Python 3, so wrap it in a list
+        vol = list(filter(lambda v: v['name'] == name, vols))
+ if len(vol) == 1:
+ return vol[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
+ size = module.params.get('size', False)
+ if not size:
+ module.fail_json(msg=err_msg)
+ size = size_to_MiB(size)
+ if size <= 0:
+ module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
+ return size
+
+
+def create_volume(module, array):
+ """"Create a new volume."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to create volume.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.create_volume(
+ module.params['name'],
+ 'Ansible volume',
+ size)
+ if vol:
+ module.log(msg='Created volume {0}'.format(vol['id']))
+ changed = True
+ else:
+ module.fail_json(msg='Volume create failed.')
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+ """Expand the volume size."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to update volume')
+ prev_size = volume['volSize']
+ if size <= prev_size:
+ module.log(msg='Volume expanded size needs to be larger '
+ 'than current size.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.grow_volume(
+ volume['name'],
+ volume['description'],
+ volume['id'],
+ size)
+ if vol:
+ changed = True
+ except Exception:
+ pass
+
+ module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+ changed = False
+ vol_name = volume['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_volume(
+ volume['id'])
+ if ok:
+ module.log(msg='Volume {0} deleted.'.format(vol_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ size=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ volume = get_volume(module, array)
+
+ if state == 'present':
+ if not volume:
+ create_volume(module, array)
+ else:
+ update_volume(module, array, volume)
+ elif state == 'absent' and volume:
+ delete_volume(module, array, volume)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py
new file mode 100644
index 00000000..6b2260fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+ - Manages ZFS file systems, volumes, clones and snapshots
+options:
+ name:
+ description:
+ - File system, snapshot or volume name e.g. C(rpool/myfs).
+ required: true
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: [ absent, present ]
+ required: true
+ origin:
+ description:
+ - Snapshot from which to create a clone.
+ extra_zfs_properties:
+ description:
+ - A dictionary of zfs properties to be set.
+ - See the zfs(8) man page for more information.
+author:
+- Johan Wiren (@johanwiren)
+'''
+
+EXAMPLES = '''
+- name: Create a new file system called myfs in pool rpool with the setuid property turned off
+ community.general.zfs:
+ name: rpool/myfs
+ state: present
+ extra_zfs_properties:
+ setuid: off
+
+- name: Create a new volume called myvol in pool rpool.
+ community.general.zfs:
+ name: rpool/myvol
+ state: present
+ extra_zfs_properties:
+ volsize: 10M
+
+- name: Create a snapshot of rpool/myfs file system.
+ community.general.zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
+
+- name: Create a new file system called myfs2 with snapdir enabled
+ community.general.zfs:
+ name: rpool/myfs2
+ state: present
+ extra_zfs_properties:
+ snapdir: enabled
+
+- name: Create a new file system by cloning a snapshot
+ community.general.zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
+
+- name: Destroy a filesystem
+ community.general.zfs:
+ name: rpool/myfs
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0].split('@')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
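+        # Feature-flag pools report '-' for the legacy version property, while
+        # some OpenZFS pools still report the sentinel value 5000.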
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
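+            # Pool version 34 (Solaris 11) introduced the share.nfs/share.smb
+            # property syntax that get_current_properties() aliases back to
+            # sharenfs/sharesmb.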
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ origin = self.module.params.get('origin', None)
+ cmd = [self.zfs_cmd]
+
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if properties:
+ for prop, value in properties.items():
+ if prop == 'volsize':
+ cmd += ['-V', value]
+ elif prop == 'volblocksize':
+ cmd += ['-b', value]
+ else:
+ cmd += ['-o', '%s="%s"' % (prop, value)]
+ if origin and action == 'clone':
+ cmd.append(origin)
+ cmd.append(self.name)
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_properties_if_changed(self):
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.items():
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
+ for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ origin=dict(type='str', default=None),
+ extra_zfs_properties=dict(type='dict', default={}),
+ ),
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+
+ if module.params.get('origin') and '@' in name:
+ module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+ # Reverse the boolification of zfs properties
+ for prop, value in module.params['extra_zfs_properties'].items():
+ if isinstance(value, bool):
+ if value is True:
+ module.params['extra_zfs_properties'][prop] = 'on'
+ else:
+ module.params['extra_zfs_properties'][prop] = 'off'
+ else:
+ module.params['extra_zfs_properties'][prop] = value
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+ if state == 'present':
+ if zfs.exists():
+ zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py
new file mode 100644
index 00000000..223d7f72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+ - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+ operations normally restricted to the superuser.
+ - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+ - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
+ versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+options:
+ name:
+ description:
+ - File system or volume name e.g. C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to allow (C(present)), or unallow (C(absent)) a permission.
+      - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+ - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ choices: [ absent, present ]
+ default: present
+ users:
+ description:
+ - List of users to whom permission(s) should be granted.
+ type: list
+ groups:
+ description:
+ - List of groups to whom permission(s) should be granted.
+ type: list
+ everyone:
+ description:
+ - Apply permissions to everyone.
+ type: bool
+ default: no
+ permissions:
+ description:
+ - The list of permission(s) to delegate (required if C(state) is C(present)).
+ type: list
+ choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
+ local:
+ description:
+ - Apply permissions to C(name) locally (C(zfs allow -l)).
+ type: bool
+ descendents:
+ description:
+ - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ type: bool
+ recursive:
+ description:
+ - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool
+ default: no
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: adm
+ permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+ community.general.zfs_delegate_admin:
+ name: rpool/myvol
+ groups: backup
+ everyone: yes
+ permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: foo,bar
+ permissions: send,receive
+ local: yes
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ everyone: yes
+ state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params.get('name')
+ self.state = module.params.get('state')
+ self.users = module.params.get('users')
+ self.groups = module.params.get('groups')
+ self.everyone = module.params.get('everyone')
+ self.perms = module.params.get('permissions')
+ self.scope = None
+ self.changed = False
+ self.initial_perms = None
+ self.subcommand = 'allow'
+ self.recursive_opt = []
+ self.run_method = self.update
+
+ self.setup(module)
+
+ def setup(self, module):
+ """ Validate params and set up for run.
+ """
+ if self.state == 'absent':
+ self.subcommand = 'unallow'
+ if module.params.get('recursive'):
+ self.recursive_opt = ['-r']
+
+ local = module.params.get('local')
+ descendents = module.params.get('descendents')
+ if (local and descendents) or (not local and not descendents):
+ self.scope = 'ld'
+ elif local:
+ self.scope = 'l'
+ elif descendents:
+ self.scope = 'd'
+ else:
+ self.module.fail_json(msg='Impossible value for local and descendents')
+
+ if not (self.users or self.groups or self.everyone):
+ if self.state == 'present':
+ self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+ elif self.state == 'absent':
+ self.run_method = self.clear
+ # ansible ensures the else cannot happen here
+
+ self.zfs_path = module.get_bin_path('zfs', True)
+
+ @property
+ def current_perms(self):
+ """ Parse the output of `zfs allow <name>` to retrieve current permissions.
+ """
+ out = self.run_zfs_raw(subcommand='allow')
+ perms = {
+ 'l': {'u': {}, 'g': {}, 'e': []},
+ 'd': {'u': {}, 'g': {}, 'e': []},
+ 'ld': {'u': {}, 'g': {}, 'e': []},
+ }
+ linemap = {
+ 'Local permissions:': 'l',
+ 'Descendent permissions:': 'd',
+ 'Local+Descendent permissions:': 'ld',
+ }
+ scope = None
+ for line in out.splitlines():
+ scope = linemap.get(line, scope)
+ if not scope:
+ continue
+ try:
+ if line.startswith('\tuser ') or line.startswith('\tgroup '):
+ ent_type, ent, cur_perms = line.split()
+ perms[scope][ent_type[0]][ent] = cur_perms.split(',')
+ elif line.startswith('\teveryone '):
+ perms[scope]['e'] = line.split()[1].split(',')
+ except ValueError:
+ self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+ return perms
+
+ def run_zfs_raw(self, subcommand=None, args=None):
+ """ Run a raw zfs command, fail on error.
+ """
+ cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc:
+ self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+ return out
+
+ def run_zfs(self, args):
+ """ Run zfs allow/unallow with appropriate options as per module arguments.
+ """
+ args = self.recursive_opt + ['-' + self.scope] + args
+ if self.perms:
+ args.append(','.join(self.perms))
+ return self.run_zfs_raw(args=args)
+
+ def clear(self):
+ """ Called by run() to clear all permissions.
+ """
+ changed = False
+ stdout = ''
+ for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
+ for ent in self.initial_perms[scope][ent_type].keys():
+ stdout += self.run_zfs(['-%s' % ent_type, ent])
+ changed = True
+ for scope in ('ld', 'l', 'd'):
+ if self.initial_perms[scope]['e']:
+ stdout += self.run_zfs(['-e'])
+ changed = True
+ return (changed, stdout)
+
+ def update(self):
+ """ Update permissions as per module arguments.
+ """
+ stdout = ''
+ for ent_type, entities in (('u', self.users), ('g', self.groups)):
+ if entities:
+ stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+ if self.everyone:
+ stdout += self.run_zfs(['-e'])
+ return (self.initial_perms != self.current_perms, stdout)
+
+ def run(self):
+ """ Run an operation, return results for Ansible.
+ """
+ exit_args = {'state': self.state}
+ self.initial_perms = self.current_perms
+ exit_args['changed'], stdout = self.run_method()
+ if exit_args['changed']:
+ exit_args['msg'] = 'ZFS delegated admin permissions updated'
+ exit_args['stdout'] = stdout
+ self.module.exit_json(**exit_args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ users=dict(type='list'),
+ groups=dict(type='list'),
+ everyone=dict(type='bool', default=False),
+ permissions=dict(type='list',
+ choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote',
+ 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share',
+ 'snapshot', 'unallow']),
+ local=dict(type='bool'),
+ descendents=dict(type='bool'),
+ recursive=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ required_if=[('state', 'present', ['permissions'])],
+ )
+ zfs_delegate_admin = ZfsDelegateAdmin(module)
+ zfs_delegate_admin.run()
+
+
+if __name__ == '__main__':
+ main()
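
Editor's aside: ZfsDelegateAdmin.current_perms() above parses `zfs allow <dataset>` output by scope headers and tab-indented entity lines. The standalone sketch below mirrors that parsing idea on invented sample output; it is not part of the patch.

# Standalone sketch of the parsing idea used by ZfsDelegateAdmin.current_perms().
SAMPLE = (
    "---- Permissions on rpool/myfs ----------------------------------\n"
    "Local+Descendent permissions:\n"
    "\tuser adm allow,unallow\n"
    "\tgroup backup send\n"
    "\teveryone mount\n"
)

def parse_allow_output(out):
    perms = {scope: {'u': {}, 'g': {}, 'e': []} for scope in ('l', 'd', 'ld')}
    linemap = {
        'Local permissions:': 'l',
        'Descendent permissions:': 'd',
        'Local+Descendent permissions:': 'ld',
    }
    scope = None
    for line in out.splitlines():
        scope = linemap.get(line, scope)
        if not scope:
            continue
        if line.startswith('\tuser ') or line.startswith('\tgroup '):
            ent_type, ent, cur_perms = line.split()
            perms[scope][ent_type[0]][ent] = cur_perms.split(',')
        elif line.startswith('\teveryone '):
            perms[scope]['e'] = line.split()[1].split(',')
    return perms

if __name__ == '__main__':
    print(parse_allow_output(SAMPLE)['ld'])
    # -> {'u': {'adm': ['allow', 'unallow']}, 'g': {'backup': ['send']}, 'e': ['mount']}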
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py
new file mode 100644
index 00000000..e7719f68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_facts.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zfs_facts
+short_description: Gather facts about ZFS datasets.
+description:
+ - Gather facts from ZFS dataset properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS dataset name.
+ required: yes
+ aliases: [ "ds", "dataset" ]
+ recurse:
+ description:
+ - Specifies if properties for any children should be recursively
+ displayed.
+ type: bool
+ default: 'no'
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: 'no'
+ properties:
+ description:
+            - Specifies which dataset properties should be queried in comma-separated format.
+                For more information about dataset properties, check the zfs(1M) man page.
+ default: all
+ type:
+ description:
+            - Specifies which dataset types to display. Multiple values have to be
+                provided in comma-separated form.
+ choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
+ default: all
+ depth:
+ description:
+ - Specifies recursion depth.
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS dataset rpool/export/home
+ community.general.zfs_facts:
+ dataset: rpool/export/home
+
+- name: Report space usage on ZFS filesystems under data/home
+ community.general.zfs_facts:
+ name: data/home
+ recurse: yes
+ type: filesystem
+
+- ansible.builtin.debug:
+ msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
+ with_items: '{{ ansible_zfs_datasets }}'
+'''
+
+RETURN = '''
+name:
+ description: ZFS dataset name
+ returned: always
+ type: str
+ sample: rpool/var/spool
+parsable:
+    description: Whether parsable output should be provided in machine-friendly format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+recurse:
+    description: Whether we should recurse over the ZFS dataset.
+ returned: if 'recurse' is set to True
+ type: bool
+ sample: True
+zfs_datasets:
+ description: ZFS dataset facts
+ returned: always
+ type: str
+ sample:
+ {
+ "aclinherit": "restricted",
+ "aclmode": "discard",
+ "atime": "on",
+ "available": "43.8G",
+ "canmount": "on",
+ "casesensitivity": "sensitive",
+ "checksum": "on",
+ "compression": "off",
+ "compressratio": "1.00x",
+ "copies": "1",
+ "creation": "Thu Jun 16 11:37 2016",
+ "dedup": "off",
+ "devices": "on",
+ "exec": "on",
+ "filesystem_count": "none",
+ "filesystem_limit": "none",
+ "logbias": "latency",
+ "logicalreferenced": "18.5K",
+ "logicalused": "3.45G",
+ "mlslabel": "none",
+ "mounted": "yes",
+ "mountpoint": "/rpool",
+ "name": "rpool",
+ "nbmand": "off",
+ "normalization": "none",
+ "org.openindiana.caiman:install": "ready",
+ "primarycache": "all",
+ "quota": "none",
+ "readonly": "off",
+ "recordsize": "128K",
+ "redundant_metadata": "all",
+ "refcompressratio": "1.00x",
+ "referenced": "29.5K",
+ "refquota": "none",
+ "refreservation": "none",
+ "reservation": "none",
+ "secondarycache": "all",
+ "setuid": "on",
+ "sharenfs": "off",
+ "sharesmb": "off",
+ "snapdir": "hidden",
+ "snapshot_count": "none",
+ "snapshot_limit": "none",
+ "sync": "standard",
+ "type": "filesystem",
+ "used": "4.41G",
+ "usedbychildren": "4.41G",
+ "usedbydataset": "29.5K",
+ "usedbyrefreservation": "0",
+ "usedbysnapshots": "0",
+ "utf8only": "off",
+ "version": "5",
+ "vscan": "off",
+ "written": "29.5K",
+ "xattr": "on",
+ "zoned": "off"
+ }
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
+
+
+class ZFSFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.recurse = module.params['recurse']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self.type = module.params['type']
+ self.depth = module.params['depth']
+
+ self._datasets = defaultdict(dict)
+ self.facts = []
+
+ def dataset_exists(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ if self.recurse:
+ cmd.append('-r')
+ if int(self.depth) != 0:
+ cmd.append('-d')
+ cmd.append('%s' % self.depth)
+ if self.type:
+ cmd.append('-t')
+ cmd.append(self.type)
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ for line in out.splitlines():
+ dataset, property, value = line.split('\t')
+
+ self._datasets[dataset].update({property: value})
+
+ for k, v in iteritems(self._datasets):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_datasets': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
+ recurse=dict(required=False, default=False, type='bool'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
+ depth=dict(required=False, default=0, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ zfs_facts = ZFSFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zfs_facts.name
+
+ if zfs_facts.parsable:
+ result['parsable'] = zfs_facts.parsable
+
+ if zfs_facts.recurse:
+ result['recurse'] = zfs_facts.recurse
+
+ if zfs_facts.dataset_exists():
+ result['ansible_facts'] = zfs_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
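
Editor's aside: ZFSFacts.get_facts() consumes the tab-separated output of `zfs get -H -o name,property,value`. A minimal standalone sketch of the same folding logic, on invented sample lines (not part of the patch):

# Standalone sketch: fold `zfs get -H -o name,property,value` lines into per-dataset dicts,
# mirroring ZFSFacts.get_facts(). The sample output is invented.
from collections import defaultdict

SAMPLE = (
    "data/home\tused\t4.41G\n"
    "data/home\tmountpoint\t/home\n"
    "data/home/alice\tused\t1.2G\n"
)

def parse_zfs_get(out):
    datasets = defaultdict(dict)
    for line in out.splitlines():
        dataset, prop, value = line.split('\t')
        datasets[dataset][prop] = value
    facts = []
    for name, props in datasets.items():
        props['name'] = name
        facts.append(props)
    return {'ansible_zfs_datasets': facts}

if __name__ == '__main__':
    print(parse_zfs_get(SAMPLE))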
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py
new file mode 100644
index 00000000..728c0779
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/storage/zfs/zpool_facts.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zpool_facts
+short_description: Gather facts about ZFS pools.
+description:
+ - Gather facts from ZFS pool properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS pool name.
+ aliases: [ "pool", "zpool" ]
+ required: false
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: False
+ required: false
+ properties:
+ description:
+      - Specifies which pool properties should be queried in comma-separated format.
+        For more information about pool properties, check the zpool(1M) man page.
+ default: all
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS pool rpool
+ community.general.zpool_facts: pool=rpool
+
+- name: Gather space usage about all imported ZFS pools
+ community.general.zpool_facts: properties='free,size'
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
+ with_items: '{{ ansible_zfs_pools }}'
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary containing all the detailed information about the ZFS pool facts
+ returned: always
+ type: complex
+ contains:
+ ansible_zfs_pools:
+ description: ZFS pool facts
+ returned: always
+ type: str
+ sample:
+ {
+ "allocated": "3.46G",
+ "altroot": "-",
+ "autoexpand": "off",
+ "autoreplace": "off",
+ "bootfs": "rpool/ROOT/openindiana",
+ "cachefile": "-",
+ "capacity": "6%",
+ "comment": "-",
+ "dedupditto": "0",
+ "dedupratio": "1.00x",
+ "delegation": "on",
+ "expandsize": "-",
+ "failmode": "wait",
+ "feature@async_destroy": "enabled",
+ "feature@bookmarks": "enabled",
+ "feature@edonr": "enabled",
+ "feature@embedded_data": "active",
+ "feature@empty_bpobj": "active",
+ "feature@enabled_txg": "active",
+ "feature@extensible_dataset": "enabled",
+ "feature@filesystem_limits": "enabled",
+ "feature@hole_birth": "active",
+ "feature@large_blocks": "enabled",
+ "feature@lz4_compress": "active",
+ "feature@multi_vdev_crash_dump": "enabled",
+ "feature@sha512": "enabled",
+ "feature@skein": "enabled",
+ "feature@spacemap_histogram": "active",
+ "fragmentation": "3%",
+ "free": "46.3G",
+ "freeing": "0",
+ "guid": "15729052870819522408",
+ "health": "ONLINE",
+ "leaked": "0",
+ "listsnapshots": "off",
+ "name": "rpool",
+ "readonly": "off",
+ "size": "49.8G",
+ "version": "-"
+ }
+name:
+ description: ZFS pool name
+ returned: always
+ type: str
+ sample: rpool
+parsable:
+    description: Whether parsable output should be provided in machine-friendly format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+
+ self._pools = defaultdict(dict)
+ self.facts = []
+
+ def pool_exists(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ if self.name:
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ for line in out.splitlines():
+ pool, property, value = line.split('\t')
+
+ self._pools[pool].update({property: value})
+
+ for k, v in iteritems(self._pools):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_pools': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ zpool_facts = ZPoolFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zpool_facts.name
+
+ if zpool_facts.parsable:
+ result['parsable'] = zpool_facts.parsable
+
+ if zpool_facts.name is not None:
+ if zpool_facts.pool_exists():
+ result['ansible_facts'] = zpool_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+ else:
+ result['ansible_facts'] = zpool_facts.get_facts()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
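
Editor's aside: a sketch of the `zpool get` command assembly performed by ZPoolFacts.get_facts(), shown as a standalone helper with an invented binary path and pool name (not part of the patch):

# Standalone sketch of the `zpool get` command assembly done in ZPoolFacts.get_facts().
def build_zpool_get_cmd(zpool_bin, properties='all', parsable=False, name=None):
    cmd = [zpool_bin, 'get', '-H']
    if parsable:
        cmd.append('-p')          # machine-friendly (exact) values
    cmd += ['-o', 'name,property,value', properties]
    if name:
        cmd.append(name)          # omit to query every imported pool
    return cmd

if __name__ == '__main__':
    print(build_zpool_get_cmd('/usr/sbin/zpool', properties='free,size', name='rpool'))
    # -> ['/usr/sbin/zpool', 'get', '-H', '-o', 'name,property,value', 'free,size', 'rpool']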
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py
new file mode 100644
index 00000000..5524beea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/supervisorctl.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+ - Manage the state of a program or group of programs running via supervisord
+options:
+ name:
+ type: str
+ description:
+ - The name of the supervisord program or group to manage.
+      - The name will be taken as a group name when it ends with a colon I(:).
+ - Group support is only available in Ansible version 1.6 or later.
+ required: true
+ config:
+ type: path
+ description:
+ - The supervisor configuration file path
+ server_url:
+ type: str
+ description:
+ - URL on which supervisord server is listening
+ username:
+ type: str
+ description:
+ - username to use for authentication
+ password:
+ type: str
+ description:
+ - password to use for authentication
+ state:
+ type: str
+ description:
+ - The desired state of program/group.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ signal:
+ type: str
+ description:
+      - The signal to send to the program/group, when combined with the C(signalled) state. Required when I(state=signalled).
+ supervisorctl_path:
+ type: path
+ description:
+ - path to supervisorctl executable
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+ - "Matt Wright (@mattupstate)"
+ - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program to be in started state
+ community.general.supervisorctl:
+ name: my_app
+ state: started
+
+- name: Manage the state of program group to be in started state
+ community.general.supervisorctl:
+ name: 'my_apps:'
+ state: started
+
+- name: Restart my_app, reading supervisorctl configuration from a specified file
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
+
+- name: Restart my_app, connecting to supervisord with credentials and server URL
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
+
+- name: Send a signal to my_app via supervisorctl
+ community.general.supervisorctl:
+ name: my_app
+ state: signalled
+ signal: USR1
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ config=dict(required=False, type='path'),
+ server_url=dict(type='str', required=False),
+ username=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ supervisorctl_path=dict(required=False, type='path'),
+ state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ signal=dict(type='str', required=False)
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
+ state = module.params['state']
+ config = module.params.get('config')
+ server_url = module.params.get('server_url')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ supervisorctl_path = module.params.get('supervisorctl_path')
+ signal = module.params.get('signal')
+
+ # we check error message for a pattern, so we need to make sure that's in C locale
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if supervisorctl_path:
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+ supervisorctl_args = [supervisorctl_path]
+ else:
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ else:
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+ if config:
+ supervisorctl_args.extend(['-c', config])
+ if server_url:
+ supervisorctl_args.extend(['-s', server_url])
+ if username:
+ supervisorctl_args.extend(['-u', username])
+ if password:
+ supervisorctl_args.extend(['-p', password])
+
+ if state == 'signalled' and not signal:
+ module.fail_json(msg="State 'signalled' requires a 'signal' value")
+
+ def run_supervisorctl(cmd, name=None, **kwargs):
+ args = list(supervisorctl_args) # copy the master args
+ args.append(cmd)
+ if name:
+ args.append(name)
+ return module.run_command(args, **kwargs)
+
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
+ else:
+ if process_name != name:
+ continue
+
+ matched.append((process_name, status))
+ return matched
+
+ def take_action_on_processes(processes, status_filter, action, expected_result):
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
+
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
+
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update', check_rc=True)
+ processes = get_matched_processes()
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
+
+ processes = get_matched_processes()
+
+ if state == 'absent':
+ if len(processes) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('remove', name)
+ if '%s: removed process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'started':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
+
+ if state == 'stopped':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
+
+ if state == 'signalled':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+        take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+ main()
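
Editor's aside: get_matched_processes() above filters `supervisorctl status` lines by program or group name. The standalone sketch below reproduces that matching on invented status output; it is not part of the patch.

# Standalone sketch of the name/group matching in get_matched_processes().
SAMPLE_STATUS = (
    "echo_date_lonely                 RUNNING   pid 7680, uptime 13:22:18\n"
    "echo_date_group:echo_date_00     RUNNING   pid 7681, uptime 13:22:18\n"
    "echo_date_group:echo_date_01     STOPPED   Not started\n"
)

def matched_processes(status_output, name, is_group):
    matched = []
    for line in status_output.splitlines():
        fields = [f for f in line.split(' ') if f != '']
        process_name, status = fields[0], fields[1]
        if is_group:
            # group members are reported as "<group>:<process>"
            if ':' not in process_name or process_name.split(':')[0] != name:
                continue
        elif process_name != name:
            continue
        matched.append((process_name, status))
    return matched

if __name__ == '__main__':
    print(matched_processes(SAMPLE_STATUS, 'echo_date_group', is_group=True))
    # -> [('echo_date_group:echo_date_00', 'RUNNING'), ('echo_date_group:echo_date_01', 'STOPPED')]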
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py
new file mode 100644
index 00000000..e9215670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svc.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: svc
+author:
+- Brian Coca (@bcoca)
+short_description: Manage daemontools services
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+      - C(started)/C(stopped) are idempotent actions that will not run
+        commands unless necessary. C(restarted) will always bounce the
+        svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+        C(reloaded) will send a SIGUSR1 (svc -1).
+        C(once) will run a normally downed svc once (svc -o), which is not
+        really an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ downed:
+ description:
+      - Whether a 'down' file should exist or not; if it exists it disables auto startup.
+        Defaults to no. Downed does not imply stopped.
+ type: bool
+ enabled:
+ description:
+      - Whether the service is enabled or not; if disabled it also implies stopped.
+        Take note that a service can be enabled and downed (no auto restart).
+ type: bool
+ service_dir:
+ description:
+ - Directory svscan watches for services
+ type: str
+ default: /service
+ service_src:
+ description:
+ - Directory where services are defined, the source of symlinks to service_dir.
+ type: str
+ default: /etc/service
+'''
+
+EXAMPLES = '''
+- name: Start svc dnscache, if not running
+ community.general.svc:
+ name: dnscache
+ state: started
+
+- name: Stop svc dnscache, if running
+ community.general.svc:
+ name: dnscache
+ state: stopped
+
+- name: Kill svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: killed
+
+- name: Restart svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: restarted
+
+- name: Reload svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+
+- name: Using alternative svc directory location
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
+'''
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = ['/command', '/usr/local/bin']
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+ self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd, '-dx', src_log])
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+ m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ downed=dict(type='bool'),
+ service_dir=dict(type='str', default='/service'),
+ service_src=dict(type='str', default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+    if state is not None and state != svc.state:
+        changed = True
+        if not module.check_mode:
+            # 'started' -> start(), 'stopped' -> stopp(), etc.; 'once' maps directly to once()
+            action = 'once' if state == 'once' else state[:-2]
+            getattr(svc, action)()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+ main()
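
Editor's aside: Svc.get_status() derives state, pid and duration from svstat output with a few regular expressions. A standalone sketch of the same parsing, run on invented sample lines (not part of the patch):

# Standalone sketch of the svstat output parsing in Svc.get_status().
import re

def parse_svstat(out):
    state, pid, duration = 'unknown', None, None
    m = re.search(r'\(pid (\d+)\)', out)
    if m:
        pid = m.group(1)
    m = re.search(r'(\d+) seconds', out)
    if m:
        duration = m.group(1)
    if re.search(' up ', out):
        state = 'start'
    elif re.search(' down ', out):
        state = 'stopp'
    else:
        return state, pid, duration
    # "want up/down" means a transition is pending, hence the -ing form
    state += 'ing' if re.search(' want ', out) else 'ed'
    return state, pid, duration

if __name__ == '__main__':
    print(parse_svstat('/service/dnscache: up (pid 4242) 1042 seconds'))
    # -> ('started', '4242', '1042')
    print(parse_svstat('/service/dnscache: down 7 seconds, want up'))
    # -> ('stopping', None, '7')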
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py
new file mode 100644
index 00000000..21d17f4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+options:
+ name:
+ description:
+ - Package name, e.g. C(SUNWcsr)
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+ type: str
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when C(state=present).
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+ type: str
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if C(src) is a URL.
+ type: str
+ response_file:
+ description:
+      - Specifies the location of a response file to be used if the package expects input on install. (added in Ansible 1.4)
+ required: false
+ type: str
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ type: str
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install a package from an already copied file
+ community.general.svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
+
+- name: Install a package directly from an http site
+ community.general.svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
+
+- name: Install a package with a response file
+ community.general.svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
+
+- name: Ensure that a package is not installed
+ community.general.svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
+
+- name: Ensure that a category is not installed
+ community.general.svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
+'''
+
+
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True)]
+ cmd.append('-q')
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+ rc, out, err = module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def create_admin_file():
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = '''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+    os.write(desc, to_bytes(fullauto))
+ os.close(desc)
+ return filename
+
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = ['pkgadd', '-n']
+ if zone == 'current':
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
+ if proxy is not None:
+ cmd += ['-x', proxy]
+ if response_file is not None:
+ cmd += ['-r', response_file]
+ if category:
+ cmd += ['-Y']
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
+ else:
+ cmd = ['pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
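
Editor's aside: package_install() builds a non-interactive pkgadd command around a temporary admin file. The sketch below reproduces that command assembly as a standalone helper; paths and package names are invented and this is not part of the patch.

# Standalone sketch of the pkgadd command assembled by package_install().
def build_pkgadd_cmd(adminfile, name, src, zone='all', proxy=None, response_file=None, category=False):
    cmd = ['pkgadd', '-n']                  # -n: non-interactive
    if zone == 'current':
        cmd += ['-G']                       # install in the current zone only
    cmd += ['-a', adminfile, '-d', src]     # admin file answers all prompts
    if proxy is not None:
        cmd += ['-x', proxy]
    if response_file is not None:
        cmd += ['-r', response_file]
    if category:
        cmd += ['-Y']                       # treat name as a category
    cmd.append(name)
    return cmd

if __name__ == '__main__':
    print(build_pkgadd_cmd('/tmp/ansible_svr4pkgXXXX', 'CSWcommon', '/tmp/cswpkgs.pkg', zone='current'))
    # -> ['pkgadd', '-n', '-G', '-a', '/tmp/ansible_svr4pkgXXXX', '-d', '/tmp/cswpkgs.pkg', 'CSWcommon']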
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py
new file mode 100644
index 00000000..7e9db835
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swdepot.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+    - Installs, upgrades, and removes packages with the swdepot package manager (HP-UX).
+notes: []
+author: "Raul Melo (@melodous)"
+options:
+ name:
+ description:
+ - package name.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+ - whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ choices: [ 'present', 'latest', 'absent']
+ type: str
+ depot:
+ description:
+      - The source repository from which to install or upgrade a package.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install a package
+ community.general.swdepot:
+ name: unzip-6.0
+ state: present
+ depot: 'repository:/path'
+
+- name: Install the latest version of a package
+ community.general.swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- name: Remove a package
+ community.general.swdepot:
+ name: unzip
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def compare_package(version1, version2):
+ """ Compare version packages.
+ Return values:
+ -1 first minor
+ 0 equal
+ 1 first greater """
+
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ normalized_version1 = normalize(version1)
+ normalized_version2 = normalize(version2)
+ if normalized_version1 == normalized_version2:
+ rc = 0
+ elif normalized_version1 < normalized_version2:
+ rc = -1
+ else:
+ rc = 1
+ return rc
+
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+ use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
+
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+ msg = "No changed"
+ rc = 0
+ if (state == 'present' or state == 'latest') and depot is None:
+ output = "depot parameter is mandatory in present or latest task"
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ # Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+ if (state == 'present' or state == 'latest') and installed is False:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed is True:
+ # Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed, version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed is True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
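
Editor's aside: compare_package() strips trailing '.0' segments before comparing version lists. A standalone sketch with invented version strings (not part of the patch):

# Standalone sketch of the version comparison used by swdepot's compare_package().
import re

def compare_package(version1, version2):
    def normalize(v):
        # strip trailing ".0" groups so "1.2" and "1.2.0" compare as equal
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split('.')]
    n1, n2 = normalize(version1), normalize(version2)
    return (n1 > n2) - (n1 < n2)   # -1, 0 or 1, like the module's return values

if __name__ == '__main__':
    print(compare_package('6.0', '6.0.0'))   # -> 0 (equal after normalization)
    print(compare_package('5.52', '6.0'))    # -> -1 (installed older than depot)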
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py
new file mode 100644
index 00000000..4dac01be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/swupd.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems.
+description:
+ - Manages updates and bundles with the swupd bundle manager, which is used by the
+ Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+options:
+ contenturl:
+ description:
+ - URL pointing to the contents of available bundles.
+ If not specified, the contents are retrieved from clearlinux.org.
+ type: str
+ format:
+ description:
+      - The format suffix for version file downloads, for example C(1), C(2), C(3), or C(staging).
+        If not specified, the default format is used.
+ type: str
+ manifest:
+ description:
+ - The manifest contains information about the bundles at certain version of the OS.
+ Specify a Manifest version to verify against that version or leave unspecified to
+ verify against the current version.
+ aliases: [release, version]
+ type: int
+ name:
+ description:
+      - Name of the bundle to install or remove.
+ aliases: [bundle]
+ type: str
+ state:
+ description:
+      - Indicates the desired bundle state. C(present) ensures the bundle
+        is installed while C(absent) ensures the bundle is not installed.
+ default: present
+ choices: [present, absent]
+ type: str
+ update:
+ description:
+ - Updates the OS to the latest version.
+ type: bool
+ default: false
+ url:
+ description:
+ - Overrides both I(contenturl) and I(versionurl).
+ type: str
+ verify:
+ description:
+ - Verify content for OS version.
+ type: bool
+ default: false
+ versionurl:
+ description:
+ - URL for version string download.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Update the OS to the latest version
+ community.general.swupd:
+ update: yes
+
+- name: Installs the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: present
+
+- name: Removes the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: absent
+
+- name: Check integrity of filesystem
+ community.general.swupd:
+ verify: yes
+
+- name: Downgrade OS to release 12920
+ community.general.swupd:
+ verify: yes
+ manifest: 12920
+'''
+
+RETURN = '''
+stdout:
+ description: stdout of swupd
+ returned: always
+ type: str
+stderr:
+ description: stderr of swupd
+ returned: always
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Swupd(object):
+ FILES_NOT_MATCH = "files did not match"
+ FILES_REPLACED = "missing files were replaced"
+ FILES_FIXED = "files were fixed"
+ FILES_DELETED = "files were deleted"
+
+ def __init__(self, module):
+ # Fail if swupd is not found
+ self.module = module
+ self.swupd_cmd = module.get_bin_path("swupd", False)
+ if not self.swupd_cmd:
+ module.fail_json(msg="Could not find swupd.")
+
+ # Initialize parameters
+ for key in module.params.keys():
+ setattr(self, key, module.params[key])
+
+ # Initialize return values
+ self.changed = False
+ self.failed = False
+ self.msg = None
+ self.rc = None
+ self.stderr = ""
+ self.stdout = ""
+
+ def _run_cmd(self, cmd):
+ self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
+
+ def _get_cmd(self, command):
+ cmd = "%s %s" % (self.swupd_cmd, command)
+
+ if self.format:
+ cmd += " --format=%s" % self.format
+ if self.manifest:
+ cmd += " --manifest=%s" % self.manifest
+ if self.url:
+ cmd += " --url=%s" % self.url
+ else:
+ if self.contenturl and command != "check-update":
+ cmd += " --contenturl=%s" % self.contenturl
+ if self.versionurl:
+ cmd += " --versionurl=%s" % self.versionurl
+
+ return cmd
+
+ def _is_bundle_installed(self, bundle):
+ try:
+ os.stat("/usr/share/clear/bundles/%s" % bundle)
+ except OSError:
+ return False
+
+ return True
+
+ def _needs_update(self):
+ cmd = self._get_cmd("check-update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ return True
+
+ if self.rc == 1:
+ return False
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def _needs_verify(self):
+ cmd = self._get_cmd("verify")
+ self._run_cmd(cmd)
+
+ if self.rc != 0:
+ self.failed = True
+ self.msg = "Failed to check for filesystem inconsistencies."
+
+ if self.FILES_NOT_MATCH in self.stdout:
+ return True
+
+ return False
+
+ def install_bundle(self, bundle):
+ """Installs a bundle with `swupd bundle-add bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+ if self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s is already installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-add %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s installed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to install bundle %s" % bundle
+
+ def remove_bundle(self, bundle):
+ """Removes a bundle with `swupd bundle-remove bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+ if not self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s not installed"
+ return
+
+ cmd = self._get_cmd("bundle-remove %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s removed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to remove bundle %s" % bundle
+
+ def update_os(self):
+ """Updates the os with `swupd update`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_update())
+
+ if not self._needs_update():
+ self.msg = "There are no updates available"
+ return
+
+ cmd = self._get_cmd("update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Update successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def verify_os(self):
+ """Verifies filesystem against specified or current version"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_verify())
+
+ if not self._needs_verify():
+ self.msg = "No files where changed"
+ return
+
+ cmd = self._get_cmd("verify --fix")
+ self._run_cmd(cmd)
+
+ if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+ self.changed = True
+ self.msg = "Fix successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to verify the OS"
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ contenturl=dict(type="str"),
+ format=dict(type="str"),
+ manifest=dict(aliases=["release", "version"], type="int"),
+ name=dict(aliases=["bundle"], type="str"),
+ state=dict(default="present", choices=["present", "absent"], type="str"),
+ update=dict(default=False, type="bool"),
+ url=dict(type="str"),
+ verify=dict(default=False, type="bool"),
+ versionurl=dict(type="str"),
+ ),
+ required_one_of=[["name", "update", "verify"]],
+ mutually_exclusive=[["name", "update", "verify"]],
+ supports_check_mode=True
+ )
+
+ swupd = Swupd(module)
+
+ name = module.params["name"]
+ state = module.params["state"]
+ update = module.params["update"]
+ verify = module.params["verify"]
+
+ if update:
+ swupd.update_os()
+ elif verify:
+ swupd.verify_os()
+ elif state == "present":
+ swupd.install_bundle(name)
+ elif state == "absent":
+ swupd.remove_bundle(name)
+ else:
+ swupd.failed = True
+
+ if swupd.failed:
+ module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+ else:
+ module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+ main()
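
Editor's aside: Swupd._get_cmd() layers the optional format/manifest/URL flags onto every swupd subcommand. The standalone sketch below reproduces that assembly with an invented binary path and URL; it is not part of the patch.

# Standalone sketch of the flag assembly done by Swupd._get_cmd().
def build_swupd_cmd(swupd_bin, subcommand, fmt=None, manifest=None, url=None,
                    contenturl=None, versionurl=None):
    cmd = "%s %s" % (swupd_bin, subcommand)
    if fmt:
        cmd += " --format=%s" % fmt
    if manifest:
        cmd += " --manifest=%s" % manifest
    if url:
        cmd += " --url=%s" % url               # overrides contenturl/versionurl
    else:
        if contenturl and subcommand != "check-update":
            cmd += " --contenturl=%s" % contenturl
        if versionurl:
            cmd += " --versionurl=%s" % versionurl
    return cmd

if __name__ == '__main__':
    print(build_swupd_cmd('/usr/bin/swupd', 'verify --fix', manifest=12920,
                          contenturl='https://example.invalid/update'))
    # -> /usr/bin/swupd verify --fix --manifest=12920 --contenturl=https://example.invalid/update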
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py
new file mode 100644
index 00000000..7f4f899f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syslogger.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# Copyright: (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syslogger
+short_description: Log messages in the syslog
+description:
+ - Uses syslog to add log entries to the host.
+options:
+ msg:
+ type: str
+ description:
+ - This is the message to place in syslog.
+ required: True
+ priority:
+ type: str
+ description:
+ - Set the log priority.
+ choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
+ default: "info"
+ facility:
+ type: str
+ description:
+ - Set the log facility.
+ choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
+ "uucp", "cron", "syslog", "local0", "local1", "local2",
+ "local3", "local4", "local5", "local6", "local7" ]
+ default: "daemon"
+ log_pid:
+ description:
+ - Log the PID in brackets.
+ type: bool
+ default: False
+ ident:
+ description:
+      - Specify the name of the application which is sending the log to syslog.
+ type: str
+ default: 'ansible_syslogger'
+ version_added: '0.2.0'
+author:
+ - Tim Rightnour (@garbled1)
+'''
+
+EXAMPLES = r'''
+- name: Simple Usage
+ community.general.syslogger:
+ msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+ community.general.syslogger:
+ msg: "Hello from Ansible"
+ priority: "err"
+ facility: "user"
+ log_pid: true
+
+- name: Specify the name of the application which is sending the log message
+ community.general.syslogger:
+ ident: "MyApp"
+ msg: "I want to believe"
+ priority: "alert"
+'''
+
+RETURN = r'''
+ident:
+ description: Name of application sending the message to log
+ returned: always
+ type: str
+ sample: "ansible_syslogger"
+ version_added: '0.2.0'
+priority:
+ description: Priority level
+ returned: always
+ type: str
+ sample: "daemon"
+facility:
+ description: Syslog facility
+ returned: always
+ type: str
+ sample: "info"
+log_pid:
+ description: Log PID status
+ returned: always
+ type: bool
+ sample: True
+msg:
+ description: Message sent to syslog
+ returned: always
+ type: str
+ sample: "Hello from Ansible"
+'''
+
+import syslog
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_facility(facility):
+ return {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'syslog': syslog.LOG_SYSLOG,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+ }.get(facility, syslog.LOG_DAEMON)
+
+
+def get_priority(priority):
+ return {
+ 'emerg': syslog.LOG_EMERG,
+ 'alert': syslog.LOG_ALERT,
+ 'crit': syslog.LOG_CRIT,
+ 'err': syslog.LOG_ERR,
+ 'warning': syslog.LOG_WARNING,
+ 'notice': syslog.LOG_NOTICE,
+ 'info': syslog.LOG_INFO,
+ 'debug': syslog.LOG_DEBUG
+ }.get(priority, syslog.LOG_INFO)
+
+
+def main():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ ident=dict(type='str', default='ansible_syslogger'),
+ msg=dict(type='str', required=True),
+ priority=dict(type='str', required=False,
+ choices=["emerg", "alert", "crit", "err", "warning",
+ "notice", "info", "debug"],
+ default='info'),
+ facility=dict(type='str', required=False,
+ choices=["kern", "user", "mail", "daemon", "auth",
+ "lpr", "news", "uucp", "cron", "syslog",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"],
+ default='daemon'),
+ log_pid=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ )
+
+ result = dict(
+ changed=False,
+ ident=module.params['ident'],
+ priority=module.params['priority'],
+ facility=module.params['facility'],
+ log_pid=module.params['log_pid'],
+ msg=module.params['msg']
+ )
+
+ # do the logging
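+    # syslog.openlog(ident, logoption, facility): passing syslog.LOG_PID as the
+    # logoption prefixes every message with the PID; passing 0 disables that.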
+ try:
+ syslog.openlog(module.params['ident'],
+ syslog.LOG_PID if module.params['log_pid'] else 0,
+ get_facility(module.params['facility']))
+ syslog.syslog(get_priority(module.params['priority']),
+ module.params['msg'])
+ syslog.closelog()
+ result['changed'] = True
+
+ except Exception as exc:
+ module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py
new file mode 100644
index 00000000..2483fb36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/syspatch.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019-2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syspatch
+
+short_description: Manage OpenBSD system patches
+
+
+description:
+ - "Manage OpenBSD system patches using syspatch."
+
+options:
+ apply:
+ type: bool
+ description:
+ - Apply all available system patches.
+ - By default, apply all patches.
+ - Deprecated. Will be removed in community.general 3.0.0.
+ default: yes
+ revert:
+ description:
+ - Revert system patches.
+ type: str
+ choices: [ all, one ]
+
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = '''
+- name: Apply all available system patches
+ community.general.syspatch:
+ apply: true
+
+- name: Revert last patch
+ community.general.syspatch:
+ revert: one
+
+- name: Revert all patches
+ community.general.syspatch:
+ revert: all
+
+# NOTE: You can reboot automatically if a patch requires it:
+- name: Apply all patches and store result
+ community.general.syspatch:
+ apply: true
+ register: syspatch
+
+- name: Reboot if patch requires it
+ ansible.builtin.reboot:
+ when: syspatch.reboot_needed
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+stdout:
+ description: syspatch standard output.
+ returned: always
+ type: str
+ sample: "001_rip6cksum"
+stderr:
+ description: syspatch standard error.
+ returned: always
+ type: str
+ sample: "syspatch: need root privileges"
+reboot_needed:
+ description: Whether or not a reboot is required after an update.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ apply=dict(type='bool', default=True, removed_in_version='3.0.0', removed_from_collection='community.general'),
+ revert=dict(type='str', choices=['all', 'one'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_one_of=[['apply', 'revert']]
+ )
+
+ result = syspatch_run(module)
+
+ module.exit_json(**result)
+
+
+def syspatch_run(module):
+ cmd = module.get_bin_path('syspatch', True)
+ changed = False
+ reboot_needed = False
+ warnings = []
+
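+    # syspatch flags used below (see the OpenBSD syspatch(8) manual): -c lists
+    # patches that can be installed, -l lists patches already installed,
+    # -r reverts the most recently installed patch and -R reverts all of them.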
+ # Set safe defaults for run_flag and check_flag
+ run_flag = ['-c']
+ check_flag = ['-c']
+ if module.params['revert']:
+ check_flag = ['-l']
+
+        if module.params['revert'] == 'all':
+            run_flag = ['-R']
+        else:
+            run_flag = ['-r']
+ elif module.params['apply']:
+ check_flag = ['-c']
+ run_flag = []
+
+ # Run check command
+ rc, out, err = module.run_command([cmd] + check_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+
+ if len(out) > 0:
+ # Changes pending
+ change_pending = True
+ else:
+ # No changes pending
+ change_pending = False
+
+ if module.check_mode:
+ changed = change_pending
+ elif change_pending:
+ rc, out, err = module.run_command([cmd] + run_flag)
+
+ # Workaround syspatch ln bug:
+ # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
+ if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('create unique kernel') >= 0:
+ # Kernel update applied
+ reboot_needed = True
+ elif out.lower().find('syspatch updated itself') >= 0:
+ warnings.append('Syspatch was updated. Please run syspatch again.')
+
+ # If no stdout, then warn user
+ if len(out) == 0:
+ warnings.append('syspatch had suggested changes, but stdout was empty.')
+
+ changed = True
+ else:
+ changed = False
+
+ return dict(
+ changed=changed,
+ reboot_needed=reboot_needed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py
new file mode 100644
index 00000000..89468059
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_devices.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+options:
+ attributes:
+ description:
+ - A list of device attributes.
+ type: dict
+ device:
+ description:
+ - The name of the device.
+    - C(all) is valid to rescan all C(available) devices (AIX cfgmgr command).
+ type: str
+ force:
+ description:
+ - Forces action.
+ type: bool
+ default: no
+ recursive:
+ description:
+ - Removes or defines a device and children devices.
+ type: bool
+ default: no
+ state:
+ description:
+ - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes the device to the Defined state.
+ type: str
+ choices: [ available, defined, removed ]
+ default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+ community.general.aix_devices:
+ device: all
+ state: available
+
+- name: Scan new virtual devices (vio0)
+ community.general.aix_devices:
+ device: vio0
+ state: available
+
+- name: Removing IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+ community.general.aix_devices:
+ device: ent2
+ state: removed
+
+- name: Put device en2 in Defined
+ community.general.aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent device).
+ community.general.aix_devices:
+ device: ent4
+ state: removed
+
+- name: Put device en4 in Defined (nonexistent device)
+ community.general.aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+      mtu: 9000
+ arp: off
+ state: available
+
+- name: Configure IP, netmask and set en1 up.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: available
+
+- name: Adding IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: available
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+ """
+ Check if device already exists and the state.
+ Args:
+ module: Ansible module.
+ device: device to be checked.
+
+ Returns: bool, device state
+
+ """
+ lsdev_cmd = module.get_bin_path('lsdev', True)
+ rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+ if lsdev_out:
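+        # 'lsdev -C -l <device>' prints "<name> <state> <location> <description>",
+        # so the second whitespace-separated field is the device state
+        # (for example "Available" or "Defined").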
+ device_state = lsdev_out.split()[1]
+ return True, device_state
+
+ device_state = None
+ return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+ """
+
+ Args:
+ module: Ansible module.
+ device: device to check attributes.
+ attr: attribute to be checked.
+
+ Returns:
+
+ """
+ lsattr_cmd = module.get_bin_path('lsattr', True)
+ rc, lsattr_out, err = module.run_command(["%s" % lsattr_cmd, '-El', "%s" % device, '-a', "%s" % attr])
+
+ hidden_attrs = ['delalias4', 'delalias6']
+
+ if rc == 255:
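+        # A return code of 255 means lsattr could not find the requested
+        # attribute for this device. The alias-removal attributes
+        # (delalias4/delalias6) never show up in lsattr output, so they are
+        # treated as currently empty rather than invalid.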
+
+ if attr in hidden_attrs:
+ current_param = ''
+ else:
+ current_param = None
+
+ return current_param
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+ current_param = lsattr_out.split()[1]
+ return current_param
+
+
+def discover_device(module, device):
+ """ Discover AIX devices."""
+ cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+ if device is not None:
+ device = "-l %s" % device
+
+ else:
+ device = ''
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ rc, cfgmgr_out, err = module.run_command(["%s" % cfgmgr_cmd, "%s" % device])
+ changed = True
+ msg = cfgmgr_out
+
+ return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+ """ Change AIX device attribute. """
+
+ attr_changed = []
+ attr_not_changed = []
+ attr_invalid = []
+ chdev_cmd = module.get_bin_path('chdev', True)
+
+ for attr in list(attributes.keys()):
+ new_param = attributes[attr]
+ current_param = _check_device_attr(module, device, attr)
+
+ if current_param is None:
+ attr_invalid.append(attr)
+
+ elif current_param != new_param:
+ if force:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr]), "%s" % force]
+ else:
+ cmd = ["%s" % chdev_cmd, '-l', "%s" % device, '-a', "%s=%s" % (attr, attributes[attr])]
+
+ if not module.check_mode:
+ rc, chdev_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.exit_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+ attr_changed.append(attributes[attr])
+ else:
+ attr_not_changed.append(attributes[attr])
+
+ if len(attr_changed) > 0:
+ changed = True
+ attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+ else:
+ changed = False
+ attr_changed_msg = ''
+
+ if len(attr_not_changed) > 0:
+ attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+ else:
+ attr_not_changed_msg = ''
+
+ if len(attr_invalid) > 0:
+ attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+ else:
+ attr_invalid_msg = ''
+
+ msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+ return changed, msg
+
+
+def remove_device(module, device, force, recursive, state):
+ """ Puts device in defined state or removes device. """
+
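+    # rmdev moves a device to the Defined state by default; with -d it also
+    # deletes the device definition, and -R applies the operation recursively
+    # to child devices.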
+ state_opt = {
+ 'removed': '-d',
+ 'absent': '-d',
+ 'defined': ''
+ }
+
+ recursive_opt = {
+ True: '-R',
+ False: ''
+ }
+
+ recursive = recursive_opt[recursive]
+ state = state_opt[state]
+
+ changed = True
+ msg = ''
+ rmdev_cmd = module.get_bin_path('rmdev', True)
+
+ if not module.check_mode:
+ if state:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive, "%s" % force])
+ else:
+ rc, rmdev_out, err = module.run_command(["%s" % rmdev_cmd, "-l", "%s" % device, "%s" % recursive])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
+
+ msg = rmdev_out
+
+ return changed, msg
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ attributes=dict(type='dict'),
+ device=dict(type='str'),
+ force=dict(type='bool', default=False),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
+ ),
+ supports_check_mode=True,
+ )
+
+ force_opt = {
+ True: '-f',
+ False: '',
+ }
+
+ attributes = module.params['attributes']
+ device = module.params['device']
+ force = force_opt[module.params['force']]
+ recursive = module.params['recursive']
+ state = module.params['state']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'available' or state == 'present':
+ if attributes:
+ # change attributes on device
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ # discovery devices (cfgmgr)
+ if device and device != 'all':
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ # run cfgmgr on specific device
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ elif state == 'removed' or state == 'absent' or state == 'defined':
+ if not device:
+ result['msg'] = "device is required to removed or defined state."
+
+ else:
+ # Remove device
+ check_device, device_state = _check_device(module, device)
+ if check_device:
+ if state == 'defined' and device_state == 'Defined':
+ result['changed'] = False
+ result['msg'] = 'Device %s already in Defined' % device
+
+ else:
+ result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py
new file mode 100644
index 00000000..58a5c25d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_filesystem.py
@@ -0,0 +1,567 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file systems on
+    AIX using C(/etc/filesystems).
+  - For LVM file systems, it is possible to resize a file system.
+options:
+ account_subsystem:
+ description:
+ - Specifies whether the file system is to be processed by the accounting subsystem.
+ type: bool
+ default: no
+ attributes:
+ description:
+    - Specifies attributes for the file system, separated by commas.
+ type: list
+ elements: str
+ default: agblksize='4096',isnapshot='no'
+ auto_mount:
+ description:
+ - File system is automatically mounted at system restart.
+ type: bool
+ default: yes
+ device:
+ description:
+ - Logical volume (LV) device name or remote export device to create a NFS file system.
+ - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+    - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
+ type: str
+ fs_type:
+ description:
+ - Specifies the virtual file system type.
+ type: str
+ default: jfs2
+ permissions:
+ description:
+ - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ type: str
+ choices: [ ro, rw ]
+ default: rw
+ mount_group:
+ description:
+ - Specifies the mount group.
+ type: str
+ filesystem:
+ description:
+ - Specifies the mount point, which is the directory where the file system will be mounted.
+ type: str
+ required: true
+ nfs_server:
+ description:
+ - Specifies a Network File System (NFS) server.
+ type: str
+ rm_mount_point:
+ description:
+ - Removes the mount point directory when used with state C(absent).
+ type: bool
+ default: no
+ size:
+ description:
+ - Specifies the file system size.
+    - For a file system that is already C(present), it will be resized.
+    - The size is given in 512-byte blocks, Megabytes or Gigabytes. If the value
+      has an M suffix it is interpreted as Megabytes; a G suffix means Gigabytes.
+    - Without an M or G suffix the value is in 512-byte blocks.
+    - If the value starts with "+", that amount is added to the current size.
+    - If the value starts with "-", that amount is removed from the current size.
+    - Without a leading "+" or "-", the value is the new total size.
+    - The size respects the AIX LVM standards.
+ type: str
+ state:
+ description:
+ - Controls the file system state.
+    - C(present) checks if the file system exists and creates or resizes it.
+    - C(absent) removes an existing file system if it is already C(unmounted).
+    - C(mounted) checks if the file system is mounted or mounts the file system.
+    - C(unmounted) checks if the file system is unmounted or unmounts the file system.
+ type: str
+ choices: [ absent, mounted, present, unmounted ]
+ default: present
+ vg:
+ description:
+ - Specifies an existing volume group (VG).
+ type: str
+notes:
+  - For more C(attributes), please check the AIX C(crfs) manual.
+'''
+
+EXAMPLES = r'''
+- name: Create filesystem in a previously defined logical volume.
+ community.general.aix_filesystem:
+ device: testlv
+    filesystem: /testfs
+ state: present
+
+- name: Creating NFS filesystem from nfshost.
+ community.general.aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+    filesystem: /home/ftp
+ state: present
+
+- name: Creating a new file system without a previously logical volume.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+- name: Unmounting /testfs.
+ community.general.aix_filesystem:
+    filesystem: /testfs
+ state: unmounted
+
+- name: Resizing /mksysb to +512M.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G.
+ community.general.aix_filesystem:
+    filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Remove NFS filesystem /home/ftp.
+ community.general.aix_filesystem:
+    filesystem: /home/ftp
+ rm_mount_point: yes
+ state: absent
+
+- name: Remove /newfs.
+ community.general.aix_filesystem:
+    filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Return changed for aix_filesystems actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._mount import ismount
+import re
+
+
+def _fs_exists(module, filesystem):
+ """
+ Check if file system already exists on /etc/filesystems.
+
+ :param module: Ansible module.
+    :param filesystem: filesystem name.
+ :return: True or False.
+ """
+ lsfs_cmd = module.get_bin_path('lsfs', True)
+ rc, lsfs_out, err = module.run_command("%s -l %s" % (lsfs_cmd, filesystem))
+ if rc == 1:
+ if re.findall("No record matching", err):
+ return False
+
+ else:
+ module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
+
+ else:
+
+ return True
+
+
+def _check_nfs_device(module, nfs_host, device):
+ """
+ Validate if NFS server is exporting the device (remote export).
+
+ :param module: Ansible module.
+ :param nfs_host: nfs_host parameter, NFS server.
+ :param device: device parameter, remote export.
+ :return: True or False.
+ """
+ showmount_cmd = module.get_bin_path('showmount', True)
+ rc, showmount_out, err = module.run_command(
+ "%s -a %s" % (showmount_cmd, nfs_host))
+ if rc != 0:
+ module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
+ else:
+ showmount_data = showmount_out.splitlines()
+ for line in showmount_data:
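+            # 'showmount -a <host>' lists entries as "client:directory"; the
+            # directory part is compared with the requested remote export.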
+ if line.split(':')[1] == device:
+ return True
+
+ return False
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group %s is in varyoff state." % vg
+ return False, msg
+ elif vg in current_active_vgs:
+ msg = "Volume group %s is in varyon state." % vg
+ return True, msg
+ else:
+ msg = "Volume group %s does not exist." % vg
+ return None, msg
+
+
+def resize_fs(module, filesystem, size):
+ """ Resize LVM file system. """
+
+ chfs_cmd = module.get_bin_path('chfs', True)
+ if not module.check_mode:
+ rc, chfs_out, err = module.run_command('%s -a size="%s" %s' % (chfs_cmd, size, filesystem))
+
+ if rc == 28:
+ changed = False
+ return changed, chfs_out
+ elif rc != 0:
+ if re.findall('Maximum allocation for logical', err):
+ changed = False
+ return changed, err
+ else:
+ module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
+
+ else:
+ if re.findall('The filesystem size is already', chfs_out):
+ changed = False
+ else:
+ changed = True
+
+ return changed, chfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
+ account_subsystem, permissions, nfs_server, attributes):
+ """ Create LVM file system or NFS remote mount point. """
+
+ attributes = ' -a '.join(attributes)
+
+ # Parameters definition.
+ account_subsys_opt = {
+ True: '-t yes',
+ False: '-t no'
+ }
+
+ if nfs_server is not None:
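+        # The NFS helper (mknfsmnt) takes a bare -A/-a switch for automatic
+        # mounting, while crfs in the LVM branch below expects the long
+        # "-A yes"/"-A no" form built in the else branch.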
+ auto_mount_opt = {
+ True: '-A',
+ False: '-a'
+ }
+
+ else:
+ auto_mount_opt = {
+ True: '-A yes',
+ False: '-A no'
+ }
+
+ if size is None:
+ size = ''
+ else:
+ size = "-a size=%s" % size
+
+ if device is None:
+ device = ''
+ else:
+ device = "-d %s" % device
+
+ if vg is None:
+ vg = ''
+ else:
+ vg_state, msg = _validate_vg(module, vg)
+ if vg_state:
+ vg = "-g %s" % vg
+ else:
+ changed = False
+
+ return changed, msg
+
+ if mount_group is None:
+ mount_group = ''
+
+ else:
+ mount_group = "-u %s" % mount_group
+
+ auto_mount = auto_mount_opt[auto_mount]
+ account_subsystem = account_subsys_opt[account_subsystem]
+
+ if nfs_server is not None:
+ # Creates a NFS file system.
+ mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
+ if not module.check_mode:
+ rc, mknfsmnt_out, err = module.run_command('%s -f "%s" %s -h "%s" -t "%s" "%s" -w "bg"' % (
+ mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "NFS file system %s created." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+ else:
+ # Creates a LVM file system.
+ crfs_cmd = module.get_bin_path('crfs', True)
+ if not module.check_mode:
+ cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
+ crfs_cmd, fs_type, filesystem, vg, device, mount_group, auto_mount, account_subsystem, permissions, size, attributes)
+ rc, crfs_out, err = module.run_command(cmd)
+
+ if rc == 10:
+ module.exit_json(
+ msg="Using a existent previously defined logical volume, "
+ "volume group needs to be empty. %s" % err)
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+
+ else:
+ changed = True
+ return changed, crfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def remove_fs(module, filesystem, rm_mount_point):
+ """ Remove an LVM file system or NFS entry. """
+
+ # Command parameters.
+ rm_mount_point_opt = {
+ True: '-r',
+ False: ''
+ }
+
+ rm_mount_point = rm_mount_point_opt[rm_mount_point]
+
+ rmfs_cmd = module.get_bin_path('rmfs', True)
+ if not module.check_mode:
+ cmd = "%s -r %s %s" % (rmfs_cmd, rm_mount_point, filesystem)
+ rc, rmfs_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+ else:
+ changed = True
+ msg = rmfs_out
+ if not rmfs_out:
+ msg = "File system %s removed." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def mount_fs(module, filesystem):
+ """ Mount a file system. """
+ mount_cmd = module.get_bin_path('mount', True)
+
+ if not module.check_mode:
+ rc, mount_out, err = module.run_command(
+ "%s %s" % (mount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s mounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def unmount_fs(module, filesystem):
+ """ Unmount a file system."""
+ unmount_cmd = module.get_bin_path('unmount', True)
+
+ if not module.check_mode:
+ rc, unmount_out, err = module.run_command("%s %s" % (unmount_cmd, filesystem))
+ if rc != 0:
+ module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s unmounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_subsystem=dict(type='bool', default=False),
+ attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ auto_mount=dict(type='bool', default=True),
+ device=dict(type='str'),
+ filesystem=dict(type='str', required=True),
+ fs_type=dict(type='str', default='jfs2'),
+ permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
+ mount_group=dict(type='str'),
+ nfs_server=dict(type='str'),
+ rm_mount_point=dict(type='bool', default=False),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
+ vg=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ account_subsystem = module.params['account_subsystem']
+ attributes = module.params['attributes']
+ auto_mount = module.params['auto_mount']
+ device = module.params['device']
+ fs_type = module.params['fs_type']
+ permissions = module.params['permissions']
+ mount_group = module.params['mount_group']
+ filesystem = module.params['filesystem']
+ nfs_server = module.params['nfs_server']
+ rm_mount_point = module.params['rm_mount_point']
+ size = module.params['size']
+ state = module.params['state']
+ vg = module.params['vg']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'present':
+ fs_mounted = ismount(filesystem)
+ fs_exists = _fs_exists(module, filesystem)
+
+ # Check if fs is mounted or exists.
+ if fs_mounted or fs_exists:
+ result['msg'] = "File system %s already exists." % filesystem
+ result['changed'] = False
+
+ # If parameter size was passed, resize fs.
+ if size is not None:
+ result['changed'], result['msg'] = resize_fs(module, filesystem, size)
+
+ # If fs doesn't exist, create it.
+ else:
+ # Check if fs will be a NFS device.
+ if nfs_server is not None:
+ if device is None:
+ result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
+ module.fail_json(**result)
+ else:
+ # Create a fs from NFS export.
+ if _check_nfs_device(module, nfs_server, device):
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is None:
+ if vg is None:
+ result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+ module.fail_json(**result)
+ else:
+                    # Create a fs using the specified volume group.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is not None and nfs_server is None:
+                # Create a fs on a previously defined logical volume (lv) device.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ elif state == 'absent':
+ if ismount(filesystem):
+ result['msg'] = "File system %s mounted." % filesystem
+
+ else:
+ fs_status = _fs_exists(module, filesystem)
+ if not fs_status:
+ result['msg'] = "File system %s does not exist." % filesystem
+ else:
+ result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+ elif state == 'mounted':
+ if ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already mounted." % filesystem
+ else:
+ result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+ elif state == 'unmounted':
+ if not ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already unmounted." % filesystem
+ else:
+ result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+ else:
+ # Unreachable codeblock
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py
new file mode 100644
index 00000000..c2daface
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_inittab.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Joris Weijters <joris.weijters@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+ - Manages the inittab on AIX.
+options:
+ name:
+ description:
+ - Name of the inittab entry.
+ type: str
+ required: yes
+ aliases: [ service ]
+ runlevel:
+ description:
+ - Runlevel of the entry.
+ type: str
+ required: yes
+ action:
+ description:
+    - Action that init has to take with this entry.
+ type: str
+ choices:
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
+ command:
+ description:
+ - What command has to run.
+ type: str
+ required: yes
+ insertafter:
+ description:
+    - After which inittab line the new entry should be inserted.
+ type: str
+ state:
+ description:
+ - Whether the entry should be present or absent in the inittab file.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The changes are persistent across reboots.
+ - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+ - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 4
+ action: once
+ command: echo hello
+ insertafter: existingservice
+ state: present
+ become: yes
+
+# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
+- name: Change startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: present
+ become: yes
+
+- name: Remove startmyservice from inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: absent
+ become: yes
+'''
+
+RETURN = '''
+name:
+ description: Name of the adjusted inittab entry
+ returned: always
+ type: str
+ sample: startmyservice
+msg:
+ description: Action done with the inittab entry
+ returned: changed
+ type: str
+ sample: changed inittab entry startmyservice
+changed:
+ description: Whether the inittab changed or not
+ returned: always
+ type: bool
+ sample: true
+'''
+
+# Import necessary libraries
+try:
+ # python 2
+ from itertools import izip
+except ImportError:
+ izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+    # Check if the entry exists. If it does not, return 'exist': False in the
+    # result dict; if it does, return 'exist': True together with the entry fields.
+ existsdict = {'exist': False}
+ lsitab = module.get_bin_path('lsitab')
+ (rc, out, err) = module.run_command([lsitab, module.params['name']])
+ if rc == 0:
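+        # lsitab prints the entry as "name:runlevel:action:command", so the
+        # colon-separated fields map directly onto the keys below.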
+ keys = ('name', 'runlevel', 'action', 'command')
+ values = out.split(":")
+        # strip surrounding whitespace such as trailing newlines
+ values = map(lambda s: s.strip(), values)
+ existsdict = dict(izip(keys, values))
+ existsdict.update({'exist': True})
+ return existsdict
+
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['service']),
+ runlevel=dict(type='str', required=True),
+ action=dict(type='str', choices=[
+ 'boot',
+ 'bootwait',
+ 'hold',
+ 'initdefault',
+ 'off',
+ 'once',
+ 'ondemand',
+ 'powerfail',
+ 'powerwait',
+ 'respawn',
+ 'sysinit',
+ 'wait',
+ ]),
+ command=dict(type='str', required=True),
+ insertafter=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ result = {
+ 'name': module.params['name'],
+ 'changed': False,
+ 'msg': ""
+ }
+
+ # Find commandline strings
+ mkitab = module.get_bin_path('mkitab')
+ rmitab = module.get_bin_path('rmitab')
+ chitab = module.get_bin_path('chitab')
+ rc = 0
+
+ # check if the new entry exists
+ current_entry = check_current_entry(module)
+
+ # if action is install or change,
+ if module.params['state'] == 'present':
+
+ # create new entry string
+ new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+ ":" + module.params['action'] + ":" + module.params['command']
+
+        # If the entry does not exist, or an existing entry has different
+        # fields, it will be created or changed below.
+ if (not current_entry['exist']) or (
+ module.params['runlevel'] != current_entry['runlevel'] or
+ module.params['action'] != current_entry['action'] or
+ module.params['command'] != current_entry['command']):
+
+ # If the entry does exist then change the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command([chitab, new_entry])
+ if rc != 0:
+ module.fail_json(
+ msg="could not change inittab", rc=rc, err=err)
+ result['msg'] = "changed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ # If the entry does not exist create the entry
+ elif not current_entry['exist']:
+ if module.params['insertafter']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, '-i', module.params['insertafter'], new_entry])
+ else:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, new_entry])
+
+ if rc != 0:
+ module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+ result['msg'] = "add inittab entry" + " " + module.params['name']
+ result['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # If the action is remove and the entry exists then remove the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [rmitab, module.params['name']])
+ if rc != 0:
+ module.fail_json(
+ msg="could not remove entry from inittab)", rc=rc, err=err)
+ result['msg'] = "removed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
new file mode 100644
index 00000000..569711f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvg.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+- This module creates, removes or resizes volume groups on AIX LVM.
+options:
+ force:
+ description:
+ - Force volume group creation.
+ type: bool
+ default: no
+ pp_size:
+ description:
+ - The size of the physical partition in megabytes.
+ type: int
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or extending (C(present) state) the volume group.
+    - If not provided when reducing (C(absent) state), the volume group will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+    - Control whether the volume group exists and its AIX state, varyonvg C(varyon) or varyoffvg C(varyoff).
+ type: str
+ choices: [ absent, present, varyoff, varyon ]
+ default: present
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ vg_type:
+ description:
+ - The type of the volume group.
+ type: str
+ choices: [ big, normal, scalable ]
+ default: normal
+notes:
+- AIX allows removing a VG only if all of its LVs/filesystems are not busy.
+- The module does not modify the PP size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ pp_size: 128
+ vg_type: scalable
+ state: present
+
+- name: Removing a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: absent
+
+- name: Extending rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: present
+
+- name: Reducing rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _validate_pv(module, vg, pvs):
+ """
+ Function to validate if the physical volume (PV) is not already in use by
+ another volume group or Oracle ASM.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume group name.
+ :param pvs: Physical volume list.
+ :return: [bool, message] or module.fail_json for errors.
+ """
+
+ lspv_cmd = module.get_bin_path('lspv', True)
+ rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
+
+ for pv in pvs:
+ # Get pv list.
+ lspv_list = {}
+ for line in current_lspv.splitlines():
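+            # Each lspv line looks like "hdiskN <pvid> <vg> <state>"; map the
+            # disk name to the volume group it belongs to ("None" when the disk
+            # is not assigned to any VG).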
+ pv_data = line.split()
+ lspv_list[pv_data[0]] = pv_data[2]
+
+ # Check if pv exists and is free.
+ if pv not in lspv_list.keys():
+ module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
+
+ if lspv_list[pv] == 'None':
+ # Disk None, looks free.
+ # Check if PV is not already in use by Oracle ASM.
+ lquerypv_cmd = module.get_bin_path('lquerypv', True)
+ rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
+ if rc != 0:
+ module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
+
+ if 'ORCLDISK' in current_lquerypv:
+ module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
+
+ msg = "Physical volume '%s' is ok to be used." % pv
+ return True, msg
+
+ # Check if PV is already in use for the same vg.
+ elif vg != lspv_list[pv]:
+ module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
+
+ msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
+ return False, msg
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group '%s' is in varyoff state." % vg
+ return False, msg
+
+ if vg in current_active_vgs:
+ msg = "Volume group '%s' is in varyon state." % vg
+ return True, msg
+
+ msg = "Volume group '%s' does not exist." % vg
+ return None, msg
+
+
+def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
+ """ Creates or extend a volume group. """
+
+ # Command option parameters.
+ force_opt = {
+ True: '-f',
+ False: ''
+ }
+
+ vg_opt = {
+ 'normal': '',
+ 'big': '-B',
+ 'scalable': '-S',
+ }
+
+ # Validate if PV are not already in use.
+ pv_state, msg = _validate_pv(module, vg, pvs)
+ if not pv_state:
+ changed = False
+ return changed, msg
+
+ vg_state, msg = vg_validation
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is True:
+ # Volume group extension.
+ changed = True
+ msg = ""
+
+ if not module.check_mode:
+ extendvg_cmd = module.get_bin_path('extendvg', True)
+ rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Extending volume group '%s' has failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' extended." % vg
+ return changed, msg
+
+ elif vg_state is None:
+ # Volume group creation.
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ mkvg_cmd = module.get_bin_path('mkvg', True)
+ rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs)))
+ if rc != 0:
+ changed = False
+ msg = "Creating volume group '%s' failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' created." % vg
+ return changed, msg
+
+
+def reduce_vg(module, vg, pvs, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is None:
+ changed = False
+ return changed, msg
+
+ # Define pvs_to_remove (list of physical volumes to be removed).
+ if pvs is None:
+        # Remove the VG if pvs are not provided.
+        # Remark: AIX permits removal only if the VG has no LVs.
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
+
+ pvs_to_remove = []
+ for line in current_pvs.splitlines()[2:]:
+ pvs_to_remove.append(line.split()[0])
+
+ reduce_msg = "Volume group '%s' removed." % vg
+ else:
+ pvs_to_remove = pvs
+ reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+ # Reduce volume group.
+ if len(pvs_to_remove) <= 0:
+ changed = False
+ msg = "No physical volumes to remove."
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ reducevg_cmd = module.get_bin_path('reducevg', True)
+ rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
+ if rc != 0:
+ module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+ msg = reduce_msg
+ return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is None:
+ module.fail_json(msg=msg)
+
+ if state == 'varyon':
+ if vg_state is True:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyonvg', True)
+ rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+ msg = "Varyon volume group %s completed." % vg
+ return changed, msg
+
+ elif state == 'varyoff':
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyoffvg', True)
+ rc, varyonvg_out, stderr = module.run_command("%s %s" % (varyonvg_cmd, vg))
+ if rc != 0:
+ module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyonvg_out, stderr=stderr)
+
+ msg = "Varyoff volume group %s completed." % vg
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', default=False),
+ pp_size=dict(type='int'),
+ pvs=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+ vg=dict(type='str', required=True),
+ vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+ ),
+ supports_check_mode=True,
+ )
+
+ force = module.params['force']
+ pp_size = module.params['pp_size']
+ pvs = module.params['pvs']
+ state = module.params['state']
+ vg = module.params['vg']
+ vg_type = module.params['vg_type']
+
+ if pp_size is None:
+ pp_size = ''
+ else:
+ pp_size = "-s %s" % pp_size
+
+ vg_validation = _validate_vg(module, vg)
+
+ if state == 'present':
+ if not pvs:
+ changed = False
+ msg = "pvs is required to state 'present'."
+ module.fail_json(msg=msg)
+ else:
+ changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
+
+ elif state == 'absent':
+ changed, msg = reduce_vg(module, vg, pvs, vg_validation)
+
+ elif state == 'varyon' or state == 'varyoff':
+ changed, msg = state_vg(module, vg, state, vg_validation)
+
+ else:
+ changed = False
+ msg = "Unexpected state"
+
+ module.exit_json(changed=changed, msg=msg, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py
new file mode 100644
index 00000000..02b4f06c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/aix_lvol.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alain Dejoux (@adejoux)
+module: aix_lvol
+short_description: Configure AIX LVM logical volumes
+description:
+ - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module.
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ type: str
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ type: str
+ required: true
+ lv_type:
+ description:
+ - The type of the logical volume.
+ type: str
+ default: jfs2
+ size:
+ description:
+ - The size of the logical volume with one of the [MGT] units.
+ type: str
+ copies:
+ description:
+ - The number of copies of the logical volume.
+    - The maximum number of copies is 3.
+ type: int
+ default: 1
+ policy:
+ description:
+    - Sets the inter-physical volume allocation policy.
+ - C(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ type: str
+ choices: [ maximum, minimum ]
+ default: maximum
+ state:
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ opts:
+ description:
+ - Free-form options to be passed to the mklv command.
+ type: str
+ pvs:
+ description:
+ - A list of physical volumes e.g. C(hdisk1,hdisk2).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+- name: Create a logical volume of 512M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+
+- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test2lv
+ size: 512M
+ pvs: [ hdisk1, hdisk2 ]
+
+- name: Create a logical volume of 512M mirrored
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test3lv
+ size: 512M
+ copies: 2
+
+- name: Create a logical volume of 1G with a minimum placement policy
+ community.general.aix_lvol:
+ vg: rootvg
+ lv: test4lv
+ size: 1G
+ policy: minimum
+
+- name: Create a logical volume with special options like mirror pool
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+ opts: -p copy1=poolA -p copy2=poolB
+
+- name: Extend the logical volume to 1200M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test4lv
+ size: 1200M
+
+- name: Remove the logical volume
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ type: str
+ description: A friendly message describing the task result.
+ returned: always
+ sample: Logical volume testlv created.
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def convert_size(module, size):
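+    # Convert a size string with an M/G/T suffix into megabytes,
+    # e.g. '512M' -> 512, '1G' -> 1024, '2T' -> 2097152.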
+ unit = size[-1].upper()
+ units = ['M', 'G', 'T']
+ try:
+ multiplier = 1024 ** units.index(unit)
+ except ValueError:
+ module.fail_json(msg="No valid size unit specified.")
+
+ return int(size[:-1]) * multiplier
+
+
+def round_ppsize(x, base=16):
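+    # Round x up to a multiple of the physical partition size, e.g.
+    # round_ppsize(513, base=16) returns 528.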
+ new_size = int(base * round(float(x) / base))
+ if new_size < x:
+ new_size += base
+ return new_size
+
+
+def parse_lv(data):
+ name = None
+
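+    # Parse 'lslv <lv>' output. Only the labelled fields matched below are used:
+    # "LOGICAL VOLUME:"/"VOLUME GROUP:", "LPs:", "PP SIZE:" and "INTER-POLICY:".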
+ for line in data.splitlines():
+ match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ vg = match.group(2)
+ continue
+ match = re.search(r"LPs:\s+(\d+).*PPs", line)
+ if match is not None:
+ lps = int(match.group(1))
+ continue
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+ match = re.search(r"INTER-POLICY:\s+(\w+)", line)
+ if match is not None:
+ policy = match.group(1)
+ continue
+
+ if not name:
+ return None
+
+ size = lps * pp_size
+
+ return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+
+
+def parse_vg(data):
+
+ for line in data.splitlines():
+
+ match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ continue
+
+ match = re.search(r"TOTAL PP.*\((\d+)", line)
+ if match is not None:
+ size = int(match.group(1))
+ continue
+
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+
+ match = re.search(r"FREE PP.*\((\d+)", line)
+ if match is not None:
+ free = int(match.group(1))
+ continue
+
+ return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+
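+# The two parsers above scrape 'lslv'/'lsvg' output. The lines they look for resemble
+# the following (spacing and numbers are illustrative, derived from the regexes rather
+# than from a captured AIX run):
+# LOGICAL VOLUME:     testlv                 VOLUME GROUP:   testvg
+# LPs:                32                     PPs:            32
+# PP SIZE:            16 megabyte(s)
+# INTER-POLICY:       maximum
+# TOTAL PPs:          542 (8672 megabytes)
+# FREE PPs:           42 (672 megabytes)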
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str', required=True),
+ lv_type=dict(type='str', default='jfs2'),
+ size=dict(type='str'),
+ opts=dict(type='str', default=''),
+ copies=dict(type='int', default=1),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
+ pvs=dict(type='list', elements='str', default=list())
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ lv_type = module.params['lv_type']
+ size = module.params['size']
+ opts = module.params['opts']
+ copies = module.params['copies']
+ policy = module.params['policy']
+ state = module.params['state']
+ pvs = module.params['pvs']
+
+ pv_list = ' '.join(pvs)
+
+ if policy == 'maximum':
+ lv_policy = 'x'
+ else:
+ lv_policy = 'm'
+
+ # Add echo command when running in check-mode
+ if module.check_mode:
+ test_opt = 'echo '
+ else:
+ test_opt = ''
+
+ # check if system commands are available
+ lsvg_cmd = module.get_bin_path("lsvg", required=True)
+ lslv_cmd = module.get_bin_path("lslv", required=True)
+
+ # Get information on volume group requested
+ rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+ this_vg = parse_vg(vg_info)
+
+ if size is not None:
+ # Convert the requested size to MB and round it up to a multiple of the volume group's PP size.
+ lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+ # Get information on logical volume requested
+ rc, lv_info, err = module.run_command(
+ "%s %s" % (lslv_cmd, lv))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+ changed = False
+
+ this_lv = parse_lv(lv_info)
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ if this_lv is None:
+ if state == 'present':
+ if lv_size > this_vg['free']:
+ module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+ # create LV
+ mklv_cmd = module.get_bin_path("mklv", required=True)
+
+ cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+ else:
+ module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ rmlv_cmd = module.get_bin_path("rmlv", required=True)
+ rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+ else:
+ if this_lv['policy'] != policy:
+ # change lv allocation policy
+ chlv_cmd = module.get_bin_path("chlv", required=True)
+ rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+ else:
+ module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+ if vg != this_lv['vg']:
+ module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))
+
+ # From here on, the only remaining action is to resize the volume; if no size parameter is passed, we do nothing.
+ if not size:
+ module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))
+
+ # resize LV based on absolute values
+ if int(lv_size) > this_lv['size']:
+ extendlv_cmd = module.get_bin_path("extendlv", required=True)
+ cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+ else:
+ module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+ elif lv_size < this_lv['size']:
+ module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
+ else:
+ module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py
new file mode 100644
index 00000000..56db6dc6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/alternatives.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+author:
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
+options:
+ name:
+ description:
+ - The generic name of the link.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
+ required when the alternative I(name) is unknown to the system.
+ type: path
+ priority:
+ description:
+ - The priority of the alternative.
+ type: int
+ default: 50
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = r'''
+- name: Correct java version selected
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: Alternatives link created
+ community.general.alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: Make java 32 bit an alternative with low priority
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
+'''
+
+import os
+import re
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path'),
+ priority=dict(type='int', default=50),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+ priority = params['priority']
+
+ UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
+
+ current_path = None
+ all_alternatives = []
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, _) = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
+ )
+
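+ # The parsing below expects '--display' output lines such as (illustrative,
+ # reusing paths from the EXAMPLES section rather than a captured run):
+ # link currently points to /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ # /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - priority 1071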
+ if rc == 0:
+ # Alternatives already exist for this link group
+ # Parse the output to determine the current path of the symlink and
+ # available alternatives
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
+ re.MULTILINE)
+ alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
+
+ match = current_path_regex.search(display_output)
+ if match:
+ current_path = match.group(1)
+ all_alternatives = alternative_regex.findall(display_output)
+
+ if not link:
+ # Read the current symlink target from `update-alternatives --query`
+ # in case we need to install the new alternative before setting it.
+ #
+ # This is only compatible on Debian-based systems, as the other
+ # alternatives don't have --query available
+ rc, query_output, _ = module.run_command(
+ ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
+ )
+ if rc == 0:
+ for line in query_output.splitlines():
+ if line.startswith('Link:'):
+ link = line.split()[1]
+ break
+
+ if current_path != path:
+ if module.check_mode:
+ module.exit_json(changed=True, current_path=current_path)
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ if not os.path.exists(path):
+ module.fail_json(msg="Specified path %s does not exist" % path)
+ if not link:
+ module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
+
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError as cpe:
+ module.fail_json(msg=str(cpe))
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py
new file mode 100644
index 00000000..260c7ae4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/awall.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ted Trask <ttrask01@yahoo.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
+description:
+ - This module allows for enabling, disabling and activating I(awall) policies.
+ - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ and activates the configuration on the system.
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [ disabled, enabled ]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policy
+ community.general.awall:
+ name: [ foo bar ]
+ state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+ activate: no
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: yes
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
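+# Note on the check above: it assumes 'awall list' prints one policy per line,
+# with enabled policies reported as '<name> enabled ...' (format assumed from the
+# regular expression, not verified against a specific awall version).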
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+ if module.check_mode:
+ cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py
new file mode 100644
index 00000000..ab53d066
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/beadm.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: True
+ aliases: [ "be" ]
+ snapshot:
+ description:
+ - If specified, the new boot environment will be cloned from the given
+ snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is
+ available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [ absent, activated, mounted, present, unmounted ]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+ community.general.beadm:
+ name: upgrade-be
+ snapshot: be@old
+ state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+ community.general.beadm:
+ name: upgrade-be
+ options: "compression=on"
+ description: upgrade
+ state: present
+
+- name: Delete ZFS boot environment
+ community.general.beadm:
+ name: old-be
+ state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+ community.general.beadm:
+ name: BE
+ mountpoint: /tmp/be
+ state: mounted
+
+- name: Unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+
+- name: Activate ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: activated
+'''
+
+RETURN = r'''
+name:
+ description: BE name
+ returned: always
+ type: str
+ sample: pre-upgrade
+snapshot:
+ description: ZFS snapshot to create BE from
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
+description:
+ description: BE description
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
+options:
+ description: BE additional options
+ returned: always
+ type: str
+ sample: compression=on
+mountpoint:
+ description: BE mountpoint
+ returned: always
+ type: str
+ sample: /mnt/be
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+force:
+ description: If forced action is wanted
+ returned: always
+ type: bool
+ sample: False
+'''
+
+import os
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.snapshot = module.params['snapshot']
+ self.description = module.params['description']
+ self.options = module.params['options']
+ self.mountpoint = module.params['mountpoint']
+ self.state = module.params['state']
+ self.force = module.params['force']
+ self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+ def _beadm_list(self):
+ cmd = [self.module.get_bin_path('beadm')]
+ cmd.append('list')
+ cmd.append('-H')
+ if '@' in self.name:
+ cmd.append('-s')
+ return self.module.run_command(cmd)
+
+ def _find_be_by_name(self, out):
+ if '@' in self.name:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if not check:
+ continue
+ full_name = check[0].split('/')
+ if not full_name:
+ continue
+ check[0] = full_name[len(full_name) - 1]
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ else:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ return None
+
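+ # Illustrative note: 'beadm list -H' prints one boot environment per line; on
+ # Solarish systems the fields are ';'-separated (with active flags such as 'NR'
+ # in the third field), while FreeBSD output is whitespace-separated, which is
+ # why the parser above branches on self.is_freebsd.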
+ def exists(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ if self._find_be_by_name(out):
+ return True
+ else:
+ return False
+ else:
+ return False
+
+ def is_activated(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ if 'R' in line[1]:
+ return True
+ else:
+ if 'R' in line[2]:
+ return True
+
+ return False
+
+ def activate_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('activate')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def create_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('create')
+
+ if self.snapshot:
+ cmd.append('-e')
+ cmd.append(self.snapshot)
+
+ if not self.is_freebsd:
+ if self.description:
+ cmd.append('-d')
+ cmd.append(self.description)
+
+ if self.options:
+ cmd.append('-o')
+ cmd.append(self.options)
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def destroy_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('destroy')
+ cmd.append('-F')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def is_mounted(self):
+ (rc, out, _) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ # On FreeBSD, we exclude currently mounted BE on /, as it is
+ # special and can be activated even if it is mounted. That is not
+ # possible with non-root BEs.
+ if line[2] != '-' and line[2] != '/':
+ return True
+ else:
+ if line[3]:
+ return True
+
+ return False
+
+ def mount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('mount')
+ cmd.append(self.name)
+
+ if self.mountpoint:
+ cmd.append(self.mountpoint)
+
+ return self.module.run_command(cmd)
+
+ def unmount_be(self):
+ cmd = [self.module.get_bin_path('beadm')]
+
+ cmd.append('unmount')
+ if self.force:
+ cmd.append('-f')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['be']),
+ snapshot=dict(type='str'),
+ description=dict(type='str'),
+ options=dict(type='str'),
+ mountpoint=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ be = BE(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = be.name
+ result['state'] = be.state
+
+ if be.snapshot:
+ result['snapshot'] = be.snapshot
+
+ if be.description:
+ result['description'] = be.description
+
+ if be.options:
+ result['options'] = be.options
+
+ if be.mountpoint:
+ result['mountpoint'] = be.mountpoint
+
+ if be.state == 'absent':
+ # beadm on FreeBSD and Solarish systems differs in delete behaviour in
+ # that we are not allowed to delete activated BE on FreeBSD while on
+ # Solarish systems we cannot delete BE if it is mounted. We add mount
+ # check for both platforms as BE should be explicitly unmounted before
+ # being deleted. On FreeBSD, we also check if the BE is activated.
+ if be.exists():
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if be.is_freebsd:
+ if be.is_activated():
+ module.fail_json(msg='Unable to remove active BE!')
+
+ (rc, out, err) = be.destroy_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while destroying BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ else:
+ module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+ elif be.state == 'present':
+ if not be.exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.create_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while creating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'activated':
+ if not be.is_activated():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+ # an explicit check for that case.
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py
new file mode 100644
index 00000000..ac6dde67
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/capabilities.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+ - This module manipulates file privileges using the Linux capabilities(7) system.
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: yes
+ aliases: [ key ]
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ type: str
+ required: yes
+ aliases: [ cap ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags into the effective set,
+ so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare,
+ so you will want to ensure that your capabilities argument matches the final capabilities.
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Set cap_sys_chroot+ep on /foo
+ community.general.capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
+
+- name: Remove cap_net_bind_service from /bar
+ community.general.capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+OPS = ('=', '-', '+')
+
+
+class CapabilitiesModule(object):
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [cap[0] for cap in current]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append(self.capability_tup)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+ # If the file does not exist, the stderr will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or stderr != "":
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append((subcap, op, flags))
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([''.join(cap) for cap in caps])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except Exception:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
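+ # Illustrative results of the parser above (hypothetical inputs):
+ # _parse_cap('cap_net_bind_service+ep') -> ('cap_net_bind_service', '+', 'ep')
+ # _parse_cap('cap_chown', op_required=False) -> ('cap_chown', None, None)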
+
+# ==============================================================
+# main
+
+def main():
+ # defining module
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='str', required=True, aliases=['key']),
+ capability=dict(type='str', required=True, aliases=['cap']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py
new file mode 100644
index 00000000..a76f6a78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/cronvar.py
@@ -0,0 +1,423 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded with a comment describing the
+# variable so that it can be found later. This is required to be
+# present in order for this plugin to find/modify the variable
+
+# This module is based on the crontab module.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables.
+ - This module allows you to create, update, or delete cron variable definitions.
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ type: str
+ required: yes
+ value:
+ description:
+ - The value to set this variable to.
+ - Required if C(state=present).
+ type: str
+ insertafter:
+ description:
+ - If specified, the variable will be inserted after the variable specified.
+ - Used with C(state=present).
+ type: str
+ insertbefore:
+ description:
+ - Used with C(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ type: str
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - This parameter defaults to C(root) when unset.
+ type: str
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+ - Without a leading C(/), this is assumed to be in I(/etc/cron.d).
+ - With a leading C(/), this is taken as absolute.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ type: bool
+ default: no
+requirements:
+ - cron
+author:
+- Doug Luce (@dougluce)
+'''
+
+EXAMPLES = r'''
+- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
+ community.general.cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+
+- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
+ community.general.cronvar:
+ name: LEGACY
+ state: absent
+
+- name: Add a variable to a file under /etc/cron.d
+ community.general.cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
+'''
+
+import os
+import platform
+import pwd
+import re
+import shlex
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronVarError(Exception):
+ pass
+
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
+ ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
+
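+ # Illustrative behaviour of the parser above (hypothetical crontab lines):
+ # parse_for_var('PATH=/usr/local/bin:/usr/bin') -> ('PATH', '/usr/local/bin:/usr/bin')
+ # parse_for_var('*/5 * * * * /usr/bin/job') raises CronVarError, as it is not a variable.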
+ def find_variable(self, name):
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ (var_name, _) = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ (varname, _) = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+# ==================================================
+
+def main():
+ # The following example playbooks:
+ #
+ # - community.general.cronvar: name="SHELL" value="/bin/bash"
+ #
+ # - name: Set the email
+ # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+ #
+ # - name: Get rid of the old new host variable
+ # community.general.cronvar: name="NEW_HOST" state=absent
+ #
+ # Would produce:
+ # SHELL = /bin/bash
+ # EMAILTO = doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ user=dict(type='str'),
+ cron_file=dict(type='str'),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ backup=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+ changed = cronvar.remove_variable_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+ # retain the backup only if crontab or cron file have changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py
new file mode 100644
index 00000000..9841a786
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/crypttab.py
@@ -0,0 +1,354 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Steve <yo@groks.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+ optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ type: str
+ required: yes
+ state:
+ description:
+ - Use I(present) to add a line to C(/etc/crypttab) or update its definition
+ if already present.
+ - Use I(absent) to remove a line with matching I(name).
+ - Use I(opts_present) to add options to those already present; options with
+ different values will be updated.
+ - Use I(opts_absent) to remove options from the existing set.
+ type: str
+ required: yes
+ choices: [ absent, opts_absent, opts_present, present ]
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=).
+ type: str
+ password:
+ description:
+ - Encryption password, the path to a file containing the password, or
+ C(-) or unset if the password should be entered at boot.
+ type: path
+ opts:
+ description:
+ - A comma-delimited list of options. See C(crypttab(5)) for details.
+ type: str
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab).
+ - This might be useful in a chroot environment.
+ type: path
+ default: /etc/crypttab
+author:
+- Steve (@groks)
+'''
+
+EXAMPLES = r'''
+- name: Set the options explicitly for a device which must already exist
+ community.general.crypttab:
+ name: luks-home
+ state: present
+ opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ community.general.crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ loop: '{{ ansible_mounts }}'
+ when: "'/dev/mapper/luks-' in {{ item.device }}"
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+ backing_device=dict(type='str'),
+ password=dict(type='path'),
+ opts=dict(type='str'),
+ path=dict(type='path', default='/etc/crypttab')
+ ),
+ supports_check_mode=True,
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception as e:
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+ exception=traceback.format_exc(), **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+ if changed and not module.check_mode:
+ try:
+ f = open(path, 'wb')
+ f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
+ finally:
+ f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+ def __init__(self, path):
+ self.path = path
+ # Use an instance attribute so parsed lines are not shared between instances.
+ self._lines = []
+ if not os.path.exists(path):
+ if not os.path.exists(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+ open(path, 'a').close()
+
+ try:
+ f = open(path, 'r')
+ for line in f.readlines():
+ self._lines.append(Line(line))
+ finally:
+ f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ self.line = self.line.rstrip('\n')
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+ if self.password is not None or self.opts:
+ if self.password is not None:
+ fields.append(self.password)
+ else:
+ fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
+
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+ if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+ if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py
new file mode 100644
index 00000000..49c42432
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dconf.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Branko Majic <branko@majic.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: dconf
+author:
+ - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+ - This module allows modifications and reading of the dconf database. The module
+ is implemented as a wrapper around the dconf tool. Please see the dconf(1) man
+ page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module
+ will try to detect an existing session and reuse it, or run the tool via
+ C(dbus-run-session).
+notes:
+ - This module depends on the C(psutil) Python library (version 4.0.0 and upwards)
+ and the C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
+ the distribution you are using, you may need to install additional packages to
+ have these available.
+ - Detection of an existing, running D-Bus session, required to change settings
+ via C(dconf), is not 100% reliable due to implementation details of the D-Bus
+ daemon itself. This might lead to running applications not picking up
+ changes on the fly if options are changed via Ansible and
+ C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+ utilises an unusual syntax for the values (GVariant). For example, if you
+ wanted to provide a string value, the correct syntax would be
+ C(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ value.
+ - When using loops in combination with a value like
+ :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible
+ type conversions. Applying a filter :code:`"{{ item.value | string }}"`
+ to the parameter variable can avoid potential conversion problems.
+ - The easiest way to figure out exact syntax/value you need to provide for a
+ key is by making the configuration change in application affected by the
+ key, and then having a look at value set via commands C(dconf dump
+ /path/to/dir/) or C(dconf read /path/to/key).
+options:
+ key:
+ type: str
+ required: true
+ description:
+ - A dconf key to modify or read from the dconf database.
+ value:
+ type: str
+ required: false
+ description:
+ - Value to set for the specified dconf key. Value should be specified in
+ GVariant format. Due to the complexity of this format, it is best to have a
+ look at existing values in the dconf database. Required for
+ C(state=present).
+ state:
+ type: str
+ required: false
+ default: present
+ choices:
+ - read
+ - present
+ - absent
+ description:
+ - The action to take upon the key/value.
+'''
+
+RETURN = """
+value:
+ description: value associated with the requested key
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
+"""
+
+EXAMPLES = """
+- name: Configure available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ value: "[('xkb', 'us'), ('xkb', 'se')]"
+ state: present
+
+- name: Read currently available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: absent
+
+- name: Configure available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ value: "['us', 'se']"
+ state: present
+
+- name: Read currently available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: absent
+
+- name: Disable desktop effects in Cinnamon
+ community.general.dconf:
+ key: "/org/cinnamon/desktop-effects"
+ value: "false"
+ state: present
+"""
+
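+# A hypothetical loop-based variant of the examples above, showing the "| string"
+# filter recommended in the notes for values that could otherwise be type-converted
+# (keys and values here are placeholders):
+#
+# - name: Apply several dconf settings
+#   community.general.dconf:
+#     key: "{{ item.key }}"
+#     value: "{{ item.value | string }}"
+#     state: present
+#   loop:
+#     - key: /org/gnome/desktop/input-sources/sources
+#       value: "[('xkb', 'us'), ('xkb', 'se')]"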
+
+import os
+import traceback
+
+PSUTIL_IMP_ERR = None
+try:
+ import psutil
+ psutil_found = True
+except ImportError:
+ PSUTIL_IMP_ERR = traceback.format_exc()
+ psutil_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class DBusWrapper(object):
+ """
+ Helper class that can be used for running a command with a working D-Bus
+ session.
+
+ If possible, command will be run against an existing D-Bus session,
+ otherwise the session will be spawned via dbus-run-session.
+
+ Example usage:
+
+ dbus_wrapper = DBusWrapper(ansible_module)
+ dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
+ """
+
+ def __init__(self, module):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+ """
+
+        # Store the passed-in arguments and set up some defaults.
+ self.module = module
+
+ # Try to extract existing D-Bus session address.
+ self.dbus_session_bus_address = self._get_existing_dbus_session()
+
+ # If no existing D-Bus session was detected, check if dbus-run-session
+ # is available.
+ if self.dbus_session_bus_address is None:
+ self.module.get_bin_path('dbus-run-session', required=True)
+
+ def _get_existing_dbus_session(self):
+ """
+ Detects and returns an existing D-Bus session bus address.
+
+ :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
+ """
+
+ # We'll be checking the processes of current user only.
+ uid = os.getuid()
+
+ # Go through all the pids for this user, try to extract the D-Bus
+ # session bus address from environment, and ensure it is possible to
+ # connect to it.
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
+
+ for pid in psutil.pids():
+ process = psutil.Process(pid)
+ process_real_uid, _, _ = process.uids()
+ try:
+ if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
+ dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
+ self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
+ command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
+ rc, _, _ = self.module.run_command(command)
+
+ if rc == 0:
+ self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
+
+ return dbus_session_bus_address_candidate
+
+ # This can happen with things like SSH sessions etc.
+ except psutil.AccessDenied:
+ pass
+
+ self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
+
+ return None
+
+ def run_command(self, command):
+ """
+ Runs the specified command within a functional D-Bus session. Command is
+ effectively passed-on to AnsibleModule.run_command() method, with
+ modification for using dbus-run-session if necessary.
+
+ :param command: Command to run, including parameters. Each element of the list should be a string.
+        :type command: list
+
+ :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
+ """
+
+ if self.dbus_session_bus_address is None:
+ self.module.debug("Using dbus-run-session wrapper for running commands.")
+ command = ['dbus-run-session'] + command
+ rc, out, err = self.module.run_command(command)
+
+ if self.dbus_session_bus_address is None and rc == 127:
+ self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
+ else:
+ extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
+ rc, out, err = self.module.run_command(command, environ_update=extra_environment)
+
+ return rc, out, err
+
+
+class DconfPreference(object):
+
+ def __init__(self, module, check_mode=False):
+ """
+ Initialises instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+
+ :param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
+ :type check_mode: bool
+ """
+
+ self.module = module
+ self.check_mode = check_mode
+
+ def read(self, key):
+ """
+ Retrieves current value associated with the dconf key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
+ """
+
+ command = ["dconf", "read", key]
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)
+
+ if out == '':
+ value = None
+ else:
+ value = out.rstrip('\n')
+
+ return value
+
+ def write(self, key, value):
+ """
+ Writes the value for specified key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key for which the value should be set. Should be a full path.
+ :type key: str
+
+ :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+ :type value: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # If no change is needed (or won't be done due to check_mode), notify
+ # caller straight away.
+ if value == self.read(key):
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the write
+        # operation, wrap the dconf command with the D-Bus session wrapper.
+ command = ["dconf", "write", key, value]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+ def reset(self, key):
+ """
+        Resets the value for the specified key (removes it from the user configuration).
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key to reset. Should be a full path.
+ :type key: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # Read the current value first.
+ current_value = self.read(key)
+
+        # If the key is not set at all, no change is needed; otherwise just
+        # notify the caller if we are running in check mode.
+ if current_value is None:
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the reset
+        # operation, wrap the dconf command with the D-Bus session wrapper.
+ command = ["dconf", "reset", key]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)
+
+ # Value was changed.
+ return True
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent', 'read']),
+ key=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not psutil_found:
+ module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
+
+ # If present state was specified, value must be provided.
+ if module.params['state'] == 'present' and module.params['value'] is None:
+ module.fail_json(msg='State "present" requires "value" to be set.')
+
+ # Create wrapper instance.
+ dconf = DconfPreference(module, module.check_mode)
+
+ # Process based on different states.
+ if module.params['state'] == 'read':
+ value = dconf.read(module.params['key'])
+ module.exit_json(changed=False, value=value)
+ elif module.params['state'] == 'present':
+ changed = dconf.write(module.params['key'], module.params['value'])
+ module.exit_json(changed=changed)
+ elif module.params['state'] == 'absent':
+ changed = dconf.reset(module.params['key'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py
new file mode 100644
index 00000000..b7b57fd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/dpkg_divert.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017-2020, Yann Amar <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a Debian package's version of a file
+version_added: '0.2.0'
+author:
+ - quidame (@quidame)
+description:
+  - A diversion is, for C(dpkg), the knowledge that only a given package
+    (or the local administrator) is allowed to install a file at a given
+    location. Other packages shipping their own version of this file will
+    be forced to I(divert) it, that is, to install it at another location.
+    This allows one to keep changes in a file provided by a Debian package
+    by preventing it from being overwritten on package upgrade.
+  - This module manages diversions of Debian package files using the
+    C(dpkg-divert) command-line tool. It can either create or remove a
+    diversion for a given file, and can also update an existing diversion
+    to modify its I(holder) and/or its I(divert) location.
+options:
+ path:
+ description:
+ - The original and absolute path of the file to be diverted or
+ undiverted. This path is unique, i.e. it is not possible to get
+ two diversions for the same I(path).
+ required: true
+ type: path
+ state:
+ description:
+ - When I(state=absent), remove the diversion of the specified
+ I(path); when I(state=present), create the diversion if it does
+ not exist, or update its package I(holder) or I(divert) location,
+ if it already exists.
+ type: str
+ default: present
+ choices: [absent, present]
+ holder:
+ description:
+      - The name of the package whose copy of the file is not diverted, also
+        known as the diversion holder or the package the diversion belongs
+        to.
+      - The actual package does not have to be installed or even to exist
+        for its name to be valid. If not specified, the diversion is held
+        by 'LOCAL', which is reserved by/for dpkg for local diversions.
+      - This parameter is ignored when I(state=absent).
+ type: str
+ divert:
+ description:
+      - The location where the versions of the file will be diverted.
+ - Default is to add suffix C(.distrib) to the file path.
+ - This parameter is ignored when I(state=absent).
+ type: path
+ rename:
+ description:
+      - Actually move the file aside (when I(state=present)) or back (when
+        I(state=absent)), but only when changing the state of the diversion.
+        This parameter has no effect when attempting to add a diversion that
+        already exists or when removing a nonexistent one.
+      - Unless I(force=true), renaming fails if the destination file already
+        exists (this lock being a dpkg-divert feature, and bypassing it being
+        a module feature).
+ type: bool
+ default: no
+ force:
+ description:
+ - When I(rename=true) and I(force=true), renaming is performed even if
+ the target of the renaming exists, i.e. the existing contents of the
+ file at this location will be lost.
+ - This parameter is ignored when I(rename=false).
+ type: bool
+ default: no
+notes:
+ - This module supports I(check_mode) and I(diff).
+requirements:
+ - dpkg-divert >= 1.15.0 (Debian family)
+'''
+
+EXAMPLES = r'''
+- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+
+- name: Divert /usr/bin/busybox by package 'branding'
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ holder: branding
+
+- name: Divert and rename busybox to busybox.dpkg-divert
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/bin/busybox.dpkg-divert
+ rename: yes
+
+- name: Remove the busybox diversion and move the diverted file back
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ state: absent
+ rename: yes
+ force: yes
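+
+# Illustrative sketch (not part of the original documentation): updating an
+# existing diversion's holder and divert location in one task; the holder
+# name and the divert path below are assumptions used only for demonstration.
+- name: Update the busybox diversion with a new holder and divert location
+  community.general.dpkg_divert:
+    path: /usr/bin/busybox
+    holder: branding
+    divert: /usr/bin/busybox.branding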
+'''
+
+RETURN = r'''
+commands:
+  description: The dpkg-divert commands run internally by the module.
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc",
+ "/usr/bin/dpkg-divert --package ansible --no-rename --add /etc/foobarrc"
+ ]
+messages:
+ description: The dpkg-divert relevant messages (stdout or stderr).
+ type: list
+ returned: on_success
+ elements: str
+ sample: |-
+ [
+ "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'",
+ "Adding 'diversion of /etc/foobarrc to /etc/foobarrc.distrib by ansible'"
+ ]
+diversion:
+ description: The status of the diversion after task execution.
+ type: dict
+ returned: always
+ contains:
+ divert:
+ description: The location of the diverted file.
+ type: str
+ holder:
+ description: The package holding the diversion.
+ type: str
+ path:
+ description: The path of the file to divert/undivert.
+ type: str
+ state:
+ description: The state of the diversion.
+ type: str
+ sample: |-
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+ "path": "/etc/foobarrc"
+ "state": "present"
+ }
+'''
+
+
+import re
+import os
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+
+def diversion_state(module, command, path):
+ diversion = dict(path=path, state='absent', divert=None, holder=None)
+ rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
+ if out:
+ diversion['state'] = 'present'
+ diversion['holder'] = out.rstrip()
+ rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
+ diversion['divert'] = out.rstrip()
+ return diversion
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
+ holder=dict(required=False, type='str'),
+ divert=dict(required=False, type='path'),
+ rename=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ state = module.params['state']
+ holder = module.params['holder']
+ divert = module.params['divert']
+ rename = module.params['rename']
+ force = module.params['force']
+
+ diversion_wanted = dict(path=path, state=state)
+ changed = False
+
+ DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
+ MAINCOMMAND = [DPKG_DIVERT]
+
+ # Option --listpackage is needed and comes with 1.15.0
+ rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
+ [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
+ if LooseVersion(current_version) < LooseVersion("1.15.0"):
+ module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
+ no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ path_exists = os.path.exists(b_path)
+    # Used for things not doable with a single dpkg-divert command (such as
+    # forced renaming of files, and updates of a diversion's 'holder' or
+    # 'divert').
+ target_exists = False
+ truename_exists = False
+
+ diversion_before = diversion_state(module, DPKG_DIVERT, path)
+ if diversion_before['state'] == 'present':
+ b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
+ truename_exists = os.path.exists(b_divert)
+
+ # Append options as requested in the task parameters, but ignore some of
+ # them when removing the diversion.
+ if rename:
+ MAINCOMMAND.append('--rename')
+ elif no_rename_is_supported:
+ MAINCOMMAND.append('--no-rename')
+
+ if state == 'present':
+ if holder and holder != 'LOCAL':
+ MAINCOMMAND.extend(['--package', holder])
+ diversion_wanted['holder'] = holder
+ else:
+ MAINCOMMAND.append('--local')
+ diversion_wanted['holder'] = 'LOCAL'
+
+ if divert:
+ MAINCOMMAND.extend(['--divert', divert])
+ target = divert
+ else:
+ target = '%s.distrib' % path
+
+ MAINCOMMAND.extend(['--add', path])
+ diversion_wanted['divert'] = target
+ b_target = to_bytes(target, errors='surrogate_or_strict')
+ target_exists = os.path.exists(b_target)
+
+ else:
+ MAINCOMMAND.extend(['--remove', path])
+ diversion_wanted['divert'] = None
+ diversion_wanted['holder'] = None
+
+ # Start to populate the returned objects.
+ diversion = diversion_before.copy()
+ maincommand = ' '.join(MAINCOMMAND)
+ commands = [maincommand]
+
+ if module.check_mode or diversion_wanted == diversion_before:
+ MAINCOMMAND.insert(1, '--test')
+ diversion_after = diversion_wanted
+
+ # Just try and see
+ rc, stdout, stderr = module.run_command(MAINCOMMAND)
+
+ if rc == 0:
+ messages = [stdout.rstrip()]
+
+ # else... cases of failure with dpkg-divert are:
+ # - The diversion does not belong to the same package (or LOCAL)
+ # - The divert filename is not the same (e.g. path.distrib != path.divert)
+ # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
+ # diverted file exist)
+
+ elif state != diversion_before['state']:
+ # There should be no case with 'divert' and 'holder' when creating the
+ # diversion from none, and they're ignored when removing the diversion.
+ # So this is all about renaming...
+ if rename and path_exists and (
+ (state == 'absent' and truename_exists) or
+ (state == 'present' and target_exists)):
+ if not force:
+ msg = "Set 'force' param to True to force renaming of files."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ else:
+ msg = "Unexpected error while changing state of the diversion."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+ to_remove = path
+ if state == 'present':
+ to_remove = target
+
+ if not module.check_mode:
+ try:
+ b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
+ os.unlink(b_remove)
+ except OSError as e:
+ msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+
+ messages = [stdout.rstrip()]
+
+ # The situation is that we want to modify the settings (holder or divert)
+ # of an existing diversion. dpkg-divert does not handle this, and we have
+ # to remove the existing diversion first, and then set a new one.
+ else:
+ RMDIVERSION = [DPKG_DIVERT, '--remove', path]
+ if no_rename_is_supported:
+ RMDIVERSION.insert(1, '--no-rename')
+ rmdiversion = ' '.join(RMDIVERSION)
+
+ if module.check_mode:
+ RMDIVERSION.insert(1, '--test')
+
+ if rename:
+ MAINCOMMAND.remove('--rename')
+ if no_rename_is_supported:
+ MAINCOMMAND.insert(1, '--no-rename')
+ maincommand = ' '.join(MAINCOMMAND)
+
+ commands = [rmdiversion, maincommand]
+ rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
+
+ if module.check_mode:
+ messages = [rmdout.rstrip(), 'Running in check mode']
+ else:
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+ messages = [rmdout.rstrip(), stdout.rstrip()]
+
+    # Avoid orphaning files if possible (that is, dereferencing them in the
+    # diversion database while leaving them in place), but do not make
+    # renaming issues fatal. This module is not about the state of the files
+    # involved in the diversion anyway.
+ old = diversion_before['divert']
+ new = diversion_wanted['divert']
+ if new != old:
+ b_old = to_bytes(old, errors='surrogate_or_strict')
+ b_new = to_bytes(new, errors='surrogate_or_strict')
+ if os.path.exists(b_old) and not os.path.exists(b_new):
+ try:
+ os.rename(b_old, b_new)
+ except OSError as e:
+ pass
+
+ if not module.check_mode:
+ diversion_after = diversion_state(module, DPKG_DIVERT, path)
+
+ diversion = diversion_after.copy()
+ diff = dict()
+ if module._diff:
+ diff['before'] = diversion_before
+ diff['after'] = diversion_after
+
+ if diversion_after != diversion_before:
+ changed = True
+
+ if diversion_after == diversion_wanted:
+ module.exit_json(changed=changed, diversion=diversion,
+ commands=commands, messages=messages, diff=diff)
+ else:
+ msg = "Unexpected error: see stdout and stderr for details."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py
new file mode 100644
index 00000000..abd2ebc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/facter.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+# ansible www.example.net -m facter
+
+- name: Execute facter no arguments
+ community.general.facter:
+
+- name: Execute facter with arguments
+ community.general.facter:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
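+
+# Illustrative sketch (not part of the original documentation): registering
+# the returned facts for later use. The fact name 'os' used below is an
+# assumption and depends on what facter reports on the target host.
+- name: Execute facter and register its output
+  community.general.facter:
+  register: facter_results
+
+- name: Show one of the returned facts
+  ansible.builtin.debug:
+    var: facter_results.os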
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(required=False, type='list', elements='str')
+ )
+ )
+
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py
new file mode 100644
index 00000000..e78eec4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/filesystem.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: filesystem
+short_description: Makes a filesystem
+description:
+ - This module creates a filesystem.
+options:
+ state:
+ description:
+      - If C(state=present), the filesystem is created if it does not already
+        exist; that is the default behaviour if I(state) is omitted.
+ - If C(state=absent), filesystem signatures on I(dev) are wiped if it
+ contains a filesystem (as known by C(blkid)).
+ - When C(state=absent), all other options but I(dev) are ignored, and the
+ module doesn't fail if the device I(dev) doesn't actually exist.
+ - C(state=absent) is not supported and will fail on FreeBSD systems.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ version_added: 1.3.0
+ fstype:
+ choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
+ description:
+ - Filesystem type to be created. This option is required with
+ C(state=present) (or if I(state) is omitted).
+ - reiserfs support was added in 2.2.
+ - lvm support was added in 2.5.
+ - since 2.5, I(dev) can be an image file.
+ - vfat support was added in 2.5
+ - ocfs2 support was added in 2.6
+ - f2fs support was added in 2.7
+ - swap support was added in 2.8
+ type: str
+ aliases: [type]
+ dev:
+ description:
+ - Target path to device or image file.
+ type: path
+ required: yes
+ aliases: [device]
+ force:
+ description:
+      - If C(yes), allows creating a new filesystem on a device that already has a filesystem.
+ type: bool
+ default: 'no'
+ resizefs:
+ description:
+ - If C(yes), if the block device and filesystem size differ, grow the filesystem into the space.
+ - Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
+      - XFS will only grow if mounted. Currently, the module is based on commands
+        from the C(util-linux) package to perform operations, so resizing of XFS is
+        not supported on FreeBSD systems.
+      - VFAT will likely fail if C(fatresize) < 1.04.
+ type: bool
+ default: 'no'
+ opts:
+ description:
+ - List of options to be passed to mkfs command.
+ type: str
+requirements:
+  - Uses tools related to the I(fstype) (C(mkfs)) and the C(blkid) command. When I(resizefs) is enabled, the C(blockdev) command is required too.
+notes:
+  - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) is not able to detect an existing filesystem,
+    that filesystem is overwritten even if I(force) is C(no).
+ - This module supports I(check_mode).
+'''
+
+EXAMPLES = '''
+- name: Create a ext2 filesystem on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
+
+- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
+
+- name: Blank filesystem signature on /dev/sdb1
+ community.general.filesystem:
+ dev: /dev/sdb1
+ state: absent
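+
+# Illustrative sketch (not part of the original documentation) of the resizefs
+# option described above; the device path below is an assumption used only
+# for demonstration.
+- name: Grow the ext4 filesystem on /dev/sdb1 to fill the whole device
+  community.general.filesystem:
+    fstype: ext4
+    dev: /dev/sdb1
+    resizefs: yes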
+'''
+
+from distutils.version import LooseVersion
+import os
+import platform
+import re
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Device(object):
+ def __init__(self, module, path):
+ self.module = module
+ self.path = path
+
+ def size(self):
+ """ Return size in bytes of device. Returns int """
+ statinfo = os.stat(self.path)
+ if stat.S_ISBLK(statinfo.st_mode):
+ blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
+ _, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
+ return int(devsize_in_bytes)
+ elif os.path.isfile(self.path):
+ return os.path.getsize(self.path)
+ else:
+ self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
+
+ def get_mountpoint(self):
+ """Return (first) mountpoint of device. Returns None when not mounted."""
+ cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
+
+ # find mountpoint
+ rc, mountpoint, _ = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
+ "TARGET", "--source", self.path], check_rc=False)
+ if rc != 0:
+ mountpoint = None
+ else:
+ mountpoint = mountpoint.split('\n')[0]
+
+ return mountpoint
+
+ def __str__(self):
+ return self.path
+
+
+class Filesystem(object):
+
+ GROW = None
+ MKFS = None
+ MKFS_FORCE_FLAGS = ''
+
+ LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+
+ def __init__(self, module):
+ self.module = module
+
+ @property
+ def fstype(self):
+ return type(self).__name__
+
+ def get_fs_size(self, dev):
+ """ Return size in bytes of filesystem on device. Returns int """
+ raise NotImplementedError()
+
+ def create(self, opts, dev):
+ if self.module.check_mode:
+ return
+
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ if opts is None:
+ cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
+ else:
+ cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
+ self.module.run_command(cmd, check_rc=True)
+
+ def wipefs(self, dev):
+ if platform.system() == 'FreeBSD':
+ msg = "module param state=absent is currently not supported on this OS (FreeBSD)."
+ self.module.fail_json(msg=msg)
+
+ if self.module.check_mode:
+ return
+
+ # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
+ # so it is not supported on FreeBSD. Even the use of dd as a fallback is
+ # not doable here if it needs get_mountpoint() (to prevent corruption of
+ # a mounted filesystem), since 'findmnt' is not available on FreeBSD.
+ wipefs = self.module.get_bin_path('wipefs', required=True)
+ cmd = [wipefs, "--all", dev.__str__()]
+ self.module.run_command(cmd, check_rc=True)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ return [cmd, str(dev)]
+
+ def grow(self, dev):
+ """Get dev and fs size and compare. Returns stdout of used command."""
+ devsize_in_bytes = dev.size()
+
+ try:
+ fssize_in_bytes = self.get_fs_size(dev)
+ except NotImplementedError:
+ self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
+
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+ elif self.module.check_mode:
+ self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
+ else:
+ _, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
+ return out
+
+
+class Ext(Filesystem):
+ MKFS_FORCE_FLAGS = '-F'
+ GROW = 'resize2fs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('tune2fs', required=True)
+ # Get Block count and Block size
+ _, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in size.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ return block_size * block_count
+
+
+class Ext2(Ext):
+ MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+ MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+ MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+ MKFS = 'mkfs.xfs'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'xfs_growfs'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('xfs_info', required=True)
+
+ mountpoint = dev.get_mountpoint()
+ if mountpoint:
+ rc, out, err = self.module.run_command([cmd, str(mountpoint)], environ_update=self.LANG_ENV)
+ else:
+ # Recent GNU/Linux distros support access to unmounted XFS filesystems
+ rc, out, err = self.module.run_command([cmd, str(dev)], environ_update=self.LANG_ENV)
+ if rc != 0:
+ self.module.fail_json(msg="Error while attempting to query size of XFS filesystem: %s" % err)
+
+ for line in out.splitlines():
+ col = line.split('=')
+ if col[0].strip() == 'data':
+ if col[1].strip() != 'bsize':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "bsize")')
+ if col[2].split()[1] != 'blocks':
+ self.module.fail_json(msg='Unexpected output format from xfs_info (could not locate "blocks")')
+ block_size = int(col[2].split()[0])
+ block_count = int(col[3].split(',')[0])
+ return block_size * block_count
+
+ def grow_cmd(self, dev):
+ # Check first if growing is needed, and then if it is doable or not.
+ devsize_in_bytes = dev.size()
+ fssize_in_bytes = self.get_fs_size(dev)
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ # xfs filesystem needs to be mounted
+ self.module.fail_json(msg="%s needs to be mounted for xfs operations" % dev)
+
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+
+ return [cmd, str(mountpoint)]
+
+
+class Reiserfs(Filesystem):
+ MKFS = 'mkfs.reiserfs'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+class Btrfs(Filesystem):
+ MKFS = 'mkfs.btrfs'
+
+ def __init__(self, module):
+ super(Btrfs, self).__init__(module)
+ _, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
+ match = re.search(r" v([0-9.]+)", stdout)
+ if not match:
+ # v0.20-rc1 use stderr
+ match = re.search(r" v([0-9.]+)", stderr)
+ if match:
+ # v0.20-rc1 doesn't have --force parameter added in following version v3.12
+ if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+ self.MKFS_FORCE_FLAGS = '-f'
+ else:
+ self.MKFS_FORCE_FLAGS = ''
+ else:
+ # assume version is greater or equal to 3.12
+ self.MKFS_FORCE_FLAGS = '-f'
+ self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+
+class Ocfs2(Filesystem):
+ MKFS = 'mkfs.ocfs2'
+ MKFS_FORCE_FLAGS = '-Fx'
+
+
+class F2fs(Filesystem):
+ MKFS = 'mkfs.f2fs'
+ GROW = 'resize.f2fs'
+
+ @property
+ def MKFS_FORCE_FLAGS(self):
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ cmd = "%s %s" % (mkfs, os.devnull)
+ _, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
+ # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+ # mkfs.f2fs displays version since v1.2.0
+ match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+ if match is not None:
+ # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem
+ # before that version -f switch wasn't used
+ if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+ return '-f'
+
+ return ''
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('dump.f2fs', required=True)
+ # Get sector count and sector size
+ _, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ sector_size = None
+ sector_count = None
+ for line in dump.splitlines():
+ if 'Info: sector size = ' in line:
+ # expected: 'Info: sector size = 512'
+ sector_size = int(line.split()[4])
+ elif 'Info: total FS sectors = ' in line:
+ # expected: 'Info: total FS sectors = 102400 (50 MB)'
+ sector_count = int(line.split()[5])
+
+ if None not in (sector_size, sector_count):
+ break
+ else:
+ self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump))
+ self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
+
+ return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+ if platform.system() == 'FreeBSD':
+ MKFS = "newfs_msdos"
+ else:
+ MKFS = 'mkfs.vfat'
+ GROW = 'fatresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path(self.GROW, required=True)
+ _, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ for line in output.splitlines()[1:]:
+ param, value = line.split(':', 1)
+ if param.strip() == 'Size':
+ return int(value.strip())
+ self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
+
+ def grow_cmd(self, dev):
+ cmd = self.module.get_bin_path(self.GROW)
+ return [cmd, "-s", str(dev.size()), str(dev.path)]
+
+
+class LVM(Filesystem):
+ MKFS = 'pvcreate'
+ MKFS_FORCE_FLAGS = '-f'
+ GROW = 'pvresize'
+
+ def get_fs_size(self, dev):
+ cmd = self.module.get_bin_path('pvs', required=True)
+ _, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+ block_count = int(size)
+ return block_count
+
+
+class Swap(Filesystem):
+ MKFS = 'mkswap'
+ MKFS_FORCE_FLAGS = '-f'
+
+
+FILESYSTEMS = {
+ 'ext2': Ext2,
+ 'ext3': Ext3,
+ 'ext4': Ext4,
+ 'ext4dev': Ext4,
+ 'f2fs': F2fs,
+ 'reiserfs': Reiserfs,
+ 'xfs': XFS,
+ 'btrfs': Btrfs,
+ 'vfat': VFAT,
+ 'ocfs2': Ocfs2,
+ 'LVM2_member': LVM,
+ 'swap': Swap,
+}
+
+
+def main():
+ friendly_names = {
+ 'lvm': 'LVM2_member',
+ }
+
+ fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
+ dev=dict(type='path', required=True, aliases=['device']),
+ opts=dict(type='str'),
+ force=dict(type='bool', default=False),
+ resizefs=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ('state', 'present', ['fstype'])
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.params['force']
+ resizefs = module.params['resizefs']
+
+ changed = False
+
+ if not os.path.exists(dev):
+ msg = "Device %s not found." % dev
+ if state == "present":
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(msg=msg)
+
+ dev = Device(module, dev)
+
+ cmd = module.get_bin_path('blkid', required=True)
+ rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
+ # In case blkid isn't able to identify an existing filesystem, device is considered as empty,
+ # then this existing filesystem would be overwritten even if force isn't enabled.
+ fs = raw_fs.strip()
+
+ if state == "present":
+ if fstype in friendly_names:
+ fstype = friendly_names[fstype]
+
+ try:
+ klass = FILESYSTEMS[fstype]
+ except KeyError:
+ module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+ filesystem = klass(module)
+
+ same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+ if same_fs and not resizefs and not force:
+ module.exit_json(changed=False)
+ elif same_fs and resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
+
+ # create fs
+ filesystem.create(opts, dev)
+ changed = True
+
+ elif fs:
+ # wipe fs signatures
+ filesystem = Filesystem(module)
+ filesystem.wipefs(dev)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py
new file mode 100644
index 00000000..b1df1da8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/gconftool2.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
+# Copyright: (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Edit GNOME Configurations
+description:
+ - This module allows for the manipulation of GNOME 2 Configuration via
+ gconftool-2. Please see the gconftool-2(1) man pages for more details.
+options:
+ key:
+ type: str
+ description:
+ - A GConf preference key is an element in the GConf repository
+ that corresponds to an application preference. See man gconftool-2(1)
+ required: yes
+ value:
+ type: str
+ description:
+ - Preference keys typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". See man gconftool-2(1)
+ value_type:
+ type: str
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ choices: [ bool, float, int, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the key/value.
+ required: yes
+ choices: [ absent, get, present ]
+ config_source:
+ type: str
+ description:
+ - Specify a configuration source to use rather than the default path.
+ See man gconftool-2(1)
+ direct:
+ description:
+ - Access the config database directly, bypassing server. If direct is
+ specified then the config_source must be specified as well.
+ See man gconftool-2(1)
+ type: bool
+ default: 'no'
+'''
+
+EXAMPLES = """
+- name: Change the widget font to "Serif 12"
+  community.general.gconftool2:
+    key: "/desktop/gnome/interface/font_name"
+    value_type: "string"
+    value: "Serif 12"
+    state: present
+"""
+
+RETURN = '''
+ key:
+ description: The key specified in the module parameters
+ returned: success
+ type: str
+ sample: /desktop/gnome/interface/font_name
+ value_type:
+ description: The type of the value that was changed
+ returned: success
+ type: str
+ sample: string
+ value:
+ description: The value of the preference key after executing the module
+ returned: success
+ type: str
+ sample: "Serif 12"
+...
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class GConf2Preference(object):
+ def __init__(self, ansible, key, value_type, value,
+ direct=False, config_source=""):
+ self.ansible = ansible
+ self.key = key
+ self.value_type = value_type
+ self.value = value
+ self.config_source = config_source
+ self.direct = direct
+
+ def value_already_set(self):
+ return False
+
+ def call(self, call_type, fail_onerr=True):
+ """ Helper function to perform gconftool-2 operations """
+ config_source = ''
+ direct = ''
+ changed = False
+ out = ''
+
+ # If the configuration source is different from the default, create
+ # the argument
+ if self.config_source is not None and len(self.config_source) > 0:
+ config_source = "--config-source " + self.config_source
+
+ # If direct is true, create the argument
+ if self.direct:
+ direct = "--direct"
+
+ # Execute the call
+ cmd = "gconftool-2 "
+ try:
+ # If the call is "get", then we don't need as many parameters and
+ # we can ignore some
+ if call_type == 'get':
+ cmd += "--get {0}".format(self.key)
+ # Otherwise, we will use all relevant parameters
+ elif call_type == 'set':
+ cmd += "{0} {1} --type {2} --{3} {4} \"{5}\"".format(direct,
+ config_source,
+ self.value_type,
+ call_type,
+ self.key,
+ self.value)
+ elif call_type == 'unset':
+ cmd += "--unset {0}".format(self.key)
+
+ # Start external command
+ rc, out, err = self.ansible.run_command(cmd, use_unsafe_shell=True)
+
+ if len(err) > 0:
+ if fail_onerr:
+ self.ansible.fail_json(msg='gconftool-2 failed with '
+ 'error: %s' % (str(err)))
+ else:
+ changed = True
+
+ except OSError as exception:
+ self.ansible.fail_json(msg='gconftool-2 failed with exception: '
+ '%s' % exception)
+ return changed, out.rstrip()
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ key=dict(type='str', required=True),
+ value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+ value=dict(type='str'),
+ state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ direct=dict(type='bool', default=False),
+ config_source=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ state_values = {"present": "set", "absent": "unset", "get": "get"}
+
+ # Assign module values to dictionary values
+ key = module.params['key']
+ value_type = module.params['value_type']
+    # Normalize boolean-like values; guard against a missing value (for
+    # example when state is "get").
+    value = module.params['value']
+    if value is not None and value.lower() == "true":
+        value = "true"
+    elif value is not None and value.lower() == "false":
+        value = "false"
+
+ state = state_values[module.params['state']]
+ direct = module.params['direct']
+ config_source = module.params['config_source']
+
+ # Initialize some variables for later
+ change = False
+ new_value = ''
+
+ if state != "get":
+ if value is None or value == "":
+ module.fail_json(msg='State %s requires "value" to be set'
+ % str(state))
+ elif value_type is None or value_type == "":
+ module.fail_json(msg='State %s requires "value_type" to be set'
+ % str(state))
+
+ if direct and config_source is None:
+ module.fail_json(msg='If "direct" is "yes" then the ' +
+ '"config_source" must be specified')
+ elif not direct and config_source is not None:
+ module.fail_json(msg='If the "config_source" is specified ' +
+ 'then "direct" must be "yes"')
+
+ # Create a gconf2 preference
+ gconf_pref = GConf2Preference(module, key, value_type,
+ value, direct, config_source)
+ # Now we get the current value, if not found don't fail
+ _, current_value = gconf_pref.call("get", fail_onerr=False)
+
+ # Check if the current value equals the value we want to set. If not, make
+ # a change
+ if current_value != value:
+ # If check mode, we know a change would have occurred.
+ if module.check_mode:
+ # So we will set the change to True
+ change = True
+ # And set the new_value to the value that would have been set
+ new_value = value
+ # If not check mode make the change.
+ else:
+ change, new_value = gconf_pref.call(state)
+ # If the value we want to set is the same as the current_value, we will
+ # set the new_value to the current_value for reporting
+ else:
+ new_value = current_value
+
+ facts = dict(gconftool2={'changed': change,
+ 'key': key,
+ 'value_type': value_type,
+ 'new_value': new_value,
+ 'previous_value': current_value,
+ 'playbook_value': module.params['value']})
+
+ module.exit_json(changed=change, ansible_facts=facts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
new file mode 100644
index 00000000..d1e37573
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/interfaces_file.py
@@ -0,0 +1,399 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment: files
+description:
+  - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+    to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be present in the file already.
+  - Read information about interfaces from interfaces-style files.
+options:
+ dest:
+ type: path
+ description:
+ - Path to the interfaces file
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+      - Name of the interface, required for value changes or option removal.
+ address_family:
+ type: str
+ description:
+      - Address family of the interface, useful if the same interface name is used for both inet and inet6.
+ option:
+ type: str
+ description:
+      - Name of the option, required for value changes or option removal.
+ value:
+ type: str
+ description:
+      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing existing
+        ones or cleaning the whole option set is supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: 'no'
+ state:
+ type: str
+ description:
+ - If set to C(absent) the option or section will be removed if present instead of created.
+ default: "present"
+ choices: [ "present", "absent" ]
+
+notes:
+  - If an option is defined multiple times, the last one will be updated, but all of them will be deleted in case of an absent state.
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: interfaces dictionary
+ returned: success
+ type: complex
+ contains:
+ ifaces:
+ description: interface dictionary
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: interface address family
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: interface method
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: other options, all values returned as strings
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: list of C(pre-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: list of C(up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: list of C(post-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: list of C(down) scripts
+ returned: success
+ type: list
+ sample:
+ - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+'''
+
+EXAMPLES = '''
+- name: Set eth1 mtu configuration value to 8000
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ value: 8000
+ backup: yes
+ state: present
+ register: eth1_cfg
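+
+# Illustrative sketch (not part of the original documentation): removing an
+# option from the same interface file used above.
+- name: Remove the mtu option from eth1
+  community.general.interfaces_file:
+    dest: /etc/network/interfaces.d/eth1.cfg
+    iface: eth1
+    option: mtu
+    backup: yes
+    state: absent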
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes
+
+
+def lineDict(line):
+ return {'line': line, 'line_type': 'unknown'}
+
+
+def optionDict(line, iface, option, value, address_family):
+ return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
+
+
+def getValueFromLine(s):
+ spaceRe = re.compile(r'\s+')
+ for m in spaceRe.finditer(s):
+ pass
+ valueEnd = m.start()
+ option = s.split()[0]
+ optionStart = s.find(option)
+ optionLen = len(option)
+ valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
+ return s[valueStart:valueEnd]
+
+
+def read_interfaces_file(module, filename):
+    # Use a context manager so the file handle is always closed.
+    with open(filename, 'r') as f:
+        return read_interfaces_lines(module, f)
+
+
+def read_interfaces_lines(module, line_strings):
+ lines = []
+ ifaces = {}
+ currently_processing = None
+ i = 0
+ for line in line_strings:
+ i += 1
+ words = line.split()
+ if len(words) < 1:
+ lines.append(lineDict(line))
+ continue
+ if words[0][0] == "#":
+ lines.append(lineDict(line))
+ continue
+ if words[0] == "mapping":
+ # currmap = calloc(1, sizeof *currmap);
+ lines.append(lineDict(line))
+ currently_processing = "MAPPING"
+ elif words[0] == "source":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-dir":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-directory":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "iface":
+ currif = {
+ "pre-up": [],
+ "up": [],
+ "down": [],
+ "post-up": []
+ }
+ iface_name = words[1]
+ try:
+ currif['address_family'] = words[2]
+ except IndexError:
+ currif['address_family'] = None
+ address_family = currif['address_family']
+ try:
+ currif['method'] = words[3]
+ except IndexError:
+ currif['method'] = None
+
+ ifaces[iface_name] = currif
+ lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
+ currently_processing = "IFACE"
+ elif words[0] == "auto":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0].startswith("allow-"):
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-auto-down":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-scripts":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ else:
+ if currently_processing == "IFACE":
+ option_name = words[0]
+ # TODO: if option_name in currif.options
+ value = getValueFromLine(line)
+ lines.append(optionDict(line, iface_name, option_name, value, address_family))
+ if option_name in ["pre-up", "up", "down", "post-up"]:
+ currif[option_name].append(value)
+ else:
+ currif[option_name] = value
+ elif currently_processing == "MAPPING":
+ lines.append(lineDict(line))
+ elif currently_processing == "NONE":
+ lines.append(lineDict(line))
+ else:
+ module.fail_json(msg="misplaced option %s in line %d" % (line, i))
+ return None, None
+ return lines, ifaces
+
+
+def setInterfaceOption(module, lines, iface, option, raw_value, state, address_family=None):
+ value = str(raw_value)
+ changed = False
+
+ iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+ if address_family is not None:
+ iface_lines = [item for item in iface_lines
+ if "address_family" in item and item["address_family"] == address_family]
+
+ if len(iface_lines) < 1:
+ # interface not found
+ module.fail_json(msg="Error: interface %s not found" % iface)
+ return changed, None
+
+ iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
+ target_options = list(filter(lambda i: i['option'] == option, iface_options))
+
+ if state == "present":
+ if len(target_options) < 1:
+ changed = True
+ # add new option
+ last_line_dict = iface_lines[-1]
+ changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+ else:
+ if option in ["pre-up", "up", "down", "post-up"]:
+ if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+ changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+ else:
+ # if more than one option found edit the last one
+ if target_options[-1]['value'] != value:
+ changed = True
+ target_option = target_options[-1]
+ old_line = target_option['line']
+ old_value = target_option['value']
+ address_family = target_option['address_family']
+ prefix_start = old_line.find(option)
+ optionLen = len(option)
+ old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+ start = old_value_position.start() + prefix_start + optionLen
+ end = old_value_position.end() + prefix_start + optionLen
+ line = old_line[:start] + value + old_line[end:]
+ index = len(lines) - lines[::-1].index(target_option) - 1
+ lines[index] = optionDict(line, iface, option, value, address_family)
+ elif state == "absent":
+ if len(target_options) >= 1:
+ if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+ for target_option in filter(lambda i: i['value'] == value, target_options):
+ changed = True
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ changed = True
+ for target_option in target_options:
+ lines = list(filter(lambda ln: ln != target_option, lines))
+ else:
+ module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+ return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+ # Changing method of interface is not an addition
+ if option == 'method':
+ changed = False
+ for ln in lines:
+ if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ changed = True
+ ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+ ln['params']['method'] = value
+ return changed, lines
+
+ last_line = last_line_dict['line']
+ prefix_start = last_line.find(last_line.split()[0])
+ suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+ prefix = last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+ # interface has no options, ident
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd, 'wb')
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ f.close()
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = setInterfaceOption(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ _, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py
new file mode 100644
index 00000000..56475268
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/iptables_state.py
@@ -0,0 +1,649 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP
+ packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same
+ as the behaviour of the C(iptables-save) and C(iptables-restore) (or
+ C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
+ module uses internally.
+ - Modifying the state of the firewall remotely may lead to losing access to
+ the host if the new ruleset contains a mistake. This module embeds a
+ rollback feature to avoid this, by telling the host to restore the previous
+ rules if a cookie is still present after a given delay, while the
+ controller keeps trying to remove this cookie on the host through a new
+ connection.
+notes:
+ - The rollback feature is not a module option and depends on the task's
+ attributes. To enable it, the module must be played asynchronously, i.e.
+ by setting the task attributes I(poll) to C(0), and I(async) to a value
+ less than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the
+ rollback will still happen when needed, but you will experience a
+ connection timeout instead of the more relevant information returned by
+ the module after its failure.
+ - This module supports I(check_mode).
+options:
+ counters:
+ description:
+ - Save or restore the values of all packet and byte counters.
+ - When C(true), the module is not idempotent.
+ type: bool
+ default: false
+ ip_version:
+ description:
+ - Which version of the IP protocol this module should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ modprobe:
+ description:
+ - Specify the path to the C(modprobe) program internally used by iptables
+ related commands to load kernel modules.
+ - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ executable's path.
+ type: path
+ noflush:
+ description:
+ - For I(state=restored), ignored otherwise.
+ - If C(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If C(true), the
+ previous rules are left untouched (but policies are updated anyway,
+ for all built-in chains).
+ type: bool
+ default: false
+ path:
+ description:
+ - The file the iptables state should be saved to.
+ - The file the iptables state should be restored from.
+ type: path
+ required: yes
+ state:
+ description:
+ - Whether the firewall state should be saved (into a file) or restored
+ (from a file).
+ type: str
+ choices: [ saved, restored ]
+ required: yes
+ table:
+ description:
+ - When I(state=restored), restore only the named table even if the input
+ file contains other tables. Fail if the named table is not declared in
+ the file.
+ - When I(state=saved), restrict output to the specified table. If not
+ specified, output includes all active tables.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent instant failure in case
+ multiple instances of the program are running concurrently.
+ type: int
+requirements: [iptables, ip6tables]
+'''
+
+EXAMPLES = r'''
+# This will apply to all loaded/active IPv4 tables.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ state: saved
+ path: /etc/sysconfig/iptables
+
+# This will apply only to IPv6 filter table.
+- name: save current state of the firewall in system file
+ community.general.iptables_state:
+ ip_version: ipv6
+ table: filter
+ state: saved
+ path: /etc/iptables/rules.v6
+
+# This will load a state from a file, with a rollback in case of access loss
+- name: restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will load new rules by appending them to the current ones
+- name: restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ noflush: true
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will only retrieve information
+- name: get current state of the firewall
+ community.general.iptables_state:
+ state: saved
+ path: /tmp/iptables
+ check_mode: yes
+ changed_when: false
+ register: iptables_state
+
+- name: show current state of the firewall
+ ansible.builtin.debug:
+ var: iptables_state.initial_state
+'''
+
+RETURN = r'''
+applied:
+ description: Whether or not the wanted state has been successfully restored.
+ type: bool
+ returned: always
+ sample: true
+initial_state:
+ description: The current state of the firewall when the module starts.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD ACCEPT [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+restored:
+ description: The state the module restored, whether it was finally applied or not.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT DROP [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+ "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "COMMIT",
+ "# Completed"
+ ]
+saved:
+ description: The iptables state the module saved.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+tables:
+ description: The tables of interest when the module starts.
+ type: dict
+ contains:
+ table:
+ description: Policies and rules for all chains of the named table.
+ type: list
+ elements: str
+ sample: |-
+ {
+ "filter": [
+ ":INPUT ACCEPT",
+ ":FORWARD ACCEPT",
+ ":OUTPUT ACCEPT",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+ ],
+ "nat": [
+ ":PREROUTING ACCEPT",
+ ":INPUT ACCEPT",
+ ":OUTPUT ACCEPT",
+ ":POSTROUTING ACCEPT"
+ ]
+ }
+ returned: always
+'''
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+
+IPTABLES = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+SAVE = dict(
+ ipv4='iptables-save',
+ ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+ ipv4='iptables-restore',
+ ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+ '''
+ Read a file and store its content in a variable as a list.
+ '''
+ with open(b_path, 'r') as f:
+ text = f.read()
+ lines = text.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def write_state(b_path, lines, changed):
+ '''
+ Write given contents to the given path, and return changed status.
+ '''
+ # Populate a temporary file
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in lines:
+ f.write('%s\n' % line)
+
+ # Prepare to copy temporary file to the final destination
+ if not os.path.exists(b_path):
+ b_destdir = os.path.dirname(b_path)
+ destdir = to_native(b_destdir, errors='surrogate_or_strict')
+ if b_destdir and not os.path.exists(b_destdir) and not module.check_mode:
+ try:
+ os.makedirs(b_destdir)
+ except Exception as e:
+ module.fail_json(
+ msg='Error creating %s: %s' % (destdir, to_native(e)),
+ initial_state=lines)
+ changed = True
+
+ elif not filecmp.cmp(tmpfile, b_path):
+ changed = True
+
+ # Do it
+ if changed and not module.check_mode:
+ try:
+ shutil.copyfile(tmpfile, b_path)
+ except Exception as e:
+ path = to_native(b_path, errors='surrogate_or_strict')
+ module.fail_json(
+ msg='Error saving state into %s: %s' % (path, to_native(e)),
+ initial_state=lines)
+
+ return changed
+
+
+def initialize_from_null_state(initializer, initcommand, table):
+ '''
+ This ensures the iptables-save output is suitable for iptables-restore to
+ roll back to, i.e. that it is not empty. This also works with the
+ iptables-nft-save alternative.
+ '''
+ if table is None:
+ table = 'filter'
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write('*%s\nCOMMIT\n' % table)
+
+ initializer.append(tmpfile)
+ (rc, out, err) = module.run_command(initializer, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+ return (rc, out, err)
+
+
+def filter_and_format_state(string):
+ '''
+ Remove timestamps to ensure idempotence between runs, remove counters
+ unless they were requested, and return the result as a list of lines.
+ '''
+ string = re.sub('((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', '\\1', string)
+ if not module.params['counters']:
+ string = re.sub('[[][0-9]+:[0-9]+[]]', '[0:0]', string)
+ lines = string.splitlines()
+ while '' in lines:
+ lines.remove('')
+ return (lines)
+
+
+def per_table_state(command, state):
+ '''
+ Convert raw iptables-save output into a usable data structure, for reliable
+ comparisons between initial and final states.
+ '''
+ tables = dict()
+ for t in TABLES:
+ COMMAND = list(command)
+ if '*%s' % t in state.splitlines():
+ COMMAND.extend(['--table', t])
+ (rc, out, err) = module.run_command(COMMAND, check_rc=True)
+ out = re.sub('(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, '', out)
+ out = re.sub(' *[[][0-9]+:[0-9]+[]] *', '', out)
+ table = out.splitlines()
+ while '' in table:
+ table.remove('')
+ tables[t] = table
+ return (tables)
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ state=dict(type='str', choices=['saved', 'restored'], required=True),
+ table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ noflush=dict(type='bool', default=False),
+ counters=dict(type='bool', default=False),
+ modprobe=dict(type='path'),
+ ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'),
+ wait=dict(type='int'),
+ _timeout=dict(type='int'),
+ _back=dict(type='path'),
+ ),
+ required_together=[
+ ['_timeout', '_back'],
+ ],
+ supports_check_mode=True,
+ )
+
+ # We'll parse iptables-restore stderr
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
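+ # _back and _timeout are private parameters; they are set by the
+ # accompanying action plugin to implement the rollback feature and are not
+ # meant to be set directly in playbooks.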
+ path = module.params['path']
+ state = module.params['state']
+ table = module.params['table']
+ noflush = module.params['noflush']
+ counters = module.params['counters']
+ modprobe = module.params['modprobe']
+ ip_version = module.params['ip_version']
+ wait = module.params['wait']
+ _timeout = module.params['_timeout']
+ _back = module.params['_back']
+
+ bin_iptables = module.get_bin_path(IPTABLES[ip_version], True)
+ bin_iptables_save = module.get_bin_path(SAVE[ip_version], True)
+ bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True)
+
+ os.umask(0o077)
+ changed = False
+ COMMANDARGS = []
+ INITCOMMAND = [bin_iptables_save]
+ INITIALIZER = [bin_iptables_restore]
+ TESTCOMMAND = [bin_iptables_restore, '--test']
+
+ if counters:
+ COMMANDARGS.append('--counters')
+
+ if table is not None:
+ COMMANDARGS.extend(['--table', table])
+
+ if wait is not None:
+ TESTCOMMAND.extend(['--wait', '%s' % wait])
+
+ if modprobe is not None:
+ b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict')
+ if not os.path.exists(b_modprobe):
+ module.fail_json(msg="modprobe %s not found" % modprobe)
+ if not os.path.isfile(b_modprobe):
+ module.fail_json(msg="modprobe %s not a file" % modprobe)
+ if not os.access(b_modprobe, os.R_OK):
+ module.fail_json(msg="modprobe %s not readable" % modprobe)
+ if not os.access(b_modprobe, os.X_OK):
+ module.fail_json(msg="modprobe %s not executable" % modprobe)
+ COMMANDARGS.extend(['--modprobe', modprobe])
+ INITIALIZER.extend(['--modprobe', modprobe])
+ INITCOMMAND.extend(['--modprobe', modprobe])
+ TESTCOMMAND.extend(['--modprobe', modprobe])
+
+ SAVECOMMAND = list(COMMANDARGS)
+ SAVECOMMAND.insert(0, bin_iptables_save)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if state == 'restored':
+ if not os.path.exists(b_path):
+ module.fail_json(msg="Source %s not found" % path)
+ if not os.path.isfile(b_path):
+ module.fail_json(msg="Source %s not a file" % path)
+ if not os.access(b_path, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % path)
+ state_to_restore = read_state(b_path)
+ else:
+ cmd = ' '.join(SAVECOMMAND)
+
+ (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True)
+
+ # The issue arises when restoring state from an empty iptables-save
+ # output... what happens when, say:
+ # - no table is specified, and iptables-save's output contains only the nat table;
+ # - we feed a filter ruleset to iptables-restore, and it locks us out
+ # of the host;
+ # then rolling the iptables state back to the previous (working) setup
+ # does not override the current filter table, because no filter table is
+ # stored in the backup! So we have to ensure that the tables to be restored
+ # have a backup in case of rollback.
+ if table is None:
+ if state == 'restored':
+ for t in TABLES:
+ if '*%s' % t in state_to_restore:
+ if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, t)
+ elif len(stdout) == 0:
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, 'filter')
+
+ elif state == 'restored' and '*%s' % table not in state_to_restore:
+ module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+ elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, table)
+
+ initial_state = filter_and_format_state(stdout)
+ if initial_state is None:
+ module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+ # Depending on the value of 'table', initref_state may differ from
+ # initial_state.
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_before = per_table_state(SAVECOMMAND, stdout)
+ initref_state = filter_and_format_state(stdout)
+
+ if state == 'saved':
+ changed = write_state(b_path, initref_state, changed)
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ saved=initref_state)
+
+ #
+ # All remaining code is for state=restored
+ #
+
+ MAINCOMMAND = list(COMMANDARGS)
+ MAINCOMMAND.insert(0, bin_iptables_restore)
+
+ if wait is not None:
+ MAINCOMMAND.extend(['--wait', '%s' % wait])
+
+ if _back is not None:
+ b_back = to_bytes(_back, errors='surrogate_or_strict')
+ garbage = write_state(b_back, initref_state, changed)
+ BACKCOMMAND = list(MAINCOMMAND)
+ BACKCOMMAND.append(_back)
+
+ if noflush:
+ MAINCOMMAND.append('--noflush')
+
+ MAINCOMMAND.append(path)
+ cmd = ' '.join(MAINCOMMAND)
+
+ TESTCOMMAND = list(MAINCOMMAND)
+ TESTCOMMAND.insert(1, '--test')
+ error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+ # Due to a bug in iptables-nft-restore --test, we have to validate tables
+ # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+ for t in tables_before:
+ testcommand = list(TESTCOMMAND)
+ testcommand.extend(['--table', t])
+ (rc, stdout, stderr) = module.run_command(testcommand)
+
+ if 'Another app is currently holding the xtables lock' in stderr:
+ error_msg = stderr
+
+ if rc != 0:
+ cmd = ' '.join(testcommand)
+ module.fail_json(
+ msg=error_msg,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ if module.check_mode:
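+ # In check mode, dump the current state to a temporary file and compare it
+ # with the target file: if they already match, restoring would be a no-op.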
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ for line in initial_state:
+ f.write('%s\n' % line)
+
+ if filecmp.cmp(tmpfile, b_path):
+ restored_state = initial_state
+ else:
+ restored_state = state_to_restore
+
+ else:
+ # Leave enough time for the action plugin to retrieve the async status of
+ # the module in case of a bad option type/value and the like.
+ if _back is not None:
+ b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict')
+ while True:
+ if os.path.exists(b_starter):
+ os.remove(b_starter)
+ break
+ else:
+ time.sleep(0.01)
+ continue
+
+ (rc, stdout, stderr) = module.run_command(MAINCOMMAND)
+ if 'Another app is currently holding the xtables lock' in stderr:
+ module.fail_json(
+ msg=stderr,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ restored_state = filter_and_format_state(stdout)
+
+ if restored_state != initref_state and restored_state != initial_state:
+ if module.check_mode:
+ changed = True
+ else:
+ tables_after = per_table_state(SAVECOMMAND, stdout)
+ if tables_after != tables_before:
+ changed = True
+
+ if _back is None or module.check_mode:
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # The rollback implementation currently needs:
+ # Here:
+ # * test existence of the backup file, exit with success if it doesn't exist
+ # * otherwise, restore iptables from this file and return failure
+ # Action plugin:
+ # * try to remove the backup file
+ # * wait until the async task is finished and retrieve its final status
+ # * modify it and return the result
+ # Task:
+ # * task attribute 'async' set to the same value as (or lower than) the
+ # ansible timeout
+ # * task attribute 'poll' equals 0
+ #
+ for x in range(_timeout):
+ if os.path.exists(b_back):
+ time.sleep(1)
+ continue
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # Here we are: for whatever reason, but probably due to the current ruleset,
+ # the action plugin (i.e. on the controller) was unable to remove the backup
+ # cookie, so we restore initial state from it.
+ (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True)
+ os.remove(b_back)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_rollback = per_table_state(SAVECOMMAND, stdout)
+
+ msg = (
+ "Failed to confirm state restored from %s after %ss. "
+ "Firewall has been rolled back to its initial state." % (path, _timeout)
+ )
+
+ module.fail_json(
+ changed=(tables_before != tables_rollback),
+ msg=msg,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py
new file mode 100644
index 00000000..7333397b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_cert.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, RSD Services S.A
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: java_cert
+short_description: Uses keytool to import/remove key from java keystore (cacerts)
+description:
+ - This is a wrapper module around keytool, which can be used to import/remove
+ certificates from a given java keystore.
+options:
+ cert_url:
+ description:
+ - Basic URL to fetch SSL certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: str
+ cert_port:
+ description:
+ - Port to connect to URL.
+ - This will be used to create the server connection string in the form C(URL:PORT).
+ type: int
+ default: 443
+ cert_path:
+ description:
+ - Local path to load certificate from.
+ - One of C(cert_url) or C(cert_path) is required to load certificate.
+ type: path
+ cert_alias:
+ description:
+ - Imported certificate alias.
+ - The alias is used when checking for the presence of a certificate in the keystore.
+ type: str
+ trust_cacert:
+ description:
+ - Trust the imported certificate as a CA certificate.
+ type: bool
+ default: False
+ version_added: '0.2.0'
+ pkcs12_path:
+ description:
+ - Local path to load PKCS12 keystore from.
+ type: path
+ pkcs12_password:
+ description:
+ - Password for importing from PKCS12 keystore.
+ type: str
+ default: ''
+ pkcs12_alias:
+ description:
+ - Alias in the PKCS12 keystore.
+ type: str
+ keystore_path:
+ description:
+ - Path to keystore.
+ type: path
+ keystore_pass:
+ description:
+ - Keystore password.
+ type: str
+ required: true
+ keystore_create:
+ description:
+ - Create keystore if it does not exist.
+ type: bool
+ default: false
+ keystore_type:
+ description:
+ - Keystore type (JCEKS, JKS).
+ type: str
+ executable:
+ description:
+ - Path to the keytool binary; if not set, keytool is searched for in the PATH.
+ type: str
+ default: keytool
+ state:
+ description:
+ - Defines the action, which can be either certificate import or removal.
+ type: str
+ choices: [ absent, present ]
+ default: present
+author:
+- Adam Hamsik (@haad)
+'''
+
+EXAMPLES = r'''
+- name: Import SSL certificate from google.com to a given cacerts keystore
+ community.general.java_cert:
+ cert_url: google.com
+ cert_port: 443
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ state: present
+
+- name: Remove certificate with given alias from a keystore
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ executable: /usr/lib/jvm/jre7/bin/keytool
+ state: absent
+
+- name: Import trusted CA from SSL certificate
+ community.general.java_cert:
+ cert_path: /opt/certs/rootca.crt
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+ cert_alias: LE_RootCA
+ trust_cacert: True
+
+- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ cert_alias: default
+ keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+
+- name: Import SSL certificate to JCEKS keystore
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ pkcs12_alias: default
+ pkcs12_password: somepass
+ cert_alias: default
+ keystore_path: /opt/someapp/security/keystore.jceks
+ keystore_type: "JCEKS"
+ keystore_pass: changeit
+ keystore_create: yes
+ state: present
+'''
+
+RETURN = r'''
+msg:
+ description: Output from stdout of keytool command after execution of given command.
+ returned: success
+ type: str
+ sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
+
+rc:
+ description: Keytool command execution return value.
+ returned: success
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done.
+ returned: success
+ type: str
+ sample: "keytool -importcert -noprompt -keystore"
+'''
+
+import os
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_keystore_type(keystore_type):
+ ''' Return the -storetype option when a custom keystore type is given '''
+ if keystore_type:
+ return " -storetype '%s'" % keystore_type
+ return ''
+
+
+def check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Check if certificate with alias is present in keystore
+ located at keystore_path '''
+ test_cmd = ("%s -noprompt -list -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ (check_rc, _, _) = module.run_command(test_cmd)
+ if check_rc == 0:
+ return True
+ return False
+
+
+def import_cert_url(module, executable, url, port, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from URL into keystore located at keystore_path '''
+
+ https_proxy = os.getenv("https_proxy")
+ no_proxy = os.getenv("no_proxy")
+
+ proxy_opts = ''
+ if https_proxy is not None:
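+ # https_proxy is expected in plain "host:port" form; a value that includes
+ # a scheme (e.g. "http://proxy:8080") would not split cleanly here.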
+ (proxy_host, proxy_port) = https_proxy.split(':')
+ proxy_opts = "-J-Dhttps.proxyHost=%s -J-Dhttps.proxyPort=%s" % (proxy_host, proxy_port)
+
+ if no_proxy is not None:
+ # For Java's nonProxyHosts property, items are separated by '|',
+ # and patterns have to start with "*".
+ non_proxy_hosts = no_proxy.replace(',', '|')
+ non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)
+
+ # The property name is http.nonProxyHosts, there is no
+ # separate setting for HTTPS.
+ proxy_opts += " -J-Dhttp.nonProxyHosts='%s'" % non_proxy_hosts
+
+ fetch_cmd = "%s -printcert -rfc -sslserver %s %s:%d" % (executable, proxy_opts, url, port)
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, alias,
+ get_keystore_type(keystore_type))
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Fetch SSL certificate from remote host.
+ (_, fetch_out, _) = module.run_command(fetch_cmd, check_rc=True)
+
+ # Use remote certificate from remote host and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ data=fetch_out,
+ check_rc=False)
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd,
+ error=import_err)
+
+
+def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from path into keystore located at
+ keystore_path as alias '''
+ import_cmd = ("%s -importcert -noprompt -keystore '%s' "
+ "-storepass '%s' -file '%s' -alias '%s' %s") % (executable, keystore_path,
+ keystore_pass, path, alias,
+ get_keystore_type(keystore_type))
+
+ if trust_cacert:
+ import_cmd = import_cmd + " -trustcacerts"
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def import_pkcs12_path(module, executable, path, keystore_path, keystore_pass, pkcs12_pass, pkcs12_alias, alias, keystore_type):
+ ''' Import pkcs12 from path into keystore located at
+ keystore_path as alias '''
+ import_cmd = ("%s -importkeystore -noprompt -destkeystore '%s' -srcstoretype PKCS12 "
+ "-deststorepass '%s' -destkeypass '%s' -srckeystore '%s' -srcstorepass '%s' "
+ "-srcalias '%s' -destalias '%s' %s") % (executable, keystore_path, keystore_pass,
+ keystore_pass, path, pkcs12_pass, pkcs12_alias,
+ alias, get_keystore_type(keystore_type))
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Delete certificate identified with alias from keystore on keystore_path '''
+ del_cmd = ("%s -delete -keystore '%s' -storepass '%s' "
+ "-alias '%s' %s") % (executable, keystore_path, keystore_pass, alias, get_keystore_type(keystore_type))
+
+ # Delete SSL certificate from keystore
+ (del_rc, del_out, del_err) = module.run_command(del_cmd, check_rc=True)
+
+ diff = {'before': '%s\n' % alias, 'after': None}
+
+ module.exit_json(changed=True, msg=del_out,
+ rc=del_rc, cmd=del_cmd, stdout=del_out,
+ error=del_err, diff=diff)
+
+
+def test_keytool(module, executable):
+ ''' Test if keytool is actually executable or not '''
+ module.run_command("%s" % executable, check_rc=True)
+
+
+def test_keystore(module, keystore_path):
+ ''' Check if we can access keystore as file or not '''
+ if keystore_path is None:
+ keystore_path = ''
+
+ if not os.path.exists(keystore_path) and not os.path.isfile(keystore_path):
+ # Keystore doesn't exist and keystore_create was not requested, so fail
+ module.fail_json(changed=False, msg="Module requires existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+ argument_spec = dict(
+ cert_url=dict(type='str'),
+ cert_path=dict(type='path'),
+ pkcs12_path=dict(type='path'),
+ pkcs12_password=dict(type='str', no_log=True),
+ pkcs12_alias=dict(type='str'),
+ cert_alias=dict(type='str'),
+ cert_port=dict(type='int', default=443),
+ keystore_path=dict(type='path'),
+ keystore_pass=dict(type='str', required=True, no_log=True),
+ trust_cacert=dict(type='bool', default=False),
+ keystore_create=dict(type='bool', default=False),
+ keystore_type=dict(type='str'),
+ executable=dict(type='str', default='keytool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[['cert_path', 'cert_url', 'pkcs12_path']],
+ required_together=[['keystore_path', 'keystore_pass']],
+ mutually_exclusive=[
+ ['cert_url', 'cert_path', 'pkcs12_path']
+ ],
+ supports_check_mode=True,
+ )
+
+ url = module.params.get('cert_url')
+ path = module.params.get('cert_path')
+ port = module.params.get('cert_port')
+
+ pkcs12_path = module.params.get('pkcs12_path')
+ pkcs12_pass = module.params.get('pkcs12_password', '')
+ pkcs12_alias = module.params.get('pkcs12_alias', '1')
+
+ cert_alias = module.params.get('cert_alias') or url
+ trust_cacert = module.params.get('trust_cacert')
+
+ keystore_path = module.params.get('keystore_path')
+ keystore_pass = module.params.get('keystore_pass')
+ keystore_create = module.params.get('keystore_create')
+ keystore_type = module.params.get('keystore_type')
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+
+ if path and not cert_alias:
+ module.fail_json(changed=False,
+ msg="Using local path import from %s requires alias argument."
+ % keystore_path)
+
+ test_keytool(module, executable)
+
+ if not keystore_create:
+ test_keystore(module, keystore_path)
+
+ cert_present = check_cert_present(module, executable, keystore_path,
+ keystore_pass, cert_alias, keystore_type)
+
+ if state == 'absent' and cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ elif state == 'present' and not cert_present:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if pkcs12_path:
+ import_pkcs12_path(module, executable, pkcs12_path, keystore_path,
+ keystore_pass, pkcs12_pass, pkcs12_alias, cert_alias, keystore_type)
+
+ if path:
+ import_cert_path(module, executable, path, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ if url:
+ import_cert_url(module, executable, url, port, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py
new file mode 100644
index 00000000..db37bdee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/java_keystore.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: java_keystore
+short_description: Create or delete a Java keystore in JKS format.
+description:
+ - Create or delete a Java keystore in JKS format for a given certificate.
+options:
+ name:
+ type: str
+ description:
+ - Name of the certificate.
+ required: true
+ certificate:
+ type: str
+ description:
+ - Certificate that should be used to create the key store.
+ required: true
+ private_key:
+ type: str
+ description:
+ - Private key that should be used to create the key store.
+ required: true
+ private_key_passphrase:
+ description:
+ - Pass phrase for reading the private key, if required.
+ type: str
+ required: false
+ version_added: '0.2.0'
+ password:
+ type: str
+ description:
+ - Password that should be used to secure the key store.
+ required: true
+ dest:
+ type: path
+ description:
+ - Absolute path where the jks should be generated.
+ required: true
+ owner:
+ description:
+ - Name of the user that should own the jks file.
+ required: false
+ group:
+ description:
+ - Name of the group that should own the jks file.
+ required: false
+ mode:
+ description:
+ - Mode (permissions) the resulting jks file should have.
+ required: false
+ force:
+ description:
+ - Key store will be created even if it already exists.
+ required: false
+ type: bool
+ default: 'no'
+requirements: [openssl, keytool]
+author: Guillaume Grossetie (@Mogztter)
+extends_documentation_fragment:
+- files
+
+'''
+
+EXAMPLES = '''
+- name: Create a key store for the given certificate (inline)
+ community.general.java_keystore:
+ name: example
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
+ MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
+ -----END CERTIFICATE-----
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
+ GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
+ -----END RSA PRIVATE KEY-----
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a key store for the given certificate (lookup)
+ community.general.java_keystore:
+ name: example
+ certificate: "{{lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
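+
+# Additional illustrative example: use the force option to re-create the key
+# store even if it already exists.
+- name: Recreate the key store for the given certificate (force)
+ community.general.java_keystore:
+ name: example
+ certificate: "{{lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ force: yes
+ dest: /etc/security/keystore.jks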
+'''
+
+RETURN = '''
+msg:
+ description: Output from stdout of the keytool/openssl command after execution of the given command, or an error message.
+ returned: changed and failure
+ type: str
+ sample: "Unable to find the current certificate fingerprint in ..."
+
+rc:
+ description: keytool/openssl command execution return value
+ returned: changed and failure
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done
+ returned: changed and failure
+ type: str
+ sample: "openssl x509 -noout -in /tmp/cert.crt -fingerprint -sha256"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+
+
+def read_certificate_fingerprint(module, openssl_bin, certificate_path):
+ current_certificate_fingerprint_cmd = [openssl_bin, "x509", "-noout", "-in", certificate_path, "-fingerprint", "-sha256"]
+ (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = run_commands(module, current_certificate_fingerprint_cmd)
+ if rc != 0:
+ return module.fail_json(msg=current_certificate_fingerprint_out,
+ err=current_certificate_fingerprint_err,
+ rc=rc,
+ cmd=current_certificate_fingerprint_cmd)
+
+ current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out)
+ if not current_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the current certificate fingerprint in %s" % current_certificate_fingerprint_out,
+ rc=rc,
+ cmd=current_certificate_fingerprint_err
+ )
+
+ return current_certificate_match.group(1)
+
+
+def read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_password):
+ stored_certificate_fingerprint_cmd = [keytool_bin, "-list", "-alias", alias, "-keystore", keystore_path, "-storepass", keystore_password, "-v"]
+ (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = run_commands(module, stored_certificate_fingerprint_cmd)
+ if rc != 0:
+ if "keytool error: java.lang.Exception: Alias <%s> does not exist" % alias not in stored_certificate_fingerprint_out:
+ return module.fail_json(msg=stored_certificate_fingerprint_out,
+ err=stored_certificate_fingerprint_err,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd)
+ else:
+ return None
+ else:
+ stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out)
+ if not stored_certificate_match:
+ return module.fail_json(
+ msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out,
+ rc=rc,
+ cmd=stored_certificate_fingerprint_cmd
+ )
+
+ return stored_certificate_match.group(1)
+
+
+def run_commands(module, cmd, data=None, check_rc=True):
+ return module.run_command(cmd, check_rc=check_rc, data=data)
+
+
+def create_file(path, content):
+ with open(path, 'w') as f:
+ f.write(content)
+ return path
+
+
+def create_tmp_certificate(module):
+ return create_file("/tmp/%s.crt" % module.params['name'], module.params['certificate'])
+
+
+def create_tmp_private_key(module):
+ return create_file("/tmp/%s.key" % module.params['name'], module.params['private_key'])
+
+
+def cert_changed(module, openssl_bin, keytool_bin, keystore_path, keystore_pass, alias):
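+ # Compare the SHA-256 fingerprint of the provided certificate with the one
+ # stored under 'alias' in the existing keystore.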
+ certificate_path = create_tmp_certificate(module)
+ try:
+ current_certificate_fingerprint = read_certificate_fingerprint(module, openssl_bin, certificate_path)
+ stored_certificate_fingerprint = read_stored_certificate_fingerprint(module, keytool_bin, alias, keystore_path, keystore_pass)
+ return current_certificate_fingerprint != stored_certificate_fingerprint
+ finally:
+ os.remove(certificate_path)
+
+
+def create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ certificate_path = create_tmp_certificate(module)
+ private_key_path = create_tmp_private_key(module)
+ try:
+ if os.path.exists(keystore_path):
+ os.remove(keystore_path)
+
+ keystore_p12_path = "/tmp/keystore.p12"
+ if os.path.exists(keystore_p12_path):
+ os.remove(keystore_p12_path)
+
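+ # First bundle the certificate and private key into a temporary PKCS#12
+ # store with openssl; it is then imported into the destination JKS with
+ # keytool below.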
+ export_p12_cmd = [openssl_bin, "pkcs12", "-export", "-name", name, "-in", certificate_path,
+ "-inkey", private_key_path, "-out",
+ keystore_p12_path, "-passout", "stdin"]
+
+ # when keypass is provided, add -passin
+ cmd_stdin = ""
+ if keypass:
+ export_p12_cmd.append("-passin")
+ export_p12_cmd.append("stdin")
+ cmd_stdin = "%s\n" % keypass
+
+ cmd_stdin += "%s\n%s" % (password, password)
+ (rc, export_p12_out, export_p12_err) = run_commands(module, export_p12_cmd, data=cmd_stdin)
+ if rc != 0:
+ return module.fail_json(msg=export_p12_out,
+ rc=rc,
+ cmd=export_p12_cmd)
+
+ import_keystore_cmd = [keytool_bin, "-importkeystore",
+ "-destkeystore", keystore_path,
+ "-srckeystore", keystore_p12_path,
+ "-srcstoretype", "pkcs12",
+ "-alias", name,
+ "-deststorepass", password,
+ "-srcstorepass", password,
+ "-noprompt"]
+ (rc, import_keystore_out, import_keystore_err) = run_commands(module, import_keystore_cmd, data=None)
+ if rc == 0:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=True,
+ msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd,
+ stdout_lines=import_keystore_out)
+ else:
+ return module.fail_json(msg=import_keystore_out,
+ rc=rc,
+ cmd=import_keystore_cmd)
+ finally:
+ os.remove(certificate_path)
+ os.remove(private_key_path)
+
+
+def update_jks_perm(module, keystore_path):
+ try:
+ file_args = module.load_file_common_arguments(module.params, path=keystore_path)
+ except TypeError:
+ # The path argument is only supported in Ansible-base 2.10+. Fall back to
+ # pre-2.10 behavior for older Ansible versions.
+ module.params['path'] = keystore_path
+ file_args = module.load_file_common_arguments(module.params)
+ module.set_fs_attributes_if_different(file_args, False)
+
+
+def process_jks(module):
+ name = module.params['name']
+ password = module.params['password']
+ keypass = module.params['private_key_passphrase']
+ keystore_path = module.params['dest']
+ force = module.params['force']
+ openssl_bin = module.get_bin_path('openssl', True)
+ keytool_bin = module.get_bin_path('keytool', True)
+
+ if os.path.exists(keystore_path):
+ if force:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if cert_changed(module, openssl_bin, keytool_bin, keystore_path, password, name):
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+ else:
+ if not module.check_mode:
+ update_jks_perm(module, keystore_path)
+ return module.exit_json(changed=False)
+ else:
+ create_jks(module, name, openssl_bin, keytool_bin, keystore_path, password, keypass)
+
+
+class ArgumentSpec(object):
+ def __init__(self):
+ self.supports_check_mode = True
+ self.add_file_common_args = True
+ argument_spec = dict(
+ name=dict(required=True),
+ certificate=dict(required=True, no_log=True),
+ private_key=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ dest=dict(required=True, type='path'),
+ force=dict(required=False, default=False, type='bool'),
+ private_key_passphrase=dict(required=False, no_log=True, type='str')
+ )
+ self.argument_spec = argument_spec
+
+
+def main():
+ spec = ArgumentSpec()
+ module = AnsibleModule(
+ argument_spec=spec.argument_spec,
+ add_file_common_args=spec.add_file_common_args,
+ supports_check_mode=spec.supports_check_mode
+ )
+ process_jks(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py
new file mode 100644
index 00000000..ff6f9c22
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/kernel_blacklist.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+- Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+options:
+ name:
+ type: str
+ description:
+ - Name of kernel module to black- or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [ absent, present ]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+'''
+
+EXAMPLES = '''
+- name: Blacklist the nouveau driver module
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: present
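+
+# Additional illustrative example: manage an entry in a custom blacklist file.
+- name: Ensure nouveau is not blacklisted in a custom file
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: absent
+ blacklist_file: /etc/modprobe.d/blacklist-nouveau.conf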
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Blacklist(object):
+ def __init__(self, module, filename, checkmode):
+ self.filename = filename
+ self.module = module
+ self.checkmode = checkmode
+
+ def create_file(self):
+ if not self.checkmode and not os.path.exists(self.filename):
+ open(self.filename, 'a').close()
+ return True
+ elif self.checkmode and not os.path.exists(self.filename):
+ self.filename = os.devnull
+ return True
+ else:
+ return False
+
+ def get_pattern(self):
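+ # Regular expression matching a 'blacklist <name>' line; note that
+ # self.module holds the kernel module name here, not the AnsibleModule object.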
+ return r'^blacklist\s*' + self.module + '$'
+
+ def readlines(self):
+ f = open(self.filename, 'r')
+ lines = f.readlines()
+ f.close()
+ return lines
+
+ def module_listed(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ for line in lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+
+ if re.match(pattern, stripped):
+ return True
+
+ return False
+
+ def remove_module(self):
+ lines = self.readlines()
+ pattern = self.get_pattern()
+
+ if self.checkmode:
+ f = open(os.devnull, 'w')
+ else:
+ f = open(self.filename, 'w')
+
+ for line in lines:
+ if not re.match(pattern, line.strip()):
+ f.write(line)
+
+ f.close()
+
+ def add_module(self):
+ if self.checkmode:
+ f = open(os.devnull, 'a')
+ else:
+ f = open(self.filename, 'a')
+
+ f.write('blacklist %s\n' % self.module)
+
+ f.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ blacklist_file=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ args = dict(changed=False, failed=False,
+ name=module.params['name'], state=module.params['state'])
+
+ filename = '/etc/modprobe.d/blacklist-ansible.conf'
+
+ if module.params['blacklist_file']:
+ filename = module.params['blacklist_file']
+
+ blacklist = Blacklist(args['name'], filename, module.check_mode)
+
+ if blacklist.create_file():
+ args['changed'] = True
+ else:
+ args['changed'] = False
+
+ if blacklist.module_listed():
+ if args['state'] == 'absent':
+ blacklist.remove_module()
+ args['changed'] = True
+ else:
+ if args['state'] == 'present':
+ blacklist.add_module()
+ args['changed'] = True
+
+ module.exit_json(**args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py
new file mode 100644
index 00000000..919d8d7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/launchd.py
@@ -0,0 +1,514 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Martin Migasiewicz <migasiew.nk@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: launchd
+author:
+- Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+- Manage launchd services on target macOS hosts.
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - Launchd does not support C(restarted) nor C(reloaded) natively.
+ These will trigger a stop/start (restarted) or an unload/load
+ (reloaded).
+ - C(restarted) unloads and loads the service before start to ensure
+ that the latest job definition (plist) is used.
+ - C(reloaded) unloads and loads the service to ensure that the latest
+ job definition (plist) is used. Whether a service is started or
+ stopped depends on the content of the definition file.
+ type: str
+ choices: [ reloaded, restarted, started, stopped, unloaded ]
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled is required.)
+ type: bool
+ force_stop:
+ description:
+ - Whether the service should not be restarted automatically by launchd.
+ - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
+ If 'KeepAlive' is set to true, stopping a service will cause launchd to start the service again.
+ - Set this option to C(yes) to let this module change the 'KeepAlive' attribute to false.
+ type: bool
+ default: no
+notes:
+- A user must be privileged to manage services using this module.
+requirements:
+- A system managed by launchd
+- The plistlib python library
+'''
+
+EXAMPLES = r'''
+- name: Make sure spotify webhelper is started
+ community.general.launchd:
+ name: com.spotify.webhelper
+ state: started
+
+- name: Deploy custom memcached job definition
+ template:
+ src: org.memcached.plist.j2
+ dest: /Library/LaunchDaemons/org.memcached.plist
+
+- name: Run memcached
+ community.general.launchd:
+ name: org.memcached
+ state: started
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+ force_stop: yes
+
+- name: Restart memcached
+ community.general.launchd:
+ name: org.memcached
+ state: restarted
+
+- name: Unload memcached
+ community.general.launchd:
+ name: org.memcached
+ state: unloaded
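+
+# Illustrative example: enable a service at boot without changing its current
+# run state (at least one of state and enabled must be supplied).
+- name: Enable memcached to start at boot
+ community.general.launchd:
+ name: org.memcached
+ enabled: yes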
+'''
+
+RETURN = r'''
+status:
+ description: Metadata about service status
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+'''
+
+import os
+import plistlib
+from abc import ABCMeta, abstractmethod
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class ServiceState:
+ UNKNOWN = 0
+ LOADED = 1
+ STOPPED = 2
+ STARTED = 3
+ UNLOADED = 4
+
+ @staticmethod
+ def to_string(state):
+ strings = {
+ ServiceState.UNKNOWN: 'unknown',
+ ServiceState.LOADED: 'loaded',
+ ServiceState.STOPPED: 'stopped',
+ ServiceState.STARTED: 'started',
+ ServiceState.UNLOADED: 'unloaded'
+ }
+ return strings[state]
+
+
+class Plist:
+ def __init__(self, module, service):
+ self.__changed = False
+ self.__service = service
+
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+
+ # Check if readPlist is available or not
+ self.old_plistlib = hasattr(plistlib, 'readPlist')
+
+ self.__file = self.__find_service_plist(service)
+ if self.__file is None:
+ msg = 'Unable to infer the path of %s service plist file' % service
+ if pid is None and state == ServiceState.UNLOADED:
+ msg += ' and it was not found among active services'
+ module.fail_json(msg=msg)
+ self.__update(module)
+
+ @staticmethod
+ def __find_service_plist(service_name):
+ """Finds the plist file associated with a service"""
+
+ launchd_paths = [
+ os.path.expanduser('~/Library/LaunchAgents'),
+ '/Library/LaunchAgents',
+ '/Library/LaunchDaemons',
+ '/System/Library/LaunchAgents',
+ '/System/Library/LaunchDaemons'
+ ]
+
+ for path in launchd_paths:
+ try:
+ files = os.listdir(path)
+ except OSError:
+ continue
+
+ filename = '%s.plist' % service_name
+ if filename in files:
+ return os.path.join(path, filename)
+ return None
+
+ def __update(self, module):
+ self.__handle_param_enabled(module)
+ self.__handle_param_force_stop(module)
+
+ def __read_plist_file(self, module):
+ service_plist = {}
+ if self.old_plistlib:
+ return plistlib.readPlist(self.__file)
+
+ # readPlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'rb') as plist_fp:
+ service_plist = plistlib.load(plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to read plist file "
+ "%s due to %s" % (self.__file, to_native(e)))
+ return service_plist
+
+ def __write_plist_file(self, module, service_plist=None):
+ if not service_plist:
+ service_plist = {}
+
+ if self.old_plistlib:
+ plistlib.writePlist(service_plist, self.__file)
+ return
+ # writePlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'wb') as plist_fp:
+ plistlib.dump(service_plist, plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to write to plist file "
+ " %s due to %s" % (self.__file, to_native(e)))
+
+ def __handle_param_enabled(self, module):
+ if module.params['enabled'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Enable/disable service startup at boot if requested
+ # Launchctl does not expose functionality to set the RunAtLoad
+ # attribute of a job definition. So we parse and modify the job
+ # definition plist file directly for this purpose.
+ if module.params['enabled'] is not None:
+ enabled = service_plist.get('RunAtLoad', False)
+ if module.params['enabled'] != enabled:
+ service_plist['RunAtLoad'] = module.params['enabled']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def __handle_param_force_stop(self, module):
+ if module.params['force_stop'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Set KeepAlive to false in case force_stop is defined to avoid
+ # that the service gets restarted when stopping was requested.
+ if module.params['force_stop'] is not None:
+ keep_alive = service_plist.get('KeepAlive', False)
+ if module.params['force_stop'] and keep_alive:
+ service_plist['KeepAlive'] = not module.params['force_stop']
+
+ # Update the plist with one of the changes done.
+ if not module.check_mode:
+ self.__write_plist_file(module, service_plist)
+ self.__changed = True
+
+ def is_changed(self):
+ return self.__changed
+
+ def get_file(self):
+ return self.__file
+
+
+class LaunchCtlTask(object):
+ __metaclass__ = ABCMeta
+ WAITING_TIME = 5 # seconds
+
+ def __init__(self, module, service, plist):
+ self._module = module
+ self._service = service
+ self._plist = plist
+ self._launch = self._module.get_bin_path('launchctl', True)
+
+ def run(self):
+ """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc.
+ and returns the new state and pid.
+ """
+ self.runCommand()
+ return self.get_state()
+
+ @abstractmethod
+ def runCommand(self):
+ pass
+
+ def get_state(self):
+ rc, out, err = self._launchctl("list")
+ if rc != 0:
+ self._module.fail_json(
+ msg='Failed to get status of %s' % (self._service))
+
+ state = ServiceState.UNLOADED
+ service_pid = "-"
+ status_code = None
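+ # 'launchctl list' prints one line per job: PID (or '-'), last exit status and label, separated by tabs.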
+ for line in out.splitlines():
+ if line.strip():
+ pid, last_exit_code, label = line.split('\t')
+ if label.strip() == self._service:
+ service_pid = pid
+ status_code = last_exit_code
+
+ # From launchctl man page:
+ # If the number [...] is negative, it represents the
+ # negative of the signal which killed the job. Thus,
+ # "-15" would indicate that the job was terminated with
+ # SIGTERM.
+ if last_exit_code not in ['0', '-2', '-3', '-9', '-15']:
+ # Something strange happened and we have no clue in
+ # which state the service is now. Therefore we mark
+ # the service state as UNKNOWN.
+ state = ServiceState.UNKNOWN
+ elif pid != '-':
+ # PID seems to be an integer so we assume the service
+ # is started.
+ state = ServiceState.STARTED
+ else:
+ # Exit code is 0 and PID is not available so we assume
+ # the service is stopped.
+ state = ServiceState.STOPPED
+ break
+ return (state, service_pid, status_code, err)
+
+ def start(self):
+ rc, out, err = self._launchctl("start")
+ # Unfortunately launchd does not wait until the process has actually started.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def stop(self):
+ rc, out, err = self._launchctl("stop")
+ # Unfortunately launchd does not wait until the process has actually stopped.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def restart(self):
+ # TODO: check for rc, out, err
+ self.stop()
+ return self.start()
+
+ def reload(self):
+ # TODO: check for rc, out, err
+ self.unload()
+ return self.load()
+
+ def load(self):
+ return self._launchctl("load")
+
+ def unload(self):
+ return self._launchctl("unload")
+
+ def _launchctl(self, command):
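+ # 'load'/'unload' operate on the plist file path, while 'start'/'stop' take the service label;
+ # 'list' needs no extra argument.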
+ service_or_plist = self._plist.get_file() if command in [
+ 'load', 'unload'] else self._service if command in ['start', 'stop'] else ""
+
+ rc, out, err = self._module.run_command(
+ '%s %s %s' % (self._launch, command, service_or_plist))
+
+ if rc != 0:
+ msg = "Unable to %s '%s' (%s): '%s'" % (
+ command, self._service, self._plist.get_file(), err)
+ self._module.fail_json(msg=msg)
+
+ return (rc, out, err)
+
+
+class LaunchCtlStart(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state in (ServiceState.STOPPED, ServiceState.LOADED):
+ self.reload()
+ self.start()
+ elif state == ServiceState.STARTED:
+ # In case the service is already in started state but the
+ # job definition was changed we need to unload/load the
+ # service and start the service again.
+ if self._plist.is_changed():
+ self.reload()
+ self.start()
+ elif state == ServiceState.UNLOADED:
+ self.load()
+ self.start()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and start the service again.
+ self.reload()
+ self.start()
+
+
+class LaunchCtlStop(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStop, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.STOPPED:
+ # In case the service is stopped and we might later decide
+ # to start it, we need to reload the job definition by
+ # forcing an unload and load first.
+ # Afterwards we need to stop it as it might have been
+ # started again (KeepAlive or RunAtLoad).
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state in (ServiceState.STARTED, ServiceState.LOADED):
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and stop the service gracefully.
+ self.reload()
+ self.stop()
+
+
+class LaunchCtlReload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlReload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.UNLOADED:
+ # launchd throws an error if we do an unload on an already
+ # unloaded service.
+ self.load()
+ else:
+ self.reload()
+
+
+class LaunchCtlUnload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlUnload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+ self.unload()
+
+
+class LaunchCtlRestart(LaunchCtlReload):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlRestart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ super(LaunchCtlRestart, self).runCommand()
+ self.start()
+
+
+class LaunchCtlList(LaunchCtlTask):
+ def __init__(self, module, service):
+ super(LaunchCtlList, self).__init__(module, service, None)
+
+ def runCommand(self):
+ # Do nothing, the list functionality is done by the
+ # base class run method.
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
+ enabled=dict(type='bool'),
+ force_stop=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[
+ ['state', 'enabled'],
+ ],
+ )
+
+ service = module.params['name']
+ action = module.params['state']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': {},
+ }
+
+ # We will tailor the plist file in case one of the options
+ # (enabled, force_stop) was specified.
+ plist = Plist(module, service)
+ result['changed'] = plist.is_changed()
+
+ # Gather information about the service to be controlled.
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+ result['status']['previous_state'] = ServiceState.to_string(state)
+ result['status']['previous_pid'] = pid
+
+ # Map the actions to specific tasks
+ tasks = {
+ 'started': LaunchCtlStart(module, service, plist),
+ 'stopped': LaunchCtlStop(module, service, plist),
+ 'restarted': LaunchCtlRestart(module, service, plist),
+ 'reloaded': LaunchCtlReload(module, service, plist),
+ 'unloaded': LaunchCtlUnload(module, service, plist)
+ }
+
+ status_code = '0'
+ # Run the requested task
+ if not module.check_mode:
+ state, pid, status_code, err = tasks[action].run()
+
+ result['status']['current_state'] = ServiceState.to_string(state)
+ result['status']['current_pid'] = pid
+ result['status']['status_code'] = status_code
+ result['status']['error'] = err
+
+ if (result['status']['current_state'] != result['status']['previous_state'] or
+ result['status']['current_pid'] != result['status']['previous_pid']):
+ result['changed'] = True
+ if module.check_mode:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py
new file mode 100644
index 00000000..6f850791
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lbu.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+- Manage the Local Backup Utility of Alpine Linux in run-from-RAM mode.
+
+options:
+ commit:
+ description:
+ - Control whether to commit changed files.
+ type: bool
+ exclude:
+ description:
+ - List of paths to exclude.
+ type: list
+ elements: str
+ include:
+ description:
+ - List of paths to include.
+ type: list
+ elements: str
+
+author:
+- Kaarle Ritvanen (@kunkku)
+'''
+
+EXAMPLES = '''
+# Commit changed files (if any)
+- name: Commit
+ community.general.lbu:
+ commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+ community.general.lbu:
+ commit: true
+ exclude:
+ - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+ community.general.lbu:
+ include:
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+'''
+
+RETURN = '''
+msg:
+ description: Error message
+ type: str
+ returned: on failure
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os.path
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec={
+ 'commit': {'type': 'bool'},
+ 'exclude': {'type': 'list', 'elements': 'str'},
+ 'include': {'type': 'list', 'elements': 'str'}
+ },
+ supports_check_mode=True
+ )
+
+ changed = False
+
+ def run_lbu(*args):
+ code, stdout, stderr = module.run_command(
+ [module.get_bin_path('lbu', required=True)] + list(args)
+ )
+ if code:
+ module.fail_json(changed=changed, msg=stderr)
+ return stdout
+
+ update = False
+ commit = False
+
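+ # 'lbu include -l' / 'lbu exclude -l' list the currently configured paths without a leading slash,
+ # so the requested paths are normalized the same way before comparing.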
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ paths = run_lbu(param, '-l').split('\n')
+ for path in module.params[param]:
+ if os.path.normpath('/' + path)[1:] not in paths:
+ update = True
+
+ if module.params['commit']:
+ commit = update or run_lbu('status') > ''
+
+ if module.check_mode:
+ module.exit_json(changed=update or commit)
+
+ if update:
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ run_lbu(param, *module.params[param])
+ changed = True
+
+ if commit:
+ run_lbu('commit')
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py
new file mode 100644
index 00000000..27ecca8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/listen_ports_facts.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2017, Nathan Davison <ndavison85@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: listen_ports_facts
+author:
+ - Nathan Davison (@ndavison)
+description:
+ - Gather facts on processes listening on TCP and UDP ports using the netstat command.
+ - This module currently supports Linux only.
+requirements:
+ - netstat
+short_description: Gather facts on processes listening on TCP and UDP ports.
+'''
+
+EXAMPLES = r'''
+- name: Gather facts on listening ports
+ community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+ ansible.builtin.debug:
+ msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+ vars:
+ tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+ tcp_whitelist:
+ - 22
+ - 25
+ loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+ ansible.builtin.debug:
+ msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of TCP and UDP ports with listening servers
+ returned: always
+ type: complex
+ contains:
+ tcp_listen:
+ description: A list of processes that are listening on a TCP port.
+ returned: if TCP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "mysqld"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 1223
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 3306
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "tcp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "mysql"
+ udp_listen:
+ description: A list of processes that are listening on a UDP port.
+ returned: if UDP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "rsyslogd"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 609
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 514
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "udp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "root"
+'''
+
+import re
+import platform
+from ansible.module_utils._text import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def netStatParse(raw):
+ results = list()
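+ # netstat -plunt lines look like: proto recv-q send-q local-address foreign-address [state] pid/program;
+ # TCP rows carry a state column, so the pid/program field is at index 6 for tcp and index 5 for udp.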
+ for line in raw.splitlines():
+ listening_search = re.search('[^ ]+:[0-9]+', line)
+ if listening_search:
+ splitted = line.split()
+ conns = re.search('([^ ]+):([0-9]+)', splitted[3])
+ pidstr = ''
+ if 'tcp' in splitted[0]:
+ protocol = 'tcp'
+ pidstr = splitted[6]
+ elif 'udp' in splitted[0]:
+ protocol = 'udp'
+ pidstr = splitted[5]
+ pids = re.search(r'(([0-9]+)/(.*)|-)', pidstr)
+ if conns and pids:
+ address = conns.group(1)
+ port = conns.group(2)
+ if (pids.group(2)):
+ pid = pids.group(2)
+ else:
+ pid = 0
+ if (pids.group(3)):
+ name = pids.group(3)
+ else:
+ name = ''
+ result = {
+ 'pid': int(pid),
+ 'address': address,
+ 'port': int(port),
+ 'protocol': protocol,
+ 'name': name,
+ }
+ if result not in results:
+ results.append(result)
+ else:
+ raise EnvironmentError('Could not get process information for the listening ports.')
+ return results
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec={},
+ supports_check_mode=True,
+ )
+
+ if platform.system() != 'Linux':
+ module.fail_json(msg='This module requires Linux.')
+
+ def getPidSTime(pid):
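+ # 'ps -o lstart -p <pid>' prints a header line plus the long-format start time of the process.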
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+ stime = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if 'started' not in line:
+ stime = line
+ return stime
+
+ def getPidUser(pid):
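+ # 'ps -o user -p <pid>' prints a 'USER' header line plus the owning user of the process.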
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+ user = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if line != 'USER':
+ user = line
+ return user
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': {
+ 'tcp_listen': [],
+ 'udp_listen': [],
+ },
+ }
+
+ try:
+ netstat_cmd = module.get_bin_path('netstat', True)
+
+ # which ports are listening for connections?
+ rc, stdout, stderr = module.run_command([netstat_cmd, '-plunt'])
+ if rc == 0:
+ netstatOut = netStatParse(stdout)
+ for p in netstatOut:
+ p['stime'] = getPidSTime(p['pid'])
+ p['user'] = getPidUser(p['pid'])
+ if p['protocol'] == 'tcp':
+ result['ansible_facts']['tcp_listen'].append(p)
+ elif p['protocol'] == 'udp':
+ result['ansible_facts']['udp_listen'].append(p)
+ except (KeyError, EnvironmentError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py
new file mode 100644
index 00000000..9a5b84f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/locale_gen.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+- Augustus Kling (@AugustusKling)
+options:
+ name:
+ type: str
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ community.general.locale_gen:
+ name: de_CH.UTF-8
+ state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either:
+ * if the locale is present in /etc/locale.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+ fd = open(__locales_available, 'r')
+ for line in fd:
+ result = re_compiled.match(line)
+ if result and result.group('locale') == name:
+ return True
+ fd.close()
+ return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [line.replace(existing_line, new_line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+ try:
+ f = open("/etc/locale.gen", "r")
+ lines = [re.sub(search_string, new_string, line) for line in f]
+ finally:
+ f.close()
+ try:
+ f = open("/etc/locale.gen", "w")
+ f.write("".join(lines))
+ finally:
+ f.close()
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ try:
+ f = open("/var/lib/locales/supported.d/local", "r")
+ content = f.readlines()
+ finally:
+ f.close()
+ try:
+ f = open("/var/lib/locales/supported.d/local", "w")
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ f.write(line)
+ finally:
+ f.close()
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py
new file mode 100644
index 00000000..25f261ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvg.py
@@ -0,0 +1,328 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ elements: str
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ pvresize:
+ description:
+ - If C(yes), resize the physical volume to the maximum available size.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+ - If C(yes), allows removal of the volume group even if it still contains logical volumes.
+ type: bool
+ default: no
+seealso:
+- module: community.general.filesystem
+- module: community.general.lvol
+- module: community.general.parted
+notes:
+ - This module does not modify PE size for an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ community.general.lvg:
+ vg: vg.services
+ state: absent
+
+- name: Create a volume group on top of /dev/sda3 and resize the physical volume /dev/sda3 to the maximum possible
+ community.general.lvg:
+ vg: resizableVG
+ pvs: /dev/sda3
+ pvresize: yes
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
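+ # /dev/dm-N device names are translated to their /dev/mapper/ aliases so they can be
+ # compared against the device paths supplied by the user.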
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list', elements='str'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ pvresize=dict(type='bool', default=False),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pvresize = module.boolean(module.params['pvresize'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths not symlinks so replace symlinks with actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
+
+ if current_devs:
+ if state == 'present' and pvresize:
+ for device in current_devs:
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+ rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
+ dev_size = int(dev_size.replace(" ", ""))
+ rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
+ pv_size = int(pv_size.replace(" ", ""))
+ rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
+ pe_start = int(pe_start.replace(" ", ""))
+ rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
+ vg_extent_size = int(vg_extent_size.replace(" ", ""))
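+ # Resize only when the unused space at the end of the device exceeds one VG extent.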
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ rc, _, err = module.run_command([pvresize_cmd, device])
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
+ else:
+ changed = True
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py
new file mode 100644
index 00000000..fa50007e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/lvol.py
@@ -0,0 +1,566 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+options:
+ vg:
+ type: str
+ required: true
+ description:
+ - The volume group this logical volume is part of.
+ lv:
+ type: str
+ description:
+ - The name of the logical volume.
+ size:
+ type: str
+ description:
+ - The size of the logical volume, according to lvcreate(8) --size, by
+ default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
+ according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
+ Float values must begin with a digit.
+ Resizing using percentage values was not supported prior to 2.1.
+ state:
+ type: str
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ choices: [ absent, present ]
+ default: present
+ active:
+ description:
+ - Whether the volume is active and visible to the host.
+ type: bool
+ default: 'yes'
+ force:
+ description:
+ - Shrink or remove operations of volumes require this switch. Ensures that
+ filesystems never get corrupted/destroyed by mistake.
+ type: bool
+ default: 'no'
+ opts:
+ type: str
+ description:
+ - Free-form options to be passed to the lvcreate command.
+ snapshot:
+ type: str
+ description:
+ - The name of the snapshot volume.
+ pvs:
+ type: str
+ description:
+ - Comma separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ thinpool:
+ type: str
+ description:
+ - The thin pool volume name. When you want to create a thin provisioned volume, specify a thin pool volume name.
+ shrink:
+ description:
+ - Shrink if current size is higher than size requested.
+ type: bool
+ default: 'yes'
+ resizefs:
+ description:
+ - Resize the underlying filesystem together with the logical volume.
+ type: bool
+ default: 'no'
+notes:
+ - You must specify lv (when managing the state of logical volumes) or thinpool (when managing a thin provisioned volume).
+'''
+
+EXAMPLES = '''
+- name: Create a logical volume of 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+- name: Create cache pool logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
+
+- name: Create a logical volume of 512g.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
+
+- name: Create a logical volume with special options
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
+
+- name: Extend the logical volume to 1024m.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+ resizefs: true
+
+- name: Resize the logical volume to % of VG
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: yes
+
+- name: Reduce the logical volume to 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: yes
+
+- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: no
+
+- name: Remove the logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: yes
+
+- name: Create a snapshot volume of the test logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+- name: Deactivate a logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+- name: Create a deactivated logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
+
+- name: Create a thin pool of 512g
+ community.general.lvol:
+ vg: firefly
+ thinpool: testpool
+ size: 512g
+
+- name: Create a thin volume of 128g
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ thinpool: testpool
+ size: 128g
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LVOL_ENV_VARS = dict(
+ # make sure we use the C locale when running lvol-related commands
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+
+def mkversion(major, minor, patch):
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
+
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
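+ # lv_attr: the first character encodes the volume type ('t' = thin pool, 'V' = thin volume),
+ # the fifth character is the state ('a' = active).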
+ lvs.append({
+ 'name': parts[0].replace('[', '').replace(']', ''),
+ 'size': float(parts[1]),
+ 'active': (parts[2][4] == 'a'),
+ 'thinpool': (parts[2][0] == 't'),
+ 'thinvol': (parts[2][0] == 'V'),
+ })
+ return lvs
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': float(parts[1]),
+ 'free': float(parts[2]),
+ 'ext_size': float(parts[3])
+ })
+ return vgs
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str'),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str'),
+ pvs=dict(type='str'),
+ resizefs=dict(type='bool', default=False),
+ thinpool=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['lv', 'thinpool'],
+ ),
+ )
+
+ module.run_command_environ_update = LVOL_ENV_VARS
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ resizefs = module.boolean(module.params['resizefs'])
+ thinpool = module.params['thinpool']
+ size_opt = 'L'
+ size_unit = 'm'
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVCREATE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
+
+ if '%' not in size:
+ # LVCREATE(8) -L --size option unit
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1].lower()
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot:
+ # Check snapshot pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == lv or test_lv['name'] == thinpool:
+ if not test_lv['thinpool'] and not thinpool:
+ break
+ else:
+ module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
+ else:
+ module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
+ check_lv = snapshot
+
+ elif thinpool:
+ if lv:
+ # Check thin volume pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == thinpool:
+ break
+ else:
+ module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
+ check_lv = lv
+ else:
+ check_lv = thinpool
+ else:
+ check_lv = lv
+
+ for test_lv in lvs:
+ if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ # Require size argument except for snapshot of thin volumes
+ if (lv or thinpool) and not size:
+ for test_lv in lvs:
+ if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
+ break
+ else:
+ module.fail_json(msg="No size given.")
+
+ # create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ if size:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
+ elif thinpool and lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
+ elif thinpool and not lv:
+ cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, _, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ # Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+
+ # Round down to the next lowest whole physical extent
+ size_requested -= (size_requested % this_vg['ext_size'])
+
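+ # A leading '+' means the requested percentage is added on top of the current LV size.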
+ if '+' in size:
+ size_requested += this_lv['size']
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ # resize LV based on absolute values
+ tool = None
+ if float(size) > this_lv['size']:
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and float(size) < this_lv['size']:
+ if float(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py
new file mode 100644
index 00000000..7314af28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/make.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements:
+- make
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+options:
+ target:
+ description:
+ - The target to run.
+ - Typically this would be something like C(install), C(test) or C(all).
+ type: str
+ params:
+ description:
+ - Any extra parameters to pass to make.
+ type: dict
+ chdir:
+ description:
+ - Change to this directory before running make.
+ type: path
+ required: true
+ file:
+ description:
+ - Use a custom Makefile.
+ type: path
+ make:
+ description:
+ - Use a specific make binary.
+ type: path
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = r'''
+- name: Build the default target
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+
+- name: Run 'install' target as root
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: yes
+
+- name: Build 'all' target with extra arguments
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+
+- name: Build 'all' target with a custom Makefile
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ file: /some-project/Makefile
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(type='str'),
+ params=dict(type='dict'),
+ chdir=dict(type='path', required=True),
+ file=dict(type='path'),
+ make=dict(type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ make_path = module.params['make']
+ if make_path is None:
+ # Build up the invocation of `make` we are going to use
+ # For non-Linux OSes, prefer gmake (GNU make) over make
+ make_path = module.get_bin_path('gmake', required=False)
+ if not make_path:
+ # Fall back to system make
+ make_path = module.get_bin_path('make', required=True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
+
+ if module.params['file'] is not None:
+ base_command = [make_path, "-f", module.params['file'], make_target]
+ else:
+ base_command = [make_path, make_target]
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
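+ # 'make -q' (question mode) exits 0 when the target is up to date and non-zero otherwise.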
+ rc, out, err = run_command(base_command + ['-q'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module,
+ check_rc=True)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ file=module.params['file']
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py
new file mode 100644
index 00000000..1be917dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/mksysb.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see COPYING or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Kairo Araujo (@kairoaraujo)
+module: mksysb
+short_description: Generates AIX mksysb rootvg backups.
+description:
+ - This module manages a basic AIX mksysb (image) of rootvg.
+options:
+ backup_crypt_files:
+ description:
+ - Backup encrypted files.
+ type: bool
+ default: "yes"
+ backup_dmapi_fs:
+ description:
+ - Back up DMAPI filesystem files.
+ type: bool
+ default: "yes"
+ create_map_files:
+ description:
+ - Creates new MAP files.
+ type: bool
+ default: "no"
+ exclude_files:
+ description:
+ - Excludes files using C(/etc/rootvg.exclude).
+ type: bool
+ default: "no"
+ exclude_wpar_files:
+ description:
+ - Excludes WPAR files.
+ type: bool
+ default: "no"
+ extended_attrs:
+ description:
+ - Backup extended attributes.
+ type: bool
+ default: "yes"
+ name:
+ type: str
+ description:
+ - Backup name
+ required: true
+ new_image_data:
+ description:
+ - Creates a new image.data file.
+ type: bool
+ default: "yes"
+ software_packing:
+ description:
+ - Exclude files from packing option listed in
+ C(/etc/exclude_packing.rootvg).
+ type: bool
+ default: "no"
+ storage_path:
+ type: str
+ description:
+ - Storage path where the mksysb backup will be stored.
+ required: true
+ use_snapshot:
+ description:
+ - Creates backup using snapshots.
+ type: bool
+ default: "no"
+'''
+
+EXAMPLES = '''
+- name: Running a backup image mksysb
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ exclude_files: yes
+ exclude_wpar_files: yes
+'''
+
+RETURN = '''
+changed:
+ description: Return changed for mksysb actions as true or false.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backup_crypt_files=dict(type='bool', default=True),
+ backup_dmapi_fs=dict(type='bool', default=True),
+ create_map_files=dict(type='bool', default=False),
+ exclude_files=dict(type='bool', default=False),
+ exclude_wpar_files=dict(type='bool', default=False),
+ extended_attrs=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ new_image_data=dict(type='bool', default=True),
+ software_packing=dict(type='bool', default=False),
+ storage_path=dict(type='str', required=True),
+ use_snapshot=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ # Command options.
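+ # Each mapping below translates a boolean module parameter into the matching mksysb command-line flag; an empty string means the flag is simply omitted.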
+ map_file_opt = {
+ True: '-m',
+ False: ''
+ }
+
+ use_snapshot_opt = {
+ True: '-T',
+ False: ''
+ }
+
+ exclude_files_opt = {
+ True: '-e',
+ False: ''
+ }
+
+ exclude_wpar_opt = {
+ True: '-G',
+ False: ''
+ }
+
+ new_image_data_opt = {
+ True: '-i',
+ False: ''
+ }
+
+ soft_packing_opt = {
+ True: '',
+ False: '-p'
+ }
+
+ extend_attr_opt = {
+ True: '',
+ False: '-a'
+ }
+
+ crypt_files_opt = {
+ True: '',
+ False: '-Z'
+ }
+
+ dmapi_fs_opt = {
+ True: '-a',
+ False: ''
+ }
+
+ backup_crypt_files = crypt_files_opt[module.params['backup_crypt_files']]
+ backup_dmapi_fs = dmapi_fs_opt[module.params['backup_dmapi_fs']]
+ create_map_files = map_file_opt[module.params['create_map_files']]
+ exclude_files = exclude_files_opt[module.params['exclude_files']]
+ exclude_wpar_files = exclude_wpar_opt[module.params['exclude_wpar_files']]
+ extended_attrs = extend_attr_opt[module.params['extended_attrs']]
+ name = module.params['name']
+ new_image_data = new_image_data_opt[module.params['new_image_data']]
+ software_packing = soft_packing_opt[module.params['software_packing']]
+ storage_path = module.params['storage_path']
+ use_snapshot = use_snapshot_opt[module.params['use_snapshot']]
+
+ # Validate if storage_path is a valid directory.
+ if os.path.isdir(storage_path):
+ if not module.check_mode:
+ # Generates the mksysb image backup.
+ mksysb_cmd = module.get_bin_path('mksysb', True)
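+ # -X asks mksysb to expand /tmp automatically if more space is needed (per the AIX mksysb flag documentation).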
+ rc, mksysb_output, err = module.run_command(
+ "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
+ mksysb_cmd, create_map_files, use_snapshot, exclude_files,
+ exclude_wpar_files, software_packing, extended_attrs,
+ backup_crypt_files, backup_dmapi_fs, new_image_data,
+ storage_path, name))
+ if rc == 0:
+ module.exit_json(changed=True, msg=mksysb_output)
+ else:
+ module.fail_json(msg="mksysb failed.", rc=rc, err=err)
+
+ module.exit_json(changed=True)
+
+ else:
+ module.fail_json(msg="Storage path %s is not valid." % storage_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py
new file mode 100644
index 00000000..0ab75235
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/modprobe.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, David Stygstra <david.stygstra@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Load or unload kernel modules
+author:
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
+description:
+ - Load or unload kernel modules.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [ absent, present ]
+ default: present
+ params:
+ type: str
+ description:
+ - Module parameters.
+ default: ''
+'''
+
+EXAMPLES = '''
+- name: Add the 802.1q module
+ community.general.modprobe:
+ name: 8021q
+ state: present
+
+- name: Add the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
+'''
+
+import os.path
+import shlex
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ params=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ params = module.params['params']
+ state = module.params['state']
+
+ # FIXME: Adding all parameters as result values is useless
+ result = dict(
+ changed=False,
+ name=name,
+ params=params,
+ state=state,
+ )
+
+ # Check if module is present
+ try:
+ present = False
+ with open('/proc/modules') as modules:
+ module_name = name.replace('-', '_') + ' '
+ for line in modules:
+ if line.startswith(module_name):
+ present = True
+ break
+ if not present:
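+ # Not listed in /proc/modules: the module may be built into the running kernel, so check modules.builtin for this kernel release.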
+ command = [module.get_bin_path('uname', True), '-r']
+ rc, uname_kernel_release, err = module.run_command(command)
+ module_file = '/' + name + '.ko'
+ builtin_path = os.path.join('/lib/modules/', uname_kernel_release.strip(),
+ 'modules.builtin')
+ with open(builtin_path) as builtins:
+ for line in builtins:
+ if line.endswith(module_file):
+ present = True
+ break
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)
+
+ # Add/remove module as needed
+ if state == 'present':
+ if not present:
+ if not module.check_mode:
+ command = [module.get_bin_path('modprobe', True), name]
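+ # params is a single string; shlex.split() turns it into separate arguments (for example, 'numdummies=2' becomes ['numdummies=2']).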
+ command.extend(shlex.split(params))
+ rc, out, err = module.run_command(command)
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+ elif state == 'absent':
+ if present:
+ if not module.check_mode:
+ rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py
new file mode 100644
index 00000000..0f7de471
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/nosh.py
@@ -0,0 +1,537 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Thomas Caravia <taca@kadisius.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nosh
+author:
+ - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [ started, stopped, reset, restarted, reloaded ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ C(restarted) will always bounce the service.
+ C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is
+ enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file
+ preference or running state. Mutually exclusive with I(preset). Will take
+ effect prior to I(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service according to local preferences in *.preset files.
+ Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
+ effect prior to I(state=reset).
+ user:
+ required: false
+ default: 'no'
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than
+ the system-wide service manager.
+requirements:
+ - A system with an active nosh service manager, see Notes for further information.
+notes:
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+'''
+
+EXAMPLES = '''
+- name: Start dnscache if not running
+ community.general.nosh: name=dnscache state=started
+
+- name: Stop mpd, if running
+ community.general.nosh: name=mpd state=stopped
+
+- name: Restart unbound or start it if not already running
+ community.general.nosh:
+ name: unbound
+ state: restarted
+
+- name: Reload fail2ban or start it if not already running
+ community.general.nosh:
+ name: fail2ban
+ state: reloaded
+
+- name: Disable nsd
+ community.general.nosh: name=nsd enabled=no
+
+- name: For package installers, set nginx running state according to local enable settings, preset and reset
+ community.general.nosh: name=nginx preset=True state=reset
+
+- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is
+ community.general.nosh: name=reboot state=started
+
+- name: Using conditionals with the module facts
+ tasks:
+ - name: Obtain information on tinydns service
+ community.general.nosh: name=tinydns
+ register: result
+
+ - name: Fail if service not loaded
+ ansible.builtin.fail: msg="The {{ result.name }} service is not loaded"
+ when: not result.status
+
+ - name: Fail if service is running
+ ansible.builtin.fail: msg="The {{ result.name }} service is running"
+ when: result.status and result.status['DaemontoolsEncoreState'] == "running"
+'''
+
+RETURN = '''
+name:
+ description: name used to find the service
+ returned: success
+ type: str
+ sample: "sshd"
+service_path:
+ description: resolved path for the service
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
+enabled:
+ description: whether the service is enabled at system bootstrap
+ returned: success
+ type: bool
+ sample: True
+preset:
+ description: whether the enabled status reflects the one set in the relevant C(*.preset) file
+ returned: success
+ type: bool
+ sample: 'False'
+state:
+ description: service process run state, C(None) if the service is not loaded and will not be started
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
+status:
+ description: a dictionary with the key=value pairs returned by `system-control show-json` or C(None) if the service is not loaded
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: True
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: '[]'
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+user:
+ description: whether the user-level service manager is called
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils._text import to_native
+
+
+def run_sys_ctl(module, args):
+ sys_ctl = [module.get_bin_path('system-control', required=True)]
+ if module.params['user']:
+ sys_ctl = sys_ctl + ['--user']
+ return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+ (rc, out, err) = run_sys_ctl(module, ['find', service])
+ # fail if service not found
+ if rc != 0:
+ fail_if_missing(module, False, service, msg='host')
+ else:
+ return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+ return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+ return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+ return rc == 0
+
+
+def get_service_status(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+ # will fail if the service is not loaded
+ if err is not None and err:
+ module.fail_json(msg=err)
+ else:
+ json_out = json.loads(to_native(out).strip())
+ status = json_out[service_path] # descend past service path header
+ return status
+
+
+def service_is_running(service_status):
+ return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+ """Enable or disable a service as needed.
+
+ - 'preset' will set the enabled state according to available preset file settings.
+ - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+ These options are set to "mutually exclusive" but the explicit 'enabled' option will
+ have priority if the check is bypassed.
+ """
+
+ # computed prior in control flow
+ preset = result['preset']
+ enabled = result['enabled']
+
+ # preset, effect only if option set to true (no reverse preset)
+ if module.params['preset']:
+ action = 'preset'
+
+ # run preset if needed
+ if preset != module.params['preset']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['preset'] = not preset
+ result['enabled'] = not enabled
+
+ # enabled/disabled state
+ if module.params['enabled'] is not None:
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['enabled'] = not enabled
+ result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+ """Set service running state as needed.
+
+ Takes into account the fact that a service may not be loaded (no supervise directory) in
+ which case it is 'stopped' as far as the service manager is concerned. No status information
+ can be obtained and the service can only be 'started'.
+ """
+ # default to desired state, no action
+ result['state'] = module.params['state']
+ state = module.params['state']
+ action = None
+
+ # computed prior in control flow, possibly modified by handle_enabled()
+ enabled = result['enabled']
+
+ # service not loaded -> not started by manager, no status information
+ if not service_is_loaded(module, service_path):
+ if state in ['started', 'restarted', 'reloaded']:
+ action = 'start'
+ result['state'] = 'started'
+ elif state == 'reset':
+ if enabled:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ result['state'] = None
+ else:
+ result['state'] = None
+
+ # service is loaded
+ else:
+ # get status information
+ result['status'] = get_service_status(module, service_path)
+ running = service_is_running(result['status'])
+
+ if state == 'started':
+ if not running:
+ action = 'start'
+ elif state == 'stopped':
+ if running:
+ action = 'stop'
+ # reset = start/stop according to enabled status
+ elif state == 'reset':
+ if enabled is not running:
+ if running:
+ action = 'stop'
+ result['state'] = 'stopped'
+ else:
+ action = 'start'
+ result['state'] = 'started'
+ # start if not running, 'service' module constraint
+ elif state == 'restarted':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'condrestart'
+ # start if not running, 'service' module constraint
+ elif state == 'reloaded':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'hangup'
+
+ # change state as needed
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ preset=dict(type='bool'),
+ user=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['enabled', 'preset']],
+ )
+
+ service = module.params['name']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': None,
+ }
+
+ # check service can be found (or fail) and get path
+ service_path = get_service_path(module, service)
+
+ # get preliminary service facts
+ result['service_path'] = service_path
+ result['user'] = module.params['user']
+ result['enabled'] = service_is_enabled(module, service_path)
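+ # 'preset' is True when the current enabled state already matches what the *.preset files would select.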
+ result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
+
+ # set enabled state, service need not be loaded
+ if module.params['enabled'] is not None or module.params['preset']:
+ handle_enabled(module, result, service_path)
+
+ # set service running state
+ if module.params['state'] is not None:
+ handle_state(module, result, service_path)
+
+ # get final service status if possible
+ if service_is_loaded(module, service_path):
+ result['status'] = get_service_status(module, service_path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py
new file mode 100644
index 00000000..64092fd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ohai.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
+ (U(https://docs.chef.io/ohai.html)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve (ohai) data from all Web servers and store in one file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
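+
+# A playbook equivalent (sketch; the module takes no options and the
+# registered variable name is illustrative):
+- name: Gather Ohai data and register the result
+ community.general.ohai:
+ register: ohai_facts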
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
new file mode 100644
index 00000000..222bb82f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/open_iscsi.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: open_iscsi
+author:
+- Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+ - Discover targets on given portal, (dis)connect targets, mark targets to
+ manually or auto start, return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+options:
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ ip ]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: 3260
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [ name, targetname ]
+ login:
+ description:
+ - Whether the target node should be connected.
+ type: bool
+ aliases: [ state ]
+ node_auth:
+ description:
+ - The value for C(discovery.sendtargets.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(discovery.sendtargets.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(discovery.sendtargets.auth.password).
+ type: str
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [ automatic ]
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iSCSI database.
+ - Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup)
+ to manual, so combining it with C(auto_node_startup=yes) will always report
+ a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Perform a discovery on sun.com and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ portal: sun.com
+
+- name: Perform a discovery on 10.1.2.3 and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: yes
+ discover: yes
+ ip: 10.1.2.3
+
+# NOTE: Only works if exactly one target is exported to the initiator
+- name: Discover targets on portal and login to the one available
+ community.general.open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: yes
+ discover: yes
+
+- name: Connect to the named target, after updating the local persistent database (cache)
+ community.general.open_iscsi:
+ login: yes
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Disconnect from the cached named target
+ community.general.open_iscsi:
+ login: no
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import os
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+ISCSIADM = 'iscsiadm'
+
+
+def compare_nodelists(l1, l2):
+ l1.sort()
+ l2.sort()
+ return l1 == l2
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+ cmd = '%s --mode node' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ nodes = []
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+ # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ nodes = []
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+ cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_loggedon(module, target):
+ cmd = '%s --mode session' % iscsiadm_cmd
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ return target in out
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value)
+ (rc, out, err) = module.run_command(cmd)
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target)
+ if portal is not None and port is not None:
+ cmd += ' --portal %s:%s' % (portal, port)
+
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_logout(module, target):
+ cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_device_node(module, target):
+ # if anyone knows a better way to find out which device nodes get created for
+ # a given target...
+
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target):
+ cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setauto(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_setmanual(module, target):
+ cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc > 0:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def main():
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # target
+ portal=dict(type='str', aliases=['ip']),
+ port=dict(type='str', default='3260'),
+ target=dict(type='str', aliases=['name', 'targetname']),
+ node_auth=dict(type='str', default='CHAP'),
+ node_user=dict(type='str'),
+ node_pass=dict(type='str', no_log=True),
+
+ # actions
+ login=dict(type='bool', aliases=['state']),
+ auto_node_startup=dict(type='bool', aliases=['automatic']),
+ discover=dict(type='bool', default=False),
+ show_nodes=dict(type='bool', default=False),
+ ),
+
+ required_together=[['node_user', 'node_pass']],
+ supports_check_mode=True,
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ if portal:
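+ # Resolve the portal to an IP address; cached node entries are keyed by address, not hostname.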
+ try:
+ portal = socket.getaddrinfo(portal, None)[0][4][0]
+ except socket.gaierror:
+ module.fail_json(msg="Portal address is incorrect")
+
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {}
+ result['changed'] = False
+
+ if discover:
+ if portal is None:
+ module.fail_json(msg="Need to specify at least the portal (ip) to discover")
+ elif check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg="Need to specify a target")
+ else:
+ target = nodes[0]
+ else:
+ # check given target is in cache
+ check_target = False
+ for node in nodes:
+ if node == target:
+ check_target = True
+ break
+ if not check_target:
+ module.fail_json(msg="Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(module, target)
+ elif not check:
+ if login:
+ target_login(module, target, portal, port)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(module, target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py
new file mode 100644
index 00000000..817ed9f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/openwrt_init.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2016, Andrew Gaffney <andrew@agaffney.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+short_description: Manage services on OpenWrt.
+description:
+ - Controls OpenWrt services on remote hosts.
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ enabled:
+ description:
+ - Whether the service should start on boot. B(At least one of state and enabled are required.)
+ type: bool
+ pattern:
+ type: str
+ description:
+ - If the service does not respond to the 'running' command, name a
+ substring to look for as would be found in the output of the I(ps)
+ command as a stand-in for a 'running' result. If the string is found,
+ the service will be assumed to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+ community.general.openwrt_init:
+ state: started
+ name: httpd
+
+- name: Stop service cron, if running
+ community.general.openwrt_init:
+ name: cron
+ state: stopped
+
+- name: Reload service httpd, in all cases
+ community.general.openwrt_init:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd
+ community.general.openwrt_init:
+ name: httpd
+ enabled: yes
+'''
+
+RETURN = '''
+'''
+
+import os
+import glob
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_bytes, to_native
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+ (rc, out, err) = module.run_command("%s enabled" % init_script)
+ if rc == 0:
+ return True
+ return False
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ pattern=dict(type='str', required=False, default=None),
+ ),
+ supports_check_mode=True,
+ required_one_of=[['state', 'enabled']],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+ # this should be busybox ps, so we only want/need the 'w' (wide output) option
+ (rc, psout, pserr) = module.run_command('%s w' % psbin)
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
+ for line in lines:
+ if module.params['pattern'] in line and "pattern=" not in line:
+ # so as to not confuse ./hacking/test-module.py
+ running = True
+ break
+ else:
+ (rc, out, err) = module.run_command("%s running" % init_script)
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command("%s %s" % (init_script, action))
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py
new file mode 100644
index 00000000..a0362908
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/osx_defaults.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com> (DO NOT CONTACT!)
+# Copyright: (c) 2019, Ansible project
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+ - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - macOS applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications are not running (such as default font for new
+ documents, or the position of an Info panel).
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form C(com.companyname.appname).
+ type: str
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply.
+ - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ type: str
+ key:
+ description:
+ - The key of the user preference.
+ type: str
+ type:
+ description:
+ - The type of value to write.
+ type: str
+ choices: [ array, bool, boolean, date, float, int, integer, string ]
+ default: string
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ type: bool
+ default: no
+ value:
+ description:
+ - The value to write.
+ - Only required when C(state=present).
+ type: raw
+ state:
+ description:
+ - The state of the user defaults.
+ - If set to C(list), will query the given parameter specified by C(key). Returns C(null) if nothing is found or the key is misspelled.
+ - C(list) added in version 2.8.
+ type: str
+ choices: [ absent, list, present ]
+ default: present
+ path:
+ description:
+ - The path in which to search for C(defaults).
+ type: str
+ default: /usr/bin:/usr/local/bin
+notes:
+ - macOS caches defaults. You may need to log out and log back in to apply the changes.
+'''
+
+EXAMPLES = r'''
+# TODO: Describe what happens in each example
+
+- community.general.osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
+- community.general.osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
+- community.general.osx_defaults:
+ domain: /Library/Preferences/com.apple.SoftwareUpdate
+ key: AutomaticCheckEnabled
+ type: int
+ value: 1
+ become: yes
+
+- community.general.osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
+- community.general.osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
+- community.general.osx_defaults:
+ key: AppleLanguages
+ type: array
+ value:
+ - en
+ - nl
+
+- community.general.osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
+'''
+
+from datetime import datetime
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import binary_type, text_type
+
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ def __init__(self, msg):
+ self.message = msg
+
+
+# /exceptions -------------------------------------------------------------- }}}
+
+# class MacDefaults -------------------------------------------------------- {{{
+class OSXDefaults(object):
+ """ Class to manage Mac OS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ def __init__(self, module):
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ # Initial var for storing current defaults value
+ self.current_value = None
+ self.module = module
+ self.domain = module.params['domain']
+ self.host = module.params['host']
+ self.key = module.params['key']
+ self.type = module.params['type']
+ self.array_add = module.params['array_add']
+ self.value = module.params['value']
+ self.state = module.params['state']
+ self.path = module.params['path']
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # Ensure the value is the correct type
+ if self.state != 'absent':
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ @staticmethod
+ def is_int(value):
+ as_str = str(value)
+ if (as_str.startswith("-")):
+ return as_str[1:].isdigit()
+ else:
+ return as_str.isdigit()
+
+ @staticmethod
+ def _convert_type(data_type, value):
+ """ Converts value to given type """
+ if data_type == "string":
+ return str(value)
+ elif data_type in ["bool", "boolean"]:
+ if isinstance(value, (binary_type, text_type)):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif data_type == "date":
+ try:
+ return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+ "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif data_type in ["int", "integer"]:
+ if not OSXDefaults.is_int(value):
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif data_type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif data_type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+ def _host_args(self):
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ def _base_command(self):
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ return [self.executable] + self._host_args()
+
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+ """ Converts array output from defaults to an list """
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+ value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+ return value
+
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ def read(self):
+ """ Reads value of this domain & key from defaults """
+ # First try to find out the type
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+ # If the RC is not 0, then something terrible happened! Ooooh nooo!
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % out)
+
+ # Ok, lets parse the type from output
+ data_type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+ # A non-zero RC at this point is kinda strange...
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % out)
+
+ # Convert string to list when type is array
+ if data_type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(data_type, out)
+
+ def write(self):
+ """ Writes value to this domain & key to defaults """
+ # We need to convert some values so the defaults commandline understands it
+ if isinstance(self.value, bool):
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif isinstance(self.value, (int, float)):
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
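+ # When appending to an existing array, only write the elements that are not already present, so repeated runs stay idempotent.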
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % out)
+
+ def delete(self):
+ """ Deletes defaults key from domain """
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % out)
+
+ # /commands ----------------------------------------------------------- }}}
+
+ # run ----------------------------------------------------------------- {{{
+ """ Does the magic! :) """
+
+ def run(self):
+
+ # Get the current value from defaults
+ self.read()
+
+ if self.state == 'list':
+ self.module.exit_json(key=self.key, value=self.current_value)
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+ # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', default='NSGlobalDomain'),
+ host=dict(type='str'),
+ key=dict(type='str'),
+ type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+ array_add=dict(type='bool', default=False),
+ value=dict(type='raw'),
+ state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+ path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+ ),
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['value']),
+ ),
+ )
+
+ try:
+ defaults = OSXDefaults(module=module)
+ module.exit_json(changed=defaults.run())
+ except OSXDefaultsException as e:
+ module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py
new file mode 100644
index 00000000..c63493ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pam_limits.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pam_limits
+author:
+ - "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The C(pam_limits) module modifies PAM limits. The default file is
+ C(/etc/security/limits.conf). For the full documentation, see C(man 5
+ limits.conf).
+options:
+ domain:
+ type: str
+ description:
+ - A username, @groupname, wildcard, uid/gid range.
+ required: true
+ limit_type:
+ type: str
+ description:
+ - Limit type, see C(man 5 limits.conf) for an explanation
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ type: str
+ description:
+ - The limit to be set
+ required: true
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
+ value:
+ type: str
+ description:
+ - The value of the limit.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ type: bool
+ default: "no"
+ use_min:
+ description:
+ - If set to C(yes), the minimal value will be used or conserved.
+ If the specified value is lower than the value in the file, the file content is replaced with the new value,
+ else the content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ use_max:
+ description:
+ - If set to C(yes), the maximal value will be used or conserved.
+ If the specified value is superior to the value in the file, file content is replaced with the new value,
+ else content is not modified.
+ required: false
+ type: bool
+ default: "no"
+ dest:
+ type: str
+ description:
+ - Modify the limits.conf path.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ type: str
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+notes:
+ - If C(dest) file doesn't exist, it is created.
+'''
+
+EXAMPLES = '''
+- name: Add or modify nofile soft limit for the user joe
+ community.general.pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+ community.general.pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: yes
+
+- name: Add or modify memlock, both soft and hard, limit for the user james with a comment.
+ community.general.pam_limits:
+ domain: james
+ limit_type: '-'
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
+
+- name: Add or modify hard nofile limits for wildcard domain
+ community.general.pam_limits:
+ domain: '*'
+ limit_type: hard
+ limit_item: nofile
+ value: 39693561
+'''
+
+import os
+import os.path
+import tempfile
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
+
+ pam_types = ['soft', 'hard', '-']
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ domain=dict(required=True, type='str'),
+ limit_type=dict(required=True, type='str', choices=pam_types),
+ limit_item=dict(required=True, type='str', choices=pam_items),
+ value=dict(required=True, type='str'),
+ use_max=dict(default=False, type='bool'),
+ use_min=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ dest=dict(default=limits_conf, type='str'),
+ comment=dict(required=False, default='', type='str')
+ )
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
+ else:
+ limits_conf_dir = os.path.dirname(limits_conf)
+ if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
+ open(limits_conf, 'a').close()
+ changed = True
+ else:
+ module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+        module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or a positive number. Refer to manual pages for more details.")
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
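+    # Collapse runs of whitespace so each limits.conf entry can be split into single-space-separated fields below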
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'rb')
+ # Tempfile
+ nf = tempfile.NamedTemporaryFile(mode='w+')
+
+ found = False
+ new_value = value
+
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except Exception:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
+ line_fields = newline.split(' ')
+
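+        # A valid limits.conf entry has exactly four whitespace-separated fields: <domain> <type> <item> <value>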
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
+
+ if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
+ module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+
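+            # use_max keeps the larger of the requested and existing values (an unlimited value counts as the largest);
+            # use_min keeps the smaller, and a requested unlimited value never replaces an existing numeric value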
+ if use_max:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(max(int(value), int(actual_value)))
+ elif actual_value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ if use_min:
+ if value.isdigit() and actual_value.isdigit():
+ new_value = str(min(int(value), int(actual_value)))
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = value
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+ # Copy tempfile to newfile
+ module.atomic_move(nf.name, f.name)
+
+ try:
+ nf.close()
+ except Exception:
+ pass
+
+ res_args = dict(
+ changed=changed, msg=message
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py
new file mode 100644
index 00000000..45f00826
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pamd.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: pamd
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Manage PAM Modules
+description:
+ - Edit PAM service's type, control, module path and module arguments.
+ - In order for a PAM rule to be modified, the type, control and
+    module_path must match an existing rule. See C(man 5 pam.d) for details.
+options:
+ name:
+ description:
+ - The name generally refers to the PAM service file to
+ change, for example system-auth.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ control:
+ description:
+ - The control of the PAM rule being modified.
+ - This may be a complicated control with brackets. If this is the case, be
+ sure to put "[bracketed controls]" in quotes.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ module_path:
+ description:
+ - The module path of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ new_type:
+ description:
+ - The new type to assign to the new rule.
+ type: str
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ new_control:
+ description:
+ - The new control to assign to the new rule.
+ type: str
+ new_module_path:
+ description:
+ - The new module path to be assigned to the new rule.
+ type: str
+ module_arguments:
+ description:
+ - When state is C(updated), the module_arguments will replace existing module_arguments.
+ - When state is C(args_absent) args matching those listed in module_arguments will be removed.
+ - When state is C(args_present) any args listed in module_arguments are added if
+ missing from the existing rule.
+ - Furthermore, if the module argument takes a value denoted by C(=),
+ the value will be changed to that specified in module_arguments.
+ type: list
+ elements: str
+ state:
+ description:
+ - The default of C(updated) will modify an existing rule if type,
+ control and module_path all match an existing rule.
+ - With C(before), the new rule will be inserted before a rule matching type,
+ control and module_path.
+    - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
+ control and module_path.
+ - With either C(before) or C(after) new_type, new_control, and new_module_path must all be specified.
+ - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
+ - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+ type: str
+ choices: [ absent, before, after, args_absent, args_present, updated ]
+ default: updated
+ path:
+ description:
+ - This is the path to the PAM service files.
+ type: path
+ default: /etc/pam.d
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = r'''
+- name: Update pamd rule's control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_control: sufficient
+
+- name: Update pamd rule's complex control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ new_control: '[success=2 default=ignore]'
+
+- name: Insert a new rule before an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_type: auth
+ new_control: sufficient
+ new_module_path: pam_faillock.so
+ state: before
+
+- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an existing rule pam_rootok.so
+ community.general.pamd:
+ name: su
+ type: auth
+ control: sufficient
+ module_path: pam_rootok.so
+ new_type: auth
+ new_control: required
+ new_module_path: pam_wheel.so
+ module_arguments: 'use_uid'
+ state: after
+
+- name: Remove module arguments from an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: ''
+ state: updated
+
+- name: Replace all module arguments in an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'preauth
+ silent
+ deny=3
+ unlock_time=604800
+ fail_interval=900'
+ state: updated
+
+- name: Remove specific arguments from a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_absent
+
+- name: Ensure specific arguments are present in a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_present
+
+- name: Ensure specific arguments are present in a rule (alternative)
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments:
+ - crond
+ - quiet
+ state: args_present
+
+- name: Module arguments requiring commas must be listed as a Yaml list
+ community.general.pamd:
+ name: special-module
+ type: account
+ control: required
+ module_path: pam_access.so
+ module_arguments:
+ - listsep=,
+ state: args_present
+
+- name: Update specific argument value in a rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'fail_interval=300'
+ state: args_present
+
+- name: Add pam common-auth rule for duo
+ community.general.pamd:
+ name: common-auth
+ new_type: auth
+ new_control: '[success=1 default=ignore]'
+ new_module_path: '/lib64/security/pam_duo.so'
+ state: after
+ type: auth
+ module_path: pam_sss.so
+ control: 'requisite'
+'''
+
+RETURN = r'''
+change_count:
+ description: How many rules were changed.
+ type: int
+ sample: 1
+ returned: success
+new_rule:
+ description: The changes to the rule. This was available in Ansible 2.4 and Ansible 2.5. It was removed in Ansible 2.6.
+ type: str
+ sample: None None None sha512 shadow try_first_pass use_authtok
+ returned: success
+updated_rule_(n):
+ description: The rule(s) that was/were changed. This is only available in
+ Ansible 2.4 and was removed in Ansible 2.5.
+ type: str
+ sample:
+ - password sufficient pam_unix.so sha512 shadow try_first_pass
+ use_authtok
+ returned: success
+action:
+ description:
+    - "The action that was taken and is one of: update_rule,
+ insert_before_rule, insert_after_rule, args_present, args_absent,
+ absent. This was available in Ansible 2.4 and removed in Ansible 2.8"
+ returned: always
+ type: str
+ sample: "update_rule"
+dest:
+ description:
+ - "Path to pam.d service that was changed. This is only available in
+ Ansible 2.3 and was removed in Ansible 2.4."
+ returned: success
+ type: str
+ sample: "/etc/pam.d/system-auth"
+backupdest:
+ description:
+ - "The file name of the backup file, if created."
+ returned: success
+ type: str
+...
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
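+# Matches one pam.d rule line: an optional leading '-', the rule type, the control
+# (either a single keyword or a bracketed compound control), the module path and any trailing arguments.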
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+ (?P<control>\[.*\]|\S*)\s+
+ (?P<path>\S*)\s*
+ (?P<args>.*)\s*""", re.X)
+
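+# Splits an argument string into tokens, keeping bracketed arguments together as single tokens.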
+RULE_ARG_REGEX = re.compile(r"""(\[.*\]|\S*)""")
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+ def __init__(self, line):
+ self.line = line
+ self.prev = None
+ self.next = None
+
+ @property
+ def is_valid(self):
+ if self.line.strip() == '':
+ return True
+ return False
+
+ def validate(self):
+ if not self.is_valid:
+ return False, "Rule is not valid " + self.line
+ return True, "Rule is valid " + self.line
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return False
+
+ def __str__(self):
+ return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+ pass
+
+
+class PamdComment(PamdLine):
+
+ def __init__(self, line):
+ super(PamdComment, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('#'):
+ return True
+ return False
+
+
+class PamdInclude(PamdLine):
+ def __init__(self, line):
+ super(PamdInclude, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('@include'):
+ return True
+ return False
+
+
+class PamdRule(PamdLine):
+
+ valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+ valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+ 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+ 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+ 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+ 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+ 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+ 'incomplete', 'default']
+ valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+ def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+ self.prev = None
+ self.next = None
+ self._control = None
+ self._args = None
+ self.rule_type = rule_type
+ self.rule_control = rule_control
+
+ self.rule_path = rule_path
+ self.rule_args = rule_args
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ if (rule_type == self.rule_type and
+ rule_control == self.rule_control and
+ rule_path == self.rule_path):
+ return True
+ return False
+
+ @classmethod
+ def rule_from_string(cls, line):
+ rule_match = RULE_REGEX.search(line)
+ rule_args = parse_module_arguments(rule_match.group('args'))
+ return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
+
+ def __str__(self):
+ if self.rule_args:
+ return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
+ return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
+
+ @property
+ def rule_control(self):
+ if isinstance(self._control, list):
+ return '[' + ' '.join(self._control) + ']'
+ return self._control
+
+ @rule_control.setter
+ def rule_control(self, control):
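+        # Bracketed compound controls are stored internally as a list of value=action tokens; simple controls are stored as a plain string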
+ if control.startswith('['):
+ control = control.replace(' = ', '=').replace('[', '').replace(']', '')
+ self._control = control.split(' ')
+ else:
+ self._control = control
+
+ @property
+ def rule_args(self):
+ if not self._args:
+ return []
+ return self._args
+
+ @rule_args.setter
+ def rule_args(self, args):
+ self._args = parse_module_arguments(args)
+
+ @property
+ def line(self):
+ return str(self)
+
+ @classmethod
+ def is_action_unsigned_int(cls, string_num):
+ number = 0
+ try:
+ number = int(string_num)
+ except ValueError:
+ return False
+
+ if number >= 0:
+ return True
+ return False
+
+ @property
+ def is_valid(self):
+ return self.validate()[0]
+
+ def validate(self):
+ # Validate the rule type
+ if self.rule_type not in VALID_TYPES:
+ return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
+ # Validate the rule control
+ if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
+ return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
+ elif isinstance(self._control, list):
+ for control in self._control:
+ value, action = control.split("=")
+ if value not in PamdRule.valid_control_values:
+ return False, "Rule control value, " + value + ", is not valid in rule " + self.line
+ if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
+ return False, "Rule control action, " + action + ", is not valid in rule " + self.line
+
+ # TODO: Validate path
+
+ return True, "Rule is valid " + self.line
+
+
+# PamdService encapsulates an entire service and contains one or more rules. It seems the best way is to do this
+# as a doubly linked list.
+class PamdService(object):
+
+ def __init__(self, content):
+ self._head = None
+ self._tail = None
+ for line in content.splitlines():
+ if line.lstrip().startswith('#'):
+ pamd_line = PamdComment(line)
+ elif line.lstrip().startswith('@include'):
+ pamd_line = PamdInclude(line)
+ elif line.strip() == '':
+ pamd_line = PamdEmptyLine(line)
+ else:
+ pamd_line = PamdRule.rule_from_string(line)
+
+ self.append(pamd_line)
+
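+    # Append a parsed line to the tail of the doubly linked list that backs the service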
+ def append(self, pamd_line):
+ if self._head is None:
+ self._head = self._tail = pamd_line
+ else:
+ pamd_line.prev = self._tail
+ pamd_line.next = None
+ self._tail.next = pamd_line
+ self._tail = pamd_line
+
+ def remove(self, rule_type, rule_control, rule_path):
+ current_line = self._head
+ changed = 0
+
+ while current_line is not None:
+ if current_line.matches(rule_type, rule_control, rule_path):
+ if current_line.prev is not None:
+ current_line.prev.next = current_line.next
+ if current_line.next is not None:
+ current_line.next.prev = current_line.prev
+ else:
+ self._head = current_line.next
+ current_line.next.prev = None
+ changed += 1
+
+ current_line = current_line.next
+ return changed
+
+ def get(self, rule_type, rule_control, rule_path):
+ lines = []
+ current_line = self._head
+ while current_line is not None:
+
+ if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
+ lines.append(current_line)
+
+ current_line = current_line.next
+
+ return lines
+
+ def has_rule(self, rule_type, rule_control, rule_path):
+ if self.get(rule_type, rule_control, rule_path):
+ return True
+ return False
+
+ def update_rule(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ new_args = parse_module_arguments(new_args)
+
+ changes = 0
+ for current_rule in rules_to_find:
+ rule_changed = False
+ if new_type:
+ if(current_rule.rule_type != new_type):
+ rule_changed = True
+ current_rule.rule_type = new_type
+ if new_control:
+ if(current_rule.rule_control != new_control):
+ rule_changed = True
+ current_rule.rule_control = new_control
+ if new_path:
+ if(current_rule.rule_path != new_path):
+ rule_changed = True
+ current_rule.rule_path = new_path
+ if new_args:
+ if(current_rule.rule_args != new_args):
+ rule_changed = True
+ current_rule.rule_args = new_args
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def insert_before(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist before the existing rule
+ # 2. The new rule exists
+
+ for current_rule in rules_to_find:
+ # Create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ # First we'll get the previous rule.
+ previous_rule = current_rule.prev
+
+ # Next we may have to loop backwards if the previous line is a comment. If it
+ # is, we'll get the previous "rule's" previous.
+ while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)):
+ previous_rule = previous_rule.prev
+ # Next we'll see if the previous rule matches what we are trying to insert.
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
+ # First set the original previous rule's next to the new_rule
+ previous_rule.next = new_rule
+ # Second, set the new_rule's previous to the original previous
+ new_rule.prev = previous_rule
+ # Third, set the new rule's next to the current rule
+ new_rule.next = current_rule
+ # Fourth, set the current rule's previous to the new_rule
+ current_rule.prev = new_rule
+
+ changes += 1
+
+ # Handle the case where it is the first rule in the list.
+ elif previous_rule is None:
+ # This is the case where the current rule is not only the first rule
+ # but the first line as well. So we set the head to the new rule
+ if current_rule.prev is None:
+ self._head = new_rule
+ # This case would occur if the previous line was a comment.
+ else:
+ current_rule.prev.next = new_rule
+ new_rule.prev = current_rule.prev
+ new_rule.next = current_rule
+ current_rule.prev = new_rule
+ changes += 1
+
+ return changes
+
+ def insert_after(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist after the existing rule
+ # 2. The new rule exists
+ for current_rule in rules_to_find:
+ # First we'll get the next rule.
+ next_rule = current_rule.next
+ # Next we may have to loop forwards if the next line is a comment. If it
+ # is, we'll get the next "rule's" next.
+ while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)):
+ next_rule = next_rule.next
+
+ # First we create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
+ # If the previous rule doesn't match we'll insert our new rule.
+
+ # Second set the original next rule's previous to the new_rule
+ next_rule.prev = new_rule
+ # Third, set the new_rule's next to the original next rule
+ new_rule.next = next_rule
+ # Fourth, set the new rule's previous to the current rule
+ new_rule.prev = current_rule
+ # Fifth, set the current rule's next to the new_rule
+ current_rule.next = new_rule
+
+ changes += 1
+
+ # This is the case where the current_rule is the last in the list
+ elif next_rule is None:
+ new_rule.prev = self._tail
+ new_rule.next = None
+ self._tail.next = new_rule
+ self._tail = new_rule
+
+ current_rule.next = new_rule
+ changes += 1
+
+ return changes
+
+ def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_add = parse_module_arguments(args_to_add)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ rule_changed = False
+
+ # create some structures to evaluate the situation
+ simple_new_args = set()
+ key_value_new_args = dict()
+
+ for arg in args_to_add:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_new_args[key] = value
+ else:
+ simple_new_args.add(arg)
+
+ key_value_new_args_set = set(key_value_new_args)
+
+ simple_current_args = set()
+ key_value_current_args = dict()
+
+ for arg in current_rule.rule_args:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+ key, value = arg.split("=")
+ key_value_current_args[key] = value
+ else:
+ simple_current_args.add(arg)
+
+ key_value_current_args_set = set(key_value_current_args)
+
+ new_args_to_add = list()
+
+ # Handle new simple arguments
+ if simple_new_args.difference(simple_current_args):
+ for arg in simple_new_args.difference(simple_current_args):
+ new_args_to_add.append(arg)
+
+ # Handle new key value arguments
+ if key_value_new_args_set.difference(key_value_current_args_set):
+ for key in key_value_new_args_set.difference(key_value_current_args_set):
+ new_args_to_add.append(key + '=' + key_value_new_args[key])
+
+ if new_args_to_add:
+ current_rule.rule_args += new_args_to_add
+ rule_changed = True
+
+ # Handle existing key value arguments when value is not equal
+ if key_value_new_args_set.intersection(key_value_current_args_set):
+ for key in key_value_new_args_set.intersection(key_value_current_args_set):
+ if key_value_current_args[key] != key_value_new_args[key]:
+ arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
+ current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
+ rule_changed = True
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_remove = parse_module_arguments(args_to_remove)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ if not args_to_remove:
+ args_to_remove = []
+
+ # Let's check to see if there are any args to remove by finding the intersection
+ # of the rule's current args and the args_to_remove lists
+ if not list(set(current_rule.rule_args) & set(args_to_remove)):
+ continue
+
+ # There are args to remove, so we create a list of new_args absent the args
+ # to remove.
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
+
+ changes += 1
+
+ return changes
+
+ def validate(self):
+ current_line = self._head
+
+ while current_line is not None:
+ if not current_line.validate()[0]:
+ return current_line.validate()
+ current_line = current_line.next
+ return True, "Module is valid"
+
+ def __str__(self):
+ lines = []
+ current_line = self._head
+
+ while current_line is not None:
+ lines.append(str(current_line))
+ current_line = current_line.next
+
+ if lines[1].startswith("# Updated by Ansible"):
+ lines.pop(1)
+
+ lines.insert(1, "# Updated by Ansible - " + datetime.now().isoformat())
+
+ return '\n'.join(lines) + '\n'
+
+
+def parse_module_arguments(module_arguments):
+ # Return empty list if we have no args to parse
+ if not module_arguments:
+ return []
+ elif isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
+ return []
+
+ if not isinstance(module_arguments, list):
+ module_arguments = [module_arguments]
+
+ parsed_args = list()
+
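+    # Keep bracketed arguments intact as single tokens; other arguments have spaces around '=' normalized (for example 'key = value' becomes 'key=value')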
+ for arg in module_arguments:
+ for item in filter(None, RULE_ARG_REGEX.findall(arg)):
+ if not item.startswith("["):
+                item = re.sub("\\s*=\\s*", "=", item)
+ parsed_args.append(item)
+
+ return parsed_args
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=VALID_TYPES),
+ control=dict(type='str', required=True),
+ module_path=dict(type='str', required=True),
+ new_type=dict(type='str', choices=VALID_TYPES),
+ new_control=dict(type='str'),
+ new_module_path=dict(type='str'),
+ module_arguments=dict(type='list', elements='str'),
+ state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
+ path=dict(type='path', default='/etc/pam.d'),
+ backup=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "args_present", ["module_arguments"]),
+ ("state", "args_absent", ["module_arguments"]),
+ ("state", "before", ["new_control", "new_type", "new_module_path"]),
+ ("state", "after", ["new_control", "new_type", "new_module_path"]),
+ ],
+ )
+ content = str()
+ fname = os.path.join(module.params["path"], module.params["name"])
+
+ # Open the file and read the content or fail
+ try:
+ with open(fname, 'r') as service_file_obj:
+ content = service_file_obj.read()
+ except IOError as e:
+ # If unable to read the file, fail out
+ module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
+
+ # Assuming we didn't fail, create the service
+ service = PamdService(content)
+ # Set the action
+ action = module.params['state']
+
+ changes = 0
+
+ # Take action
+ if action == 'updated':
+ changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'before':
+ changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'after':
+ changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_absent':
+ changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_present':
+ if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
+ module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
+
+ changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'absent':
+ changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
+
+ valid, msg = service.validate()
+
+ # If the module is not valid (meaning one of the rules is invalid), we will fail
+ if not valid:
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=(changes > 0),
+ change_count=changes,
+ backupdest='',
+ )
+
+ # If not check mode and something changed, backup the original if necessary then write out the file or fail
+ if not module.check_mode and result['changed']:
+ # First, create a backup if desired.
+ if module.params['backup']:
+ result['backupdest'] = module.backup_local(fname)
+ try:
+ temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
+ with open(temp_file.name, 'w') as fd:
+ fd.write(str(service))
+
+ except IOError:
+ module.fail_json(msg='Unable to create temporary \
+ file %s' % temp_file)
+
+ module.atomic_move(temp_file.name, os.path.realpath(fname))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py
new file mode 100644
index 00000000..daf68c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/parted.py
@@ -0,0 +1,797 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Fabrizio Colonna (@ColOfAbRiX)
+module: parted
+short_description: Configure block device partitions
+description:
+  - This module allows configuring block device partitions using the C(parted)
+ command line tool. For a full description of the fields and the options
+ check the GNU parted manual.
+requirements:
+ - This module requires parted version 1.8.3 and above
+ - align option (except 'undefined') requires parted 2.1 and above
+ - If the version of parted is below 3.1, it requires a Linux version running
+ the sysfs file system C(/sys/).
+options:
+ device:
+ description: The block device (disk) where to operate.
+ type: str
+ required: True
+ align:
+    description: Set alignment for newly created partitions. Use 'undefined' for the parted default alignment.
+ type: str
+ choices: [ cylinder, minimal, none, optimal, undefined ]
+ default: optimal
+ number:
+ description:
+ - The number of the partition to work with or the number of the partition
+ that will be created.
+ - Required when performing any action on the disk, except fetching information.
+ type: int
+ unit:
+ description:
+ - Selects the current default unit that Parted will use to display
+ locations and capacities on the disk and to interpret those given by the
+        user if they are not suffixed by a unit.
+ - When fetching information about a disk, it is always recommended to specify a unit.
+ type: str
+ choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
+ default: KiB
+ label:
+ description:
+ - Disk label type to use.
+      - If C(device) already contains a different label, it will be changed to C(label) and any previous partitions will be lost.
+ type: str
+ choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
+ default: msdos
+ part_type:
+ description:
+ - May be specified only with 'msdos' or 'dvh' partition tables.
+ - A C(name) must be specified for a 'gpt' partition table.
+ - Neither C(part_type) nor C(name) may be used with a 'sun' partition table.
+ type: str
+ choices: [ extended, logical, primary ]
+ default: primary
+ part_start:
+ description:
+ - Where the partition will start as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ - Using negative values may require setting of C(fs_type) (see notes).
+ type: str
+ default: 0%
+ part_end:
+ description:
+ - Where the partition will end as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+ (except compat) and it is case sensitive, e.g. C(10GiB), C(15%).
+ type: str
+ default: 100%
+ name:
+ description:
+ - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+ type: str
+ flags:
+    description: A list of the flags that have to be set on the partition.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to create or delete a partition.
+ - If set to C(info) the module will only return the device information.
+ type: str
+ choices: [ absent, present, info ]
+ default: info
+ fs_type:
+ description:
+ - If specified and the partition does not exist, will set filesystem type to given partition.
+      - This parameter is optional, but see the notes below about negative C(part_start) values.
+ type: str
+ version_added: '0.2.0'
+ resize:
+ description:
+ - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ type: bool
+ default: false
+ version_added: '1.3.0'
+
+notes:
+ - When fetching information about a new disk and when the version of parted
+ installed on the system is before version 3.1, the module queries the kernel
+ through C(/sys/) to obtain disk information. In this case the units CHS and
+ CYL are not supported.
+  - Negative C(part_start) values were rejected if C(fs_type) was not given.
+ This bug was fixed in parted 3.2.153. If you want to use negative C(part_start),
+ specify C(fs_type) as well or make sure your system contains newer parted.
+'''
+
+RETURN = r'''
+partition_info:
+ description: Current partition information
+ returned: success
+ type: complex
+ contains:
+ disk:
+ description: Generic device information.
+ type: dict
+ partitions:
+ description: List of device partitions.
+ type: list
+ script:
+ description: parted script executed by module
+ type: str
+ sample: {
+ "disk": {
+ "dev": "/dev/sdb",
+ "logical_block": 512,
+ "model": "VMware Virtual disk",
+ "physical_block": 512,
+ "size": 5.0,
+ "table": "msdos",
+ "unit": "gib"
+ },
+ "partitions": [{
+ "begin": 0.0,
+ "end": 1.0,
+ "flags": ["boot", "lvm"],
+ "fstype": "",
+ "name": "",
+ "num": 1,
+ "size": 1.0
+ }, {
+ "begin": 1.0,
+ "end": 5.0,
+ "flags": [],
+ "fstype": "",
+ "name": "",
+ "num": 2,
+ "size": 4.0
+ }],
+ "script": "unit KiB print "
+ }
+'''
+
+EXAMPLES = r'''
+- name: Create a new ext4 primary partition
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ fs_type: ext4
+
+- name: Remove partition number 1
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+ community.general.parted:
+ device: /dev/sdb
+ number: 2
+ flags: [ lvm ]
+ state: present
+ part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+ community.general.parted:
+ device: /dev/sdb
+ number: 3
+ state: present
+ fs_type: ext3
+ part_start: -1GiB
+
+# Example on how to read info and reuse it in subsequent task
+- name: Read device information (always use unit when probing)
+ community.general.parted: device=/dev/sdb unit=MiB
+ register: sdb_info
+
+- name: Remove all partitions from disk
+ community.general.parted:
+ device: /dev/sdb
+ number: '{{ item.num }}'
+ state: absent
+ loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+ community.general.parted:
+ device: /dev/sdb
+ number: "{{ sdb_info.partitions | length }}"
+ part_end: "100%"
+ resize: true
+ state: present
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+ """
+ Parses a string containing a size or boundary information
+ """
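+    # Illustrative examples: "10.5GiB" -> (10.5, 'GiB'), "-1GB" -> (-1.0, 'GB'),
+    # "0,0,1" -> ({'cylinder': 0, 'head': 0, 'sector': 1}, 'chs')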
+ matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+ if matches is None:
+ # "<cylinder>,<head>,<sector>" format
+ matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+ if matches is None:
+ module.fail_json(
+ msg="Error interpreting parted size output: '%s'" % size_str
+ )
+
+ size = {
+ 'cylinder': int(matches.group(1)),
+ 'head': int(matches.group(2)),
+ 'sector': int(matches.group(3))
+ }
+ unit = 'chs'
+
+ else:
+ # Normal format: "<number>[<unit>]"
+ if matches.group(2) is not None:
+ unit = matches.group(2)
+
+ size = float(matches.group(1))
+
+ return size, unit
+
+
+def parse_partition_info(parted_output, unit):
+ """
+ Parses the output of parted and transforms the data into
+ a dictionary.
+
+ Parted Machine Parseable Output:
+    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/000573.html
+    - All lines end with a semicolon (;)
+    - The first line indicates the units in which the output is expressed.
+      CHS, CYL and BYT stand for CHS, Cylinder and Bytes respectively.
+    - The second line is made of disk information in the following format:
+      "path":"size":"transport-type":"logical-sector-size":
+      "physical-sector-size":"partition-table-type":"model-name";
+    - If the first line was either CYL or CHS, the next line will contain
+      information on no. of cylinders, heads, sectors and cylinder size.
+    - Partition information begins from the next line. This is of the format:
+      (for BYT)
+      "number":"begin":"end":"size":"filesystem-type":"partition-name":
+      "flags-set";
+ (for CHS/CYL)
+ "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
+ """
+ lines = [x for x in parted_output.split('\n') if x.strip() != '']
+
+ # Generic device info
+ generic_params = lines[1].rstrip(';').split(':')
+
+ # The unit is read once, because parted always returns the same unit
+ size, unit = parse_unit(generic_params[1], unit)
+
+ generic = {
+ 'dev': generic_params[0],
+ 'size': size,
+ 'unit': unit.lower(),
+ 'table': generic_params[5],
+ 'model': generic_params[6],
+ 'logical_block': int(generic_params[3]),
+ 'physical_block': int(generic_params[4])
+ }
+
+ # CYL and CHS have an additional line in the output
+ if unit in ['cyl', 'chs']:
+ chs_info = lines[2].rstrip(';').split(':')
+ cyl_size, cyl_unit = parse_unit(chs_info[3])
+ generic['chs_info'] = {
+ 'cylinders': int(chs_info[0]),
+ 'heads': int(chs_info[1]),
+ 'sectors': int(chs_info[2]),
+ 'cyl_size': cyl_size,
+ 'cyl_size_unit': cyl_unit.lower()
+ }
+ lines = lines[1:]
+
+ parts = []
+ for line in lines[2:]:
+ part_params = line.rstrip(';').split(':')
+
+        # CHS uses a different format than BYT, but contrary to what is stated by
+ # the author, CYL is the same as BYT. I've tested this undocumented
+ # behaviour down to parted version 1.8.3, which is the first version
+ # that supports the machine parseable output.
+ if unit != 'chs':
+ size = parse_unit(part_params[3])[0]
+ fstype = part_params[4]
+ name = part_params[5]
+ flags = part_params[6]
+
+ else:
+ size = ""
+ fstype = part_params[3]
+ name = part_params[4]
+ flags = part_params[5]
+
+ parts.append({
+ 'num': int(part_params[0]),
+ 'begin': parse_unit(part_params[1])[0],
+ 'end': parse_unit(part_params[2])[0],
+ 'size': size,
+ 'fstype': fstype,
+ 'name': name,
+ 'flags': [f.strip() for f in flags.split(', ') if f != ''],
+ 'unit': unit.lower(),
+ })
+
+ return {'generic': generic, 'partitions': parts}
+
+
+def format_disk_size(size_bytes, unit):
+ """
+ Formats a size in bytes into a different unit, like parted does. It doesn't
+ manage CYL and CHS formats, though.
+    This function has been adapted from
+    https://github.com/Distrotech/parted/blob/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+ """
+ global units_si, units_iec
+
+ unit = unit.lower()
+
+ # Shortcut
+ if size_bytes == 0:
+ return 0.0, 'b'
+
+ # Cases where we default to 'compact'
+ if unit in ['', 'compact', 'cyl', 'chs']:
+ index = max(0, int(
+ (math.log10(size_bytes) - 1.0) / 3.0
+ ))
+ unit = 'b'
+ if index < len(units_si):
+ unit = units_si[index]
+
+ # Find the appropriate multiplier
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** units_iec.index(unit)
+
+ output = size_bytes // multiplier * (1 + 1E-16)
+
+ # Corrections to round up as per IEEE754 standard
+ if output < 10:
+ w = output + 0.005
+ elif output < 100:
+ w = output + 0.05
+ else:
+ w = output + 0.5
+
+ if w < 10:
+ precision = 2
+ elif w < 100:
+ precision = 1
+ else:
+ precision = 0
+
+ # Round and return
+ return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
+ size = float(size_str)
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+ elif unit in ['', 'compact', 'cyl', 'chs']:
+ # As per format_disk_size, default to compact, which defaults to megabytes
+ multiplier = 1000.0 ** units_si.index("MB")
+
+ output = size * multiplier
+ return int(output)
+
+
+def get_unlabeled_device_info(device, unit):
+ """
+ Fetches device information directly from the kernel and it is used when
+ parted cannot work because of a missing label. It always returns a 'unknown'
+ label.
+ """
+ device_name = os.path.basename(device)
+ base = "/sys/block/%s" % device_name
+
+ vendor = read_record(base + "/device/vendor", "Unknown")
+ model = read_record(base + "/device/model", "model")
+ logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+ phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+ size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+ size, unit = format_disk_size(size_bytes, unit)
+
+ return {
+ 'generic': {
+ 'dev': device,
+ 'table': "unknown",
+ 'size': size,
+ 'unit': unit,
+ 'logical_block': logic_block,
+ 'physical_block': phys_block,
+ 'model': "%s %s" % (vendor, model),
+ },
+ 'partitions': []
+ }
+
+
+def get_device_info(device, unit):
+ """
+ Fetches information about a disk and its partitions and it returns a
+ dictionary.
+ """
+ global module, parted_exec
+
+ # If parted complains about missing labels, it means there are no partitions.
+ # In this case only, use a custom function to fetch information and emulate
+ # parted formats for the unit.
+ label_needed = check_parted_label(device)
+ if label_needed:
+ return get_unlabeled_device_info(device, unit)
+
+ command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ rc, out, err = module.run_command(command)
+ if rc != 0 and 'unrecognised disk label' not in err:
+ module.fail_json(msg=(
+ "Error while getting device information with parted "
+ "script: '%s'" % command),
+ rc=rc, out=out, err=err
+ )
+
+ return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+ """
+ Determines if parted needs a label to complete its duties. Versions prior
+ to 3.1 don't return data when there is no label. For more information see:
+ http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
+ """
+ global parted_exec
+
+ # Check the version
+ parted_major, parted_minor, _ = parted_version()
+ if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
+ return False
+
+ # Older parted versions return a message in the stdout and RC > 0.
+ rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ if rc != 0 and 'unrecognised disk label' in out.lower():
+ return True
+
+ return False
+
+
+def parse_parted_version(out):
+ """
+ Returns version tuple from the output of "parted --version" command
+ """
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ return None, None, None
+
+ # Sample parted versions (see as well test unit):
+ # parted (GNU parted) 3.3
+ # parted (GNU parted) 3.4.5
+ # parted (GNU parted) 3.3.14-dfc61
+ matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip())
+
+ if matches is None:
+ return None, None, None
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+
+def parted_version():
+ """
+ Returns the major and minor version of parted installed on the system.
+ """
+ global module, parted_exec
+
+ rc, out, err = module.run_command("%s --version" % parted_exec)
+ if rc != 0:
+ module.fail_json(
+ msg="Failed to get parted version.", rc=rc, out=out, err=err
+ )
+
+ (major, minor, rev) = parse_parted_version(out)
+ if major is None:
+ module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
+
+ return major, minor, rev
+
+
+def parted(script, device, align):
+ """
+ Runs a parted script.
+ """
+ global module, parted_exec
+
+ align_option = '-a %s' % align
+ if align == 'undefined':
+ align_option = ''
+
+ if script and not module.check_mode:
+ command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ rc, out, err = module.run_command(command)
+
+ if rc != 0:
+ module.fail_json(
+ msg="Error while running parted script: %s" % command.strip(),
+ rc=rc, out=out, err=err
+ )
+
+
+def read_record(file_path, default=None):
+ """
+ Reads the first line of a file and returns it.
+ """
+ try:
+ f = open(file_path, 'r')
+ try:
+ return f.readline().strip()
+ finally:
+ f.close()
+ except IOError:
+ return default
+
+
+def part_exists(partitions, attribute, number):
+ """
+ Looks if a partition that has a specific value for a specific attribute
+ actually exists.
+ """
+ return any(
+ part[attribute] and
+ part[attribute] == number for part in partitions
+ )
+
+
+def check_size_format(size_str):
+ """
+ Checks if the input string is an allowed size
+ """
+ size, unit = parse_unit(size_str)
+ return unit in parted_units
+
+
+def main():
+ global module, units_si, units_iec, parted_exec
+
+ changed = False
+ output_script = ""
+ script = ""
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='str', required=True),
+ align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+ number=dict(type='int'),
+
+ # unit <unit> command
+ unit=dict(type='str', default='KiB', choices=parted_units),
+
+ # mklabel <label-type> command
+ label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+ # mkpart <part-type> [<fs-type>] <start> <end> command
+ part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+ part_start=dict(type='str', default='0%'),
+ part_end=dict(type='str', default='100%'),
+ fs_type=dict(type='str'),
+
+ # name <partition> <name> command
+ name=dict(type='str'),
+
+ # set <partition> <flag> <state> command
+ flags=dict(type='list', elements='str'),
+
+ # rm/mkpart command
+ state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+ # resize part
+ resize=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ['state', 'present', ['number']],
+ ['state', 'absent', ['number']],
+ ],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+ # Data extraction
+ device = module.params['device']
+ align = module.params['align']
+ number = module.params['number']
+ unit = module.params['unit']
+ label = module.params['label']
+ part_type = module.params['part_type']
+ part_start = module.params['part_start']
+ part_end = module.params['part_end']
+ name = module.params['name']
+ state = module.params['state']
+ flags = module.params['flags']
+ fs_type = module.params['fs_type']
+ resize = module.params['resize']
+
+ # Parted executable
+ parted_exec = module.get_bin_path('parted', True)
+
+ # Conditioning
+ if number is not None and number < 1:
+        module.fail_json(msg="The partition number must be greater than 0.")
+ if not check_size_format(part_start):
+ module.fail_json(
+            msg="The argument 'part_start' doesn't respect the required format. "
+ "The size unit is case sensitive.",
+ err=parse_unit(part_start)
+ )
+ if not check_size_format(part_end):
+ module.fail_json(
+            msg="The argument 'part_end' doesn't respect the required format. "
+ "The size unit is case sensitive.",
+ err=parse_unit(part_end)
+ )
+
+ # Read the current disk information
+ current_device = get_device_info(device, unit)
+ current_parts = current_device['partitions']
+
+ if state == 'present':
+
+ # Assign label if required
+ mklabel_needed = current_device['generic'].get('table', None) != label
+ if mklabel_needed:
+ script += "mklabel %s " % label
+
+ # Create partition if required
+ if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)):
+ script += "mkpart %s %s%s %s " % (
+ part_type,
+ '%s ' % fs_type if fs_type is not None else '',
+ part_start,
+ part_end
+ )
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # If partition exists, try to resize
+ if resize and part_exists(current_parts, 'num', number):
+ # Ensure new end is different to current
+ partition = [p for p in current_parts if p['num'] == number][0]
+ current_part_end = convert_to_bytes(partition['end'], unit)
+
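+            # A percentage part_end is converted to an absolute size relative to the whole disk before comparing with the current end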
+ size, parsed_unit = parse_unit(part_end, unit)
+ if parsed_unit == "%":
+ size = int((int(current_device['generic']['size']) * size) / 100)
+ parsed_unit = unit
+
+ desired_part_end = convert_to_bytes(size, parsed_unit)
+
+ if current_part_end != desired_part_end:
+ script += "resizepart %s %s " % (
+ number,
+ part_end
+ )
+
+ # Execute the script and update the data structure.
+ # This will create the partition for the next steps
+ if script:
+ output_script += script
+ parted(script, device, align)
+ changed = True
+ script = ""
+
+ if not module.check_mode:
+ current_parts = get_device_info(device, unit)['partitions']
+
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ if changed and module.check_mode:
+ partition = {'flags': []} # Empty structure for the check-mode
+ else:
+ partition = [p for p in current_parts if p['num'] == number][0]
+
+ # Assign name to the partition
+ if name is not None and partition.get('name', None) != name:
+ # Wrap double quotes in single quotes so the shell doesn't strip
+ # the double quotes as those need to be included in the arg
+ # passed to parted
+ script += 'name %s \'"%s"\' ' % (number, name)
+
+ # Manage flags
+ if flags:
+ # Parted infers boot with esp, if you assign esp, boot is set
+ # and if boot is unset, esp is also unset.
+ if 'esp' in flags and 'boot' not in flags:
+ flags.append('boot')
+
+ # Compute only the changes in flags status
+ flags_off = list(set(partition['flags']) - set(flags))
+ flags_on = list(set(flags) - set(partition['flags']))
+
+ for f in flags_on:
+ script += "set %s %s on " % (number, f)
+
+ for f in flags_off:
+ script += "set %s %s off " % (number, f)
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # Execute the script
+ if script:
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'absent':
+ # Remove the partition
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ script = "rm %s " % number
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'info':
+ output_script = "unit '%s' print " % unit
+
+ # Final status of the device
+ final_device_status = get_device_info(device, unit)
+ module.exit_json(
+ changed=changed,
+ disk=final_device_status['generic'],
+ partitions=final_device_status['partitions'],
+ script=output_script.strip()
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py
new file mode 100644
index 00000000..1bee180b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/pids.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of a given process name on Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
+short_description: "Retrieves a list of process IDs if the process is running, otherwise returns an empty list"
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+ - psutil (Python module)
+options:
+ name:
+    description: The name of the process for which to retrieve PIDs.
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+# Pass the process name
+- name: Getting process IDs of the process
+ community.general.pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ ansible.builtin.debug:
+    msg: "PIDs of python: {{ pids_of_python.pids | join(',') }}"
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+  returned: always
+ type: list
+ sample: [100,200]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+try:
+ import psutil
+ HAS_PSUTIL = True
+except ImportError:
+ HAS_PSUTIL = False
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+def get_pid(name):
+ pids = []
+
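+    # Match either the reported process name or the first element of the
+    # command line, comparing case-insensitively.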
+ for proc in psutil.process_iter(attrs=['name', 'cmdline']):
+ if compare_lower(proc.info['name'], name) or \
+ proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
+ pids.append(proc.pid)
+
+ return pids
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type="str"),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_PSUTIL:
+ module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
+ name = module.params["name"]
+ response = dict(pids=get_pid(name))
+ module.exit_json(**response)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py
new file mode 100644
index 00000000..db8c0ec8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/puppet.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner.
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ type: str
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ type: str
+ modulepath:
+ description:
+ - Path to an alternate location for puppet modules.
+ type: str
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ type: str
+ noop:
+ description:
+ - Override puppet.conf noop mode.
+ - When C(yes), run Puppet agent with C(--noop) switch set.
+ - When C(no), run Puppet agent with C(--no-noop) switch set.
+ - When unset (default), use default or puppet.conf value if defined.
+ type: bool
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts.
+ type: dict
+ facter_basename:
+ description:
+ - Basename of the facter output file.
+ type: str
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ type: str
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used.
+ - C(all) will go to both C(stdout) and C(syslog).
+ type: str
+ choices: [ all, stdout, syslog ]
+ default: stdout
+ certname:
+ description:
+ - The name to use when handling certificates.
+ type: str
+ tags:
+ description:
+ - A list of puppet tags to be used.
+ type: list
+ elements: str
+ execute:
+ description:
+ - Execute a specific piece of Puppet code.
+ - It has no effect with a puppetmaster.
+ type: str
+ use_srv_records:
+ description:
+      - Toggles the use_srv_records flag.
+ type: bool
+ summarize:
+ description:
+ - Whether to print a transaction summary.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Print extra information.
+ type: bool
+ default: false
+ debug:
+ description:
+ - Enable full debugging.
+ type: bool
+ default: false
+requirements:
+- puppet
+author:
+- Monty Taylor (@emonty)
+'''
+
+EXAMPLES = r'''
+- name: Run puppet agent and fail if anything goes wrong
+ community.general.puppet:
+
+- name: Run puppet and timeout in 5 minutes
+ community.general.puppet:
+ timeout: 5m
+
+- name: Run puppet using a different environment
+ community.general.puppet:
+ environment: testing
+
+- name: Run puppet using a specific certname
+ community.general.puppet:
+ certname: agent01.example.com
+
+- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
+ community.general.puppet:
+ execute: include ::mymodule
+
+- name: Run puppet using specific tags
+ community.general.puppet:
+ tags:
+ - update
+ - nginx
+
+- name: Run puppet agent in noop mode
+ community.general.puppet:
+ noop: yes
+
+- name: Run a manifest with debug, log to both syslog and stdout, specify module path
+ community.general.puppet:
+ modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ logdest: all
+ manifest: /var/lib/example/puppet_step_config.pp
+'''
+
+import json
+import os
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def _get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ out_file = os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'w')
+    # json.dumps() returns text and the file was opened in text mode ('w'),
+    # so write the string directly rather than encoding it to bytes.
+    out_file.write(json.dumps(data))
+ out_file.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(type='str', default='30m'),
+ puppetmaster=dict(type='str'),
+ modulepath=dict(type='str'),
+ manifest=dict(type='str'),
+ noop=dict(required=False, type='bool'),
+ logdest=dict(type='str', default='stdout', choices=['all',
+ 'stdout',
+ 'syslog']),
+ # internal code to work with --diff, do not use
+ show_diff=dict(type='bool', default=False, aliases=['show-diff']),
+ facts=dict(type='dict'),
+ facter_basename=dict(type='str', default='ansible'),
+ environment=dict(type='str'),
+ certname=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ execute=dict(type='str'),
+ summarize=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ use_srv_records=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
+ ('puppetmaster', 'modulepath'),
+ ],
+ )
+ p = module.params
+
+ global PUPPET_CMD
+ PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
+
+ if not PUPPET_CMD:
+ module.fail_json(
+ msg="Could not find puppet. Please ensure it is installed.")
+
+ global TIMEOUT_CMD
+ TIMEOUT_CMD = module.get_bin_path("timeout", False)
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
+ if not p['manifest']:
+ rc, stdout, stderr = module.run_command(
+ PUPPET_CMD + " config print agent_disabled_lockfile")
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ _get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
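+    # When timeout(1) is available, wrap the puppet run so a hung agent is
+    # killed (SIGKILL) after the requested duration.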
+ if TIMEOUT_CMD:
+ base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
+ timeout_cmd=TIMEOUT_CMD,
+ timeout=shlex_quote(p['timeout']),
+ puppet_cmd=PUPPET_CMD)
+ else:
+ base_cmd = PUPPET_CMD
+
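+    # With no manifest and no code to execute, run a single foreground agent
+    # run; otherwise fall through to "puppet apply" below.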
+ if not p['manifest'] and not p['execute']:
+ cmd = ("%(base_cmd)s agent --onetime"
+ " --no-daemonize --no-usecacheonfailure --no-splay"
+ " --detailed-exitcodes --verbose --color 0") % dict(base_cmd=base_cmd)
+ if p['puppetmaster']:
+ cmd += " --server %s" % shlex_quote(p['puppetmaster'])
+ if p['show_diff']:
+ cmd += " --show_diff"
+ if p['environment']:
+ cmd += " --environment '%s'" % p['environment']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if module.check_mode:
+ cmd += " --noop"
+        elif p['noop'] is not None:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['use_srv_records'] is not None:
+ if not p['use_srv_records']:
+ cmd += " --no-use_srv_records"
+ else:
+ cmd += " --use_srv_records"
+ else:
+        cmd = "%s apply --detailed-exitcodes" % base_cmd
+        if p['logdest'] == 'syslog':
+            cmd += " --logdest syslog"
+        if p['logdest'] == 'all':
+            cmd += " --logdest syslog --logdest stdout"
+        if p['modulepath']:
+            cmd += " --modulepath='%s'" % p['modulepath']
+        if p['environment']:
+            cmd += " --environment '%s'" % p['environment']
+ if p['certname']:
+ cmd += " --certname='%s'" % p['certname']
+ if p['tags']:
+ cmd += " --tags '%s'" % ','.join(p['tags'])
+        if module.check_mode:
+            cmd += " --noop"
+        elif p['noop'] is not None:
+ if p['noop']:
+ cmd += " --noop"
+ else:
+ cmd += " --no-noop"
+ if p['execute']:
+ cmd += " --execute '%s'" % p['execute']
+ else:
+ cmd += " %s" % shlex_quote(p['manifest'])
+ if p['summarize']:
+ cmd += " --summarize"
+ if p['debug']:
+ cmd += " --debug"
+ if p['verbose']:
+ cmd += " --verbose"
+ rc, stdout, stderr = module.run_command(cmd)
+
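+    # Interpret puppet's --detailed-exitcodes: 0 = no changes, 1 = failure or
+    # agent disabled, 2 = changes applied; 124 comes from timeout(1), and
+    # anything else is treated as a failure.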
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_facts.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
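+    # Capture groups: package name, optional comparison operator, optional version.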
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py
new file mode 100644
index 00000000..5ffb2776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/python_requirements_info.py
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+author:
+- Will Thames (@willthames)
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+ description: path to python version used
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from distutils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+ if module._name in ('python_requirements_facts', 'community.general.python_requirements_facts'):
+ module.deprecate("The 'python_requirements_facts' module has been renamed to 'python_requirements_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ )
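+    # Capture groups: package name, optional comparison operator, optional version.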
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(==|[><]=?)?([0-9.]+)?$')
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in (module.params.get('dependencies') or []):
+ match = pkg_dep_re.match(dep)
+ if match is None:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+            results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py
new file mode 100644
index 00000000..b80ed8cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/runit.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+- James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+ - Controls runit services on remote hosts using the sv utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: yes
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+ C(reloaded) will send a HUP (sv reload).
+ C(once) will run a normally downed sv once (sv once), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the service is enabled or not, if disabled it also implies stopped.
+ type: bool
+ service_dir:
+ description:
+      - Directory that runsv watches for services.
+ type: str
+ default: /var/service
+ service_src:
+ description:
+      - Directory where services are defined; the source of symlinks to I(service_dir).
+ type: str
+ default: /etc/sv
+'''
+
+EXAMPLES = r'''
+- name: Start sv dnscache, if not running
+ community.general.runit:
+ name: dnscache
+ state: started
+
+- name: Stop sv dnscache, if running
+ community.general.runit:
+ name: dnscache
+ state: stopped
+
+- name: Kill sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: killed
+
+- name: Restart sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: restarted
+
+- name: Reload sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+
+- name: Use alternative sv directory location
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+ service_dir: /run/service
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs): # @FIXME remove unused function?
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Sv(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = []
+ self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+ self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.get_status()
+ else:
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+ # full_state *may* contain information about the logger:
+ # "down: /etc/service/service-without-logger: 1s, normally up\n"
+ # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
+ full_state_no_logger = self.full_state.split("; ")[0]
+
+ m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
+ if m:
+ self.pid = m.group(1)
+
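+            # The status line reports how long the service has been in its
+            # current state as "<seconds>s"; keep just the number.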
+ m = re.search(r' (\d+)s', full_state_no_logger)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(r'^run:', full_state_no_logger):
+ self.state = 'started'
+ elif re.search(r'^down:', full_state_no_logger):
+ self.state = 'stopped'
+ else:
+ self.state = 'unknown'
+ return
+
+ def started(self):
+ return self.start()
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, 'start', self.svc_full])
+
+ def stopped(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, 'once', self.svc_full])
+
+ def reloaded(self):
+ return self.reload()
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
+
+ def restarted(self):
+ return self.restart()
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
+
+ def killed(self):
+ return self.kill()
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e))
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ dist=dict(type='str', default='runit'), # @FIXME unused param?
+ service_dir=dict(type='str', default='/var/service'),
+ service_src=dict(type='str', default='/etc/sv'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+
+ sv = Sv(module)
+ changed = False
+ orig_state = sv.report()
+
+ if enabled is not None and enabled != sv.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ sv.enable()
+ else:
+ sv.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != sv.state:
+ changed = True
+ if not module.check_mode:
+ getattr(sv, state)()
+
+ module.exit_json(changed=changed, sv=sv.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py
new file mode 100644
index 00000000..457e2e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sefcontext.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+- Manages SELinux file context mapping definitions.
+- Similar to the C(semanage fcontext) command.
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: yes
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified target.
+ type: str
+ required: yes
+ seuser:
+ description:
+ - SELinux user for the specified target.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified target.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+    - Useful for scenarios (chrooted environment) in which you cannot get the real SELinux state.
+ type: bool
+ default: no
+notes:
+- The changes are persistent across reboots.
+- The M(community.general.sefcontext) module does not modify existing files to the new
+ SELinux context(s), so it is advisable to first create the SELinux
+ file contexts before creating files, or run C(restorecon) manually
+ for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+  decision, as it would be unclear what the reported changes would
+  entail, and there is no guarantee that applying the SELinux fcontext
+  would not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ community.general.sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_git_rw_content_t
+ state: present
+
+- name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -irv /srv/git_repos
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+    # Beware that records consist of a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py
new file mode 100644
index 00000000..0d1f9f59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selinux_permissive.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+ - Add and remove a domain from the list of permissive domains.
+options:
+ domain:
+ description:
+ - The domain that will be added or removed from the list of permissive domains.
+ type: str
+ required: true
+ aliases: [ name ]
+ permissive:
+ description:
+ - Indicate if the domain should or should not be set as permissive.
+ type: bool
+ required: true
+ no_reload:
+ description:
+      - Disable reloading of the SELinux policy after making a change to a domain's permissive setting.
+      - The default is C(no), which causes the policy to be reloaded when a domain changes state.
+      - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
+ type: bool
+ default: no
+ store:
+ description:
+ - Name of the SELinux policy store to use.
+ type: str
+notes:
+ - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
+requirements: [ policycoreutils-python ]
+author:
+- Michael Scherer (@mscherer) <misc@zarb.org>
+'''
+
+EXAMPLES = r'''
+- name: Change the httpd_t domain to permissive
+ community.general.selinux_permissive:
+ name: httpd_t
+ permissive: true
+'''
+
+import traceback
+
+HAVE_SEOBJECT = False
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True, aliases=['name']),
+ store=dict(type='str', default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
+ exception=SEOBJECT_IMP_ERR)
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py
new file mode 100644
index 00000000..7036dad9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/selogin.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+
+# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+ - Manages linux user to SELinux user mapping
+options:
+ login:
+ type: str
+ description:
+ - a Linux user
+ required: true
+ seuser:
+ type: str
+ description:
+ - SELinux user name
+ selevel:
+ type: str
+ aliases: [ serange ]
+ description:
+      - MLS/MCS Security Range (MLS/MCS Systems only). SELinux range for the SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ type: str
+ description:
+ - Desired mapping value.
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+ - The changes are persistent across reboots
+ - Not tested on any debian based system
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+- name: Modify the default user on the system to the guest_u user
+ community.general.selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+- name: Assign gijoe user on an MLS machine a range and to the staff_u user
+ community.general.selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+- name: Assign all users in the engineering group to the staff_u user
+ community.general.selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
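+    # Add a new mapping if the login is unknown; otherwise modify it only
+    # when the SELinux user or range differs.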
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ # for local_login in all_logins:
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py
new file mode 100644
index 00000000..71df8d6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/seport.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+options:
+ ports:
+ description:
+ - Ports or port ranges.
+      - Can be a list (since 2.6) or a comma-separated string.
+ type: list
+ elements: str
+ required: true
+ proto:
+ description:
+ - Protocol for the specified port.
+ type: str
+ required: true
+ choices: [ tcp, udp ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ type: str
+ required: true
+ state:
+ description:
+      - Desired state of the SELinux port mapping.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: yes
+ ignore_selinux_state:
+ description:
+      - Run independently of the SELinux runtime state.
+ type: bool
+ default: no
+notes:
+ - The changes are persistent across reboots.
+  - Not tested on any Debian-based system.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dan Keder (@dankeder)
+'''
+
+EXAMPLES = r'''
+- name: Allow Apache to listen on tcp port 8888
+ community.general.seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
+- name: Allow sshd to listen on tcp port 8991
+ community.general.seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+'''
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_port_get_ports(seport, setype, proto):
+ """ Get the list of ports that have the specified type definition.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type()
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
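+    # seobject keys port records by (low port, high port, protocol); normalize
+    # a single port such as "8080" to the range (8080, 8080) before the lookup.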
+ if isinstance(port, str):
+ ports = port.split('-', 1)
+ if len(ports) == 1:
+ ports.extend(ports)
+ else:
+ ports = (port, port)
+
+ key = (int(ports[0]), int(ports[1]), proto)
+
+ records = seport.get_all()
+ if key in records:
+ return records[key]
+ else:
+ return None
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port not in ports_by_type:
+ change = True
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None and not module.check_mode:
+ seport.add(port, proto, serange, setype)
+ elif port_type is not None and not module.check_mode:
+ seport.modify(port, proto, serange, setype)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore=''):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ change = False
+ ports_by_type = semanage_port_get_ports(seport, setype, proto)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ ports=dict(type='list', elements='str', required=True),
+ proto=dict(type='str', required=True, choices=['tcp', 'udp']),
+ setype=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = module.params['ports']
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
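
Note: the (low, high, proto) lookup key built by semanage_port_get_type() can be reproduced outside of Ansible. The following standalone sketch (a hypothetical helper, not part of the module) mirrors that parsing for a single port and for a port range.

# Standalone sketch mirroring the key construction in semanage_port_get_type();
# seobject.portRecords.get_all() keys its records as (low, high, proto) tuples.
def port_key(port, proto):
    if isinstance(port, str):
        parts = port.split('-', 1)
        if len(parts) == 1:
            parts.extend(parts)  # single port: low == high
    else:
        parts = (port, port)
    return (int(parts[0]), int(parts[1]), proto)


if __name__ == '__main__':
    assert port_key('8080', 'tcp') == (8080, 8080, 'tcp')
    assert port_key('8080-9090', 'tcp') == (8080, 9090, 'tcp')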
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py
new file mode 100644
index 00000000..ccb02a2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/shutdown.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: shutdown
+short_description: Shut down a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+ - Shuts down a machine.
+version_added: "1.1.0"
+options:
+ delay:
+ description:
+ - Seconds to wait before shutdown. Passed as a parameter to the shutdown command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ msg:
+ description:
+ - Message to display to users before shutdown.
+ type: str
+ default: Shut down initiated by Ansible
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored on the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: path
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+
+seealso:
+- module: ansible.builtin.reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally shut down the machine with all defaults
+ community.general.shutdown:
+
+- name: Delay shutting down the remote node
+ community.general.shutdown:
+ delay: 60
+
+- name: Shut down a machine with shutdown command in unusual place
+ community.general.shutdown:
+ search_paths:
+ - '/lib/molly-guard'
+'''
+
+RETURN = r'''
+shutdown:
+ description: C(true) if the machine has been shut down.
+ returned: always
+ type: bool
+ sample: true
+'''
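
Note: the delay semantics documented above (minutes on Linux, macOS and OpenBSD; seconds on Solaris and FreeBSD) can be illustrated with a small sketch. This is a hypothetical helper for illustration only; the actual conversion is performed by the collection's shutdown action plugin, not by this documentation stub.

# Hypothetical helper illustrating the documented delay handling; not the
# collection's implementation.
def shutdown_delay_argument(delay_seconds, system):
    if system in ('Linux', 'Darwin', 'OpenBSD'):
        # Converted to minutes and rounded down; anything under 60 seconds becomes 0.
        return max(0, delay_seconds // 60)
    # Solaris and FreeBSD take the delay in seconds.
    return max(0, delay_seconds)


if __name__ == '__main__':
    assert shutdown_delay_argument(59, 'Linux') == 0
    assert shutdown_delay_argument(120, 'Linux') == 2
    assert shutdown_delay_argument(45, 'FreeBSD') == 45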
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py
new file mode 100644
index 00000000..8ecdeb8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/solaris_zone.py
@@ -0,0 +1,485 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones.
+ - This module does not currently allow changing of options for a zone that has already been created.
+author:
+- Paul Markham (@pmarkham)
+requirements:
+ - Solaris 10 or 11
+options:
+ state:
+ description:
+ - C(present), configure and install the zone.
+ - C(installed), synonym for C(present).
+ - C(running), if the zone already exists, boot it, otherwise, configure and install
+ the zone first, then boot it.
+ - C(started), synonym for C(running).
+ - C(stopped), shut down a zone.
+ - C(absent), destroy the zone.
+ - C(configured), configure the zone so that it is ready to be attached.
+ - C(attached), attach a zone, but do not boot it.
+ - C(detached), shut down and detach a zone.
+ type: str
+ choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+ default: present
+ name:
+ description:
+ - Zone name.
+ - A zone name must be a unique name.
+ - A zone name must begin with an alpha-numeric character.
+ - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+ - The name cannot be longer than 64 characters.
+ type: str
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ type: str
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ type: bool
+ default: no
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ type: str
+ config:
+ description:
+ - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+ and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
+ "set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ type: str
+ default: ''
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ type: str
+ default: ''
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ type: str
+ default: ''
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ type: str
+ default: ''
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ type: int
+ default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: True
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+
+- name: Stop a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: stopped
+
+- name: Destroy a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: absent
+
+- name: Detach a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: detached
+
+- name: Configure a zone, ready to be attached
+ community.general.solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+ community.general.solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: -u
+'''
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+ if not match:
+ self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+ "Please refer documentation for correct zone name specifications.")
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
+ id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+ id.write('1 # System previously configured?\n')
+ id.write('1 # Bootparams succeeded?\n')
+ id.write('1 # System is on a network?\n')
+ id.write('1 # Extended network information gathered?\n')
+ id.write('0 # Autobinder succeeded?\n')
+ id.write('1 # Network has subnets?\n')
+ id.write('1 # root password prompted for?\n')
+ id.write('1 # locale and term prompted for?\n')
+ id.write('1 # security policy in place\n')
+ id.write('1 # NFSv4 domain configured\n')
+ id.write('0 # Auto Registration Configured\n')
+ id.write('vt100')
+ id.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+ def exists(self):
+ cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+ self.msg.append('zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present',
+ choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
+ path=dict(type='str'),
+ sparse=dict(type='bool', default=False),
+ root_password=dict(type='str', no_log=True),
+ timeout=dict(type='int', default=600),
+ config=dict(type='str', default=''),
+ create_options=dict(type='str', default=''),
+ install_options=dict(type='str', default=''),
+ attach_options=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+ main()
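
Note: Zone.boot() waits for the zone's console login (ttymon) before declaring the zone booted. Stripped of the zoneadm specifics, the pattern is a poll-until-ready loop; the sketch below is standalone, uses a hypothetical readiness callback, and is not the module's code.

import time


# Standalone sketch of the poll-until-ready loop used by Zone.boot(): call a
# readiness check every `interval` seconds until it succeeds or `timeout` is exceeded.
def wait_until(ready, timeout=600, interval=10):
    elapsed = 0
    while not ready():
        if elapsed > timeout:
            raise TimeoutError('timed out waiting for zone to boot')
        time.sleep(interval)
        elapsed += interval


# Usage (hypothetical readiness check):
#   wait_until(lambda: console_login_running('zone1'), timeout=600)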
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py
new file mode 100644
index 00000000..e9215670
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/svc.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: svc
+author:
+- Brian Coca (@bcoca)
+short_description: Manage daemontools services
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+ C(reloaded) will send a SIGUSR1 (svc -1).
+ C(once) will run a normally downed svc once (svc -o), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ downed:
+ description:
+ - Whether a 'down' file should exist or not; if it exists, it disables auto startup.
+ Defaults to no. Downed does not imply stopped.
+ type: bool
+ enabled:
+ description:
+ - Whether the service is enabled or not; if disabled, it also implies stopped.
+ Take note that a service can be enabled and downed (no auto restart).
+ type: bool
+ service_dir:
+ description:
+ - Directory svscan watches for services.
+ type: str
+ default: /service
+ service_src:
+ description:
+ - Directory where services are defined, the source of symlinks to service_dir.
+ type: str
+ default: /etc/service
+'''
+
+EXAMPLES = '''
+- name: Start svc dnscache, if not running
+ community.general.svc:
+ name: dnscache
+ state: started
+
+- name: Stop svc dnscache, if running
+ community.general.svc:
+ name: dnscache
+ state: stopped
+
+- name: Kill svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: killed
+
+- name: Restart svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: restarted
+
+- name: Reload svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+
+- name: Using alternative svc directory location
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
+'''
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = ['/command', '/usr/local/bin']
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+ self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd, '-dx', src_log])
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+ m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ downed=dict(type='bool'),
+ service_dir=dict(type='str', default='/service'),
+ service_src=dict(type='str', default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != svc.state:
+ changed = True
+ if not module.check_mode:
+ getattr(svc, state[:-2])()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+ main()
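
Note: Svc.get_status() derives the service state from the text emitted by svstat. The same regex logic, as a standalone sketch (the sample lines are illustrative, not captured output):

import re


# Standalone sketch of the state detection in Svc.get_status(): ' up '/' down '
# pick the base state, and ' want ' turns it into a transitional '-ing' form.
def parse_svstat(line):
    if re.search(' up ', line):
        state = 'start'
    elif re.search(' down ', line):
        state = 'stopp'
    else:
        return 'unknown'
    return state + ('ing' if re.search(' want ', line) else 'ed')


if __name__ == '__main__':
    assert parse_svstat('/service/dnscache: up (pid 123) 4 seconds') == 'started'
    assert parse_svstat('/service/dnscache: down 2 seconds, normally up, want up') == 'stopping'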
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py
new file mode 100644
index 00000000..2483fb36
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/syspatch.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2019-2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syspatch
+
+short_description: Manage OpenBSD system patches
+
+
+description:
+ - "Manage OpenBSD system patches using syspatch."
+
+options:
+ apply:
+ type: bool
+ description:
+ - Apply all available system patches.
+ - By default, apply all patches.
+ - Deprecated. Will be removed in community.general 3.0.0.
+ default: yes
+ revert:
+ description:
+ - Revert system patches.
+ type: str
+ choices: [ all, one ]
+
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = '''
+- name: Apply all available system patches
+ community.general.syspatch:
+ apply: true
+
+- name: Revert last patch
+ community.general.syspatch:
+ revert: one
+
+- name: Revert all patches
+ community.general.syspatch:
+ revert: all
+
+# NOTE: You can reboot automatically if a patch requires it:
+- name: Apply all patches and store result
+ community.general.syspatch:
+ apply: true
+ register: syspatch
+
+- name: Reboot if patch requires it
+ ansible.builtin.reboot:
+ when: syspatch.reboot_needed
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+stdout:
+ description: syspatch standard output.
+ returned: always
+ type: str
+ sample: "001_rip6cksum"
+stderr:
+ description: syspatch standard error.
+ returned: always
+ type: str
+ sample: "syspatch: need root privileges"
+reboot_needed:
+ description: Whether or not a reboot is required after an update.
+ returned: always
+ type: bool
+ sample: True
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ apply=dict(type='bool', default=True, removed_in_version='3.0.0', removed_from_collection='community.general'),
+ revert=dict(type='str', choices=['all', 'one'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_one_of=[['apply', 'revert']]
+ )
+
+ result = syspatch_run(module)
+
+ module.exit_json(**result)
+
+
+def syspatch_run(module):
+ cmd = module.get_bin_path('syspatch', True)
+ changed = False
+ reboot_needed = False
+ warnings = []
+
+ # Set safe defaults for run_flag and check_flag
+ run_flag = ['-c']
+ check_flag = ['-c']
+ if module.params['revert']:
+ check_flag = ['-l']
+
+ if module.params['revert'] == 'all':
+ run_flag = ['-R']
+ else:
+ run_flag = ['-r']
+ elif module.params['apply']:
+ check_flag = ['-c']
+ run_flag = []
+
+ # Run check command
+ rc, out, err = module.run_command([cmd] + check_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+
+ if len(out) > 0:
+ # Changes pending
+ change_pending = True
+ else:
+ # No changes pending
+ change_pending = False
+
+ if module.check_mode:
+ changed = change_pending
+ elif change_pending:
+ rc, out, err = module.run_command([cmd] + run_flag)
+
+ # Workaround syspatch ln bug:
+ # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
+ if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('create unique kernel') >= 0:
+ # Kernel update applied
+ reboot_needed = True
+ elif out.lower().find('syspatch updated itself') >= 0:
+ warnings.append('Syspatch was updated. Please run syspatch again.')
+
+ # If no stdout, then warn user
+ if len(out) == 0:
+ warnings.append('syspatch had suggested changes, but stdout was empty.')
+
+ changed = True
+ else:
+ changed = False
+
+ return dict(
+ changed=changed,
+ reboot_needed=reboot_needed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
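
Note: syspatch_run() chooses one flag set to detect pending work and another to perform it. The selection logic in isolation, as a standalone sketch mirroring the code above:

# Standalone sketch of the flag selection in syspatch_run(): -l lists installed
# patches (used when reverting), -c lists patches not yet installed, -r/-R
# revert the latest patch or all patches, and a bare syspatch applies everything.
def syspatch_flags(revert=None, apply=True):
    check_flag, run_flag = ['-c'], ['-c']  # safe defaults
    if revert:
        check_flag = ['-l']
        run_flag = ['-R'] if revert == 'all' else ['-r']
    elif apply:
        check_flag = ['-c']
        run_flag = []
    return check_flag, run_flag


if __name__ == '__main__':
    assert syspatch_flags(revert='all') == (['-l'], ['-R'])
    assert syspatch_flags(revert='one') == (['-l'], ['-r'])
    assert syspatch_flags() == (['-c'], [])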
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py
new file mode 100644
index 00000000..a1956129
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/sysupgrade.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sysupgrade
+short_description: Manage OpenBSD system upgrades
+version_added: 1.1.0
+description:
+ - Manage OpenBSD system upgrades using sysupgrade.
+options:
+ snapshot:
+ description:
+ - Apply the latest snapshot.
+ - Otherwise, the release will be applied.
+ default: no
+ type: bool
+ force:
+ description:
+ - Force upgrade (for snapshots only).
+ default: no
+ type: bool
+ keep_files:
+ description:
+ - Keep the files under /home/_sysupgrade.
+ - By default, the files will be deleted after the upgrade.
+ default: no
+ type: bool
+ fetch_only:
+ description:
+ - Fetch and verify files and create /bsd.upgrade but do not reboot.
+ - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ default: yes
+ type: bool
+ installurl:
+ description:
+ - OpenBSD mirror top-level URL for fetching an upgrade.
+ - By default, the mirror URL is pulled from /etc/installurl.
+ type: str
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = r'''
+- name: Upgrade to latest release
+ community.general.sysupgrade:
+ register: sysupgrade
+
+- name: Upgrade to latest snapshot
+ community.general.sysupgrade:
+ snapshot: yes
+ installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD
+ register: sysupgrade
+
+- name: Reboot to apply upgrade if needed
+ ansible.builtin.reboot:
+ when: sysupgrade.changed
+
+# Note: Ansible will error when running this way due to how
+# the reboot is forcefully handled by sysupgrade:
+
+- name: Have sysupgrade automatically reboot
+ community.general.sysupgrade:
+ fetch_only: no
+ ignore_errors: yes
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+stdout:
+ description: Sysupgrade standard output.
+ returned: always
+ type: str
+stderr:
+ description: Sysupgrade standard error.
+ returned: always
+ type: str
+ sample: "sysupgrade: need root privileges"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sysupgrade_run(module):
+ sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
+ cmd = [sysupgrade_bin]
+ changed = False
+ warnings = []
+
+ # Setup command flags
+ if module.params['snapshot']:
+ run_flag = ['-s']
+ if module.params['force']:
+ # Force only applies to snapshots
+ run_flag.append('-f')
+ else:
+ # release flag
+ run_flag = ['-r']
+
+ if module.params['keep_files']:
+ run_flag.append('-k')
+
+ if module.params['fetch_only']:
+ run_flag.append('-n')
+
+ # installurl must be the last argument
+ if module.params['installurl']:
+ run_flag.append(module.params['installurl'])
+
+ rc, out, err = module.run_command(cmd + run_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('already on latest snapshot') >= 0:
+ changed = False
+ elif out.lower().find('upgrade on next reboot') >= 0:
+ changed = True
+
+ return dict(
+ changed=changed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ snapshot=dict(type='bool', default=False),
+ fetch_only=dict(type='bool', default=True),
+ force=dict(type='bool', default=False),
+ keep_files=dict(type='bool', default=False),
+ installurl=dict(type='str'),
+ ),
+ supports_check_mode=False,
+ )
+ return_dict = sysupgrade_run(module)
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
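
Note: sysupgrade_run() maps the module parameters onto the sysupgrade command line, with the mirror URL (when given) appended last. A standalone sketch of the same mapping:

# Standalone sketch mirroring the flag assembly in sysupgrade_run(): -s/-r choose
# snapshot or release, -f forces the upgrade (snapshots only), -k keeps the files
# under /home/_sysupgrade, -n fetches only (no reboot), and the optional mirror
# URL comes last.
def sysupgrade_argv(snapshot=False, force=False, keep_files=False,
                    fetch_only=True, installurl=None):
    argv = ['/usr/sbin/sysupgrade']
    argv.append('-s' if snapshot else '-r')
    if snapshot and force:
        argv.append('-f')
    if keep_files:
        argv.append('-k')
    if fetch_only:
        argv.append('-n')
    if installurl:
        argv.append(installurl)
    return argv


if __name__ == '__main__':
    assert sysupgrade_argv(snapshot=True, force=True) == ['/usr/sbin/sysupgrade', '-s', '-f', '-n']
    assert sysupgrade_argv() == ['/usr/sbin/sysupgrade', '-r', '-n']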
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py
new file mode 100644
index 00000000..d10dd9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/timezone.py
@@ -0,0 +1,905 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock.
+ If you want to set up NTP, use the M(ansible.builtin.service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+ On SmartOS, C(sm-set-timezone) is used; on macOS, C(systemsetup); on BSD, C(/etc/localtime) is modified.
+ On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+.
+ - Windows and HPUX are not supported; please let us know if you find any other OS/distro in which this fails.
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock is required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+ - Note that changing this option is not recommended, and it may fail
+ to apply, especially in virtual environments such as AWS.
+ - B(At least one of name and hwclock is required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+ - On SmartOS, the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone timezone.
+ - On AIX, only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ community.general.timezone:
+ name: Asia/Tokyo
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the values for each params on each phases.
+ # Initially there's only info of "planned" phase, but the
+ # `self.check()` function will fill out it.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute.
+ It will be concatenated with single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
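
Note: SystemdTimezone scrapes the output of `timedatectl status` with the two regular expressions in `regexps`. A standalone sketch against a made-up status snippet (the sample text is illustrative, not captured output):

import re

# The same patterns as SystemdTimezone.regexps, applied to an illustrative
# snippet; the module then maps the RTC yes/no answer to local/UTC.
NAME_RE = re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
HWCLOCK_RE = re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE)

SAMPLE_STATUS = (
    '               Local time: Mon 2021-01-04 10:00:00 JST\n'
    '                Time zone: Asia/Tokyo (JST, +0900)\n'
    '          RTC in local TZ: no\n'
)

if __name__ == '__main__':
    assert NAME_RE.search(SAMPLE_STATUS).group(1) == 'Asia/Tokyo'
    assert HWCLOCK_RE.search(SAMPLE_STATUS).group(1) == 'no'  # mapped to UTC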
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For timezone setting, it edits the following file and reflect changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matched more than once, other than the first line will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ key: For what key the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find the all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
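
Note: the core rule of _edit_file() is to replace the first line matching the regexp with the new value and drop any further matches (or insert at the top if nothing matches). In isolation, that rule looks like the standalone sketch below; it is not the module's file I/O.

import re


# Standalone sketch of the line-editing rule used by _edit_file(): keep exactly
# one occurrence of the configuration line, at the position of the first match.
def edit_lines(lines, regexp, value):
    matched = [i for i, line in enumerate(lines) if regexp.search(line)]
    insert_at = matched[0] if matched else 0
    for i in reversed(matched):
        del lines[i]
    lines.insert(insert_at, value)
    return lines


if __name__ == '__main__':
    zone_re = re.compile(r'^ZONE\s*=')
    assert edit_lines(['ZONE="UTC"\n', '# comment\n', 'ZONE="GMT"\n'],
                      zone_re, 'ZONE="Asia/Tokyo"\n') == ['ZONE="Asia/Tokyo"\n', '# comment\n']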
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+ # If we cannot find UTC/LOCAL in /etc/adjtime that means UTC
+ # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In 'before' phase UTC/LOCAL doesn't need to be set in
+ # the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the planned value is the same as the one in the config file
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+ implementations, uses the `systemsetup` command on Darwin to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+        # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+ zoneinfo_file = os.readlink(localtime_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and moving it into place. Otherwise we would have to remove
+            # the original symlink and create the new one, which would create a
+            # race condition if another process tried to read /etc/localtime
+            # between removal and creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+            # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails; it does not check
+            # for valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo.
+            # This does mean that we can only support Olson names for now. The commented-out regex
+            # below detects Olson-style names, so in the future we could detect POSIX or Olson
+            # formats and act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+ # msg = 'Supplied timezone (%s) does not appear to a be valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+            tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py
new file mode 100644
index 00000000..c6df6fe6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/ufw.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright: (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ type: int
+ insert_relative_to:
+ description:
+      - Allows the index in I(insert) to be interpreted relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ rule:
+ description:
+      - Add firewall rule.
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+      - Log new connections matched to this rule.
+ type: bool
+ default: false
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+    aliases: [ dest, to ]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ type: bool
+ default: false
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+        I(interface_in) and I(interface_out) for routed rules that need
+        to supply both an input and an output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: '0.2.0'
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: '0.2.0'
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ default: false
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ community.general.ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ community.general.ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- community.general.ufw:
+ rule: reject
+ port: auth
+ log: yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- community.general.ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- community.general.ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ community.general.ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
+
+- name: Deny all access to port 53
+ community.general.ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ community.general.ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ community.general.ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ community.general.ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ community.general.ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+  # to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
+ community.general.ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by :
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-
+ valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=([command_keys]),
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
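+    # Helpers for filtering the multi-line output of ufw commands when the
+    # before/after rule sets are compared in check mode.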
+ def filter_line_that_not_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+        return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
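+        # 'cmd' is a list of lists: each entry is either [text] or
+        # [condition, text]. Entries whose first element is falsy are
+        # dropped, and the last element of each remaining entry is
+        # joined into the final command line.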
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
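+        # Collect the currently configured rules by grepping the
+        # '### tuple' marker lines from ufw's user rules files in the
+        # locations used by different distributions.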
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
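+    # Build the set of top-level ufw commands (state/default/rule/logging)
+    # that were actually requested in this task.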
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
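+            # Compare the requested logging value with the current logging
+            # state parsed from 'ufw status verbose'.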
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
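+                # Parse the current default policies from the pre-change
+                # 'ufw status verbose' output and compare with the request.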
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
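+                # Translate the requested insert position into an absolute rule
+                # number by parsing the output of 'ufw status numbered' and
+                # locating the first/last IPv4 or IPv6 rule.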
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
+                numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
+ has_ipv6 = any([ipv6 for (no, ipv6) in lines])
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+            # comment is supported only in ufw versions 0.35 and later
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
+                    # ufw --dry-run doesn't output all rules, so we have to compare only the IPv4 or IPv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py
new file mode 100644
index 00000000..15fd9c62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/vdo.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+ - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that
+ provides inline block-level deduplication, compression, and
+ thin provisioning capabilities to primary storage.
+
+options:
+ name:
+ description:
+ - The name of the VDO volume.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether this VDO volume should be "present" or "absent".
+ If a "present" VDO volume does not exist, it will be
+ created. If a "present" VDO volume already exists, it
+ will be modified, by updating the configuration, which
+ will take effect when the VDO volume is restarted.
+ Not all parameters of an existing VDO volume can be
+ modified; the "statusparamkeys" list contains the
+ parameters that can be modified after creation. If an
+ "absent" VDO volume does not exist, it will not be
+ removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ activated:
+ description:
+ - The "activate" status for a VDO volume. If this is set
+ to "no", the VDO volume cannot be started, and it will
+ not start on system startup. However, on initial
+ creation, a VDO volume with "activated" set to "off"
+ will be running, until stopped. This is the default
+ behavior of the "vdo create" command; it provides the
+ user an opportunity to write a base amount of metadata
+ (filesystem, LVM headers, etc.) to the VDO volume prior
+ to stopping the volume, and leaving it deactivated
+ until ready to use.
+ type: bool
+ running:
+ description:
+ - Whether this VDO volume is running.
+ - A VDO volume must be activated in order to be started.
+ type: bool
+ device:
+ description:
+ - The full path of the device to use for VDO storage.
+ - This is required if "state" is "present".
+ type: str
+ logicalsize:
+ description:
+ - The logical size of the VDO volume (in megabytes, or
+ LVM suffix format). If not specified for a new volume,
+ this defaults to the same size as the underlying storage
+ device, which is specified in the 'device' parameter.
+ Existing volumes will maintain their size if the
+ logicalsize parameter is not specified, or is smaller
+ than or identical to the current size. If the specified
+ size is larger than the current size, a growlogical
+ operation will be performed.
+ type: str
+ deduplication:
+ description:
+ - Configures whether deduplication is enabled. The
+ default for a created volume is 'enabled'. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ compression:
+ description:
+ - Configures whether compression is enabled. The default
+ for a created volume is 'enabled'. Existing volumes
+ will maintain their previously configured setting unless
+ a different value is specified in the playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ blockmapcachesize:
+ description:
+ - The amount of memory allocated for caching block map
+ pages, in megabytes (or may be issued with an LVM-style
+ suffix of K, M, G, or T). The default (and minimum)
+ value is 128M. The value specifies the size of the
+ cache; there is a 15% memory usage overhead. Each 1.25G
+ of block map covers 1T of logical blocks, therefore a
+ small amount of block map cache memory can cache a
+ significantly large amount of block map data. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ readcache:
+ description:
+ - Enables or disables the read cache. The default is
+ 'disabled'. Choosing 'enabled' enables a read cache
+ which may improve performance for workloads of high
+ deduplication, read workloads with a high level of
+ compression, or on hard disk storage. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ choices: [ disabled, enabled ]
+ readcachesize:
+ description:
+ - Specifies the extra VDO device read cache size in
+ megabytes. This is in addition to a system-defined
+ minimum. Using a value with a suffix of K, M, G, or T
+ is optional. The default value is 0. 1.125 MB of
+ memory per bio thread will be used per 1 MB of read
+ cache specified (for example, a VDO volume configured
+ with 4 bio threads will have a read cache memory usage
+ overhead of 4.5 MB per 1 MB of read cache specified).
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ emulate512:
+ description:
+ - Enables 512-byte emulation mode, allowing drivers or
+ filesystems to access the VDO volume at 512-byte
+ granularity, instead of the default 4096-byte granularity.
+ Default is 'disabled'; only recommended when a driver
+ or filesystem requires 512-byte sector level access to
+ a device. This option is only available when creating
+ a new volume, and cannot be changed for an existing
+ volume.
+ type: bool
+ default: false
+ growphysical:
+ description:
+ - Specifies whether to attempt to execute a growphysical
+ operation, if there is enough unused space on the
+ device. A growphysical operation will be executed if
+ there is at least 64 GB of free space, relative to the
+ previous physical size of the affected VDO volume.
+ type: bool
+ default: false
+ slabsize:
+ description:
+ - The size of the increment by which the physical size of
+ a VDO volume is grown, in megabytes (or may be issued
+ with an LVM-style suffix of K, M, G, or T). Must be a
+ power of two between 128M and 32G. The default is 2G,
+ which supports volumes having a physical size up to 16T.
+ The maximum, 32G, supports a physical size of up to 256T.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ writepolicy:
+ description:
+ - Specifies the write policy of the VDO volume. The
+ 'sync' mode acknowledges writes only after data is on
+ stable storage. The 'async' mode acknowledges writes
+ when data has been cached for writing to stable
+ storage. The default (and highly recommended) 'auto'
+ mode checks the storage device to determine whether it
+ supports flushes. Devices that support flushes will
+ result in a VDO volume in 'async' mode, while devices
+ that do not support flushes will run in sync mode.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is
+ specified in the playbook.
+ type: str
+ choices: [ async, auto, sync ]
+ indexmem:
+ description:
+ - Specifies the amount of index memory in gigabytes. The
+ default is 0.25. The special decimal values 0.25, 0.5,
+ and 0.75 can be used, as can any positive integer.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ indexmode:
+ description:
+ - Specifies the index mode of the Albireo index. The
+ default is 'dense', which has a deduplication window of
+ 1 GB of index memory per 1 TB of incoming data,
+ requiring 10 GB of index data on persistent storage.
+ The 'sparse' mode has a deduplication window of 1 GB of
+ index memory per 10 TB of incoming data, but requires
+ 100 GB of index data on persistent storage. This option
+ is only available when creating a new volume, and cannot
+ be changed for an existing volume.
+ type: str
+ choices: [ dense, sparse ]
+ ackthreads:
+ description:
+ - Specifies the number of threads to use for
+ acknowledging completion of requested VDO I/O operations.
+ Valid values are integer values from 1 to 100 (lower
+ numbers are preferable due to overhead). The default is
+ 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ biothreads:
+ description:
+ - Specifies the number of threads to use for submitting I/O
+ operations to the storage device. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 4.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ cputhreads:
+ description:
+ - Specifies the number of threads to use for CPU-intensive
+ work such as hashing or compression. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 2.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ logicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on logical
+ block addresses. Valid values are integer values from
+ 1 to 100 (lower numbers are preferable due to overhead).
+ The default is 1. Existing volumes will maintain their
+ previously configured setting unless a different value
+ is specified in the playbook.
+ type: str
+ physicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on physical
+ block addresses. Valid values are integer values from
+ 1 to 16 (lower numbers are preferable due to overhead).
+ The physical space used by the VDO volume must be
+ larger than (slabsize * physicalthreads). The default
+ is 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+notes:
+ - In general, the default thread configuration should be used.
+requirements:
+ - PyYAML
+ - kmod-kvdo
+ - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+ community.general.vdo:
+ name: vdo1
+ state: present
+ device: /dev/md0
+ logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+ community.general.vdo:
+ name: vdo1
+ state: absent
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+ rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
+
+ # if rc != 0:
+ # module.fail_json(msg="Inventorying VDOs failed: %s"
+ # % vdostatusout, rc=rc, err=err)
+
+ vdolist = []
+
+ if (rc == 2 and
+ re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
+ # If there is no /etc/vdoconf.yml file, assume there are no
+ # VDO volumes. Return an empty list of VDO volumes.
+ return vdolist
+
+ if rc != 0:
+ module.fail_json(msg="Inventorying VDOs failed: %s"
+ % vdostatusout, rc=rc, err=err)
+
+    vdostatusyaml = yaml.safe_load(vdostatusout)
+ if vdostatusyaml is None:
+ return vdolist
+
+ vdoyamls = vdostatusyaml['VDOs']
+
+ if vdoyamls is not None:
+ vdolist = vdoyamls.keys()
+
+ return vdolist
+
+
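+# Generate a list of currently running VDO volumes, as reported by
+# 'vdo list'.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return runningvdolist A list of running VDO volumes.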
+def list_running_vdos(module, vdocmd):
+ rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
+    runningvdolist = list(filter(None, vdolistout.split('\n')))
+ return runningvdolist
+
+
+# Start a VDO volume with 'vdo start'.
+#
+# @param module The AnsibleModule object.
+# @param vdoname The name of the VDO volume to start.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return rc The return code of the 'vdo start' command.
+def start_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("started VDO volume %s" % vdoname)
+
+ return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("stopped VDO volume %s" % vdoname)
+
+ return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s activate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("activated VDO volume %s" % vdoname)
+
+ return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s deactivate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("deactivated VDO volume %s" % vdoname)
+
+ return rc
+
+
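+# Generate a string containing options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters, and their values
+# (values of 'None' and/or nonexistent values are ignored).
+#
+# @return vdocmdoptions A string to be used in a 'vdo <action>' command.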
+def add_vdooptions(params):
+ vdocmdoptions = ""
+ options = []
+
+ if ('logicalsize' in params) and (params['logicalsize'] is not None):
+ options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+ if (('blockmapcachesize' in params) and
+ (params['blockmapcachesize'] is not None)):
+ options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+ if ('readcache' in params) and (params['readcache'] == 'enabled'):
+ options.append("--readCache=enabled")
+
+ if ('readcachesize' in params) and (params['readcachesize'] is not None):
+ options.append("--readCacheSize=" + params['readcachesize'])
+
+ if ('slabsize' in params) and (params['slabsize'] is not None):
+ options.append("--vdoSlabSize=" + params['slabsize'])
+
+ if ('emulate512' in params) and (params['emulate512']):
+ options.append("--emulate512=enabled")
+
+ if ('indexmem' in params) and (params['indexmem'] is not None):
+ options.append("--indexMem=" + params['indexmem'])
+
+ if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
+ options.append("--sparseIndex=enabled")
+
+ # Entering an invalid thread config results in a cryptic
+ # 'Could not set up device mapper for %s' error from the 'vdo'
+ # command execution. The dmsetup module on the system will
+ # output a more helpful message, but one would have to log
+ # onto that system to read the error. For now, heed the thread
+ # limit warnings in the DOCUMENTATION section above.
+ if ('ackthreads' in params) and (params['ackthreads'] is not None):
+ options.append("--vdoAckThreads=" + params['ackthreads'])
+
+ if ('biothreads' in params) and (params['biothreads'] is not None):
+ options.append("--vdoBioThreads=" + params['biothreads'])
+
+ if ('cputhreads' in params) and (params['cputhreads'] is not None):
+ options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+ if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
+ options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+ if (('physicalthreads' in params) and
+ (params['physicalthreads'] is not None)):
+ options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+ vdocmdoptions = ' '.join(options)
+ return vdocmdoptions
+
+
+def run_module():
+
+ # Define the available arguments/parameters that a user can pass to
+ # the module.
+ # Defaults for VDO parameters are None, in order to facilitate
+ # the detection of parameters passed from the playbook.
+ # Creation param defaults are determined by the creation section.
+
+ module_args = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ activated=dict(type='bool'),
+ running=dict(type='bool'),
+ growphysical=dict(type='bool', default=False),
+ device=dict(type='str'),
+ logicalsize=dict(type='str'),
+ deduplication=dict(type='str', choices=['disabled', 'enabled']),
+ compression=dict(type='str', choices=['disabled', 'enabled']),
+ blockmapcachesize=dict(type='str'),
+ readcache=dict(type='str', choices=['disabled', 'enabled']),
+ readcachesize=dict(type='str'),
+ emulate512=dict(type='bool', default=False),
+ slabsize=dict(type='str'),
+ writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+ indexmem=dict(type='str'),
+ indexmode=dict(type='str', choices=['dense', 'sparse']),
+ ackthreads=dict(type='str'),
+ biothreads=dict(type='str'),
+ cputhreads=dict(type='str'),
+ logicalthreads=dict(type='str'),
+ physicalthreads=dict(type='str')
+ )
+
+ # Seed the result dictionary in the object. There will be an
+ # 'invocation' dictionary added with 'module_args' (arguments
+ # given).
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+ vdocmd = module.get_bin_path("vdo", required=True)
+ if not vdocmd:
+ module.fail_json(msg='VDO is not installed.', **result)
+
+ # Print a pre-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+
+ runningvdolist = list_running_vdos(module, vdocmd)
+
+ # Collect the name of the desired VDO volume, and its state. These will
+ # determine what to do.
+ desiredvdo = module.params['name']
+ state = module.params['state']
+
+ # Create a desired VDO volume that doesn't exist yet.
+ if (desiredvdo not in vdolist) and (state == 'present'):
+ device = module.params['device']
+ if device is None:
+ module.fail_json(msg="Creating a VDO volume requires specifying "
+ "a 'device' in the playbook.")
+
+ # Create a dictionary of the options from the AnsibleModule
+ # parameters, compile the vdo command options, and run "vdo create"
+ # with those options.
+        # Since this is a creation of a new VDO volume, it will contain all
+        # of the parameters given by the playbook; the rest will
+ # assume default values.
+ options = module.params
+ vdocmdoptions = add_vdooptions(options)
+ rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
+ % (vdocmd, desiredvdo, device,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Creating VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if (module.params['compression'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableCompression --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if ((module.params['deduplication'] is not None) and
+ module.params['deduplication'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+        if module.params['activated'] is False:
+ deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+ stop_vdo(module, desiredvdo, vdocmd)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("created VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # Modify the current parameters of a VDO that exists.
+ if (desiredvdo in vdolist) and (state == 'present'):
+ rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
+        vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+ # An empty dictionary to contain dictionaries of VDO statistics
+ processedvdos = {}
+
+ vdoyamls = vdostatusyaml['VDOs']
+ if vdoyamls is not None:
+ processedvdos = vdoyamls
+
+ # The 'vdo status' keys that are currently modifiable.
+ statusparamkeys = ['Acknowledgement threads',
+ 'Bio submission threads',
+ 'Block map cache size',
+ 'CPU-work threads',
+ 'Logical threads',
+ 'Physical threads',
+ 'Read cache',
+ 'Read cache size',
+ 'Configured write policy',
+ 'Compression',
+ 'Deduplication']
+
+ # A key translation table from 'vdo status' output to Ansible
+ # module parameters. This covers all of the 'vdo status'
+ # parameter keys that could be modified with the 'vdo'
+ # command.
+ vdokeytrans = {
+ 'Logical size': 'logicalsize',
+ 'Compression': 'compression',
+ 'Deduplication': 'deduplication',
+ 'Block map cache size': 'blockmapcachesize',
+ 'Read cache': 'readcache',
+ 'Read cache size': 'readcachesize',
+ 'Configured write policy': 'writepolicy',
+ 'Acknowledgement threads': 'ackthreads',
+ 'Bio submission threads': 'biothreads',
+ 'CPU-work threads': 'cputhreads',
+ 'Logical threads': 'logicalthreads',
+ 'Physical threads': 'physicalthreads'
+ }
+
+ # Build a dictionary of the current VDO status parameters, with
+ # the keys used by VDO. (These keys will be converted later.)
+ currentvdoparams = {}
+
+ # Build a "lookup table" dictionary containing a translation table
+ # of the parameters that can be modified
+ modtrans = {}
+
+ for statfield in statusparamkeys:
+ if statfield in processedvdos[desiredvdo]:
+ currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+ modtrans[statfield] = vdokeytrans[statfield]
+
+ # Build a dictionary of current parameters formatted with the
+ # same keys as the AnsibleModule parameters.
+ currentparams = {}
+ for paramkey in modtrans.keys():
+            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+ diffparams = {}
+
+ # Check for differences between the playbook parameters and the
+ # current parameters. This will need a comparison function;
+ # since AnsibleModule params are all strings, compare them as
+ # strings (but if it's None; skip).
+ for key in currentparams.keys():
+ if module.params[key] is not None:
+ if str(currentparams[key]) != module.params[key]:
+ diffparams[key] = module.params[key]
+
+ if diffparams:
+ vdocmdoptions = add_vdooptions(diffparams)
+ if vdocmdoptions:
+ rc, out, err = module.run_command("%s modify --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Modifying VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'deduplication' in diffparams.keys():
+ dedupemod = diffparams['deduplication']
+ if dedupemod == 'disabled':
+ rc, out, err = module.run_command("%s "
+ "disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if dedupemod == 'enabled':
+ rc, out, err = module.run_command("%s "
+ "enableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'compression' in diffparams.keys():
+ compressmod = diffparams['compression']
+ if compressmod == 'disabled':
+ rc, out, err = module.run_command("%s disableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if compressmod == 'enabled':
+ rc, out, err = module.run_command("%s enableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'writepolicy' in diffparams.keys():
+ writepolmod = diffparams['writepolicy']
+ if writepolmod == 'auto':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if writepolmod == 'sync':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if writepolmod == 'async':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+        # Process the size parameters, to determine if a growPhysical or
+ # growLogical operation needs to occur.
+ sizeparamkeys = ['Logical size', ]
+
+ currentsizeparams = {}
+ sizetrans = {}
+ for statfield in sizeparamkeys:
+ currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+ sizetrans[statfield] = vdokeytrans[statfield]
+
+ sizeparams = {}
+ for paramkey in currentsizeparams.keys():
+ sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+ diffsizeparams = {}
+ for key in sizeparams.keys():
+ if module.params[key] is not None:
+ if str(sizeparams[key]) != module.params[key]:
+ diffsizeparams[key] = module.params[key]
+
+ if module.params['growphysical']:
+ physdevice = module.params['device']
+ rc, devsectors, err = module.run_command("blockdev --getsz %s"
+ % (physdevice))
+ devblocks = (int(devsectors) / 8)
+ dmvdoname = ('/dev/mapper/' + desiredvdo)
+ currentvdostats = (processedvdos[desiredvdo]
+ ['VDO statistics']
+ [dmvdoname])
+ currentphysblocks = currentvdostats['physical blocks']
+
+ # Set a growPhysical threshold to grow only when there is
+ # guaranteed to be more than 2 slabs worth of unallocated
+ # space on the device to use. For now, set to device
+ # size + 64 GB, since 32 GB is the largest possible
+ # slab size.
+ growthresh = devblocks + 16777216
+
+ if currentphysblocks > growthresh:
+ result['changed'] = True
+ rc, out, err = module.run_command("%s growPhysical --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if 'logicalsize' in diffsizeparams.keys():
+ result['changed'] = True
+ vdocmdoptions = ("--vdoLogicalSize=" +
+ diffsizeparams['logicalsize'])
+ rc, out, err = module.run_command("%s growLogical --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+
+ vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        if ((module.params['activated'] is False) and
+                (vdoactivatestatus == 'enabled')):
+ deactivate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ if ((module.params['activated'] == 'yes') and
+ (vdoactivatestatus == 'disabled')):
+ activate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ if ((module.params['running'] == 'no') and
+ (desiredvdo in runningvdolist)):
+ stop_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Note that a disabled VDO volume cannot be started by the
+ # 'vdo start' command, by design. To accurately track changed
+ # status, don't try to start a disabled VDO volume.
+ # If the playbook contains 'activated: yes', assume that
+ # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+ # will have the activated status prior to the activate_vdo()
+ # call.
+ if (((vdoactivatestatus == 'enabled') or
+ (module.params['activated'] == 'yes')) and
+ (module.params['running'] == 'yes') and
+ (desiredvdo not in runningvdolist)):
+ start_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ if diffparams:
+ module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+ # Remove a desired VDO that currently exists.
+ if (desiredvdo in vdolist) and (state == 'absent'):
+ rc, out, err = module.run_command("%s remove --name=%s"
+ % (vdocmd, desiredvdo))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Removing VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("removed VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # fall through
+ # The state for the desired VDO volume was absent, and it does
+ # not exist. Print a post-run list of VDO volumes in the result
+ # object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("received request to remove non-existent VDO volume %s"
+ % desiredvdo)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py
new file mode 100644
index 00000000..8d0700ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfconf.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Joseph Benden <joe@benden.us>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf
+author:
+ - "Joseph Benden (@jbenden)"
+ - "Alexei Znamensky (@russoz)"
+short_description: Edit XFCE4 Configurations
+description:
+ - This module allows for the manipulation of Xfce 4 Configuration via
+ xfconf-query. Please see the xfconf-query(1) man pages for more details.
+options:
+ channel:
+ description:
+ - An Xfconf preference channel is a top-level tree key inside the
+ Xfconf repository that corresponds to the location where all
+ application properties/keys are stored. See man xfconf-query(1).
+ required: yes
+ type: str
+ property:
+ description:
+ - An Xfce preference key is an element in the Xfconf repository
+ that corresponds to an application preference. See man xfconf-query(1).
+ required: yes
+ type: str
+ value:
+ description:
+ - Preference properties typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". For array mode, use a list of values. See man xfconf-query(1)
+ type: list
+ elements: raw
+ value_type:
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ For array mode, use a list of types.
+ type: list
+ elements: str
+ choices: [ int, uint, bool, float, double, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the property/value.
+ choices: [ get, present, absent ]
+ default: "present"
+ force_array:
+ description:
+ - Force array even if there is only one element
+ type: bool
+ default: 'no'
+ aliases: ['array']
+ version_added: 1.0.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+ xfconf:
+ channel: "xsettings"
+ property: "/Xft/DPI"
+ value_type: "int"
+ value: "192"
+
+- name: Set workspace names (4)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main']
+ force_array: yes
+"""
+
+RETURN = '''
+ channel:
+ description: The channel specified in the module parameters
+ returned: success
+ type: str
+ sample: "xsettings"
+ property:
+ description: The property specified in the module parameters
+ returned: success
+ type: str
+ sample: "/Xft/DPI"
+ value_type:
+ description:
+ - The type of the value that was changed (C(none) for C(get) and C(reset)
+ state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"int" or ["str", "str", "str"]'
+ value:
+ description:
+ - The value of the preference key after executing the module. Either a
+ single string value or a list of strings for array types.
+ returned: success
+ type: string or list of strings
+ sample: '"192" or ["orange", "yellow", "violet"]'
+ previous_value:
+ description:
+ - The value of the preference key before executing the module (C(none) for
+ C(get) state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"96" or ["red", "blue", "green"]'
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ModuleHelper, CmdMixin, StateMixin, ArgFormat
+)
+
+
+def fix_bool(value):
+ vl = value.lower()
+ return vl if vl in ("true", "false") else value
+
+
+@ArgFormat.stars_deco(1)
+def values_fmt(values, value_types):
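+ # Emit one '--type <type>' / '--set <value>' pair per element, e.g.
+ # (['192'], ['int']) becomes ['--type', 'int', '--set', '192'].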
+ result = []
+ for value, value_type in zip(values, value_types):
+ if value_type == 'bool':
+ value = fix_bool(value)
+ result.append('--type')
+ result.append('{0}'.format(value_type))
+ result.append('--set')
+ result.append('{0}'.format(value))
+ return result
+
+
+class XFConfException(Exception):
+ pass
+
+
+class XFConfProperty(CmdMixin, StateMixin, ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(default="present",
+ choices=("present", "get", "absent"),
+ type='str'),
+ channel=dict(required=True, type='str'),
+ property=dict(required=True, type='str'),
+ value_type=dict(required=False, type='list',
+ elements='str', choices=('int', 'uint', 'bool', 'float', 'double', 'string')),
+ value=dict(required=False, type='list', elements='raw'),
+ force_array=dict(default=False, type='bool', aliases=['array']),
+ ),
+ required_if=[('state', 'present', ['value', 'value_type'])],
+ required_together=[('value', 'value_type')],
+ supports_check_mode=True,
+ )
+
+ facts_name = "xfconf"
+ default_state = 'present'
+ command = 'xfconf-query'
+ command_args_formats = dict(
+ channel=dict(fmt=('--channel', '{0}'),),
+ property=dict(fmt=('--property', '{0}'),),
+ is_array=dict(fmt="--force-array", style=ArgFormat.BOOLEAN),
+ reset=dict(fmt="--reset", style=ArgFormat.BOOLEAN),
+ create=dict(fmt="--create", style=ArgFormat.BOOLEAN),
+ values_and_types=dict(fmt=values_fmt)
+ )
+
+ def update_xfconf_output(self, **kwargs):
+ self.update_output(**kwargs)
+ self.update_facts(**kwargs)
+
+ def __init_module__(self):
+ self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'],
+ self.module.params['channel'])
+ self.vars.previous_value = self._get()
+ self.update_xfconf_output(property=self.module.params['property'],
+ channel=self.module.params['channel'],
+ previous_value=None)
+
+ def process_command_output(self, rc, out, err):
+ if err.rstrip() == self.does_not:
+ return None
+ if rc or len(err):
+ raise XFConfException('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
+ result.pop(0)
+ result.pop(0)
+
+ return result
+
+ @property
+ def changed(self):
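+ # Compare previous and new values as sets, so that re-ordering of
+ # array elements is not reported as a change.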
+ if self.vars.previous_value is None:
+ return self.vars.value is not None
+ elif self.vars.value is None:
+ return self.vars.previous_value is not None
+ else:
+ return set(self.vars.previous_value) != set(self.vars.value)
+
+ def _get(self):
+ return self.run_command(params=('channel', 'property'))
+
+ def state_get(self):
+ self.vars.value = self.vars.previous_value
+ self.update_xfconf_output(value=self.vars.value)
+
+ def state_absent(self):
+ self.vars.value = None
+ self.run_command(params=('channel', 'property', 'reset'), extra_params={"reset": True})
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=None)
+
+ def state_present(self):
+ # stringify all values - in the CLI they will all be happy strings anyway
+ # and by doing this here the rest of the code can be agnostic to it
+ self.vars.value = [str(v) for v in self.module.params['value']]
+ value_type = self.module.params['value_type']
+
+ values_len = len(self.vars.value)
+ types_len = len(value_type)
+
+ if types_len == 1:
+ # use one single type for the entire list
+ value_type = value_type * values_len
+ elif types_len != values_len:
+ # or complain if lists' lengths are different
+ raise XFConfException('Number of elements in "value" and "value_type" must be the same')
+
+ # fix boolean values
+ self.vars.value = [fix_bool(v[0]) if v[1] == 'bool' else v[0] for v in zip(self.vars.value, value_type)]
+
+ # determine whether the value should be treated as an array
+ self.vars.is_array = \
+ bool(self.module.params['force_array']) or \
+ isinstance(self.vars.previous_value, list) or \
+ values_len > 1
+
+ params = ['channel', 'property', 'create']
+ if self.vars.is_array:
+ params.append('is_array')
+ params.append('values_and_types')
+
+ extra_params = dict(values_and_types=(self.vars.value, value_type))
+ extra_params['create'] = True
+ extra_params['is_array'] = self.vars.is_array
+
+ if not self.module.check_mode:
+ self.run_command(params=params, extra_params=extra_params)
+
+ if not self.vars.is_array:
+ self.vars.value = self.vars.value[0]
+ value_type = value_type[0]
+
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=self.vars.value,
+ type=value_type)
+
+
+def main():
+ xfconf = XFConfProperty()
+ xfconf.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py
new file mode 100644
index 00000000..907f1bae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/system/xfs_quota.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
+# Copyright: (c) 2018, William Leemans <willie@elaba.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xfs_quota
+short_description: Manage quotas on XFS filesystems
+description:
+ - Configure quotas on XFS filesystems.
+ - Before using this module, /etc/projects and /etc/projid need to be configured.
+author:
+- William Leemans (@bushvin)
+options:
+ type:
+ description:
+ - The XFS quota type.
+ type: str
+ required: true
+ choices:
+ - user
+ - group
+ - project
+ name:
+ description:
+ - The name of the user, group or project to apply the quota to, if other than default.
+ type: str
+ mountpoint:
+ description:
+ - The mount point on which to apply the quotas.
+ type: str
+ required: true
+ bhard:
+ description:
+ - Hard blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ bsoft:
+ description:
+ - Soft blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ ihard:
+ description:
+ - Hard inodes quota limit.
+ type: int
+ isoft:
+ description:
+ - Soft inodes quota limit.
+ type: int
+ rtbhard:
+ description:
+ - Hard realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ rtbsoft:
+ description:
+ - Soft realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ state:
+ description:
+ - Whether to apply the limits or remove them.
+ - When removing limits, they are set to 0 rather than being fully removed.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+
+requirements:
+ - xfsprogs
+'''
+
+EXAMPLES = r'''
+- name: Set default project soft and hard limit on /opt of 1g
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ bsoft: 1g
+ bhard: 1g
+ state: present
+
+- name: Remove the default limits on /opt
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+ community.general.xfs_quota:
+ type: user
+ mountpoint: /home
+ isoft: 1024
+ ihard: 2048
+
+'''
+
+RETURN = r'''
+bhard:
+ description: the current bhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+bsoft:
+ description: the current bsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+ihard:
+ description: the current ihard setting in inodes
+ returned: always
+ type: int
+ sample: 100
+isoft:
+ description: the current isoft setting in inodes
+ returned: always
+ type: int
+ sample: 100
+rtbhard:
+ description: the current rtbhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+rtbsoft:
+ description: the current rtbsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+'''
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bhard=dict(type='str'),
+ bsoft=dict(type='str'),
+ ihard=dict(type='int'),
+ isoft=dict(type='int'),
+ mountpoint=dict(type='str', required=True),
+ name=dict(type='str'),
+ rtbhard=dict(type='str'),
+ rtbsoft=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ type=dict(type='str', required=True, choices=['group', 'project', 'user'])
+ ),
+ supports_check_mode=True,
+ )
+
+ quota_type = module.params['type']
+ name = module.params['name']
+ mountpoint = module.params['mountpoint']
+ bhard = module.params['bhard']
+ bsoft = module.params['bsoft']
+ ihard = module.params['ihard']
+ isoft = module.params['isoft']
+ rtbhard = module.params['rtbhard']
+ rtbsoft = module.params['rtbsoft']
+ state = module.params['state']
+
+ if bhard is not None:
+ bhard = human_to_bytes(bhard)
+
+ if bsoft is not None:
+ bsoft = human_to_bytes(bsoft)
+
+ if rtbhard is not None:
+ rtbhard = human_to_bytes(rtbhard)
+
+ if rtbsoft is not None:
+ rtbsoft = human_to_bytes(rtbsoft)
+
+ result = dict(
+ changed=False,
+ )
+
+ if not os.path.ismount(mountpoint):
+ module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+ mp = get_fs_by_mountpoint(mountpoint)
+ if mp is None:
+ module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
+
+ if quota_type == 'user':
+ type_arg = '-u'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \
+ 'qnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
+ )
+ try:
+ pwd.getpwnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'group':
+ type_arg = '-g'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
+ )
+ try:
+ grp.getgrnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'project':
+ type_arg = '-p'
+ quota_default = '#0'
+ if name is None:
+ name = quota_default
+
+ if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
+ module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projects'):
+ module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projid'):
+ module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+ if name != quota_default and name is not None and get_project_id(name) is None:
+ module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
+
+ prj_set = True
+ if name != quota_default:
+ cmd = 'project %s' % name
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not get project state.', **result)
+ else:
+ for line in stdout.split('\n'):
+ if "Project Id '%s' - is not set." in line:
+ prj_set = False
+ break
+
+ if not prj_set and not module.check_mode:
+ cmd = 'project -s'
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not setup project quotas.', **result)
+
+ result['changed'] = True
+
+ elif not prj_set and module.check_mode:
+ result['changed'] = True
+
+ # Set limits
+ if state == 'absent':
+ bhard = 0
+ bsoft = 0
+ ihard = 0
+ isoft = 0
+ rtbhard = 0
+ rtbsoft = 0
+
+ current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
+ current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
+ current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
+
+ result['xfs_quota'] = dict(
+ bsoft=current_bsoft,
+ bhard=current_bhard,
+ isoft=current_isoft,
+ ihard=current_ihard,
+ rtbsoft=current_rtbsoft,
+ rtbhard=current_rtbhard
+ )
+
+ limit = []
+ if bsoft is not None and int(bsoft) != current_bsoft:
+ limit.append('bsoft=%s' % bsoft)
+ result['bsoft'] = int(bsoft)
+
+ if bhard is not None and int(bhard) != current_bhard:
+ limit.append('bhard=%s' % bhard)
+ result['bhard'] = int(bhard)
+
+ if isoft is not None and isoft != current_isoft:
+ limit.append('isoft=%s' % isoft)
+ result['isoft'] = isoft
+
+ if ihard is not None and ihard != current_ihard:
+ limit.append('ihard=%s' % ihard)
+ result['ihard'] = ihard
+
+ if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+ limit.append('rtbsoft=%s' % rtbsoft)
+ result['rtbsoft'] = int(rtbsoft)
+
+ if rtbhard is not None and int(rtbhard) != current_rtbhard:
+ limit.append('rtbhard=%s' % rtbhard)
+ result['rtbhard'] = int(rtbhard)
+
+ if len(limit) > 0 and not module.check_mode:
+ if name == quota_default:
+ cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
+ else:
+ cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
+
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not set limits.', **result)
+
+ result['changed'] = True
+
+ elif len(limit) > 0 and module.check_mode:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def quota_report(module, mountpoint, name, quota_type, used_type):
+ soft = None
+ hard = None
+
+ if quota_type == 'project':
+ type_arg = '-p'
+ elif quota_type == 'user':
+ type_arg = '-u'
+ elif quota_type == 'group':
+ type_arg = '-g'
+
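+ # 'xfs_quota ... report' prints block and realtime-block usage in 1 KiB
+ # units and inode usage as plain counts; 'factor' converts the parsed
+ # soft/hard columns into the bytes/inodes returned by this module.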
+ if used_type == 'b':
+ used_arg = '-b'
+ used_name = 'blocks'
+ factor = 1024
+ elif used_type == 'i':
+ used_arg = '-i'
+ used_name = 'inodes'
+ factor = 1
+ elif used_type == 'rtb':
+ used_arg = '-r'
+ used_name = 'realtime blocks'
+ factor = 1024
+
+ rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
+
+ if rc != 0:
+ result = dict(
+ changed=False,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ module.fail_json(msg='Could not get quota report for %s.' % used_name, **result)
+
+ for line in stdout.split('\n'):
+ line = line.strip().split()
+ if len(line) > 3 and line[0] == name:
+ soft = int(line[2]) * factor
+ hard = int(line[3]) * factor
+ break
+
+ return soft, hard
+
+
+def exec_quota(module, cmd, mountpoint):
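+ # 'xfs_quota -x' enables expert mode (needed for administrative
+ # commands) and '-c' runs a single command against the given mount point.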
+ cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
+ (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
+ if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \
+ rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'):
+ module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
+
+ return rc, stdout, stderr
+
+
+def get_fs_by_mountpoint(mountpoint):
+ mpr = None
+ with open('/proc/mounts', 'r') as s:
+ for line in s.readlines():
+ mp = line.strip().split()
+ if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs':
+ mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp))
+ mpr['mntopts'] = mpr['mntopts'].split(',')
+ break
+ return mpr
+
+
+def get_project_id(name):
+ prjid = None
+ with open('/etc/projid', 'r') as s:
+ for line in s.readlines():
+ line = line.strip().partition(':')
+ if line[0] == name:
+ prjid = line[2]
+ break
+
+ return prjid
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py
new file mode 100644
index 00000000..a1956129
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/sysupgrade.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sysupgrade
+short_description: Manage OpenBSD system upgrades
+version_added: 1.1.0
+description:
+ - Manage OpenBSD system upgrades using sysupgrade.
+options:
+ snapshot:
+ description:
+ - Apply the latest snapshot.
+ - Otherwise release will be applied.
+ default: no
+ type: bool
+ force:
+ description:
+ - Force upgrade (for snapshots only).
+ default: no
+ type: bool
+ keep_files:
+ description:
+ - Keep the files under /home/_sysupgrade.
+ - By default, the files will be deleted after the upgrade.
+ default: no
+ type: bool
+ fetch_only:
+ description:
+ - Fetch and verify files and create /bsd.upgrade but do not reboot.
+ - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ default: yes
+ type: bool
+ installurl:
+ description:
+ - OpenBSD mirror top-level URL for fetching an upgrade.
+ - By default, the mirror URL is pulled from /etc/installurl.
+ type: str
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = r'''
+- name: Upgrade to latest release
+ community.general.sysupgrade:
+ register: sysupgrade
+
+- name: Upgrade to latest snapshot
+ community.general.sysupgrade:
+ snapshot: yes
+ installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD
+ register: sysupgrade
+
+- name: Reboot to apply upgrade if needed
+ ansible.builtin.reboot:
+ when: sysupgrade.changed
+
+# Note: Ansible will error when running this way due to how
+# the reboot is forcefully handled by sysupgrade:
+
+- name: Have sysupgrade automatically reboot
+ community.general.sysupgrade:
+ fetch_only: no
+ ignore_errors: yes
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+stdout:
+ description: Sysupgrade standard output.
+ returned: always
+ type: str
+stderr:
+ description: Sysupgrade standard error.
+ returned: always
+ type: str
+ sample: "sysupgrade: need root privileges"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sysupgrade_run(module):
+ sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
+ cmd = [sysupgrade_bin]
+ changed = False
+ warnings = []
+
+ # Setup command flags
+ if module.params['snapshot']:
+ run_flag = ['-s']
+ if module.params['force']:
+ # Force only applies to snapshots
+ run_flag.append('-f')
+ else:
+ # release flag
+ run_flag = ['-r']
+
+ if module.params['keep_files']:
+ run_flag.append('-k')
+
+ if module.params['fetch_only']:
+ run_flag.append('-n')
+
+ # installurl must be the last argument
+ if module.params['installurl']:
+ run_flag.append(module.params['installurl'])
+
+ rc, out, err = module.run_command(cmd + run_flag)
+
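+ # Derive the changed status from sysupgrade's output: it reports being
+ # already on the latest snapshot when there is nothing to do, and an
+ # upgrade on next reboot once /bsd.upgrade has been staged.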
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('already on latest snapshot') >= 0:
+ changed = False
+ elif out.lower().find('upgrade on next reboot') >= 0:
+ changed = True
+
+ return dict(
+ changed=changed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ snapshot=dict(type='bool', default=False),
+ fetch_only=dict(type='bool', default=True),
+ force=dict(type='bool', default=False),
+ keep_files=dict(type='bool', default=False),
+ installurl=dict(type='str'),
+ ),
+ supports_check_mode=False,
+ )
+ return_dict = sysupgrade_run(module)
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py
new file mode 100644
index 00000000..ae8f31c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/taiga_issue.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+options:
+ taiga_host:
+ type: str
+ description:
+ - The hostname of the Taiga instance.
+ default: https://api.taiga.io
+ project:
+ type: str
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ type: str
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ type: str
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ type: str
+ description:
+ - The issue priority. Must exist previously.
+ default: Normal
+ status:
+ type: str
+ description:
+ - The issue status. Must exist previously.
+ default: New
+ severity:
+ type: str
+ description:
+ - The issue severity. Must exist previously.
+ default: Normal
+ description:
+ type: str
+ description:
+ - The issue description.
+ default: ""
+ attachment:
+ type: path
+ description:
+ - Path to a file to be attached to the issue.
+ attachment_description:
+ type: str
+ description:
+ - A string describing the file to be attached to the issue.
+ default: ""
+ tags:
+ type: list
+ elements: str
+ description:
+ - A list of tags to be assigned to the issue.
+ default: []
+ state:
+ type: str
+ description:
+ - Whether the issue should be present or not.
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- Authentication is achieved either via the environment variable TAIGA_TOKEN or via the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD.
+'''
+
+EXAMPLES = '''
+- name: Create an issue in my hosted Taiga environment and attach an error log
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+- name: Deletes the previously created issue
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
+'''
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+TAIGA_IMP_ERR = None
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED = True
+except ImportError:
+ TAIGA_IMP_ERR = traceback.format_exc()
+ TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Method that creates/deletes issues depending on whether they exist and on the desired state
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+ if not any([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
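+ # Wrap filter() in list() so that len() and indexing below also work on
+ # Python 3, where filter() returns an iterator.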
+ project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
+ priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+ status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+ type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+ severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+ matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+ # If does not exist, do nothing
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException as exc:
+ msg = "An exception happened: %s" % to_native(exc)
+ return (False, changed, msg, {})
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ project=dict(type='str', required=True),
+ subject=dict(type='str', required=True),
+ issue_type=dict(type='str', required=True),
+ priority=dict(type='str', required=False, default="Normal"),
+ status=dict(type='str', required=False, default="New"),
+ severity=dict(type='str', required=False, default="Normal"),
+ description=dict(type='str', required=False, default=""),
+ attachment=dict(type='path', required=False, default=None),
+ attachment_description=dict(type='str', required=False, default=""),
+ tags=dict(required=False, default=[], type='list', elements='str'),
+ state=dict(type='str', required=False, choices=['present', 'absent'],
+ default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ module.fail_json(msg=missing_required_lib("python-taiga"),
+ exception=TAIGA_IMP_ERR)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py
new file mode 100644
index 00000000..c1ef841c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/telegram.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author: "Artem Feofanov (@tyouxa)"
+
+short_description: Module for sending notifications via Telegram
+
+description:
+ - Send notifications via a Telegram bot to a verified group or user.
+notes:
+ - You will require a Telegram account and need to create a Telegram bot to use this module.
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ msg_format:
+ type: str
+ description:
+ - Message format. Formatting options `markdown` and `html` are described in the
+ Telegram API docs (https://core.telegram.org/bots/api#formatting-options).
+ If option `plain` is set, the message will not be formatted.
+ default: plain
+ choices: [ "plain", "markdown", "html" ]
+ token:
+ type: str
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ chat_id:
+ type: str
+ description:
+ - Telegram group or user chat_id
+ required: true
+
+'''
+
+EXAMPLES = """
+
+- name: Send a message to chat in playbook
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ chat_id: 000000
+ msg: Ansible task finished
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send
+ returned: success
+ type: str
+ sample: "Ansible task finished"
+telegram_error:
+ description: Error message returned by the Telegram API
+ returned: failure
+ type: str
+ sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ chat_id=dict(type='str', required=True, no_log=True),
+ msg_format=dict(type='str', required=False, default='plain',
+ choices=['plain', 'markdown', 'html']),
+ msg=dict(type='str', required=True)),
+ supports_check_mode=True
+ )
+
+ token = quote(module.params.get('token'))
+ chat_id = quote(module.params.get('chat_id'))
+ msg_format = quote(module.params.get('msg_format'))
+ msg = quote(module.params.get('msg'))
+
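+ # Build the Bot API sendMessage URL; parse_mode is only appended for
+ # 'markdown' and 'html', since plain text needs no parse_mode parameter.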
+ url = 'https://api.telegram.org/bot' + token + \
+ '/sendMessage?text=' + msg + '&chat_id=' + chat_id
+ if msg_format in ('markdown', 'html'):
+ url += '&parse_mode=' + msg_format
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ body = json.loads(info['body'])
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']),
+ telegram_error=body['description'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py
new file mode 100644
index 00000000..680bab9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/terraform.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: terraform
+short_description: Manages a Terraform deployment (and plans)
+description:
+ - Provides support for deploying resources with Terraform and pulling
+ resource information back into Ansible.
+options:
+ state:
+ choices: ['planned', 'present', 'absent']
+ description:
+ - Goal state of the given stage/project.
+ type: str
+ default: present
+ binary_path:
+ description:
+ - The path of a terraform binary to use, relative to the 'project_path'
+ unless you supply an absolute path.
+ type: path
+ project_path:
+ description:
+ - The path to the root of the Terraform directory with the
+ vars.tf/main.tf/etc to use.
+ type: path
+ required: true
+ workspace:
+ description:
+ - The terraform workspace to work with.
+ type: str
+ default: default
+ purge_workspace:
+ description:
+ - Only works with state = absent
+ - If true, the workspace will be deleted after the "terraform destroy" action.
+ - The 'default' workspace will not be deleted.
+ default: false
+ type: bool
+ plan_file:
+ description:
+ - The path to an existing Terraform plan file to apply. If this is not
+ specified, Ansible will build a new TF plan and execute it.
+ Note that this option is required if 'state' has the 'planned' value.
+ type: path
+ state_file:
+ description:
+ - The path to an existing Terraform state file to use when building plan.
+ If this is not specified, the default `terraform.tfstate` will be used.
+ - This option is ignored when plan is specified.
+ type: path
+ variables_files:
+ description:
+ - The path to a variables file for Terraform to fill into the TF
+ configurations. This can accept a list of paths to multiple variables files.
+ - Up until Ansible 2.9, this option was usable as I(variables_file).
+ type: list
+ elements: path
+ aliases: [ 'variables_file' ]
+ variables:
+ description:
+ - A group of key-values to override template variables or those in
+ variables files.
+ type: dict
+ targets:
+ description:
+ - A list of specific resources to target in this plan/application. The
+ resources selected here will also auto-include any dependencies.
+ type: list
+ elements: str
+ lock:
+ description:
+ - Enable statefile locking, if you use a service that accepts locks (such
+ as S3+DynamoDB) to store your statefile.
+ type: bool
+ default: true
+ lock_timeout:
+ description:
+ - How long to maintain the lock on the statefile, if you use a service
+ that accepts locks (such as S3+DynamoDB).
+ type: int
+ force_init:
+ description:
+ - To avoid duplicating infra, if a state file can't be found this will
+ force a `terraform init`. Generally, this should be turned off unless
+ you intend to provision an entirely new Terraform deployment.
+ default: false
+ type: bool
+ backend_config:
+ description:
+ - A group of key-values to provide at init stage to the -backend-config parameter.
+ type: dict
+ backend_config_files:
+ description:
+ - The path to a configuration file to provide at init state to the -backend-config parameter.
+ This can accept a list of paths to multiple configuration files.
+ type: list
+ elements: path
+ version_added: '0.2.0'
+ init_reconfigure:
+ description:
+ - Forces backend reconfiguration during init.
+ default: false
+ type: bool
+ version_added: '1.3.0'
+notes:
+ - To just run a `terraform plan`, use check mode.
+requirements: [ "terraform" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+- name: Basic deploy of a service
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+
+- name: Define the backend configuration at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config:
+ region: "eu-west-1"
+ bucket: "some-bucket"
+ key: "random.tfstate"
+
+- name: Define the backend configuration with one or more files at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config_files:
+ - /path/to/backend_config_file_1
+ - /path/to/backend_config_file_2
+"""
+
+RETURN = """
+outputs:
+ type: complex
+ description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
+ returned: on success
+ sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
+ contains:
+ sensitive:
+ type: bool
+ returned: always
+ description: Whether Terraform has marked this value as sensitive
+ type:
+ type: str
+ returned: always
+ description: The type of the value (string, int, etc)
+ value:
+ type: str
+ returned: always
+ description: The value of the output as interpolated by Terraform
+stdout:
+ type: str
+ description: Full `terraform` command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: terraform apply ...
+"""
+
+import os
+import json
+import tempfile
+from ansible.module_utils.six.moves import shlex_quote
+
+from ansible.module_utils.basic import AnsibleModule
+
+DESTROY_ARGS = ('destroy', '-no-color', '-force')
+APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+module = None
+
+
+def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
+ if project_path in [None, ''] or '/' not in project_path:
+ module.fail_json(msg="Path for Terraform project can not be None or ''.")
+ if not os.path.exists(bin_path):
+ module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
+ if not os.path.isdir(project_path):
+ module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+
+ rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path, use_unsafe_shell=True)
+
+
+def _state_args(state_file):
+ if state_file and os.path.exists(state_file):
+ return ['-state', state_file]
+ if state_file and not os.path.exists(state_file):
+ module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
+ return []
+
+
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure):
+ command = [bin_path, 'init', '-input=false']
+ if backend_config:
+ for key, val in backend_config.items():
+ command.extend([
+ '-backend-config',
+ shlex_quote('{0}={1}'.format(key, val))
+ ])
+ if backend_config_files:
+ for f in backend_config_files:
+ command.extend(['-backend-config', f])
+ if init_reconfigure:
+ command.extend(['-reconfigure'])
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+
+
+def get_workspace_context(bin_path, project_path):
+ workspace_ctx = {"current": "default", "all": []}
+ command = [bin_path, 'workspace', 'list', '-no-color']
+ rc, out, err = module.run_command(command, cwd=project_path)
+ if rc != 0:
+ module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
+ for item in out.split('\n'):
+ stripped_item = item.strip()
+ if not stripped_item:
+ continue
+ elif stripped_item.startswith('* '):
+ workspace_ctx["current"] = stripped_item.replace('* ', '')
+ else:
+ workspace_ctx["all"].append(stripped_item)
+ return workspace_ctx
+
+
+def _workspace_cmd(bin_path, project_path, action, workspace):
+ command = [bin_path, 'workspace', action, workspace, '-no-color']
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ return rc, out, err
+
+
+def create_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'new', workspace)
+
+
+def select_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'select', workspace)
+
+
+def remove_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace)
+
+
+def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
+ if plan_path is None:
+ f, plan_path = tempfile.mkstemp(suffix='.tfplan')
+
+ plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
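+ # With -detailed-exitcode, 'terraform plan' exits 0 when there are no
+ # changes, 1 on error and 2 when changes are pending; the return code is
+ # interpreted below.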
+
+ for t in (module.params.get('targets') or []):
+ plan_command.extend(['-target', t])
+
+ plan_command.extend(_state_args(state_file))
+
+ rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
+
+ if rc == 0:
+ # no changes
+ return plan_path, False, out, err, plan_command if state == 'planned' else command
+ elif rc == 1:
+ # failure to plan
+ module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
+ elif rc == 2:
+ # changes, but successful
+ return plan_path, True, out, err, plan_command if state == 'planned' else command
+
+ module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_path=dict(required=True, type='path'),
+ binary_path=dict(type='path'),
+ workspace=dict(required=False, type='str', default='default'),
+ purge_workspace=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'planned']),
+ variables=dict(type='dict'),
+ variables_files=dict(aliases=['variables_file'], type='list', elements='path', default=None),
+ plan_file=dict(type='path'),
+ state_file=dict(type='path'),
+ targets=dict(type='list', elements='str', default=[]),
+ lock=dict(type='bool', default=True),
+ lock_timeout=dict(type='int',),
+ force_init=dict(type='bool', default=False),
+ backend_config=dict(type='dict', default=None),
+ backend_config_files=dict(type='list', elements='path', default=None),
+ init_reconfigure=dict(required=False, type='bool', default=False),
+ ),
+ required_if=[('state', 'planned', ['plan_file'])],
+ supports_check_mode=True,
+ )
+
+ project_path = module.params.get('project_path')
+ bin_path = module.params.get('binary_path')
+ workspace = module.params.get('workspace')
+ purge_workspace = module.params.get('purge_workspace')
+ state = module.params.get('state')
+ variables = module.params.get('variables') or {}
+ variables_files = module.params.get('variables_files')
+ plan_file = module.params.get('plan_file')
+ state_file = module.params.get('state_file')
+ force_init = module.params.get('force_init')
+ backend_config = module.params.get('backend_config')
+ backend_config_files = module.params.get('backend_config_files')
+ init_reconfigure = module.params.get('init_reconfigure')
+
+ if bin_path is not None:
+ command = [bin_path]
+ else:
+ command = [module.get_bin_path('terraform', required=True)]
+
+ if force_init:
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure)
+
+ workspace_ctx = get_workspace_context(command[0], project_path)
+ if workspace_ctx["current"] != workspace:
+ if workspace not in workspace_ctx["all"]:
+ create_workspace(command[0], project_path, workspace)
+ else:
+ select_workspace(command[0], project_path, workspace)
+
+ if state == 'present':
+ command.extend(APPLY_ARGS)
+ elif state == 'absent':
+ command.extend(DESTROY_ARGS)
+
+ variables_args = []
+ for k, v in variables.items():
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, v)
+ ])
+ if variables_files:
+ for f in variables_files:
+ variables_args.extend(['-var-file', f])
+
+ preflight_validation(command[0], project_path, variables_args)
+
+ if module.params.get('lock') is not None:
+ if module.params.get('lock'):
+ command.append('-lock=true')
+ else:
+ command.append('-lock=false')
+ if module.params.get('lock_timeout') is not None:
+ command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+
+ for t in (module.params.get('targets') or []):
+ command.extend(['-target', t])
+
+ # we aren't sure if this plan will result in changes, so assume yes
+ needs_application, changed = True, False
+
+ out, err = '', ''
+
+ if state == 'absent':
+ command.extend(variables_args)
+ elif state == 'present' and plan_file:
+ if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+ command.append(plan_file)
+ else:
+ module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+ else:
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, plan_file)
+ command.append(plan_file)
+
+ if needs_application and not module.check_mode and not state == 'planned':
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ # check the output to decide whether changes were made during execution
+ if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
+ changed = True
+
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
+ if rc == 1:
+ module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+ outputs = {}
+ elif rc != 0:
+ module.fail_json(
+ msg="Failure when getting Terraform outputs. "
+ "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+ command=' '.join(outputs_command))
+ else:
+ outputs = json.loads(outputs_text)
+
+ # Restore the Terraform workspace found when running the module
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ if state == 'absent' and workspace != 'default' and purge_workspace is True:
+ remove_workspace(command[0], project_path, workspace)
+
+ module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py
new file mode 100644
index 00000000..d10dd9bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/timezone.py
@@ -0,0 +1,905 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock.
+ If you want to set up the NTP, use M(ansible.builtin.service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux, it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+ On SmartOS, C(sm-set-timezone) is used; on macOS, C(systemsetup); on BSD, C(/etc/localtime) is modified.
+ On AIX, C(chtz) is used.
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+.
+ - Windows and HP-UX are not supported; please let us know if you find any other OS/distro in which this fails.
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock is required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+      - Note that changing this option is not recommended and may fail
+        to apply, especially in virtualized environments such as AWS.
+ - B(At least one of name and hwclock are required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+  - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the zone's timezone.
+  - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+  description: The differences for the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ community.general.timezone:
+ name: Asia/Tokyo
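+
+# A hedged illustration (not part of the original examples): also setting the
+# hardware clock to UTC on Linux via the documented hwclock option.
+- name: Set timezone to Asia/Tokyo and keep the hardware clock in UTC
+  community.general.timezone:
+    name: Asia/Tokyo
+    hwclock: UTC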
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+        It does not use load_platform_subclass() because it needs to decide
+        based on whether the `timedatectl` command exists and is usable.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ module.warn('timedatectl command was found but not usable: %s. using other method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, _) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+        # `self.value` holds the values for each param in each phase.
+        # Initially it only contains info for the "planned" phase; the
+        # `self.check()` function fills in the rest.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+        This is just a wrapper around module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+        This is just a wrapper around module.run_command().
+
+ Args:
+            *commands: The command parts to execute.
+                They will be concatenated with a single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+            phase1, phase2: The names of the phases to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
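+    # A hedged illustration (not from the original source) of the `timedatectl status`
+    # lines these regexps are meant to match, e.g.:
+    #   Time zone: Asia/Tokyo (JST, +0900)
+    #   RTC in local TZ: no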
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+    For timezone setting, it edits one of the following files and reflects the changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+    # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
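+    # A hedged illustration (not from the original source) of the lines written with
+    # these formats: ZONE="Asia/Tokyo" in /etc/sysconfig/clock on RHEL/CentOS,
+    # TIMEZONE="Asia/Tokyo" on SUSE, and a bare "Asia/Tokyo" line in /etc/timezone
+    # on Debian/Ubuntu.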
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['name'] = '/etc/timezone'
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ distribution = get_distribution()
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matched more than once, other than the first line will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+            key: The key for which the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+        # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+                # If we cannot find UTC/LOCAL in /etc/adjtime, that means UTC
+ # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+                    # In the 'before' phase the timezone name doesn't need to be
+                    # set in the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+            # If the planned value is the same as the one in the config file
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+ linktz = re.search(r'/usr/share/zoneinfo/(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+ # If /etc/localtime is not a symlink best we can do is compare it with
+ # the 'planned' zone info file and return 'n/a' if they are different.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+    implementations, uses the `systemsetup` command to check/set
+    the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+        # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+ zoneinfo_file = os.readlink(localtime_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, _, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+ # Now (somewhat) atomically update the symlink by creating a new
+ # symlink and move it into place. Otherwise we have to remove the
+ # original symlink and create the new symlink, however that would
+ # create a race condition in case another process tries to read
+ # /etc/localtime between removal and creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+    While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails; it does not check for
+            # valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo.
+            # This does mean that we can only support Olson for now. The commented-out regex
+            # below detects Olson timezone strings, so in the future we could detect POSIX or Olson and
+            # act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+            #     msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+            tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py
new file mode 100644
index 00000000..5ec995f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/twilio.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+  - This module is non-idempotent because it sends a text message through an
+    external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ type: str
+ description:
+      user's Twilio account SID found on the account page
+ required: true
+ auth_token:
+ type: str
+ description: user's Twilio authentication token
+ required: true
+ msg:
+ type: str
+ description:
+ the body of the text message
+ required: true
+ to_numbers:
+ type: list
+ description:
+ one or more phone numbers to send the text message to,
+ format +15551112222
+ required: true
+ aliases: [ to_number ]
+ from_number:
+ type: str
+ description:
+ the Twilio number to send the text message from, format +15551112222
+ required: true
+ media_url:
+ type: str
+ description:
+ a URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: All servers with webserver role are now configured.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: This server configuration is now complete.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15553258899
+ to_numbers:
+ - +15551113232
+ - +12025551235
+ - +19735559010
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: Deployment complete!
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ media_url: https://demo.twilio.com/logo.png
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_numbers=dict(required=True, aliases=['to_number'], type='list'),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_numbers = module.params['to_numbers']
+ media_url = module.params['media_url']
+
+ for number in to_numbers:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = module.from_json(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py
new file mode 100644
index 00000000..6f8e4e8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/typetalk.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: typetalk
+short_description: Send a message to Typetalk
+description:
+  - Send a message to Typetalk using the Typetalk API
+options:
+ client_id:
+ type: str
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ type: str
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ type: int
+ description:
+ - topic id to post message
+ required: true
+ msg:
+ type: str
+ description:
+ - message body
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to typetalk
+ community.general.typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
+ data = urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
+def get_access_token(module, client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
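+    # The endpoint is expected to return JSON containing at least an 'access_token'
+    # field (OAuth2 client_credentials grant), which is extracted below.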
+ res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.com/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError as e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+        module.fail_json(msg='failed to send message, response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py
new file mode 100644
index 00000000..db89bd46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_record.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage DNS entries on a Univention Corporate Server
+description:
+ - "This module allows to manage dns records on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+ - Univention
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns record is present or not.
+ name:
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ zone:
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ type:
+ required: true
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+ data:
+ required: false
+ default: []
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if C(state=present)."
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS record on a UCS
+ community.general.udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ a:
+ - 192.0.2.1
+ - 2001:0db8::42
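+
+# A hedged illustration (not part of the original examples): removing the same
+# record again via the documented state option.
+- name: Remove the DNS record again
+  community.general.udm_dns_record:
+    name: www
+    zone: example.com
+    type: host_record
+    state: absent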
+'''
+
+
+RETURN = '''#'''
+
+HAVE_UNIVENTION = False
+try:
+ from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+ )
+ HAVE_UNIVENTION = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ type='str'),
+ name=dict(required=True,
+ type='str'),
+ data=dict(default=[],
+ type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['data'])
+ ])
+ )
+
+ if not HAVE_UNIVENTION:
+ module.fail_json(msg="This module requires univention python bindings")
+
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, name),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
+ dn = 'relativeDomainName={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ )
+ obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['name'] = name
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+ changed = obj.diff() != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
new file mode 100644
index 00000000..2428650e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+ - "This module allows to manage dns zones on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the dns zone is present or not.
+ type:
+ required: true
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ - "The available choices are: C(forward_zone), C(reverse_zone)."
+ zone:
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ nameserver:
+ required: false
+ description:
+ - List of appropriate name servers. Required if C(state=present).
+ interfaces:
+ required: false
+ description:
+      - List of interface IP addresses on which the server should
+        respond for this zone. Required if C(state=present).
+
+ refresh:
+ required: false
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ required: false
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ required: false
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ required: false
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ required: false
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ required: false
+ default: []
+ description:
+      - List of MX servers. (Must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS zone on a UCS
+ community.general.udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
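+
+# A hedged illustration (not part of the original examples): removing the zone
+# again; nameserver and interfaces are only required when state=present.
+- name: Remove the DNS zone again
+  community.general.udm_dns_zone:
+    zone: example.com
+    type: forward_zone
+    state: absent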
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{0}'.format(time // unit[0]), unit[1])
+
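+# A hedged illustration (not part of the original module) of convert_time():
+#   convert_time(3600) -> ('1', 'hours')
+#   convert_time(90)   -> ('1', 'minutes')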
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list'),
+ interfaces=dict(default=[],
+ type='list'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{0}'.format(base_dn())
+ dn = 'zoneName={0},{1}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{0}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{0}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py
new file mode 100644
index 00000000..d2cf2aea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_group.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage POSIX groups on a Univention Corporate Server
+description:
+ - "This module allows to manage user groups on a univention corporate server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ type: str
+ description:
+ required: false
+ description:
+ - Group description.
+ type: str
+ position:
+ required: false
+ description:
+      - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ type: str
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ type: str
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+ type: str
+ default: "cn=groups"
+'''
+
+
+EXAMPLES = '''
+- name: Create a POSIX group
+ community.general.udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+
+# or
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(default=None,
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ group_dn = 'cn={0},{1}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+ changed = grp.diff() != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing group {0} in {1} failed".format(name, container)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing group {0} failed".format(name)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py
new file mode 100644
index 00000000..3e8fb207
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_share.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage Samba shares on a Univention Corporate Server
+description:
+ - "This module allows to manage samba shares on a univention corporate
+ server (UCS).
+ It uses the python API of the UCS to create a new object or edit it."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ type: str
+ name:
+ required: true
+ description:
+      - Name of the share.
+ type: str
+ host:
+ required: false
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if C(state=present).
+ type: str
+ path:
+ required: false
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if C(state=present).
+ type: path
+ sambaName:
+ required: false
+ description:
+ - Windows name. Required if C(state=present).
+ type: str
+ aliases: [ samba_name ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ type: str
+ owner:
+ default: '0'
+ description:
+ - Directory owner of the share's root directory.
+ type: str
+ group:
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ type: str
+ directorymode:
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ type: str
+ root_squash:
+ default: true
+ description:
+ - Modify user ID for root user (root squashing).
+ type: bool
+ subtree_checking:
+ default: true
+ description:
+ - Subtree checking.
+ type: bool
+ sync:
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ type: str
+ writeable:
+ default: true
+ description:
+ - NFS write access.
+ type: bool
+ sambaBlockSize:
+ description:
+ - Blocking size.
+ type: str
+ aliases: [ samba_block_size ]
+ sambaBlockingLocks:
+ default: true
+ description:
+ - Blocking locks.
+ type: bool
+ aliases: [ samba_blocking_locks ]
+ sambaBrowseable:
+ description:
+ - Show in Windows network environment.
+ type: bool
+ default: True
+ aliases: [ samba_browsable ]
+ sambaCreateMode:
+ default: '0744'
+ description:
+ - File mode.
+ type: str
+ aliases: [ samba_create_mode ]
+ sambaCscPolicy:
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ type: str
+ aliases: [ samba_csc_policy ]
+ sambaCustomSettings:
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ type: list
+ aliases: [ samba_custom_settings ]
+ sambaDirectoryMode:
+ default: '0755'
+ description:
+ - Directory mode.
+ type: str
+ aliases: [ samba_directory_mode ]
+ sambaDirectorySecurityMode:
+ default: '0777'
+ description:
+ - Directory security mode.
+ type: str
+ aliases: [ samba_directory_security_mode ]
+ sambaDosFilemode:
+ default: false
+ description:
+ - Users with write access may modify permissions.
+ type: bool
+ aliases: [ samba_dos_filemode ]
+ sambaFakeOplocks:
+ default: false
+ description:
+ - Fake oplocks.
+ type: bool
+ aliases: [ samba_fake_oplocks ]
+ sambaForceCreateMode:
+ default: false
+ description:
+ - Force file mode.
+ type: bool
+ aliases: [ samba_force_create_mode ]
+ sambaForceDirectoryMode:
+ default: false
+ description:
+ - Force directory mode.
+ type: bool
+ aliases: [ samba_force_directory_mode ]
+ sambaForceDirectorySecurityMode:
+ default: false
+ description:
+ - Force directory security mode.
+ type: bool
+ aliases: [ samba_force_directory_security_mode ]
+ sambaForceGroup:
+ description:
+ - Force group.
+ type: str
+ aliases: [ samba_force_group ]
+ sambaForceSecurityMode:
+ default: false
+ description:
+ - Force security mode.
+ type: bool
+ aliases: [ samba_force_security_mode ]
+ sambaForceUser:
+ description:
+ - Force user.
+ type: str
+ aliases: [ samba_force_user ]
+ sambaHideFiles:
+ description:
+ - Hide files.
+ type: str
+ aliases: [ samba_hide_files ]
+ sambaHideUnreadable:
+ default: false
+ description:
+ - Hide unreadable files/directories.
+ type: bool
+ aliases: [ samba_hide_unreadable ]
+ sambaHostsAllow:
+ default: []
+ description:
+ - Allowed host/network.
+ type: list
+ aliases: [ samba_hosts_allow ]
+ sambaHostsDeny:
+ default: []
+ description:
+ - Denied host/network.
+ type: list
+ aliases: [ samba_hosts_deny ]
+ sambaInheritAcls:
+ default: true
+ description:
+ - Inherit ACLs.
+ type: bool
+ aliases: [ samba_inherit_acls ]
+ sambaInheritOwner:
+ default: false
+ description:
+ - Create files/directories with the owner of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_owner ]
+ sambaInheritPermissions:
+ default: false
+ description:
+ - Create files/directories with permissions of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_permissions ]
+ sambaInvalidUsers:
+ description:
+ - Invalid users or groups.
+ type: str
+ aliases: [ samba_invalid_users ]
+ sambaLevel2Oplocks:
+ default: true
+ description:
+ - Level 2 oplocks.
+ type: bool
+ aliases: [ samba_level_2_oplocks ]
+ sambaLocking:
+ default: true
+ description:
+ - Locking.
+ type: bool
+ aliases: [ samba_locking ]
+ sambaMSDFSRoot:
+ default: false
+ description:
+ - MSDFS root.
+ type: bool
+ aliases: [ samba_msdfs_root ]
+ sambaNtAclSupport:
+ default: true
+ description:
+ - NT ACL support.
+ type: bool
+ aliases: [ samba_nt_acl_support ]
+ sambaOplocks:
+ default: true
+ description:
+ - Oplocks.
+ type: bool
+ aliases: [ samba_oplocks ]
+ sambaPostexec:
+ description:
+ - Postexec script.
+ type: str
+ aliases: [ samba_postexec ]
+ sambaPreexec:
+ description:
+ - Preexec script.
+ type: str
+ aliases: [ samba_preexec ]
+ sambaPublic:
+ default: false
+ description:
+ - Allow anonymous read-only access with a guest user.
+ type: bool
+ aliases: [ samba_public ]
+ sambaSecurityMode:
+ default: '0777'
+ description:
+ - Security mode.
+ type: str
+ aliases: [ samba_security_mode ]
+ sambaStrictLocking:
+ default: 'Auto'
+ description:
+ - Strict locking.
+ type: str
+ aliases: [ samba_strict_locking ]
+ sambaVFSObjects:
+ description:
+ - VFS objects.
+ type: str
+ aliases: [ samba_vfs_objects ]
+ sambaValidUsers:
+ description:
+ - Valid users or groups.
+ type: str
+ aliases: [ samba_valid_users ]
+ sambaWriteList:
+ description:
+ - Restrict write access to these users/groups.
+ type: str
+ aliases: [ samba_write_list ]
+ sambaWriteable:
+ default: true
+ description:
+ - Samba write access.
+ type: bool
+ aliases: [ samba_writeable ]
+ nfs_hosts:
+ default: []
+ description:
+ - Only allow access for this host, IP address or network.
+ type: list
+ nfsCustomSettings:
+ default: []
+ description:
+ - Option name in exports file.
+ type: list
+ aliases: [ nfs_custom_settings ]
+'''
+
+
+EXAMPLES = '''
+- name: Create a share named home on the server ucs.example.com with the path /home
+ community.general.udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
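+
+# A hypothetical removal example (the ou value is a placeholder):
+- name: Remove the home share again
+  community.general.udm_share:
+    name: home
+    ou: example
+    state: absent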
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path',
+ default=None),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str',
+ default=None),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size'],
+ default=None),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group'],
+ default=None),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user'],
+ default=None),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files'],
+ default=None),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users'],
+ default=None),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name'],
+ default=None),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec'],
+ default=None),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec'],
+ default=None),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects'],
+ default=None),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users'],
+ default=None),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list'],
+ default=None),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
+ dn = 'cn={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
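+            # The UDM API appears to expect string flags '1'/'0' for boolean options,
+            # so convert Python booleans before copying the module parameters onto the object.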
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as err:
+ module.fail_json(
+ msg='Removing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py
new file mode 100644
index 00000000..efbd95f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/udm_user.py
@@ -0,0 +1,542 @@
+#!/usr/bin/python
+# -*- coding: UTF-8 -*-
+
+# Copyright: (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: udm_user
+author:
+- Tobias Rüetschi (@keachi)
+short_description: Manage posix users on a Univention Corporate Server
+description:
+    - "This module allows you to manage posix users on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit an existing one."
+requirements:
+ - Python >= 2.6
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ type: str
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ type: str
+ firstname:
+ description:
+ - First name. Required if C(state=present).
+ type: str
+ lastname:
+ description:
+ - Last name. Required if C(state=present).
+ type: str
+ password:
+ description:
+ - Password. Required if C(state=present).
+ type: str
+ birthday:
+ description:
+ - Birthday
+ type: str
+ city:
+ description:
+            - City of the user's business address.
+ type: str
+ country:
+ description:
+            - Country of the user's business address.
+ type: str
+ department_number:
+ description:
+            - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ type: str
+ description:
+ description:
+ - Description (not gecos)
+ type: str
+ display_name:
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ type: str
+ email:
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ type: list
+ employee_number:
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ type: str
+ employee_type:
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ type: str
+ gecos:
+ description:
+ - GECOS
+ type: str
+ groups:
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ type: list
+ home_share:
+ description:
+ - "Home NFS share. Must be a LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ type: str
+ home_share_path:
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ type: str
+ home_telephone_number:
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ type: list
+ homedrive:
+ description:
+ - Windows home drive, e.g. C("H:").
+ type: str
+ mail_alternative_address:
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ type: list
+ mail_home_server:
+ description:
+ - FQDN of mail server
+ aliases: [ mailHomeServer ]
+ type: str
+ mail_primary_address:
+ description:
+ - Primary e-mail address
+ aliases: [ mailPrimaryAddress ]
+ type: str
+ mobile_telephone_number:
+ default: []
+ description:
+            - List of mobile phone numbers.
+ aliases: [ mobileTelephoneNumber ]
+ type: list
+ organisation:
+ description:
+ - Organisation
+ aliases: [ organization ]
+ type: str
+ overridePWHistory:
+ type: bool
+ default: 'no'
+ description:
+ - Override password history
+ aliases: [ override_pw_history ]
+ overridePWLength:
+ type: bool
+ default: 'no'
+ description:
+ - Override password check
+ aliases: [ override_pw_length ]
+ pager_telephonenumber:
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ type: list
+ phone:
+ description:
+ - List of telephone numbers.
+ type: list
+ postcode:
+ description:
+            - Postal code of the user's business address.
+ type: str
+ primary_group:
+ description:
+ - Primary group. This must be the group LDAP DN.
+ - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ aliases: [ primaryGroup ]
+ type: str
+ profilepath:
+ description:
+ - Windows profile directory
+ type: str
+ pwd_change_next_login:
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ type: str
+ room_number:
+ description:
+            - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ type: str
+ samba_privileges:
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ type: list
+ samba_user_workstations:
+ description:
+            - Allow authentication only on these Microsoft Windows hosts.
+ aliases: [ sambaUserWorkstations ]
+ type: list
+ sambahome:
+ description:
+ - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ type: str
+ scriptpath:
+ description:
+ - Windows logon script.
+ type: str
+ secretary:
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ type: list
+ serviceprovider:
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ type: list
+ shell:
+ default: '/bin/bash'
+ description:
+ - Login shell
+ type: str
+ street:
+ description:
+            - Street of the user's business address.
+ type: str
+ title:
+ description:
+ - Title, e.g. C(Prof.).
+ type: str
+ unixhome:
+ description:
+ - Unix home directory
+ - If not specified, it defaults to C(/home/$USERNAME).
+ type: str
+ userexpiry:
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ - If not specified, it defaults to the current day plus one year.
+ type: str
+ position:
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ type: str
+ update_password:
+ default: always
+ choices: [ always, on_create ]
+ description:
+ - "C(always) will update passwords if they differ.
+ C(on_create) will only set the password for newly created users."
+ type: str
+ ou:
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ type: str
+ subpath:
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create a user on a UCS
+ community.general.udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+
+# or define the position
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+import crypt
+from datetime import date, timedelta
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec=dict(
+ birthday=dict(type='str'),
+ city=dict(type='str'),
+ country=dict(type='str'),
+ department_number=dict(type='str',
+ aliases=['departmentNumber']),
+ description=dict(type='str'),
+ display_name=dict(type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list'),
+ employee_number=dict(type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(type='str',
+ aliases=['employeeType']),
+ firstname=dict(type='str'),
+ gecos=dict(type='str'),
+ groups=dict(default=[],
+ type='list'),
+ home_share=dict(type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(type='str'),
+ lastname=dict(type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(type='str',
+ aliases=['organization']),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ aliases=['pagerTelephonenumber']),
+ password=dict(type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list'),
+ postcode=dict(type='str'),
+ primary_group=dict(type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(type='str'),
+ pwd_change_next_login=dict(type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(type='str'),
+ scriptpath=dict(type='str'),
+ secretary=dict(default=[],
+ type='list'),
+ serviceprovider=dict(default=[''],
+ type='list'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(type='str'),
+ title=dict(type='str'),
+ unixhome=dict(type='str'),
+ userexpiry=dict(type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={0},{1}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{0} {1}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{0}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ if 'userexpiry' in obj and obj.get('userexpiry') is None:
+ obj['userexpiry'] = expiry
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ if module.params['update_password'] == 'always':
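+                # The stored hash appears to be of the form '{crypt}<hash>'; strip the
+                # prefix and re-encrypt the supplied password with the old hash as salt
+                # to detect whether the password actually changed.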
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing user {0} in {1} failed".format(
+ username,
+ container
+ )
+ )
+ try:
+ groups = module.params['groups']
+ if groups:
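+                # Build a single OR filter over all requested group names, e.g.
+                # (&(objectClass=posixGroup)(|(cn=g1)(cn=g2))), then add the user DN
+                # to every matching group.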
+ filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
+ ')(cn='.join(groups)
+ )
+ group_dns = list(ldap_search(filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Adding groups to user {0} failed".format(username)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing user {0} failed".format(username)
+ )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py
new file mode 100644
index 00000000..c6df6fe6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/ufw.py
@@ -0,0 +1,594 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright: (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ type: int
+ insert_relative_to:
+ description:
+      - Allows the index in I(insert) to be interpreted relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ rule:
+ description:
+ - Add firewall rule
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+ - Log new connections matched to this rule
+ type: bool
+ default: false
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+    aliases: [ dest, to ]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ type: bool
+ default: false
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+        I(interface_in) and I(interface_out) for routed rules that need
+        to supply both an input and an output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: '0.2.0'
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: '0.2.0'
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ default: false
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ community.general.ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ community.general.ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- community.general.ufw:
+ rule: reject
+ port: auth
+ log: yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- community.general.ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=yes
+# or a separate state=reset task)
+- community.general.ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ community.general.ufw:
+ rule: allow
+ name: OpenSSH
+ delete: yes
+
+- name: Deny all access to port 53
+ community.general.ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ community.general.ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ community.general.ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ community.general.ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ community.general.ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+  #  to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
+ community.general.ufw:
+ rule: deny
+ route: yes
+ src: 1.2.3.0/24
+ dest: 4.5.6.0/24
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by :
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-
+ valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=([command_keys]),
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
+ def filter_line_that_not_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+        return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
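+        # Each element of cmd is a list: either ['fragment'] or [condition, 'fragment'].
+        # Keep only the elements whose first item is truthy and join their last items
+        # into the final command line.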
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
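+        # ufw keeps its rules in user.rules/user6.rules (the path varies by distribution);
+        # every rule line starts with '### tuple', so grep those lines from the candidate
+        # files (errors from missing files are ignored).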
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
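+                    # Parse 'ufw status numbered': each rule line starts with '[ N]' and
+                    # IPv6 rules are marked with '(v6)', which is enough to compute the
+                    # absolute position for the insert.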
+                    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
+ has_ipv6 = any([ipv6 for (no, ipv6) in lines])
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+ # comment is supported only in ufw version after 0.35
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
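+                # In --dry-run mode ufw prints 'Skipping' lines for rules that already
+                # exist; if every output line is such a skip, the rule set would be
+                # left unchanged.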
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
+ # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py
new file mode 100644
index 00000000..bb4e60fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/uptimerobot.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+    - This module will let you start and pause Uptime Robot monitoring.
+author: "Nate Kingsley (@nate-kingsley)"
+requirements:
+ - Valid Uptime Robot API Key
+options:
+ state:
+ type: str
+ description:
+            - Define whether the monitor should be running or paused.
+ required: true
+ choices: [ "started", "paused" ]
+ monitorid:
+ type: str
+ description:
+ - ID of the monitor to check.
+ required: true
+ apikey:
+ type: str
+ description:
+ - Uptime Robot API key.
+ required: true
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+- name: Pause the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
+
+- name: Start the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+API_BASE = "https://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
+
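+    # Query the getMonitors endpoint and return the decoded JSON response;
+    # the caller inspects the 'stat' field to verify that the monitor ID exists.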
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
+
+
+def startMonitor(module, params):
+
+ params['monitorStatus'] = 1
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True, no_log=True),
+ monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py
new file mode 100644
index 00000000..9d54fbcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/urpmi.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+    - Manages packages with I(urpmi) (such as for Mageia or Mandriva).
+options:
+ name:
+ description:
+ - A list of package names to install, upgrade or remove.
+ required: yes
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update the package database first (C(urpmi.update -a)).
+ type: bool
+ default: no
+ aliases: ['update-cache']
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ type: bool
+ default: yes
+ aliases: ['no-recommends']
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ type: bool
+ default: yes
+ root:
+ description:
+ - Specifies an alternative install root, relative to which all packages will be installed.
+ Corresponds to the C(--root) option for I(urpmi).
+ aliases: [ installroot ]
+ type: str
+author:
+- Philippe Makowski (@pmakowski)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.urpmi:
+ pkg: foo
+ state: present
+
+- name: Remove package foo
+ community.general.urpmi:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.urpmi:
+ pkg: foo,bar
+ state: absent
+
+- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
+  community.general.urpmi:
+    name: bar
+    state: present
+    update_cache: yes
+'''
+
+
+import os
+import shlex
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+
+ urpmiupdate_path = module.get_bin_path("urpmi.update", True)
+ cmd = "%s -a -q" % (urpmiupdate_path,)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages, root):
+
+ remove_c = 0
+    # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package, root):
+ continue
+
+ urpme_path = module.get_bin_path("urpme", True)
+ cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, root, force=True, no_recommends=True):
+
+ packages = ""
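+    # Collect (quoted) names of only those packages that are not already provided
+    # on the system, so urpmi is invoked just for the missing ones.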
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ urpmi_path = module.get_bin_path("urpmi", True)
+ cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
+ no_recommends_yes,
+ root_option(root),
+ packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+
+        # urpmi always exits with 0 when --force is used
+ if rc:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def root_option(root):
+ if (root):
+ return "--root=%s" % (root)
+ else:
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False, aliases=['update-cache']),
+ force=dict(type='bool', default=True),
+ no_recommends=dict(type='bool', default=True, aliases=['no-recommends']),
+ name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']),
+ root=dict(type='str', aliases=['installroot']),
+ ),
+ )
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, p['name'], p['root'], p['force'], p['no_recommends'])
+
+ elif p['state'] in ['removed', 'absent']:
+ remove_packages(module, p['name'], p['root'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
new file mode 100644
index 00000000..b4aca155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy an aaa group object in Sophos UTM.
+
+description:
+ - Create, update or destroy an aaa group object in Sophos UTM.
+    - This module requires the REST interface of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ type: str
+ required: true
+ adirectory_groups:
+ description:
+ - List of adirectory group strings.
+ type: list
+ elements: str
+ adirectory_groups_sids:
+ description:
+ - Dictionary of group sids.
+ type: dict
+ backend_match:
+ description:
+ - The backend for the group.
+ type: str
+ choices:
+ - none
+ - adirectory
+ - edirectory
+ - radius
+ - tacacs
+ - ldap
+ default: none
+ comment:
+ description:
+ - Comment that describes the AAA group.
+ type: str
+ default: ''
+ dynamic:
+ description:
+          - Group type. The group is static if C(none) is selected.
+ type: str
+ default: none
+ choices:
+ - none
+ - ipsec_dn
+ - directory_groups
+ edirectory_groups:
+ description:
+ - List of edirectory group strings.
+ type: list
+ elements: str
+ ipsec_dn:
+ description:
+ - The ipsec dn string.
+ type: str
+ ldap_attribute:
+ description:
+ - The ldap attribute to check against.
+ type: str
+ ldap_attribute_value:
+ description:
+ - The ldap attribute value to check against.
+ type: str
+ members:
+ description:
+ - A list of user ref names (aaa/user).
+ type: list
+ elements: str
+ default: []
+ network:
+ description:
+          - The network reference name. The object contains the known IP addresses for the authentication object (network/aaa).
+ type: str
+ default: ""
+ radius_groups:
+ description:
+ - A list of radius group strings.
+ type: list
+ elements: str
+ default: []
+ tacacs_groups:
+ description:
+ - A list of tacacs group strings.
+ type: list
+ elements: str
+ default: []
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ backend_match: ldap
+ dynamic: directory_groups
+    ldap_attribute: memberof
+    ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+ network: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created.
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object.
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked.
+ type: bool
+ _type:
+ description: The type of the object.
+ type: str
+ name:
+ description: The name of the object.
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups.
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS.
+ type: list
+ backend_match:
+ description: The backend to use.
+ type: str
+ comment:
+ description: The comment string.
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group.
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups.
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match.
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against.
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against.
+ type: str
+ members:
+ description: List of member identifiers of the group.
+ type: list
+ network:
+ description: The identifier of the network (network/aaa).
+ type: str
+ radius_group:
+ description: The radius group identifier.
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+ "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+ "network", "radius_groups", "tacacs_groups"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ adirectory_groups_sids=dict(type='dict', required=False, default={}),
+ backend_match=dict(type='str', required=False, default="none",
+ choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+ comment=dict(type='str', required=False, default=""),
+ dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ ipsec_dn=dict(type='str', required=False, default=""),
+ ldap_attribute=dict(type='str', required=False, default=""),
+ ldap_attribute_value=dict(type='str', required=False, default=""),
+ members=dict(type='list', elements='str', required=False, default=[]),
+ network=dict(type='str', required=False, default=""),
+ radius_groups=dict(type='list', elements='str', required=False, default=[]),
+ tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
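A minimal follow-on sketch, assuming the return structure documented above; the registered variable name and the debug task are illustrative. The created group's _ref is what other UTM objects (for example a reverse_proxy auth profile's aaa list) would reference.

- name: Create UTM aaa_group and keep its reference
  community.general.utm_aaa_group:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestAAAGroupEntry
    backend_match: ldap
    state: present
  register: aaa_group_out     # illustrative variable name

- name: Show the reference name of the new group
  ansible.builtin.debug:
    msg: "{{ aaa_group_out.result._ref }}"   # result/_ref per the RETURN documentation above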
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
new file mode 100644
index 00000000..6d230c1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for an aaa group object in Sophos UTM
+
+description:
+    - Get info for an aaa group object in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM aaa_group
+ community.general.utm_aaa_group_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS
+ type: list
+ backend_match:
+ description: The backend to use
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against
+ type: str
+ members:
+ description: List of member identifiers of the group
+ type: list
+ network:
+ description: The identifier of the network (network/aaa)
+ type: str
+ radius_group:
+ description: The radius group identifier
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
new file mode 100644
index 00000000..e940f416
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy ca host_key_cert entry in Sophos UTM
+
+description:
+ - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ ca:
+ description:
+ - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ required: true
+ type: str
+ meta:
+ description:
+ - A reference to an existing utm_ca_meta_x509 object.
+ required: true
+ type: str
+ certificate:
+ description:
+ - The certificate in PEM format.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment string.
+ type: str
+ encrypted:
+ description:
+ - Optionally enable encryption.
+ default: False
+ type: bool
+ key:
+ description:
+ - Optional private key in PEM format.
+ type: str
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ ca: REF_ca/signing_ca_OBJECT_STRING
+ meta: REF_ca/meta_x509_OBJECT_STRING
+ certificate: |
+ --- BEGIN CERTIFICATE ---
+ . . .
+ . . .
+ . . .
+ --- END CERTIFICATE ---
+ state: present
+
+- name: Remove a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: absent
+
+- name: Read a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ca=dict(type='str', required=True),
+ meta=dict(type='str', required=True),
+ certificate=dict(type='str', required=True),
+ comment=dict(type='str', required=False),
+ encrypted=dict(type='bool', required=False, default=False),
+ key=dict(type='str', required=False, no_log=True),
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
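A sketch of supplying the PEM data from files on the Ansible controller instead of inlining it; the file paths are illustrative, and the file lookup is standard Ansible behaviour.

- name: Create a ca_host_key_cert entry from files on the controller
  community.general.utm_ca_host_key_cert:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestHostKeyCertEntry
    ca: REF_ca/signing_ca_OBJECT_STRING
    meta: REF_ca/meta_x509_OBJECT_STRING
    certificate: "{{ lookup('file', 'files/host.crt') }}"   # illustrative controller path
    key: "{{ lookup('file', 'files/host.key') }}"           # illustrative controller path
    state: present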
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
new file mode 100644
index 00000000..ad315df9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert_info
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Get info for a ca host_key_cert entry in Sophos UTM
+
+description:
+ - Get info for a ca host_key_cert entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for a ca host_key_cert entry
+ community.general.utm_ca_host_key_cert_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py
new file mode 100644
index 00000000..1f080abf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_dns_host.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_dns_host
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy dns entry in Sophos UTM
+
+description:
+ - Create, update or destroy a dns entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The IPV4 Address of the entry. Can be left empty for automatic resolving.
+ default: 0.0.0.0
+ address6:
+ type: str
+ description:
+ - The IPV6 Address of the entry. Can be left empty for automatic resolving.
+ default: "::"
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the dns host object
+ hostname:
+ type: str
+ description:
+ - The hostname for the dns host object
+ interface:
+ type: str
+ description:
+ - The reference name of the interface to use. If not provided the default interface will be used
+ resolved:
+ description:
+ - whether the hostname's ipv4 address is already resolved or not
+ default: False
+ type: bool
+ resolved6:
+ description:
+ - whether the hostname's ipv6 address is already resolved or not
+ default: False
+ type: bool
+ timeout:
+ type: int
+ description:
+ - the timeout for the utm to resolve the ip address for the hostname again
+ default: 0
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ hostname: testentry.some.tld
+ state: present
+
+- name: Remove UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ipv4 address of the object
+ type: str
+ address6:
+ description: The ipv6 address of the object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ hostname:
+ description: The hostname of the object
+ type: str
+ interface:
+ description: The reference name of the interface the object is associated with
+ type: str
+ resolved:
+ description: Whether the ipv4 address is resolved or not
+ type: bool
+ resolved6:
+ description: Whether the ipv6 address is resolved or not
+ type: bool
+ timeout:
+ description: The timeout until a new resolving will be attempted
+ type: int
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/dns_host"
+ key_to_check_for_changes = ["comment", "hostname", "interface"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=False, default='0.0.0.0'),
+ address6=dict(type='str', required=False, default='::'),
+ comment=dict(type='str', required=False, default=""),
+ hostname=dict(type='str', required=False),
+ interface=dict(type='str', required=False, default=""),
+ resolved=dict(type='bool', required=False, default=False),
+ resolved6=dict(type='bool', required=False, default=False),
+ timeout=dict(type='int', required=False, default=0),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
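A sketch showing the less obvious options of this module; the interface reference name is illustrative and the timeout unit is assumed to be seconds (the option documentation above does not state it).

- name: Create UTM dns host entry resolved via a specific interface
  community.general.utm_dns_host:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestDNSEntry
    hostname: testentry.some.tld
    interface: REF_InterfaceObject   # illustrative reference name
    timeout: 3600                    # unit assumed to be seconds
    state: present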
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
new file mode 100644
index 00000000..ecf08871
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+ - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The ip4 address of the network/interface_address object.
+ required: true
+ address6:
+ type: str
+ description:
+ - The ip6 address of the network/interface_address object.
+ required: false
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+  resolved:
+    type: bool
+    description:
+      - Whether or not the IPv4 address is resolved
+  resolved6:
+    type: bool
+    description:
+      - Whether or not the IPv6 address is resolved
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a network interface address
+  community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: present
+
+- name: Remove a network interface address
+  community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+        resolved:
+            description: Whether or not the IPv4 address is resolved
+            type: bool
+        resolved6:
+            description: Whether or not the IPv6 address is resolved
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = ["comment", "address"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=True),
+ comment=dict(type='str', required=False, default=""),
+ address6=dict(type='str', required=False),
+ resolved=dict(type='bool', required=False),
+ resolved6=dict(type='bool', required=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
new file mode 100644
index 00000000..c1d0f7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+ - Get info for a network/interface_address object in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get network interface address info
+  community.general.utm_network_interface_address_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+        resolved:
+            description: Whether or not the IPv4 address is resolved
+            type: bool
+        resolved6:
+            description: Whether or not the IPv6 address is resolved
+            type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
new file mode 100644
index 00000000..caa0085c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ aaa:
+ type: list
+ elements: str
+ description:
+ - List of references to utm_aaa objects (allowed users or groups)
+ required: true
+ basic_prompt:
+ type: str
+ description:
+ - The message in the basic authentication prompt
+ required: true
+ backend_mode:
+ type: str
+ description:
+ - Specifies if the backend server needs authentication ([Basic|None])
+ default: None
+ choices:
+ - Basic
+ - None
+ backend_strip_basic_auth:
+ description:
+ - Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ backend_user_prefix:
+ type: str
+ description:
+ - Prefix string to prepend to the username for backend authentication
+ default: ""
+ backend_user_suffix:
+ type: str
+ description:
+ - Suffix string to append to the username for backend authentication
+ default: ""
+ comment:
+ type: str
+ description:
+ - Optional comment string
+ default: ""
+ frontend_cookie:
+ type: str
+ description:
+ - Frontend cookie name
+ frontend_cookie_secret:
+ type: str
+ description:
+ - Frontend cookie secret
+ frontend_form:
+ type: str
+ description:
+ - Frontend authentication form name
+ frontend_form_template:
+ type: str
+ description:
+ - Frontend authentication form template
+ default: ""
+ frontend_login:
+ type: str
+ description:
+ - Frontend login name
+ frontend_logout:
+ type: str
+ description:
+ - Frontend logout name
+ frontend_mode:
+ type: str
+ description:
+ - Frontend authentication mode (Form|Basic)
+ default: Basic
+ choices:
+ - Basic
+ - Form
+ frontend_realm:
+ type: str
+ description:
+ - Frontend authentication realm
+ frontend_session_allow_persistency:
+ description:
+ - Allow session persistency
+ type: bool
+ default: False
+ choices:
+ - True
+ - False
+ frontend_session_lifetime:
+ type: int
+ description:
+ - session lifetime
+ required: true
+ frontend_session_lifetime_limited:
+ description:
+ - Specifies if limitation of session lifetime is active
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ frontend_session_lifetime_scope:
+ type: str
+ description:
+ - scope for frontend_session_lifetime (days|hours|minutes)
+ default: hours
+ choices:
+ - days
+ - hours
+ - minutes
+ frontend_session_timeout:
+ type: int
+ description:
+ - session timeout
+ required: true
+ frontend_session_timeout_enabled:
+ description:
+ - Specifies if session timeout is active
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ frontend_session_timeout_scope:
+ type: str
+ description:
+ - scope for frontend_session_timeout (days|hours|minutes)
+ default: minutes
+ choices:
+ - days
+ - hours
+ - minutes
+ logout_delegation_urls:
+ type: list
+ elements: str
+ description:
+ - List of logout URLs that logouts are delegated to
+ default: []
+ logout_mode:
+ type: str
+ description:
+ - Mode of logout (None|Delegation)
+ default: None
+ choices:
+ - None
+ - Delegation
+ redirect_to_requested_url:
+ description:
+ - Should a redirect to the requested URL be made
+ type: bool
+ default: False
+ choices:
+ - True
+ - False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING]
+ basic_prompt: "Authentication required: Please login"
+ frontend_session_lifetime: 1
+ frontend_session_timeout: 1
+ state: present
+
+- name: Remove UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: absent
+
+- name: Read UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ aaa:
+ description: List of references to utm_aaa objects (allowed users or groups)
+ type: list
+ basic_prompt:
+ description: The message in the basic authentication prompt
+ type: str
+ backend_mode:
+ description: Specifies if the backend server needs authentication ([Basic|None])
+ type: str
+ backend_strip_basic_auth:
+ description: Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ backend_user_prefix:
+ description: Prefix string to prepend to the username for backend authentication
+ type: str
+ backend_user_suffix:
+ description: Suffix string to append to the username for backend authentication
+ type: str
+ comment:
+ description: Optional comment string
+ type: str
+ frontend_cookie:
+ description: Frontend cookie name
+ type: str
+ frontend_form:
+ description: Frontend authentication form name
+ type: str
+ frontend_form_template:
+ description: Frontend authentication form template
+ type: str
+ frontend_login:
+ description: Frontend login name
+ type: str
+ frontend_logout:
+ description: Frontend logout name
+ type: str
+ frontend_mode:
+ description: Frontend authentication mode (Form|Basic)
+ type: str
+ frontend_realm:
+ description: Frontend authentication realm
+ type: str
+ frontend_session_allow_persistency:
+ description: Allow session persistency
+ type: bool
+ frontend_session_lifetime:
+ description: session lifetime
+ type: int
+ frontend_session_lifetime_limited:
+ description: Specifies if limitation of session lifetime is active
+ type: bool
+ frontend_session_lifetime_scope:
+ description: scope for frontend_session_lifetime (days|hours|minutes)
+ type: str
+ frontend_session_timeout:
+ description: session timeout
+ type: int
+ frontend_session_timeout_enabled:
+ description: Specifies if session timeout is active
+ type: bool
+ frontend_session_timeout_scope:
+ description: scope for frontend_session_timeout (days|hours|minutes)
+ type: str
+ logout_delegation_urls:
+ description: List of logout URLs that logouts are delegated to
+ type: list
+ logout_mode:
+ description: Mode of logout (None|Delegation)
+ type: str
+ redirect_to_requested_url:
+ description: Should a redirect to the requested URL be made
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/auth_profile"
+ key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth",
+ "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie",
+ "frontend_cookie_secret", "frontend_form", "frontend_form_template",
+ "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm",
+ "frontend_session_allow_persistency", "frontend_session_lifetime",
+ "frontend_session_lifetime_limited", "frontend_session_lifetime_scope",
+ "frontend_session_timeout", "frontend_session_timeout_enabled",
+ "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode",
+ "redirect_to_requested_url"]
+
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ aaa=dict(type='list', elements='str', required=True),
+ basic_prompt=dict(type='str', required=True),
+ backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', required=False, default=True, choices=[True, False]),
+ backend_user_prefix=dict(type='str', required=False, default=""),
+ backend_user_suffix=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ frontend_cookie=dict(type='str', required=False),
+ frontend_cookie_secret=dict(type='str', required=False, no_log=True),
+ frontend_form=dict(type='str', required=False),
+ frontend_form_template=dict(type='str', required=False, default=""),
+ frontend_login=dict(type='str', required=False),
+ frontend_logout=dict(type='str', required=False),
+ frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str', required=False),
+ frontend_session_allow_persistency=dict(type='bool', required=False, default=False, choices=[True, False]),
+ frontend_session_lifetime=dict(type='int', required=True),
+ frontend_session_lifetime_limited=dict(type='bool', required=False, default=True, choices=[True, False]),
+ frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_timeout=dict(type='int', required=True),
+ frontend_session_timeout_enabled=dict(type='bool', required=False, default=True, choices=[True, False]),
+ frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
+ logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', required=False, default=False, choices=[True, False])
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
new file mode 100644
index 00000000..ed241af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_exception
+
+author:
+ - Sebastian Schenzel (@RickS-C137)
+
+short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: True
+ type: str
+ op:
+ description:
+ - The operand to be used with the entries of the path parameter
+ default: 'AND'
+ choices:
+ - 'AND'
+ - 'OR'
+ required: False
+ type: str
+ path:
+ description:
+ - The paths the exception in the reverse proxy is defined for
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_custom_threats_filters:
+ description:
+ - A list of threats to be skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_threats_filter_categories:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skipav:
+ description:
+ - Skip the Antivirus Scanning
+ default: False
+ type: bool
+ required: False
+ skipbadclients:
+ description:
+ - Block clients with bad reputation
+ default: False
+ type: bool
+ required: False
+ skipcookie:
+ description:
+ - Skip the Cookie Signing check
+ default: False
+ type: bool
+ required: False
+ skipform:
+ description:
+ - Enable form hardening
+ default: False
+ type: bool
+ required: False
+ skipform_missingtoken:
+ description:
+ - Enable form hardening with missing tokens
+ default: False
+ type: bool
+ required: False
+ skiphtmlrewrite:
+ description:
+ - Protection against SQL
+ default: False
+ type: bool
+ required: False
+ skiptft:
+ description:
+ - Enable true file type control
+ default: False
+ type: bool
+ required: False
+ skipurl:
+ description:
+ - Enable static URL hardening
+ default: False
+ type: bool
+ required: False
+    source:
+        description:
+          - A list of source network references for which the exception applies
+ type: list
+ elements: str
+ default: []
+ required: False
+ status:
+ description:
+ - Status of the exception rule set
+ default: True
+ type: bool
+ required: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ comment:
+ description: The optional comment string
+ type: str
+ op:
+ description: The operand to be used with the entries of the path parameter
+ type: str
+ path:
+ description: The paths the exception in the reverse proxy is defined for
+ type: list
+ skip_custom_threats_filters:
+ description: A list of threats to be skipped
+ type: list
+ skip_threats_filter_categories:
+ description: Define which categories of threats are skipped
+ type: list
+ skipav:
+ description: Skip the Antivirus Scanning
+ type: bool
+ skipbadclients:
+ description: Block clients with bad reputation
+ type: bool
+ skipcookie:
+ description: Skip the Cookie Signing check
+ type: bool
+ skipform:
+ description: Enable form hardening
+ type: bool
+ skipform_missingtoken:
+ description: Enable form hardening with missing tokens
+ type: bool
+ skiphtmlrewrite:
+ description: Protection against SQL
+ type: bool
+ skiptft:
+ description: Enable true file type control
+ type: bool
+ skipurl:
+ description: Enable static URL hardening
+ type: bool
+        source:
+            description: The source network references for which the exception applies
+            type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/exception"
+ key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+ "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+ "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', required=False, default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+ skipav=dict(type='bool', required=False, default=False),
+ skipbadclients=dict(type='bool', required=False, default=False),
+ skipcookie=dict(type='bool', required=False, default=False),
+ skipform=dict(type='bool', required=False, default=False),
+ skipform_missingtoken=dict(type='bool', required=False, default=False),
+ skiphtmlrewrite=dict(type='bool', required=False, default=False),
+ skiptft=dict(type='bool', required=False, default=False),
+ skipurl=dict(type='bool', required=False, default=False),
+ source=dict(type='list', elements='str', required=False, default=[]),
+ status=dict(type='bool', required=False, default=True),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
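A sketch of a typical exception that relaxes scanning for static content; the name and path entry are illustrative, and whether UTM treats the entries as regular expressions is not stated in the option documentation above.

- name: Create UTM proxy_exception that skips antivirus scanning for static content
  community.general.utm_proxy_exception:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: StaticContentException
    op: AND
    path:
      - /static/      # illustrative path entry
    skipav: true
    state: present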
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
new file mode 100644
index 00000000..8dba3640
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+    add_content_type_header:
+ description:
+ - Whether to add the content type header or not
+ type: bool
+ default: False
+ address:
+ type: str
+ description:
+ - The reference name of the network/interface_address object.
+ default: REF_DefaultInternalAddress
+ allowed_networks:
+ type: list
+ elements: str
+ description:
+ - A list of reference names for the allowed networks.
+ default: ['REF_NetworkAny']
+ certificate:
+ type: str
+ description:
+ - The reference name of the ca/host_key_cert object.
+ default: ""
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ default: ""
+    disable_compression:
+        description:
+          - Whether to disable the compression
+ type: bool
+ default: False
+ domain:
+ type: list
+ elements: str
+ description:
+ - A list of domain names for the frontend object
+ exceptions:
+ type: list
+ elements: str
+ description:
+ - A list of exception ref names (reverse_proxy/exception)
+ default: []
+ htmlrewrite:
+ description:
+ - Whether to enable html rewrite or not
+ type: bool
+ default: False
+ htmlrewrite_cookies:
+ description:
+ - Whether to enable html rewrite cookie or not
+ type: bool
+ default: False
+ implicitredirect:
+ description:
+ - Whether to enable implicit redirection or not
+ type: bool
+ default: False
+ lbmethod:
+ type: str
+ description:
+ - Which loadbalancer method should be used
+ choices:
+ - ""
+ - bybusyness
+ - bytraffic
+ - byrequests
+ default: bybusyness
+ locations:
+ type: list
+ elements: str
+ description:
+ - A list of location ref names (reverse_proxy/location)
+ default: []
+ port:
+ type: int
+ description:
+ - The frontend http port
+ default: 80
+ preservehost:
+ description:
+ - Whether to preserve host header
+ type: bool
+ default: False
+ profile:
+ type: str
+ description:
+ - The reference string of the reverse_proxy/profile
+ default: ""
+ status:
+ description:
+ - Whether to activate the frontend entry or not
+ type: bool
+ default: True
+ type:
+ type: str
+ description:
+ - Which protocol should be used
+ choices:
+ - http
+ - https
+ default: http
+ xheaders:
+ description:
+ - Whether to pass the host header or not
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ host: REF_OBJECT_STRING
+ state: present
+
+- name: Remove utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+ "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+ "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+ "port", "preservehost", "profile", "status", "type", "xheaders"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ add_content_type_header=dict(type='bool', required=False, default=False),
+ address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+ certificate=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ disable_compression=dict(type='bool', required=False, default=False),
+ domain=dict(type='list', elements='str', required=False),
+ exceptions=dict(type='list', elements='str', required=False, default=[]),
+ htmlrewrite=dict(type='bool', required=False, default=False),
+ htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+ implicitredirect=dict(type='bool', required=False, default=False),
+ lbmethod=dict(type='str', required=False, default="bybusyness",
+ choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+ locations=dict(type='list', elements='str', required=False, default=[]),
+ port=dict(type='int', required=False, default=80),
+ preservehost=dict(type='bool', required=False, default=False),
+ profile=dict(type='str', required=False, default=""),
+ status=dict(type='bool', required=False, default=True),
+ type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
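A sketch of an HTTPS frontend that ties the objects from this patch together; every REF_... value is an illustrative reference name (certificate would point at a ca/host_key_cert object, locations at reverse_proxy/location objects).

- name: Create an HTTPS UTM proxy_frontend
  community.general.utm_proxy_frontend:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestHttpsFrontendEntry
    type: https
    port: 443
    certificate: REF_CaHostKeyCertObject   # illustrative reference name
    domain:
      - www.example.com
    locations:
      - REF_LocationObject                 # illustrative reference name
    state: present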
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
new file mode 100644
index 00000000..450bd161
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Get info for a reverse_proxy frontend entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+ community.general.utm_proxy_frontend_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
new file mode 100644
index 00000000..7c4bc8b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ access_control:
+ description:
+ - whether to activate the access control for the location
+ type: str
+ default: '0'
+ choices:
+ - '0'
+ - '1'
+ allowed_networks:
+ description:
+ - A list of allowed networks
+ type: list
+ elements: str
+    default: ['REF_NetworkAny']
+ auth_profile:
+ type: str
+ description:
+ - The reference name of the auth profile
+ backend:
+ type: list
+ elements: str
+ description:
+ - A list of backends that are connected with this location declaration
+ default: []
+ be_path:
+ type: str
+ description:
+ - The path of the backend
+ comment:
+ type: str
+ description:
+ - The optional comment string
+ denied_networks:
+ type: list
+ elements: str
+ description:
+ - A list of denied network references
+ default: []
+ hot_standby:
+ description:
+ - Activate hot standby mode
+ type: bool
+ default: False
+ path:
+ type: str
+ description:
+ - The path of the location
+ default: "/"
+ status:
+ description:
+ - Whether the location is active or not
+ type: bool
+ default: True
+ stickysession_id:
+ type: str
+ description:
+ - The stickysession id
+ default: ROUTEID
+ stickysession_status:
+ description:
+ - Enable the stickysession
+ type: bool
+ default: False
+ websocket_passthrough:
+ description:
+ - Enable the websocket passthrough
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: Whether to use access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The backend reference name
+ type: str
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+      description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+ "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+ "stickysession_status", "websocket_passthrough"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', required=False, default=""),
+ backend=dict(type='list', elements='str', required=False, default=[]),
+ be_path=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ denied_networks=dict(type='list', elements='str', required=False, default=[]),
+ hot_standby=dict(type='bool', required=False, default=False),
+ path=dict(type='str', required=False, default="/"),
+ status=dict(type='bool', required=False, default=True),
+ stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+ stickysession_status=dict(type='bool', required=False, default=False),
+ websocket_passthrough=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
new file mode 100644
index 00000000..1125c4fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for reverse_proxy location entry in Sophos UTM
+
+description:
+  - Get info for a reverse_proxy location entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM proxy_location
+ community.general.utm_proxy_location_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
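+
+# Additional illustrative example (a sketch): register the result and print
+# the object described in the RETURN section below.
+- name: Get UTM proxy_location and register the result
+  community.general.utm_proxy_location_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+  register: utm_proxy_location_result
+
+- name: Show the location entry
+  ansible.builtin.debug:
+    var: utm_proxy_location_result.result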
+"""
+
+RETURN = """
+result:
+  description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: Whether to use access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The backend reference name
+ type: str
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+      description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py
new file mode 100644
index 00000000..15fd9c62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vdo.py
@@ -0,0 +1,866 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+
+description:
+ - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that
+ provides inline block-level deduplication, compression, and
+ thin provisioning capabilities to primary storage.
+
+options:
+ name:
+ description:
+ - The name of the VDO volume.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether this VDO volume should be "present" or "absent".
+ If a "present" VDO volume does not exist, it will be
+ created. If a "present" VDO volume already exists, it
+ will be modified, by updating the configuration, which
+ will take effect when the VDO volume is restarted.
+ Not all parameters of an existing VDO volume can be
+ modified; the "statusparamkeys" list contains the
+ parameters that can be modified after creation. If an
+ "absent" VDO volume does not exist, it will not be
+ removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ activated:
+ description:
+ - The "activate" status for a VDO volume. If this is set
+ to "no", the VDO volume cannot be started, and it will
+ not start on system startup. However, on initial
+        creation, a VDO volume with "activated" set to "no"
+ will be running, until stopped. This is the default
+ behavior of the "vdo create" command; it provides the
+ user an opportunity to write a base amount of metadata
+ (filesystem, LVM headers, etc.) to the VDO volume prior
+ to stopping the volume, and leaving it deactivated
+ until ready to use.
+ type: bool
+ running:
+ description:
+ - Whether this VDO volume is running.
+ - A VDO volume must be activated in order to be started.
+ type: bool
+ device:
+ description:
+ - The full path of the device to use for VDO storage.
+ - This is required if "state" is "present".
+ type: str
+ logicalsize:
+ description:
+ - The logical size of the VDO volume (in megabytes, or
+ LVM suffix format). If not specified for a new volume,
+ this defaults to the same size as the underlying storage
+ device, which is specified in the 'device' parameter.
+ Existing volumes will maintain their size if the
+ logicalsize parameter is not specified, or is smaller
+ than or identical to the current size. If the specified
+ size is larger than the current size, a growlogical
+ operation will be performed.
+ type: str
+ deduplication:
+ description:
+ - Configures whether deduplication is enabled. The
+ default for a created volume is 'enabled'. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ compression:
+ description:
+ - Configures whether compression is enabled. The default
+ for a created volume is 'enabled'. Existing volumes
+ will maintain their previously configured setting unless
+ a different value is specified in the playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ blockmapcachesize:
+ description:
+ - The amount of memory allocated for caching block map
+ pages, in megabytes (or may be issued with an LVM-style
+ suffix of K, M, G, or T). The default (and minimum)
+ value is 128M. The value specifies the size of the
+ cache; there is a 15% memory usage overhead. Each 1.25G
+ of block map covers 1T of logical blocks, therefore a
+ small amount of block map cache memory can cache a
+ significantly large amount of block map data. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ readcache:
+ description:
+ - Enables or disables the read cache. The default is
+ 'disabled'. Choosing 'enabled' enables a read cache
+ which may improve performance for workloads of high
+ deduplication, read workloads with a high level of
+ compression, or on hard disk storage. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ choices: [ disabled, enabled ]
+ readcachesize:
+ description:
+ - Specifies the extra VDO device read cache size in
+ megabytes. This is in addition to a system-defined
+ minimum. Using a value with a suffix of K, M, G, or T
+ is optional. The default value is 0. 1.125 MB of
+ memory per bio thread will be used per 1 MB of read
+ cache specified (for example, a VDO volume configured
+ with 4 bio threads will have a read cache memory usage
+ overhead of 4.5 MB per 1 MB of read cache specified).
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ emulate512:
+ description:
+ - Enables 512-byte emulation mode, allowing drivers or
+ filesystems to access the VDO volume at 512-byte
+ granularity, instead of the default 4096-byte granularity.
+ Default is 'disabled'; only recommended when a driver
+ or filesystem requires 512-byte sector level access to
+ a device. This option is only available when creating
+ a new volume, and cannot be changed for an existing
+ volume.
+ type: bool
+ default: false
+ growphysical:
+ description:
+ - Specifies whether to attempt to execute a growphysical
+ operation, if there is enough unused space on the
+ device. A growphysical operation will be executed if
+ there is at least 64 GB of free space, relative to the
+ previous physical size of the affected VDO volume.
+ type: bool
+ default: false
+ slabsize:
+ description:
+ - The size of the increment by which the physical size of
+ a VDO volume is grown, in megabytes (or may be issued
+ with an LVM-style suffix of K, M, G, or T). Must be a
+ power of two between 128M and 32G. The default is 2G,
+ which supports volumes having a physical size up to 16T.
+ The maximum, 32G, supports a physical size of up to 256T.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ writepolicy:
+ description:
+ - Specifies the write policy of the VDO volume. The
+ 'sync' mode acknowledges writes only after data is on
+ stable storage. The 'async' mode acknowledges writes
+ when data has been cached for writing to stable
+ storage. The default (and highly recommended) 'auto'
+ mode checks the storage device to determine whether it
+ supports flushes. Devices that support flushes will
+ result in a VDO volume in 'async' mode, while devices
+ that do not support flushes will run in sync mode.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is
+ specified in the playbook.
+ type: str
+ choices: [ async, auto, sync ]
+ indexmem:
+ description:
+ - Specifies the amount of index memory in gigabytes. The
+ default is 0.25. The special decimal values 0.25, 0.5,
+ and 0.75 can be used, as can any positive integer.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ indexmode:
+ description:
+ - Specifies the index mode of the Albireo index. The
+ default is 'dense', which has a deduplication window of
+ 1 GB of index memory per 1 TB of incoming data,
+ requiring 10 GB of index data on persistent storage.
+ The 'sparse' mode has a deduplication window of 1 GB of
+ index memory per 10 TB of incoming data, but requires
+ 100 GB of index data on persistent storage. This option
+ is only available when creating a new volume, and cannot
+ be changed for an existing volume.
+ type: str
+ choices: [ dense, sparse ]
+ ackthreads:
+ description:
+ - Specifies the number of threads to use for
+ acknowledging completion of requested VDO I/O operations.
+ Valid values are integer values from 1 to 100 (lower
+ numbers are preferable due to overhead). The default is
+ 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ biothreads:
+ description:
+ - Specifies the number of threads to use for submitting I/O
+ operations to the storage device. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 4.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ cputhreads:
+ description:
+ - Specifies the number of threads to use for CPU-intensive
+ work such as hashing or compression. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 2.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ logicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on logical
+ block addresses. Valid values are integer values from
+ 1 to 100 (lower numbers are preferable due to overhead).
+ The default is 1. Existing volumes will maintain their
+ previously configured setting unless a different value
+ is specified in the playbook.
+ type: str
+ physicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on physical
+ block addresses. Valid values are integer values from
+ 1 to 16 (lower numbers are preferable due to overhead).
+ The physical space used by the VDO volume must be
+ larger than (slabsize * physicalthreads). The default
+ is 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+notes:
+ - In general, the default thread configuration should be used.
+requirements:
+ - PyYAML
+ - kmod-kvdo
+ - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+ community.general.vdo:
+ name: vdo1
+ state: present
+ device: /dev/md0
+ logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+ community.general.vdo:
+ name: vdo1
+ state: absent
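+
+# Additional illustrative example (a sketch; the device is a placeholder):
+# reconfigure an existing volume. Per the documentation above, configuration
+# changes take effect when the VDO volume is restarted.
+- name: Switch vdo1 to async writes and disable deduplication
+  community.general.vdo:
+    name: vdo1
+    state: present
+    device: /dev/md0
+    writepolicy: async
+    deduplication: disabled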
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+ rc, vdostatusout, err = module.run_command("%s status" % (vdocmd))
+
+ # if rc != 0:
+ # module.fail_json(msg="Inventorying VDOs failed: %s"
+ # % vdostatusout, rc=rc, err=err)
+
+ vdolist = []
+
+ if (rc == 2 and
+ re.findall(r"vdoconf.yml does not exist", err, re.MULTILINE)):
+ # If there is no /etc/vdoconf.yml file, assume there are no
+ # VDO volumes. Return an empty list of VDO volumes.
+ return vdolist
+
+ if rc != 0:
+ module.fail_json(msg="Inventorying VDOs failed: %s"
+ % vdostatusout, rc=rc, err=err)
+
+    vdostatusyaml = yaml.safe_load(vdostatusout)
+ if vdostatusyaml is None:
+ return vdolist
+
+ vdoyamls = vdostatusyaml['VDOs']
+
+ if vdoyamls is not None:
+ vdolist = vdoyamls.keys()
+
+ return vdolist
+
+
+def list_running_vdos(module, vdocmd):
+ rc, vdolistout, err = module.run_command("%s list" % (vdocmd))
+    # Keep a list (not a lazy iterator) so membership checks can be repeated.
+    runningvdolist = list(filter(None, vdolistout.split('\n')))
+ return runningvdolist
+
+
+# Start a VDO volume by name, using the 'vdo start' command.
+#
+# @param module The AnsibleModule object.
+# @param vdoname The name of the VDO volume to start.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return rc The return code of the 'vdo start' command.
+def start_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s start --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("started VDO volume %s" % vdoname)
+
+ return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s stop --name=%s" % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("stopped VDO volume %s" % vdoname)
+
+ return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s activate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("activated VDO volume %s" % vdoname)
+
+ return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command("%s deactivate --name=%s"
+ % (vdocmd, vdoname))
+ if rc == 0:
+ module.log("deactivated VDO volume %s" % vdoname)
+
+ return rc
+
+
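+# Generate a string containing options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters, and their values
+# (values of 'None' and/or nonexistent values are ignored).
+#
+# @return vdocmdoptions A string to be used in a 'vdo <action>' command.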
+def add_vdooptions(params):
+ vdocmdoptions = ""
+ options = []
+
+ if ('logicalsize' in params) and (params['logicalsize'] is not None):
+ options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+ if (('blockmapcachesize' in params) and
+ (params['blockmapcachesize'] is not None)):
+ options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+ if ('readcache' in params) and (params['readcache'] == 'enabled'):
+ options.append("--readCache=enabled")
+
+ if ('readcachesize' in params) and (params['readcachesize'] is not None):
+ options.append("--readCacheSize=" + params['readcachesize'])
+
+ if ('slabsize' in params) and (params['slabsize'] is not None):
+ options.append("--vdoSlabSize=" + params['slabsize'])
+
+ if ('emulate512' in params) and (params['emulate512']):
+ options.append("--emulate512=enabled")
+
+ if ('indexmem' in params) and (params['indexmem'] is not None):
+ options.append("--indexMem=" + params['indexmem'])
+
+ if ('indexmode' in params) and (params['indexmode'] == 'sparse'):
+ options.append("--sparseIndex=enabled")
+
+ # Entering an invalid thread config results in a cryptic
+ # 'Could not set up device mapper for %s' error from the 'vdo'
+ # command execution. The dmsetup module on the system will
+ # output a more helpful message, but one would have to log
+ # onto that system to read the error. For now, heed the thread
+ # limit warnings in the DOCUMENTATION section above.
+ if ('ackthreads' in params) and (params['ackthreads'] is not None):
+ options.append("--vdoAckThreads=" + params['ackthreads'])
+
+ if ('biothreads' in params) and (params['biothreads'] is not None):
+ options.append("--vdoBioThreads=" + params['biothreads'])
+
+ if ('cputhreads' in params) and (params['cputhreads'] is not None):
+ options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+ if ('logicalthreads' in params) and (params['logicalthreads'] is not None):
+ options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+ if (('physicalthreads' in params) and
+ (params['physicalthreads'] is not None)):
+ options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+ vdocmdoptions = ' '.join(options)
+ return vdocmdoptions
+
+
+def run_module():
+
+ # Define the available arguments/parameters that a user can pass to
+ # the module.
+ # Defaults for VDO parameters are None, in order to facilitate
+ # the detection of parameters passed from the playbook.
+ # Creation param defaults are determined by the creation section.
+
+ module_args = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ activated=dict(type='bool'),
+ running=dict(type='bool'),
+ growphysical=dict(type='bool', default=False),
+ device=dict(type='str'),
+ logicalsize=dict(type='str'),
+ deduplication=dict(type='str', choices=['disabled', 'enabled']),
+ compression=dict(type='str', choices=['disabled', 'enabled']),
+ blockmapcachesize=dict(type='str'),
+ readcache=dict(type='str', choices=['disabled', 'enabled']),
+ readcachesize=dict(type='str'),
+ emulate512=dict(type='bool', default=False),
+ slabsize=dict(type='str'),
+ writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+ indexmem=dict(type='str'),
+ indexmode=dict(type='str', choices=['dense', 'sparse']),
+ ackthreads=dict(type='str'),
+ biothreads=dict(type='str'),
+ cputhreads=dict(type='str'),
+ logicalthreads=dict(type='str'),
+ physicalthreads=dict(type='str')
+ )
+
+ # Seed the result dictionary in the object. There will be an
+ # 'invocation' dictionary added with 'module_args' (arguments
+ # given).
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+ vdocmd = module.get_bin_path("vdo", required=True)
+ if not vdocmd:
+ module.fail_json(msg='VDO is not installed.', **result)
+
+ # Print a pre-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+
+ runningvdolist = list_running_vdos(module, vdocmd)
+
+ # Collect the name of the desired VDO volume, and its state. These will
+ # determine what to do.
+ desiredvdo = module.params['name']
+ state = module.params['state']
+
+ # Create a desired VDO volume that doesn't exist yet.
+ if (desiredvdo not in vdolist) and (state == 'present'):
+ device = module.params['device']
+ if device is None:
+ module.fail_json(msg="Creating a VDO volume requires specifying "
+ "a 'device' in the playbook.")
+
+ # Create a dictionary of the options from the AnsibleModule
+ # parameters, compile the vdo command options, and run "vdo create"
+ # with those options.
+        # Since this is a creation of a new VDO volume, it will contain
+        # all of the parameters given by the playbook; the rest will
+ # assume default values.
+ options = module.params
+ vdocmdoptions = add_vdooptions(options)
+ rc, out, err = module.run_command("%s create --name=%s --device=%s %s"
+ % (vdocmd, desiredvdo, device,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Creating VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if (module.params['compression'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableCompression --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if ((module.params['deduplication'] is not None) and
+ module.params['deduplication'] == 'disabled'):
+ rc, out, err = module.run_command("%s disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+        if module.params['activated'] is False:
+ deactivate_vdo(module, desiredvdo, vdocmd)
+
+        if module.params['running'] is False:
+ stop_vdo(module, desiredvdo, vdocmd)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("created VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # Modify the current parameters of a VDO that exists.
+ if (desiredvdo in vdolist) and (state == 'present'):
+ rc, vdostatusoutput, err = module.run_command("%s status" % (vdocmd))
+        vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+ # An empty dictionary to contain dictionaries of VDO statistics
+ processedvdos = {}
+
+ vdoyamls = vdostatusyaml['VDOs']
+ if vdoyamls is not None:
+ processedvdos = vdoyamls
+
+ # The 'vdo status' keys that are currently modifiable.
+ statusparamkeys = ['Acknowledgement threads',
+ 'Bio submission threads',
+ 'Block map cache size',
+ 'CPU-work threads',
+ 'Logical threads',
+ 'Physical threads',
+ 'Read cache',
+ 'Read cache size',
+ 'Configured write policy',
+ 'Compression',
+ 'Deduplication']
+
+ # A key translation table from 'vdo status' output to Ansible
+ # module parameters. This covers all of the 'vdo status'
+ # parameter keys that could be modified with the 'vdo'
+ # command.
+ vdokeytrans = {
+ 'Logical size': 'logicalsize',
+ 'Compression': 'compression',
+ 'Deduplication': 'deduplication',
+ 'Block map cache size': 'blockmapcachesize',
+ 'Read cache': 'readcache',
+ 'Read cache size': 'readcachesize',
+ 'Configured write policy': 'writepolicy',
+ 'Acknowledgement threads': 'ackthreads',
+ 'Bio submission threads': 'biothreads',
+ 'CPU-work threads': 'cputhreads',
+ 'Logical threads': 'logicalthreads',
+ 'Physical threads': 'physicalthreads'
+ }
+
+ # Build a dictionary of the current VDO status parameters, with
+ # the keys used by VDO. (These keys will be converted later.)
+ currentvdoparams = {}
+
+ # Build a "lookup table" dictionary containing a translation table
+ # of the parameters that can be modified
+ modtrans = {}
+
+ for statfield in statusparamkeys:
+ if statfield in processedvdos[desiredvdo]:
+ currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+ modtrans[statfield] = vdokeytrans[statfield]
+
+ # Build a dictionary of current parameters formatted with the
+ # same keys as the AnsibleModule parameters.
+ currentparams = {}
+ for paramkey in modtrans.keys():
+            currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+ diffparams = {}
+
+ # Check for differences between the playbook parameters and the
+ # current parameters. This will need a comparison function;
+ # since AnsibleModule params are all strings, compare them as
+ # strings (but if it's None; skip).
+ for key in currentparams.keys():
+ if module.params[key] is not None:
+ if str(currentparams[key]) != module.params[key]:
+ diffparams[key] = module.params[key]
+
+ if diffparams:
+ vdocmdoptions = add_vdooptions(diffparams)
+ if vdocmdoptions:
+ rc, out, err = module.run_command("%s modify --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Modifying VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'deduplication' in diffparams.keys():
+ dedupemod = diffparams['deduplication']
+ if dedupemod == 'disabled':
+ rc, out, err = module.run_command("%s "
+ "disableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if dedupemod == 'enabled':
+ rc, out, err = module.run_command("%s "
+ "enableDeduplication "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'compression' in diffparams.keys():
+ compressmod = diffparams['compression']
+ if compressmod == 'disabled':
+ rc, out, err = module.run_command("%s disableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if compressmod == 'enabled':
+ rc, out, err = module.run_command("%s enableCompression "
+ "--name=%s"
+ % (vdocmd, desiredvdo))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'writepolicy' in diffparams.keys():
+ writepolmod = diffparams['writepolicy']
+ if writepolmod == 'auto':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if writepolmod == 'sync':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if writepolmod == 'async':
+ rc, out, err = module.run_command("%s "
+ "changeWritePolicy "
+ "--name=%s "
+ "--writePolicy=%s"
+ % (vdocmd,
+ desiredvdo,
+ writepolmod))
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on "
+ "VDO volume %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+        # Process the size parameters, to determine whether a growPhysical or
+ # growLogical operation needs to occur.
+ sizeparamkeys = ['Logical size', ]
+
+ currentsizeparams = {}
+ sizetrans = {}
+ for statfield in sizeparamkeys:
+ currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+ sizetrans[statfield] = vdokeytrans[statfield]
+
+ sizeparams = {}
+ for paramkey in currentsizeparams.keys():
+ sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+ diffsizeparams = {}
+ for key in sizeparams.keys():
+ if module.params[key] is not None:
+ if str(sizeparams[key]) != module.params[key]:
+ diffsizeparams[key] = module.params[key]
+
+ if module.params['growphysical']:
+ physdevice = module.params['device']
+ rc, devsectors, err = module.run_command("blockdev --getsz %s"
+ % (physdevice))
+        devblocks = int(devsectors) // 8
+ dmvdoname = ('/dev/mapper/' + desiredvdo)
+ currentvdostats = (processedvdos[desiredvdo]
+ ['VDO statistics']
+ [dmvdoname])
+ currentphysblocks = currentvdostats['physical blocks']
+
+ # Set a growPhysical threshold to grow only when there is
+ # guaranteed to be more than 2 slabs worth of unallocated
+ # space on the device to use. For now, set to device
+ # size + 64 GB, since 32 GB is the largest possible
+ # slab size.
+ growthresh = devblocks + 16777216
+
+ if currentphysblocks > growthresh:
+ result['changed'] = True
+ rc, out, err = module.run_command("%s growPhysical --name=%s"
+ % (vdocmd, desiredvdo))
+
+ if 'logicalsize' in diffsizeparams.keys():
+ result['changed'] = True
+ vdocmdoptions = ("--vdoLogicalSize=" +
+ diffsizeparams['logicalsize'])
+ rc, out, err = module.run_command("%s growLogical --name=%s %s"
+ % (vdocmd,
+ desiredvdo,
+ vdocmdoptions))
+
+ vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+        if ((module.params['activated'] is False) and
+                (vdoactivatestatus == 'enabled')):
+ deactivate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+        if ((module.params['activated'] is True) and
+                (vdoactivatestatus == 'disabled')):
+ activate_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+        if ((module.params['running'] is False) and
+                (desiredvdo in runningvdolist)):
+ stop_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Note that a disabled VDO volume cannot be started by the
+ # 'vdo start' command, by design. To accurately track changed
+ # status, don't try to start a disabled VDO volume.
+ # If the playbook contains 'activated: yes', assume that
+ # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+ # will have the activated status prior to the activate_vdo()
+ # call.
+        if (((vdoactivatestatus == 'enabled') or
+             (module.params['activated'] is True)) and
+                (module.params['running'] is True) and
+                (desiredvdo not in runningvdolist)):
+ start_vdo(module, desiredvdo, vdocmd)
+ if not result['changed']:
+ result['changed'] = True
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ if diffparams:
+ module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+ # Remove a desired VDO that currently exists.
+ if (desiredvdo in vdolist) and (state == 'absent'):
+ rc, out, err = module.run_command("%s remove --name=%s"
+ % (vdocmd, desiredvdo))
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Removing VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("removed VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # fall through
+ # The state for the desired VDO volume was absent, and it does
+ # not exist. Print a post-run list of VDO volumes in the result
+ # object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("received request to remove non-existent VDO volume %s"
+ % desiredvdo)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py
new file mode 100644
index 00000000..3d0788e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_configuration.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters.
+description:
+ - Updates Vertica configuration parameters.
+options:
+ name:
+ description:
+ - Name of the parameter to update.
+ required: true
+ aliases: [parameter]
+ type: str
+ value:
+ description:
+ - Value of the parameter to be set.
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Update the failovertostandbyafter configuration parameter
+ community.general.vertica_configuration: name=failovertostandbyafter value='8 hours'
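+
+# Additional illustrative examples (sketches; host name and vault variable are
+# placeholders). The first task writes the unixODBC driver entry described in
+# the notes; the second performs the same update in YAML dictionary form
+# against a remote cluster.
+- name: Register the Vertica ODBC driver with unixODBC
+  community.general.ini_file:
+    path: /etc/odbcinst.ini
+    section: Vertica
+    option: Driver
+    value: /opt/vertica/lib64/libverticaodbc.so
+
+- name: Update failovertostandbyafter on a remote cluster
+  community.general.vertica_configuration:
+    name: failovertostandbyafter
+    value: '8 hours'
+    db: db_name
+    cluster: vertica01.example.com
+    port: '5433'
+    login_user: dbadmin
+    login_password: "{{ vertica_password }}"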
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_facts.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py
new file mode 100644
index 00000000..a5741719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_info.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts.
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+options:
+ cluster:
+ description:
+ - Name of the cluster running the schema.
+ default: localhost
+ port:
+ description:
+      - Database port to connect to.
+ default: 5433
+ db:
+ description:
+ - Name of the database running the schema.
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica facts
+ community.general.vertica_info: db=db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
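+
+# Additional illustrative example (a sketch; host name and vault variable are
+# placeholders): gather from a remote cluster and inspect the returned node
+# information.
+- name: Gather info from a remote cluster
+  community.general.vertica_info:
+    db: db_name
+    cluster: vertica01.example.com
+    login_password: "{{ vertica_password }}"
+  register: vertica
+
+- name: Print node states
+  ansible.builtin.debug:
+    msg: "{{ vertica.vertica_nodes }}"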
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+ is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
+ if is_old_facts:
+ module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
+ "and the renamed one no longer returns ansible_facts",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ if is_old_facts:
+ module.exit_json(changed=False,
+ ansible_facts={'vertica_schemas': schema_facts,
+ 'vertica_users': user_facts,
+ 'vertica_roles': role_facts,
+ 'vertica_configuration': configuration_facts,
+ 'vertica_nodes': node_facts})
+ else:
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py
new file mode 100644
index 00000000..bba411d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_role.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them.
+description:
+ - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
+options:
+ name:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ state:
+ description:
+ - Whether to create C(present) or drop C(absent) a role.
+ choices: ['present', 'absent']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
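+
+# Illustrative sketch only: the unixODBC entries described in the notes above would
+# typically look like the following (paths are taken from the documentation and may
+# differ on a given host):
+#
+#   [Vertica]                                       (in /etc/odbcinst.ini)
+#   Driver = /opt/vertica/lib64/libverticaodbc.so
+#
+#   [Driver]                                        (in /etc/vertica.ini)
+#   ErrorMessagesPath = /opt/vertica/lib64
+#   DriverManagerEncoding = UTF-16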
+
+EXAMPLES = """
+- name: Creating a new vertica role
+ community.general.vertica_role: name=role_name db=db_name state=present
+
+- name: Creating a new vertica role with other role assigned
+ community.general.vertica_role: name=role_name assigned_role=other_role_name state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
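+ # An empty role name matches every role: the "? = ''" clause disables the ilike filter.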
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+
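+# check() reports whether the role already matches the requested state; main() uses it
+# in check mode to predict whether a real run would report a change.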
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
+ return False
+ return True
+
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ role = module.params['role']
+ assigned_roles = []
+ if module.params['assigned_roles']:
+ assigned_roles = module.params['assigned_roles'].split(',')
+ assigned_roles = list(filter(None, assigned_roles))
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py
new file mode 100644
index 00000000..424de564
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_schema.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles.
+description:
+ - Adds or removes Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all the objects have been dropped.
+ - In such a situation, if the module tries to remove the schema it
+ will fail and only remove roles created for the schema if they have
+ no dependencies.
+options:
+ name:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ state:
+ description:
+ - Whether to create C(present), or drop C(absent) a schema.
+ default: present
+ choices: ['present', 'absent']
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+ community.general.vertica_schema: name=schema_name db=db_name state=present
+
+- name: Creating a new schema with specific schema owner
+ community.general.vertica_schema: name=schema_name owner=dbowner db=db_name state=present
+
+- name: Creating a new schema with roles
+ community.general.vertica_schema:
+ name=schema_name
+ create_roles=schema_name_all
+ usage_roles=schema_name_ro,schema_name_rw
+ db=db_name
+ state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
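+ # Two passes: first collect the schemas themselves, then fold in the roles that
+ # hold usage/create privileges on them.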
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
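+ # Roles that are no longer wanted are dropped outright; brand new roles are created
+ # and granted usage, and create-capable roles additionally receive the create grant.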
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ return False
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+ return False
+ if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+ return False
+ return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+ sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(default=None, aliases=['usage_role']),
+ create_roles=dict(default=None, aliases=['create_role']),
+ owner=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+ usage_roles = list(filter(None, usage_roles))
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+ create_roles = list(filter(None, create_roles))
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py
new file mode 100644
index 00000000..f550f190
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vertica_user.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles.
+description:
+ - Adds or removes Vertica database user and, optionally, assigns roles.
+ - A user will not be removed until all the dependencies have been dropped.
+ - In such a situation, if the module tries to remove the user it
+ will fail and only remove roles granted to the user.
+options:
+ name:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ profile:
+ description:
+ - Sets the user's profile.
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ password:
+ description:
+ - The user's password encrypted by the MD5 algorithm.
+ - The password must be generated with the format C("md5" + md5[password + username]),
+ resulting in a total of 35 characters. An easy way to do this is by querying
+ the Vertica database with C(select 'md5'||md5('<user_password><user_name>')); a Python
+ equivalent is sketched in a comment after the examples below.
+ expired:
+ description:
+ - Sets the user's password expiration.
+ type: bool
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+ - The user will be created with password expired and set to I($ldap$).
+ type: bool
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ state:
+ description:
+ - Whether to create C(present), drop C(absent) or lock C(locked) a user.
+ choices: ['present', 'absent', 'locked']
+ default: present
+ db:
+ description:
+ - Name of the Vertica database.
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: 5433
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ login_password:
+ description:
+ - The password used to authenticate with.
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica user with password
+ community.general.vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
+
+- name: Creating a new vertica user authenticated via ldap with roles assigned
+ community.general.vertica_user:
+ name=user_name
+ ldap=true
+ db=db_name
+ roles=schema_name_ro
+ state=present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
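+ # Revoke roles that are no longer required, grant the missing ones, and reset the
+ # user's default role list to exactly the required roles.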
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+ if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
+ ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ return False
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ return False
+ return True
+
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(default=None),
+ resource_pool=dict(default=None),
+ password=dict(default=None, no_log=True),
+ expired=dict(type='bool', default=None),
+ ldap=dict(type='bool', default=None),
+ roles=dict(default=None, aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+ roles = list(filter(None, roles))
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(e))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py
new file mode 100644
index 00000000..54bb8c29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_eg.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+ - Create or delete export groups on a Vexata VX100 array.
+ - An export group is a tuple of a volume group, initiator group and port
+ group that allows a set of volumes to be exposed to one or more hosts
+ through specific array ports.
+author:
+ - Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Export group name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates the export group when C(present), or deletes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ vg:
+ description:
+ - Volume group name.
+ type: str
+ ig:
+ description:
+ - Initiator group name.
+ type: str
+ pg:
+ description:
+ - Port group name.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export.
+ community.general.vexata_eg:
+ name: db_export
+ vg: dbvols
+ ig: dbhosts
+ pg: pg1
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete export group named db_export
+ community.general.vexata_eg:
+ name: db_export
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+ """Retrieve a named vg if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ egs = array.list_egs()
+ eg = list(filter(lambda eg: eg['name'] == name, egs))
+ if len(eg) == 1:
+ return eg[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+ """Retrieve a named vg's id if it exists, error if absent."""
+ name = module.params['vg']
+ try:
+ vgs = array.list_vgs()
+ vg = list(filter(lambda vg: vg['name'] == name, vgs))
+ if len(vg) == 1:
+ return vg[0]['id']
+ else:
+ module.fail_json(msg='Volume group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+ """Retrieve a named ig's id if it exists, error if absent."""
+ name = module.params['ig']
+ try:
+ igs = array.list_igs()
+ ig = list(filter(lambda ig: ig['name'] == name, igs))
+ if len(ig) == 1:
+ return ig[0]['id']
+ else:
+ module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+ """Retrieve a named pg's id if it exists, error if absent."""
+ name = module.params['pg']
+ try:
+ pgs = array.list_pgs()
+ pg = list(filter(lambda pg: pg['name'] == name, pgs))
+ if len(pg) == 1:
+ return pg[0]['id']
+ else:
+ module.fail_json(msg='Port group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+ """"Create a new export group."""
+ changed = False
+ eg_name = module.params['name']
+ vg_id = get_vg_id(module, array)
+ ig_id = get_ig_id(module, array)
+ pg_id = get_pg_id(module, array)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ eg = array.create_eg(
+ eg_name,
+ 'Ansible export group',
+ (vg_id, ig_id, pg_id))
+ if eg:
+ module.log(msg='Created export group {0}'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+ changed = False
+ eg_name = eg['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_eg(
+ eg['id'])
+ if ok:
+ module.log(msg='Export group {0} deleted.'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ vg=dict(type='str'),
+ ig=dict(type='str'),
+ pg=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ eg = get_eg(module, array)
+
+ if state == 'present' and not eg:
+ create_eg(module, array)
+ elif state == 'absent' and eg:
+ delete_eg(module, array, eg)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py
new file mode 100644
index 00000000..1cf4cd7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vexata_volume.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+ - Create, delete, or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+options:
+ name:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ description:
+ - Creates or modifies the volume when C(present), or removes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ size:
+ description:
+ - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+ community.general.vexata_volume:
+ name: foo
+ size: 2T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Expand volume named foo to 4 TiB
+ community.general.vexata_volume:
+ name: foo
+ size: 4T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete volume named foo
+ community.general.vexata_volume:
+ name: foo
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+ """Retrieve a named volume if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ vols = array.list_volumes()
+ vol = list(filter(lambda v: v['name'] == name, vols))
+ if len(vol) == 1:
+ return vol[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
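+ # size_to_MiB converts a size string such as '2T' into MiB; the module treats a
+ # non-positive result as an unparsable size.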
+ size = module.params.get('size', False)
+ if not size:
+ module.fail_json(msg=err_msg)
+ size = size_to_MiB(size)
+ if size <= 0:
+ module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
+ return size
+
+
+def create_volume(module, array):
+ """"Create a new volume."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to create volume.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.create_volume(
+ module.params['name'],
+ 'Ansible volume',
+ size)
+ if vol:
+ module.log(msg='Created volume {0}'.format(vol['id']))
+ changed = True
+ else:
+ module.fail_json(msg='Volume create failed.')
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+ """Expand the volume size."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to update volume')
+ prev_size = volume['volSize']
+ if size <= prev_size:
+ module.log(msg='Volume expanded size needs to be larger '
+ 'than current size.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.grow_volume(
+ volume['name'],
+ volume['description'],
+ volume['id'],
+ size)
+ if vol:
+ changed = True
+ except Exception:
+ pass
+
+ module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+ changed = False
+ vol_name = volume['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_volume(
+ volume['id'])
+ if ok:
+ module.log(msg='Volume {0} deleted.'.format(vol_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ size=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ volume = get_volume(module, array)
+
+ if state == 'present':
+ if not volume:
+ create_volume(module, array)
+ else:
+ update_volume(module, array, volume)
+ elif state == 'absent' and volume:
+ delete_volume(module, array, volume)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py
new file mode 100644
index 00000000..553e6efc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/vmadm.py
@@ -0,0 +1,796 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vmadm
+short_description: Manage SmartOS virtual machines and zones.
+description:
+ - Manage SmartOS virtual machines through vmadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+options:
+ archive_on_delete:
+ required: false
+ description:
+ - When enabled, the zone dataset will be mounted on C(/zones/archive)
+ upon removal.
+ type: bool
+ autoboot:
+ required: false
+ description:
+ - Whether or not a VM is booted when the system is rebooted.
+ type: bool
+ brand:
+ choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
+ default: joyent
+ description:
+ - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ type: str
+ boot:
+ required: false
+ description:
+ - Set the boot order for KVM VMs.
+ type: str
+ cpu_cap:
+ required: false
+ description:
+ - Sets a limit on the amount of CPU time that can be used by a VM.
+ Use C(0) for no cap.
+ type: int
+ cpu_shares:
+ required: false
+ description:
+ - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
+ a VM. This limit is relative to all other VMs on the system.
+ type: int
+ cpu_type:
+ required: false
+ choices: [ qemu64, host ]
+ default: qemu64
+ description:
+ - Control the type of virtual CPU exposed to KVM VMs.
+ type: str
+ customer_metadata:
+ required: false
+ description:
+ - Metadata to be set and associated with this VM; this contains
+ customer-modifiable keys.
+ type: dict
+ delegate_dataset:
+ required: false
+ description:
+ - Whether to delegate a ZFS dataset to an OS VM.
+ type: bool
+ disk_driver:
+ required: false
+ description:
+ - Default value for a virtual disk model for KVM guests.
+ type: str
+ disks:
+ required: false
+ description:
+ - A list of disks to add; valid properties are documented in vmadm(1M).
+ type: list
+ dns_domain:
+ required: false
+ description:
+ - Domain value for C(/etc/hosts).
+ type: str
+ docker:
+ required: false
+ description:
+ - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ type: bool
+ filesystems:
+ required: false
+ description:
+ - Mount additional filesystems into an OS VM.
+ type: list
+ firewall_enabled:
+ required: false
+ description:
+ - Enables the firewall, allowing fwadm(1M) rules to be applied.
+ type: bool
+ force:
+ required: false
+ description:
+ - Force a particular action (i.e. stop or delete a VM).
+ type: bool
+ fs_allowed:
+ required: false
+ description:
+ - Comma separated list of filesystem types this zone is allowed to mount.
+ type: str
+ hostname:
+ required: false
+ description:
+ - Zone/VM hostname.
+ type: str
+ image_uuid:
+ required: false
+ description:
+ - Image UUID.
+ type: str
+ indestructible_delegated:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to delegated datasets.
+ type: bool
+ indestructible_zoneroot:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to zoneroot.
+ type: bool
+ internal_metadata:
+ required: false
+ description:
+ - Metadata to be set and associated with this VM; this contains
+ operator-generated keys.
+ type: dict
+ internal_metadata_namespace:
+ required: false
+ description:
+ - List of namespaces to be set as I(internal_metadata-only); these namespaces
+ will come from I(internal_metadata) rather than I(customer_metadata).
+ type: str
+ kernel_version:
+ required: false
+ description:
+ - Kernel version to emulate for LX VMs.
+ type: str
+ limit_priv:
+ required: false
+ description:
+ - Set (comma separated) list of privileges the zone is allowed to use.
+ type: str
+ maintain_resolvers:
+ required: false
+ description:
+ - Resolvers in C(/etc/resolv.conf) will be updated when updating
+ the I(resolvers) property.
+ type: bool
+ max_locked_memory:
+ required: false
+ description:
+ - Total amount of memory (in MiBs) on the host that can be locked by this VM.
+ type: int
+ max_lwps:
+ required: false
+ description:
+ - Maximum number of lightweight processes this VM is allowed to have running.
+ type: int
+ max_physical_memory:
+ required: false
+ description:
+ - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
+ type: int
+ max_swap:
+ required: false
+ description:
+ - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
+ type: int
+ mdata_exec_timeout:
+ required: false
+ description:
+ - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
+ that runs user-scripts in the zone.
+ type: int
+ name:
+ required: false
+ aliases: [ alias ]
+ description:
+ - Name of the VM. vmadm(1M) uses this as an optional name.
+ type: str
+ nic_driver:
+ required: false
+ description:
+ - Default value for a virtual NIC model for KVM guests.
+ type: str
+ nics:
+ required: false
+ description:
+ - A list of nics to add; valid properties are documented in vmadm(1M).
+ type: list
+ nowait:
+ required: false
+ description:
+ - Consider the provisioning complete when the VM first starts, rather than
+ when the VM has rebooted.
+ type: bool
+ qemu_opts:
+ required: false
+ description:
+ - Additional qemu arguments for KVM guests. This overwrites the default arguments
+ provided by vmadm(1M) and should only be used for debugging.
+ type: str
+ qemu_extra_opts:
+ required: false
+ description:
+ - Additional qemu cmdline arguments for KVM guests.
+ type: str
+ quota:
+ required: false
+ description:
+ - Quota on zone filesystems (in MiBs).
+ type: int
+ ram:
+ required: false
+ description:
+ - Amount of virtual RAM for a KVM guest (in MiBs).
+ type: int
+ resolvers:
+ required: false
+ description:
+ - List of resolvers to be put into C(/etc/resolv.conf).
+ type: list
+ routes:
+ required: false
+ description:
+ - Dictionary that maps destinations to gateways, these will be set as static
+ routes in the VM.
+ type: dict
+ spice_opts:
+ required: false
+ description:
+ - Addition options for SPICE-enabled KVM VMs.
+ type: str
+ spice_password:
+ required: false
+ description:
+ - Password required to connect to SPICE. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ state:
+ choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+ default: running
+ description:
+ - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+ operate on a VM that is currently provisioned. C(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. C(absent) will
+ shutdown the zone before removing it.
+ C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ it down.
+ type: str
+ tmpfs:
+ required: false
+ description:
+ - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+ type: int
+ uuid:
+ required: false
+ description:
+ - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ type: str
+ vcpus:
+ required: false
+ description:
+ - Number of virtual CPUs for a KVM guest.
+ type: int
+ vga:
+ required: false
+ description:
+ - Specify VGA emulation used by KVM VMs.
+ type: str
+ virtio_txburst:
+ required: false
+ description:
+ - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+ type: int
+ virtio_txtimer:
+ required: false
+ description:
+ - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+ type: int
+ vnc_password:
+ required: false
+ description:
+ - Password required to connect to VNC. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ vnc_port:
+ required: false
+ description:
+ - TCP port the VNC server listens on. Set C(0) for a random port,
+ or C(-1) to disable.
+ type: int
+ zfs_data_compression:
+ required: false
+ description:
+ - Specifies the compression algorithm used for this VM's data dataset. This option
+ only has effect on delegated datasets.
+ type: str
+ zfs_data_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+ type: int
+ zfs_filesystem_limit:
+ required: false
+ description:
+ - Maximum number of filesystems the VM can have.
+ type: int
+ zfs_io_priority:
+ required: false
+ description:
+ - IO throttle priority value relative to other VMs.
+ type: int
+ zfs_root_compression:
+ required: false
+ description:
+ - Specifies the compression algorithm used for this VM's root dataset. This option
+ only has effect on the zoneroot dataset.
+ type: str
+ zfs_root_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+ type: int
+ zfs_snapshot_limit:
+ required: false
+ description:
+ - Number of snapshots the VM can have.
+ type: int
+ zpool:
+ required: false
+ description:
+ - ZFS pool the VM's zone dataset will be created in.
+ type: str
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+ community.general.vmadm:
+ brand: joyent
+ state: present
+ alias: fw_zone
+ image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+ firewall_enabled: yes
+ indestructible_zoneroot: yes
+ nics:
+ - nic_tag: admin
+ ip: dhcp
+ primary: true
+ internal_metadata:
+ root_pw: 'secret'
+ quota: 1
+
+- name: Delete a zone
+ community.general.vmadm:
+ alias: test_zone
+ state: deleted
+
+- name: Stop all zones
+ community.general.vmadm:
+ uuid: '*'
+ state: stopped
+'''
+
+RETURN = '''
+uuid:
+ description: UUID of the managed VM.
+ returned: always
+ type: str
+ sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+ description: Alias of the managed VM.
+ returned: When addressing a VM by alias.
+ type: str
+ sample: 'dns-zone'
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'running'
+'''
+
+import json
+import os
+import re
+import tempfile
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+# While vmadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python.
+# The returned message contains '\n' as part of the stacktrace,
+# which breaks the parsers.
+
+
+def get_vm_prop(module, uuid, prop):
+ # Lookup a property for the given VM.
+ # Returns the property, or None if not found.
+ cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
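+ # e.g. "vmadm lookup -j -o state uuid=<uuid>", which prints a JSON array of matches.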
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and prop in stdout_json[0]:
+ return stdout_json[0][prop]
+ else:
+ return None
+
+
+def get_vm_uuid(module, alias):
+ # Lookup the uuid that goes with the given alias.
+ # Returns the uuid or '' if not found.
+ cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+
+ # If no VM was found matching the given alias, we get back an empty array.
+ # That is not an error condition as we might be explicitly checking its
+ # absence.
+ if stdout.strip() == '[]':
+ return None
+ else:
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
+ return stdout_json[0]['uuid']
+
+
+def get_all_vm_uuids(module):
+ # Retrieve the UUIDs for all VMs.
+ cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg='Failed to get VMs list', exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ return [v['uuid'] for v in stdout_json]
+ except Exception as e:
+ module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
+ exception=traceback.format_exc())
+
+
+def new_vm(module, uuid, vm_state):
+ payload_file = create_payload(module, uuid)
+
+ (rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
+
+ if rc != 0:
+ changed = False
+ module.fail_json(msg='Could not create VM', exception=stderr)
+ else:
+ changed = True
+ # 'vmadm create' returns all output to stderr...
+ match = re.match('Successfully created VM (.*)', stderr)
+ if match:
+ vm_uuid = match.groups()[0]
+ if not is_valid_uuid(vm_uuid):
+ module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+ else:
+ module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
+
+ # Now that the VM is created, ensure it is in the desired state (if not 'running')
+ if vm_state != 'running':
+ ret = set_vm_state(module, vm_uuid, vm_state)
+ if not ret:
+ module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+ try:
+ os.unlink(payload_file)
+ except Exception as e:
+ # Since the payload may contain sensitive information, fail hard
+ # if we cannot remove the file so the operator knows about it.
+ module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+ # Create a new VM using the provided payload.
+ cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
+
+ return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+ p = module.params
+
+ # Check if the VM is already in the desired state.
+ state = get_vm_prop(module, vm_uuid, 'state')
+ if state and (state == vm_state):
+ return None
+
+ # Lookup table for the state to be in, and which command to use for that.
+ # vm_state: [vmadm command, forceable?]
+ cmds = {
+ 'stopped': ['stop', True],
+ 'running': ['start', False],
+ 'deleted': ['delete', True],
+ 'rebooted': ['reboot', False]
+ }
+
+ if p['force'] and cmds[vm_state][1]:
+ force = '-F'
+ else:
+ force = ''
+
+ cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ match = re.match('^Successfully.*', stderr)
+ if match:
+ return True
+ else:
+ return False
+
+
+def create_payload(module, uuid):
+ # Create the JSON payload (vmdef) and return the filename.
+
+ # Filter out the few options that are not valid VM properties.
+ module_options = ['debug', 'force', 'state']
+ # @TODO make this a simple {} comprehension as soon as py2 is ditched
+ # @TODO {k: v for k, v in p.items() if k not in module_options}
+ vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
+
+ try:
+ vmdef_json = json.dumps(vmdef)
+ except Exception as e:
+ module.fail_json(
+ msg='Could not create valid JSON payload', exception=traceback.format_exc())
+
+ # Create the temporary file that contains our payload, and set tight
+ # permissions on it, as it may contain sensitive information.
+ try:
+ # XXX: When there's a way to get the current ansible temporary directory
+ # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+ # the payload (thus removing the `save_payload` option).
+ fname = tempfile.mkstemp()[1]
+ os.chmod(fname, 0o400)
+ with open(fname, 'w') as fh:
+ fh.write(vmdef_json)
+ except Exception as e:
+ module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+ return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+ ret = set_vm_state(module, uuid, vm_state)
+
+ # Whether the VM changed state.
+ if ret is None:
+ return False
+ elif ret:
+ return True
+ else:
+ module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+ if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
+ return True
+ else:
+ return False
+
+
+def validate_uuids(module):
+ # Perform basic UUID validation.
+ failed = []
+
+ for u in [['uuid', module.params['uuid']],
+ ['image_uuid', module.params['image_uuid']]]:
+ if u[1] and u[1] != '*':
+ if not is_valid_uuid(u[1]):
+ failed.append(u[0])
+
+ if len(failed) > 0:
+ module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+
+
+def manage_all_vms(module, vm_state):
+ # Handle operations for all VMs, which can by definition only
+ # be state transitions.
+ state = module.params['state']
+
+ if state == 'created':
+ module.fail_json(msg='State "created" is only valid for tasks with a single VM')
+
+ # If any of the VMs has a change, the task as a whole has a change.
+ any_changed = False
+
+ # First get all VM uuids and for each check their state, and adjust it if needed.
+ for uuid in get_all_vm_uuids(module):
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+ if not current_vm_state and vm_state == 'deleted':
+ any_changed = False
+ else:
+ if module.check_mode:
+ if (not current_vm_state) or (current_vm_state != vm_state):
+ any_changed = True
+ else:
+ any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
+
+ return any_changed
+
+
+def main():
+ # In order to reduce the clutter and boilerplate for trivial options,
+ # abstract the vmadm properties and build the dict of arguments later.
+ # Dict of all options that are simple to define based on their type.
+ # They're not required and have a default of None.
+ properties = {
+ 'str': [
+ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
+ 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
+ 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
+ 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
+ 'zfs_root_compression', 'zpool'
+ ],
+ 'bool': [
+ 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
+ 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
+ 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
+ ],
+ 'int': [
+ 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
+ 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
+ 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
+ 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
+ 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
+ 'zfs_snapshot_limit'
+ ],
+ 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
+ 'list': ['disks', 'nics', 'resolvers', 'filesystems']
+ }
+
+ # Start with the options that are not as trivial as those above.
+ options = dict(
+ state=dict(
+ default='running',
+ type='str',
+ choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
+ ),
+ name=dict(
+ default=None, type='str',
+ aliases=['alias']
+ ),
+ brand=dict(
+ default='joyent',
+ type='str',
+ choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
+ ),
+ cpu_type=dict(
+ default='qemu64',
+ type='str',
+ choices=['host', 'qemu64']
+ ),
+ # Regular strings, however these require additional options.
+ spice_password=dict(type='str', no_log=True),
+ vnc_password=dict(type='str', no_log=True),
+ )
+
+ # Add our 'simple' options to options dict.
+ for type in properties:
+ for p in properties[type]:
+ option = dict(default=None, type=type)
+ options[p] = option
+
+ module = AnsibleModule(
+ argument_spec=options,
+ supports_check_mode=True,
+ required_one_of=[['name', 'uuid']]
+ )
+
+ module.vmadm = module.get_bin_path('vmadm', required=True)
+
+ p = module.params
+ uuid = p['uuid']
+ state = p['state']
+
+ # Translate the state parameter into something we can use later on.
+ if state in ['present', 'running']:
+ vm_state = 'running'
+ elif state in ['stopped', 'created']:
+ vm_state = 'stopped'
+ elif state in ['absent', 'deleted']:
+ vm_state = 'deleted'
+ elif state in ['restarted', 'rebooted']:
+ vm_state = 'rebooted'
+
+ result = {'state': state}
+
+ # While it's possible to refer to a given VM by its `alias`, it's easier
+ # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+ # it up.
+ if not uuid:
+ uuid = get_vm_uuid(module, p['name'])
+ # Bit of a chicken-and-egg problem here for VMs with state == deleted.
+ # If they're going to be removed in this play, we have to look up the
+ # uuid. If they're already deleted, there's nothing to look up.
+ # So if state == deleted and get_vm_uuid() returned None, the VM is already
+ # deleted and there's nothing else to do.
+ if uuid is None and vm_state == 'deleted':
+ result['name'] = p['name']
+ module.exit_json(**result)
+
+ validate_uuids(module)
+
+ if p['name']:
+ result['name'] = p['name']
+ result['uuid'] = uuid
+
+ if uuid == '*':
+ result['changed'] = manage_all_vms(module, vm_state)
+ module.exit_json(**result)
+
+ # The general flow is as follows:
+ # - First the current state of the VM is obtained by its UUID.
+ # - If the state was not found and the desired state is 'deleted', return.
+ # - If the state was not found, it means the VM has to be created.
+ # Subsequently the VM will be set to the desired state (e.g. stopped).
+ # - Otherwise, it means the VM exists already and we operate on its
+ # state (e.g. reboot it).
+ #
+ # In the future it should be possible to query the VM for a particular
+ # property as a valid state (i.e. queried) so the result can be
+ # registered.
+ # Also, VMs should be able to get their properties updated.
+ # Managing VM snapshots should be part of a standalone module.
+
+ # First obtain the VM state to determine what needs to be done with it.
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+
+ # First handle the case where the VM should be deleted and is not present.
+ if not current_vm_state and vm_state == 'deleted':
+ result['changed'] = False
+ elif module.check_mode:
+ # Shortcut for check mode, if there is no VM yet, it will need to be created.
+ # Or, if the VM is not in the desired state yet, it needs to transition.
+ if (not current_vm_state) or (current_vm_state != vm_state):
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+ # No VM was found that matched the given ID (alias or uuid), so we create it.
+ elif not current_vm_state:
+ result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+ else:
+ # VM was found, operate on its state directly.
+ result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py
new file mode 100644
index 00000000..2f097fcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/wakeonlan.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+- The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for.
+ required: true
+ type: str
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+ default: 255.255.255.255
+ type: str
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet.
+ default: 7
+ type: int
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Add SecureOn password support
+notes:
+ - This module sends a magic packet, without knowing whether it worked.
+ - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (e.g. PXE first).
+seealso:
+- module: community.windows.win_wakeonlan
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ community.general.wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- community.general.wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+import socket
+import struct
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+ # Remove possible separator from MAC address
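+ # e.g. '00:00:5E:00:53:66' or '00-00-5E-00-53-66' both become '00005E005366'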
+ if len(mac) == 12 + 5:
+ mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
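+ # A magic packet is six 0xFF bytes followed by the target MAC address
+ # repeated at least 16 times; this implementation repeats it 20 times,
+ # which still contains the pattern receivers look for.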
+ data = b''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ if not module.check_mode:
+
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error as e:
+ sock.close()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ sock.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mac=dict(type='str', required=True),
+ broadcast=dict(type='str', default='255.255.255.255'),
+ port=dict(type='int', default=7),
+ ),
+ supports_check_mode=True,
+ )
+
+ mac = module.params['mac']
+ broadcast = module.params['broadcast']
+ port = module.params['port']
+
+ wakeonlan(module, mac, broadcast, port)
+
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py
new file mode 100644
index 00000000..dcf1656f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_mod_proxy.py
@@ -0,0 +1,450 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+author: Olivier Boukili (@oboukili)
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+ this page. This module supports Ansible check mode, and requires the
+ BeautifulSoup Python module.
+options:
+ balancer_url_suffix:
+ type: str
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ default: /balancer-manager/
+ balancer_vhost:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, apache2_mod_proxy module will return a members list of
+ dictionaries of all the current balancer pool members' attributes.
+ state:
+ type: str
+ description:
+ - Desired state of the member host.
+ The states (absent|disabled), drained, hot_standby and ignore_errors can be
+ invoked simultaneously by separating them with a comma (e.g. state=drained,ignore_errors).
+ - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ tls:
+ description:
+ - Use https to access balancer management page.
+ type: bool
+ default: 'no'
+ validate_certs:
+ description:
+ - Validate ssl/tls certificates.
+ type: bool
+ default: 'yes'
+'''
+
+EXAMPLES = '''
+- name: Get all current balancer pool members attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+- name: Get a specific member attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+ balancer_url_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- name: Get attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+
+- name: Enable all balancer pool members
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- name: Drain the member from the balancer pool
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Wait for all connections to the member to drain
+ ansible.builtin.wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Remove the member from the balancer pool
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+BEAUTIFUL_SOUP_IMP_ERR = None
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
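+# Illustrative management URL query string that EXPRESSION is meant to match
+# (hypothetical values): b=mycluster&w=http://10.0.0.20:8080/ws&nonce=...
+# group 2 = balancer name, group 3 = protocol, group 4 = host,
+# group 5 = port, group 6 = path.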
+
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ if balancer_member_page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + self.management_url)
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled': 'Dis',
+ 'drained': 'Drn',
+ 'hot_standby': 'Stby',
+ 'ignore_errors': 'Ign'}
+ status = {}
+ actual_status = str(self.attributes['Status'])
+ for mode in status_mapping.keys():
+ if re.search(pattern=status_mapping[mode], string=actual_status):
+ status[mode] = True
+ else:
+ status[mode] = False
+ return status
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled': '&w_status_D',
+ 'drained': '&w_status_N',
+ 'hot_standby': '&w_status_H',
+ 'ignore_errors': '&w_status_I'}
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ for k in values_mapping.keys():
+ if values[str(k)]:
+ request_body = request_body + str(values_mapping[k]) + '=1'
+ else:
+ request_body = request_body + str(values_mapping[k]) + '=0'
+
+ response = fetch_url(self.module, self.management_url, data=str(request_body))
+ if response[1]['status'] != 200:
+ self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+
+ def __init__(self, host, suffix, module, members=None, tls=False):
+ if tls:
+ self.base_url = 'https://' + str(host)
+ self.url = 'https://' + str(host) + str(suffix)
+ else:
+ self.base_url = 'http://' + str(host)
+ self.url = 'http://' + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ if page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if apache_version:
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+ else:
+ self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ if not balancer_member_suffix:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_BEAUTIFULSOUP:
+ module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+
+ if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
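+ # Map the requested states onto the member status flags; 'absent' is
+ # implemented by disabling the member in the balancer.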
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
new file mode 100644
index 00000000..4cc0ef8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/apache2_module.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver.
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+options:
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M).
+ This is optional and usually determined automatically by the common convention of
+ appending C(_module) to I(name) as well as custom exceptions for popular modules.
+ required: False
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: False
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+ - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
+ type: bool
+ default: False
+requirements: ["a2enmod","a2dismod"]
+'''
+
+EXAMPLES = '''
+- name: Enable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: present
+ name: wsgi
+
+- name: Disable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: absent
+ name: wsgi
+
+- name: Disable default modules for Debian
+ community.general.apache2_module:
+ state: absent
+ name: autoindex
+ force: True
+
+- name: Disable mpm_worker and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ state: absent
+ name: mpm_worker
+ ignore_configcheck: True
+
+- name: Enable dump_io module, which is identified as dumpio_module inside apache2
+ community.general.apache2_module:
+ state: present
+ name: dump_io
+ identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+ description: message about action taken
+ returned: always
+ type: str
+warnings:
+ description: list of warning messages
+ returned: when needed
+ type: list
+rc:
+ description: return code of underlying command
+ returned: failed
+ type: int
+stdout:
+ description: stdout of underlying command
+ returned: failed
+ type: str
+stderr:
+ description: stderr of underlying command
+ returned: failed
+ type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _run_threaded(module):
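+ # Determine whether the running MPM is threaded by inspecting the
+ # 'threaded:' line of the control binary's '-V' output.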
+ control_binary = _get_ctl_binary(module)
+
+ result, stdout, stderr = module.run_command("%s -V" % control_binary)
+
+ return bool(re.search(r'threaded:[ ]*yes', stdout))
+
+
+def _get_ctl_binary(module):
+ for command in ['apache2ctl', 'apachectl']:
+ ctl_binary = module.get_bin_path(command)
+ if ctl_binary is not None:
+ return ctl_binary
+
+ module.fail_json(
+ msg="Neither of apache2ctl nor apachctl found."
+ " At least one apache control binary is necessary."
+ )
+
+
+def _module_is_enabled(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command("%s -M" % control_binary)
+
+ if result != 0:
+ error_msg = "Error executing %s: %s" % (control_binary, stderr)
+ if module.params['ignore_configcheck']:
+ if 'AH00534' in stderr and 'mpm_' in module.params['name']:
+ module.warnings.append(
+ "No MPM module loaded! apache2 reload AND other module actions"
+ " will fail if no MPM module is loaded immediately."
+ )
+ else:
+ module.warnings.append(error_msg)
+ return False
+ else:
+ module.fail_json(msg=error_msg)
+
+ searchstring = ' ' + module.params['identifier']
+ return searchstring in stdout
+
+
+def create_apache_identifier(name):
+ """
+ By convention if a module is loaded via name, it appears in apache2ctl -M as
+ name_module.
+
+ Some modules don't follow this convention and we use replacements for those."""
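+ # For example (following the workaround tables below): 'ssl' -> 'ssl_module',
+ # 'php7.4' -> 'php7_module', 'shib2' -> 'mod_shib'.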
+
+ # a2enmod name replacement to apache2ctl -M names
+ text_workarounds = [
+ ('shib', 'mod_shib'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ]
+
+ # re expressions to extract subparts of names
+ re_workarounds = [
+ ('php', r'^(php\d)\.'),
+ ]
+
+ for a2enmod_spelling, module_name in text_workarounds:
+ if a2enmod_spelling in name:
+ return module_name
+
+ for search, reexpr in re_workarounds:
+ if search in name:
+ try:
+ rematch = re.search(reexpr, name)
+ return rematch.group(1) + '_module'
+ except AttributeError:
+ pass
+
+ return name + '_module'
+
+
+def _set_state(module, state):
+ name = module.params['name']
+ force = module.params['force']
+
+ want_enabled = state == 'present'
+ state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
+ a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+ success_msg = "Module %s %s" % (name, state_string)
+
+ if _module_is_enabled(module) != want_enabled:
+ if module.check_mode:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+
+ a2mod_binary_path = module.get_bin_path(a2mod_binary)
+ if a2mod_binary_path is None:
+ module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
+
+ if not want_enabled and force:
+ # force exists only for a2dismod on debian
+ a2mod_binary_path += ' -f'
+
+ result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary_path, name))
+
+ if _module_is_enabled(module) == want_enabled:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+ else:
+ msg = (
+ 'Failed to set module {name} to {state}:\n'
+ '{stdout}\n'
+ 'Maybe the module identifier ({identifier}) was guessed incorrectly. '
+ 'Consider setting the "identifier" option.'
+ ).format(
+ name=name,
+ state=state_string,
+ stdout=stdout,
+ identifier=module.params['identifier']
+ )
+ module.fail_json(msg=msg,
+ rc=result,
+ stdout=stdout,
+ stderr=stderr)
+ else:
+ module.exit_json(changed=False,
+ result=success_msg,
+ warnings=module.warnings)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ identifier=dict(required=False, type='str'),
+ force=dict(required=False, type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ignore_configcheck=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.warnings = []
+
+ name = module.params['name']
+ if name == 'cgi' and _run_threaded(module):
+ module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
+
+ if not module.params['identifier']:
+ module.params['identifier'] = create_apache_identifier(module.params['name'])
+
+ if module.params['state'] in ['present', 'absent']:
+ _set_state(module, module.params['state'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py
new file mode 100644
index 00000000..641cc1d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/deploy_helper.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects.
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the C(state=query) or C(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the path parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+options:
+ path:
+ type: path
+ required: True
+ aliases: ['dest']
+ description:
+ - the root path of the project. Alias I(dest).
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ type: str
+ description:
+ - the state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+ C(absent) will remove the project folder (synonymous with the M(ansible.builtin.file) module with C(state=absent)).
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ type: str
+ description:
+ - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
+ This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
+ You can use the generated fact C(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ type: str
+ description:
+ - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ type: path
+ description:
+ - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ type: path
+ description:
+ - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ type: str
+ description:
+ - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
+ contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
+ automatically deleted from the I(new_release_path) during C(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of C(state=finalize).
+ type: bool
+ default: 'yes'
+
+ keep_releases:
+ type: int
+ description:
+ - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ community.general.deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ ansible.builtin.git:
+ repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ community.general.deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# all paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: False
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+ path: /path/to/root
+- ansible.builtin.debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ changed = False
+
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="The symlink target %s does not exist" % source)
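+ # Switch the symlink atomically: create a temporary link next to the
+ # real one and rename() it into place, so the 'current' link is never
+ # missing while the switch happens.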
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
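+ # Remove all but the newest keep_releases releases (ordered by creation
+ # time), never counting or touching reserve_version (the release that is
+ # being kept current).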
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(required=False, type='str', default=None),
+ releases_path=dict(required=False, type='str', default='releases'),
+ shared_path=dict(required=False, type='path', default='shared'),
+ current_path=dict(required=False, type='path', default='current'),
+ keep_releases=dict(required=False, type='int', default=5),
+ clean=dict(required=False, type='bool', default=True),
+ unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if not deploy_helper.release:
+ module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py
new file mode 100644
index 00000000..10161c04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/django_manage.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: django_manage
+short_description: Manages a Django application.
+description:
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
+ C(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+options:
+ command:
+ description:
+ - The name of the Django management command to run. Built in commands are C(cleanup), C(collectstatic),
+ C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may
+ prompt for user input should be run with the C(--noinput) flag.
+ - The module will perform some basic parameter validation (when applicable) to the commands C(cleanup),
+ C(collectstatic), C(createcachetable), C(flush), C(loaddata), C(migrate), C(syncdb), C(test), and C(validate).
+ type: str
+ required: true
+ project_path:
+ description:
+ - The path to the root of the Django application where B(manage.py) lives.
+ type: path
+ required: true
+ aliases: [app_path, chdir]
+ settings:
+ description:
+ - The Python path to the application's settings module, such as C(myapp.settings).
+ type: path
+ required: false
+ pythonpath:
+ description:
+ - A directory to add to the Python path. Typically used to include the settings module if it is located
+ external to the application directory.
+ type: path
+ required: false
+ aliases: [python_path]
+ virtualenv:
+ description:
+ - An optional path to a I(virtualenv) installation to use while running the manage application.
+ type: path
+ aliases: [virtual_env]
+ apps:
+ description:
+ - A list of space-delimited apps to target. Used by the C(test) command.
+ type: str
+ required: false
+ cache_table:
+ description:
+ - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ type: str
+ required: false
+ clear:
+ description:
+ - Clear the existing files before trying to copy or link the original file.
+ - Used only with the 'collectstatic' command. The C(--noinput) argument will be added automatically.
+ required: false
+ default: no
+ type: bool
+ database:
+ description:
+ - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
+ and C(migrate) commands.
+ type: str
+ required: false
+ failfast:
+ description:
+ - Fail the command immediately if a test fails. Used by the C(test) command.
+ required: false
+ default: false
+ type: bool
+ aliases: [fail_fast]
+ fixtures:
+ description:
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ type: str
+ required: false
+ skip:
+ description:
+      - Will skip over out-of-order missing migrations. This parameter can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ merge:
+ description:
+      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter can
+        only be used with the C(migrate) command.
+ required: false
+ type: bool
+ link:
+ description:
+      - Will create links to the files instead of copying them. This parameter can only be used with the
+        C(collectstatic) command.
+ required: false
+ type: bool
+ liveserver:
+ description:
+      - This parameter was implemented a long time ago in a galaxy far away. It probably relates to the
+ django-liveserver package, which is no longer updated.
+ - Hence, it will be considered DEPRECATED and should be removed in a future release.
+ type: str
+ required: false
+ aliases: [live_server]
+ testrunner:
+ description:
+ - "From the Django docs: Controls the test runner class that is used to execute tests."
+ - This parameter is passed as-is to C(manage.py).
+ type: str
+ required: false
+ aliases: [test_runner]
+notes:
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter
+ is specified.
+ - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already
+ exist at the given location.
+ - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ unfortunately.
+ - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added
+ as an app in your settings.
+ - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
+ i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+requirements: [ "virtualenv", "django" ]
+author: "Scott Anderson (@tastychutney)"
+'''
+
+EXAMPLES = """
+- name: Run cleanup on the application installed in django_dir
+ community.general.django_manage:
+ command: cleanup
+ project_path: "{{ django_dir }}"
+
+- name: Load the initial_data fixture into the application
+ community.general.django_manage:
+ command: loaddata
+ project_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
+
+- name: Run syncdb on the application
+ community.general.django_manage:
+ command: syncdb
+ project_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
+
+- name: Run the SmokeTest test case from the main app. Useful for testing deploys
+ community.general.django_manage:
+ command: test
+ project_path: "{{ django_dir }}"
+ apps: main.SmokeTest
+
+- name: Create an initial superuser
+ community.general.django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ project_path: "{{ django_dir }}"
+"""
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+ msg += "\n:stderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
+
+ venv_param = module.params['virtualenv']
+ if venv_param is None:
+ return
+
+ vbin = os.path.join(venv_param, 'bin')
+ activate = os.path.join(vbin, 'activate')
+
+ if not os.path.exists(activate):
+ virtualenv = module.get_bin_path('virtualenv', True)
+ vcmd = [virtualenv, venv_param]
+ rc, out_venv, err_venv = module.run_command(vcmd)
+ if rc != 0:
+ _fail(module, vcmd, out_venv, err_venv)
+
+ os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+ os.environ["VIRTUAL_ENV"] = venv_param
+
+
+def createcachetable_check_changed(output):
+ return "already exists" not in output
+
+
+def flush_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+ return ("Creating table " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+ return ("Migrating forwards " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line) \
+ or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+ return line and "0 static files" not in line
+
+
+def main():
+ command_allowed_param_map = dict(
+ cleanup=(),
+ createcachetable=('cache_table', 'database', ),
+ flush=('database', ),
+ loaddata=('database', 'fixtures', ),
+ syncdb=('database', ),
+ test=('failfast', 'testrunner', 'liveserver', 'apps', ),
+ validate=(),
+ migrate=('apps', 'skip', 'merge', 'database',),
+ collectstatic=('clear', 'link', ),
+ )
+
+ command_required_param_map = dict(
+ loaddata=('fixtures', ),
+ )
+
+ # forces --noinput on every command that needs it
+ noinput_commands = (
+ 'flush',
+ 'syncdb',
+ 'migrate',
+ 'test',
+ 'collectstatic',
+ )
+
+ # These params are allowed for certain commands only
+ specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
+
+ # These params are automatically added to the command if present
+ general_params = ('settings', 'pythonpath', 'database',)
+ specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+ end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True, type='str'),
+ project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
+ settings=dict(default=None, required=False, type='path'),
+ pythonpath=dict(default=None, required=False, type='path', aliases=['python_path']),
+ virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
+
+ apps=dict(default=None, required=False),
+ cache_table=dict(default=None, required=False, type='str'),
+ clear=dict(default=False, required=False, type='bool'),
+ database=dict(default=None, required=False, type='str'),
+ failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']),
+ fixtures=dict(default=None, required=False, type='str'),
+ liveserver=dict(default=None, required=False, type='str', aliases=['live_server'],
+ removed_in_version='3.0.0', removed_from_collection='community.general'),
+ testrunner=dict(default=None, required=False, type='str', aliases=['test_runner']),
+ skip=dict(default=None, required=False, type='bool'),
+ merge=dict(default=None, required=False, type='bool'),
+ link=dict(default=None, required=False, type='bool'),
+ ),
+ )
+
+ command = module.params['command']
+ project_path = module.params['project_path']
+ virtualenv = module.params['virtualenv']
+
+ for param in specific_params:
+ value = module.params[param]
+ if param in specific_boolean_params:
+ value = module.boolean(value)
+ if value and param not in command_allowed_param_map[command]:
+ module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
+
+ for param in command_required_param_map.get(command, ()):
+ if not module.params[param]:
+ module.fail_json(msg='%s param is required for command=%s' % (param, command))
+
+ _ensure_virtualenv(module)
+
+ cmd = "./manage.py %s" % (command, )
+
+ if command in noinput_commands:
+ cmd = '%s --noinput' % cmd
+
+ for param in general_params:
+ if module.params[param]:
+ cmd = '%s --%s=%s' % (cmd, param, module.params[param])
+
+ for param in specific_boolean_params:
+ if module.boolean(module.params[param]):
+ cmd = '%s --%s' % (cmd, param)
+
+ # these params always get tacked on the end of the command
+ for param in end_of_command_params:
+ if module.params[param]:
+ cmd = '%s %s' % (cmd, module.params[param])
+
+ rc, out, err = module.run_command(cmd, cwd=project_path)
+ if rc != 0:
+ if command == 'createcachetable' and 'table' in err and 'already exists' in err:
+ out = 'already exists.'
+ else:
+ if "Unknown command:" in err:
+ _fail(module, cmd, err, "Unknown django command: %s" % command)
+ _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
+
+ changed = False
+
+ lines = out.split('\n')
+ filt = globals().get(command + "_filter_output", None)
+ if filt:
+ filtered_output = list(filter(filt, lines))
+ if len(filtered_output):
+ changed = True
+ check_changed = globals().get("{0}_check_changed".format(command), None)
+ if check_changed:
+ changed = check_changed(out)
+
+ module.exit_json(changed=changed, out=out, cmd=cmd, app_path=project_path, project_path=project_path,
+ virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
+
+
+if __name__ == '__main__':
+ main()
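
A minimal standalone sketch (not part of the module) of the command-assembly logic in main() above: general parameters become --key=value options, boolean parameters become bare flags, and the apps/cache_table/fixtures values are appended at the end. The parameter values below are hypothetical.

# Sketch of how django_manage builds the manage.py command line; the values
# passed at the bottom are placeholders for illustration only.
def build_manage_command(command, params):
    noinput_commands = ('flush', 'syncdb', 'migrate', 'test', 'collectstatic')
    general_params = ('settings', 'pythonpath', 'database')
    boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')

    cmd = "./manage.py %s" % command
    if command in noinput_commands:
        cmd += ' --noinput'
    for name in general_params:            # e.g. --settings=myapp.settings
        if params.get(name):
            cmd += ' --%s=%s' % (name, params[name])
    for name in boolean_params:            # bare flags such as --failfast
        if params.get(name):
            cmd += ' --%s' % name
    for name in end_of_command_params:     # positional values at the end
        if params.get(name):
            cmd += ' %s' % params[name]
    return cmd


print(build_manage_command('migrate', {'settings': 'myapp.settings', 'database': 'default'}))
# ./manage.py migrate --noinput --settings=myapp.settings --database=default
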
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py
new file mode 100644
index 00000000..be63c920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/ejabberd_user.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+options:
+ username:
+ type: str
+ description:
+ - the name of the user to manage
+ required: true
+ host:
+ type: str
+ description:
+ - the ejabberd host associated with this username
+ required: true
+ password:
+ type: str
+ description:
+ - the password to assign to the username
+ required: false
+ logging:
+ description:
+ - enables or disables the local syslog facility for this module
+ required: false
+ default: false
+ type: bool
+ state:
+ type: str
+ description:
+ - describe the desired state of the user to be managed
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+ - Password parameter is required for state == present only
+ - Passwords must be stored in clear text for this release
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage users state.
+
+- name: Create a user if it does not exist
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: Delete a user if it exists
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ state: absent
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUserException(Exception):
+ """ Base exception for EjabberdUser class object """
+ pass
+
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+ * ejabberdctl deregister
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+        changed. It returns a non-zero return code if the supplied credentials do
+        not match the stored password, and zero if they do
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('check_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return rc
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('check_account', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return not bool(int(rc))
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ if not all(options):
+ raise EjabberdUserException
+
+ cmd = 'ejabberdctl %s ' % cmd
+ cmd += " ".join(options)
+ self.log('command: %s' % cmd)
+ return self.module.run_command(cmd.split())
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('change_password', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ try:
+ options = [self.user, self.host, self.pwd]
+ (rc, out, err) = self.run_command('register', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ try:
+ options = [self.user, self.host]
+ (rc, out, err) = self.run_command('unregister', options)
+ except EjabberdUserException:
+ (rc, out, err) = (1, None, "required attribute(s) missing")
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(default=None, type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict(changed=False)
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
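
For reference, a minimal sketch of the ejabberdctl subcommands the EjabberdUser class above drives (check_account, check_password, register, change_password, unregister), assuming ejabberdctl is installed and on PATH; the user, host and password values are placeholders.

# Sketch of the ejabberdctl calls behind state=present/absent; values are placeholders.
import subprocess

def ejabberdctl(subcommand, *args):
    # ejabberdctl returns 0 on success, non-zero otherwise.
    return subprocess.call(['ejabberdctl', subcommand] + list(args))

def ensure_present(user, host, password):
    if ejabberdctl('check_account', user, host) != 0:
        return ejabberdctl('register', user, host, password)          # user missing: create it
    if ejabberdctl('check_password', user, host, password) != 0:
        return ejabberdctl('change_password', user, host, password)   # password differs: update it
    return 0                                                          # nothing to do

def ensure_absent(user, host):
    if ejabberdctl('check_account', user, host) == 0:
        return ejabberdctl('unregister', user, host)
    return 0
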
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py
new file mode 100644
index 00000000..57030556
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/gunicorn.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings.
+description:
+ - Starts gunicorn with the parameters specified. Common settings for gunicorn
+ configuration are supported. For additional configuration use a config file
+ See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+ options. It's recommended to always use the chdir option to avoid problems
+ with the location of the app.
+requirements: [gunicorn]
+author:
+ - "Alejandro Gomez (@agmezr)"
+options:
+ app:
+ type: str
+ required: true
+ aliases: ['name']
+ description:
+ - The app module. A name refers to a WSGI callable that should be found in the specified module.
+ venv:
+ type: path
+ aliases: ['virtualenv']
+ description:
+ - 'Path to the virtualenv directory.'
+ config:
+ type: path
+ description:
+ - 'Path to the gunicorn configuration file.'
+ aliases: ['conf']
+ chdir:
+ type: path
+ description:
+      - 'Change to the specified directory before loading the app.'
+ pid:
+ type: path
+ description:
+      - 'A filename to use for the PID file. If not set here and not found in the configuration file, a temporary
+        PID file will be created to verify that gunicorn started successfully.'
+ worker:
+ type: str
+    choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ description:
+ - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+ user:
+ type: str
+ description:
+ - 'Switch worker processes to run as this user.'
+notes:
+  - If no error log is specified in the config file, a temporary error log will be created in the module's
+    temporary directory. Make sure that directory is writable. The log is not required, but it helps to
+    identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: Simple gunicorn run example
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+
+- name: Run gunicorn on a virtualenv
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ venv: '/workspace/example/venv'
+
+- name: Run gunicorn with a config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+
+- name: Run gunicorn as ansible user with specified pid and config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+ venv: '/workspace/example/venv'
+ pid: '/workspace/example/gunicorn.pid'
+ user: 'ansible'
+'''
+
+RETURN = '''
+gunicorn:
+ description: process id of gunicorn
+ returned: changed
+ type: str
+ sample: "1234"
+'''
+
+import os
+import time
+
+# import ansible utils
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+ ''' search in config file for specified option '''
+ if config and os.path.isfile(config):
+ data_config = None
+ with open(config, 'r') as f:
+ for line in f:
+ if option in line:
+ return line
+ return None
+
+
+def remove_tmp_file(file_path):
+ ''' remove temporary files '''
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+def main():
+
+ # available gunicorn options on module
+ gunicorn_options = {
+ 'config': '-c',
+ 'chdir': '--chdir',
+ 'worker': '-k',
+ 'user': '-u',
+ }
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ app=dict(required=True, type='str', aliases=['name']),
+ venv=dict(required=False, type='path', default=None, aliases=['virtualenv']),
+ config=dict(required=False, default=None, type='path', aliases=['conf']),
+ chdir=dict(required=False, type='path', default=None),
+ pid=dict(required=False, type='path', default=None),
+ user=dict(required=False, type='str'),
+ worker=dict(required=False,
+ type='str',
+                      choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ ),
+ )
+ )
+
+ # temporary files in case no option provided
+ tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
+ tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
+
+ # remove temp file if exists
+ remove_tmp_file(tmp_pid_file)
+ remove_tmp_file(tmp_error_log)
+
+ # obtain app name and venv
+ params = module.params
+ app = params['app']
+ venv = params['venv']
+ pid = params['pid']
+
+ # use venv path if exists
+ if venv:
+ gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
+ else:
+ gunicorn_command = 'gunicorn'
+
+ # to daemonize the process
+ options = ["-D"]
+
+ # fill options
+ for option in gunicorn_options:
+ param = params[option]
+ if param:
+ options.append(gunicorn_options[option])
+ options.append(param)
+
+ error_log = search_existing_config(params['config'], 'errorlog')
+ if not error_log:
+ # place error log somewhere in case of fail
+ options.append("--error-logfile")
+ options.append(tmp_error_log)
+
+ pid_file = search_existing_config(params['config'], 'pid')
+ if not params['pid'] and not pid_file:
+ pid = tmp_pid_file
+
+ # add option for pid file if not found on config file
+ if not pid_file:
+ options.append('--pid')
+ options.append(pid)
+
+ # put args together
+ args = [gunicorn_command] + options + [app]
+ rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
+
+ if not err:
+ # wait for gunicorn to dump to log
+ time.sleep(0.5)
+ if os.path.isfile(pid):
+ with open(pid, 'r') as f:
+ result = f.readline().strip()
+
+ if not params['pid']:
+ os.remove(pid)
+
+ module.exit_json(changed=True, pid=result, debug=" ".join(args))
+ else:
+ # if user defined own error log, check that
+ if error_log:
+ error = 'Please check your {0}'.format(error_log.strip())
+ else:
+ if os.path.isfile(tmp_error_log):
+ with open(tmp_error_log, 'r') as f:
+ error = f.read()
+ # delete tmp log
+ os.remove(tmp_error_log)
+ else:
+ error = "Log not found"
+
+ module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
+
+ else:
+ module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err)
+
+
+if __name__ == '__main__':
+ main()
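
A minimal sketch of how the module above assembles the daemonized gunicorn command line from its parameters; the paths and app name are placeholders.

# Sketch of the argument list handed to run_command; values are placeholders.
import os

def build_gunicorn_args(params):
    option_flags = {'config': '-c', 'chdir': '--chdir', 'worker': '-k', 'user': '-u'}
    if params.get('venv'):
        gunicorn = os.path.join(params['venv'], 'bin', 'gunicorn')
    else:
        gunicorn = 'gunicorn'
    args = [gunicorn, '-D']                    # -D daemonizes the process
    for name, flag in option_flags.items():
        if params.get(name):
            args += [flag, params[name]]
    if params.get('pid'):
        args += ['--pid', params['pid']]       # PID file used to confirm startup
    return args + [params['app']]

print(build_gunicorn_args({'app': 'wsgi', 'chdir': '/workspace/example',
                           'venv': '/workspace/example/venv',
                           'pid': '/workspace/example/gunicorn.pid'}))
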
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
new file mode 100644
index 00000000..6ff04131
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/htpasswd.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+options:
+ path:
+ type: path
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords
+ name:
+ type: str
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove
+ password:
+ type: str
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ type: str
+ required: false
+ default: "apr_md5_crypt"
+ description:
+ - Encryption scheme to be used. As well as the four choices listed
+ here, you can also use any other hash supported by passlib, such as
+        md5_crypt and sha256_crypt, which are Linux passwd hashes. If you do
+        so, the password file will not be compatible with Apache or Nginx.
+ - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext)'
+ state:
+ type: str
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not
+ create:
+ required: false
+ type: bool
+ default: "yes"
+ description:
+      - Used with C(state=present). If set to C(yes), the file will be created
+        if it does not already exist. If set to C(no), the module will fail if the
+        file does not exist.
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment: files
+'''
+
+EXAMPLES = """
+- name: Add a user to a password file and ensure permissions are set
+ community.general.htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: 0640
+
+- name: Remove a user from a password file
+ community.general.htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+- name: Add a user to a password file suitable for use by libpam-pwdfile
+ community.general.htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from distutils.version import LooseVersion
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(type='path', required=True, aliases=["dest", "destfile"]),
+ name=dict(type='str', required=True, aliases=["username"]),
+ password=dict(type='str', required=False, default=None, no_log=True),
+ crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ create=dict(type='bool', default=True),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+ # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+ # If the file gets edited, it returns true, so only edit the file if it has blank lines
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # If check mode, create a temporary file
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+ [f.write(line) for line in lines if line.strip()]
+ finally:
+ f.close()
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
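
A minimal sketch of the passlib (>= 1.6) calls that present() and absent() above rely on; the file path and credentials are placeholders.

# Sketch of the passlib API used by the module; path and credentials are placeholders.
import os
from passlib.apache import HtpasswdFile

path = '/tmp/demo-passwdfile'
ht = HtpasswdFile(path, new=not os.path.exists(path), default_scheme='apr_md5_crypt')

if not ht.check_password('janedoe', 's3cret'):   # False/None means an update is needed
    ht.set_password('janedoe', 's3cret')
    ht.save()                                    # write the file to disk

if 'janedoe' in ht.users():                      # removal mirrors absent()
    ht.delete('janedoe')
    ht.save()
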
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py
new file mode 100644
index 00000000..4c077a1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jboss.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: jboss
+short_description: Deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem.
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment.
+ type: str
+ src:
+ description:
+ - The remote path of the application ear or war to deploy.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ type: path
+ deploy_path:
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens.
+ type: path
+ state:
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed.
+ type: str
+notes:
+ - The JBoss standalone deployment-scanner has to be enabled in standalone.xml
+ - The module can wait until I(deployment) file is deployed/undeployed by deployment-scanner.
+ Duration of waiting time depends on scan-interval parameter from standalone.xml.
+ - Ensure no identically named application is deployed through the JBoss CLI
+seealso:
+- name: WildFly reference
+ description: Complete reference of the WildFly documentation.
+ link: https://docs.wildfly.org
+author:
+ - Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r"""
+- name: Deploy a hello world application to the default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
+- name: Update the hello world application to the non-default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deploy_path: /opt/wildfly/deployment
+ deployment: hello.war
+ state: present
+
+- name: Undeploy the hello world application from the default deploy_path
+ community.general.jboss:
+ deployment: hello.war
+ state: absent
+"""
+
+RETURN = r""" # """
+
+import os
+import shutil
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+
+DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments'
+
+
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path'),
+ deployment=dict(type='str', required=True),
+ deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH),
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ ),
+ required_if=[('state', 'present', ('src',))],
+ supports_check_mode=True
+ )
+
+ result = dict(changed=False)
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ if state == 'absent' and src:
+ module.warn('Parameter src is ignored when state=absent')
+ elif state == 'present' and not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.' % src)
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ # === when check_mode ===
+ if module.check_mode:
+ if state == 'present':
+ if not deployed:
+ result['changed'] = True
+
+ elif deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ result['changed'] = True
+
+ elif state == 'absent' and deployed:
+ result['changed'] = True
+
+ module.exit_json(**result)
+ # =======================
+
+ if state == 'present' and not deployed:
+ if is_failed(deploy_path, deployment):
+ # Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
+
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ shutil.copyfile(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
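
A minimal sketch of the marker-file protocol the module above polls: the JBoss deployment scanner creates <deployment>.deployed or <deployment>.failed next to the copied archive. The paths are placeholders, and the timeout is an addition for the sketch; the module itself keeps polling until the scanner reports a result.

# Sketch of filesystem deployment against the scanner; paths are placeholders.
import os
import shutil
import time

def deploy(src, deploy_path, deployment, timeout=60):
    shutil.copyfile(src, os.path.join(deploy_path, deployment))
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment)):
            return True                                  # scanner accepted the deployment
        if os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment)):
            raise RuntimeError('Deploying %s failed.' % deployment)
        time.sleep(1)                                    # wait roughly one scan interval
    raise RuntimeError('Timed out waiting for the deployment scanner.')
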
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py
new file mode 100644
index 00000000..0e06b5ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+options:
+ config:
+ type: str
+ description:
+ - config in XML format.
+ - Required if job does not yet exist.
+ - Mutually exclusive with C(enabled).
+ - Considered if C(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with C(config).
+ - Considered if C(state=present).
+ type: bool
+ required: false
+ name:
+ type: str
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ type: str
+ description:
+ - Attribute that specifies if the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ type: str
+ description:
+      - API token used to authenticate with the Jenkins server, as an alternative to C(password).
+ required: false
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a jenkins job using basic authentication
+ community.general.jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: http://localhost:8080
+ user: admin
+
+- name: Create a jenkins job using the token
+ community.general.jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: False
+ url: http://localhost:8080
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: str
+ sample: test-job
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: str
+ sample: present
+enabled:
+ description: Whether the jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: Url to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+'''
+
+import traceback
+import xml.etree.ElementTree as ET
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+class JenkinsJob:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ self.EXCL_STATE = "excluded state"
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
+
+ def get_job_status(self):
+ try:
+ response = self.server.get_job_info(self.name)
+ if "color" not in response:
+ return self.EXCL_STATE
+ else:
+ return to_native(response['color'])
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception as e:
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep in current state if enabled arg_spec is not given
+ if self.enabled is None:
+ return False
+
+ if ((self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")):
+ return True
+ return False
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif (status != self.EXCL_STATE and self.has_state_changed(status)):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def job_config_to_string(xml_str):
+ return ET.tostring(ET.fromstring(xml_str)).decode('ascii')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ config=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
+ enabled=dict(required=False, type='bool'),
+ token=dict(type='str', required=False, no_log=True),
+ url=dict(type='str', required=False, default="http://localhost:8080"),
+ user=dict(type='str', required=False)
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
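
A minimal sketch of the python-jenkins (>= 0.4.12) calls wrapped by the JenkinsJob class above; the URL, credentials and job name are placeholders, and EMPTY_CONFIG_XML is the stock empty job definition shipped with python-jenkins.

# Sketch of the python-jenkins calls behind create/update/disable; values are placeholders.
import jenkins

server = jenkins.Jenkins('http://localhost:8080', username='admin', password='admin')
config_xml = jenkins.EMPTY_CONFIG_XML

if not server.job_exists('demo-job'):
    server.create_job('demo-job', config_xml)        # what create_job() does
elif server.get_job_config('demo-job') != config_xml:
    server.reconfig_job('demo-job', config_xml)      # what update_job() does on a config change

server.disable_job('demo-job')                       # what switch_state() does for enabled: False
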
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_facts.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+  - This module can be used to query information about Jenkins jobs which already exist.
+  - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter, if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter, if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+      - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
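
A minimal sketch of the glob and color filtering performed by get_jobs() above; the job list is made up for illustration.

# Sketch of the fnmatch-based filtering; the job data below is invented.
import fnmatch

all_jobs = [
    {'fullname': 'folder/app-build', 'color': 'blue'},
    {'fullname': 'folder/app-deploy', 'color': 'red'},
    {'fullname': 'misc-cleanup', 'color': 'blue'},
]

glob, color = 'folder/app-*', 'blue'
jobs = [j for j in all_jobs if fnmatch.fnmatch(j['fullname'], glob)]
jobs = [j for j in jobs if j['color'] == color]
print(jobs)   # [{'fullname': 'folder/app-build', 'color': 'blue'}]
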
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py
new file mode 100644
index 00000000..f0d13262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_job_info.py
@@ -0,0 +1,258 @@
+#!/usr/bin/python
+#
+# Copyright: (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+  - This module can be used to query information about Jenkins jobs which already exist.
+  - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is a required parameter, if C(token) is not provided.
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is a required parameter, if C(password) is not provided.
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(False), the SSL certificates will not be validated.
+      - This should only be set to C(False) when used on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: False
+ register: my_jenkins_job_info
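+
+# A follow-up task (illustrative) that lists the names of the jobs returned by
+# the query above, using the registered variable:
+- name: Show the names of the jobs that were found
+  ansible.builtin.debug:
+    msg: "{{ my_jenkins_job_info.jobs | map(attribute='name') | list }}"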
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
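+ # Note: disabling certificate validation here is process-wide; the default
+ # HTTPS context is replaced with an unverified one because python-jenkins
+ # does not expose a per-connection switch for this.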
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ required_one_of=[
+ ['password', 'token'],
+ ],
+ supports_check_mode=True,
+ )
+ if module._name in ('jenkins_job_facts', 'community.general.jenkins_job_facts'):
+ module.deprecate("The 'jenkins_job_facts' module has been renamed to 'jenkins_job_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py
new file mode 100644
index 00000000..e2adf7a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_plugin.py
@@ -0,0 +1,780 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+options:
+ group:
+ type: str
+ description:
+ - Name of the Jenkins group on the OS.
+ default: jenkins
+ jenkins_home:
+ type: path
+ description:
+ - Home directory of the Jenkins user.
+ default: /var/lib/jenkins
+ mode:
+ type: raw
+ description:
+ - File mode applied on versioned plugins.
+ default: '0644'
+ name:
+ type: str
+ description:
+ - Plugin name.
+ required: yes
+ owner:
+ type: str
+ description:
+ - Name of the Jenkins user on the OS.
+ default: jenkins
+ state:
+ type: str
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, a check for a newer version will be performed
+ every time. This is suitable for keeping the plugin up to date.
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ timeout:
+ type: int
+ description:
+ - Server connection timeout in secs.
+ default: 30
+ updates_expiration:
+ type: int
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ default: 86400
+ updates_url:
+ type: str
+ description:
+ - URL of the Update Centre.
+ - Used as the base URL to download the plugins and the
+ I(update-center.json) JSON file.
+ default: https://updates.jenkins.io
+ url:
+ type: str
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ version:
+ type: str
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ - Quote the version to prevent it from being interpreted as a float. For
+ example, if C(1.20) were left unquoted, it would become C(1.2).
+ with_dependencies:
+ description:
+ - Defines whether to install plugin dependencies.
+ - This option takes effect only if the I(version) is not defined.
+ type: bool
+ default: yes
+
+notes:
+ - Plugin installation should be run under root or the same user which owns
+ the plugin files on the disk. Only if the plugin is not installed yet and
+ no version is specified is the installation performed through the API,
+ which requires only the Web UI credentials.
+ - It is necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin is installed.
+ - Pinning works only if the plugin is installed and the Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment:
+ - url
+ - files
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: no
+
+- name: Make sure the plugin is always up-to-date
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+
+- name: Install specific version of the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ version: "1.15"
+
+- name: Pin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to authenticate
+#
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ url_username: admin
+ url_password: p4ssw0rd
+ url: http://localhost:8888
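+
+# Jenkins also accepts a user's API token in place of the password for basic
+# authentication; a sketch (the token value is made up):
+- name: Install plugin using an API token
+  community.general.jenkins_plugin:
+    name: build-pipeline-plugin
+    url_username: admin
+    url_password: 11aa22bb33cc44dd55ee66ff77889900
+    url: http://localhost:8888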
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: yes
+ build-pipeline-plugin:
+ version: "1.4.9"
+ pinned: no
+ enabled: yes
+ tasks:
+ - name: Install plugins without a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+
+ - name: Check if restart is required by any of the versioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: yes
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ ansible.builtin.service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ ansible.builtin.uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: no
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+from ansible.module_utils.basic import AnsibleModule, to_bytes
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils._text import to_native
+import base64
+import hashlib
+import json
+import os
+import tempfile
+import time
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+ # Cookie jar for crumb session
+ self.cookies = None
+
+ if self._csrf_enabled():
+ self.cookies = cookiejar.LWPCookieJar()
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ if 'useCrumbs' not in csrf_data:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumbs response.",
+ details=csrf_data)
+
+ return csrf_data['useCrumbs']
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.loads(to_native(r.read()))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=to_native(e))
+
+ return json_data
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] != 200:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception as e:
+ self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Create final list of installed/pinned plugins
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] in [None, 'latest']:
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
+
+ script_data = {
+ 'script': install_script
+ }
+ data = urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data)
+
+ hpi_file = '%s/plugins/%s.hpi' % (
+ self.params['jenkins_home'],
+ self.params['name'])
+
+ if os.path.isfile(hpi_file):
+ os.remove(hpi_file)
+
+ changed = True
+ else:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ checksum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ with open(plugin_file, 'rb') as plugin_fh:
+ plugin_content = plugin_fh.read()
+ checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_url = (
+ "%s/latest/%s.hpi" % (
+ self.params['updates_url'],
+ self.params['name']))
+ else:
+ # Take specific version
+ plugin_url = (
+ "{0}/download/plugins/"
+ "{1}/{2}/{1}.hpi".format(
+ self.params['updates_url'],
+ self.params['name'],
+ self.params['version']))
+
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ checksum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_url)
+
+ # Write downloaded plugin into file if checksums don't match
+ if checksum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Read the downloaded data to compute its checksum
+ data = r.read()
+
+ # Make new checksum
+ checksum_new = hashlib.sha1(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if checksum_old != checksum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ elif self.params['version'] == 'latest':
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ # If the latest version changed, download it
+ if checksum_old != to_bytes(plugin_data['sha1']):
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_url)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _download_updates(self):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = "%s/%s" % (updates_dir, updates_filename)
+ download_updates = True
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < self.params['updates_expiration']:
+ download_updates = False
+
+ updates_file_orig = updates_file
+
+ # Download the updates file if needed
+ if download_updates:
+ url = "%s/update-center.json" % self.params['updates_url']
+
+ # Get the data
+ r = self._get_url_data(
+ url,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ update_fd, updates_file = tempfile.mkstemp()
+ os.write(update_fd, r.read())
+
+ try:
+ os.close(update_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % updates_file,
+ details=to_native(e))
+
+ # Open the updates file
+ try:
+ f = open(updates_file, encoding='utf-8')
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot open temporal updates file.",
+ details=to_native(e))
+
+ i = 0
+ for line in f:
+ # Read only the second line
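+ # (update-center.json is wrapped in a JSONP-style callback, so the JSON
+ # payload is expected on the second line of the file)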
+ if i == 1:
+ try:
+ data = json.loads(line)
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot load JSON data from the tmp updates file.",
+ details=to_native(e))
+
+ break
+
+ i += 1
+
+ # Move the updates file to the right place if we could read it
+ if download_updates:
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ try:
+ os.makedirs(updates_dir, int('0700', 8))
+ except OSError as e:
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=to_native(e))
+
+ self.module.atomic_move(updates_file, updates_file_orig)
+
+ # Check if we have the plugin data available
+ if 'plugins' not in data or self.params['name'] not in data['plugins']:
+ self.module.fail_json(
+ msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_url):
+ # Download the plugin
+ r = self._get_url_data(
+ plugin_url,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ return r
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+ if isinstance(data, (text_type, binary_type)):
+ os.write(tmp_f_fd, data)
+ else:
+ os.write(tmp_f_fd, data.read())
+
+ try:
+ os.close(tmp_f_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=to_native(e))
+
+ # Move the file into the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg)
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(type='str', default='jenkins'),
+ jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(type='str', required=True),
+ owner=dict(type='str', default='jenkins'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(default='https://updates.jenkins.io'),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError as e:
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=to_native(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py
new file mode 100644
index 00000000..68f06c27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jenkins_script.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+
+# encoding: utf-8
+
+# (c) 2016, James Hogarth <james.hogarth@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a Groovy script in the Jenkins instance
+description:
+ - The C(jenkins_script) module takes a script plus a dict of values
+ to use within the script and returns the result of the script being run.
+
+options:
+ script:
+ type: str
+ description:
+ - The Groovy script to be executed.
+ This gets passed as a string.Template if I(args) is defined.
+ required: true
+ url:
+ type: str
+ description:
+ - The jenkins server to execute the script against. The default is a local
+ jenkins instance that is not being proxied through a webserver.
+ default: http://localhost:8080
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ This should only be set to C(no) on personally controlled sites
+ using self-signed certificates as it avoids verifying the source site.
+ type: bool
+ default: 'yes'
+ user:
+ type: str
+ description:
+ - The username to connect to the jenkins server with.
+ password:
+ type: str
+ description:
+ - The password to connect to the jenkins server with.
+ timeout:
+ type: int
+ description:
+ - The request timeout in seconds.
+ default: 10
+ args:
+ type: dict
+ description:
+ - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+ - Since the script can do anything, this module does not report changes.
+ Because an arbitrary script is being run, it is important to set C(changed_when)
+ so the Ansible output is clear about any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+ community.general.jenkins_script:
+ script: 'println(Jenkins.instance.pluginManager.plugins)'
+ user: admin
+ password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+ ansible.builtin.set_fact:
+ setmaster_mode: |
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
+
+- name: Use the variable as the script
+ community.general.jenkins_script:
+ script: "{{ setmaster_mode }}"
+ args:
+ jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+ community.general.jenkins_script:
+ script: "println(Jenkins.instance.pluginManager.plugins)"
+ user: admin
+ password: admin
+ url: https://localhost
+ validate_certs: no
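+
+# The module never reports a change by itself (see the notes above), so
+# changed_when is typically used to derive the change status from the script
+# output. A minimal sketch:
+- name: Ensure the number of executors, reporting a change only when updated
+  community.general.jenkins_script:
+    script: |
+      if (Jenkins.instance.numExecutors != 4) {
+        Jenkins.instance.setNumExecutors(4)
+        println("changed")
+      } else {
+        println("ok")
+      }
+    user: admin
+    password: admin
+  register: executors_result
+  changed_when: "'changed' in executors_result.output"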
+'''
+
+RETURN = '''
+output:
+ description: Result of script
+ returned: success
+ type: str
+ sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_native
+
+
+def is_csrf_protection_enabled(module):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/api/json',
+ timeout=module.params['timeout'],
+ method='GET')
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/crumbIssuer/api/json',
+ method='GET',
+ timeout=module.params['timeout'],
+ cookies=cookies)
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ script=dict(required=True, type="str"),
+ url=dict(required=False, type="str", default="http://localhost:8080"),
+ validate_certs=dict(required=False, type="bool", default=True),
+ user=dict(required=False, type="str", default=None),
+ password=dict(required=False, no_log=True, type="str", default=None),
+ timeout=dict(required=False, type="int", default=10),
+ args=dict(required=False, type="dict", default=None)
+ )
+ )
+
+ if module.params['user'] is not None:
+ if module.params['password'] is None:
+ module.fail_json(msg="password required when user provided", output='')
+ module.params['url_username'] = module.params['user']
+ module.params['url_password'] = module.params['password']
+ module.params['force_basic_auth'] = True
+
+ if module.params['args'] is not None:
+ from string import Template
+ try:
+ script_contents = Template(module.params['script']).substitute(module.params['args'])
+ except KeyError as err:
+ module.fail_json(msg="Error with templating variable: %s" % err, output='')
+ else:
+ script_contents = module.params['script']
+
+ headers = {}
+ cookies = None
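+ # When CSRF protection is enabled, Jenkins expects the crumb header and the
+ # session cookie obtained while fetching the crumb to accompany the POST below.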
+ if is_csrf_protection_enabled(module):
+ cookies = cookiejar.LWPCookieJar()
+ crumb = get_crumb(module, cookies)
+ headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+ resp, info = fetch_url(module,
+ module.params['url'] + "/scriptText",
+ data=urlencode({'script': script_contents}),
+ headers=headers,
+ method="POST",
+ timeout=module.params['timeout'],
+ cookies=cookies)
+
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ result = to_native(resp.read())
+
+ if 'Exception:' in result and 'at java.lang.Thread' in result:
+ module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
+
+ module.exit_json(
+ output=result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py
new file mode 100644
index 00000000..d10be9ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/jira.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: jira
+short_description: create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ type: str
+ required: true
+ description:
+ - Base URI for the JIRA instance.
+
+ operation:
+ type: str
+ required: true
+ aliases: [ command ]
+ choices: [ comment, create, edit, fetch, link, search, transition, update ]
+ description:
+ - The operation to perform.
+
+ username:
+ type: str
+ required: true
+ description:
+ - The username to log-in with.
+
+ password:
+ type: str
+ required: true
+ description:
+ - The password to log-in with.
+
+ project:
+ type: str
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ type: str
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ type: str
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ type: str
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ type: str
+ required: false
+ description:
+ - An existing issue key to operate on.
+ aliases: ['ticket']
+
+ comment:
+ type: str
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ type: str
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ type: str
+ required: false
+ description:
+ - Sets the assignee on create or transition operations. Note that not all transitions will allow this.
+
+ linktype:
+ type: str
+ required: false
+ description:
+ - Sets the type of link, used when I(operation=link).
+
+ inwardissue:
+ type: str
+ required: false
+ description:
+ - Sets the issue from which the link will be created.
+
+ outwardissue:
+ type: str
+ required: false
+ description:
+ - Sets the issue to which the link will be created.
+
+ fields:
+ type: dict
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+ (possibly after merging with other required data, as when passed to create). See examples for more information,
+ and the JIRA REST API for the structure required for various fields.
+
+ jql:
+ required: false
+ description:
+ - Query JIRA in JQL Syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ type: str
+ version_added: '0.2.0'
+
+ maxresults:
+ required: false
+ description:
+ - Limit the result of I(operation=search). If no value is specified, the default jira limit will be used.
+ - Used when I(operation=search) only, ignored otherwise.
+ type: int
+ version_added: '0.2.0'
+
+ timeout:
+ type: float
+ required: false
+ description:
+ - Set timeout, in seconds, on requests to JIRA API.
+ default: 10
+
+ validate_certs:
+ required: false
+ description:
+ - Require valid SSL certificates (set to C(false) if you would like to use self-signed certificates).
+ default: true
+ type: bool
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author:
+- "Steve Smith (@tarka)"
+- "Per Abildgaard Toft (@pertoft)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
+ args:
+ fields:
+ customfield_13225: "test"
+ customfield_12931: '{"value": "Test"}'
+ register: issue
+
+- name: Comment on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key}}'
+ operation: edit
+ assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: update
+ args:
+ fields:
+ customfield_12931: [ {'set': {'value': 'Virtual'}} ]
+ customfield_13820: [ {'set': {'value':'Manually'}} ]
+ register: cmdb_issue
+ delegate_to: localhost
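+
+# The update operation can also add or remove individual values; a sketch
+# using the labels field:
+- name: Add and remove labels on an issue
+  community.general.jira:
+    uri: '{{ server }}'
+    username: '{{ user }}'
+    password: '{{ pass }}'
+    issue: '{{ issue.meta.key }}'
+    operation: update
+    args:
+      fields:
+        labels: [ {'add': 'autocreated'}, {'remove': 'obsolete'} ]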
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
+ register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note: it must be a dict, hence lastViewed: null
+- name: Search for an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: search
+ maxresults: 10
+ jql: project=cmdb AND cf[13225]="test"
+ args:
+ fields:
+ lastViewed: null
+ register: issue
+
+- name: Create a unix account for the reporter
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get list of valid linktypes at /rest/api/2/issueLinkType
+# url of your jira installation.
+- name: Create link from HSP-1 to MKY-1
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relates
+ inwardissue: HSP-1
+ outwardissue: MKY-1
+
+# Transition an issue by target status
+- name: Close the issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Done
+ args:
+ fields:
+ customfield_14321: [ {'set': {'value': 'Value of Select' }} ]
+ comment: [ { 'add': { 'body' : 'Test' } }]
+
+"""
+
+import base64
+import json
+import sys
+import traceback
+
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+
+from ansible.module_utils._text import to_text, to_bytes, to_native
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(url, user, passwd, timeout, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+ auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(user, passwd), errors='surrogate_or_strict')))
+ response, info = fetch_url(module, url, data=data, method=method, timeout=timeout,
+ headers={'Content-Type': 'application/json',
+ 'Authorization': "Basic %s" % auth})
+
+ if info['status'] not in (200, 201, 204):
+ error = None
+ try:
+ error = json.loads(info['body'])
+ except Exception:
+ module.fail_json(msg=to_native(info['body']), exception=traceback.format_exc())
+ if error:
+ msg = []
+ for key in ('errorMessages', 'errors'):
+ if error.get(key):
+ msg.append(to_native(error[key]))
+ if msg:
+ module.fail_json(msg=', '.join(msg))
+ module.fail_json(msg=to_native(error))
+ # Fall back to printing the raw body if it cannot be decoded
+ module.fail_json(msg=to_native(info['body']))
+
+ body = response.read()
+
+ if body:
+ return json.loads(to_text(body, errors='surrogate_or_strict'))
+ return {}
+
+
+def post(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='POST')
+
+
+def put(url, user, passwd, timeout, data):
+ return request(url, user, passwd, timeout, data=data, method='PUT')
+
+
+def get(url, user, passwd, timeout):
+ return request(url, user, passwd, timeout)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': {'key': params['project']},
+ 'summary': params['summary'],
+ 'issuetype': {'name': params['issuetype']}}
+
+ if params['description']:
+ createfields['description'] = params['description']
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def update(restbase, user, passwd, params):
+ data = {
+ "update": params['fields'],
+ }
+ url = restbase + '/issue/' + params['issue']
+
+ return True, put(url, user, passwd, params['timeout'], data)
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def search(restbase, user, passwd, params):
+ url = restbase + '/search?jql=' + pathname2url(params['jql'])
+ if params['fields']:
+ fields = params['fields'].keys()
+ url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+ if params['maxresults']:
+ url = url + '&maxResults=' + str(params['maxresults'])
+ return False, get(url, user, passwd, params['timeout'])
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd, params['timeout'])
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = {'transition': {"id": tid},
+ 'update': params['fields']}
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def link(restbase, user, passwd, params):
+ data = {
+ 'type': {'name': params['linktype']},
+ 'inwardIssue': {'key': params['inwardissue']},
+ 'outwardIssue': {'key': params['outwardissue']},
+ }
+
+ url = restbase + '/issueLink/'
+
+ return True, post(url, user, passwd, params['timeout'], data)
+
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(type='str', required=True),
+ operation=dict(type='str', choices=['create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search'],
+ aliases=['command'], required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ project=dict(type='str', ),
+ summary=dict(type='str', ),
+ description=dict(type='str', ),
+ issuetype=dict(type='str', ),
+ issue=dict(type='str', aliases=['ticket']),
+ comment=dict(type='str', ),
+ status=dict(type='str', ),
+ assignee=dict(type='str', ),
+ fields=dict(default={}, type='dict'),
+ linktype=dict(type='str', ),
+ inwardissue=dict(type='str', ),
+ outwardissue=dict(type='str', ),
+ jql=dict(type='str', ),
+ maxresults=dict(type='int'),
+ timeout=dict(type='float', default=10),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_if=(
+ ('operation', 'create', ['project', 'issuetype', 'summary']),
+ ('operation', 'comment', ['issue', 'comment']),
+ ('operation', 'fetch', ['issue']),
+ ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+ ('operation', 'search', ['jql']),
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = {'name': module.params['assignee']}
+
+ if not uri.endswith('/'):
+ uri = uri + '/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+ # Lookup the corresponding method for this operation. This is
+ # safe as the AnsibleModule should remove any unknown operations.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ changed, ret = method(restbase, user, passwd, module.params)
+
+ except Exception as e:
+ return module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, meta=ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py
new file mode 100644
index 00000000..3a68f8da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_facts.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nginx_status_facts
+deprecated:
+ removed_in: 3.0.0 # was Ansible 2.13
+ why: Deprecated in favour of C(_info) module.
+ alternative: Use M(community.general.nginx_status_info) instead.
+short_description: Retrieve nginx status facts.
+description:
+ - Gathers facts from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See http://nginx.org/en/docs/http/ngx_http_stub_status_module.html for more information.
+'''
+
+EXAMPLES = '''
+# Gather status facts from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+
+# Gather status facts from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_facts:
+ url: http://localhost/nginx_status
+ timeout: 20
+'''
+
+RETURN = '''
+---
+nginx_status_facts.active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+nginx_status_facts.accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+nginx_status_facts.requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+nginx_status_facts.reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+nginx_status_facts.writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+nginx_status_facts.waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+nginx_status_facts.data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusFacts(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'nginx_status_facts': {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['nginx_status_facts']['data'] = data
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['nginx_status_facts']['active_connections'] = int(match.group(1))
+ result['nginx_status_facts']['accepts'] = int(match.group(2))
+ result['nginx_status_facts']['handled'] = int(match.group(3))
+ result['nginx_status_facts']['requests'] = int(match.group(4))
+ result['nginx_status_facts']['reading'] = int(match.group(5))
+ result['nginx_status_facts']['writing'] = int(match.group(6))
+ result['nginx_status_facts']['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_facts = NginxStatusFacts().run()
+ result = dict(changed=False, ansible_facts=nginx_status_facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py
new file mode 100644
index 00000000..a13a57a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/nginx_status_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status.
+description:
+ - Gathers information from nginx from a URL having C(stub_status) enabled.
+author: "René Moser (@resmo)"
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = r'''
+# Gather status info from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ register: result
+
+# Gather status info from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ timeout: 20
+ register: result
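+
+# The registered values can then be used by later tasks; an illustrative check:
+- name: Fail if there are too many active connections
+  ansible.builtin.assert:
+    that:
+      - result.active_connections < 5000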
+'''
+
+RETURN = r'''
+---
+active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils._text import to_text
+
+
+class NginxStatusInfo(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['data'] = data
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
+ match = re.match(expr, data, re.S)
+ if match:
+ result['active_connections'] = int(match.group(1))
+ result['accepts'] = int(match.group(2))
+ result['handled'] = int(match.group(3))
+ result['requests'] = int(match.group(4))
+ result['reading'] = int(match.group(5))
+ result['writing'] = int(match.group(6))
+ result['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_info = NginxStatusInfo().run()
+ module.exit_json(changed=False, **nginx_status_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py
new file mode 100644
index 00000000..1caa159b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_acl_policy.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies.
+description:
+ - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+            - Create or remove the ACL policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+            - Sets the ACL policy name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ project:
+ type: str
+ description:
+            - Sets the project which receives the ACL policy.
+ - If unset, it's a system ACL policy.
+ policy:
+ type: str
+ description:
+ - Sets the ACL policy content.
+ - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+ - It can be a YAML string or a pure Ansible inventory YAML object.
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+ project: "Ansible"
+ policy:
+ description: "my policy"
+ context:
+ application: rundeck
+ for:
+ project:
+ - allow: read
+ by:
+ group: "build"
+
+- name: Remove a rundeck system policy
+ community.general.rundeck_acl_policy:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+    description: Dictionary containing ACL policy information before modification.
+ returned: success
+ type: dict
+after:
+    description: Dictionary containing ACL policy information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils._text import to_text
+import json
+import re
+
+
+class RundeckACLManager:
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+ if resp != b"":
+ try:
+ json_resp = json.loads(to_text(resp, errors='surrogate_or_strict'))
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (str(e), resp))
+ return resp, info
+
+ def get_acl(self):
+ resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
+ return resp
+
+ def create_or_update_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="POST",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 409:
+ self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_acl())
+ else:
+ if facts["contents"] == self.module.params["policy"]:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before=facts, after=facts)
+
+ _, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="PUT",
+ data={"contents": self.module.params["policy"]})
+
+ if info["status"] == 200:
+ self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 404:
+                self.module.fail_json(msg="ACL %s doesn't exist. Cannot update." % self.module.params["name"])
+
+ def remove_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ policy=dict(type='str'),
+ project=dict(type='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'present', ['policy']],
+ ],
+ supports_check_mode=True
+ )
+
+    if not bool(re.match(r"^[a-zA-Z0-9,.+_-]+$", module.params["name"])):
+        module.fail_json(msg="Name contains forbidden characters. The policy name may only contain the characters: a-zA-Z0-9,.+_-")
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckACLManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_acl()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+ main()
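
As noted in the documentation above, the policy option also accepts the ACL content as a plain YAML string. A minimal sketch of that variant, reusing the URL, token and project from the examples above:

- name: Create or update an ACL policy from a YAML string
  community.general.rundeck_acl_policy:
    name: "Project_01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    state: present
    project: "Ansible"
    policy: |
      description: my policy
      context:
        application: rundeck
      for:
        project:
          - allow: read
      by:
        group: build
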
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py
new file mode 100644
index 00000000..5c846482
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/rundeck_project.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage rundeck projects
+# (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_project
+
+short_description: Manage Rundeck projects.
+description:
+ - Create and remove Rundeck projects through HTTP API.
+author: "Loic Blot (@nerzhul)"
+options:
+ state:
+ type: str
+ description:
+ - Create or remove Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: True
+ url:
+ type: str
+ description:
+ - Sets the rundeck instance URL.
+ required: True
+ api_version:
+ type: int
+ description:
+ - Sets the API version used by module.
+ - API version must be at least 14.
+ default: 14
+ token:
+ type: str
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ required: True
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment: url
+'''
+
+EXAMPLES = '''
+- name: Create a rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+
+- name: Remove a rundeck project
+ community.general.rundeck_project:
+ name: "Project_02"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+    description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+    description: Dictionary containing project information before modification.
+ returned: success
+ type: dict
+after:
+    description: Dictionary containing project information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+import json
+
+
+class RundeckProjectManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ def handle_http_code_if_needed(self, infos):
+ if infos["status"] == 403:
+ self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
+ "permissions.", rundeck_response=infos["body"])
+ elif infos["status"] >= 500:
+ self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
+
+ def request_rundeck_api(self, query, data=None, method="GET"):
+ resp, info = fetch_url(self.module,
+ "%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": self.module.params["token"]
+ })
+
+ self.handle_http_code_if_needed(info)
+ if resp is not None:
+ resp = resp.read()
+            if resp != b"":
+ try:
+ json_resp = json.loads(resp)
+ return json_resp, info
+ except ValueError as e:
+ self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
+ "Object was: %s" % (to_native(e), resp))
+ return resp, info
+
+ def get_project_facts(self):
+ resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
+ return resp
+
+ def create_or_update_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ # If in check mode don't create project, simulate a fake project creation
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
+
+ resp, info = self.request_rundeck_api("projects", method="POST", data={
+ "name": self.module.params["name"],
+ "config": {}
+ })
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_project_facts())
+ else:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ def remove_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = url_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ url=dict(required=True, type='str'),
+ api_version=dict(type='int', default=14),
+ token=dict(required=True, type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckProjectManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_project()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_project()
+
+
+if __name__ == '__main__':
+ main()
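
Because rundeck_project and rundeck_acl_policy share the same connection options, a project and its ACL policy are typically ensured back to back. A minimal sketch, reusing the values from the examples above; the aclpolicy file path is an assumption:

- name: Ensure the Rundeck project exists
  community.general.rundeck_project:
    name: "Ansible"
    url: "https://rundeck.example.org"
    token: "mytoken"
    state: present

- name: Ensure the project ACL policy exists
  community.general.rundeck_acl_policy:
    name: "Project_01"
    url: "https://rundeck.example.org"
    token: "mytoken"
    state: present
    project: "Ansible"
    policy: "{{ lookup('file', 'files/project_01.aclpolicy') }}"  # assumed local file
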
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py
new file mode 100644
index 00000000..b4aca155
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy an aaa group object in Sophos UTM.
+
+description:
+ - Create, update or destroy an aaa group object in Sophos UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ type: str
+ required: true
+ adirectory_groups:
+ description:
+ - List of adirectory group strings.
+ type: list
+ elements: str
+ adirectory_groups_sids:
+ description:
+ - Dictionary of group sids.
+ type: dict
+ backend_match:
+ description:
+ - The backend for the group.
+ type: str
+ choices:
+ - none
+ - adirectory
+ - edirectory
+ - radius
+ - tacacs
+ - ldap
+ default: none
+ comment:
+ description:
+ - Comment that describes the AAA group.
+ type: str
+ default: ''
+ dynamic:
+ description:
+ - Group type. Is static if none is selected.
+ type: str
+ default: none
+ choices:
+ - none
+ - ipsec_dn
+ - directory_groups
+ edirectory_groups:
+ description:
+ - List of edirectory group strings.
+ type: list
+ elements: str
+ ipsec_dn:
+ description:
+ - The ipsec dn string.
+ type: str
+ ldap_attribute:
+ description:
+ - The ldap attribute to check against.
+ type: str
+ ldap_attribute_value:
+ description:
+ - The ldap attribute value to check against.
+ type: str
+ members:
+ description:
+ - A list of user ref names (aaa/user).
+ type: list
+ elements: str
+ default: []
+ network:
+ description:
+          - The network reference name. The object contains the known IP addresses for the authentication object (network/aaa).
+ type: str
+ default: ""
+ radius_groups:
+ description:
+ - A list of radius group strings.
+ type: list
+ elements: str
+ default: []
+ tacacs_groups:
+ description:
+ - A list of tacacs group strings.
+ type: list
+ elements: str
+ default: []
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ backend_match: ldap
+ dynamic: directory_groups
+    ldap_attribute: memberof
+    ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+ network: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created.
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object.
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked.
+ type: bool
+ _type:
+ description: The type of the object.
+ type: str
+ name:
+ description: The name of the object.
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups.
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS.
+ type: list
+ backend_match:
+ description: The backend to use.
+ type: str
+ comment:
+ description: The comment string.
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group.
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups.
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match.
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against.
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against.
+ type: str
+ members:
+ description: List of member identifiers of the group.
+ type: list
+ network:
+ description: The identifier of the network (network/aaa).
+ type: str
+ radius_group:
+ description: The radius group identifier.
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+ "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+ "network", "radius_groups", "tacacs_groups"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ adirectory_groups_sids=dict(type='dict', required=False, default={}),
+ backend_match=dict(type='str', required=False, default="none",
+ choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+ comment=dict(type='str', required=False, default=""),
+ dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ ipsec_dn=dict(type='str', required=False, default=""),
+ ldap_attribute=dict(type='str', required=False, default=""),
+ ldap_attribute_value=dict(type='str', required=False, default=""),
+ members=dict(type='list', elements='str', required=False, default=[]),
+ network=dict(type='str', required=False, default=""),
+ radius_groups=dict(type='list', elements='str', required=False, default=[]),
+ tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
new file mode 100644
index 00000000..6d230c1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for an aaa group entry in Sophos UTM
+
+description:
+    - Get info for an aaa group entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM aaa_group
+ community.general.utm_aaa_group_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was queried
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS
+ type: list
+ backend_match:
+ description: The backend to use
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against
+ type: str
+ members:
+ description: List of member identifiers of the group
+ type: list
+ network:
+ description: The identifier of the network (network/aaa)
+ type: str
+ radius_group:
+ description: The radius group identifier
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py
new file mode 100644
index 00000000..e940f416
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy ca host_key_cert entry in Sophos UTM
+
+description:
+ - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ ca:
+ description:
+ - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ required: true
+ type: str
+ meta:
+ description:
+ - A reference to an existing utm_ca_meta_x509 object.
+ required: true
+ type: str
+ certificate:
+ description:
+ - The certificate in PEM format.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment string.
+ type: str
+ encrypted:
+ description:
+ - Optionally enable encryption.
+ default: False
+ type: bool
+ key:
+ description:
+ - Optional private key in PEM format.
+ type: str
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ ca: REF_ca/signing_ca_OBJECT_STRING
+ meta: REF_ca/meta_x509_OBJECT_STRING
+ certificate: |
+ --- BEGIN CERTIFICATE ---
+ . . .
+ . . .
+ . . .
+ --- END CERTIFICATE ---
+ state: present
+
+- name: Remove a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: absent
+
+- name: Read a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ca=dict(type='str', required=True),
+ meta=dict(type='str', required=True),
+ certificate=dict(type='str', required=True),
+ comment=dict(type='str', required=False),
+ encrypted=dict(type='bool', required=False, default=False),
+ key=dict(type='str', required=False, no_log=True),
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
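
Instead of embedding the PEM data inline as in the example above, the certificate and private key can be read from files on the controller with the file lookup. A minimal sketch; the certificate and key paths are assumptions:

- name: Upload host key and certificate from local files
  community.general.utm_ca_host_key_cert:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestHostKeyCertEntry
    ca: REF_ca/signing_ca_OBJECT_STRING
    meta: REF_ca/meta_x509_OBJECT_STRING
    certificate: "{{ lookup('file', '/etc/ssl/certs/host.pem') }}"  # assumed path
    key: "{{ lookup('file', '/etc/ssl/private/host.key') }}"        # assumed path
    state: present
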
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py
new file mode 100644
index 00000000..ad315df9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert_info
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Get info for a ca host_key_cert entry in Sophos UTM
+
+description:
+ - Get info for a ca host_key_cert entry in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get info for a ca host_key_cert entry
+ community.general.utm_ca_host_key_cert_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+"""
+
+RETURN = """
+result:
+    description: The utm object that was queried
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py
new file mode 100644
index 00000000..1f080abf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_dns_host
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy dns entry in Sophos UTM
+
+description:
+ - Create, update or destroy a dns entry in SOPHOS UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The IPV4 Address of the entry. Can be left empty for automatic resolving.
+ default: 0.0.0.0
+ address6:
+ type: str
+ description:
+ - The IPV6 Address of the entry. Can be left empty for automatic resolving.
+ default: "::"
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the dns host object
+ hostname:
+ type: str
+ description:
+ - The hostname for the dns host object
+ interface:
+ type: str
+ description:
+ - The reference name of the interface to use. If not provided the default interface will be used
+ resolved:
+ description:
+ - whether the hostname's ipv4 address is already resolved or not
+ default: False
+ type: bool
+ resolved6:
+ description:
+ - whether the hostname's ipv6 address is already resolved or not
+ default: False
+ type: bool
+ timeout:
+ type: int
+ description:
+ - the timeout for the utm to resolve the ip address for the hostname again
+ default: 0
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ hostname: testentry.some.tld
+ state: present
+
+- name: Remove UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ipv4 address of the object
+ type: str
+ address6:
+ description: The ipv6 address of the object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ hostname:
+ description: The hostname of the object
+ type: str
+ interface:
+ description: The reference name of the interface the object is associated with
+ type: str
+ resolved:
+ description: Whether the ipv4 address is resolved or not
+ type: bool
+ resolved6:
+ description: Whether the ipv6 address is resolved or not
+ type: bool
+ timeout:
+ description: The timeout until a new resolving will be attempted
+ type: int
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/dns_host"
+ key_to_check_for_changes = ["comment", "hostname", "interface"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=False, default='0.0.0.0'),
+ address6=dict(type='str', required=False, default='::'),
+ comment=dict(type='str', required=False, default=""),
+ hostname=dict(type='str', required=False),
+ interface=dict(type='str', required=False, default=""),
+ resolved=dict(type='bool', required=False, default=False),
+ resolved6=dict(type='bool', required=False, default=False),
+ timeout=dict(type='int', required=False, default=0),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
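
The addresses listed in the RETURN block above are available from a registered variable once the entry exists, which is handy when later tasks need the address the UTM resolved for the hostname. A minimal sketch, assuming the returned object is exposed under the result key as documented above:

- name: Create UTM dns host entry and keep the result
  community.general.utm_dns_host:
    utm_host: sophos.host.name
    utm_token: abcdefghijklmno1234
    name: TestDNSEntry
    hostname: testentry.some.tld
    state: present
  register: dns_entry

- name: Show the IPv4 address the UTM resolved
  ansible.builtin.debug:
    msg: "{{ dns_entry.result.address }}"
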
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py
new file mode 100644
index 00000000..ecf08871
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+ - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ address:
+ type: str
+ description:
+ - The ip4 address of the network/interface_address object.
+ required: true
+ address6:
+ type: str
+ description:
+ - The ip6 address of the network/interface_address object.
+ required: false
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ resolved:
+ type: bool
+ description:
+          - Whether or not the IPv4 address is resolved
+ resolved6:
+ type: bool
+ description:
+          - Whether or not the IPv6 address is resolved
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create a network interface address
+  community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: present
+
+- name: Remove a network interface address
+  community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the object is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the object is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = ["comment", "address"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=True),
+ comment=dict(type='str', required=False, default=""),
+ address6=dict(type='str', required=False),
+ resolved=dict(type='bool', required=False),
+ resolved6=dict(type='bool', required=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
new file mode 100644
index 00000000..c1d0f7d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+ - Get info for a network/interface_address object in SOPHOS UTM.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get network interface address info
+  community.general.utm_network_interface_address_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+    description: The utm object that was queried
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The ip4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The ip6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the object is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the object is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
new file mode 100644
index 00000000..caa0085c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ aaa:
+ type: list
+ elements: str
+ description:
+ - List of references to utm_aaa objects (allowed users or groups)
+ required: true
+ basic_prompt:
+ type: str
+ description:
+ - The message in the basic authentication prompt
+ required: true
+ backend_mode:
+ type: str
+ description:
+ - Specifies if the backend server needs authentication ([Basic|None])
+ default: None
+ choices:
+ - Basic
+ - None
+ backend_strip_basic_auth:
+ description:
+ - Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ backend_user_prefix:
+ type: str
+ description:
+ - Prefix string to prepend to the username for backend authentication
+ default: ""
+ backend_user_suffix:
+ type: str
+ description:
+ - Suffix string to append to the username for backend authentication
+ default: ""
+ comment:
+ type: str
+ description:
+ - Optional comment string
+ default: ""
+ frontend_cookie:
+ type: str
+ description:
+ - Frontend cookie name
+ frontend_cookie_secret:
+ type: str
+ description:
+ - Frontend cookie secret
+ frontend_form:
+ type: str
+ description:
+ - Frontend authentication form name
+ frontend_form_template:
+ type: str
+ description:
+ - Frontend authentication form template
+ default: ""
+ frontend_login:
+ type: str
+ description:
+ - Frontend login name
+ frontend_logout:
+ type: str
+ description:
+ - Frontend logout name
+ frontend_mode:
+ type: str
+ description:
+ - Frontend authentication mode (Form|Basic)
+ default: Basic
+ choices:
+ - Basic
+ - Form
+ frontend_realm:
+ type: str
+ description:
+ - Frontend authentication realm
+ frontend_session_allow_persistency:
+ description:
+ - Allow session persistency
+ type: bool
+ default: False
+ choices:
+ - True
+ - False
+ frontend_session_lifetime:
+ type: int
+ description:
+ - session lifetime
+ required: true
+ frontend_session_lifetime_limited:
+ description:
+ - Specifies if limitation of session lifetime is active
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ frontend_session_lifetime_scope:
+ type: str
+ description:
+ - scope for frontend_session_lifetime (days|hours|minutes)
+ default: hours
+ choices:
+ - days
+ - hours
+ - minutes
+ frontend_session_timeout:
+ type: int
+ description:
+ - session timeout
+ required: true
+ frontend_session_timeout_enabled:
+ description:
+ - Specifies if session timeout is active
+ type: bool
+ default: True
+ choices:
+ - True
+ - False
+ frontend_session_timeout_scope:
+ type: str
+ description:
+ - scope for frontend_session_timeout (days|hours|minutes)
+ default: minutes
+ choices:
+ - days
+ - hours
+ - minutes
+ logout_delegation_urls:
+ type: list
+ elements: str
+ description:
+ - List of logout URLs that logouts are delegated to
+ default: []
+ logout_mode:
+ type: str
+ description:
+ - Mode of logout (None|Delegation)
+ default: None
+ choices:
+ - None
+ - Delegation
+ redirect_to_requested_url:
+ description:
+ - Should a redirect to the requested URL be made
+ type: bool
+ default: False
+ choices:
+ - True
+ - False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING]
+ basic_prompt: "Authentication required: Please login"
+ frontend_session_lifetime: 1
+ frontend_session_timeout: 1
+ state: present
+
+- name: Remove UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: absent
+
+- name: Read UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ aaa:
+ description: List of references to utm_aaa objects (allowed users or groups)
+ type: list
+ basic_prompt:
+ description: The message in the basic authentication prompt
+ type: str
+ backend_mode:
+ description: Specifies if the backend server needs authentication ([Basic|None])
+ type: str
+ backend_strip_basic_auth:
+ description: Should the login data be stripped when proxying the request to the backend host
+ type: bool
+ backend_user_prefix:
+ description: Prefix string to prepend to the username for backend authentication
+ type: str
+ backend_user_suffix:
+ description: Suffix string to append to the username for backend authentication
+ type: str
+ comment:
+ description: Optional comment string
+ type: str
+ frontend_cookie:
+ description: Frontend cookie name
+ type: str
+ frontend_form:
+ description: Frontend authentication form name
+ type: str
+ frontend_form_template:
+ description: Frontend authentication form template
+ type: str
+ frontend_login:
+ description: Frontend login name
+ type: str
+ frontend_logout:
+ description: Frontend logout name
+ type: str
+ frontend_mode:
+ description: Frontend authentication mode (Form|Basic)
+ type: str
+ frontend_realm:
+ description: Frontend authentication realm
+ type: str
+ frontend_session_allow_persistency:
+ description: Allow session persistency
+ type: bool
+ frontend_session_lifetime:
+ description: session lifetime
+ type: int
+ frontend_session_lifetime_limited:
+ description: Specifies if limitation of session lifetime is active
+ type: bool
+ frontend_session_lifetime_scope:
+ description: scope for frontend_session_lifetime (days|hours|minutes)
+ type: str
+ frontend_session_timeout:
+ description: session timeout
+ type: int
+ frontend_session_timeout_enabled:
+ description: Specifies if session timeout is active
+ type: bool
+ frontend_session_timeout_scope:
+ description: scope for frontend_session_timeout (days|hours|minutes)
+ type: str
+ logout_delegation_urls:
+ description: List of logout URLs that logouts are delegated to
+ type: list
+ logout_mode:
+ description: Mode of logout (None|Delegation)
+ type: str
+ redirect_to_requested_url:
+ description: Should a redirect to the requested URL be made
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/auth_profile"
+ key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth",
+ "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie",
+ "frontend_cookie_secret", "frontend_form", "frontend_form_template",
+ "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm",
+ "frontend_session_allow_persistency", "frontend_session_lifetime",
+ "frontend_session_lifetime_limited", "frontend_session_lifetime_scope",
+ "frontend_session_timeout", "frontend_session_timeout_enabled",
+ "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode",
+ "redirect_to_requested_url"]
+
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ aaa=dict(type='list', elements='str', required=True),
+ basic_prompt=dict(type='str', required=True),
+ backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', required=False, default=True, choices=[True, False]),
+ backend_user_prefix=dict(type='str', required=False, default=""),
+ backend_user_suffix=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ frontend_cookie=dict(type='str', required=False),
+ frontend_cookie_secret=dict(type='str', required=False, no_log=True),
+ frontend_form=dict(type='str', required=False),
+ frontend_form_template=dict(type='str', required=False, default=""),
+ frontend_login=dict(type='str', required=False),
+ frontend_logout=dict(type='str', required=False),
+ frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str', required=False),
+ frontend_session_allow_persistency=dict(type='bool', required=False, default=False, choices=[True, False]),
+ frontend_session_lifetime=dict(type='int', required=True),
+ frontend_session_lifetime_limited=dict(type='bool', required=False, default=True, choices=[True, False]),
+ frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_timeout=dict(type='int', required=True),
+ frontend_session_timeout_enabled=dict(type='bool', required=False, default=True, choices=[True, False]),
+ frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
+ logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', required=False, default=False, choices=[True, False])
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py
new file mode 100644
index 00000000..ed241af1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_exception
+
+author:
+ - Sebastian Schenzel (@RickS-C137)
+
+short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
+    - This module requires the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: True
+ type: str
+ op:
+ description:
+ - The operand to be used with the entries of the path parameter
+ default: 'AND'
+ choices:
+ - 'AND'
+ - 'OR'
+ required: False
+ type: str
+ path:
+ description:
+ - The paths the exception in the reverse proxy is defined for
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_custom_threats_filters:
+ description:
+ - A list of threats to be skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skip_threats_filter_categories:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ skipav:
+ description:
+ - Skip the Antivirus Scanning
+ default: False
+ type: bool
+ required: False
+ skipbadclients:
+ description:
+ - Block clients with bad reputation
+ default: False
+ type: bool
+ required: False
+ skipcookie:
+ description:
+ - Skip the Cookie Signing check
+ default: False
+ type: bool
+ required: False
+ skipform:
+ description:
+ - Enable form hardening
+ default: False
+ type: bool
+ required: False
+ skipform_missingtoken:
+ description:
+ - Enable form hardening with missing tokens
+ default: False
+ type: bool
+ required: False
+ skiphtmlrewrite:
+ description:
+ - Protection against SQL
+ default: False
+ type: bool
+ required: False
+ skiptft:
+ description:
+ - Enable true file type control
+ default: False
+ type: bool
+ required: False
+ skipurl:
+ description:
+ - Enable static URL hardening
+ default: False
+ type: bool
+ required: False
+ source:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: False
+ status:
+ description:
+ - Status of the exception rule set
+ default: True
+ type: bool
+ required: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ comment:
+ description: The optional comment string
+ type: str
+ op:
+ description: The operand to be used with the entries of the path parameter
+ type: str
+ path:
+ description: The paths the exception in the reverse proxy is defined for
+ type: list
+ skip_custom_threats_filters:
+ description: A list of threats to be skipped
+ type: list
+ skip_threats_filter_categories:
+ description: Define which categories of threats are skipped
+ type: list
+ skipav:
+ description: Skip the Antivirus Scanning
+ type: bool
+ skipbadclients:
+            description: Skip blocking of clients with bad reputation
+ type: bool
+ skipcookie:
+ description: Skip the Cookie Signing check
+ type: bool
+ skipform:
+            description: Skip the form hardening
+ type: bool
+ skipform_missingtoken:
+            description: Skip the form hardening for requests with missing tokens
+ type: bool
+ skiphtmlrewrite:
+            description: Skip the HTML rewriting
+ type: bool
+ skiptft:
+            description: Skip the true file type control
+ type: bool
+ skipurl:
+            description: Skip the static URL hardening
+ type: bool
+ source:
+            description: The source network reference names this exception applies to
+ type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/exception"
+ key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+ "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+ "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', required=False, default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+ skipav=dict(type='bool', required=False, default=False),
+ skipbadclients=dict(type='bool', required=False, default=False),
+ skipcookie=dict(type='bool', required=False, default=False),
+ skipform=dict(type='bool', required=False, default=False),
+ skipform_missingtoken=dict(type='bool', required=False, default=False),
+ skiphtmlrewrite=dict(type='bool', required=False, default=False),
+ skiptft=dict(type='bool', required=False, default=False),
+ skipurl=dict(type='bool', required=False, default=False),
+ source=dict(type='list', elements='str', required=False, default=[]),
+ status=dict(type='bool', required=False, default=True),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
new file mode 100644
index 00000000..8dba3640
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+    add_content_type_header:
+ description:
+ - Whether to add the content type header or not
+ type: bool
+ default: False
+ address:
+ type: str
+ description:
+ - The reference name of the network/interface_address object.
+ default: REF_DefaultInternalAddress
+ allowed_networks:
+ type: list
+ elements: str
+ description:
+ - A list of reference names for the allowed networks.
+ default: ['REF_NetworkAny']
+ certificate:
+ type: str
+ description:
+ - The reference name of the ca/host_key_cert object.
+ default: ""
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ default: ""
+ disable_compression:
+ description:
+          - Whether to disable the compression
+ type: bool
+ default: False
+ domain:
+ type: list
+ elements: str
+ description:
+ - A list of domain names for the frontend object
+ exceptions:
+ type: list
+ elements: str
+ description:
+ - A list of exception ref names (reverse_proxy/exception)
+ default: []
+ htmlrewrite:
+ description:
+ - Whether to enable html rewrite or not
+ type: bool
+ default: False
+ htmlrewrite_cookies:
+ description:
+ - Whether to enable html rewrite cookie or not
+ type: bool
+ default: False
+ implicitredirect:
+ description:
+ - Whether to enable implicit redirection or not
+ type: bool
+ default: False
+ lbmethod:
+ type: str
+ description:
+ - Which loadbalancer method should be used
+ choices:
+ - ""
+ - bybusyness
+ - bytraffic
+ - byrequests
+ default: bybusyness
+ locations:
+ type: list
+ elements: str
+ description:
+ - A list of location ref names (reverse_proxy/location)
+ default: []
+ port:
+ type: int
+ description:
+ - The frontend http port
+ default: 80
+ preservehost:
+ description:
+ - Whether to preserve host header
+ type: bool
+ default: False
+ profile:
+ type: str
+ description:
+ - The reference string of the reverse_proxy/profile
+ default: ""
+ status:
+ description:
+ - Whether to activate the frontend entry or not
+ type: bool
+ default: True
+ type:
+ type: str
+ description:
+ - Which protocol should be used
+ choices:
+ - http
+ - https
+ default: http
+ xheaders:
+ description:
+          - Whether to pass the X-Forwarded headers or not
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+ state: present
+
+- name: Remove utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: absent
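+
+# Hedged example (illustrative sketch, not part of the original examples): an
+# HTTPS frontend on port 443 with an explicit load balancing method. The
+# certificate and domain values are placeholders, not real reference names.
+- name: Create utm proxy_frontend for https
+  community.general.utm_proxy_frontend:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestHttpsFrontendEntry
+    type: https
+    port: 443
+    certificate: REF_CaHostKeyCertObject
+    lbmethod: byrequests
+    domain:
+      - www.example.com
+    state: present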
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+ "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+ "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+ "port", "preservehost", "profile", "status", "type", "xheaders"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ add_content_type_header=dict(type='bool', required=False, default=False),
+ address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+ certificate=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ disable_compression=dict(type='bool', required=False, default=False),
+ domain=dict(type='list', elements='str', required=False),
+ exceptions=dict(type='list', elements='str', required=False, default=[]),
+ htmlrewrite=dict(type='bool', required=False, default=False),
+ htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+ implicitredirect=dict(type='bool', required=False, default=False),
+ lbmethod=dict(type='str', required=False, default="bybusyness",
+ choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+ locations=dict(type='list', elements='str', required=False, default=[]),
+ port=dict(type='int', required=False, default=80),
+ preservehost=dict(type='bool', required=False, default=False),
+ profile=dict(type='str', required=False, default=""),
+ status=dict(type='bool', required=False, default=True),
+ type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
new file mode 100644
index 00000000..450bd161
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get information on a reverse_proxy frontend entry in Sophos UTM
+
+description:
+    - Get information on a reverse_proxy frontend entry in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+ community.general.utm_proxy_frontend_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
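+
+# Hedged example (illustrative sketch, not part of the original examples):
+# register the returned object and print it; the variable name is arbitrary.
+- name: Get utm proxy_frontend and register the result
+  community.general.utm_proxy_frontend_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestFrontendEntry
+  register: frontend_info
+
+- name: Print the frontend object
+  ansible.builtin.debug:
+    var: frontend_info.result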
+"""
+
+RETURN = """
+result:
+    description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
new file mode 100644
index 00000000..7c4bc8b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+ access_control:
+ description:
+ - whether to activate the access control for the location
+ type: str
+ default: '0'
+ choices:
+ - '0'
+ - '1'
+ allowed_networks:
+ description:
+ - A list of allowed networks
+ type: list
+ elements: str
+        default: ['REF_NetworkAny']
+ auth_profile:
+ type: str
+ description:
+ - The reference name of the auth profile
+ backend:
+ type: list
+ elements: str
+ description:
+ - A list of backends that are connected with this location declaration
+ default: []
+ be_path:
+ type: str
+ description:
+ - The path of the backend
+ comment:
+ type: str
+ description:
+ - The optional comment string
+ denied_networks:
+ type: list
+ elements: str
+ description:
+ - A list of denied network references
+ default: []
+ hot_standby:
+ description:
+ - Activate hot standby mode
+ type: bool
+ default: False
+ path:
+ type: str
+ description:
+ - The path of the location
+ default: "/"
+ status:
+ description:
+ - Whether the location is active or not
+ type: bool
+ default: True
+ stickysession_id:
+ type: str
+ description:
+ - The stickysession id
+ default: ROUTEID
+ stickysession_status:
+ description:
+ - Enable the stickysession
+ type: bool
+ default: False
+ websocket_passthrough:
+ description:
+ - Enable the websocket passthrough
+ type: bool
+ default: False
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_location
+  community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ state: absent
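+
+# Hedged example (illustrative sketch, not part of the original examples): a
+# location that maps /api/ to a backend reference and restricts access to one
+# allowed network. All REF_* names are placeholders.
+- name: Create UTM proxy_location with a backend and restricted access
+  community.general.utm_proxy_location:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+    path: /api/
+    backend:
+      - REF_BackendObjectString
+    allowed_networks:
+      - REF_NetworkInternal
+    state: present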
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+            description: Whether access control is activated
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The backend reference name
+ type: str
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+            description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+ "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+ "stickysession_status", "websocket_passthrough"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', required=False, default=""),
+ backend=dict(type='list', elements='str', required=False, default=[]),
+ be_path=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ denied_networks=dict(type='list', elements='str', required=False, default=[]),
+ hot_standby=dict(type='bool', required=False, default=False),
+ path=dict(type='str', required=False, default="/"),
+ status=dict(type='bool', required=False, default=True),
+ stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+ stickysession_status=dict(type='bool', required=False, default=False),
+ websocket_passthrough=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
new file mode 100644
index 00000000..1125c4fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get information on a reverse_proxy location entry in Sophos UTM
+
+description:
+    - Get information on a reverse_proxy location entry in Sophos UTM.
+ - This module needs to have the REST Ability of the UTM to be activated.
+
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry
+ required: true
+
+extends_documentation_fragment:
+- community.general.utm
+
+'''
+
+EXAMPLES = """
+- name: Get UTM proxy_location info
+ community.general.utm_proxy_location_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
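+
+# Hedged example (illustrative sketch, not part of the original examples):
+# register the returned object and print it; the variable name is arbitrary.
+- name: Get UTM proxy_location and register the result
+  community.general.utm_proxy_location_info:
+    utm_host: sophos.host.name
+    utm_token: abcdefghijklmno1234
+    name: TestLocationEntry
+  register: location_info
+
+- name: Print the location object
+  ansible.builtin.debug:
+    var: location_info.result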
+"""
+
+RETURN = """
+result:
+    description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+            description: Whether access control is activated
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The backend reference name
+ type: str
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+            description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils._text import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py
new file mode 100644
index 00000000..5524beea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/supervisorctl.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+ - Manage the state of a program or group of programs running via supervisord
+options:
+ name:
+ type: str
+ description:
+ - The name of the supervisord program or group to manage.
+ - The name will be taken as group name when it ends with a colon I(:)
+ - Group support is only available in Ansible version 1.6 or later.
+ required: true
+ config:
+ type: path
+ description:
+ - The supervisor configuration file path
+ server_url:
+ type: str
+ description:
+ - URL on which supervisord server is listening
+ username:
+ type: str
+ description:
+ - username to use for authentication
+ password:
+ type: str
+ description:
+ - password to use for authentication
+ state:
+ type: str
+ description:
+ - The desired state of program/group.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ signal:
+ type: str
+ description:
+      - The signal to send to the program/group, when combined with the 'signalled' state. Required when I(state=signalled).
+ supervisorctl_path:
+ type: path
+ description:
+ - path to supervisorctl executable
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+ - "Matt Wright (@mattupstate)"
+ - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program to be in started state
+ community.general.supervisorctl:
+ name: my_app
+ state: started
+
+- name: Manage the state of program group to be in started state
+ community.general.supervisorctl:
+ name: 'my_apps:'
+ state: started
+
+- name: Restart my_app, reading supervisorctl configuration from a specified file
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
+
+- name: Restart my_app, connecting to supervisord with credentials and server URL
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
+
+- name: Send a signal to my_app via supervisorctl
+ community.general.supervisorctl:
+ name: my_app
+ state: signalled
+ signal: USR1
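+
+# Hedged examples (illustrative sketch, not part of the original examples):
+# per the notes above, state=present calls supervisorctl reread and then add
+# if the program/group does not exist, and state=absent calls reread and then
+# remove. The program name is a placeholder.
+- name: Add a newly configured program to supervisord
+  community.general.supervisorctl:
+    name: my_new_app
+    state: present
+
+- name: Remove a program from supervisord
+  community.general.supervisorctl:
+    name: my_new_app
+    state: absent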
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ config=dict(required=False, type='path'),
+ server_url=dict(type='str', required=False),
+ username=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ supervisorctl_path=dict(required=False, type='path'),
+ state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ signal=dict(type='str', required=False)
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
+ state = module.params['state']
+ config = module.params.get('config')
+ server_url = module.params.get('server_url')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ supervisorctl_path = module.params.get('supervisorctl_path')
+ signal = module.params.get('signal')
+
+ # we check error message for a pattern, so we need to make sure that's in C locale
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if supervisorctl_path:
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+ supervisorctl_args = [supervisorctl_path]
+ else:
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ else:
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+ if config:
+ supervisorctl_args.extend(['-c', config])
+ if server_url:
+ supervisorctl_args.extend(['-s', server_url])
+ if username:
+ supervisorctl_args.extend(['-u', username])
+ if password:
+ supervisorctl_args.extend(['-p', password])
+
+ if state == 'signalled' and not signal:
+ module.fail_json(msg="State 'signalled' requires a 'signal' value")
+
+ def run_supervisorctl(cmd, name=None, **kwargs):
+ args = list(supervisorctl_args) # copy the master args
+ args.append(cmd)
+ if name:
+ args.append(name)
+ return module.run_command(args, **kwargs)
+
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
+ else:
+ if process_name != name:
+ continue
+
+ matched.append((process_name, status))
+ return matched
+
+ def take_action_on_processes(processes, status_filter, action, expected_result):
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
+
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
+
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update', check_rc=True)
+ processes = get_matched_processes()
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
+
+ processes = get_matched_processes()
+
+ if state == 'absent':
+ if len(processes) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('remove', name)
+ if '%s: removed process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'started':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
+
+ if state == 'stopped':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
+
+ if state == 'signalled':
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+        take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py
new file mode 100644
index 00000000..ae8f31c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/web_infrastructure/taiga_issue.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+options:
+ taiga_host:
+ type: str
+ description:
+ - The hostname of the Taiga instance.
+ default: https://api.taiga.io
+ project:
+ type: str
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: True
+ subject:
+ type: str
+ description:
+ - The issue subject.
+ required: True
+ issue_type:
+ type: str
+ description:
+ - The issue type. Must exist previously.
+ required: True
+ priority:
+ type: str
+ description:
+ - The issue priority. Must exist previously.
+ default: Normal
+ status:
+ type: str
+ description:
+ - The issue status. Must exist previously.
+ default: New
+ severity:
+ type: str
+ description:
+ - The issue severity. Must exist previously.
+ default: Normal
+ description:
+ type: str
+ description:
+ - The issue description.
+ default: ""
+ attachment:
+ type: path
+ description:
+ - Path to a file to be attached to the issue.
+ attachment_description:
+ type: str
+ description:
+ - A string describing the file to be attached to the issue.
+ default: ""
+ tags:
+ type: list
+ elements: str
+ description:
+      - A list of tags to be assigned to the issue.
+ default: []
+ state:
+ type: str
+ description:
+ - Whether the issue should be present or not.
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable TAIGA_TOKEN or by the pair of environment variables TAIGA_USERNAME and TAIGA_PASSWORD
+'''
+
+EXAMPLES = '''
+- name: Create an issue in my hosted Taiga environment and attach an error log
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+- name: Deletes the previously created issue
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
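+
+# Hedged example (illustrative sketch, not part of the original examples):
+# the module reads its credentials from the environment (TAIGA_TOKEN, or
+# TAIGA_USERNAME and TAIGA_PASSWORD), so they can be passed at task level.
+# The variables holding the secrets are placeholders.
+- name: Create an issue, passing the credentials through the environment
+  community.general.taiga_issue:
+    project: myproject
+    subject: An error has been found
+    issue_type: Bug
+    state: present
+  environment:
+    TAIGA_USERNAME: "{{ taiga_username }}"
+    TAIGA_PASSWORD: "{{ taiga_password }}"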
+'''
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_native
+
+TAIGA_IMP_ERR = None
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED = True
+except ImportError:
+ TAIGA_IMP_ERR = traceback.format_exc()
+ TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(module, taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Method that creates/deletes issues depending whether they exist and the state desired
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+ if not any([username, password]):
+ return (False, changed, "Missing credentials", {})
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+        # list() is needed because filter() returns an iterator on Python 3
+        project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+ if len(project_list) != 1:
+ return (False, changed, "Unable to find project %s" % project_name, {})
+ project = project_list[0]
+ project_id = project.id
+
+        priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+ if len(priority_list) != 1:
+ return (False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {})
+ priority_id = priority_list[0].id
+
+        status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+ if len(status_list) != 1:
+ return (False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {})
+ status_id = status_list[0].id
+
+        type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+ if len(type_list) != 1:
+ return (False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {})
+ type_id = type_list[0].id
+
+        severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+ if len(severity_list) != 1:
+ return (False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {})
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+        matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags, description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return (True, changed, "Issue created", issue)
+
+ else:
+ # If does not exist, do nothing
+ return (True, changed, "Issue does not exist", {})
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return (True, changed, "Issue deleted", {})
+
+ else:
+ # Do nothing
+ return (True, changed, "Issue already exists", {})
+
+ else:
+ # More than 1 matching issue
+ return (False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {})
+
+ except TaigaException as exc:
+ msg = "An exception happened: %s" % to_native(exc)
+ return (False, changed, msg, {})
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ project=dict(type='str', required=True),
+ subject=dict(type='str', required=True),
+ issue_type=dict(type='str', required=True),
+ priority=dict(type='str', required=False, default="Normal"),
+ status=dict(type='str', required=False, default="New"),
+ severity=dict(type='str', required=False, default="Normal"),
+ description=dict(type='str', required=False, default=""),
+ attachment=dict(type='path', required=False, default=None),
+ attachment_description=dict(type='str', required=False, default=""),
+ tags=dict(required=False, default=[], type='list', elements='str'),
+ state=dict(type='str', required=False, choices=['present', 'absent'],
+ default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ module.fail_json(msg=missing_required_lib("python-taiga"),
+ exception=TAIGA_IMP_ERR)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ module,
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if len(issue_attr_dict) > 0:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py
new file mode 100644
index 00000000..9a69ce54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_app.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an C(autostart.cgi) script
+ type: bool
+ default: 'no'
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ default: ''
+
+ port_open:
+ description:
+            - If the port should be opened
+ type: bool
+ default: 'no'
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ community.general.webfaction_app:
+ name: "my_wsgi_app1"
+ state: present
+ type: mod_wsgi35-python27
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
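+
+  # Hedged example (illustrative sketch, not part of the original examples):
+  # remove the app created above. C(type) stays required by the module even
+  # for state=absent; all values are placeholders.
+  - name: Remove the test app
+    community.general.webfaction_app:
+      name: "my_wsgi_app1"
+      state: absent
+      type: mod_wsgi35-python27
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"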
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(required=False, type='bool', default=False),
+ extra_info=dict(required=False, default=""),
+ port_open=dict(required=False, type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed=False,
+ result=existing_app,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py
new file mode 100644
index 00000000..19bc6ea2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_db.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+ - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ community.general.webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
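+  # Hedged example (illustrative sketch, not part of the original examples):
+  # delete the database created above; as noted, the matching default user
+  # is removed as well. All values are placeholders.
+  - name: Delete the database and its default user
+    community.general.webfaction_db:
+      name: "{{webfaction_user}}_db1"
+      type: mysql
+      state: absent
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+      machine: "{{webfaction_machine}}"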
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ type=dict(required=True, choices=['mysql', 'postgresql']),
+ password=dict(required=False, default=None, no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(required=False, default=None),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+ # If this isn't a dry run...
+ if not module.check_mode:
+
+ if not (existing_db or existing_user):
+ module.exit_json(changed=False,)
+
+ if existing_db:
+ # Delete the db if it exists
+ result.update(
+ webfaction.delete_db(session_id, db_name, db_type)
+ )
+
+ if existing_user:
+ # Delete the default db user if it exists
+ result.update(
+ webfaction.delete_db_user(session_id, db_name, db_type)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py
new file mode 100644
index 00000000..a348ef51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_domain.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+ - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you don't specify subdomains, the domain will be deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
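+  # Hedged example (illustrative sketch, not part of the original examples):
+  # per the note above, passing subdomains together with state=absent removes
+  # only those subdomains and keeps the domain itself. Values are placeholders.
+  - name: Delete only the blog subdomain of the test domain
+    community.general.webfaction_domain:
+      name: mydomain.com
+      state: absent
+      subdomains:
+        - blog
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"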
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ subdomains=dict(required=False, default=[], type='list'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+        # Does a domain with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+            # If this isn't a dry run, create the domain
+ # print positional_args
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+        # If the domain's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+            # If this isn't a dry run, delete the domain
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
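All of the webfaction_* modules in this patch share the same XML-RPC call pattern: open a ServerProxy against the Webfaction API, log in to obtain a session id, list the existing objects, re-key them by name, and only then decide whether anything has to change. A minimal standalone sketch of that pattern follows; the credentials and the domain name are placeholders, not values from this collection.

    # Sketch of the shared Webfaction XML-RPC pattern; credentials and the
    # domain name are placeholders.
    from xmlrpc.client import ServerProxy

    LOGIN_NAME = 'example-user'        # placeholder
    LOGIN_PASSWORD = 'example-pass'    # placeholder

    webfaction = ServerProxy('https://api.webfaction.com/')
    session_id, account = webfaction.login(LOGIN_NAME, LOGIN_PASSWORD)

    # Re-key the list of domain records by domain name, as webfaction_domain does.
    domain_map = dict((d['domain'], d) for d in webfaction.list_domains(session_id))

    if 'mydomain.com' not in domain_map:
        # Positional arguments are: session id, domain, then any subdomains.
        webfaction.create_domain(session_id, 'mydomain.com', 'www', 'blog')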
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
new file mode 100644
index 00000000..144fad29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+ - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+ required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a mailbox
+ community.general.webfaction_mailbox:
+      mailbox_name: "mybox"
+      mailbox_password: "myboxpw"
+      state: present
+      login_name: "{{webfaction_user}}"
+      login_password: "{{webfaction_passwd}}"
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ mailbox_name=dict(required=True),
+ mailbox_password=dict(required=True, no_log=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+ site_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False,)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+ elif site_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py
new file mode 100644
index 00000000..8ae98280
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/webfaction_site.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+ - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ type: bool
+ default: 'no'
+
+ site_apps:
+ description:
+ - A mapping of URLs to apps
+ default: []
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ default: []
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create website
+ community.general.webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(required=False, type='list', default=[]),
+ site_apps=dict(required=False, type='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed=False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
+ for a in module.params['site_apps']:
+ positional_args.append((a[0], a[1]))
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
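Of the webfaction modules, webfaction_site has the most involved idempotence check: a site is only considered unchanged when the HTTPS flag, the set of subdomains, and the app-to-path mapping all match the existing record. A small sketch of that comparison with made-up data (the record layout mirrors what list_websites() returns above):

    # Sketch of the webfaction_site idempotence check; all values are made up.
    existing_site = {
        'https': False,
        'subdomains': ['testsite1.my_domain.org'],
        'website_apps': [['testapp1', '/']],
    }
    desired_https = False
    desired_subdomains = ['testsite1.my_domain.org']
    desired_site_apps = [['testapp1', '/']]

    unchanged = (
        existing_site['https'] == desired_https
        and set(existing_site['subdomains']) == set(desired_subdomains)
        and dict(existing_site['website_apps']) == dict(desired_site_apps)
    )
    print('changed:', not unchanged)   # -> changed: False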
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py
new file mode 100644
index 00000000..8b1449be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xattr.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user defined extended attributes
+description:
+ - Manages filesystem user defined extended attributes.
+ - Requires that extended attributes are enabled on the target filesystem
+ and that the setfattr/getfattr utilities are present.
+options:
+ path:
+ description:
+      - The full path of the file/object to get or set the attributes of.
+ - Before 2.3 this option was only usable as I(name).
+ type: path
+ required: true
+ aliases: [ name ]
+ namespace:
+ description:
+ - Namespace of the named name/key.
+ type: str
+ default: user
+ key:
+ description:
+ - The name of a specific Extended attribute key to set/retrieve.
+ type: str
+ value:
+ description:
+      - The value to set the named name/key to. Setting a value implies C(state=present).
+ type: str
+ state:
+ description:
+      - Defines the operation to perform.
+ C(read) retrieves the current value for a C(key) (default)
+ C(present) sets C(name) to C(value), default if value is set
+ C(all) dumps all data
+ C(keys) retrieves all keys
+ C(absent) deletes the key
+ type: str
+ choices: [ absent, all, keys, present, read ]
+ default: read
+ follow:
+ description:
+ - If C(yes), dereferences symlinks and sets/gets attributes on symlink target,
+ otherwise acts on symlink itself.
+ type: bool
+ default: yes
+notes:
+ - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well.
+author:
+- Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Obtain the extended attributes of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+
+- name: Set the key 'user.foo' to value 'bar'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ value: bar
+
+- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ value: "0x817b94343f164f199e5b573b4ea1f914"
+
+- name: Remove the key 'user.foo'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ state: absent
+
+- name: Remove the key 'trusted.glusterfs.volume-id'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ state: absent
+'''
+
+import os
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+
+
+def get_xattr_keys(module, path, follow):
+ cmd = [module.get_bin_path('getfattr', True)]
+ # prevents warning and not sure why it's not default
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def get_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('getfattr', True)]
+ # prevents warning and not sure why it's not default
+ cmd.append('--absolute-names')
+ if not follow:
+ cmd.append('-h')
+ if key is None:
+ cmd.append('-d')
+ else:
+ cmd.append('-n %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def set_xattr(module, path, key, value, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-n %s' % key)
+ cmd.append('-v %s' % value)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def rm_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-x %s' % key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def _run_xattr(module, cmd, check_rc=True):
+
+ try:
+ (rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
+ except Exception as e:
+ module.fail_json(msg="%s!" % to_native(e))
+
+ # result = {'raw': out}
+ result = {}
+ for line in out.splitlines():
+ if line.startswith('#') or line == '':
+ pass
+ elif '=' in line:
+ (key, val) = line.split('=')
+ result[key] = val.strip('"')
+ else:
+ result[line] = ''
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name']),
+ namespace=dict(type='str', default='user'),
+ key=dict(type='str'),
+ value=dict(type='str'),
+ state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
+ follow=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ path = module.params.get('path')
+ namespace = module.params.get('namespace')
+ key = module.params.get('key')
+ value = module.params.get('value')
+ state = module.params.get('state')
+ follow = module.params.get('follow')
+
+ if not os.path.exists(path):
+ module.fail_json(msg="path not found or not accessible!")
+
+ changed = False
+ msg = ""
+ res = {}
+
+ if key is None and state in ['absent', 'present']:
+ module.fail_json(msg="%s needs a key parameter" % state)
+
+ # Prepend the key with the namespace if defined
+ if (
+ key is not None and
+ namespace is not None and
+ len(namespace) > 0 and
+ not (namespace == 'user' and key.startswith('user.'))):
+ key = '%s.%s' % (namespace, key)
+
+ if (state == 'present' or value is not None):
+ current = get_xattr(module, path, key, follow)
+ if current is None or key not in current or value != current[key]:
+ if not module.check_mode:
+ res = set_xattr(module, path, key, value, follow)
+ changed = True
+ res = current
+ msg = "%s set to %s" % (key, value)
+ elif state == 'absent':
+ current = get_xattr(module, path, key, follow)
+ if current is not None and key in current:
+ if not module.check_mode:
+ res = rm_xattr(module, path, key, follow)
+ changed = True
+ res = current
+ msg = "%s removed" % (key)
+ elif state == 'keys':
+ res = get_xattr_keys(module, path, follow)
+ msg = "returning all keys"
+ elif state == 'all':
+ res = get_xattr(module, path, None, follow)
+ msg = "dumping all"
+ else:
+ res = get_xattr(module, path, key, follow)
+ msg = "returning %s" % key
+
+ module.exit_json(changed=changed, msg=msg, xattr=res)
+
+
+if __name__ == '__main__':
+ main()
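The only non-obvious piece of logic in the xattr module is how the C(namespace) option is folded into the key: the key is prefixed with the namespace unless the namespace is the default C(user) and the key already carries the C(user.) prefix. A standalone sketch of just that rule:

    # Sketch of the key-namespacing rule used by the xattr module.
    def namespaced_key(key, namespace='user'):
        if key is None or not namespace:
            return key
        if namespace == 'user' and key.startswith('user.'):
            return key
        return '%s.%s' % (namespace, key)

    print(namespaced_key('foo'))                               # user.foo
    print(namespaced_key('user.foo'))                          # user.foo (not doubled)
    print(namespaced_key('glusterfs.volume-id', 'trusted'))    # trusted.glusterfs.volume-id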
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py
new file mode 100644
index 00000000..6f2f5dfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xbps.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ aliases: [pkg,package]
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent", "latest", "installed", "removed"]
+ type: str
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ type: bool
+ default: no
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ aliases: ['update-cache']
+ type: bool
+ default: yes
+ upgrade:
+ description:
+      - Whether or not to upgrade the whole system.
+ type: bool
+ default: no
+ upgrade_xbps:
+ description:
+ - Whether or not to upgrade the xbps package when necessary.
+ Before installing new packages,
+ xbps requires the user to update the xbps package itself.
+ Thus when this option is set to C(no),
+ upgrades and installations will fail when xbps is not up to date.
+ type: bool
+ default: yes
+ version_added: '0.2.0'
+ force:
+ description:
+ - This option doesn't have any effect and is deprecated, it will be
+ removed in 3.0.0.
+ type: bool
+ default: no
+'''
+
+EXAMPLES = '''
+- name: Install package foo (automatically updating the xbps package if needed)
+ community.general.xbps: name=foo state=present
+
+- name: Upgrade package foo
+ community.general.xbps: name=foo state=latest update_cache=yes
+
+- name: Remove packages foo and bar
+ community.general.xbps: name=foo,bar state=absent
+
+- name: Recursively remove package foo
+ community.general.xbps: name=foo state=absent recurse=yes
+
+- name: Update package cache
+ community.general.xbps: update_cache=yes
+
+- name: Upgrade packages
+ community.general.xbps: upgrade=yes
+
+- name: Install a package, failing if the xbps package is out of date
+ community.general.xbps:
+ name: foo
+ state: present
+ upgrade_xbps: no
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: str
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+ returned: success
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+ return bool(len(xbps_output))
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+ if rrc == 0 or rrc == 17:
+ """Return True to indicate that the package is installed locally,
+ and the result of the version number comparison to determine if the
+ package is up-to-date"""
+ return True, name not in rstdout
+
+ return False, False
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
+ if "avg rate" in stdout:
+ return True
+ else:
+ return False
+
+
+def upgrade_xbps(module, xbps_path, exit_on_success=False):
+ cmdupgradexbps = "%s -uy xbps" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg='Could not upgrade xbps itself')
+
+
+def upgrade(module, xbps_path):
+ """Returns true is full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+        if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ elif module.check_mode:
+ module.exit_json(changed=True, msg='Would have performed upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ elif rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-upgrade again
+ module.params['upgrade_xbps'] = False
+ upgrade(module, xbps_path)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+ for i, package in enumerate(packages):
+ """If the package is installed and state == present or state == latest
+ and is up-to-date then skip"""
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-update again
+ module.params['upgrade_xbps'] = False
+ install_packages(module, xbps_path, state, packages)
+ elif rc != 0 and not (state == 'latest' and rc == 17):
+ module.fail_json(msg="failed to install %s" % (package))
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+ """Update package cache"""
+ if module.check_mode:
+ if upgrade_planned:
+ return
+ module.exit_json(
+ changed=True, msg='Would have updated the package cache'
+ )
+ changed = update_package_db(module, xbps_path)
+ if not upgrade_planned:
+ module.exit_json(changed=changed, msg=(
+ 'Updated the package master lists' if changed
+ else 'Package list already up to date'
+ ))
+
+
+def main():
+ """Returns, calling appropriate command"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool', removed_in_version='3.0.0', removed_from_collection='community.general'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, aliases=['update-cache'],
+ type='bool'),
+ upgrade_xbps=dict(default=True, type='bool')
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+ if not os.path.exists(xbps_path['install']):
+ module.fail_json(msg="cannot find xbps, in path %s"
+ % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
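The xbps module leans on xbps-install's exit codes: 16 means xbps itself must be updated before anything else can be installed, and install_packages()/upgrade() react to that by upgrading xbps once and retrying, clearing C(upgrade_xbps) so the retry cannot loop. A toy sketch of that retry pattern, with stand-in callables instead of module.run_command():

    # Toy sketch of the retry-after-self-upgrade pattern; run_install and
    # run_self_upgrade stand in for the module.run_command() calls above.
    def install_with_retry(run_install, run_self_upgrade, allow_self_upgrade=True):
        rc = run_install()
        if rc == 16 and allow_self_upgrade:
            run_self_upgrade()   # corresponds to 'xbps-install -uy xbps'
            return install_with_retry(run_install, run_self_upgrade,
                                      allow_self_upgrade=False)
        return rc

    # Fake return codes: the first attempt reports 16, the retry succeeds.
    codes = iter([16, 0])
    print(install_with_retry(lambda: next(codes), lambda: None))   # -> 0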
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py
new file mode 100644
index 00000000..25923cb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_facts.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+#
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+short_description: get facts reported on xenserver
+description:
+ - Reads data out of XenAPI, can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp (@caphrim007)
+ - Robin Lee (@cheese)
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+ community.general.xenserver_facts:
+
+- name: Print running VMs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+# "item": "Control domain on host: 10.0.13.22",
+# "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+ result = distro.linux_distribution()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password('', '')
+ return session
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ networks = change_keys(recs, key='name_label')
+ return networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
+ for pif in pifs.values():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+ Take a xapi dict, and make the keys the value of recs[ref][key].
+
+ Preserves the ref in rec['ref']
+
+ """
+ new_recs = {}
+
+ for ref, rec in recs.items():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ for param_name, param_value in rec.items():
+ # param_value may be of type xmlrpc.client.DateTime,
+            # which is not simply convertible to str.
+ # Use 'value' attr to get the str value,
+ # following an example in xmlrpc.client.DateTime document
+ if hasattr(param_value, "value"):
+ rec[param_name] = param_value.value
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
+
+
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+ recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+ vms = change_keys(recs, key='name_label')
+ return vms
+
+
+def get_srs(session):
+ recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='name_label')
+ return srs
+
+
+def main():
+ module = AnsibleModule({})
+
+ if not HAVE_XENAPI:
+ module.fail_json(changed=False, msg="python xen api required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure as e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+ module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+ main()
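Most of the heavy lifting in xenserver_facts is the change_keys() helper, which takes the ref-keyed dictionaries returned by XenAPI and re-keys them by a chosen field while tucking the original ref back into each record. A simplified standalone sketch with made-up records (the DateTime conversion and filter_func handling of the real helper are omitted):

    # Simplified sketch of change_keys(); the input records are made up.
    def change_keys(recs, key='uuid'):
        new_recs = {}
        for ref, rec in recs.items():
            new_recs[rec[key]] = rec
            new_recs[rec[key]]['ref'] = ref
        return new_recs

    recs = {
        'OpaqueRef:1': {'uuid': 'aaaa', 'name_label': 'net0'},
        'OpaqueRef:2': {'uuid': 'bbbb', 'name_label': 'net1'},
    }
    print(change_keys(recs, key='name_label'))
    # {'net0': {..., 'ref': 'OpaqueRef:1'}, 'net1': {..., 'ref': 'OpaqueRef:2'}}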
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py
new file mode 100644
index 00000000..a9a5fb4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest.py
@@ -0,0 +1,1933 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest
+short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ modify various virtual machine components like network and disk, rename a virtual machine and
+ remove a virtual machine with associated components.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
+ XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
+ detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
+  agent only supports None and Static types of network configuration, where None means DHCP configured interface, C(networks.type) and C(networks.type6)
+  values C(none) and C(dhcp) have the same effect. More info here:
+ U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
+  C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
+  WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+  to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+  Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
+  parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+  useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+ U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+ - If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+ - If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ type: str
+ default: present
+ choices: [ present, absent, poweredon ]
+ name:
+ description:
+ - Name of the VM to work with.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ required: yes
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - VM description.
+ type: str
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str
+ template:
+ description:
+ - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
+    - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+    - In case of multiple templates/VMs/snapshots with the same name, use C(template_uuid) to uniquely specify the source template.
+ - If VM already exists, this setting will be ignored.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ template_src ]
+ template_uuid:
+ description:
+ - UUID of a template, an existing VM or a snapshot that should be used to create VM.
+ - It is required if template name is not unique.
+ type: str
+ is_template:
+ description:
+ - Convert VM to template.
+ type: bool
+ default: no
+ folder:
+ description:
+ - Destination folder for VM.
+ - This parameter is case sensitive.
+ - 'Example:'
+ - ' folder: /folder1/folder2'
+ type: str
+ hardware:
+ description:
+ - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
+ - 'Valid parameters are:'
+ - ' - C(num_cpus) (integer): Number of CPUs.'
+ - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
+ - ' - C(memory_mb) (integer): Amount of memory in MB.'
+ type: dict
+ disks:
+ description:
+ - A list of disks to add to VM.
+ - All parameters are case sensitive.
+ - Removing or detaching existing disks of VM is not supported.
+ - 'Required parameters per entry:'
+ - ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
+ - 'Optional parameters per entry:'
+ - ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
+ - ' - C(name_desc) (string): Disk description.'
+ - ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
+ - ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
+ type: list
+ elements: dict
+ aliases: [ disk ]
+ cdrom:
+ description:
+ - A CD-ROM configuration for the VM.
+ - All parameters are case sensitive.
+ - 'Valid parameters are:'
+ - ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
+ - ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
+ Required if C(type) is set to C(iso).'
+ type: dict
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - All parameters are case sensitive.
+ - 'Required parameters per entry:'
+ - ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
+ - 'Optional parameters per entry (used for VM hardware):'
+ - ' - C(mac) (string): Customize MAC address of the interface.'
+ - 'Optional parameters per entry (used for OS customization):'
+ - ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
+ - ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
+ - ' - C(gateway) (string): Static IPv4 gateway.'
+ - ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
+ On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
+ - ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
+ - ' - C(gateway6) (string): Static IPv6 gateway.'
+ type: list
+ elements: dict
+ aliases: [ network ]
+ home_server:
+ description:
+ - Name of a XenServer host that will be a Home Server for the VM.
+ - This parameter is case sensitive.
+ type: str
+ custom_params:
+ description:
+ - Define a list of custom VM params to set on VM.
+    - Useful for advanced users familiar with managing VM params through the xe CLI.
+ - A custom value object takes two fields C(key) and C(value) (see example below).
+ type: list
+ elements: dict
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+    - 'By default, the module will wait indefinitely for the VM to acquire an IP address if C(wait_for_ip_address: yes).'
+ - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
+ - In case of timeout, module will generate an error message.
+ type: int
+ default: 0
+ linked_clone:
+ description:
+ - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy.
+ - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
+ type: bool
+ default: no
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+ - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM from a template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: CentOS 7
+ disks:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ memory_mb: 512
+ cdrom:
+ type: iso
+ iso_name: guest-tools.iso
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: yes
+ delegate_to: localhost
+ register: deploy
+
+- name: Create a VM template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: no
+ folder: /testvms
+ name: testvm_6
+ is_template: yes
+ disk:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ delegate_to: localhost
+ register: deploy
+
+- name: Rename a VM (requires the VM's UUID)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a VM by UUID
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ delegate_to: localhost
+
+- name: Modify custom params (boot order)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_8
+ state: present
+ custom_params:
+ - key: HVM_boot_params
+ value: { "order": "ndc" }
+ delegate_to: localhost
+
+- name: Customize network parameters
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_10
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100/24
+ gateway: 192.168.1.1
+ - type: dhcp
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+changes:
+ description: Detected or made changes to VM
+ returned: always
+ type: list
+ sample: [
+ {
+ "hardware": [
+ "num_cpus"
+ ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ],
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
+ is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
+ is_valid_ip6_addr, is_valid_ip6_prefix)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def exists(self):
+ """Returns True if VM exists, else False."""
+ return True if self.vm_ref is not None else False
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+ def deploy(self):
+ """Deploys new VM from template."""
+ # Safety check.
+ if self.exists():
+ self.module.fail_json(msg="Called deploy on existing VM!")
+
+ try:
+ templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
+ msg_prefix="VM deploy: ")
+
+ # Is this an existing running VM?
+ if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
+ self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
+
+ # Find a SR we can use for VM.copy(). We use SR of the first disk
+ # if specified or default SR if not specified.
+ disk_params_list = self.module.params['disks']
+
+ sr_ref = None
+
+ if disk_params_list:
+ disk_params = disk_params_list[0]
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM deploy disks[0]: ")
+
+ if not sr_ref:
+ if self.default_sr_ref != "OpaqueRef:NULL":
+ sr_ref = self.default_sr_ref
+ else:
+ self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
+
+ # VM name could be an empty string which is bad.
+ if self.module.params['name'] is not None and not self.module.params['name']:
+ self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Now we can instantiate VM. We use VM.clone for linked_clone and
+ # VM.copy for non linked_clone.
+ if self.module.params['linked_clone']:
+ self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
+ else:
+ self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
+
+ # Description is copied over from template so we reset it.
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
+
+ # If template is one of built-in XenServer templates, we have to
+ # do some additional steps.
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2
+ # onward so we use an alternative way.
+ templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
+
+ if "default_template" in templ_other_config and templ_other_config['default_template']:
+ # other_config of built-in XenServer templates have a key called
+ # 'disks' with the following content:
+ # disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
+ # This value of other_data is copied to cloned or copied VM and
+ # it prevents provisioning of VM because sr is not specified and
+ # XAPI returns an error. To get around this, we remove the
+ # 'disks' key and add disks to VM later ourselves.
+ vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
+
+ if "disks" in vm_other_config:
+ del vm_other_config['disks']
+
+ self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
+
+ # At this point we have VM ready for provisioning.
+ self.xapi_session.xenapi.VM.provision(self.vm_ref)
+
+ # After provisioning we can prepare vm_params for reconfigure().
+ self.gather_params()
+
+ # VM is almost ready. We just need to reconfigure it...
+ self.reconfigure()
+
+ # Power on VM if needed.
+ if self.module.params['state'] == "poweredon":
+ self.set_power_state("poweredon")
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def reconfigure(self):
+ """Reconfigures an existing VM.
+
+ Returns:
+ list: parameters that were reconfigured.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called reconfigure on non existing VM!")
+
+ config_changes = self.get_changes()
+
+ vm_power_state_save = self.vm_params['power_state'].lower()
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return config_changes
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+ self.set_power_state("shutdownguest")
+
+ try:
+ for change in config_changes:
+ if isinstance(change, six.string_types):
+ if change == "name":
+ self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+ elif change == "name_desc":
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+ elif change == "folder":
+ self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+
+ if self.module.params['folder']:
+ self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+ elif change == "home_server":
+ if self.module.params['home_server']:
+ host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+ else:
+ host_ref = "OpaqueRef:NULL"
+
+ self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
+ elif isinstance(change, dict):
+ if change.get('hardware'):
+ for hardware_change in change['hardware']:
+ if hardware_change == "num_cpus":
+ num_cpus = int(self.module.params['hardware']['num_cpus'])
+
+ if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ else:
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ elif hardware_change == "num_cpu_cores_per_socket":
+ self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
+ num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+
+ if num_cpu_cores_per_socket > 1:
+ self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+ elif hardware_change == "memory_mb":
+ memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
+ vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
+
+ self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
+ elif change.get('disks_changed'):
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+ position = 0
+
+ for disk_change_list in change['disks_changed']:
+ for disk_change in disk_change_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+
+ if disk_change == "name":
+ self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+ elif disk_change == "name_desc":
+ self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+ elif disk_change == "size":
+ self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
+ "VM reconfigure disks[%s]: " % position)))
+
+ position += 1
+ elif change.get('disks_new'):
+ for position, disk_userdevice in change['disks_new']:
+ disk_params = self.module.params['disks'][position]
+
+ disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+ disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
+
+ if disk_params.get('sr_uuid'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
+ elif disk_params.get('sr'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+ else:
+ sr_ref = self.default_sr_ref
+
+ disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+
+ new_disk_vdi = {
+ "name_label": disk_name,
+ "name_description": disk_name_desc,
+ "SR": sr_ref,
+ "virtual_size": disk_size,
+ "type": "user",
+ "sharable": False,
+ "read_only": False,
+ "other_config": {},
+ }
+
+ new_disk_vbd = {
+ "VM": self.vm_ref,
+ "VDI": None,
+ "userdevice": disk_userdevice,
+ "bootable": False,
+ "mode": "RW",
+ "type": "Disk",
+ "empty": False,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+ vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
+
+ elif change.get('cdrom'):
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If there is no CD present, we have to create one.
+ if not vm_cdrom_params_list:
+ # We will try to place the CD-ROM at userdevice position
+ # 3 (which is the default) if it is not already occupied;
+ # otherwise we will place it at the first allowed position.
+ cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if "3" in cdrom_userdevices_allowed:
+ cdrom_userdevice = "3"
+ else:
+ cdrom_userdevice = cdrom_userdevices_allowed[0]
+
+ cdrom_vbd = {
+ "VM": self.vm_ref,
+ "VDI": "OpaqueRef:NULL",
+ "userdevice": cdrom_userdevice,
+ "bootable": False,
+ "mode": "RO",
+ "type": "CD",
+ "empty": True,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
+ else:
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
+
+ cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
+
+ for cdrom_change in change['cdrom']:
+ if cdrom_change == "type":
+ cdrom_type = self.module.params['cdrom']['type']
+
+ if cdrom_type == "none" and not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+ elif cdrom_type == "host":
+ # Unimplemented!
+ pass
+
+ elif cdrom_change == "iso_name":
+ if not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+
+ cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
+ self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
+ elif change.get('networks_changed'):
+ position = 0
+
+ for network_change_list in change['networks_changed']:
+ if network_change_list:
+ vm_vif_params = self.vm_params['VIFs'][position]
+ network_params = self.module.params['networks'][position]
+
+ vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
+ network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
+
+ vif_recreated = False
+
+ if "name" in network_change_list or "mac" in network_change_list:
+ # To change network or MAC, we destroy old
+ # VIF and then create a new one with changed
+ # parameters. That's how XenCenter does it.
+
+ # Copy all old parameters to new VIF record.
+ vif = {
+ "device": vm_vif_params['device'],
+ "network": network_ref,
+ "VM": vm_vif_params['VM'],
+ "MAC": vm_vif_params['MAC'],
+ "MTU": vm_vif_params['MTU'],
+ "other_config": vm_vif_params['other_config'],
+ "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
+ "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
+ "locking_mode": vm_vif_params['locking_mode'],
+ "ipv4_allowed": vm_vif_params['ipv4_allowed'],
+ "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+ }
+
+ if "name" in network_change_list:
+ network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+ vif['network'] = network_ref_new
+ vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+
+ if "mac" in network_change_list:
+ vif['MAC'] = network_params['mac'].lower()
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.unplug(vif_ref)
+
+ self.xapi_session.xenapi.VIF.destroy(vif_ref)
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ vif_ref = vif_ref_new
+ vif_recreated = True
+
+ if self.vm_params['customization_agent'] == "native":
+ vif_reconfigure_needed = False
+
+ if "type" in network_change_list:
+ network_type = network_params['type'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type = vm_vif_params['ipv4_configuration_mode']
+
+ if "ip" in network_change_list:
+ network_ip = network_params['ip']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses']:
+ network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+ else:
+ network_ip = ""
+
+ if "prefix" in network_change_list:
+ network_prefix = "/%s" % network_params['prefix']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ else:
+ network_prefix = ""
+
+ if "gateway" in network_change_list:
+ network_gateway = network_params['gateway']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway = vm_vif_params['ipv4_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
+ "%s%s" % (network_ip, network_prefix), network_gateway)
+
+ vif_reconfigure_needed = False
+
+ if "type6" in network_change_list:
+ network_type6 = network_params['type6'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type6 = vm_vif_params['ipv6_configuration_mode']
+
+ if "ip6" in network_change_list:
+ network_ip6 = network_params['ip6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses']:
+ network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+ else:
+ network_ip6 = ""
+
+ if "prefix6" in network_change_list:
+ network_prefix6 = "/%s" % network_params['prefix6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+ else:
+ network_prefix6 = ""
+
+ if "gateway6" in network_change_list:
+ network_gateway6 = network_params['gateway6']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway6 = vm_vif_params['ipv6_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
+ "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vif_device = vm_vif_params['device']
+
+ # A user could have manually changed the network
+ # or mac, e.g. through XenCenter, and then also
+ # made those changes in the playbook manually.
+ # In that case, the module will not detect any
+ # changes and the info in xenstore_data will
+ # become stale. For that reason, we always
+ # update name and mac in xenstore_data.
+
+ # Since we handle name and mac differently,
+ # we have to remove them from
+ # network_change_list.
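+ # Illustration (hypothetical values): with vif_device '0', the keys
+ # vm-data/networks/0/name and vm-data/networks/0/mac are always
+ # rewritten below, while keys such as vm-data/networks/0/ip are
+ # rewritten only when they appear in network_change_list.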
+ network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+
+ for network_change in network_change_list_tmp + ['name', 'mac']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change))
+
+ if network_params.get('name'):
+ network_name = network_params['name']
+ else:
+ network_name = vm_vif_params['network']['name_label']
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+
+ if network_params.get('mac'):
+ network_mac = network_params['mac'].lower()
+ else:
+ network_mac = vm_vif_params['MAC'].lower()
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+
+ for network_change in network_change_list_tmp:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change),
+ network_params[network_change])
+
+ position += 1
+ elif change.get('networks_new'):
+ for position, vif_device in change['networks_new']:
+ network_params = self.module.params['networks'][position]
+
+ network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+
+ network_name = network_params['name']
+ network_mac = network_params['mac'] if network_params.get('mac') else ""
+ network_type = network_params.get('type')
+ network_ip = network_params['ip'] if network_params.get('ip') else ""
+ network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
+ network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
+ network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
+ network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
+ network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+
+ vif = {
+ "device": vif_device,
+ "network": network_ref,
+ "VM": self.vm_ref,
+ "MAC": network_mac,
+ "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
+ "%s/%s" % (network_ip, network_prefix), network_gateway)
+
+ if network_type6 and network_type6 == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
+ "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+ elif self.vm_params['customization_agent'] == "custom":
+ # We first have to remove any existing data
+ # from xenstore_data because there could be
+ # some old leftover data from some interface
+ # that once occupied same device location as
+ # our new interface.
+ for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+
+ # We get MAC from VIF itself instead of
+ # networks.mac because it could be
+ # autogenerated.
+ vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+
+ if network_type:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+
+ if network_type == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip" % vif_device, network_ip)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+
+ if network_type6:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+
+ if network_type6 == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+
+ elif change.get('custom_params'):
+ for position in change['custom_params']:
+ custom_param_key = self.module.params['custom_params'][position]['key']
+ custom_param_value = self.module.params['custom_params'][position]['value']
+ self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+
+ if self.module.params['is_template']:
+ self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
+ elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+ self.set_power_state("poweredon")
+
+ # Gather new params after reconfiguration.
+ self.gather_params()
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return config_changes
+
+ def destroy(self):
+ """Removes an existing VM with associated disks"""
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called destroy on non-existing VM!")
+
+ if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Make sure that VM is poweredoff before we can destroy it.
+ self.set_power_state("poweredoff")
+
+ try:
+ # Destroy VM!
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref)
+
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # Destroy all VDIs associated with VM!
+ for vm_disk_params in vm_disk_params_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+
+ self.xapi_session.xenapi.VDI.destroy(vdi_ref)
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_changes(self):
+ """Finds VM parameters that differ from specified ones.
+
+ This method builds a dictionary with hierarchy of VM parameters
+ that differ from those specified in module parameters.
+
+ Returns:
+ list: VM parameters that differ from those specified in
+ module parameters.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called get_changes on non-existing VM!")
+
+ need_poweredoff = False
+
+ if self.module.params['is_template']:
+ need_poweredoff = True
+
+ try:
+ # This VM could be a template or a snapshot. In that case we fail
+ # because we can't reconfigure them or it would just be too
+ # dangerous.
+ if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+
+ if self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+
+ # Let's build a list of parameters that changed.
+ config_changes = []
+
+ # Name could only differ if we found an existing VM by uuid.
+ if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
+ if self.module.params['name']:
+ config_changes.append('name')
+ else:
+ self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
+
+ if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
+ config_changes.append('name_desc')
+
+ # Folder parameter is found in other_config.
+ vm_other_config = self.vm_params['other_config']
+ vm_folder = vm_other_config.get('folder', '')
+
+ if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
+ config_changes.append('folder')
+
+ if self.module.params['home_server'] is not None:
+ if (self.module.params['home_server'] and
+ (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
+
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
+ msg_prefix="VM check home_server: ")
+
+ config_changes.append('home_server')
+ elif not self.module.params['home_server'] and self.vm_params['affinity']:
+ config_changes.append('home_server')
+
+ config_changes_hardware = []
+
+ if self.module.params['hardware']:
+ num_cpus = self.module.params['hardware'].get('num_cpus')
+
+ if num_cpus is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpus = int(num_cpus)
+ except ValueError as e:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
+
+ if num_cpus < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
+
+ # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
+ # say the former is the way to go but this needs
+ # confirmation and testing.
+ if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+ config_changes_hardware.append('num_cpus')
+ # For now, we don't support hotplugging so the VM has to be in
+ # poweredoff state to reconfigure.
+ need_poweredoff = True
+
+ num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+ if num_cpu_cores_per_socket is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+ except ValueError as e:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+ if num_cpu_cores_per_socket < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+ if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
+ vm_platform = self.vm_params['platform']
+ vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+ if num_cpu_cores_per_socket != vm_cores_per_socket:
+ config_changes_hardware.append('num_cpu_cores_per_socket')
+ # For now, we don't support hotplugging so the VM has to be
+ # in poweredoff state to reconfigure.
+ need_poweredoff = True
+
+ memory_mb = self.module.params['hardware'].get('memory_mb')
+
+ if memory_mb is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ memory_mb = int(memory_mb)
+ except ValueError as e:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+ if memory_mb < 1:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+ # There are multiple memory parameters:
+ # - memory_dynamic_max
+ # - memory_dynamic_min
+ # - memory_static_max
+ # - memory_static_min
+ # - memory_target
+ #
+ # memory_target seems like a good candidate but it returns 0 for
+ # halted VMs so we can't use it.
+ #
+ # I decided to use memory_dynamic_max and memory_static_max
+ # and use whichever is larger. This strategy needs validation
+ # and testing.
+ #
+ # XenServer stores memory size in bytes so we need to divide
+ # it by 1024*1024 = 1048576.
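+ # Illustration (hypothetical value): hardware.memory_mb=8192 maps to
+ # 8192 * 1048576 = 8589934592 bytes, which is compared against
+ # max(memory_dynamic_max, memory_static_max) below.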
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+ config_changes_hardware.append('memory_mb')
+ # For now, we don't support hotplugging so the VM has to be in
+ # poweredoff state to reconfigure.
+ need_poweredoff = True
+
+ if config_changes_hardware:
+ config_changes.append({"hardware": config_changes_hardware})
+
+ config_changes_disks = []
+ config_new_disks = []
+
+ # Find allowed userdevices.
+ vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if self.module.params['disks']:
+ # Get the list of all disks. Filter out any CDs found.
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # The number of disks defined in module params has to be the same
+ # as or higher than the number of existing disks attached to the VM.
+ # We don't support removal or detachment of disks.
+ if len(self.module.params['disks']) < len(vm_disk_params_list):
+ self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
+ (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+ # Find the highest userdevice occupied by a disk.
+ if not vm_disk_params_list:
+ vm_disk_userdevice_highest = "-1"
+ else:
+ vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+ for position in range(len(self.module.params['disks'])):
+ if position < len(vm_disk_params_list):
+ vm_disk_params = vm_disk_params_list[position]
+ else:
+ vm_disk_params = None
+
+ disk_params = self.module.params['disks'][position]
+
+ disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+
+ disk_name = disk_params.get('name')
+
+ if disk_name is not None and not disk_name:
+ self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+ # If this is an existing disk.
+ if vm_disk_params and vm_disk_params['VDI']:
+ disk_changes = []
+
+ if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+ disk_changes.append('name')
+
+ disk_name_desc = disk_params.get('name_desc')
+
+ if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+ disk_changes.append('name_desc')
+
+ if disk_size:
+ if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+ disk_changes.append('size')
+ need_poweredoff = True
+ elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+ self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+ "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+ config_changes_disks.append(disk_changes)
+ # If this is a new disk.
+ else:
+ if not disk_size:
+ self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM check disks[%s]: " % position)
+ elif self.default_sr_ref == 'OpaqueRef:NULL':
+ self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+ if not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+ disk_userdevice = None
+
+ # We need to place a new disk right above the highest
+ # placed existing disk to maintain relative disk
+ # positions pairable with disk specifications in
+ # module params. That place must not be occupied by
+ # some other device like CD-ROM.
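+ # Illustration (hypothetical values): if the highest occupied disk
+ # userdevice is '0' and the allowed userdevices are ['1', '2', ...],
+ # the new disk is placed at userdevice '1'.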
+ for userdevice in vbd_userdevices_allowed:
+ if int(userdevice) > int(vm_disk_userdevice_highest):
+ disk_userdevice = userdevice
+ vbd_userdevices_allowed.remove(userdevice)
+ vm_disk_userdevice_highest = userdevice
+ break
+
+ # If no place was found.
+ if disk_userdevice is None:
+ # The highest occupied place could be a CD-ROM device
+ # so we have to include all devices regardless of
+ # type when calculating the out-of-bounds position.
+ disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+ self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+ # For new disks we only track their position.
+ config_new_disks.append((position, disk_userdevice))
+
+ # We should append config_changes_disks to config_changes only
+ # if there is at least one changed disk, else skip.
+ for disk_change in config_changes_disks:
+ if disk_change:
+ config_changes.append({"disks_changed": config_changes_disks})
+ break
+
+ if config_new_disks:
+ config_changes.append({"disks_new": config_new_disks})
+
+ config_changes_cdrom = []
+
+ if self.module.params['cdrom']:
+ # Get the list of all CD-ROMs. Filter out any regular disks
+ # found. If we found no existing CD-ROM, we will create one
+ # later; otherwise we take the first one found.
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If no existing CD-ROM is found, we will need to add one.
+ # We need to check if there is any userdevice allowed.
+ if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+ cdrom_type = self.module.params['cdrom'].get('type')
+ cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+ # If cdrom.iso_name is specified but cdrom.type is not,
+ # then set cdrom.type to 'iso', unless cdrom.iso_name is
+ # an empty string, in which case set cdrom.type to 'none'.
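+ # Illustration (hypothetical values): cdrom: { iso_name: "ubuntu.iso" }
+ # implies cdrom.type "iso", while cdrom: { iso_name: "" } implies
+ # cdrom.type "none".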
+ if not cdrom_type:
+ if cdrom_iso_name:
+ cdrom_type = "iso"
+ elif cdrom_iso_name is not None:
+ cdrom_type = "none"
+
+ self.module.params['cdrom']['type'] = cdrom_type
+
+ # If type changed.
+ if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+ config_changes_cdrom.append('type')
+
+ if cdrom_type == "iso":
+ # Check if ISO exists.
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+ msg_prefix="VM check cdrom.iso_name: ")
+
+ # Is ISO image changed?
+ if (cdrom_iso_name and
+ (not vm_cdrom_params_list or
+ not vm_cdrom_params_list[0]['VDI'] or
+ cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+ config_changes_cdrom.append('iso_name')
+
+ if config_changes_cdrom:
+ config_changes.append({"cdrom": config_changes_cdrom})
+
+ config_changes_networks = []
+ config_new_networks = []
+
+ # Find allowed devices.
+ vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+ if self.module.params['networks']:
+ # The number of VIFs defined in module params has to be the same
+ # as or higher than the number of existing VIFs attached to the VM.
+ # We don't support removal of VIFs.
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+ self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
+ (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+ # Find the highest occupied device.
+ if not self.vm_params['VIFs']:
+ vif_device_highest = "-1"
+ else:
+ vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+ for position in range(len(self.module.params['networks'])):
+ if position < len(self.vm_params['VIFs']):
+ vm_vif_params = self.vm_params['VIFs'][position]
+ else:
+ vm_vif_params = None
+
+ network_params = self.module.params['networks'][position]
+
+ network_name = network_params.get('name')
+
+ if network_name is not None and not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+ if network_name:
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+ msg_prefix="VM check networks[%s]: " % position)
+
+ network_mac = network_params.get('mac')
+
+ if network_mac is not None:
+ network_mac = network_mac.lower()
+
+ if not is_mac(network_mac):
+ self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+ # IPv4 reconfiguration.
+ network_type = network_params.get('type')
+ network_ip = network_params.get('ip')
+ network_netmask = network_params.get('netmask')
+ network_prefix = None
+
+ # If networks.ip is specified and networks.type is not,
+ # then set networks.type to 'static'.
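+ # Illustration (hypothetical values): networks: [{ name: "mynet",
+ # ip: "192.168.0.10/24" }] implies type "static"; the "/24" suffix is
+ # later split off as prefix "24" and converted to netmask
+ # "255.255.255.0".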
+ if not network_type and network_ip:
+ network_type = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+ network_type = "none"
+
+ if network_type and network_type == "static":
+ if network_ip is not None:
+ network_ip_split = network_ip.split('/')
+ network_ip = network_ip_split[0]
+
+ if network_ip and not is_valid_ip_addr(network_ip):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+ if len(network_ip_split) > 1:
+ network_prefix = network_ip_split[1]
+
+ if not is_valid_ip_prefix(network_prefix):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
+ if network_netmask is not None:
+ if not is_valid_ip_netmask(network_netmask):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+ network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+ elif network_prefix is not None:
+ network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
+
+ # If any parameter is overridden at this point, update it.
+ if network_type:
+ network_params['type'] = network_type
+
+ if network_ip:
+ network_params['ip'] = network_ip
+
+ if network_netmask:
+ network_params['netmask'] = network_netmask
+
+ if network_prefix:
+ network_params['prefix'] = network_prefix
+
+ network_gateway = network_params.get('gateway')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway and not is_valid_ip_addr(network_gateway):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+ # IPv6 reconfiguration.
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params.get('ip6')
+ network_prefix6 = None
+
+ # If networks.ip6 is specified and networks.type6 is not,
+ # then set networks.type6 to 'static'.
+ if not network_type6 and network_ip6:
+ network_type6 = "static"
+
+ # XenServer natively supports only 'none' and 'static'
+ # type with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+ network_type6 = "none"
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 is not None:
+ network_ip6_split = network_ip6.split('/')
+ network_ip6 = network_ip6_split[0]
+
+ if network_ip6 and not is_valid_ip6_addr(network_ip6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+ if len(network_ip6_split) > 1:
+ network_prefix6 = network_ip6_split[1]
+
+ if not is_valid_ip6_prefix(network_prefix6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+ # If any parameter is overridden at this point, update it.
+ if network_type6:
+ network_params['type6'] = network_type6
+
+ if network_ip6:
+ network_params['ip6'] = network_ip6
+
+ if network_prefix6:
+ network_params['prefix6'] = network_prefix6
+
+ network_gateway6 = network_params.get('gateway6')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+ # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']:
+ network_changes = []
+
+ if network_name and network_name != vm_vif_params['network']['name_label']:
+ network_changes.append('name')
+
+ if network_mac and network_mac != vm_vif_params['MAC'].lower():
+ network_changes.append('mac')
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
+ network_changes.append('type')
+
+ if network_type and network_type == "static":
+ if network_ip and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
+ network_changes.append('ip')
+
+ if network_prefix and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+
+ if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
+ network_changes.append('gateway')
+
+ if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
+ network_changes.append('type6')
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
+ network_changes.append('ip6')
+
+ if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
+ network_changes.append('prefix6')
+
+ if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
+ network_changes.append('gateway6')
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = self.vm_params['xenstore_data']
+
+ if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
+ network_changes.append('type')
+ need_poweredoff = True
+
+ if network_type and network_type == "static":
+ if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
+ network_changes.append('ip')
+ need_poweredoff = True
+
+ if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+ need_poweredoff = True
+
+ if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
+ network_changes.append('type6')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
+ network_changes.append('ip6')
+ need_poweredoff = True
+
+ if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
+ network_changes.append('prefix6')
+ need_poweredoff = True
+
+ if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway6')
+ need_poweredoff = True
+
+ config_changes_networks.append(network_changes)
+ # If this is a new VIF.
+ else:
+ if not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+ if network_type and network_type == "static" and network_ip and not network_netmask:
+ self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+ if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+ self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+ # Restart is needed if we are adding new network
+ # interface with IP/gateway parameters specified
+ # and custom agent is used.
+ if self.vm_params['customization_agent'] == "custom":
+ for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ if network_params.get(parameter):
+ need_poweredoff = True
+ break
+
+ if not vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+ # We need to place a new network interface right above the
+ # highest placed existing interface to maintain relative
+ # positions pairable with network interface specifications
+ # in module params.
+ vif_device = str(int(vif_device_highest) + 1)
+
+ if vif_device not in vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+ vif_devices_allowed.remove(vif_device)
+ vif_device_highest = vif_device
+
+ # For new VIFs we only track their position.
+ config_new_networks.append((position, vif_device))
+
+ # We should append config_changes_networks to config_changes only
+ # if there is at least one changed network, else skip.
+ for network_change in config_changes_networks:
+ if network_change:
+ config_changes.append({"networks_changed": config_changes_networks})
+ break
+
+ if config_new_networks:
+ config_changes.append({"networks_new": config_new_networks})
+
+ config_changes_custom_params = []
+
+ if self.module.params['custom_params']:
+ for position in range(len(self.module.params['custom_params'])):
+ custom_param = self.module.params['custom_params'][position]
+
+ custom_param_key = custom_param['key']
+ custom_param_value = custom_param['value']
+
+ if custom_param_key not in self.vm_params:
+ self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+ if custom_param_value != self.vm_params[custom_param_key]:
+ # We only need to track custom param position.
+ config_changes_custom_params.append(position)
+
+ if config_changes_custom_params:
+ config_changes.append({"custom_params": config_changes_custom_params})
+
+ if need_poweredoff:
+ config_changes.append('need_poweredoff')
+
+ return config_changes
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+ """Parses disk size parameters and returns disk size in bytes.
+
+ This method tries to parse disk size module parameters. It fails
+ with an error message if size cannot be parsed.
+
+ Args:
+ disk_params (dict): A dictionary with disk parameters.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ int: disk size in bytes if disk size is successfully parsed or
+ None if no disk size parameters were found.
+ """
+ # There should be only a single size spec but we make a list of all size
+ # specs just in case. Priority is given to 'size' but if it is not found,
+ # we check for 'size_tb', 'size_gb', 'size_mb' etc. and use the first
+ # one found.
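+ # Illustration (hypothetical values): disks: [{ size: "10 GB" }] and
+ # disks: [{ size_gb: "10" }] both normalize to 10 * 1024**3 =
+ # 10737418240 bytes.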
+ disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+ if disk_size_spec:
+ try:
+ # size
+ if "size" in disk_size_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+ disk_size_m = size_regex.match(disk_params['size'])
+
+ if disk_size_m:
+ size = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
+ else:
+ raise ValueError
+ # size_tb, size_gb, size_mb, size_kb, size_b
+ else:
+ size = disk_params[disk_size_spec[0]]
+ unit = disk_size_spec[0].split('_')[-1]
+
+ if not unit:
+ unit = "b"
+ else:
+ unit = unit.lower()
+
+ if re.match(r'\d+\.\d+', size):
+ # We found float value in string, let's typecast it.
+ if unit == "b":
+ # If we found float but unit is bytes, we get the integer part only.
+ size = int(float(size))
+ else:
+ size = float(size)
+ else:
+ # We found int value in string, let's typecast it.
+ size = int(size)
+
+ if not size or size < 0:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
+
+ disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+ if unit in disk_units:
+ return int(size * (1024 ** disk_units[unit]))
+ else:
+ self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+ (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+ else:
+ return None
+
+ @staticmethod
+ def get_cdrom_type(vm_cdrom_params):
+ """Returns VM CD-ROM type."""
+ # TODO: implement support for detecting type 'host'. No server to test
+ # this on at the moment.
+ if vm_cdrom_params['empty']:
+ return "none"
+ else:
+ return "iso"
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'poweredon']),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ uuid=dict(type='str'),
+ template=dict(type='str', aliases=['template_src']),
+ template_uuid=dict(type='str'),
+ is_template=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ hardware=dict(
+ type='dict',
+ options=dict(
+ num_cpus=dict(type='int'),
+ num_cpu_cores_per_socket=dict(type='int'),
+ memory_mb=dict(type='int'),
+ ),
+ ),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_tb=dict(type='str'),
+ size_gb=dict(type='str'),
+ size_mb=dict(type='str'),
+ size_kb=dict(type='str'),
+ size_b=dict(type='str'),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ sr=dict(type='str'),
+ sr_uuid=dict(type='str'),
+ ),
+ aliases=['disk'],
+ mutually_exclusive=[
+ ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+ ['sr', 'sr_uuid'],
+ ],
+ ),
+ cdrom=dict(
+ type='dict',
+ options=dict(
+ type=dict(type='str', choices=['none', 'iso']),
+ iso_name=dict(type='str'),
+ ),
+ required_if=[
+ ['type', 'iso', ['iso_name']],
+ ],
+ ),
+ networks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', aliases=['name_label']),
+ mac=dict(type='str'),
+ type=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip6=dict(type='str'),
+ gateway6=dict(type='str'),
+ ),
+ aliases=['network'],
+ required_if=[
+ ['type', 'static', ['ip']],
+ ['type6', 'static', ['ip6']],
+ ],
+ ),
+ home_server=dict(type='str'),
+ custom_params=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ key=dict(type='str', required=True),
+ value=dict(type='raw', required=True),
+ ),
+ ),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ linked_clone=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ mutually_exclusive=[
+ ['template', 'template_uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ vm = XenServerVM(module)
+
+ # Find existing VM
+ if vm.exists():
+ if module.params['state'] == "absent":
+ vm.destroy()
+ result['changed'] = True
+ elif module.params['state'] == "present":
+ config_changes = vm.reconfigure()
+
+ if config_changes:
+ result['changed'] = True
+
+ # Make new disk and network changes more user-friendly
+ # and informative.
+ for change in config_changes:
+ if isinstance(change, dict):
+ if change.get('disks_new'):
+ disks_new = []
+
+ for position, userdevice in change['disks_new']:
+ disk_new_params = {"position": position, "vbd_userdevice": userdevice}
+ disk_params = module.params['disks'][position]
+
+ for k in disk_params.keys():
+ if disk_params[k] is not None:
+ disk_new_params[k] = disk_params[k]
+
+ disks_new.append(disk_new_params)
+
+ if disks_new:
+ change['disks_new'] = disks_new
+
+ elif change.get('networks_new'):
+ networks_new = []
+
+ for position, device in change['networks_new']:
+ network_new_params = {"position": position, "vif_device": device}
+ network_params = module.params['networks'][position]
+
+ for k in network_params.keys():
+ if network_params[k] is not None:
+ network_new_params[k] = network_params[k]
+
+ networks_new.append(network_new_params)
+
+ if networks_new:
+ change['networks_new'] = networks_new
+
+ result['changes'] = config_changes
+
+ elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
+ result['changed'] = vm.set_power_state(module.params['state'])
+ elif module.params['state'] != "absent":
+ vm.deploy()
+ result['changed'] = True
+
+ if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+ - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+ - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
new file mode 100644
index 00000000..d3260b6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from the XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+    - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ if module._name in ('xenserver_guest_facts', 'community.general.xenserver_guest_facts'):
+ module.deprecate("The 'xenserver_guest_facts' module has been renamed to 'xenserver_guest_info'",
+ version='3.0.0', collection_name='community.general') # was Ansible 2.13
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
new file mode 100644
index 00000000..4a195ff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_powerstate
+short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+  This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate into your OS certificate store or use C(validate_certs: no),
+  which requires the XenAPI library from the XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+    - If C(state) is set to a value other than C(present), then the VM is transitioned into the required state and facts are returned.
+    - If C(state) is set to C(present), then the VM is just checked for existence and facts are returned.
+ type: str
+ default: present
+ choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
+ name:
+ description:
+ - Name of the VM to manage.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: no
+ state_change_timeout:
+ description:
+ - 'By default, module will wait indefinitely for VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
+    - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+    - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+
+'''
+
+EXAMPLES = r'''
+- name: Power on VM
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: powered-on
+ delegate_to: localhost
+ register: facts
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Set VM power state.
+ if module.params['state'] != "present":
+ result['changed'] = vm.set_power_state(module.params['state'])
+
+ if module.params['wait_for_ip_address']:
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py
new file mode 100644
index 00000000..8d0700ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfconf.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# (c) 2017, Joseph Benden <joe@benden.us>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf
+author:
+ - "Joseph Benden (@jbenden)"
+ - "Alexei Znamensky (@russoz)"
+short_description: Edit XFCE4 Configurations
+description:
+ - This module allows for the manipulation of Xfce 4 Configuration via
+ xfconf-query. Please see the xfconf-query(1) man pages for more details.
+options:
+ channel:
+ description:
+      - An Xfconf preference channel is a top-level tree key inside the
+        Xfconf repository that corresponds to the location where all
+        application properties/keys are stored. See man xfconf-query(1)
+ required: yes
+ type: str
+ property:
+ description:
+      - An Xfce preference key is an element in the Xfconf repository
+ that corresponds to an application preference. See man xfconf-query(1)
+ required: yes
+ type: str
+ value:
+ description:
+ - Preference properties typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". For array mode, use a list of values. See man xfconf-query(1)
+ type: list
+ elements: raw
+ value_type:
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ For array mode, use a list of types.
+ type: list
+ elements: str
+ choices: [ int, uint, bool, float, double, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the property/value.
+ choices: [ get, present, absent ]
+ default: "present"
+ force_array:
+ description:
+      - Force array even if only one element is given
+ type: bool
+ default: 'no'
+ aliases: ['array']
+ version_added: 1.0.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+ xfconf:
+ channel: "xsettings"
+ property: "/Xft/DPI"
+ value_type: "int"
+ value: "192"
+
+- name: Set workspace names (4)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main']
+ force_array: yes
+"""
+
+RETURN = '''
+ channel:
+ description: The channel specified in the module parameters
+ returned: success
+ type: str
+ sample: "xsettings"
+ property:
+ description: The property specified in the module parameters
+ returned: success
+ type: str
+ sample: "/Xft/DPI"
+ value_type:
+ description:
+      - The type of the value that was changed (C(none) for C(get) and C(absent)
+        states). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"int" or ["str", "str", "str"]'
+ value:
+ description:
+ - The value of the preference key after executing the module. Either a
+ single string value or a list of strings for array types.
+ returned: success
+ type: string or list of strings
+ sample: '"192" or ["orange", "yellow", "violet"]'
+ previous_value:
+ description:
+ - The value of the preference key before executing the module (C(none) for
+ C(get) state). Either a single string value or a list of strings for array
+ types.
+ returned: success
+ type: string or list of strings
+ sample: '"96" or ["red", "blue", "green"]'
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ModuleHelper, CmdMixin, StateMixin, ArgFormat
+)
+
+
+def fix_bool(value):
+ vl = value.lower()
+ return vl if vl in ("true", "false") else value
+
+
+@ArgFormat.stars_deco(1)
+def values_fmt(values, value_types):
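+    # Interleaves one '--type <type> --set <value>' pair per element, e.g.
+    # values_fmt(['1', 'true'], ['int', 'bool']) ->
+    #     ['--type', 'int', '--set', '1', '--type', 'bool', '--set', 'true']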
+ result = []
+ for value, value_type in zip(values, value_types):
+ if value_type == 'bool':
+ value = fix_bool(value)
+ result.append('--type')
+ result.append('{0}'.format(value_type))
+ result.append('--set')
+ result.append('{0}'.format(value))
+ return result
+
+
+class XFConfException(Exception):
+ pass
+
+
+class XFConfProperty(CmdMixin, StateMixin, ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(default="present",
+ choices=("present", "get", "absent"),
+ type='str'),
+ channel=dict(required=True, type='str'),
+ property=dict(required=True, type='str'),
+ value_type=dict(required=False, type='list',
+ elements='str', choices=('int', 'uint', 'bool', 'float', 'double', 'string')),
+ value=dict(required=False, type='list', elements='raw'),
+ force_array=dict(default=False, type='bool', aliases=['array']),
+ ),
+ required_if=[('state', 'present', ['value', 'value_type'])],
+ required_together=[('value', 'value_type')],
+ supports_check_mode=True,
+ )
+
+ facts_name = "xfconf"
+ default_state = 'present'
+ command = 'xfconf-query'
+ command_args_formats = dict(
+ channel=dict(fmt=('--channel', '{0}'),),
+ property=dict(fmt=('--property', '{0}'),),
+ is_array=dict(fmt="--force-array", style=ArgFormat.BOOLEAN),
+ reset=dict(fmt="--reset", style=ArgFormat.BOOLEAN),
+ create=dict(fmt="--create", style=ArgFormat.BOOLEAN),
+ values_and_types=dict(fmt=values_fmt)
+ )
+
+ def update_xfconf_output(self, **kwargs):
+ self.update_output(**kwargs)
+ self.update_facts(**kwargs)
+
+ def __init_module__(self):
+ self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.module.params['property'],
+ self.module.params['channel'])
+ self.vars.previous_value = self._get()
+ self.update_xfconf_output(property=self.module.params['property'],
+ channel=self.module.params['channel'],
+ previous_value=None)
+
+ def process_command_output(self, rc, out, err):
+ if err.rstrip() == self.does_not:
+ return None
+ if rc or len(err):
+ raise XFConfException('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
+ result.pop(0)
+ result.pop(0)
+
+ return result
+
+ @property
+ def changed(self):
+ if self.vars.previous_value is None:
+ return self.vars.value is not None
+ elif self.vars.value is None:
+ return self.vars.previous_value is not None
+ else:
+ return set(self.vars.previous_value) != set(self.vars.value)
+
+ def _get(self):
+ return self.run_command(params=('channel', 'property'))
+
+ def state_get(self):
+ self.vars.value = self.vars.previous_value
+ self.update_xfconf_output(value=self.vars.value)
+
+ def state_absent(self):
+ self.vars.value = None
+ self.run_command(params=('channel', 'property', 'reset'), extra_params={"reset": True})
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=None)
+
+ def state_present(self):
+ # stringify all values - in the CLI they will all be happy strings anyway
+ # and by doing this here the rest of the code can be agnostic to it
+ self.vars.value = [str(v) for v in self.module.params['value']]
+ value_type = self.module.params['value_type']
+
+ values_len = len(self.vars.value)
+ types_len = len(value_type)
+
+ if types_len == 1:
+ # use one single type for the entire list
+ value_type = value_type * values_len
+ elif types_len != values_len:
+ # or complain if lists' lengths are different
+ raise XFConfException('Number of elements in "value" and "value_type" must be the same')
+
+ # fix boolean values
+ self.vars.value = [fix_bool(v[0]) if v[1] == 'bool' else v[0] for v in zip(self.vars.value, value_type)]
+
+ # calculates if it is an array
+ self.vars.is_array = \
+ bool(self.module.params['force_array']) or \
+ isinstance(self.vars.previous_value, list) or \
+ values_len > 1
+
+ params = ['channel', 'property', 'create']
+ if self.vars.is_array:
+ params.append('is_array')
+ params.append('values_and_types')
+
+ extra_params = dict(values_and_types=(self.vars.value, value_type))
+ extra_params['create'] = True
+ extra_params['is_array'] = self.vars.is_array
+
+ if not self.module.check_mode:
+ self.run_command(params=params, extra_params=extra_params)
+
+ if not self.vars.is_array:
+ self.vars.value = self.vars.value[0]
+ value_type = value_type[0]
+
+ self.update_xfconf_output(previous_value=self.vars.previous_value,
+ value=self.vars.value,
+ type=value_type)
+
+
+def main():
+ xfconf = XFConfProperty()
+ xfconf.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py
new file mode 100644
index 00000000..907f1bae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xfs_quota.py
@@ -0,0 +1,426 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
+# Copyright: (c) 2018, William Leemans <willie@elaba.net>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xfs_quota
+short_description: Manage quotas on XFS filesystems
+description:
+ - Configure quotas on XFS filesystems.
+  - Before using this module, /etc/projects and /etc/projid need to be configured.
+author:
+- William Leemans (@bushvin)
+options:
+ type:
+ description:
+ - The XFS quota type.
+ type: str
+ required: true
+ choices:
+ - user
+ - group
+ - project
+ name:
+ description:
+ - The name of the user, group or project to apply the quota to, if other than default.
+ type: str
+ mountpoint:
+ description:
+ - The mount point on which to apply the quotas.
+ type: str
+ required: true
+ bhard:
+ description:
+ - Hard blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ bsoft:
+ description:
+ - Soft blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ ihard:
+ description:
+ - Hard inodes quota limit.
+ type: int
+ isoft:
+ description:
+ - Soft inodes quota limit.
+ type: int
+ rtbhard:
+ description:
+ - Hard realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ rtbsoft:
+ description:
+ - Soft realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ state:
+ description:
+ - Whether to apply the limits or remove them.
+    - When removing limits, they are set to 0, not actually removed.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+
+requirements:
+ - xfsprogs
+'''
+
+EXAMPLES = r'''
+- name: Set default project soft and hard limit on /opt of 1g
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ bsoft: 1g
+ bhard: 1g
+ state: present
+
+- name: Remove the default limits on /opt
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+ community.general.xfs_quota:
+ type: user
+ mountpoint: /home
+ isoft: 1024
+ ihard: 2048
+
+'''
+
+RETURN = r'''
+bhard:
+ description: the current bhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+bsoft:
+ description: the current bsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+ihard:
+  description: the current ihard setting as a number of inodes
+ returned: always
+ type: int
+ sample: 100
+isoft:
+  description: the current isoft setting as a number of inodes
+ returned: always
+ type: int
+ sample: 100
+rtbhard:
+ description: the current rtbhard setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+rtbsoft:
+ description: the current rtbsoft setting in bytes
+ returned: always
+ type: int
+ sample: 1024
+'''
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bhard=dict(type='str'),
+ bsoft=dict(type='str'),
+ ihard=dict(type='int'),
+ isoft=dict(type='int'),
+ mountpoint=dict(type='str', required=True),
+ name=dict(type='str'),
+ rtbhard=dict(type='str'),
+ rtbsoft=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ type=dict(type='str', required=True, choices=['group', 'project', 'user'])
+ ),
+ supports_check_mode=True,
+ )
+
+ quota_type = module.params['type']
+ name = module.params['name']
+ mountpoint = module.params['mountpoint']
+ bhard = module.params['bhard']
+ bsoft = module.params['bsoft']
+ ihard = module.params['ihard']
+ isoft = module.params['isoft']
+ rtbhard = module.params['rtbhard']
+ rtbsoft = module.params['rtbsoft']
+ state = module.params['state']
+
+ if bhard is not None:
+ bhard = human_to_bytes(bhard)
+
+ if bsoft is not None:
+ bsoft = human_to_bytes(bsoft)
+
+ if rtbhard is not None:
+ rtbhard = human_to_bytes(rtbhard)
+
+ if rtbsoft is not None:
+ rtbsoft = human_to_bytes(rtbsoft)
+
+ result = dict(
+ changed=False,
+ )
+
+ if not os.path.ismount(mountpoint):
+ module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+ mp = get_fs_by_mountpoint(mountpoint)
+ if mp is None:
+ module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
+
+ if quota_type == 'user':
+ type_arg = '-u'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \
+ 'qnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
+ )
+ try:
+ pwd.getpwnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'group':
+ type_arg = '-g'
+ quota_default = 'root'
+ if name is None:
+ name = quota_default
+
+ if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
+ module.fail_json(
+ msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
+ )
+ try:
+ grp.getgrnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == 'project':
+ type_arg = '-p'
+ quota_default = '#0'
+ if name is None:
+ name = quota_default
+
+ if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
+ module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projects'):
+ module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+ if name != quota_default and not os.path.isfile('/etc/projid'):
+ module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+ if name != quota_default and name is not None and get_project_id(name) is None:
+ module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
+
+ prj_set = True
+ if name != quota_default:
+ cmd = 'project %s' % name
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not get project state.', **result)
+ else:
+ for line in stdout.split('\n'):
+ if "Project Id '%s' - is not set." in line:
+ prj_set = False
+ break
+
+ if not prj_set and not module.check_mode:
+ cmd = 'project -s'
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+            module.fail_json(msg='Could not setup project quotas.', **result)
+
+ result['changed'] = True
+
+ elif not prj_set and module.check_mode:
+ result['changed'] = True
+
+ # Set limits
+ if state == 'absent':
+ bhard = 0
+ bsoft = 0
+ ihard = 0
+ isoft = 0
+ rtbhard = 0
+ rtbsoft = 0
+
+ current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
+ current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
+ current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
+
+ result['xfs_quota'] = dict(
+ bsoft=current_bsoft,
+ bhard=current_bhard,
+ isoft=current_isoft,
+ ihard=current_ihard,
+ rtbsoft=current_rtbsoft,
+ rtbhard=current_rtbhard
+ )
+
+ limit = []
+ if bsoft is not None and int(bsoft) != current_bsoft:
+ limit.append('bsoft=%s' % bsoft)
+ result['bsoft'] = int(bsoft)
+
+ if bhard is not None and int(bhard) != current_bhard:
+ limit.append('bhard=%s' % bhard)
+ result['bhard'] = int(bhard)
+
+ if isoft is not None and isoft != current_isoft:
+ limit.append('isoft=%s' % isoft)
+ result['isoft'] = isoft
+
+ if ihard is not None and ihard != current_ihard:
+ limit.append('ihard=%s' % ihard)
+ result['ihard'] = ihard
+
+ if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+ limit.append('rtbsoft=%s' % rtbsoft)
+ result['rtbsoft'] = int(rtbsoft)
+
+ if rtbhard is not None and int(rtbhard) != current_rtbhard:
+ limit.append('rtbhard=%s' % rtbhard)
+ result['rtbhard'] = int(rtbhard)
+
+ if len(limit) > 0 and not module.check_mode:
+ if name == quota_default:
+ cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
+ else:
+ cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
+
+ rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
+ if rc != 0:
+ result['cmd'] = cmd
+ result['rc'] = rc
+ result['stdout'] = stdout
+ result['stderr'] = stderr
+ module.fail_json(msg='Could not set limits.', **result)
+
+ result['changed'] = True
+
+ elif len(limit) > 0 and module.check_mode:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def quota_report(module, mountpoint, name, quota_type, used_type):
+ soft = None
+ hard = None
+
+ if quota_type == 'project':
+ type_arg = '-p'
+ elif quota_type == 'user':
+ type_arg = '-u'
+ elif quota_type == 'group':
+ type_arg = '-g'
+
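+    # xfs_quota reports block and realtime block usage in 1 KiB units, so a
+    # factor of 1024 converts the reported limits to bytes; inode limits are
+    # plain counts (factor 1).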
+ if used_type == 'b':
+ used_arg = '-b'
+ used_name = 'blocks'
+ factor = 1024
+ elif used_type == 'i':
+ used_arg = '-i'
+ used_name = 'inodes'
+ factor = 1
+ elif used_type == 'rtb':
+ used_arg = '-r'
+ used_name = 'realtime blocks'
+ factor = 1024
+
+ rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
+
+ if rc != 0:
+ result = dict(
+ changed=False,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ module.fail_json(msg='Could not get quota report for %s.' % used_name, **result)
+
+ for line in stdout.split('\n'):
+ line = line.strip().split()
+ if len(line) > 3 and line[0] == name:
+ soft = int(line[2]) * factor
+ hard = int(line[3]) * factor
+ break
+
+ return soft, hard
+
+
+def exec_quota(module, cmd, mountpoint):
+ cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
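+    # Resulting command looks like, for example:
+    #   ['xfs_quota', '-x', '-c', 'report -u -b', '/home']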
+ (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
+ if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \
+ rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'):
+ module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
+
+ return rc, stdout, stderr
+
+
+def get_fs_by_mountpoint(mountpoint):
+ mpr = None
+ with open('/proc/mounts', 'r') as s:
+ for line in s.readlines():
+ mp = line.strip().split()
+ if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs':
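+                # A /proc/mounts line such as "/dev/sdb1 /opt xfs rw,prjquota 0 0"
+                # becomes {'spec': '/dev/sdb1', 'file': '/opt', 'vfstype': 'xfs', ...},
+                # with 'mntopts' split into a list below.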
+ mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp))
+ mpr['mntopts'] = mpr['mntopts'].split(',')
+ break
+ return mpr
+
+
+def get_project_id(name):
+ prjid = None
+ with open('/etc/projid', 'r') as s:
+ for line in s.readlines():
+ line = line.strip().partition(':')
+ if line[0] == name:
+ prjid = line[2]
+ break
+
+ return prjid
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py
new file mode 100644
index 00000000..1733e657
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/xml.py
@@ -0,0 +1,958 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2014, Red Hat, Inc.
+# Copyright: (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright: (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+- A CRUD-like interface to managing bits of XML files.
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless C(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless C(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter C(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML Equivalent, C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+      (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: list
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given C(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in C(add_children).
+ - This parameter requires C(xpath) to be set.
+ type: list
+ count:
+ description:
+ - Search for a given C(xpath) and provide the count of any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ print_match:
+ description:
+ - Search for a given C(xpath) and print out any matches.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: no
+ content:
+ description:
+ - Search for a given C(xpath) and get content.
+ - This parameter requires C(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for C(add_children) and C(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: no
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: no
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+      (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given C(xpath).
+ - Child elements must be given in a list and each item may be either a string
+      (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires C(xpath) to be set.
+ type: bool
+ default: no
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the C(namespaces) parameter, see the examples.
+- Namespace prefixes should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: hits
+
+- ansible.builtin.debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ ansible.builtin.debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ children: []
+
+# In case of namespaces, like in below XML, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ community.general.xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+  sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created
+ type: str
+ returned: when backup=yes
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+from io import BytesIO
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils._text import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
+# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
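+# Illustrative xpaths matched by the regexes above:
+#   /business/rating                            -> _RE_SPLITSIMPLELAST
+#   /business/rating/text()='10'                -> _RE_SPLITSIMPLELASTEQVALUE
+#   /business/rating/@subjective                -> _RE_SPLITSIMPLEATTRLAST
+#   /business/rating/@subjective='true'         -> _RE_SPLITSIMPLEATTRLASTEQVALUE
+#   /business/beers/beer[text()="Schlitz"]      -> _RE_SPLITSUBLAST
+#   /business/beers/beer[1]/text()='Schlitz'    -> _RE_SPLITONLYEQVALUE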
+
+
+def has_changed(doc):
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ changed = False
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ changed = True
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the
+ first xpath hit, with insertafter, it is inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
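+    # e.g. nsnameToClark("x:bar", {"x": "http://x.test"}) -> "{http://x.test}bar"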
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+ # NOTE: Modifying a string is not considered a change !
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list'),
+ set_children=dict(type='list'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ # TODO: Reinstate this in community.general 2.0.0 when we have deprecated the incorrect use below
+ # attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Report wrongly used attribute parameter when using content=attribute
+ # TODO: Remove this in community.general 2.0.0 (and reinstate strict parameter test above) and remove the integration test example
+ if content == 'attribute' and attribute is not None:
+ module.deprecate("Parameter 'attribute=%s' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry." % attribute,
+ version='2.0.0', collection_name='community.general') # was Ansible 2.12
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py
new file mode 100644
index 00000000..77489e24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/yarn.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017 David Gunter <david.gunter@tivix.com>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yarn
+short_description: Manage node.js packages with Yarn
+description:
+ - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/)
+author:
+ - "David Gunter (@verkaufer)"
+ - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)"
+options:
+ name:
+ type: str
+ description:
+ - The name of a node.js library to install.
+ - If omitted, all packages in package.json are installed.
+ - To globally install a local node.js library, prepend "file:" to the path of the node.js library.
+ required: false
+ path:
+ type: path
+ description:
+ - The base path where Node.js libraries will be installed.
+ - This is where the node_modules folder lives.
+ required: false
+ version:
+ type: str
+ description:
+ - The version of the library to be installed.
+ - Must be in semver format. If "latest" is desired, use the "state" arg instead.
+ required: false
+ global:
+ description:
+ - Install the node.js library globally
+ required: false
+ default: no
+ type: bool
+ executable:
+ type: path
+ description:
+ - The executable location for yarn.
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ type: bool
+ default: no
+ production:
+ description:
+ - Install dependencies in production mode.
+ - Yarn will ignore any dependencies under devDependencies in package.json
+ required: false
+ type: bool
+ default: no
+ registry:
+ type: str
+ description:
+ - The registry to install modules from.
+ required: false
+ state:
+ type: str
+ description:
+ - Installation state of the named node.js library.
+ - If absent is selected, a name option must be provided.
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - Yarn installed in bin path (typically /usr/local/bin)
+'''
+
+EXAMPLES = '''
+- name: Install "imagemin" node.js package.
+ community.general.yarn:
+ name: imagemin
+ path: /app/location
+
+- name: Install "imagemin" node.js package on version 5.3.1
+ community.general.yarn:
+ name: imagemin
+ version: '5.3.1'
+ path: /app/location
+
+- name: Install "imagemin" node.js package globally.
+ community.general.yarn:
+ name: imagemin
+ global: yes
+
+- name: Remove the globally-installed package "imagemin".
+ community.general.yarn:
+ name: imagemin
+ global: yes
+ state: absent
+
+- name: Install "imagemin" node.js package from custom registry.
+ community.general.yarn:
+ name: imagemin
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.yarn:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.yarn:
+ path: /app/location
+ state: latest
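+
+# Illustrative example (not part of the original module docs): install a local
+# node.js library globally by prefixing its path with "file:", as described
+# under the "name" option. The path below is hypothetical.
+- name: Install a local node.js library globally.
+ community.general.yarn:
+ name: 'file:/app/local-library'
+ global: yes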
+'''
+
+RETURN = '''
+changed:
+ description: Whether Yarn changed any package data
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Provides an error message if Yarn syntax was incorrect
+ returned: failure
+ type: str
+ sample: "Package must be explicitly named when uninstalling."
+invocation:
+ description: Parameters and values used during execution
+ returned: success
+ type: dict
+ sample: {
+ "module_args": {
+ "executable": null,
+ "globally": false,
+ "ignore_scripts": false,
+ "name": null,
+ "path": "/some/path/folder",
+ "production": false,
+ "registry": null,
+ "state": "present",
+ "version": null
+ }
+ }
+out:
+ description: Output generated from Yarn with emojis removed.
+ returned: always
+ type: str
+ sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
+ Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+'''
+
+import os
+import re
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Yarn(object):
+
+ DEFAULT_GLOBAL_INSTALLATION_PATH = '~/.config/yarn/global'
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.globally = kwargs['globally']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+
+ # Specify a version of package if version arg passed in
+ self.name_version = None
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('yarn', True)]
+
+ if kwargs['version'] and self.name is not None:
+ self.name_version = self.name + '@' + str(self.version)
+ elif self.name is not None:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
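+ # Only execute when not in check mode, unless the caller marks the command as
+ # safe to run in check mode (read-only queries such as `yarn list`).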
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+
+ if self.globally:
+ # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
+ args.insert(0, 'global')
+
+ cmd = self.executable + args
+
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # always run Yarn without emojis when called via Ansible
+ cmd.append('--no-emoji')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path and not self.globally:
+ if not os.path.exists(self.path):
+ # Create the directory if it does not exist.
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+ cwd = self.path
+
+ if not os.path.isfile(os.path.join(self.path, 'package.json')):
+ self.module.fail_json(msg="Package.json does not exist in provided path.")
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+ return (None, None)
+
+ def list(self):
+ cmd = ['list', '--depth=0', '--json']
+
+ installed = list()
+ missing = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ missing.append(self.name)
+ return installed, missing
+
+ result, error = self._exec(cmd, True, False)
+
+ if error:
+ self.module.fail_json(msg=error)
+
+ data = json.loads(result)
+ try:
+ dependencies = data['data']['trees']
+ except KeyError:
+ missing.append(self.name)
+ return installed, missing
+
+ for dep in dependencies:
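+ # rsplit on the last '@' so scoped package names such as '@scope/pkg@1.0.0' keep their scope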
+ name, version = dep['name'].rsplit('@', 1)
+ installed.append(name)
+
+ if self.name not in installed:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ if self.name_version:
+ # Yarn has a separate command for installing packages by name...
+ return self._exec(['add', self.name_version])
+ # And one for installing all packages in package.json
+ return self._exec(['install', '--non-interactive'])
+
+ def update(self):
+ return self._exec(['upgrade', '--latest'])
+
+ def uninstall(self):
+ return self._exec(['remove', self.name])
+
+ def list_outdated(self):
+ outdated = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ return outdated
+
+ cmd_result, err = self._exec(['outdated', '--json'], True, False)
+ if err:
+ self.module.fail_json(msg=err)
+
+ if not cmd_result:
+ return outdated
+
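+ # `yarn outdated --json` emits one JSON document per line; the table of outdated
+ # packages is expected on the second line (the first line is an informational record).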
+ outdated_packages_data = cmd_result.splitlines()[1]
+
+ data = json.loads(outdated_packages_data)
+
+ try:
+ outdated_dependencies = data['data']['body']
+ except KeyError:
+ return outdated
+
+ for dep in outdated_dependencies:
+ # Outdated dependencies returned as a list of lists, where
+ # item at index 0 is the name of the dependency
+ outdated.append(dep[0])
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ globally = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ # When installing globally, users should not be able to define a path for installation.
+ # Require a path if global is False, though!
+ if path is None and globally is False:
+ module.fail_json(msg='Path must be specified when not using global arg')
+ elif path and globally is True:
+ module.fail_json(msg='Cannot specify path if doing global installation')
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='Package must be explicitly named when uninstalling.')
+ if state == 'latest':
+ version = 'latest'
+
+ # When installing globally, use the defined path for global node_modules
+ if globally:
+ path = Yarn.DEFAULT_GLOBAL_INSTALLATION_PATH
+
+ yarn = Yarn(module,
+ name=name,
+ path=path,
+ version=version,
+ globally=globally,
+ production=production,
+ executable=executable,
+ registry=registry,
+ ignore_scripts=ignore_scripts)
+
+ changed = False
+ out = ''
+ err = ''
+ if state == 'present':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+
+ elif state == 'latest':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ outdated = yarn.list_outdated()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+ if len(outdated):
+ changed = True
+ out, err = yarn.update()
+ else:
+ # state == absent
+ installed, missing = yarn.list()
+ if name in installed:
+ changed = True
+ out, err = yarn.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py
new file mode 100644
index 00000000..6b2260fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+ - Manages ZFS file systems, volumes, clones and snapshots
+options:
+ name:
+ description:
+ - File system, snapshot or volume name, e.g. C(rpool/myfs).
+ required: true
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: [ absent, present ]
+ required: true
+ origin:
+ description:
+ - Snapshot from which to create a clone.
+ extra_zfs_properties:
+ description:
+ - A dictionary of zfs properties to be set.
+ - See the zfs(8) man page for more information.
+author:
+- Johan Wiren (@johanwiren)
+'''
+
+EXAMPLES = '''
+- name: Create a new file system called myfs in pool rpool with the setuid property turned off
+ community.general.zfs:
+ name: rpool/myfs
+ state: present
+ extra_zfs_properties:
+ setuid: off
+
+- name: Create a new volume called myvol in pool rpool.
+ community.general.zfs:
+ name: rpool/myvol
+ state: present
+ extra_zfs_properties:
+ volsize: 10M
+
+- name: Create a snapshot of rpool/myfs file system.
+ community.general.zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
+
+- name: Create a new file system called myfs2 with snapdir enabled
+ community.general.zfs:
+ name: rpool/myfs2
+ state: present
+ extra_zfs_properties:
+ snapdir: enabled
+
+- name: Create a new file system by cloning a snapshot
+ community.general.zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
+
+- name: Destroy a filesystem
+ community.general.zfs:
+ name: rpool/myfs
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0].split('@')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
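+ # OpenZFS pools report their version as '-' or 5000 (feature flags replaced the
+ # numeric pool version), which is what the `zpool get version` output is checked for below.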
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ origin = self.module.params.get('origin', None)
+ cmd = [self.zfs_cmd]
+
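+ # Pick the zfs subcommand from the target: names containing '@' are snapshots,
+ # an origin implies a clone, anything else is a plain create.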
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if properties:
+ for prop, value in properties.items():
+ if prop == 'volsize':
+ cmd += ['-V', value]
+ elif prop == 'volblocksize':
+ cmd += ['-b', value]
+ else:
+ cmd += ['-o', '%s="%s"' % (prop, value)]
+ if origin and action == 'clone':
+ cmd.append(origin)
+ cmd.append(self.name)
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ (rc, out, err) = self.module.run_command(' '.join(cmd))
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ self.changed = True
+ else:
+ self.module.fail_json(msg=err)
+
+ def set_properties_if_changed(self):
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.items():
+ if current_properties.get(prop, None) != value:
+ self.set_property(prop, value)
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H']
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(" ".join(cmd))
+ properties = dict()
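+ # `zfs get -H` prints '<name>\t<property>\t<value>\t<source>' per line; only
+ # properties with a 'local' source are treated as explicitly set.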
+ for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
+ if source == 'local':
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ origin=dict(type='str', default=None),
+ extra_zfs_properties=dict(type='dict', default={}),
+ ),
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+
+ if module.params.get('origin') and '@' in name:
+ module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+ # Reverse the boolification of zfs properties
+ for prop, value in module.params['extra_zfs_properties'].items():
+ if isinstance(value, bool):
+ if value is True:
+ module.params['extra_zfs_properties'][prop] = 'on'
+ else:
+ module.params['extra_zfs_properties'][prop] = 'off'
+ else:
+ module.params['extra_zfs_properties'][prop] = value
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+ if state == 'present':
+ if zfs.exists():
+ zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
new file mode 100644
index 00000000..223d7f72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+ - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+ operations normally restricted to the superuser.
+ - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+ - "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
+ versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+options:
+ name:
+ description:
+ - File system or volume name e.g. C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to allow (C(present)), or unallow (C(absent)) a permission.
+ - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+ - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ choices: [ absent, present ]
+ default: present
+ users:
+ description:
+ - List of users to whom permission(s) should be granted.
+ type: list
+ groups:
+ description:
+ - List of groups to whom permission(s) should be granted.
+ type: list
+ everyone:
+ description:
+ - Apply permissions to everyone.
+ type: bool
+ default: no
+ permissions:
+ description:
+ - The list of permission(s) to delegate (required if C(state) is C(present)).
+ type: list
+ choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
+ local:
+ description:
+ - Apply permissions to C(name) locally (C(zfs allow -l)).
+ type: bool
+ descendents:
+ description:
+ - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ type: bool
+ recursive:
+ description:
+ - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool
+ default: no
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: adm
+ permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+ community.general.zfs_delegate_admin:
+ name: rpool/myvol
+ groups: backup
+ everyone: yes
+ permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: foo,bar
+ permissions: send,receive
+ local: yes
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ everyone: yes
+ state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params.get('name')
+ self.state = module.params.get('state')
+ self.users = module.params.get('users')
+ self.groups = module.params.get('groups')
+ self.everyone = module.params.get('everyone')
+ self.perms = module.params.get('permissions')
+ self.scope = None
+ self.changed = False
+ self.initial_perms = None
+ self.subcommand = 'allow'
+ self.recursive_opt = []
+ self.run_method = self.update
+
+ self.setup(module)
+
+ def setup(self, module):
+ """ Validate params and set up for run.
+ """
+ if self.state == 'absent':
+ self.subcommand = 'unallow'
+ if module.params.get('recursive'):
+ self.recursive_opt = ['-r']
+
+ local = module.params.get('local')
+ descendents = module.params.get('descendents')
+ if (local and descendents) or (not local and not descendents):
+ self.scope = 'ld'
+ elif local:
+ self.scope = 'l'
+ elif descendents:
+ self.scope = 'd'
+ else:
+ self.module.fail_json(msg='Impossible value for local and descendents')
+
+ if not (self.users or self.groups or self.everyone):
+ if self.state == 'present':
+ self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+ elif self.state == 'absent':
+ self.run_method = self.clear
+ # ansible ensures the else cannot happen here
+
+ self.zfs_path = module.get_bin_path('zfs', True)
+
+ @property
+ def current_perms(self):
+ """ Parse the output of `zfs allow <name>` to retrieve current permissions.
+ """
+ out = self.run_zfs_raw(subcommand='allow')
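+ # `zfs allow <name>` groups its output under headings such as 'Local+Descendent permissions:'
+ # with tab-indented 'user', 'group' and 'everyone' lines; the parsing below mirrors that shape.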
+ perms = {
+ 'l': {'u': {}, 'g': {}, 'e': []},
+ 'd': {'u': {}, 'g': {}, 'e': []},
+ 'ld': {'u': {}, 'g': {}, 'e': []},
+ }
+ linemap = {
+ 'Local permissions:': 'l',
+ 'Descendent permissions:': 'd',
+ 'Local+Descendent permissions:': 'ld',
+ }
+ scope = None
+ for line in out.splitlines():
+ scope = linemap.get(line, scope)
+ if not scope:
+ continue
+ try:
+ if line.startswith('\tuser ') or line.startswith('\tgroup '):
+ ent_type, ent, cur_perms = line.split()
+ perms[scope][ent_type[0]][ent] = cur_perms.split(',')
+ elif line.startswith('\teveryone '):
+ perms[scope]['e'] = line.split()[1].split(',')
+ except ValueError:
+ self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+ return perms
+
+ def run_zfs_raw(self, subcommand=None, args=None):
+ """ Run a raw zfs command, fail on error.
+ """
+ cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc:
+ self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+ return out
+
+ def run_zfs(self, args):
+ """ Run zfs allow/unallow with appropriate options as per module arguments.
+ """
+ args = self.recursive_opt + ['-' + self.scope] + args
+ if self.perms:
+ args.append(','.join(self.perms))
+ return self.run_zfs_raw(args=args)
+
+ def clear(self):
+ """ Called by run() to clear all permissions.
+ """
+ changed = False
+ stdout = ''
+ for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
+ for ent in self.initial_perms[scope][ent_type].keys():
+ stdout += self.run_zfs(['-%s' % ent_type, ent])
+ changed = True
+ for scope in ('ld', 'l', 'd'):
+ if self.initial_perms[scope]['e']:
+ stdout += self.run_zfs(['-e'])
+ changed = True
+ return (changed, stdout)
+
+ def update(self):
+ """ Update permissions as per module arguments.
+ """
+ stdout = ''
+ for ent_type, entities in (('u', self.users), ('g', self.groups)):
+ if entities:
+ stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+ if self.everyone:
+ stdout += self.run_zfs(['-e'])
+ return (self.initial_perms != self.current_perms, stdout)
+
+ def run(self):
+ """ Run an operation, return results for Ansible.
+ """
+ exit_args = {'state': self.state}
+ self.initial_perms = self.current_perms
+ exit_args['changed'], stdout = self.run_method()
+ if exit_args['changed']:
+ exit_args['msg'] = 'ZFS delegated admin permissions updated'
+ exit_args['stdout'] = stdout
+ self.module.exit_json(**exit_args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ users=dict(type='list'),
+ groups=dict(type='list'),
+ everyone=dict(type='bool', default=False),
+ permissions=dict(type='list',
+ choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote',
+ 'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share',
+ 'snapshot', 'unallow']),
+ local=dict(type='bool'),
+ descendents=dict(type='bool'),
+ recursive=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ required_if=[('state', 'present', ['permissions'])],
+ )
+ zfs_delegate_admin = ZfsDelegateAdmin(module)
+ zfs_delegate_admin.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py
new file mode 100644
index 00000000..e7719f68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zfs_facts.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zfs_facts
+short_description: Gather facts about ZFS datasets.
+description:
+ - Gather facts from ZFS dataset properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS dataset name.
+ required: yes
+ aliases: [ "ds", "dataset" ]
+ recurse:
+ description:
+ - Specifies if properties for any children should be recursively
+ displayed.
+ type: bool
+ default: 'no'
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: 'no'
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+ For more information about dataset properties, check zfs(1M) man page.
+ default: all
+ type:
+ description:
+ - Specifies which dataset types to display. Multiple values have to be
+ provided in comma-separated form.
+ choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
+ default: all
+ depth:
+ description:
+ - Specifies recursion depth.
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS dataset rpool/export/home
+ community.general.zfs_facts:
+ dataset: rpool/export/home
+
+- name: Report space usage on ZFS filesystems under data/home
+ community.general.zfs_facts:
+ name: data/home
+ recurse: yes
+ type: filesystem
+
+- ansible.builtin.debug:
+ msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
+ with_items: '{{ ansible_zfs_datasets }}'
+'''
+
+RETURN = '''
+name:
+ description: ZFS dataset name
+ returned: always
+ type: str
+ sample: rpool/var/spool
+parsable:
+ description: Whether parsable output is provided in machine-friendly format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+recurse:
+ description: Whether the ZFS dataset is queried recursively.
+ returned: if 'recurse' is set to True
+ type: bool
+ sample: True
+zfs_datasets:
+ description: ZFS dataset facts
+ returned: always
+ type: str
+ sample:
+ {
+ "aclinherit": "restricted",
+ "aclmode": "discard",
+ "atime": "on",
+ "available": "43.8G",
+ "canmount": "on",
+ "casesensitivity": "sensitive",
+ "checksum": "on",
+ "compression": "off",
+ "compressratio": "1.00x",
+ "copies": "1",
+ "creation": "Thu Jun 16 11:37 2016",
+ "dedup": "off",
+ "devices": "on",
+ "exec": "on",
+ "filesystem_count": "none",
+ "filesystem_limit": "none",
+ "logbias": "latency",
+ "logicalreferenced": "18.5K",
+ "logicalused": "3.45G",
+ "mlslabel": "none",
+ "mounted": "yes",
+ "mountpoint": "/rpool",
+ "name": "rpool",
+ "nbmand": "off",
+ "normalization": "none",
+ "org.openindiana.caiman:install": "ready",
+ "primarycache": "all",
+ "quota": "none",
+ "readonly": "off",
+ "recordsize": "128K",
+ "redundant_metadata": "all",
+ "refcompressratio": "1.00x",
+ "referenced": "29.5K",
+ "refquota": "none",
+ "refreservation": "none",
+ "reservation": "none",
+ "secondarycache": "all",
+ "setuid": "on",
+ "sharenfs": "off",
+ "sharesmb": "off",
+ "snapdir": "hidden",
+ "snapshot_count": "none",
+ "snapshot_limit": "none",
+ "sync": "standard",
+ "type": "filesystem",
+ "used": "4.41G",
+ "usedbychildren": "4.41G",
+ "usedbydataset": "29.5K",
+ "usedbyrefreservation": "0",
+ "usedbysnapshots": "0",
+ "utf8only": "off",
+ "version": "5",
+ "vscan": "off",
+ "written": "29.5K",
+ "xattr": "on",
+ "zoned": "off"
+ }
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
+
+
+class ZFSFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.recurse = module.params['recurse']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self.type = module.params['type']
+ self.depth = module.params['depth']
+
+ self._datasets = defaultdict(dict)
+ self.facts = []
+
+ def dataset_exists(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zfs')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ if self.recurse:
+ cmd.append('-r')
+ if int(self.depth) != 0:
+ cmd.append('-d')
+ cmd.append('%s' % self.depth)
+ if self.type:
+ cmd.append('-t')
+ cmd.append(self.type)
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
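+ # Each `zfs get -H -o name,property,value` line is '<dataset>\t<property>\t<value>';
+ # properties are grouped per dataset before being returned as facts.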
+ for line in out.splitlines():
+ dataset, property, value = line.split('\t')
+
+ self._datasets[dataset].update({property: value})
+
+ for k, v in iteritems(self._datasets):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_datasets': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
+ recurse=dict(required=False, default=False, type='bool'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
+ depth=dict(required=False, default=0, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ zfs_facts = ZFSFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zfs_facts.name
+
+ if zfs_facts.parsable:
+ result['parsable'] = zfs_facts.parsable
+
+ if zfs_facts.recurse:
+ result['recurse'] = zfs_facts.recurse
+
+ if zfs_facts.dataset_exists():
+ result['ansible_facts'] = zfs_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py
new file mode 100644
index 00000000..156a6376
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/znode.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: znode
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper
+description:
+ - Create, delete, retrieve, and update znodes using ZooKeeper.
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ value:
+ description:
+ - The value assigned to the znode.
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ choices: [ get, wait, list ]
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ choices: [ present, absent ]
+ timeout:
+ description:
+ - The amount of time, in seconds, to wait for a node to appear.
+ default: 300
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ type: bool
+ default: 'no'
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+'''
+
+EXAMPLES = """
+- name: Creating or updating a znode with a given value
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+
+- name: Getting the value and stat structure for a znode
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: get
+
+- name: Listing a particular znode's children
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /zookeeper
+ op: list
+
+- name: Waiting 20 seconds for a znode to appear at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: wait
+ timeout: 20
+
+- name: Deleting a znode at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ state: absent
+
+- name: Creating or updating a znode with a given value on a remote Zookeeper
+ community.general.znode:
+ hosts: 'my-zookeeper-node:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+ delegate_to: 127.0.0.1
+"""
+
+import time
+import traceback
+
+KAZOO_IMP_ERR = None
+try:
+ from kazoo.client import KazooClient
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_IMP_ERR = traceback.format_exc()
+ KAZOO_INSTALLED = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils._text import to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(required=False, default=None, type='str'),
+ op=dict(required=False, default=None, choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(required=False, default=300, type='int'),
+ recursive=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if isinstance(attr, (int, str)):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+ (current_value, zstat) = self.zk.get(path)
+ if value != current_value:
+ self.zk.set(path, to_bytes(value))
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, to_bytes(value), makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
+ lim = time.time() + timeout
+
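+ # Poll every `interval` seconds until the znode appears or the deadline passes.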
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py
new file mode 100644
index 00000000..728c0779
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zpool_facts.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zpool_facts
+short_description: Gather facts about ZFS pools.
+description:
+ - Gather facts from ZFS pool properties.
+author: Adam Števko (@xen0l)
+options:
+ name:
+ description:
+ - ZFS pool name.
+ aliases: [ "pool", "zpool" ]
+ required: false
+ parsable:
+ description:
+ - Specifies if property values should be displayed in machine
+ friendly format.
+ type: bool
+ default: False
+ required: false
+ properties:
+ description:
+ - Specifies which pool properties should be queried in comma-separated format.
+ For more information about pool properties, check zpool(1M) man page.
+ default: all
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS pool rpool
+ community.general.zpool_facts: pool=rpool
+
+- name: Gather space usage about all imported ZFS pools
+ community.general.zpool_facts: properties='free,size'
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
+ with_items: '{{ ansible_zfs_pools }}'
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary containing all the detailed information about the ZFS pool facts
+ returned: always
+ type: complex
+ contains:
+ ansible_zfs_pools:
+ description: ZFS pool facts
+ returned: always
+ type: str
+ sample:
+ {
+ "allocated": "3.46G",
+ "altroot": "-",
+ "autoexpand": "off",
+ "autoreplace": "off",
+ "bootfs": "rpool/ROOT/openindiana",
+ "cachefile": "-",
+ "capacity": "6%",
+ "comment": "-",
+ "dedupditto": "0",
+ "dedupratio": "1.00x",
+ "delegation": "on",
+ "expandsize": "-",
+ "failmode": "wait",
+ "feature@async_destroy": "enabled",
+ "feature@bookmarks": "enabled",
+ "feature@edonr": "enabled",
+ "feature@embedded_data": "active",
+ "feature@empty_bpobj": "active",
+ "feature@enabled_txg": "active",
+ "feature@extensible_dataset": "enabled",
+ "feature@filesystem_limits": "enabled",
+ "feature@hole_birth": "active",
+ "feature@large_blocks": "enabled",
+ "feature@lz4_compress": "active",
+ "feature@multi_vdev_crash_dump": "enabled",
+ "feature@sha512": "enabled",
+ "feature@skein": "enabled",
+ "feature@spacemap_histogram": "active",
+ "fragmentation": "3%",
+ "free": "46.3G",
+ "freeing": "0",
+ "guid": "15729052870819522408",
+ "health": "ONLINE",
+ "leaked": "0",
+ "listsnapshots": "off",
+ "name": "rpool",
+ "readonly": "off",
+ "size": "49.8G",
+ "version": "-"
+ }
+name:
+ description: ZFS pool name
+ returned: always
+ type: str
+ sample: rpool
+parsable:
+ description: Whether parsable output is provided in machine-friendly format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: True
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+
+ self._pools = defaultdict(dict)
+ self.facts = []
+
+ def pool_exists(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('list')
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zpool')]
+
+ cmd.append('get')
+ cmd.append('-H')
+ if self.parsable:
+ cmd.append('-p')
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ if self.name:
+ cmd.append(self.name)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
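+ # `zpool get -H -o name,property,value` prints '<pool>\t<property>\t<value>' per line;
+ # properties are grouped per pool before being returned as facts.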
+ for line in out.splitlines():
+ pool, property, value = line.split('\t')
+
+ self._pools[pool].update({property: value})
+
+ for k, v in iteritems(self._pools):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_pools': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ zpool_facts = ZPoolFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zpool_facts.name
+
+ if zpool_facts.parsable:
+ result['parsable'] = zpool_facts.parsable
+
+ if zpool_facts.name is not None:
+ if zpool_facts.pool_exists():
+ result['ansible_facts'] = zpool_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+ else:
+ result['ansible_facts'] = zpool_facts.get_facts()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py
new file mode 100644
index 00000000..9ad539ca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper.py
@@ -0,0 +1,561 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+ - You can also pass a url or a local path to a rpm file.
+ - When using state=latest, this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+ default: "present"
+ type: str
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ type: str
+ extra_args_precommand:
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ disable_gpg_check:
+ description:
+            - Whether to disable GPG signature checking of the package
+              being installed. Has an effect only if state is
+              I(present) or I(latest).
+ required: false
+ default: "no"
+ type: bool
+ disable_recommends:
+ description:
+            - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(yes)) passes C(--no-recommends) so recommended
+              packages are not installed; C(no) keeps zypper's default behavior of installing recommended packages.
+ required: false
+ default: "yes"
+ type: bool
+ force:
+ description:
+            - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: "no"
+ type: bool
+ force_resolution:
+ description:
+            - Adds C(--force-resolution) option to I(zypper). Allows installing or removing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: "no"
+ type: bool
+ version_added: '0.2.0'
+ update_cache:
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: "no"
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ description:
+            - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than force. This is implied as soon as a
+              version is specified as part of the package name.
+ required: false
+ default: "no"
+ type: bool
+ extra_args:
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ allow_vendor_change:
+ type: bool
+ required: false
+ default: false
+ description:
+            - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+ version_added: '0.2.0'
+ replacefiles:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--replacefiles) option to I(zypper) install/update command.
+ version_added: '0.2.0'
+notes:
+  - When used with a C(loop:), each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option
+    (see the multi-package example in the examples below).
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+ community.general.zypper:
+ name: nmap
+ state: present
+
+- name: Install apache2 with recommended packages
+ community.general.zypper:
+ name: apache2
+ state: present
+ disable_recommends: no
+
+- name: Apply a given patch
+ community.general.zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+- name: Remove the nmap package
+ community.general.zypper:
+ name: nmap
+ state: absent
+
+- name: Install the nginx rpm from a remote repo
+ community.general.zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+- name: Install local rpm file
+ community.general.zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+- name: Update all packages
+ community.general.zypper:
+ name: '*'
+ state: latest
+
+- name: Apply all available patches
+ community.general.zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+ community.general.zypper:
+ name: '*'
+ state: dist-upgrade
+ allow_vendor_change: true
+ extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+ community.general.zypper:
+ name: 'nmap'
+ state: latest
+ replacefiles: true
+
+- name: Refresh repositories and update package openssl
+ community.general.zypper:
+ name: openssl
+ state: present
+ update_cache: yes
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+ community.general.zypper:
+ name: 'docker>=1.10'
+ state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+ community.general.zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
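+
+# Hypothetical example: the package names below are placeholders. Passing a
+# list to name installs everything in a single zypper transaction, which is
+# much more efficient than looping over the packages.
+- name: Install several packages in a single transaction
+  community.general.zypper:
+    name:
+      - nmap
+      - mosh
+      - tmux
+    state: present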
+'''
+
+import xml
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils._text import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
+
+
+def get_want_state(names, remove=False):
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 106, 103]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+ # 103: zypper was upgraded, run same command again
+        # track whether this is the first run so a single retry is done on exit code 103
+        firstrun = packages is None
+        if firstrun:
+            packages = {}
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', '--xmlout']
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['replacefiles']:
+ cmd.append('--replacefiles')
+ if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
+ cmd.append('--allow-vendor-change')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
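+    # With default module options, state=present typically results in a command
+    # like: /usr/bin/zypper --quiet --non-interactive --xmlout install
+    # --type package --auto-agree-with-licenses --no-recommends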
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ disable_recommends=dict(required=False, default=True, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ force_resolution=dict(required=False, default=False, type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(required=False, default=False, type='bool'),
+ extra_args=dict(required=False, default=None),
+ allow_vendor_change=dict(required=False, default=False, type='bool'),
+ replacefiles=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] == 0 and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py
new file mode 100644
index 00000000..55738b58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/plugins/modules/zypper_repository.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+ - Add or remove Zypper repositories on SUSE and openSUSE
+options:
+ name:
+ description:
+ - A name for the repository. Not required when adding repofiles.
+ type: str
+ repo:
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ type: str
+ state:
+ description:
+            - Whether the repository should be present or absent.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+ description:
+ description:
+ - A description of the repository
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ type: bool
+ default: no
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ type: bool
+ default: yes
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ type: int
+ overwrite_multiple:
+ description:
+ - Overwrite multiple repository entries, if repositories with both name and
+ URL already exist.
+ type: bool
+ default: no
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+ - Only works with C(.repo) files if `name` is given explicitly.
+ type: bool
+ default: no
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ type: bool
+ default: no
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ type: bool
+ default: yes
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
+
+- name: Remove NVIDIA repository
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
+
+- name: Add python development repository
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+ community.general.zypper_repository:
+ repo: '*'
+ runrefresh: yes
+
+- name: Add a repo and add its gpg key
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: yes
+
+- name: Force refresh of a repository
+ community.general.zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: yes
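+
+# Hypothetical example: the repository name and URL below are placeholders.
+# Setting a priority requires zypper >= 1.12.25.
+- name: Add a repository with a custom priority, initially disabled
+  community.general.zypper_repository:
+    name: example-repo
+    repo: 'http://example.com/repo'
+    priority: 120
+    enabled: no
+    state: present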
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+ from xml.dom.minidom import parseString as parseXML
+ HAS_XML = True
+except ImportError:
+ XML_IMP_ERR = traceback.format_exc()
+ HAS_XML = False
+
+from distutils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(*args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd('--xmlout', 'repos')
+
+ if not HAS_XML:
+ module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd('addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd('removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
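+    # "zypper --version" prints a line like "zypper 1.14.11"; fall back to 1.0
+    # when the output cannot be parsed.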
+ rc, stdout, stderr = module.run_command(['/usr/bin/zypper', '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd('--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd('refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default=False, type='bool'),
+ description=dict(required=False),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of=[['state', 'runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if repo:
+ shortname = repo
+ else:
+ shortname = alias
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/__init__.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.ini
new file mode 100644
index 00000000..991a2ed8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.ini
@@ -0,0 +1,48 @@
+# Ansible external inventory script settings for Abiquo
+#
+
+# Define an Abiquo user with access to Abiquo API which will be used to
+# perform required queries to obtain information to generate the Ansible
+# inventory output.
+#
+[auth]
+apiuser = admin
+apipass = xabiquo
+
+
+# Specify the Abiquo API version in major.minor format and the URI of the
+# API endpoint. Tested versions are: 2.6, 3.0 and 3.1.
+# To confirm that your box has access to the Abiquo API you can run a
+# curl command similar to the following, substituting suitable values:
+# curl -X GET https://192.168.2.100/api/login -u admin:xabiquo
+#
+[api]
+version = 3.0
+uri = https://192.168.2.100/api
+# You probably won't need to modify login preferences, but just in case
+login_path = /login
+login_type = application/vnd.abiquo.user+json
+
+
+# To avoid excessive calls to the Abiquo API you can define a cache for the
+# plugin output. Within the time defined (in seconds), the latest output is
+# reused; after that time, the cache is refreshed.
+#
+[cache]
+cache_max_age = 30
+cache_dir = /tmp
+
+
+[defaults]
+# Depending on your Abiquo environment, you may want to use only public IP
+# addresses (if using public cloud providers) or also private IP addresses.
+# You can set this with the public_ip_only option.
+public_ip_only = false
+# default_net_interface is only used if public_ip_only = false.
+# If public_ip_only is set to false, you can choose which NIC provides the
+# IP address used to define the host.
+default_net_interface = nic0
+# Only deployed VMs are displayed in the plugin output.
+deployed_only = true
+# Define whether VM metadata is obtained from the Abiquo API.
+get_metadata = false
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.py
new file mode 100644
index 00000000..7602a1d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/abiquo.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+'''
+External inventory script for Abiquo
+====================================
+
+Shamelessly copied from an existing inventory script.
+
+This script generates an inventory that Ansible can understand by making API requests to the Abiquo API.
+It requires some Python libraries; ensure they are installed before using this script.
+
+This script has been tested in Abiquo 3.0 but it may work also for Abiquo 2.6.
+
+Before using this script you may want to modify abiquo.ini config file.
+
+This script generates an Ansible hosts file with these host groups:
+
+ABQ_xxx: Defines a host by its Abiquo VM name label
+all: Contains all hosts defined in the Abiquo user's enterprise
+virtualdatacenter: Creates a host group for each virtualdatacenter containing all hosts defined on it
+virtualappliance: Creates a host group for each virtualappliance containing all hosts defined on it
+imagetemplate: Creates a host group for each image template containing all hosts using it
+
+'''
+
+# (c) 2014, Daniel Beneyto <daniel.beneyto@abiquo.com>
+#
+# This file is part of Ansible,
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import time
+
+import json
+
+from ansible.module_utils.six.moves import configparser as ConfigParser
+from ansible.module_utils.urls import open_url
+
+
+def api_get(link, config):
+ try:
+ if link is None:
+ url = config.get('api', 'uri') + config.get('api', 'login_path')
+ headers = {"Accept": config.get('api', 'login_type')}
+ else:
+ url = link['href'] + '?limit=0'
+ headers = {"Accept": link['type']}
+ result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
+ url_password=config.get('auth', 'apipass').replace('\n', ''))
+ return json.loads(result.read())
+ except Exception:
+ return None
+
+
+def save_cache(data, config):
+ ''' saves item to cache '''
+ dpath = config.get('cache', 'cache_dir')
+ try:
+ cache = open('/'.join([dpath, 'inventory']), 'w')
+ cache.write(json.dumps(data))
+ cache.close()
+ except IOError as e:
+ pass # not really sure what to do here
+
+
+def get_cache(cache_item, config):
+ ''' returns cached item '''
+ dpath = config.get('cache', 'cache_dir')
+ inv = {}
+ try:
+ cache = open('/'.join([dpath, 'inventory']), 'r')
+ inv = cache.read()
+ cache.close()
+ except IOError as e:
+ pass # not really sure what to do here
+
+ return inv
+
+
+def cache_available(config):
+ ''' checks if we have a 'fresh' cache available for item requested '''
+
+ if config.has_option('cache', 'cache_dir'):
+ dpath = config.get('cache', 'cache_dir')
+
+ try:
+ existing = os.stat('/'.join([dpath, 'inventory']))
+ except Exception:
+ # cache doesn't exist or isn't accessible
+ return False
+
+ if config.has_option('cache', 'cache_max_age'):
+ maxage = config.get('cache', 'cache_max_age')
+ if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
+ return True
+
+ return False
+
+
+def generate_inv_from_api(enterprise_entity, config):
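+    # The inventory built below has roughly this shape (illustrative values):
+    #   {"all": {"children": ["my_vm"], "hosts": []},
+    #    "my_vapp": {"children": ["my_vm"], "hosts": []},
+    #    "my_vdc": {"children": ["my_vm"], "hosts": []},
+    #    "my_template": {"children": ["my_vm"], "hosts": []},
+    #    "my_vm": ["192.0.2.10"],
+    #    "_meta": {"hostvars": {}}}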
+ try:
+ inventory['all'] = {}
+ inventory['all']['children'] = []
+ inventory['all']['hosts'] = []
+ inventory['_meta'] = {}
+ inventory['_meta']['hostvars'] = {}
+
+ enterprise = api_get(enterprise_entity, config)
+ vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
+ vms = api_get(vms_entity, config)
+ for vmcollection in vms['collection']:
+ for link in vmcollection['links']:
+ if link['rel'] == 'virtualappliance':
+ vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
+ elif link['rel'] == 'virtualdatacenter':
+ vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
+ elif link['rel'] == 'virtualmachinetemplate':
+ vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
+
+ # From abiquo.ini: Only adding to inventory VMs with public IP
+ if config.getboolean('defaults', 'public_ip_only') is True:
+ for link in vmcollection['links']:
+ if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
+ vm_nic = link['title']
+ break
+ else:
+ vm_nic = None
+ # Otherwise, assigning defined network interface IP address
+ else:
+ for link in vmcollection['links']:
+ if link['rel'] == config.get('defaults', 'default_net_interface'):
+ vm_nic = link['title']
+ break
+ else:
+ vm_nic = None
+
+ vm_state = True
+ # From abiquo.ini: Only adding to inventory VMs deployed
+ if config.getboolean('defaults', 'deployed_only') is True and vmcollection['state'] == 'NOT_ALLOCATED':
+ vm_state = False
+
+ if vm_nic is not None and vm_state:
+ if vm_vapp not in inventory:
+ inventory[vm_vapp] = {}
+ inventory[vm_vapp]['children'] = []
+ inventory[vm_vapp]['hosts'] = []
+ if vm_vdc not in inventory:
+ inventory[vm_vdc] = {}
+ inventory[vm_vdc]['hosts'] = []
+ inventory[vm_vdc]['children'] = []
+ if vm_template not in inventory:
+ inventory[vm_template] = {}
+ inventory[vm_template]['children'] = []
+ inventory[vm_template]['hosts'] = []
+ if config.getboolean('defaults', 'get_metadata') is True:
+ meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
+ try:
+ metadata = api_get(meta_entity, config)
+ if (config.getfloat("api", "version") >= 3.0):
+ vm_metadata = metadata['metadata']
+ else:
+ vm_metadata = metadata['metadata']['metadata']
+ inventory['_meta']['hostvars'][vm_nic] = vm_metadata
+ except Exception as e:
+ pass
+
+ inventory[vm_vapp]['children'].append(vmcollection['name'])
+ inventory[vm_vdc]['children'].append(vmcollection['name'])
+ inventory[vm_template]['children'].append(vmcollection['name'])
+ inventory['all']['children'].append(vmcollection['name'])
+ inventory[vmcollection['name']] = []
+ inventory[vmcollection['name']].append(vm_nic)
+
+ return inventory
+ except Exception as e:
+ # Return empty hosts output
+ return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
+
+
+def get_inventory(enterprise, config):
+ ''' Reads the inventory from cache or Abiquo api '''
+
+ if cache_available(config):
+ inv = get_cache('inventory', config)
+ else:
+ default_group = os.path.basename(sys.argv[0]).rstrip('.py')
+ # MAKE ABIQUO API CALLS #
+ inv = generate_inv_from_api(enterprise, config)
+
+ save_cache(inv, config)
+ return json.dumps(inv)
+
+
+if __name__ == '__main__':
+ inventory = {}
+ enterprise = {}
+
+ # Read config
+ config = ConfigParser.SafeConfigParser()
+ for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'abiquo.ini']:
+ if os.path.exists(configfilename):
+ config.read(configfilename)
+ break
+
+ try:
+ login = api_get(None, config)
+ enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
+ except Exception as e:
+ enterprise = None
+
+ if cache_available(config):
+ inventory = get_cache('inventory', config)
+ else:
+ inventory = get_inventory(enterprise, config)
+
+ # return to ansible
+ sys.stdout.write(str(inventory))
+ sys.stdout.flush()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apache-libcloud.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apache-libcloud.py
new file mode 100644
index 00000000..b0575235
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apache-libcloud.py
@@ -0,0 +1,336 @@
+#!/usr/bin/env python
+
+# (c) 2013, Sebastien Goasguen <runseb@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+'''
+Apache Libcloud generic external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API request to
+Cloud providers using the Apache libcloud library.
+
+This script also assumes there is a libcloud.ini file alongside it
+
+'''
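+
+# A minimal libcloud.ini sketch (the values are placeholders; read_settings()
+# below expects a [driver] section with at least provider, key and secret,
+# optionally host, secure, verify_ssl_cert, port, path and api_version, plus a
+# [cache] section):
+#
+#   [driver]
+#   provider = EC2
+#   key = YOUR_ACCESS_KEY
+#   secret = YOUR_SECRET_KEY
+#   secure = True
+#   host = ec2.us-east-1.amazonaws.com
+#   path = /
+#
+#   [cache]
+#   cache_path = /tmp
+#   cache_max_age = 300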
+
+import sys
+import os
+import argparse
+import re
+from time import time
+
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.six.moves import configparser as ConfigParser
+from libcloud.compute.types import Provider
+from libcloud.compute.providers import get_driver
+import libcloud.security as sec
+
+import json
+
+
+class LibcloudInventory(object):
+ def __init__(self):
+ ''' Main execution path '''
+
+ # Inventory grouped by instance IDs, tags, security groups, regions,
+ # and availability zones
+ self.inventory = {}
+
+ # Index of hostname (address) to instance ID
+ self.index = {}
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Cache
+ if self.args.refresh_cache:
+ self.do_api_calls_update_cache()
+ elif not self.is_cache_valid():
+ self.do_api_calls_update_cache()
+
+ # Data to print
+ if self.args.host:
+ data_to_print = self.get_host_info()
+
+ elif self.args.list:
+ # Display list of instances for inventory
+ if len(self.inventory) == 0:
+ data_to_print = self.get_inventory_from_cache()
+ else:
+ data_to_print = self.json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def is_cache_valid(self):
+        ''' Determines whether the cache files are still valid or have expired '''
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_index):
+ return True
+
+ return False
+
+ def read_settings(self):
+ ''' Reads the settings from the libcloud.ini file '''
+
+ config = ConfigParser.SafeConfigParser()
+ libcloud_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libcloud.ini')
+ libcloud_ini_path = os.environ.get('LIBCLOUD_INI_PATH', libcloud_default_ini_path)
+ config.read(libcloud_ini_path)
+
+ if not config.has_section('driver'):
+ raise ValueError('libcloud.ini file must contain a [driver] section')
+
+ if config.has_option('driver', 'provider'):
+ self.provider = config.get('driver', 'provider')
+ else:
+ raise ValueError('libcloud.ini does not have a provider defined')
+
+ if config.has_option('driver', 'key'):
+ self.key = config.get('driver', 'key')
+ else:
+ raise ValueError('libcloud.ini does not have a key defined')
+
+ if config.has_option('driver', 'secret'):
+ self.secret = config.get('driver', 'secret')
+ else:
+ raise ValueError('libcloud.ini does not have a secret defined')
+
+ if config.has_option('driver', 'host'):
+ self.host = config.get('driver', 'host')
+ if config.has_option('driver', 'secure'):
+ self.secure = config.get('driver', 'secure')
+ if config.has_option('driver', 'verify_ssl_cert'):
+ self.verify_ssl_cert = config.get('driver', 'verify_ssl_cert')
+ if config.has_option('driver', 'port'):
+ self.port = config.get('driver', 'port')
+ if config.has_option('driver', 'path'):
+ self.path = config.get('driver', 'path')
+ if config.has_option('driver', 'api_version'):
+ self.api_version = config.get('driver', 'api_version')
+
+ Driver = get_driver(getattr(Provider, self.provider))
+
+ self.conn = Driver(key=self.key, secret=self.secret, secure=self.secure,
+ host=self.host, path=self.path)
+
+ # Cache related
+ cache_path = config.get('cache', 'cache_path')
+ self.cache_path_cache = cache_path + "/ansible-libcloud.cache"
+ self.cache_path_index = cache_path + "/ansible-libcloud.index"
+ self.cache_max_age = config.getint('cache', 'cache_max_age')
+
+ def parse_cli_args(self):
+ '''
+ Command line argument processing
+ '''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def do_api_calls_update_cache(self):
+ '''
+ Do API calls to a location, and save data in cache files
+ '''
+
+ self.get_nodes()
+
+ self.write_to_cache(self.inventory, self.cache_path_cache)
+ self.write_to_cache(self.index, self.cache_path_index)
+
+ def get_nodes(self):
+ '''
+ Gets the list of all nodes
+ '''
+
+ for node in self.conn.list_nodes():
+ self.add_node(node)
+
+ def get_node(self, node_id):
+ '''
+ Gets details about a specific node
+ '''
+
+ return [node for node in self.conn.list_nodes() if node.id == node_id][0]
+
+ def add_node(self, node):
+ '''
+ Adds a node to the inventory and index, as long as it is
+ addressable
+ '''
+
+ # Only want running instances
+ if node.state != 0:
+ return
+
+ # Select the best destination address
+        dest = node.public_ips[0] if node.public_ips else None
+ if not dest:
+ # Skip instances we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = node.name
+
+ # Inventory: Group by instance ID (always a group of 1)
+ self.inventory[node.name] = [dest]
+ '''
+ # Inventory: Group by region
+ self.push(self.inventory, region, dest)
+
+ # Inventory: Group by availability zone
+ self.push(self.inventory, node.placement, dest)
+
+ # Inventory: Group by instance type
+ self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
+ '''
+ # Inventory: Group by key pair
+ if node.extra['key_name']:
+ self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
+
+ # Inventory: Group by security group, quick thing to handle single sg
+ if node.extra['security_group']:
+ self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
+
+ # Inventory: Group by tag
+ if node.extra['tags']:
+ for tagkey in node.extra['tags'].keys():
+ self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
+
+ def get_host_info(self):
+ '''
+ Get variables about a specific host
+ '''
+
+ if len(self.index) == 0:
+ # Need to load index from cache
+ self.load_index_from_cache()
+
+ if self.args.host not in self.index:
+ # try updating the cache
+ self.do_api_calls_update_cache()
+ if self.args.host not in self.index:
+                # host might not exist anymore
+ return self.json_format_dict({}, True)
+
+ node_id = self.index[self.args.host]
+
+ node = self.get_node(node_id)
+ instance_vars = {}
+ for key, value in vars(node).items():
+ key = self.to_safe('ec2_' + key)
+
+ # Handle complex types
+ if isinstance(value, (int, bool)):
+ instance_vars[key] = value
+ elif isinstance(value, string_types):
+ instance_vars[key] = value.strip()
+ elif value is None:
+ instance_vars[key] = ''
+ elif key == 'ec2_region':
+ instance_vars[key] = value.name
+ elif key == 'ec2_tags':
+ for k, v in iteritems(value):
+ key = self.to_safe('ec2_tag_' + k)
+ instance_vars[key] = v
+ elif key == 'ec2_groups':
+ group_ids = []
+ group_names = []
+ for group in value:
+ group_ids.append(group.id)
+ group_names.append(group.name)
+ instance_vars["ec2_security_group_ids"] = ','.join(group_ids)
+ instance_vars["ec2_security_group_names"] = ','.join(group_names)
+ else:
+ pass
+ # TODO Product codes if someone finds them useful
+ # print(key)
+ # print(type(value))
+ # print(value)
+
+ return self.json_format_dict(instance_vars, True)
+
+ def push(self, my_dict, key, element):
+ '''
+        Pushes an element onto a list that may not have been defined in
+        the dict
+ '''
+
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def get_inventory_from_cache(self):
+ '''
+ Reads the inventory from the cache file and returns it as a JSON
+ object
+ '''
+
+ cache = open(self.cache_path_cache, 'r')
+ json_inventory = cache.read()
+ return json_inventory
+
+ def load_index_from_cache(self):
+ '''
+        Reads the index from the cache file and sets self.index
+ '''
+
+ cache = open(self.cache_path_index, 'r')
+ json_index = cache.read()
+ self.index = json.loads(json_index)
+
+ def write_to_cache(self, data, filename):
+ '''
+ Writes data in JSON format to a file
+ '''
+
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def to_safe(self, word):
+ '''
+ Converts 'bad' characters in a string to underscores so they can be
+ used as Ansible groups
+ '''
+
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+ def json_format_dict(self, data, pretty=False):
+ '''
+ Converts a dict to a JSON object and dumps it as a formatted
+ string
+ '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+def main():
+ LibcloudInventory()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.ini
new file mode 100644
index 00000000..1ec1255c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.ini
@@ -0,0 +1,20 @@
+# Ansible Apstra AOS external inventory script settings
+# Dynamic inventory script parameters can be provided using this file
+# Or by using Environment Variables:
+# - AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
+#
+# This file takes precedence over the Environment Variables
+#
+
+[aos]
+
+# aos_server = 172.20.62.3
+# port = 8888
+# username = admin
+# password = admin
+
+## Blueprint Mode
+# To use the inventory in Blueprint mode, define the name of the blueprint you want to use.
+
+# blueprint = my-blueprint-l2
+# blueprint_interface = true
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.py
new file mode 100644
index 00000000..ce2eb3de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/apstra_aos.py
@@ -0,0 +1,580 @@
+#!/usr/bin/env python
+#
+# (c) 2017 Apstra Inc, <community@apstra.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Apstra AOS external inventory script
+====================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this:
+ - copy this file over /etc/ansible/hosts and chmod +x the file, or
+ - copy both files (.py and .ini) into your preferred directory.
+
+More information about Ansible Dynamic Inventory here
+http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
+
+Two modes are currently supported: **device based** or **blueprint based**:
+ - For **device based**, the list of devices is taken from the global device list;
+   the serial ID is used as the inventory_hostname.
+ - For **blueprint based**, the list of devices is taken from the given blueprint;
+   the node name is used as the inventory_hostname.
+
+Input parameters can be provided either in the ini file or via environment variables.
+The following environment variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT.
+The config file takes precedence over the environment variables.
+
+Tested with Apstra AOS 1.1
+
+This script was inspired by the cobbler.py inventory script.
+
+Author: Damien Garros (@dgarros)
+Version: 0.2.0
+"""
+import json
+import os
+import re
+import sys
+
+try:
+ import argparse
+ HAS_ARGPARSE = True
+except ImportError:
+ HAS_ARGPARSE = False
+
+try:
+ from apstra.aosom.session import Session
+ HAS_AOS_PYEZ = True
+except ImportError:
+ HAS_AOS_PYEZ = False
+
+from ansible.module_utils.six.moves import configparser
+
+
+"""
+##
+Expected output format in Device mode
+{
+ "Cumulus": {
+ "hosts": [
+ "52540073956E",
+ "52540022211A"
+ ],
+ "vars": {}
+ },
+ "EOS": {
+ "hosts": [
+ "5254001CAFD8",
+ "525400DDDF72"
+ ],
+ "vars": {}
+ },
+ "Generic Model": {
+ "hosts": [
+ "525400E5486D"
+ ],
+ "vars": {}
+ },
+ "Ubuntu GNU/Linux": {
+ "hosts": [
+ "525400E5486D"
+ ],
+ "vars": {}
+ },
+ "VX": {
+ "hosts": [
+ "52540073956E",
+ "52540022211A"
+ ],
+ "vars": {}
+ },
+ "_meta": {
+ "hostvars": {
+ "5254001CAFD8": {
+ "agent_start_time": "2017-02-03T00:49:16.000000Z",
+ "ansible_ssh_host": "172.20.52.6",
+ "aos_hcl_model": "Arista_vEOS",
+ "aos_server": "",
+ "aos_version": "AOS_1.1.1_OB.5",
+ "comm_state": "on",
+ "device_start_time": "2017-02-03T00:47:58.454480Z",
+ "domain_name": "",
+ "error_message": "",
+ "fqdn": "localhost",
+ "hostname": "localhost",
+ "hw_model": "vEOS",
+ "hw_version": "",
+ "is_acknowledged": false,
+ "mgmt_ifname": "Management1",
+ "mgmt_ipaddr": "172.20.52.6",
+ "mgmt_macaddr": "52:54:00:1C:AF:D8",
+ "os_arch": "x86_64",
+ "os_family": "EOS",
+ "os_version": "4.16.6M",
+ "os_version_info": {
+ "build": "6M",
+ "major": "4",
+ "minor": "16"
+ },
+ "serial_number": "5254001CAFD8",
+ "state": "OOS-QUARANTINED",
+ "vendor": "Arista"
+ },
+ "52540022211A": {
+ "agent_start_time": "2017-02-03T00:45:22.000000Z",
+ "ansible_ssh_host": "172.20.52.7",
+ "aos_hcl_model": "Cumulus_VX",
+ "aos_server": "172.20.52.3",
+ "aos_version": "AOS_1.1.1_OB.5",
+ "comm_state": "on",
+ "device_start_time": "2017-02-03T00:45:11.019189Z",
+ "domain_name": "",
+ "error_message": "",
+ "fqdn": "cumulus",
+ "hostname": "cumulus",
+ "hw_model": "VX",
+ "hw_version": "",
+ "is_acknowledged": false,
+ "mgmt_ifname": "eth0",
+ "mgmt_ipaddr": "172.20.52.7",
+ "mgmt_macaddr": "52:54:00:22:21:1a",
+ "os_arch": "x86_64",
+ "os_family": "Cumulus",
+ "os_version": "3.1.1",
+ "os_version_info": {
+ "build": "1",
+ "major": "3",
+ "minor": "1"
+ },
+ "serial_number": "52540022211A",
+ "state": "OOS-QUARANTINED",
+ "vendor": "Cumulus"
+ },
+ "52540073956E": {
+ "agent_start_time": "2017-02-03T00:45:19.000000Z",
+ "ansible_ssh_host": "172.20.52.8",
+ "aos_hcl_model": "Cumulus_VX",
+ "aos_server": "172.20.52.3",
+ "aos_version": "AOS_1.1.1_OB.5",
+ "comm_state": "on",
+ "device_start_time": "2017-02-03T00:45:11.030113Z",
+ "domain_name": "",
+ "error_message": "",
+ "fqdn": "cumulus",
+ "hostname": "cumulus",
+ "hw_model": "VX",
+ "hw_version": "",
+ "is_acknowledged": false,
+ "mgmt_ifname": "eth0",
+ "mgmt_ipaddr": "172.20.52.8",
+ "mgmt_macaddr": "52:54:00:73:95:6e",
+ "os_arch": "x86_64",
+ "os_family": "Cumulus",
+ "os_version": "3.1.1",
+ "os_version_info": {
+ "build": "1",
+ "major": "3",
+ "minor": "1"
+ },
+ "serial_number": "52540073956E",
+ "state": "OOS-QUARANTINED",
+ "vendor": "Cumulus"
+ },
+ "525400DDDF72": {
+ "agent_start_time": "2017-02-03T00:49:07.000000Z",
+ "ansible_ssh_host": "172.20.52.5",
+ "aos_hcl_model": "Arista_vEOS",
+ "aos_server": "",
+ "aos_version": "AOS_1.1.1_OB.5",
+ "comm_state": "on",
+ "device_start_time": "2017-02-03T00:47:46.929921Z",
+ "domain_name": "",
+ "error_message": "",
+ "fqdn": "localhost",
+ "hostname": "localhost",
+ "hw_model": "vEOS",
+ "hw_version": "",
+ "is_acknowledged": false,
+ "mgmt_ifname": "Management1",
+ "mgmt_ipaddr": "172.20.52.5",
+ "mgmt_macaddr": "52:54:00:DD:DF:72",
+ "os_arch": "x86_64",
+ "os_family": "EOS",
+ "os_version": "4.16.6M",
+ "os_version_info": {
+ "build": "6M",
+ "major": "4",
+ "minor": "16"
+ },
+ "serial_number": "525400DDDF72",
+ "state": "OOS-QUARANTINED",
+ "vendor": "Arista"
+ },
+ "525400E5486D": {
+ "agent_start_time": "2017-02-02T18:44:42.000000Z",
+ "ansible_ssh_host": "172.20.52.4",
+ "aos_hcl_model": "Generic_Server_1RU_1x10G",
+ "aos_server": "172.20.52.3",
+ "aos_version": "AOS_1.1.1_OB.5",
+ "comm_state": "on",
+ "device_start_time": "2017-02-02T21:11:25.188734Z",
+ "domain_name": "",
+ "error_message": "",
+ "fqdn": "localhost",
+ "hostname": "localhost",
+ "hw_model": "Generic Model",
+ "hw_version": "pc-i440fx-trusty",
+ "is_acknowledged": false,
+ "mgmt_ifname": "eth0",
+ "mgmt_ipaddr": "172.20.52.4",
+ "mgmt_macaddr": "52:54:00:e5:48:6d",
+ "os_arch": "x86_64",
+ "os_family": "Ubuntu GNU/Linux",
+ "os_version": "14.04 LTS",
+ "os_version_info": {
+ "build": "",
+ "major": "14",
+ "minor": "04"
+ },
+ "serial_number": "525400E5486D",
+ "state": "OOS-QUARANTINED",
+ "vendor": "Generic Manufacturer"
+ }
+ }
+ },
+ "all": {
+ "hosts": [
+ "5254001CAFD8",
+ "52540073956E",
+ "525400DDDF72",
+ "525400E5486D",
+ "52540022211A"
+ ],
+ "vars": {}
+ },
+ "vEOS": {
+ "hosts": [
+ "5254001CAFD8",
+ "525400DDDF72"
+ ],
+ "vars": {}
+ }
+}
+"""
+
+
+def fail(msg):
+ sys.stderr.write("%s\n" % msg)
+ sys.exit(1)
+
+
+class AosInventory(object):
+
+ def __init__(self):
+
+ """ Main execution path """
+
+ if not HAS_AOS_PYEZ:
+ raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
+ if not HAS_ARGPARSE:
+ raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
+
+ # Initialize inventory
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.inventory['_meta'] = dict()
+ self.inventory['_meta']['hostvars'] = dict()
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # ----------------------------------------------------
+ # Open session to AOS
+ # ----------------------------------------------------
+ aos = Session(server=self.aos_server,
+ port=self.aos_server_port,
+ user=self.aos_username,
+ passwd=self.aos_password)
+
+ aos.login()
+
+ # Save session information in variables of group all
+ self.add_var_to_group('all', 'aos_session', aos.session)
+
+ # Add the AOS server itself in the inventory
+ self.add_host_to_group("all", 'aos')
+ self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
+ self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
+ self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
+
+ # ----------------------------------------------------
+ # Build the inventory
+ # 2 modes are supported: device based or blueprint based
+        # - For device based, the list of devices is taken from the global device list;
+        #   the serial ID will be used as the inventory_hostname
+        # - For blueprint based, the list of devices is taken from the given blueprint;
+        #   the node name will be used as the inventory_hostname
+ # ----------------------------------------------------
+ if self.aos_blueprint:
+
+ bp = aos.Blueprints[self.aos_blueprint]
+ if bp.exists is False:
+ fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
+
+ for dev_name, dev_id in bp.params['devices'].value.items():
+
+ self.add_host_to_group('all', dev_name)
+ device = aos.Devices.find(uid=dev_id)
+
+ if 'facts' in device.value.keys():
+ self.add_device_facts_to_var(dev_name, device)
+
+ # Define admin State and Status
+ if 'user_config' in device.value.keys():
+ if 'admin_state' in device.value['user_config'].keys():
+ self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
+
+ self.add_device_status_to_var(dev_name, device)
+
+ # Go over the contents data structure
+ for node in bp.contents['system']['nodes']:
+ if node['display_name'] == dev_name:
+ self.add_host_to_group(node['role'], dev_name)
+
+                    # Check for additional attributes to import
+ attributes_to_import = [
+ 'loopback_ip',
+ 'asn',
+ 'role',
+ 'position',
+ ]
+ for attr in attributes_to_import:
+ if attr in node.keys():
+ self.add_var_to_host(dev_name, attr, node[attr])
+
+                # If blueprint_interface is enabled in the configuration,
+                # collect link information
+ if self.aos_blueprint_int:
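+                    # interfaces ends up keyed by the local interface name; each entry
+                    # holds, when available: ip, name, peer, peer_ip and peer_type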
+ interfaces = dict()
+
+ for link in bp.contents['system']['links']:
+                        # Each link has 2 sides [0, 1], and it's unknown which one matches this device.
+                        # At first we assume the first side (0) matches and the peer is (1)
+ peer_id = 1
+
+ for side in link['endpoints']:
+ if side['display_name'] == dev_name:
+
+ # import local information first
+ int_name = side['interface']
+
+ # init dict
+ interfaces[int_name] = dict()
+ if 'ip' in side.keys():
+ interfaces[int_name]['ip'] = side['ip']
+
+ if 'interface' in side.keys():
+ interfaces[int_name]['name'] = side['interface']
+
+ if 'display_name' in link['endpoints'][peer_id].keys():
+ interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
+
+ if 'ip' in link['endpoints'][peer_id].keys():
+ interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
+
+ if 'type' in link['endpoints'][peer_id].keys():
+ interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
+
+ else:
+                                # If we haven't matched the first time, prepare peer_id
+                                # for the second loop iteration
+ peer_id = 0
+
+ self.add_var_to_host(dev_name, 'interfaces', interfaces)
+
+ else:
+ for device in aos.Devices:
+                # If not reachable, create by key;
+                # if reachable, create by hostname
+
+ self.add_host_to_group('all', device.name)
+
+ # populate information for this host
+ self.add_device_status_to_var(device.name, device)
+
+ if 'user_config' in device.value.keys():
+ for key, value in device.value['user_config'].items():
+ self.add_var_to_host(device.name, key, value)
+
+ # Based on device status online|offline, collect facts as well
+ if device.value['status']['comm_state'] == 'on':
+
+ if 'facts' in device.value.keys():
+ self.add_device_facts_to_var(device.name, device)
+
+                    # Check if the device is associated with a blueprint;
+                    # if it is, create a new group
+ if 'blueprint_active' in device.value['status'].keys():
+ if 'blueprint_id' in device.value['status'].keys():
+ bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
+
+ if bp:
+ self.add_host_to_group(bp.name, device.name)
+
+ # ----------------------------------------------------
+ # Convert the inventory and return a JSON String
+ # ----------------------------------------------------
+ data_to_print = ""
+ data_to_print += self.json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def read_settings(self):
+ """ Reads the settings from the apstra_aos.ini file """
+
+ config = configparser.ConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
+
+ # Default Values
+ self.aos_blueprint = False
+ self.aos_blueprint_int = True
+ self.aos_username = 'admin'
+ self.aos_password = 'admin'
+ self.aos_server_port = 8888
+
+        # Try to read all parameters from the config file; fall back to environment variables when missing
+ try:
+ self.aos_server = config.get('aos', 'aos_server')
+ except Exception:
+ if 'AOS_SERVER' in os.environ.keys():
+ self.aos_server = os.environ['AOS_SERVER']
+
+ try:
+ self.aos_server_port = config.get('aos', 'port')
+ except Exception:
+ if 'AOS_PORT' in os.environ.keys():
+ self.aos_server_port = os.environ['AOS_PORT']
+
+ try:
+ self.aos_username = config.get('aos', 'username')
+ except Exception:
+ if 'AOS_USERNAME' in os.environ.keys():
+ self.aos_username = os.environ['AOS_USERNAME']
+
+ try:
+ self.aos_password = config.get('aos', 'password')
+ except Exception:
+ if 'AOS_PASSWORD' in os.environ.keys():
+ self.aos_password = os.environ['AOS_PASSWORD']
+
+ try:
+ self.aos_blueprint = config.get('aos', 'blueprint')
+ except Exception:
+ if 'AOS_BLUEPRINT' in os.environ.keys():
+ self.aos_blueprint = os.environ['AOS_BLUEPRINT']
+
+ try:
+ if config.get('aos', 'blueprint_interface') in ['false', 'no']:
+ self.aos_blueprint_int = False
+ except Exception:
+ pass
+
+ def parse_cli_args(self):
+ """ Command line argument processing """
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ self.args = parser.parse_args()
+
+ def json_format_dict(self, data, pretty=False):
+ """ Converts a dict to a JSON object and dumps it as a formatted string """
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+ def add_host_to_group(self, group, host):
+
+ # Cleanup group name first
+ clean_group = self.cleanup_group_name(group)
+
+        # Check if the group exists; if not, initialize it
+ if clean_group not in self.inventory.keys():
+ self.inventory[clean_group] = {}
+ self.inventory[clean_group]['hosts'] = []
+ self.inventory[clean_group]['vars'] = {}
+
+ self.inventory[clean_group]['hosts'].append(host)
+
+ def add_var_to_host(self, host, var, value):
+
+        # Check if the host exists; if not, initialize it
+ if host not in self.inventory['_meta']['hostvars'].keys():
+ self.inventory['_meta']['hostvars'][host] = {}
+
+ self.inventory['_meta']['hostvars'][host][var] = value
+
+ def add_var_to_group(self, group, var, value):
+
+ # Cleanup group name first
+ clean_group = self.cleanup_group_name(group)
+
+        # Check if the group exists; if not, initialize it
+ if clean_group not in self.inventory.keys():
+ self.inventory[clean_group] = {}
+ self.inventory[clean_group]['hosts'] = []
+ self.inventory[clean_group]['vars'] = {}
+
+ self.inventory[clean_group]['vars'][var] = value
+
+ def add_device_facts_to_var(self, device_name, device):
+
+ # Populate variables for this host
+ self.add_var_to_host(device_name,
+ 'ansible_ssh_host',
+ device.value['facts']['mgmt_ipaddr'])
+
+ self.add_var_to_host(device_name, 'id', device.id)
+
+ # self.add_host_to_group('all', device.name)
+ for key, value in device.value['facts'].items():
+ self.add_var_to_host(device_name, key, value)
+
+ if key == 'os_family':
+ self.add_host_to_group(value, device_name)
+ elif key == 'hw_model':
+ self.add_host_to_group(value, device_name)
+
+ def cleanup_group_name(self, group_name):
+ """
+        Clean up a group name by:
+        - Replacing all non-alphanumeric characters with underscores
+        - Converting to lowercase
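+
+        For example, 'Ubuntu GNU/Linux' becomes 'ubuntu_gnu_linux'.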
+ """
+
+ rx = re.compile(r'\W+')
+ clean_group = rx.sub('_', group_name).lower()
+
+ return clean_group
+
+ def add_device_status_to_var(self, device_name, device):
+
+ if 'status' in device.value.keys():
+ for key, value in device.value['status'].items():
+ self.add_var_to_host(device.name, key, value)
+
+
+# Run the script
+if __name__ == '__main__':
+ AosInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.ini
new file mode 100644
index 00000000..6edd9b98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.ini
@@ -0,0 +1,23 @@
+#
+# Configuration file for azure_rm.py
+#
+[azure]
+# Control which resource groups are included. By default all resources groups are included.
+# Set resource_groups to a comma separated list of resource groups names.
+#resource_groups=
+
+# Control which tags are included. Set tags to a comma separated list of keys or key:value pairs
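+# (for example: tags=key1,key2:value2)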
+#tags=
+
+# Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus)
+#locations=
+
+# Include powerstate. If you don't need powerstate information, turning it off improves runtime performance.
+include_powerstate=yes
+
+# Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1.
+group_by_resource_group=yes
+group_by_location=yes
+group_by_security_group=yes
+group_by_os_family=yes
+group_by_tag=yes
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.py
new file mode 100644
index 00000000..ef9e7b1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/azure_rm.py
@@ -0,0 +1,962 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Important note (2018/10)
+========================
+This inventory script is in maintenance mode: only critical bug fixes, no new features.
+There's a newer Azure external inventory script at https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/inventory/azure_rm.py,
+with better performance and the latest features. Please use that script to get the latest Azure inventory support.
+
+Azure External Inventory Script
+===============================
+Generates dynamic inventory by making API requests to the Azure Resource
+Manager using the Azure Python SDK. For instructions on installing the
+Azure Python SDK, see https://azure-sdk-for-python.readthedocs.io/
+
+Authentication
+--------------
+The order of precedence is command line arguments, environment variables,
+and finally the [default] profile found in ~/.azure/credentials.
+
+If using a credentials file, it should be an ini formatted file with one or
+more sections, which we refer to as profiles. The script looks for a
+[default] section if a profile is not specified either on the command line
+or with an environment variable. The keys in a profile will match the
+list of command line arguments below.
+
+For command line arguments and environment variables specify a profile found
+in your ~/.azure/credentials file, or a service principal or Active Directory
+user.
+
+Command line arguments:
+ - profile
+ - client_id
+ - secret
+ - subscription_id
+ - tenant
+ - ad_user
+ - password
+ - cloud_environment
+ - adfs_authority_url
+
+Environment variables:
+ - AZURE_PROFILE
+ - AZURE_CLIENT_ID
+ - AZURE_SECRET
+ - AZURE_SUBSCRIPTION_ID
+ - AZURE_TENANT
+ - AZURE_AD_USER
+ - AZURE_PASSWORD
+ - AZURE_CLOUD_ENVIRONMENT
+ - AZURE_ADFS_AUTHORITY_URL
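+
+A minimal ~/.azure/credentials profile for a service principal could look like the
+following (all values are placeholders):
+
+    [default]
+    subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    client_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+    secret=xxxxxxxxxxxxxxxxxxxx
+    tenant=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx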
+
+Run for Specific Host
+-----------------------
+When run for a specific host using the --host option, a resource group is
+required. For a specific host, this script returns the following variables:
+
+{
+ "ansible_host": "XXX.XXX.XXX.XXX",
+ "computer_name": "computer_name2",
+ "fqdn": null,
+ "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
+ "image": {
+ "offer": "CentOS",
+ "publisher": "OpenLogic",
+ "sku": "7.1",
+ "version": "latest"
+ },
+ "location": "westus",
+ "mac_address": "00-00-5E-00-53-FE",
+ "name": "object-name",
+ "network_interface": "interface-name",
+ "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
+ "network_security_group": null,
+ "network_security_group_id": null,
+ "os_disk": {
+ "name": "object-name",
+ "operating_system_type": "Linux"
+ },
+ "plan": null,
+ "powerstate": "running",
+ "private_ip": "172.26.3.6",
+ "private_ip_alloc_method": "Static",
+ "provisioning_state": "Succeeded",
+ "public_ip": "XXX.XXX.XXX.XXX",
+ "public_ip_alloc_method": "Static",
+ "public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
+ "public_ip_name": "object-name",
+ "resource_group": "galaxy-production",
+ "security_group": "object-name",
+ "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
+ "tags": {
+ "db": "database"
+ },
+ "type": "Microsoft.Compute/virtualMachines",
+ "virtual_machine_size": "Standard_DS4"
+}
+
+Groups
+------
+When run in --list mode, instances are grouped by the following categories:
+ - azure
+ - location
+ - resource_group
+ - security_group
+ - tag key
+ - tag key_value
+
+Control groups using azure_rm.ini or set environment variables:
+
+AZURE_GROUP_BY_RESOURCE_GROUP=yes
+AZURE_GROUP_BY_LOCATION=yes
+AZURE_GROUP_BY_SECURITY_GROUP=yes
+AZURE_GROUP_BY_TAG=yes
+
+Select hosts within specific resource groups by assigning a comma separated list to:
+
+AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
+
+Select hosts for specific tag key by assigning a comma separated list of tag keys to:
+
+AZURE_TAGS=key1,key2,key3
+
+Select hosts for specific locations:
+
+AZURE_LOCATIONS=eastus,westus,eastus2
+
+Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
+
+AZURE_TAGS=key1:value1,key2:value2
+
+If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
+AZURE_INCLUDE_POWERSTATE=no
+
+azure_rm.ini
+------------
+As mentioned above, you can control execution using environment variables or a .ini file. A sample
+azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
+'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
+a different path for the .ini file, define the AZURE_INI_PATH environment variable:
+
+ export AZURE_INI_PATH=/path/to/custom.ini
+
+Powerstate:
+-----------
+The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
+up. If the value is anything other than 'running', the machine is down, and will be unreachable.
+
+Examples:
+---------
+ Execute /bin/uname on all instances in the galaxy-qa resource group
+ $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
+
+ Use the inventory script to print instance specific information
+ $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
+
+ Use with a playbook
+ $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
+
+
+Insecure Platform Warning
+-------------------------
+If you receive InsecurePlatformWarning from urllib3, install the
+requests security packages:
+
+ pip install requests[security]
+
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - Matt Davis (@nitzmahone)
+
+Company: Ansible by Red Hat
+
+Version: 1.0.0
+'''
+
+import argparse
+import json
+import os
+import re
+import sys
+import inspect
+
+from os.path import expanduser
+from ansible.module_utils.six.moves import configparser as cp
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+HAS_AZURE = True
+HAS_AZURE_EXC = None
+HAS_AZURE_CLI_CORE = True
+CLIError = None
+
+try:
+ from msrestazure.azure_active_directory import AADTokenCredentials
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_active_directory import MSIAuthentication
+ from msrestazure import azure_cloud
+ from azure.mgmt.compute import __version__ as azure_compute_version
+ from azure.common import AzureMissingResourceHttpError, AzureHttpError
+ from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.resource.subscriptions import SubscriptionClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from adal.authentication_context import AuthenticationContext
+except ImportError as exc:
+ HAS_AZURE_EXC = exc
+ HAS_AZURE = False
+
+try:
+ from azure.cli.core.util import CLIError
+ from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
+ from azure.common.cloud import get_cli_active_cloud
+except ImportError:
+ HAS_AZURE_CLI_CORE = False
+ CLIError = Exception
+
+try:
+ from ansible.release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+AZURE_CREDENTIAL_ENV_MAPPING = dict(
+ profile='AZURE_PROFILE',
+ subscription_id='AZURE_SUBSCRIPTION_ID',
+ client_id='AZURE_CLIENT_ID',
+ secret='AZURE_SECRET',
+ tenant='AZURE_TENANT',
+ ad_user='AZURE_AD_USER',
+ password='AZURE_PASSWORD',
+ cloud_environment='AZURE_CLOUD_ENVIRONMENT',
+ adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
+)
+
+AZURE_CONFIG_SETTINGS = dict(
+ resource_groups='AZURE_RESOURCE_GROUPS',
+ tags='AZURE_TAGS',
+ locations='AZURE_LOCATIONS',
+ include_powerstate='AZURE_INCLUDE_POWERSTATE',
+ group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
+ group_by_location='AZURE_GROUP_BY_LOCATION',
+ group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
+ group_by_tag='AZURE_GROUP_BY_TAG',
+ group_by_os_family='AZURE_GROUP_BY_OS_FAMILY',
+ use_private_ip='AZURE_USE_PRIVATE_IP'
+)
+
+AZURE_MIN_VERSION = "2.0.0"
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
+
+
+def azure_id_to_dict(id):
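+    # Turn a resource id such as
+    #   /subscriptions/<sub>/resourceGroups/<group>/providers/Microsoft.Compute/virtualMachines/<name>
+    # into a dict mapping each path segment to the segment that follows it,
+    # so lookups like id_dict['resourceGroups'] return '<group>'.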
+ pieces = re.sub(r'^\/', '', id).split('/')
+ result = {}
+ index = 0
+ while index < len(pieces) - 1:
+ result[pieces[index]] = pieces[index + 1]
+ index += 1
+ return result
+
+
+class AzureRM(object):
+
+ def __init__(self, args):
+ self._args = args
+ self._cloud_environment = None
+ self._compute_client = None
+ self._resource_client = None
+ self._network_client = None
+ self._adfs_authority_url = None
+ self._resource = None
+
+ self.debug = False
+ if args.debug:
+ self.debug = True
+
+ self.credentials = self._get_credentials(args)
+ if not self.credentials:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "or define a profile in ~/.azure/credentials.")
+
+ # if cloud_environment specified, look up/build Cloud object
+ raw_cloud_env = self.credentials.get('cloud_environment')
+ if not raw_cloud_env:
+ self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
+ else:
+ # try to look up "well-known" values via the name attribute on azure_cloud members
+ all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+ matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+ if len(matched_clouds) == 1:
+ self._cloud_environment = matched_clouds[0]
+ elif len(matched_clouds) > 1:
+ self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
+ else:
+ if not urlparse.urlparse(raw_cloud_env).scheme:
+ self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
+ try:
+ self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+ except Exception as e:
+ self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message))
+
+ if self.credentials.get('subscription_id', None) is None:
+ self.fail("Credentials did not include a subscription_id value.")
+ self.log("setting subscription_id")
+ self.subscription_id = self.credentials['subscription_id']
+
+ # get authentication authority
+ # for adfs, user could pass in authority or not.
+ # for others, use default authority from cloud environment
+ if self.credentials.get('adfs_authority_url'):
+ self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+ else:
+ self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+
+ # get resource from cloud environment
+ self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+ if self.credentials.get('credentials'):
+ self.azure_credentials = self.credentials.get('credentials')
+ elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
+ self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+ secret=self.credentials['secret'],
+ tenant=self.credentials['tenant'],
+ cloud_environment=self._cloud_environment)
+
+ elif self.credentials.get('ad_user') is not None and \
+ self.credentials.get('password') is not None and \
+ self.credentials.get('client_id') is not None and \
+ self.credentials.get('tenant') is not None:
+
+ self.azure_credentials = self.acquire_token_with_username_password(
+ self._adfs_authority_url,
+ self._resource,
+ self.credentials['ad_user'],
+ self.credentials['password'],
+ self.credentials['client_id'],
+ self.credentials['tenant'])
+
+ elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+ tenant = self.credentials.get('tenant')
+ if not tenant:
+ tenant = 'common'
+ self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+ self.credentials['password'],
+ tenant=tenant,
+ cloud_environment=self._cloud_environment)
+
+ else:
+ self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+ "Credentials must include client_id, secret and tenant or ad_user and password, or "
+ "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
+ "be logged in using AzureCLI.")
+
+ def log(self, msg):
+ if self.debug:
+ print(msg + u'\n')
+
+ def fail(self, msg):
+ raise Exception(msg)
+
+ def _get_profile(self, profile="default"):
+ path = expanduser("~")
+ path += "/.azure/credentials"
+ try:
+ config = cp.ConfigParser()
+ config.read(path)
+ except Exception as exc:
+ self.fail("Failed to access {0}. Check that the file exists and you have read "
+ "access. {1}".format(path, str(exc)))
+ credentials = dict()
+ for key in AZURE_CREDENTIAL_ENV_MAPPING:
+ try:
+ credentials[key] = config.get(profile, key, raw=True)
+ except Exception:
+ pass
+
+ if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
+ return credentials
+
+ return None
+
+ def _get_env_credentials(self):
+ env_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ env_credentials[attribute] = os.environ.get(env_variable, None)
+
+ if env_credentials['profile'] is not None:
+ credentials = self._get_profile(env_credentials['profile'])
+ return credentials
+
+ if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
+ return env_credentials
+
+ return None
+
+ def _get_azure_cli_credentials(self):
+ credentials, subscription_id = get_azure_cli_credentials()
+ cloud_environment = get_cli_active_cloud()
+
+ cli_credentials = {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id,
+ 'cloud_environment': cloud_environment
+ }
+ return cli_credentials
+
+ def _get_msi_credentials(self, subscription_id_param=None):
+ credentials = MSIAuthentication()
+ subscription_id_param = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
+ try:
+ # try to get the subscription in MSI to test whether MSI is enabled
+ subscription_client = SubscriptionClient(credentials)
+ subscription = next(subscription_client.subscriptions.list())
+ subscription_id = str(subscription.subscription_id)
+ return {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id_param or subscription_id
+ }
+ except Exception as exc:
+ return None
+
+ def _get_credentials(self, params):
+ # Get authentication credentials.
+ # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
+
+ self.log('Getting credentials')
+
+ arg_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ arg_credentials[attribute] = getattr(params, attribute)
+
+ # try module params
+ if arg_credentials['profile'] is not None:
+ self.log('Retrieving credentials with profile parameter.')
+ credentials = self._get_profile(arg_credentials['profile'])
+ return credentials
+
+ if arg_credentials['client_id'] is not None:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ if arg_credentials['ad_user'] is not None:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ # try environment
+ env_credentials = self._get_env_credentials()
+ if env_credentials:
+ self.log('Received credentials from env.')
+ return env_credentials
+
+ # try default profile from ~./azure/credentials
+ default_credentials = self._get_profile()
+ if default_credentials:
+ self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+ return default_credentials
+
+ msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
+ if msi_credentials:
+ self.log('Retrieved credentials from MSI.')
+ return msi_credentials
+
+ try:
+ if HAS_AZURE_CLI_CORE:
+ self.log('Retrieving credentials from AzureCLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as ce:
+ self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+ return None
+
+ def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
+ authority_uri = authority
+
+ if tenant is not None:
+ authority_uri = authority + '/' + tenant
+
+ context = AuthenticationContext(authority_uri)
+ token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+ return AADTokenCredentials(token_response)
+
+ def _register(self, key):
+ try:
+ # We have to perform the one-time registration here. Otherwise, we receive an error the first
+ # time we attempt to use the requested client.
+ resource_client = self.rm_client
+ resource_client.providers.register(key)
+ except Exception as exc:
+ self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
+ self.log("You might need to register {0} using an admin account".format(key))
+ self.log(("To register a provider using the Python CLI: "
+ "https://docs.microsoft.com/azure/azure-resource-manager/"
+ "resource-manager-common-deployment-errors#noregisteredproviderfound"))
+
+ def get_mgmt_svc_client(self, client_type, base_url, api_version):
+ client = client_type(self.azure_credentials,
+ self.subscription_id,
+ base_url=base_url,
+ api_version=api_version)
+ client.config.add_user_agent(ANSIBLE_USER_AGENT)
+ return client
+
+ @property
+ def network_client(self):
+ self.log('Getting network client')
+ if not self._network_client:
+ self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-06-01')
+ self._register('Microsoft.Network')
+ return self._network_client
+
+ @property
+ def rm_client(self):
+ self.log('Getting resource manager client')
+ if not self._resource_client:
+ self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-05-10')
+ return self._resource_client
+
+ @property
+ def compute_client(self):
+ self.log('Getting compute client')
+ if not self._compute_client:
+ self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-03-30')
+ self._register('Microsoft.Compute')
+ return self._compute_client
+
+
+class AzureInventory(object):
+
+ def __init__(self):
+
+ self._args = self._parse_cli_args()
+
+ try:
+ rm = AzureRM(self._args)
+ except Exception as e:
+ sys.exit("{0}".format(str(e)))
+
+ self._compute_client = rm.compute_client
+ self._network_client = rm.network_client
+ self._resource_client = rm.rm_client
+ self._security_groups = None
+
+ self.resource_groups = []
+ self.tags = None
+ self.locations = None
+ self.replace_dash_in_groups = False
+ self.group_by_resource_group = True
+ self.group_by_location = True
+ self.group_by_os_family = True
+ self.group_by_security_group = True
+ self.group_by_tag = True
+ self.include_powerstate = True
+ self.use_private_ip = False
+
+ self._inventory = dict(
+ _meta=dict(
+ hostvars=dict()
+ ),
+ azure=[]
+ )
+
+ self._get_settings()
+
+ if self._args.resource_groups:
+ self.resource_groups = self._args.resource_groups.split(',')
+
+ if self._args.tags:
+ self.tags = self._args.tags.split(',')
+
+ if self._args.locations:
+ self.locations = self._args.locations.split(',')
+
+ if self._args.no_powerstate:
+ self.include_powerstate = False
+
+ self.get_inventory()
+ print(self._json_format_dict(pretty=self._args.pretty))
+ sys.exit(0)
+
+ def _parse_cli_args(self):
+ # Parse command line arguments
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file for an Azure subscription')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Send debug messages to STDOUT')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+        parser.add_argument('--pretty', action='store_true', default=False,
+                            help='Pretty print JSON output (default: False)')
+ parser.add_argument('--profile', action='store',
+ help='Azure profile contained in ~/.azure/credentials')
+ parser.add_argument('--subscription_id', action='store',
+ help='Azure Subscription Id')
+ parser.add_argument('--client_id', action='store',
+ help='Azure Client Id ')
+ parser.add_argument('--secret', action='store',
+ help='Azure Client Secret')
+ parser.add_argument('--tenant', action='store',
+ help='Azure Tenant Id')
+ parser.add_argument('--ad_user', action='store',
+ help='Active Directory User')
+ parser.add_argument('--password', action='store',
+ help='password')
+ parser.add_argument('--adfs_authority_url', action='store',
+ help='Azure ADFS authority url')
+ parser.add_argument('--cloud_environment', action='store',
+ help='Azure Cloud Environment name or metadata discovery URL')
+ parser.add_argument('--resource-groups', action='store',
+ help='Return inventory for comma separated list of resource group names')
+ parser.add_argument('--tags', action='store',
+ help='Return inventory for comma separated list of tag key:value pairs')
+ parser.add_argument('--locations', action='store',
+ help='Return inventory for comma separated list of locations')
+ parser.add_argument('--no-powerstate', action='store_true', default=False,
+ help='Do not include the power state of each virtual host')
+ return parser.parse_args()
+
+ def get_inventory(self):
+ if len(self.resource_groups) > 0:
+ # get VMs for requested resource groups
+ for resource_group in self.resource_groups:
+ try:
+ virtual_machines = self._compute_client.virtual_machines.list(resource_group.lower())
+ except Exception as exc:
+ sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc)))
+ if self._args.host or self.tags:
+ selected_machines = self._selected_machines(virtual_machines)
+ self._load_machines(selected_machines)
+ else:
+ self._load_machines(virtual_machines)
+ else:
+ # get all VMs within the subscription
+ try:
+ virtual_machines = self._compute_client.virtual_machines.list_all()
+ except Exception as exc:
+ sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
+
+ if self._args.host or self.tags or self.locations:
+ selected_machines = self._selected_machines(virtual_machines)
+ self._load_machines(selected_machines)
+ else:
+ self._load_machines(virtual_machines)
+
+ def _load_machines(self, machines):
+ for machine in machines:
+ id_dict = azure_id_to_dict(machine.id)
+
+ # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
+ # fixed, we should remove the .lower(). Opened Issue
+ # #574: https://github.com/Azure/azure-sdk-for-python/issues/574
+ resource_group = id_dict['resourceGroups'].lower()
+
+ if self.group_by_security_group:
+ self._get_security_groups(resource_group)
+
+ host_vars = dict(
+ ansible_host=None,
+ private_ip=None,
+ private_ip_alloc_method=None,
+ public_ip=None,
+ public_ip_name=None,
+ public_ip_id=None,
+ public_ip_alloc_method=None,
+ fqdn=None,
+ location=machine.location,
+ name=machine.name,
+ type=machine.type,
+ id=machine.id,
+ tags=machine.tags,
+ network_interface_id=None,
+ network_interface=None,
+ resource_group=resource_group,
+ mac_address=None,
+ plan=(machine.plan.name if machine.plan else None),
+ virtual_machine_size=machine.hardware_profile.vm_size,
+ computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
+ provisioning_state=machine.provisioning_state,
+ )
+
+ host_vars['os_disk'] = dict(
+ name=machine.storage_profile.os_disk.name,
+ operating_system_type=machine.storage_profile.os_disk.os_type.value.lower()
+ )
+
+ if self.include_powerstate:
+ host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
+
+ if machine.storage_profile.image_reference:
+ host_vars['image'] = dict(
+ offer=machine.storage_profile.image_reference.offer,
+ publisher=machine.storage_profile.image_reference.publisher,
+ sku=machine.storage_profile.image_reference.sku,
+ version=machine.storage_profile.image_reference.version
+ )
+
+ # Add windows details
+ if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
+ host_vars['ansible_connection'] = 'winrm'
+ host_vars['windows_auto_updates_enabled'] = \
+ machine.os_profile.windows_configuration.enable_automatic_updates
+ host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
+ host_vars['windows_rm'] = None
+ if machine.os_profile.windows_configuration.win_rm is not None:
+ host_vars['windows_rm'] = dict(listeners=None)
+ if machine.os_profile.windows_configuration.win_rm.listeners is not None:
+ host_vars['windows_rm']['listeners'] = []
+ for listener in machine.os_profile.windows_configuration.win_rm.listeners:
+ host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol.name,
+ certificate_url=listener.certificate_url))
+
+ for interface in machine.network_profile.network_interfaces:
+ interface_reference = self._parse_ref_id(interface.id)
+ network_interface = self._network_client.network_interfaces.get(
+ interface_reference['resourceGroups'],
+ interface_reference['networkInterfaces'])
+ if network_interface.primary:
+ if self.group_by_security_group and \
+ self._security_groups[resource_group].get(network_interface.id, None):
+ host_vars['security_group'] = \
+ self._security_groups[resource_group][network_interface.id]['name']
+ host_vars['security_group_id'] = \
+ self._security_groups[resource_group][network_interface.id]['id']
+ host_vars['network_interface'] = network_interface.name
+ host_vars['network_interface_id'] = network_interface.id
+ host_vars['mac_address'] = network_interface.mac_address
+ for ip_config in network_interface.ip_configurations:
+ host_vars['private_ip'] = ip_config.private_ip_address
+ host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
+ if self.use_private_ip:
+ host_vars['ansible_host'] = ip_config.private_ip_address
+ if ip_config.public_ip_address:
+ public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
+ public_ip_address = self._network_client.public_ip_addresses.get(
+ public_ip_reference['resourceGroups'],
+ public_ip_reference['publicIPAddresses'])
+ if not self.use_private_ip:
+ host_vars['ansible_host'] = public_ip_address.ip_address
+ host_vars['public_ip'] = public_ip_address.ip_address
+ host_vars['public_ip_name'] = public_ip_address.name
+ host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
+ host_vars['public_ip_id'] = public_ip_address.id
+ if public_ip_address.dns_settings:
+ host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
+
+ self._add_host(host_vars)
+
+ def _selected_machines(self, virtual_machines):
+ selected_machines = []
+ for machine in virtual_machines:
+ if self._args.host and self._args.host == machine.name:
+ selected_machines.append(machine)
+ if self.tags and self._tags_match(machine.tags, self.tags):
+ selected_machines.append(machine)
+ if self.locations and machine.location in self.locations:
+ selected_machines.append(machine)
+ return selected_machines
+
+ def _get_security_groups(self, resource_group):
+ ''' For a given resource_group build a mapping of network_interface.id to security_group name '''
+ if not self._security_groups:
+ self._security_groups = dict()
+ if not self._security_groups.get(resource_group):
+ self._security_groups[resource_group] = dict()
+ for group in self._network_client.network_security_groups.list(resource_group):
+ if group.network_interfaces:
+ for interface in group.network_interfaces:
+ self._security_groups[resource_group][interface.id] = dict(
+ name=group.name,
+ id=group.id
+ )
+
+ def _get_powerstate(self, resource_group, name):
+ try:
+ vm = self._compute_client.virtual_machines.get(resource_group,
+ name,
+ expand='instanceview')
+ except Exception as exc:
+ sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
+
+ return next((s.code.replace('PowerState/', '')
+ for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
+
+ def _add_host(self, vars):
+
+ host_name = self._to_safe(vars['name'])
+ resource_group = self._to_safe(vars['resource_group'])
+ operating_system_type = self._to_safe(vars['os_disk']['operating_system_type'].lower())
+ security_group = None
+ if vars.get('security_group'):
+ security_group = self._to_safe(vars['security_group'])
+
+ if self.group_by_os_family:
+ if not self._inventory.get(operating_system_type):
+ self._inventory[operating_system_type] = []
+ self._inventory[operating_system_type].append(host_name)
+
+ if self.group_by_resource_group:
+ if not self._inventory.get(resource_group):
+ self._inventory[resource_group] = []
+ self._inventory[resource_group].append(host_name)
+
+ if self.group_by_location:
+ if not self._inventory.get(vars['location']):
+ self._inventory[vars['location']] = []
+ self._inventory[vars['location']].append(host_name)
+
+ if self.group_by_security_group and security_group:
+ if not self._inventory.get(security_group):
+ self._inventory[security_group] = []
+ self._inventory[security_group].append(host_name)
+
+ self._inventory['_meta']['hostvars'][host_name] = vars
+ self._inventory['azure'].append(host_name)
+
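+        # Each tag yields two groups: one named after the tag key and one named
+        # 'key_value' (e.g. the tag {'db': 'database'} yields groups 'db' and 'db_database')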
+ if self.group_by_tag and vars.get('tags'):
+ for key, value in vars['tags'].items():
+ safe_key = self._to_safe(key)
+ safe_value = safe_key + '_' + self._to_safe(value)
+ if not self._inventory.get(safe_key):
+ self._inventory[safe_key] = []
+ if not self._inventory.get(safe_value):
+ self._inventory[safe_value] = []
+ self._inventory[safe_key].append(host_name)
+ self._inventory[safe_value].append(host_name)
+
+ def _json_format_dict(self, pretty=False):
+ # convert inventory to json
+ if pretty:
+ return json.dumps(self._inventory, sort_keys=True, indent=2)
+ else:
+ return json.dumps(self._inventory)
+
+ def _get_settings(self):
+ # Load settings from the .ini, if it exists. Otherwise,
+ # look for environment values.
+ file_settings = self._load_settings()
+ if file_settings:
+ for key in AZURE_CONFIG_SETTINGS:
+ if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
+ values = file_settings.get(key).split(',')
+ if len(values) > 0:
+ setattr(self, key, values)
+ elif file_settings.get(key):
+ val = self._to_boolean(file_settings[key])
+ setattr(self, key, val)
+ else:
+ env_settings = self._get_env_settings()
+ for key in AZURE_CONFIG_SETTINGS:
+ if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
+ values = env_settings.get(key).split(',')
+ if len(values) > 0:
+ setattr(self, key, values)
+ elif env_settings.get(key, None) is not None:
+ val = self._to_boolean(env_settings[key])
+ setattr(self, key, val)
+
+ def _parse_ref_id(self, reference):
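+        # Walk the '/'-separated reference id and map every other segment to the
+        # value that follows it, e.g. 'resourceGroups' -> '<group name>' and
+        # 'networkInterfaces' -> '<interface name>'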
+ response = {}
+ keys = reference.strip('/').split('/')
+ for index in range(len(keys)):
+ if index < len(keys) - 1 and index % 2 == 0:
+ response[keys[index]] = keys[index + 1]
+ return response
+
+ def _to_boolean(self, value):
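+        # Any value not recognized as an explicit "no"/"false" falls back to True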
+ if value in ['Yes', 'yes', 1, 'True', 'true', True]:
+ result = True
+ elif value in ['No', 'no', 0, 'False', 'false', False]:
+ result = False
+ else:
+ result = True
+ return result
+
+ def _get_env_settings(self):
+ env_settings = dict()
+ for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
+ env_settings[attribute] = os.environ.get(env_variable, None)
+ return env_settings
+
+ def _load_settings(self):
+ basename = os.path.splitext(os.path.basename(__file__))[0]
+ default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
+ path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
+ config = None
+ settings = None
+ try:
+ config = cp.ConfigParser()
+ config.read(path)
+ except Exception:
+ pass
+
+ if config is not None:
+ settings = dict()
+ for key in AZURE_CONFIG_SETTINGS:
+ try:
+ settings[key] = config.get('azure', key, raw=True)
+ except Exception:
+ pass
+
+ return settings
+
+ def _tags_match(self, tag_obj, tag_args):
+ '''
+ Return True if the tags object from a VM contains the requested tag values.
+
+ :param tag_obj: Dictionary of string:string pairs
+        :param tag_args: List of strings in the form 'key' or 'key:value'
+ :return: boolean
+ '''
+
+ if not tag_obj:
+ return False
+
+ matches = 0
+ for arg in tag_args:
+ arg_key = arg
+ arg_value = None
+ if re.search(r':', arg):
+ arg_key, arg_value = arg.split(':')
+ if arg_value and tag_obj.get(arg_key, None) == arg_value:
+ matches += 1
+ elif not arg_value and tag_obj.get(arg_key, None) is not None:
+ matches += 1
+ if matches == len(tag_args):
+ return True
+ return False
+
+ def _to_safe(self, word):
+ ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+ regex = r"[^A-Za-z0-9\_"
+ if not self.replace_dash_in_groups:
+ regex += r"\-"
+ return re.sub(regex + "]", "_", word)
+
+
+def main():
+ if not HAS_AZURE:
+ sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC))
+
+ AzureInventory()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.ini
new file mode 100644
index 00000000..e88c3631
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.ini
@@ -0,0 +1,39 @@
+# Copyright 2016 Doalitic.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# The Brook.io inventory script has the following dependencies:
+# 1. A working Brook.io account
+# See https://brook.io
+# 2. A valid token generated through the 'API token' panel of Brook.io
+# 3. The libbrook Python library.
+# See https://github.com/doalitic/libbrook
+#
+# Author: Francisco Ros <fjros@doalitic.com>
+
+[brook]
+# Valid API token (required).
+# E.g. 'Aed342a12A60433697281FeEe1a4037C'
+#
+api_token =
+
+# Project id within Brook.io, as obtained from the project settings (optional). If provided, the
+# generated inventory will only include the hosts that belong to that project. Otherwise, it will
+# include all hosts in projects the requesting user has access to. The response includes groups
+# 'project_x', where 'x' is the project name.
+# E.g. '2e8e099e1bc34cc0979d97ac34e9577b'
+#
+project_id =
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.py
new file mode 100644
index 00000000..1acd370e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/brook.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+# Copyright 2016 Doalitic.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Brook.io external inventory script
+==================================
+
+Generates inventory that Ansible can understand by making API requests to Brook.io via the libbrook
+library, so that dependency must be installed on the system to run this script.
+
+The default configuration file is named 'brook.ini' and is located alongside this script. You can
+choose any other file by setting the BROOK_INI_PATH environment variable.
+
+If param 'project_id' is left blank in 'brook.ini', the inventory includes all the instances in
+projects the requesting user belongs to. Otherwise, only instances from the given project are
+included, provided the requesting user belongs to it.
+
+The following variables are established for every host. They can be retrieved from the hostvars
+dictionary.
+ - brook_pid: str
+ - brook_name: str
+ - brook_description: str
+ - brook_project: str
+ - brook_template: str
+ - brook_region: str
+ - brook_zone: str
+ - brook_status: str
+ - brook_tags: list(str)
+ - brook_internal_ips: list(str)
+ - brook_external_ips: list(str)
+ - brook_created_at
+ - brook_updated_at
+ - ansible_ssh_host
+
+Instances are grouped by the following categories:
+ - tag:
+ A group is created for each tag. E.g. groups 'tag_foo' and 'tag_bar' are created if there exist
+ instances with tags 'foo' and/or 'bar'.
+ - project:
+ A group is created for each project. E.g. group 'project_test' is created if a project named
+    'test' exists.
+ - status:
+ A group is created for each instance state. E.g. groups 'status_RUNNING' and 'status_PENDING'
+ are created if there are instances in running and pending state.
+
+Examples:
+ Execute uname on all instances in project 'test'
+ $ ansible -i brook.py project_test -m shell -a "/bin/uname -a"
+
+ Install nginx on all debian web servers tagged with 'www'
+ $ ansible -i brook.py tag_www -m apt -a "name=nginx state=present"
+
+ Run site.yml playbook on web servers
+ $ ansible-playbook -i brook.py site.yml -l tag_www
+
+Support:
+ This script is tested on Python 2.7 and 3.4. It may work on other versions though.
+
+Author: Francisco Ros <fjros@doalitic.com>
+Version: 0.2
+"""
+
+
+import sys
+import os
+
+from ansible.module_utils.six.moves.configparser import SafeConfigParser as ConfigParser
+
+import json
+
+try:
+ import libbrook
+except Exception:
+ sys.exit('Brook.io inventory script requires libbrook. See https://github.com/doalitic/libbrook')
+
+
+class BrookInventory:
+
+ _API_ENDPOINT = 'https://api.brook.io'
+
+ def __init__(self):
+ self._configure_from_file()
+ self.client = self.get_api_client()
+ self.inventory = self.get_inventory()
+
+ def _configure_from_file(self):
+ """Initialize from .ini file.
+
+        The configuration file is assumed to be named 'brook.ini' and to be located in the same
+        directory as this file, unless the environment variable BROOK_INI_PATH says otherwise.
+ """
+
+ brook_ini_default_path = \
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), 'brook.ini')
+ brook_ini_path = os.environ.get('BROOK_INI_PATH', brook_ini_default_path)
+
+ config = ConfigParser(defaults={
+ 'api_token': '',
+ 'project_id': ''
+ })
+ config.read(brook_ini_path)
+ self.api_token = config.get('brook', 'api_token')
+ self.project_id = config.get('brook', 'project_id')
+
+ if not self.api_token:
+ sys.exit('You must provide (at least) your Brook.io API token to generate the dynamic '
+ 'inventory.')
+
+ def get_api_client(self):
+ """Authenticate user via the provided credentials and return the corresponding API client.
+ """
+
+ # Get JWT token from API token
+ #
+ unauthenticated_client = libbrook.ApiClient(host=self._API_ENDPOINT)
+ auth_api = libbrook.AuthApi(unauthenticated_client)
+ api_token = libbrook.AuthTokenRequest()
+ api_token.token = self.api_token
+ jwt = auth_api.auth_token(token=api_token)
+
+ # Create authenticated API client
+ #
+ return libbrook.ApiClient(host=self._API_ENDPOINT,
+ header_name='Authorization',
+ header_value='Bearer %s' % jwt.token)
+
+ def get_inventory(self):
+ """Generate Ansible inventory.
+ """
+
+ groups = dict()
+ meta = dict()
+ meta['hostvars'] = dict()
+
+ instances_api = libbrook.InstancesApi(self.client)
+ projects_api = libbrook.ProjectsApi(self.client)
+ templates_api = libbrook.TemplatesApi(self.client)
+
+ # If no project is given, get all projects the requesting user has access to
+ #
+ if not self.project_id:
+ projects = [project.id for project in projects_api.index_projects()]
+ else:
+ projects = [self.project_id]
+
+ # Build inventory from instances in all projects
+ #
+ for project_id in projects:
+ project = projects_api.show_project(project_id=project_id)
+ for instance in instances_api.index_instances(project_id=project_id):
+ # Get template used for this instance if known
+ template = templates_api.show_template(template_id=instance.template) if instance.template else None
+
+ # Update hostvars
+ try:
+ meta['hostvars'][instance.name] = \
+ self.hostvars(project, instance, template, instances_api)
+ except libbrook.rest.ApiException:
+ continue
+
+ # Group by project
+ project_group = 'project_%s' % project.name
+ if project_group in groups:
+ groups[project_group].append(instance.name)
+ else:
+ groups[project_group] = [instance.name]
+
+ # Group by status
+ status_group = 'status_%s' % meta['hostvars'][instance.name]['brook_status']
+ if status_group in groups:
+ groups[status_group].append(instance.name)
+ else:
+ groups[status_group] = [instance.name]
+
+ # Group by tags
+ tags = meta['hostvars'][instance.name]['brook_tags']
+ for tag in tags:
+ tag_group = 'tag_%s' % tag
+ if tag_group in groups:
+ groups[tag_group].append(instance.name)
+ else:
+ groups[tag_group] = [instance.name]
+
+ groups['_meta'] = meta
+ return groups
+
+ def hostvars(self, project, instance, template, api):
+ """Return the hostvars dictionary for the given instance.
+
+ Raise libbrook.rest.ApiException if it cannot retrieve all required information from the
+ Brook.io API.
+ """
+
+ hostvars = instance.to_dict()
+ hostvars['brook_pid'] = hostvars.pop('pid')
+ hostvars['brook_name'] = hostvars.pop('name')
+ hostvars['brook_description'] = hostvars.pop('description')
+ hostvars['brook_project'] = hostvars.pop('project')
+ hostvars['brook_template'] = hostvars.pop('template')
+ hostvars['brook_region'] = hostvars.pop('region')
+ hostvars['brook_zone'] = hostvars.pop('zone')
+ hostvars['brook_created_at'] = hostvars.pop('created_at')
+ hostvars['brook_updated_at'] = hostvars.pop('updated_at')
+ del hostvars['id']
+ del hostvars['key']
+ del hostvars['provider']
+ del hostvars['image']
+
+ # Substitute identifiers for names
+ #
+ hostvars['brook_project'] = project.name
+ hostvars['brook_template'] = template.name if template else None
+
+ # Retrieve instance state
+ #
+ status = api.status_instance(project_id=project.id, instance_id=instance.id)
+ hostvars.update({'brook_status': status.state})
+
+ # Retrieve instance tags
+ #
+ tags = api.instance_tags(project_id=project.id, instance_id=instance.id)
+ hostvars.update({'brook_tags': tags})
+
+ # Retrieve instance addresses
+ #
+ addresses = api.instance_addresses(project_id=project.id, instance_id=instance.id)
+ internal_ips = [address.address for address in addresses if address.scope == 'internal']
+ external_ips = [address.address for address in addresses
+ if address.address and address.scope == 'external']
+ hostvars.update({'brook_internal_ips': internal_ips})
+ hostvars.update({'brook_external_ips': external_ips})
+ try:
+ hostvars.update({'ansible_ssh_host': external_ips[0]})
+ except IndexError:
+ raise libbrook.rest.ApiException(status='502', reason='Instance without public IP')
+
+ return hostvars
+
+
+# Run the script
+#
+brook = BrookInventory()
+print(json.dumps(brook.inventory))
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.ini
new file mode 100644
index 00000000..30b9aa60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.ini
@@ -0,0 +1,40 @@
+[cloudforms]
+
+# the version of CloudForms; currently not used, but this is the version the script was tested with
+version = 4.1
+
+# This should be the base URL of the CloudForms server
+url = https://cfme.example.com
+
+# This will more than likely need to be a local CloudForms username
+username = <set your username here>
+
+# The password for said username
+password = <set your password here>
+
+# True = verify SSL certificate / False = trust anything
+ssl_verify = True
+
+# limit the number of vms returned per request
+limit = 100
+
+# purge the CloudForms actions from hosts
+purge_actions = True
+
+# Clean up group names (from tags and other groupings) so Ansible doesn't complain
+clean_group_keys = True
+
+# Explode tags into nested groups / subgroups
+nest_tags = False
+
+# If set, ensure host names are suffixed with this value
+# Note: This suffix *must* include the leading '.' as it is appended to the hostname as is
+# suffix = .example.org
+
+# If true, will try to use an IPv4 address for the ansible_ssh_host rather than just the first IP address in the list
+prefer_ipv4 = False
+
+[cache]
+
+# Maximum time to trust the cache in seconds
+max_age = 600
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.py
new file mode 100644
index 00000000..72b4419c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cloudforms.py
@@ -0,0 +1,470 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+#
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import argparse
+from ansible.module_utils.six.moves import configparser as ConfigParser
+import os
+import re
+from time import time
+import requests
+from requests.auth import HTTPBasicAuth
+import warnings
+from ansible.errors import AnsibleError
+
+import json
+
+
+class CloudFormsInventory(object):
+ def __init__(self):
+ """
+ Main execution path
+ """
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.hosts = dict() # Details about hosts in the inventory
+
+ # Parse CLI arguments
+ self.parse_cli_args()
+
+ # Read settings
+ self.read_settings()
+
+ # Cache
+ if self.args.refresh_cache or not self.is_cache_valid():
+ self.update_cache()
+ else:
+ self.load_inventory_from_cache()
+ self.load_hosts_from_cache()
+
+ data_to_print = ""
+
+ # Data to print
+ if self.args.host:
+ if self.args.debug:
+ print("Fetching host [%s]" % self.args.host)
+ data_to_print += self.get_host_info(self.args.host)
+ else:
+ self.inventory['_meta'] = {'hostvars': {}}
+ for hostname in self.hosts:
+ self.inventory['_meta']['hostvars'][hostname] = {
+ 'cloudforms': self.hosts[hostname],
+ }
+ # include the ansible_ssh_host in the top level
+ if 'ansible_ssh_host' in self.hosts[hostname]:
+ self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.hosts[hostname]['ansible_ssh_host']
+
+ data_to_print += self.json_format_dict(self.inventory, self.args.pretty)
+
+ print(data_to_print)
+
+ def is_cache_valid(self):
+ """
+ Determines whether the cache files have expired or are still valid
+ """
+ if self.args.debug:
+ print("Determining if cache [%s] is still valid (< %s seconds old)" % (self.cache_path_hosts, self.cache_max_age))
+
+ if os.path.isfile(self.cache_path_hosts):
+ mod_time = os.path.getmtime(self.cache_path_hosts)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_inventory):
+ if self.args.debug:
+ print("Cache is still valid!")
+ return True
+
+ if self.args.debug:
+ print("Cache is stale or does not exist.")
+
+ return False
+
+ def read_settings(self):
+ """
+ Reads the settings from the cloudforms.ini file
+ """
+ config = ConfigParser.SafeConfigParser()
+ config_paths = [
+ os.path.dirname(os.path.realpath(__file__)) + '/cloudforms.ini',
+ "/etc/ansible/cloudforms.ini",
+ ]
+
+ env_value = os.environ.get('CLOUDFORMS_INI_PATH')
+ if env_value is not None:
+ config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
+
+ if self.args.debug:
+ for config_path in config_paths:
+ print("Reading from configuration file [%s]" % config_path)
+
+ config.read(config_paths)
+
+ # CloudForms API related
+ if config.has_option('cloudforms', 'url'):
+ self.cloudforms_url = config.get('cloudforms', 'url')
+ else:
+ self.cloudforms_url = None
+
+ if not self.cloudforms_url:
+ warnings.warn("No url specified, expected something like 'https://cfme.example.com'")
+
+ if config.has_option('cloudforms', 'username'):
+ self.cloudforms_username = config.get('cloudforms', 'username')
+ else:
+ self.cloudforms_username = None
+
+ if not self.cloudforms_username:
+ warnings.warn("No username specified, you need to specify a CloudForms username.")
+
+ if config.has_option('cloudforms', 'password'):
+ self.cloudforms_pw = config.get('cloudforms', 'password', raw=True)
+ else:
+ self.cloudforms_pw = None
+
+ if not self.cloudforms_pw:
+ warnings.warn("No password specified, you need to specify a password for the CloudForms user.")
+
+ if config.has_option('cloudforms', 'ssl_verify'):
+ self.cloudforms_ssl_verify = config.getboolean('cloudforms', 'ssl_verify')
+ else:
+ self.cloudforms_ssl_verify = True
+
+ if config.has_option('cloudforms', 'version'):
+ self.cloudforms_version = config.get('cloudforms', 'version')
+ else:
+ self.cloudforms_version = None
+
+ if config.has_option('cloudforms', 'limit'):
+ self.cloudforms_limit = config.getint('cloudforms', 'limit')
+ else:
+ self.cloudforms_limit = 100
+
+ if config.has_option('cloudforms', 'purge_actions'):
+ self.cloudforms_purge_actions = config.getboolean('cloudforms', 'purge_actions')
+ else:
+ self.cloudforms_purge_actions = True
+
+ if config.has_option('cloudforms', 'clean_group_keys'):
+ self.cloudforms_clean_group_keys = config.getboolean('cloudforms', 'clean_group_keys')
+ else:
+ self.cloudforms_clean_group_keys = True
+
+ if config.has_option('cloudforms', 'nest_tags'):
+ self.cloudforms_nest_tags = config.getboolean('cloudforms', 'nest_tags')
+ else:
+ self.cloudforms_nest_tags = False
+
+ if config.has_option('cloudforms', 'suffix'):
+ self.cloudforms_suffix = config.get('cloudforms', 'suffix')
+ if self.cloudforms_suffix[0] != '.':
+ raise AnsibleError('Leading fullstop is required for Cloudforms suffix')
+ else:
+ self.cloudforms_suffix = None
+
+ if config.has_option('cloudforms', 'prefer_ipv4'):
+ self.cloudforms_prefer_ipv4 = config.getboolean('cloudforms', 'prefer_ipv4')
+ else:
+ self.cloudforms_prefer_ipv4 = False
+
+ # Ansible related
+ try:
+ group_patterns = config.get('ansible', 'group_patterns')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ group_patterns = "[]"
+
+ self.group_patterns = eval(group_patterns)
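+ # NOTE: group_patterns (read above) is passed to eval(), so the [ansible]
+ # section of cloudforms.ini is expected to hold a Python-style list literal
+ # of strings, for example (illustrative values only):
+ #   group_patterns = ["pattern_a", "pattern_b"]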
+
+ # Cache related
+ try:
+ cache_path = os.path.expanduser(config.get('cache', 'path'))
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ cache_path = '.'
+ (script, ext) = os.path.splitext(os.path.basename(__file__))
+ self.cache_path_hosts = cache_path + "/%s.hosts" % script
+ self.cache_path_inventory = cache_path + "/%s.inventory" % script
+ self.cache_max_age = config.getint('cache', 'max_age')
+
+ if self.args.debug:
+ print("CloudForms settings:")
+ print("cloudforms_url = %s" % self.cloudforms_url)
+ print("cloudforms_username = %s" % self.cloudforms_username)
+ print("cloudforms_pw = %s" % self.cloudforms_pw)
+ print("cloudforms_ssl_verify = %s" % self.cloudforms_ssl_verify)
+ print("cloudforms_version = %s" % self.cloudforms_version)
+ print("cloudforms_limit = %s" % self.cloudforms_limit)
+ print("cloudforms_purge_actions = %s" % self.cloudforms_purge_actions)
+ print("Cache settings:")
+ print("cache_max_age = %s" % self.cache_max_age)
+ print("cache_path_hosts = %s" % self.cache_path_hosts)
+ print("cache_path_inventory = %s" % self.cache_path_inventory)
+
+ def parse_cli_args(self):
+ """
+ Command line argument processing
+ """
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on CloudForms managed VMs')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print JSON output (default: False)')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to CloudForms (default: False - use cache files)')
+ parser.add_argument('--debug', action='store_true', default=False, help='Show debug output while running (default: False)')
+ self.args = parser.parse_args()
+
+ def _get_json(self, url):
+ """
+ Make a request and return the JSON
+ """
+ results = []
+
+ ret = requests.get(url,
+ auth=HTTPBasicAuth(self.cloudforms_username, self.cloudforms_pw),
+ verify=self.cloudforms_ssl_verify)
+
+ ret.raise_for_status()
+
+ try:
+ results = json.loads(ret.text)
+ except ValueError:
+ warnings.warn("Unexpected response from {0} ({1}): {2}".format(self.cloudforms_url, ret.status_code, ret.reason))
+ results = {}
+
+ if self.args.debug:
+ print("=======================================================================")
+ print("=======================================================================")
+ print("=======================================================================")
+ print(ret.text)
+ print("=======================================================================")
+ print("=======================================================================")
+ print("=======================================================================")
+
+ return results
+
+ def _get_hosts(self):
+ """
+ Get all hosts by paging through the results
+ """
+ limit = self.cloudforms_limit
+
+ page = 0
+ last_page = False
+
+ results = []
+
+ while not last_page:
+ offset = page * limit
+ ret = self._get_json("%s/api/vms?offset=%s&limit=%s&expand=resources,tags,hosts,&attributes=ipaddresses" % (self.cloudforms_url, offset, limit))
+ results += ret['resources']
+ if ret['subcount'] < limit:
+ last_page = True
+ page += 1
+
+ return results
+
+ def update_cache(self):
+ """
+ Make calls to cloudforms and save the output in a cache
+ """
+ self.groups = dict()
+ self.hosts = dict()
+
+ if self.args.debug:
+ print("Updating cache...")
+
+ for host in self._get_hosts():
+ if self.cloudforms_suffix is not None and not host['name'].endswith(self.cloudforms_suffix):
+ host['name'] = host['name'] + self.cloudforms_suffix
+
+ # Ignore VMs that are not powered on
+ if host['power_state'] != 'on':
+ if self.args.debug:
+ print("Skipping %s because power_state = %s" % (host['name'], host['power_state']))
+ continue
+
+ # purge actions
+ if self.cloudforms_purge_actions and 'actions' in host:
+ del host['actions']
+
+ # Create ansible groups for tags
+ if 'tags' in host:
+
+ # Create top-level group
+ if 'tags' not in self.inventory:
+ self.inventory['tags'] = dict(children=[], vars={}, hosts=[])
+
+ if not self.cloudforms_nest_tags:
+ # don't expand tags, just use them in a safe way
+ for group in host['tags']:
+ # Add sub-group, as a child of top-level
+ safe_key = self.to_safe(group['name'])
+ if safe_key:
+ if self.args.debug:
+ print("Adding sub-group '%s' to parent 'tags'" % safe_key)
+
+ if safe_key not in self.inventory['tags']['children']:
+ self.push(self.inventory['tags'], 'children', safe_key)
+
+ self.push(self.inventory, safe_key, host['name'])
+
+ if self.args.debug:
+ print("Found tag [%s] for host which will be mapped to [%s]" % (group['name'], safe_key))
+ else:
+ # expand the tags into nested groups / sub-groups
+ # Create nested groups for tags
+ safe_parent_tag_name = 'tags'
+ for tag in host['tags']:
+ tag_hierarchy = tag['name'][1:].split('/')
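+ # Illustrative example: a tag named e.g. '/department/engineering/web'
+ # (leading character stripped) yields ['department', 'engineering', 'web'],
+ # which the loop below turns into nested groups department -> engineering -> web.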
+
+ if self.args.debug:
+ print("Working on list %s" % tag_hierarchy)
+
+ for tag_name in tag_hierarchy:
+ if self.args.debug:
+ print("Working on tag_name = %s" % tag_name)
+
+ safe_tag_name = self.to_safe(tag_name)
+ if self.args.debug:
+ print("Using sanitized name %s" % safe_tag_name)
+
+ # Create sub-group
+ if safe_tag_name not in self.inventory:
+ self.inventory[safe_tag_name] = dict(children=[], vars={}, hosts=[])
+
+ # Add sub-group, as a child of top-level
+ if safe_parent_tag_name:
+ if self.args.debug:
+ print("Adding sub-group '%s' to parent '%s'" % (safe_tag_name, safe_parent_tag_name))
+
+ if safe_tag_name not in self.inventory[safe_parent_tag_name]['children']:
+ self.push(self.inventory[safe_parent_tag_name], 'children', safe_tag_name)
+
+ # Make sure the next one uses this one as its parent
+ safe_parent_tag_name = safe_tag_name
+
+ # Add the host to the last tag
+ self.push(self.inventory[safe_parent_tag_name], 'hosts', host['name'])
+
+ # Set ansible_ssh_host to the first available ip address
+ if 'ipaddresses' in host and host['ipaddresses'] and isinstance(host['ipaddresses'], list):
+ # If no preference for IPv4, just use the first entry
+ if not self.cloudforms_prefer_ipv4:
+ host['ansible_ssh_host'] = host['ipaddresses'][0]
+ else:
+ # Before we search for an IPv4 address, set using the first entry in case we don't find any
+ host['ansible_ssh_host'] = host['ipaddresses'][0]
+ for currenthost in host['ipaddresses']:
+ if '.' in currenthost:
+ host['ansible_ssh_host'] = currenthost
+
+ # Create additional groups
+ for key in ('location', 'type', 'vendor'):
+ safe_key = self.to_safe(host[key])
+
+ # Create top-level group
+ if key not in self.inventory:
+ self.inventory[key] = dict(children=[], vars={}, hosts=[])
+
+ # Create sub-group
+ if safe_key not in self.inventory:
+ self.inventory[safe_key] = dict(children=[], vars={}, hosts=[])
+
+ # Add sub-group, as a child of top-level
+ if safe_key not in self.inventory[key]['children']:
+ self.push(self.inventory[key], 'children', safe_key)
+
+ if key in host:
+ # Add host to sub-group
+ self.push(self.inventory[safe_key], 'hosts', host['name'])
+
+ self.hosts[host['name']] = host
+ self.push(self.inventory, 'all', host['name'])
+
+ if self.args.debug:
+ print("Saving cached data")
+
+ self.write_to_cache(self.hosts, self.cache_path_hosts)
+ self.write_to_cache(self.inventory, self.cache_path_inventory)
+
+ def get_host_info(self, host):
+ """
+ Get variables about a specific host
+ """
+ if not self.hosts or len(self.hosts) == 0:
+ # Need to load hosts from the cache
+ self.load_hosts_from_cache()
+
+ if host not in self.hosts:
+ if self.args.debug:
+ print("[%s] not found in cache." % host)
+
+ # try updating the cache
+ self.update_cache()
+
+ if host not in self.hosts:
+ if self.args.debug:
+ print("[%s] does not exist after cache update." % host)
+ # host might not exist anymore
+ return self.json_format_dict({}, self.args.pretty)
+
+ return self.json_format_dict(self.hosts[host], self.args.pretty)
+
+ def push(self, d, k, v):
+ """
+ Safely puts a new entry onto an array.
+ """
+ if k in d:
+ d[k].append(v)
+ else:
+ d[k] = [v]
+
+ def load_inventory_from_cache(self):
+ """
+ Reads the inventory from the cache file and sets self.inventory
+ """
+ cache = open(self.cache_path_inventory, 'r')
+ json_inventory = cache.read()
+ self.inventory = json.loads(json_inventory)
+
+ def load_hosts_from_cache(self):
+ """
+ Reads the hosts from the cache file and sets self.hosts
+ """
+ cache = open(self.cache_path_hosts, 'r')
+ json_cache = cache.read()
+ self.hosts = json.loads(json_cache)
+
+ def write_to_cache(self, data, filename):
+ """
+ Writes data in JSON format to a file
+ """
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def to_safe(self, word):
+ """
+ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
+ """
+ if self.cloudforms_clean_group_keys:
+ regex = r"[^A-Za-z0-9\_]"
+ return re.sub(regex, "_", word.replace(" ", ""))
+ else:
+ return word
+
+ def json_format_dict(self, data, pretty=False):
+ """
+ Converts a dict to a JSON object and dumps it as a formatted string
+ """
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+CloudFormsInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.ini
new file mode 100644
index 00000000..2dc8cd33
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.ini
@@ -0,0 +1,24 @@
+# Ansible Cobbler external inventory script settings
+#
+
+[cobbler]
+
+host = http://PATH_TO_COBBLER_SERVER/cobbler_api
+
+# If API needs authentication add 'username' and 'password' options here.
+#username = foo
+#password = bar
+
+# API calls to Cobbler can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-cobbler.cache
+# - ansible-cobbler.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.py
new file mode 100644
index 00000000..eeb8f582
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/cobbler.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+
+"""
+Cobbler external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+This script is an example of sourcing that data from Cobbler
+(https://cobbler.github.io). With cobbler each --mgmt-class in cobbler
+will correspond to a group in Ansible, and --ks-meta variables will be
+passed down for use in templates or even in argument lines.
+
+NOTE: The cobbler system names will not be used. Make sure a
+cobbler --dns-name is set for each cobbler system. If a system
+appears with two DNS names we do not add it twice because we don't want
+ansible talking to it twice. The first one found will be used. If no
+--dns-name is set the system will NOT be visible to ansible. We do
+not add cobbler system names because there is no requirement in cobbler
+that those correspond to addresses.
+
+Tested with Cobbler 2.0.11.
+
+Changelog:
+ - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
+ higher performance at ansible startup. Groups are determined by owner rather than
+ default mgmt_classes. DNS name determined from hostname. cobbler values are written
+ to a 'cobbler' fact namespace
+
+ - 2013-09-01 pgehres: Refactored implementation to make use of caching and to
+ limit the number of connections to external cobbler server for performance.
+ Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
+
+"""
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+import argparse
+import os
+import re
+from time import time
+try: # Python 3
+ from xmlrpc.client import Server
+except ImportError: # Python 2
+ from xmlrpclib import Server
+
+import json
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
+# server, so it does not attempt to login with a username and password.
+# this will be addressed in a future version of this script.
+
+orderby_keyname = 'owners' # alternatively 'mgmt_classes'
+
+
+class CobblerInventory(object):
+
+ def __init__(self):
+
+ """ Main execution path """
+ self.conn = None
+
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.cache = dict() # Details about hosts in the inventory
+ self.ignore_settings = False # used to only look at env vars for settings.
+
+ # Read env vars, read settings, and parse CLI arguments
+ self.parse_env_vars()
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Cache
+ if self.args.refresh_cache:
+ self.update_cache()
+ elif not self.is_cache_valid():
+ self.update_cache()
+ else:
+ self.load_inventory_from_cache()
+ self.load_cache_from_cache()
+
+ data_to_print = ""
+
+ # Data to print
+ if self.args.host:
+ data_to_print += self.get_host_info()
+ else:
+ self.inventory['_meta'] = {'hostvars': {}}
+ for hostname in self.cache:
+ self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
+ data_to_print += self.json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def _connect(self):
+ if not self.conn:
+ self.conn = Server(self.cobbler_host, allow_none=True)
+ self.token = None
+ if self.cobbler_username is not None:
+ self.token = self.conn.login(self.cobbler_username, self.cobbler_password)
+
+ def is_cache_valid(self):
+ """ Determines if the cache files have expired, or if it is still valid """
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_inventory):
+ return True
+
+ return False
+
+ def read_settings(self):
+ """ Reads the settings from the cobbler.ini file """
+
+ if(self.ignore_settings):
+ return
+
+ config = ConfigParser.SafeConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
+
+ self.cobbler_host = config.get('cobbler', 'host')
+ self.cobbler_username = None
+ self.cobbler_password = None
+ if config.has_option('cobbler', 'username'):
+ self.cobbler_username = config.get('cobbler', 'username')
+ if config.has_option('cobbler', 'password'):
+ self.cobbler_password = config.get('cobbler', 'password')
+
+ # Cache related
+ cache_path = config.get('cobbler', 'cache_path')
+ self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
+ self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
+ self.cache_max_age = config.getint('cobbler', 'cache_max_age')
+
+ def parse_env_vars(self):
+ """ Reads the settings from the environment """
+
+ # Env. Vars:
+ # COBBLER_host
+ # COBBLER_username
+ # COBBLER_password
+ # COBBLER_cache_path
+ # COBBLER_cache_max_age
+ # COBBLER_ignore_settings
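+ #
+ # Illustrative invocation using only environment variables (values are
+ # assumptions, not defaults):
+ #   COBBLER_host=http://cobbler.example.com/cobbler_api COBBLER_ignore_settings=True ./cobbler.py --list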
+
+ self.cobbler_host = os.getenv('COBBLER_host', None)
+ self.cobbler_username = os.getenv('COBBLER_username', None)
+ self.cobbler_password = os.getenv('COBBLER_password', None)
+
+ # Cache related
+ cache_path = os.getenv('COBBLER_cache_path', None)
+ if(cache_path is not None):
+ self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
+ self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
+
+ self.cache_max_age = int(os.getenv('COBBLER_cache_max_age', "30"))
+
+ # ignore_settings is used to ignore the settings file, for use in Ansible
+ # Tower (or AWX) inventory scripts, so that missing settings do not throw Python exceptions.
+ if(os.getenv('COBBLER_ignore_settings', False) == "True"):
+ self.ignore_settings = True
+
+ def parse_cli_args(self):
+ """ Command line argument processing """
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def update_cache(self):
+ """ Make calls to cobbler and save the output in a cache """
+
+ self._connect()
+ self.groups = dict()
+ self.hosts = dict()
+ if self.token is not None:
+ data = self.conn.get_systems(self.token)
+ else:
+ data = self.conn.get_systems()
+
+ for host in data:
+ # Get the FQDN for the host and add it to the right groups
+ dns_name = host['hostname'] # None
+ ksmeta = None
+ interfaces = host['interfaces']
+ # hostname is often empty for non-static IP hosts
+ if dns_name == '':
+ for (iname, ivalue) in iteritems(interfaces):
+ if ivalue['management'] or not ivalue['static']:
+ this_dns_name = ivalue.get('dns_name', None)
+ dns_name = this_dns_name if this_dns_name else ''
+
+ if dns_name == '' or dns_name is None:
+ continue
+
+ status = host['status']
+ profile = host['profile']
+ classes = host[orderby_keyname]
+
+ if status not in self.inventory:
+ self.inventory[status] = []
+ self.inventory[status].append(dns_name)
+
+ if profile not in self.inventory:
+ self.inventory[profile] = []
+ self.inventory[profile].append(dns_name)
+
+ for cls in classes:
+ if cls not in self.inventory:
+ self.inventory[cls] = []
+ self.inventory[cls].append(dns_name)
+
+ # Since we already have all of the data for the host, update the host details as well
+
+ # The old way was ksmeta only -- provide backwards compatibility
+
+ self.cache[dns_name] = host
+ if "ks_meta" in host:
+ for key, value in iteritems(host["ks_meta"]):
+ self.cache[dns_name][key] = value
+
+ self.write_to_cache(self.cache, self.cache_path_cache)
+ self.write_to_cache(self.inventory, self.cache_path_inventory)
+
+ def get_host_info(self):
+ """ Get variables about a specific host """
+
+ if not self.cache or len(self.cache) == 0:
+ # Need to load the cache from the cache file
+ self.load_cache_from_cache()
+
+ if self.args.host not in self.cache:
+ # try updating the cache
+ self.update_cache()
+
+ if self.args.host not in self.cache:
+ # host might not exist anymore
+ return self.json_format_dict({}, True)
+
+ return self.json_format_dict(self.cache[self.args.host], True)
+
+ def push(self, my_dict, key, element):
+ """ Pushed an element onto an array that may not have been defined in the dict """
+
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def load_inventory_from_cache(self):
+ """ Reads the index from the cache file sets self.index """
+
+ cache = open(self.cache_path_inventory, 'r')
+ json_inventory = cache.read()
+ self.inventory = json.loads(json_inventory)
+
+ def load_cache_from_cache(self):
+ """ Reads the cache from the cache file sets self.cache """
+
+ cache = open(self.cache_path_cache, 'r')
+ json_cache = cache.read()
+ self.cache = json.loads(json_cache)
+
+ def write_to_cache(self, data, filename):
+ """ Writes data in JSON format to a file """
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def to_safe(self, word):
+ """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
+
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+ def json_format_dict(self, data, pretty=False):
+ """ Converts a dict to a JSON object and dumps it as a formatted string """
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+CobblerInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.ini
new file mode 100644
index 00000000..0ce0c2ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.ini
@@ -0,0 +1,57 @@
+# Ansible Collins external inventory script settings
+#
+
+[collins]
+
+# You should not have a trailing slash or collins
+# will not properly match the URI
+host = http://localhost:9000
+
+username = blake
+password = admin:first
+
+# Specifies a timeout for all HTTP requests to Collins.
+timeout_secs = 120
+
+# Specifies a maximum number of retries per Collins request.
+max_retries = 5
+
+# Specifies the number of results to return per paginated query as specified in
+# the Pagination section of the Collins API docs:
+# http://tumblr.github.io/collins/api.html
+results_per_query = 100
+
+# Specifies the Collins asset type which will be queried for; most typically
+# you'll want to leave this at the default of SERVER_NODE.
+asset_type = SERVER_NODE
+
+# Collins assets can optionally be assigned hostnames; this option will preference
+# the selection of an asset's hostname over an IP address as the primary identifier
+# in the Ansible inventory. Typically, this value should be set to true if assets
+# are assigned hostnames.
+prefer_hostnames = true
+
+# Within Collins, assets can be granted multiple IP addresses; this configuration
+# value specifies the index within the 'ADDRESSES' array as returned by the
+# following API endpoint:
+# http://tumblr.github.io/collins/api.html#api-ipam-asset-addresses-section
+ip_address_index = 0
+
+# Sets whether Collins instances in multiple datacenters will be queried.
+query_remote_dcs = false
+
+# API calls to Collins can involve large, substantial queries. For this reason,
+# we cache the results of an API call. Set this to the path you want cache files
+# to be written to. Two files will be written to this directory:
+# - ansible-collins.cache
+# - ansible-collins.index
+cache_path = /tmp
+
+# If errors occur while querying inventory, logging messages will be written
+# to a logfile in the specified directory:
+# - ansible-collins.log
+log_path = /tmp
+
+# The number of seconds that a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 600
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.py
new file mode 100644
index 00000000..f481649e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/collins.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python
+
+"""
+Collins external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+Collins is a hardware asset management system originally developed by
+Tumblr for tracking new hardware as it built out its own datacenters. It
+exposes a rich API for manipulating and querying one's hardware inventory,
+which makes it an ideal 'single point of truth' for driving systems
+automation like Ansible. Extensive documentation on Collins, including a quickstart,
+API docs, and a full reference manual, can be found here:
+
+http://tumblr.github.io/collins
+
+This script adds support to Ansible for obtaining a dynamic inventory of
+assets in your infrastructure, grouping them in Ansible by their useful attributes,
+and binding all facts provided by Collins to each host so that they can be used to
+drive automation. Some parts of this script were cribbed shamelessly from mdehaan's
+Cobbler inventory script.
+
+To use it, copy it to your repo and pass -i <collins script> to the ansible or
+ansible-playbook command; if you'd like to use it by default, simply copy collins.ini
+to /etc/ansible and this script to /etc/ansible/hosts.
+
+Alongside the options set in collins.ini, there are several environment variables
+that will be used instead of the configured values if they are set:
+
+ - COLLINS_USERNAME - specifies a username to use for Collins authentication
+ - COLLINS_PASSWORD - specifies a password to use for Collins authentication
+ - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying;
+ this can be used to run Ansible automation against different asset classes than
+ server nodes, such as network switches and PDUs
+ - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to
+ <location of collins.py>/collins.ini
+
+If errors are encountered during operation, this script will return an exit code of
+255; otherwise, it will return an exit code of 0.
+
+Collins attributes are accessible as variables in ansible via COLLINS['attribute_name'].
+
+Tested against Ansible 1.8.2 and Collins 1.3.0.
+"""
+
+# (c) 2014, Steve Salevan <steve.salevan@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+
+import argparse
+import logging
+import os
+import re
+import sys
+from time import time
+import traceback
+
+import json
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import configparser as ConfigParser
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+from ansible.module_utils.urls import open_url
+
+
+class CollinsDefaults(object):
+ ASSETS_API_ENDPOINT = '%s/api/assets'
+ SPECIAL_ATTRIBUTES = set([
+ 'CREATED',
+ 'DELETED',
+ 'UPDATED',
+ 'STATE',
+ ])
+ LOG_FORMAT = '%(asctime)-15s %(message)s'
+
+
+class Error(Exception):
+ pass
+
+
+class MaxRetriesError(Error):
+ pass
+
+
+class CollinsInventory(object):
+
+ def __init__(self):
+ """ Constructs CollinsInventory object and reads all configuration. """
+
+ self.inventory = dict() # A list of groups and the hosts in that group
+ self.cache = dict() # Details about hosts in the inventory
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
+ filename=self.log_location)
+ self.log = logging.getLogger('CollinsInventory')
+
+ def _asset_get_attribute(self, asset, attrib):
+ """ Returns a user-defined attribute from an asset if it exists; otherwise,
+ returns None. """
+
+ if 'ATTRIBS' in asset:
+ for attrib_block in asset['ATTRIBS'].keys():
+ if attrib in asset['ATTRIBS'][attrib_block]:
+ return asset['ATTRIBS'][attrib_block][attrib]
+ return None
+
+ def _asset_has_attribute(self, asset, attrib):
+ """ Returns whether a user-defined attribute is present on an asset. """
+
+ if 'ATTRIBS' in asset:
+ for attrib_block in asset['ATTRIBS'].keys():
+ if attrib in asset['ATTRIBS'][attrib_block]:
+ return True
+ return False
+
+ def run(self):
+ """ Main execution path """
+
+ # Updates cache if cache is not present or has expired.
+ successful = True
+ if self.args.refresh_cache:
+ successful = self.update_cache()
+ elif not self.is_cache_valid():
+ successful = self.update_cache()
+ else:
+ successful = self.load_inventory_from_cache()
+ successful &= self.load_cache_from_cache()
+
+ data_to_print = ""
+
+ # Data to print
+ if self.args.host:
+ data_to_print = self.get_host_info()
+
+ elif self.args.list:
+ # Display list of instances for inventory
+ data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
+
+ else: # default action with no options
+ data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
+
+ print(data_to_print)
+ return successful
+
+ def find_assets(self, attributes=None, operation='AND'):
+ """ Obtains Collins assets matching the provided attributes. """
+ attributes = {} if attributes is None else attributes
+
+ # Formats asset search query to locate assets matching attributes, using
+ # the CQL search feature as described here:
+ # http://tumblr.github.io/collins/recipes.html
+ attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
+ query_parameters = {
+ 'details': ['True'],
+ 'operation': [operation],
+ 'query': attributes_query,
+ 'remoteLookup': [str(self.query_remote_dcs)],
+ 'size': [self.results_per_query],
+ 'type': [self.collins_asset_type],
+ }
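+ # Illustrative final request URL (parameter order and values depend on the
+ # configuration; the host shown is the collins.ini default):
+ #   http://localhost:9000/api/assets?details=True&operation=AND&remoteLookup=False&size=100&type=SERVER_NODE&page=0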
+ assets = []
+ cur_page = 0
+ num_retries = 0
+ # Locates all assets matching the provided query, exhausting pagination.
+ while True:
+ if num_retries == self.collins_max_retries:
+ raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
+ query_parameters['page'] = cur_page
+ query_url = "%s?%s" % (
+ (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
+ urlencode(query_parameters, doseq=True)
+ )
+ try:
+ response = open_url(query_url,
+ timeout=self.collins_timeout_secs,
+ url_username=self.collins_username,
+ url_password=self.collins_password,
+ force_basic_auth=True)
+ json_response = json.loads(response.read())
+ # Adds any assets found to the array of assets.
+ assets += json_response['data']['Data']
+ # If we've retrieved all of our assets, breaks out of the loop.
+ if len(json_response['data']['Data']) == 0:
+ break
+ cur_page += 1
+ num_retries = 0
+ except Exception:
+ self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
+ num_retries += 1
+ return assets
+
+ def is_cache_valid(self):
+ """ Determines if the cache files have expired, or if it is still valid """
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_inventory):
+ return True
+
+ return False
+
+ def read_settings(self):
+ """ Reads the settings from the collins.ini file """
+
+ config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
+
+ config = ConfigParser.SafeConfigParser()
+ config.read(config_loc)
+
+ self.collins_host = config.get('collins', 'host')
+ self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
+ self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
+ self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
+ self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
+ self.collins_max_retries = config.getint('collins', 'max_retries')
+
+ self.results_per_query = config.getint('collins', 'results_per_query')
+ self.ip_address_index = config.getint('collins', 'ip_address_index')
+ self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
+ self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')
+
+ cache_path = config.get('collins', 'cache_path')
+ self.cache_path_cache = cache_path + \
+ '/ansible-collins-%s.cache' % self.collins_asset_type
+ self.cache_path_inventory = cache_path + \
+ '/ansible-collins-%s.index' % self.collins_asset_type
+ self.cache_max_age = config.getint('collins', 'cache_max_age')
+
+ log_path = config.get('collins', 'log_path')
+ self.log_location = log_path + '/ansible-collins.log'
+
+ def parse_cli_args(self):
+ """ Command line argument processing """
+
+ parser = argparse.ArgumentParser(
+ description='Produces an Ansible Inventory file based on Collins')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to Collins '
+ '(default: False - use cache files)')
+ parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
+ self.args = parser.parse_args()
+
+ def update_cache(self):
+ """ Make calls to Collins and saves the output in a cache """
+
+ self.cache = dict()
+ self.inventory = dict()
+
+ # Locates all server assets from Collins.
+ try:
+ server_assets = self.find_assets()
+ except Exception:
+ self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
+ return False
+
+ for asset in server_assets:
+ # Determines the index to retrieve the asset's IP address either by an
+ # attribute set on the Collins asset or the pre-configured value.
+ if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
+ ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
+ try:
+ ip_index = int(ip_index)
+ except Exception:
+ self.log.error(
+ "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s", asset,
+ ip_index)
+ else:
+ ip_index = self.ip_address_index
+
+ asset['COLLINS'] = {}
+
+ # Attempts to locate the asset's primary identifier (hostname or IP address),
+ # which will be used to index the asset throughout the Ansible inventory.
+ if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
+ asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
+ elif 'ADDRESSES' not in asset:
+ self.log.warning("No IP addresses found for asset '%s', skipping", asset)
+ continue
+ elif len(asset['ADDRESSES']) < ip_index + 1:
+ self.log.warning(
+ "No IP address found at index %s for asset '%s', skipping",
+ ip_index, asset)
+ continue
+ else:
+ asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']
+
+ # Adds an asset index to the Ansible inventory based upon unpacking
+ # the name of the asset's current STATE from its dictionary.
+ if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
+ state_inventory_key = self.to_safe(
+ 'STATE-%s' % asset['ASSET']['STATE']['NAME'])
+ self.push(self.inventory, state_inventory_key, asset_identifier)
+
+ # Indexes asset by all user-defined Collins attributes.
+ if 'ATTRIBS' in asset:
+ for attrib_block in asset['ATTRIBS'].keys():
+ for attrib in asset['ATTRIBS'][attrib_block].keys():
+ asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
+ attrib_key = self.to_safe('%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
+ self.push(self.inventory, attrib_key, asset_identifier)
+
+ # Indexes asset by all built-in Collins attributes.
+ for attribute in asset['ASSET'].keys():
+ if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
+ attribute_val = asset['ASSET'][attribute]
+ if attribute_val is not None:
+ attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
+ self.push(self.inventory, attrib_key, asset_identifier)
+
+ # Indexes asset by hardware product information.
+ if 'HARDWARE' in asset:
+ if 'PRODUCT' in asset['HARDWARE']['BASE']:
+ product = asset['HARDWARE']['BASE']['PRODUCT']
+ if product:
+ product_key = self.to_safe(
+ 'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
+ self.push(self.inventory, product_key, asset_identifier)
+
+ # Indexing now complete, adds the host details to the asset cache.
+ self.cache[asset_identifier] = asset
+
+ try:
+ self.write_to_cache(self.cache, self.cache_path_cache)
+ self.write_to_cache(self.inventory, self.cache_path_inventory)
+ except Exception:
+ self.log.error("Error while writing to cache:\n%s", traceback.format_exc())
+ return False
+ return True
+
+ def push(self, dictionary, key, value):
+ """ Adds a value to a list at a dictionary key, creating the list if it doesn't
+ exist. """
+
+ if key not in dictionary:
+ dictionary[key] = []
+ dictionary[key].append(value)
+
+ def get_host_info(self):
+ """ Get variables about a specific host. """
+
+ if not self.cache or len(self.cache) == 0:
+ # Need to load the cache from the cache file
+ self.load_cache_from_cache()
+
+ if self.args.host not in self.cache:
+ # try updating the cache
+ self.update_cache()
+
+ if self.args.host not in self.cache:
+ # host might not exist anymore
+ return self.json_format_dict({}, self.args.pretty)
+
+ return self.json_format_dict(self.cache[self.args.host], self.args.pretty)
+
+ def load_inventory_from_cache(self):
+ """ Reads the index from the cache file sets self.index """
+
+ try:
+ cache = open(self.cache_path_inventory, 'r')
+ json_inventory = cache.read()
+ self.inventory = json.loads(json_inventory)
+ return True
+ except Exception:
+ self.log.error("Error while loading inventory:\n%s",
+ traceback.format_exc())
+ self.inventory = {}
+ return False
+
+ def load_cache_from_cache(self):
+ """ Reads the cache from the cache file sets self.cache """
+
+ try:
+ cache = open(self.cache_path_cache, 'r')
+ json_cache = cache.read()
+ self.cache = json.loads(json_cache)
+ return True
+ except Exception:
+ self.log.error("Error while loading host cache:\n%s",
+ traceback.format_exc())
+ self.cache = {}
+ return False
+
+ def write_to_cache(self, data, filename):
+ """ Writes data in JSON format to a specified file. """
+
+ json_data = self.json_format_dict(data, self.args.pretty)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def to_safe(self, word):
+ """ Converts 'bad' characters in a string to underscores so they
+ can be used as Ansible groups """
+
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+ def json_format_dict(self, data, pretty=False):
+ """ Converts a dict to a JSON object and dumps it as a formatted string """
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+if __name__ == '__main__':
+ inventory = CollinsInventory()
+ if inventory.run():
+ sys.exit(0)
+ else:
+ sys.exit(-1)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.ini
new file mode 100644
index 00000000..d18a1494
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.ini
@@ -0,0 +1,54 @@
+# Ansible Consul external inventory script settings.
+
+[consul]
+
+#
+# Bulk load. Load all possible data before building the inventory JSON.
+# If true, the script works from in-memory data, which reduces JSON generation time drastically.
+#
+bulk_load = false
+
+# restrict included nodes to those from this datacenter
+#datacenter = nyc1
+
+# url of the consul cluster to query
+#url = http://demo.consul.io
+url = http://localhost:8500
+
+# suffix added to each service to create a group name, e.g. a service of 'redis' and
+# a suffix of '_servers' will add each address to the group 'redis_servers'
+servers_suffix = _servers
+
+#
+# By default, the final JSON is built from all available info in Consul.
+# If suffixes is true, service groups are added in addition to the basic information. See servers_suffix for details.
+# There are cases when speed is preferable to having service groups;
+# setting this to false will reduce script execution time drastically.
+#
+suffixes = true
+
+# if specified then the inventory will generate domain names that will resolve
+# via Consul's inbuilt DNS.
+#domain=consul
+
+# make groups from service tags. the name of the group is derived from the
+# service name and the tag name e.g. a service named nginx with tags ['master', 'v1']
+# will create groups nginx_master and nginx_v1
+tags = true
+
+# looks up the node name at the given path for a list of groups to which the
+# node should be added.
+kv_groups=ansible/groups
+
+# looks up the node name at the given path for a json dictionary of metadata that
+# should be attached as metadata for the node
+kv_metadata=ansible/metadata
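+
+# Illustrative example (names are assumptions): with kv_groups=ansible/groups,
+# node 'web-1' in datacenter 'dc1' is looked up at the key 'ansible/groups/dc1/web-1',
+# whose value should be a comma-separated group list such as: webservers,production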
+
+# looks up the health of each service and adds the node to 'up' and 'down' groups
+# based on the service availability
+#
+# !!!! if availability is true, suffixes also must be true. !!!!
+#
+availability = true
+available_suffix = _up
+unavailable_suffix = _down
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.py
new file mode 100644
index 00000000..00832241
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/consul_io.py
@@ -0,0 +1,527 @@
+#!/usr/bin/env python
+
+#
+# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+'''
+Consul.io inventory script (http://consul.io)
+=============================================
+
+Generates Ansible inventory from nodes in a Consul cluster. This script will
+group nodes by:
+ - datacenter,
+ - registered service
+ - service tags
+ - service status
+ - values from the k/v store
+
+This script can be run with the switches
+--list as expected groups all the nodes in all datacenters
+--datacenter, to restrict the nodes to a single datacenter
+--host to restrict the inventory to a single named node. (requires datacenter config)
+
+The configuration for this plugin is read from a consul_io.ini file located in the
+same directory as this inventory script. All config options in the config file
+are optional except the host and port, which must point to a valid agent or
+server running the HTTP API. For more information on enabling the endpoint, see:
+
+http://www.consul.io/docs/agent/options.html
+
+Other options include:
+
+'datacenter':
+
+which restricts the included nodes to those from the given datacenter
+This can also be set with the environmental variable CONSUL_DATACENTER
+
+'url':
+
+the URL of the Consul cluster. host, port and scheme are derived from the
+URL. If not specified, connection configuration defaults to http requests
+to localhost on port 8500.
+This can also be set with the environmental variable CONSUL_URL
+
+'domain':
+
+if specified then the inventory will generate domain names that will resolve
+via Consul's inbuilt DNS. The name is derived from the node name, datacenter
+and domain <node_name>.node.<datacenter>.<domain>. Note that you will need to
+have consul hooked into your DNS server for these to resolve. See the consul
+DNS docs for more info.
+
+'servers_suffix':
+
+defines the suffix to add to the service name when creating the service
+group, e.g. a service name of 'redis' and a suffix of '_servers' will add
+each node's address to the group 'redis_servers'. No suffix is added
+if this is not set.
+
+'tags':
+
+boolean flag defining whether service tags should be used to create inventory
+groups, e.g. an nginx service with the tags ['master', 'v1'] will create
+groups nginx_master and nginx_v1 to which the node running the service
+will be added. No tag groups are created if this is missing.
+
+'token':
+
+ACL token to use to authorize access to the key value store. May be required
+to retrieve the kv_groups and kv_metadata based on your consul configuration.
+
+'kv_groups':
+
+This is used to look up groups for a node in the key/value store. It specifies a
+path to which each discovered node's name will be added to create a key to query
+the key/value store. There it expects to find a comma-separated list of group
+names to which the node should be added, e.g. if the inventory contains node
+'nyc-web-1' in datacenter 'nyc-dc1' and kv_groups = 'ansible/groups' then the key
+'ansible/groups/nyc-dc1/nyc-web-1' will be queried for a group list. If this query
+returned 'test,honeypot' then the node is added to both groups.
+
+'kv_metadata':
+
+kv_metadata is used to look up metadata for each discovered node. Like kv_groups
+above it is used to build a path to look up in the kv store, where it expects to
+find a JSON dictionary of metadata entries. If found, each key/value pair in the
+dictionary is added to the metadata for the node, e.g. for node 'nyc-web-1' in datacenter
+'nyc-dc1' and kv_metadata = 'ansible/metadata', the key
+'ansible/metadata/nyc-dc1/nyc-web-1' should contain '{"database": "postgres"}'
+
+'availability':
+
+if true then availability groups will be created for each service. The node will
+be added to one of the groups based on the health status of the service. The
+group name is derived from the service name and the configurable availability
+suffixes
+
+'available_suffix':
+
+suffix that should be appended to the service availability groups for available
+services, e.g. if the suffix is '_up' and the service is nginx, then nodes with
+healthy nginx services will be added to the nginx_up group. Defaults to
+'_available'
+
+'unavailable_suffix':
+
+as above but for unhealthy services, defaults to '_unavailable'
+
+Note that if the inventory discovers an 'ssh' service running on a node it will
+register the port as ansible_ssh_port in the node's metadata and this port will
+be used to access the machine.
+
+'''
+
+import os
+import re
+import argparse
+import sys
+
+from ansible.module_utils.six.moves import configparser
+
+
+def get_log_filename():
+ tty_filename = '/dev/tty'
+ stdout_filename = '/dev/stdout'
+
+ if not os.path.exists(tty_filename):
+ return stdout_filename
+ if not os.access(tty_filename, os.W_OK):
+ return stdout_filename
+ if os.getenv('TEAMCITY_VERSION'):
+ return stdout_filename
+
+ return tty_filename
+
+
+def setup_logging():
+ filename = get_log_filename()
+
+ import logging.config
+ logging.config.dictConfig({
+ 'version': 1,
+ 'formatters': {
+ 'simple': {
+ 'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ },
+ },
+ 'root': {
+ 'level': os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_LEVEL', 'WARN'),
+ 'handlers': ['console'],
+ },
+ 'handlers': {
+ 'console': {
+ 'class': 'logging.FileHandler',
+ 'filename': filename,
+ 'formatter': 'simple',
+ },
+ },
+ 'loggers': {
+ 'iso8601': {
+ 'qualname': 'iso8601',
+ 'level': 'INFO',
+ },
+ },
+ })
+ logger = logging.getLogger('consul_io.py')
+ logger.debug('Invoked with %r', sys.argv)
+
+
+if os.getenv('ANSIBLE_INVENTORY_CONSUL_IO_LOG_ENABLED'):
+ setup_logging()
+
+
+import json
+
+try:
+ import consul
+except ImportError as e:
+ sys.exit("""failed=True msg='python-consul required for this module.
+See https://python-consul.readthedocs.io/en/latest/#installation'""")
+
+from ansible.module_utils.six import iteritems
+
+
+class ConsulInventory(object):
+
+ def __init__(self):
+ ''' Create an inventory based on the catalog of nodes and services
+ registered in a consul cluster'''
+ self.node_metadata = {}
+ self.nodes = {}
+ self.nodes_by_service = {}
+ self.nodes_by_tag = {}
+ self.nodes_by_datacenter = {}
+ self.nodes_by_kv = {}
+ self.nodes_by_availability = {}
+ self.current_dc = None
+ self.inmemory_kv = []
+ self.inmemory_nodes = []
+
+ config = ConsulConfig()
+ self.config = config
+
+ self.consul_api = config.get_consul_api()
+
+ if config.has_config('datacenter'):
+ if config.has_config('host'):
+ self.load_data_for_node(config.host, config.datacenter)
+ else:
+ self.load_data_for_datacenter(config.datacenter)
+ else:
+ self.load_all_data_consul()
+
+ self.combine_all_results()
+ print(json.dumps(self.inventory, sort_keys=True, indent=2))
+
+ def bulk_load(self, datacenter):
+ index, groups_list = self.consul_api.kv.get(self.config.kv_groups, recurse=True, dc=datacenter)
+ index, metadata_list = self.consul_api.kv.get(self.config.kv_metadata, recurse=True, dc=datacenter)
+ index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
+ self.inmemory_kv += groups_list
+ self.inmemory_kv += metadata_list
+ self.inmemory_nodes += nodes
+
+ def load_all_data_consul(self):
+ ''' cycle through each of the datacenters in the consul catalog and process
+ the nodes in each '''
+ self.datacenters = self.consul_api.catalog.datacenters()
+ for datacenter in self.datacenters:
+ self.current_dc = datacenter
+ self.bulk_load(datacenter)
+ self.load_data_for_datacenter(datacenter)
+
+ def load_availability_groups(self, node, datacenter):
+ '''check the health of each service on a node and add the node to either
+ an 'available' or 'unavailable' grouping. The suffix for each group can be
+ controlled from the config'''
+ if self.config.has_config('availability'):
+ for service_name, service in iteritems(node['Services']):
+ for node in self.consul_api.health.service(service_name)[1]:
+ if self.is_service_available(node, service_name):
+ suffix = self.config.get_availability_suffix(
+ 'available_suffix', '_available')
+ else:
+ suffix = self.config.get_availability_suffix(
+ 'unavailable_suffix', '_unavailable')
+ self.add_node_to_map(self.nodes_by_availability,
+ service_name + suffix, node['Node'])
+
+ def is_service_available(self, node, service_name):
+ '''check the availability of the service on the node, as well as the
+ availability of the node itself'''
+ consul_ok = service_ok = False
+ for check in node['Checks']:
+ if check['CheckID'] == 'serfHealth':
+ consul_ok = check['Status'] == 'passing'
+ elif check['ServiceName'] == service_name:
+ service_ok = check['Status'] == 'passing'
+ return consul_ok and service_ok
+
+ def consul_get_kv_inmemory(self, key):
+ result = list(filter(lambda x: x['Key'] == key, self.inmemory_kv))
+ return result.pop() if result else None
+
+ def consul_get_node_inmemory(self, node):
+ result = list(filter(lambda x: x['Node'] == node, self.inmemory_nodes))
+ return {"Node": result.pop(), "Services": {}} if result else None
+
+ def load_data_for_datacenter(self, datacenter):
+ '''processes all the nodes in a particular datacenter'''
+ if self.config.bulk_load == 'true':
+ nodes = self.inmemory_nodes
+ else:
+ index, nodes = self.consul_api.catalog.nodes(dc=datacenter)
+ for node in nodes:
+ self.add_node_to_map(self.nodes_by_datacenter, datacenter, node)
+ self.load_data_for_node(node['Node'], datacenter)
+
+ def load_data_for_node(self, node, datacenter):
+ '''loads the data for a single node, adding it to various groups based on
+ metadata retrieved from the kv store and service availability'''
+
+ if self.config.suffixes == 'true':
+ index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
+ else:
+ node_data = self.consul_get_node_inmemory(node)
+ node = node_data['Node']
+
+ self.add_node_to_map(self.nodes, 'all', node)
+ self.add_metadata(node_data, "consul_datacenter", datacenter)
+ self.add_metadata(node_data, "consul_nodename", node['Node'])
+
+ self.load_groups_from_kv(node_data)
+ self.load_node_metadata_from_kv(node_data)
+ if self.config.suffixes == 'true':
+ self.load_availability_groups(node_data, datacenter)
+ for name, service in node_data['Services'].items():
+ self.load_data_from_service(name, service, node_data)
+
+ def load_node_metadata_from_kv(self, node_data):
+ ''' load the json dict at the metadata path defined by the kv_metadata value
+ and the node name, and add each entry in the dictionary to the node's
+ metadata '''
+ node = node_data['Node']
+ if self.config.has_config('kv_metadata'):
+ key = "%s/%s/%s" % (self.config.kv_metadata, self.current_dc, node['Node'])
+ if self.config.bulk_load == 'true':
+ metadata = self.consul_get_kv_inmemory(key)
+ else:
+ index, metadata = self.consul_api.kv.get(key)
+ if metadata and metadata['Value']:
+ try:
+ metadata = json.loads(metadata['Value'])
+ for k, v in metadata.items():
+ self.add_metadata(node_data, k, v)
+ except Exception:
+ pass
+
+ def load_groups_from_kv(self, node_data):
+ ''' load the comma separated list of groups at the path defined by the
+ kv_groups config value and the node name, and add the node address to each
+ group found '''
+ node = node_data['Node']
+ if self.config.has_config('kv_groups'):
+ key = "%s/%s/%s" % (self.config.kv_groups, self.current_dc, node['Node'])
+ if self.config.bulk_load == 'true':
+ groups = self.consul_get_kv_inmemory(key)
+ else:
+ index, groups = self.consul_api.kv.get(key)
+ if groups and groups['Value']:
+ for group in groups['Value'].split(','):
+ self.add_node_to_map(self.nodes_by_kv, group.strip(), node)
+
+ def load_data_from_service(self, service_name, service, node_data):
+ '''process a service registered on a node, adding the node to a group with
+ the service name. Each service tag is extracted and the node is added to a
+ tag grouping also'''
+ self.add_metadata(node_data, "consul_services", service_name, True)
+
+ if self.is_service("ssh", service_name):
+ self.add_metadata(node_data, "ansible_ssh_port", service['Port'])
+
+ if self.config.has_config('servers_suffix'):
+ service_name = service_name + self.config.servers_suffix
+
+ self.add_node_to_map(self.nodes_by_service, service_name, node_data['Node'])
+ self.extract_groups_from_tags(service_name, service, node_data)
+
+ def is_service(self, target, name):
+ return name and (name.lower() == target.lower())
+
+ def extract_groups_from_tags(self, service_name, service, node_data):
+ '''iterates over each service tag and adds the node to groups derived from the
+ service and tag names e.g. nginx_master'''
+ if self.config.has_config('tags') and service['Tags']:
+ tags = service['Tags']
+ self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
+ for tag in service['Tags']:
+ tagname = service_name + '_' + tag
+ self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
+
+ def combine_all_results(self):
+ '''prunes and sorts all groupings for combination into the final map'''
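+ # The final structure is the usual dynamic inventory layout, roughly
+ # (group names are illustrative):
+ #   {"_meta": {"hostvars": {...}}, "all": [...], "dc1": [...], "nginx": [...]}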
+ self.inventory = {"_meta": {"hostvars": self.node_metadata}}
+ groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
+ self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
+ for grouping in groupings:
+ for name, addresses in grouping.items():
+ self.inventory[name] = sorted(list(set(addresses)))
+
+ def add_metadata(self, node_data, key, value, is_list=False):
+ ''' Pushes an element onto a metadata dict for the node, creating
+ the dict if it doesn't exist '''
+ key = self.to_safe(key)
+ node = self.get_inventory_name(node_data['Node'])
+
+ if node in self.node_metadata:
+ metadata = self.node_metadata[node]
+ else:
+ metadata = {}
+ self.node_metadata[node] = metadata
+ if is_list:
+ self.push(metadata, key, value)
+ else:
+ metadata[key] = value
+
+ def get_inventory_name(self, node_data):
+ '''return the ip or a node name that can be looked up in consul's dns'''
+ domain = self.config.domain
+ if domain:
+ node_name = node_data['Node']
+ if self.current_dc:
+ return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
+ else:
+ return '%s.node.%s' % (node_name, domain)
+ else:
+ return node_data['Address']
+
+ def add_node_to_map(self, map, name, node):
+ self.push(map, name, self.get_inventory_name(node))
+
+ def push(self, my_dict, key, element):
+ ''' Pushes an element onto an array that may not have been defined in the
+ dict '''
+ key = self.to_safe(key)
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def to_safe(self, word):
+ ''' Converts 'bad' characters in a string to underscores so they can be used
+ as Ansible groups '''
+ return re.sub(r'[^A-Za-z0-9\-\.]', '_', word)
+
+ def sanitize_dict(self, d):
+
+ new_dict = {}
+ for k, v in d.items():
+ if v is not None:
+ new_dict[self.to_safe(str(k))] = self.to_safe(str(v))
+ return new_dict
+
+ def sanitize_list(self, seq):
+ new_seq = []
+ for d in seq:
+ new_seq.append(self.sanitize_dict(d))
+ return new_seq
+
+
+class ConsulConfig(dict):
+
+ def __init__(self):
+ self.read_settings()
+ self.read_cli_args()
+ self.read_env_vars()
+
+ def has_config(self, name):
+ if hasattr(self, name):
+ return getattr(self, name)
+ else:
+ return False
+
+ def read_settings(self):
+ ''' Reads the settings from the consul_io.ini file (or consul.ini for backwards compatibility)'''
+ config = configparser.SafeConfigParser()
+ if os.path.isfile(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini'):
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul_io.ini')
+ else:
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/consul.ini')
+
+ config_options = ['host', 'token', 'datacenter', 'servers_suffix',
+ 'tags', 'kv_metadata', 'kv_groups', 'availability',
+ 'unavailable_suffix', 'available_suffix', 'url',
+ 'domain', 'suffixes', 'bulk_load']
+ for option in config_options:
+ value = None
+ if config.has_option('consul', option):
+ value = config.get('consul', option).lower()
+ setattr(self, option, value)
+
+ def read_cli_args(self):
+ ''' Command line argument processing '''
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on nodes in a Consul cluster')
+
+ parser.add_argument('--list', action='store_true',
+ help='Get all inventory variables from all nodes in the consul cluster')
+ parser.add_argument('--host', action='store',
+ help='Get all inventory variables about a specific consul node, '
+ 'requires datacenter set in consul.ini.')
+ parser.add_argument('--datacenter', action='store',
+ help='Get all inventory about a specific consul datacenter')
+
+ args = parser.parse_args()
+ arg_names = ['host', 'datacenter']
+
+ for arg in arg_names:
+ if getattr(args, arg):
+ setattr(self, arg, getattr(args, arg))
+
+ def read_env_vars(self):
+ env_var_options = ['datacenter', 'url']
+ for option in env_var_options:
+ value = None
+ env_var = 'CONSUL_' + option.upper()
+ if os.environ.get(env_var):
+ setattr(self, option, os.environ.get(env_var))
+
+ def get_availability_suffix(self, suffix, default):
+ if self.has_config(suffix):
+ return self.has_config(suffix)
+ return default
+
+ def get_consul_api(self):
+ '''get an instance of the api based on the supplied configuration'''
+ host = 'localhost'
+ port = 8500
+ token = None
+ scheme = 'http'
+
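+ # e.g. a configured url of https://consul.example.org:8501 (illustrative)
+ # would override these defaults with scheme 'https', host
+ # 'consul.example.org' and port 8501.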
+ if hasattr(self, 'url'):
+ from ansible.module_utils.six.moves.urllib.parse import urlparse
+ o = urlparse(self.url)
+ if o.hostname:
+ host = o.hostname
+ if o.port:
+ port = o.port
+ if o.scheme:
+ scheme = o.scheme
+
+ if hasattr(self, 'token'):
+ token = self.token
+ if not token:
+ token = 'anonymous'
+ return consul.Consul(host=host, port=port, token=token, scheme=scheme)
+
+
+ConsulInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.py
new file mode 100644
index 00000000..b029d1f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.py
@@ -0,0 +1,892 @@
+#!/usr/bin/env python
+#
+# (c) 2016 Paul Durivage <paul.durivage@gmail.com>
+# Chris Houseknecht <house@redhat.com>
+# James Tanner <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+Docker Inventory Script
+=======================
+The inventory script generates dynamic inventory by making API requests to one or more Docker APIs. It's dynamic
+because the inventory is generated at run-time rather than being read from a static file. The script generates the
+inventory by connecting to one or many Docker APIs and inspecting the containers it finds at each API. Which APIs the
+script contacts can be defined using environment variables or a configuration file.
+
+Requirements
+------------
+
+Using the docker modules requires having docker-py <https://docker-py.readthedocs.io/en/stable/>
+installed on the host running Ansible. To install docker-py:
+
+ pip install docker-py
+
+
+Run for Specific Host
+---------------------
+When run for a specific container using the --host option this script returns the following hostvars:
+
+{
+ "ansible_ssh_host": "",
+ "ansible_ssh_port": 0,
+ "docker_apparmorprofile": "",
+ "docker_args": [],
+ "docker_config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/hello"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": null,
+ "Hostname": "9f2f80b0a702",
+ "Image": "hello-world",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": null,
+ "WorkingDir": ""
+ },
+ "docker_created": "2016-04-18T02:05:59.659599249Z",
+ "docker_driver": "aufs",
+ "docker_execdriver": "native-0.2",
+ "docker_execids": null,
+ "docker_graphdriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "docker_hostconfig": {
+ "Binds": null,
+ "BlkioWeight": 0,
+ "CapAdd": null,
+ "CapDrop": null,
+ "CgroupParent": "",
+ "ConsoleSize": [
+ 0,
+ 0
+ ],
+ "ContainerIDFile": "",
+ "CpuPeriod": 0,
+ "CpuQuota": 0,
+ "CpuShares": 0,
+ "CpusetCpus": "",
+ "CpusetMems": "",
+ "Devices": null,
+ "Dns": null,
+ "DnsOptions": null,
+ "DnsSearch": null,
+ "ExtraHosts": null,
+ "GroupAdd": null,
+ "IpcMode": "",
+ "KernelMemory": 0,
+ "Links": null,
+ "LogConfig": {
+ "Config": {},
+ "Type": "json-file"
+ },
+ "LxcConf": null,
+ "Memory": 0,
+ "MemoryReservation": 0,
+ "MemorySwap": 0,
+ "MemorySwappiness": null,
+ "NetworkMode": "default",
+ "OomKillDisable": false,
+ "PidMode": "host",
+ "PortBindings": null,
+ "Privileged": false,
+ "PublishAllPorts": false,
+ "ReadonlyRootfs": false,
+ "RestartPolicy": {
+ "MaximumRetryCount": 0,
+ "Name": ""
+ },
+ "SecurityOpt": [
+ "label:disable"
+ ],
+ "UTSMode": "",
+ "Ulimits": null,
+ "VolumeDriver": "",
+ "VolumesFrom": null
+ },
+ "docker_hostnamepath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hostname",
+ "docker_hostspath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/hosts",
+ "docker_id": "9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14",
+ "docker_image": "0a6ba66e537a53a5ea94f7c6a99c534c6adb12e3ed09326d4bf3b38f7c3ba4e7",
+ "docker_logpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/9f2f80b0a702361d1ac432e6a-json.log",
+ "docker_mountlabel": "",
+ "docker_mounts": [],
+ "docker_name": "/hello-world",
+ "docker_networksettings": {
+ "Bridge": "",
+ "EndpointID": "",
+ "Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "HairpinMode": false,
+ "IPAddress": "",
+ "IPPrefixLen": 0,
+ "IPv6Gateway": "",
+ "LinkLocalIPv6Address": "",
+ "LinkLocalIPv6PrefixLen": 0,
+ "MacAddress": "",
+ "Networks": {
+ "bridge": {
+ "EndpointID": "",
+ "Gateway": "",
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "IPAddress": "",
+ "IPPrefixLen": 0,
+ "IPv6Gateway": "",
+ "MacAddress": ""
+ }
+ },
+ "Ports": null,
+ "SandboxID": "",
+ "SandboxKey": "",
+ "SecondaryIPAddresses": null,
+ "SecondaryIPv6Addresses": null
+ },
+ "docker_path": "/hello",
+ "docker_processlabel": "",
+ "docker_resolvconfpath": "/mnt/sda1/var/lib/docker/containers/9f2f80b0a702361d1ac432e6af816c19bda46da15c21264fb418c873de635a14/resolv.conf",
+ "docker_restartcount": 0,
+ "docker_short_id": "9f2f80b0a7023",
+ "docker_state": {
+ "Dead": false,
+ "Error": "",
+ "ExitCode": 0,
+ "FinishedAt": "2016-04-18T02:06:00.296619369Z",
+ "OOMKilled": false,
+ "Paused": false,
+ "Pid": 0,
+ "Restarting": false,
+ "Running": false,
+ "StartedAt": "2016-04-18T02:06:00.272065041Z",
+ "Status": "exited"
+ }
+}
+
+Groups
+------
+When run in --list mode (the default), container instances are grouped by:
+
+ - container id
+ - container name
+ - container short id
+ - image_name (image_<image name>)
+ - stack_name (stack_<stack name>)
+ - service_name (service_<service name>)
+ - docker_host
+ - running
+ - stopped
+
+
+Configuration:
+--------------
+You can control the behavior of the inventory script by passing arguments, defining environment variables, or
+creating a configuration file named docker.yml (sample provided in ansible/contrib/inventory). The order of precedence
+is command line args, then the docker.yml file and finally environment variables.
+
+Environment variables:
+......................
+
+To connect to a single Docker API the following variables can be defined in the environment to control the connection
+options. These are the same environment variables used by the Docker modules.
+
+ DOCKER_HOST
+ The URL or Unix socket path used to connect to the Docker API. Defaults to unix://var/run/docker.sock.
+
+ DOCKER_API_VERSION:
+ The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
+ by docker-py.
+
+ DOCKER_TIMEOUT:
+ The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
+
+ DOCKER_TLS:
+ Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server.
+ Defaults to False.
+
+ DOCKER_TLS_VERIFY:
+ Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ Defaults to False.
+
+ DOCKER_TLS_HOSTNAME:
+ When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults
+ to localhost.
+
+ DOCKER_CERT_PATH:
+ Path to the directory containing the client certificate, client key and CA certificate.
+
+ DOCKER_SSL_VERSION:
+ Provide a valid SSL version number. Default value determined by docker-py, which at the time of this writing
+ was 1.0
+
+In addition to the connection variables there are a couple of variables used to control the execution and output of the
+script:
+
+ DOCKER_CONFIG_FILE
+ Path to the configuration file. Defaults to ./docker.yml.
+
+ DOCKER_PRIVATE_SSH_PORT:
+ The private port (container port) on which SSH is listening for connections. Defaults to 22.
+
+ DOCKER_DEFAULT_IP:
+ The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
+
+
+Configuration File
+..................
+
+Using a configuration file provides a means for defining a set of Docker APIs from which to build an inventory.
+
+The default name of the file is derived from the name of the inventory script. By default the script will look for the
+basename of the script (i.e. docker) with an extension of '.yml'.
+
+You can also override the default name of the config file by defining DOCKER_CONFIG_FILE in the environment.
+
+Here's what you can define in docker.yml:
+
+ defaults
+ Defines a default connection. Defaults will be taken from this and applied to any values not provided
+ for a host defined in the hosts list.
+
+ hosts
+ If you wish to get inventory from more than one Docker host, define a hosts list.
+
+For the default host and each host in the hosts list define the following attributes:
+
+ host:
+ description: The URL or Unix socket path used to connect to the Docker API.
+ required: yes
+
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ default: false
+ required: false
+
+ tls_verify:
+ description: Connect using TLS and verify the authenticity of the Docker host server.
+ default: false
+ required: false
+
+ cert_path:
+ description: Path to the client's TLS certificate file.
+ default: null
+ required: false
+
+ cacert_path:
+ description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ default: null
+ required: false
+
+ key_path:
+ description: Path to the client's TLS key file.
+ default: null
+ required: false
+
+ version:
+ description: The Docker API version.
+ required: false
+ default: will be supplied by the docker-py module.
+
+ timeout:
+ description: The amount of time in seconds to wait on an API response.
+ required: false
+ default: 60
+
+ default_ip:
+ description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+ '0.0.0.0'.
+ required: false
+ default: 127.0.0.1
+
+ private_ssh_port:
+ description: The port containers use for SSH
+ required: false
+ default: 22
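+
+A minimal docker.yml combining these attributes might look like this (addresses
+and ports are illustrative):
+
+    defaults:
+      host: unix:///var/run/docker.sock
+      private_ssh_port: 22
+      default_ip: 127.0.0.1
+
+    hosts:
+      - host: tcp://10.45.5.16:4243
+        private_ssh_port: 2022
+        default_ip: 172.16.3.45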
+
+Examples
+--------
+
+# Connect to the Docker API on localhost port 4243 and format the JSON output
+DOCKER_HOST=tcp://localhost:4243 ./docker.py --pretty
+
+# Any container's ssh port exposed on 0.0.0.0 will be mapped to
+# another IP address (where Ansible will attempt to connect via SSH)
+DOCKER_DEFAULT_IP=1.2.3.4 ./docker.py --pretty
+
+# Run as input to a playbook:
+ansible-playbook -i ~/projects/ansible/contrib/inventory/docker.py docker_inventory_test.yml
+
+# Simple playbook to invoke with the above example:
+
+ - name: Test docker_inventory
+ hosts: all
+ connection: local
+ gather_facts: no
+ tasks:
+ - debug: msg="Container - {{ inventory_hostname }}"
+
+'''
+
+import os
+import sys
+import json
+import argparse
+import re
+import yaml
+
+from collections import defaultdict
+# Manipulation of the path is needed because the docker-py
+# module is imported by the name docker, and because this file
+# is also named docker
+for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]:
+ try:
+ del sys.path[sys.path.index(path)]
+ except Exception:
+ pass
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_ERROR = False
+
+try:
+ from docker.errors import APIError, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker.constants import DEFAULT_TIMEOUT_SECONDS, DEFAULT_DOCKER_API_VERSION
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+# Client has recently been split into DockerClient and APIClient
+try:
+ from docker import Client
+except ImportError as dummy:
+ try:
+ from docker import APIClient as Client
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+ class Client:
+ pass
+
+DEFAULT_DOCKER_CONFIG_FILE = os.path.splitext(os.path.basename(__file__))[0] + '.yml'
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = "localhost"
+DEFAULT_IP = '127.0.0.1'
+DEFAULT_SSH_PORT = '22'
+
+BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
+BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
+
+
+DOCKER_ENV_ARGS = dict(
+ config_file='DOCKER_CONFIG_FILE',
+ docker_host='DOCKER_HOST',
+ api_version='DOCKER_API_VERSION',
+ cert_path='DOCKER_CERT_PATH',
+ ssl_version='DOCKER_SSL_VERSION',
+ tls='DOCKER_TLS',
+ tls_verify='DOCKER_TLS_VERIFY',
+ tls_hostname='DOCKER_TLS_HOSTNAME',
+ timeout='DOCKER_TIMEOUT',
+ private_ssh_port='DOCKER_DEFAULT_SSH_PORT',
+ default_ip='DOCKER_DEFAULT_IP',
+)
+
+
+def fail(msg):
+ sys.stderr.write("%s\n" % msg)
+ sys.exit(1)
+
+
+def log(msg, pretty_print=False):
+ if pretty_print:
+ print(json.dumps(msg, sort_keys=True, indent=2))
+ else:
+ print(msg + u'\n')
+
+
+class AnsibleDockerClient(Client):
+ def __init__(self, auth_params, debug):
+
+ self.auth_params = auth_params
+ self.debug = debug
+ self._connect_params = self._get_connect_params()
+
+ try:
+ super(AnsibleDockerClient, self).__init__(**self._connect_params)
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ def fail(self, msg):
+ fail(msg)
+
+ def log(self, msg, pretty_print=False):
+ if self.debug:
+ log(msg, pretty_print)
+
+ def _get_tls_config(self, **kwargs):
+ self.log("get_tls_config:")
+ for key in kwargs:
+ self.log(" %s: %s" % (key, kwargs[key]))
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ self.fail("TLS config error: %s" % exc)
+
+ def _get_connect_params(self):
+ auth = self.auth_params
+
+ self.log("auth params:")
+ for key in auth:
+ self.log(" %s: %s" % (key, auth[key]))
+
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'])
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = self._get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'])
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'])
+ else:
+ tls_config = self._get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'])
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = self._get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'])
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = self._get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'])
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
+ "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
+ "You may also use TLS without verification by setting the tls parameter to true." \
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1))
+ self.fail(msg)
+ self.fail("SSL Exception: %s" % (error))
+
+
+class EnvArgs(object):
+ def __init__(self):
+ self.config_file = None
+ self.docker_host = None
+ self.api_version = None
+ self.cert_path = None
+ self.ssl_version = None
+ self.tls = None
+ self.tls_verify = None
+ self.tls_hostname = None
+ self.timeout = None
+ self.default_ssh_port = None
+ self.default_ip = None
+
+
+class DockerInventory(object):
+
+ def __init__(self):
+ self._args = self._parse_cli_args()
+ self._env_args = self._parse_env_args()
+ self.groups = defaultdict(list)
+ self.hostvars = defaultdict(dict)
+
+ def run(self):
+ config_from_file = self._parse_config_file()
+ if not config_from_file:
+ config_from_file = dict()
+ docker_hosts = self.get_hosts(config_from_file)
+
+ for host in docker_hosts:
+ client = AnsibleDockerClient(host, self._args.debug)
+ self.get_inventory(client, host)
+
+ if not self._args.host:
+ self.groups['docker_hosts'] = [host.get('docker_host') for host in docker_hosts]
+ self.groups['_meta'] = dict(
+ hostvars=self.hostvars
+ )
+ print(self._json_format_dict(self.groups, pretty_print=self._args.pretty))
+ else:
+ print(self._json_format_dict(self.hostvars.get(self._args.host, dict()), pretty_print=self._args.pretty))
+
+ sys.exit(0)
+
+ def get_inventory(self, client, host):
+
+ ssh_port = host.get('default_ssh_port')
+ default_ip = host.get('default_ip')
+ hostname = host.get('docker_host')
+
+ try:
+ containers = client.containers(all=True)
+ except Exception as exc:
+ self.fail("Error fetching containers for host %s - %s" % (hostname, str(exc)))
+
+ for container in containers:
+ id = container.get('Id')
+ short_id = id[:13]
+
+ try:
+ name = container.get('Names', list()).pop(0).lstrip('/')
+ except IndexError:
+ name = short_id
+
+ if not self._args.host or (self._args.host and self._args.host in [name, id, short_id]):
+ try:
+ inspect = client.inspect_container(id)
+ except Exception as exc:
+ self.fail("Error inspecting container %s - %s" % (name, str(exc)))
+
+ running = inspect.get('State', dict()).get('Running')
+
+ # Add container to groups
+ image_name = inspect.get('Config', dict()).get('Image')
+ if image_name:
+ self.groups["image_%s" % (image_name)].append(name)
+
+ stack_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.stack.namespace')
+ if stack_name:
+ self.groups["stack_%s" % stack_name].append(name)
+
+ service_name = inspect.get('Config', dict()).get('Labels', dict()).get('com.docker.swarm.service.name')
+ if service_name:
+ self.groups["service_%s" % service_name].append(name)
+
+ self.groups[id].append(name)
+ self.groups[name].append(name)
+ if short_id not in self.groups:
+ self.groups[short_id].append(name)
+ self.groups[hostname].append(name)
+
+ if running is True:
+ self.groups['running'].append(name)
+ else:
+ self.groups['stopped'].append(name)
+
+ # Figure out SSH IP and port
+ try:
+ # Look up the public-facing port NAT'ed to the SSH port.
+ port = client.port(container, ssh_port)[0]
+ except (IndexError, AttributeError, TypeError):
+ port = dict()
+
+ try:
+ ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+ except KeyError:
+ ip = ''
+
+ facts = dict(
+ ansible_ssh_host=ip,
+ ansible_ssh_port=port.get('HostPort', int()),
+ docker_name=name,
+ docker_short_id=short_id
+ )
+
+ for key in inspect:
+ fact_key = self._slugify(key)
+ facts[fact_key] = inspect.get(key)
+
+ self.hostvars[name].update(facts)
+
+ def _slugify(self, value):
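+ # e.g. 'NetworkSettings' becomes 'docker_networksettings', matching the
+ # docker_* hostvars shown in the documentation above.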
+ return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
+ def get_hosts(self, config):
+ '''
+ Determine the list of docker hosts we need to talk to.
+
+ :param config: dictionary read from config file. can be empty.
+ :return: list of connection dictionaries
+ '''
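+ # Each connection option is generally resolved in this order: the per-host
+ # entry from the config file, then the 'defaults' section, then command line
+ # arguments, then environment variables, and finally the built-in DEFAULT_*
+ # constants.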
+ hosts = list()
+
+ hosts_list = config.get('hosts')
+ defaults = config.get('defaults', dict())
+ self.log('defaults:')
+ self.log(defaults, pretty_print=True)
+ def_host = defaults.get('host')
+ def_tls = defaults.get('tls')
+ def_tls_verify = defaults.get('tls_verify')
+ def_tls_hostname = defaults.get('tls_hostname')
+ def_ssl_version = defaults.get('ssl_version')
+ def_cert_path = defaults.get('cert_path')
+ def_cacert_path = defaults.get('cacert_path')
+ def_key_path = defaults.get('key_path')
+ def_version = defaults.get('version')
+ def_timeout = defaults.get('timeout')
+ def_ip = defaults.get('default_ip')
+ def_ssh_port = defaults.get('private_ssh_port')
+
+ if hosts_list:
+ # use hosts from config file
+ for host in hosts_list:
+ docker_host = host.get('host') or def_host or self._args.docker_host or \
+ self._env_args.docker_host or DEFAULT_DOCKER_HOST
+ api_version = host.get('version') or def_version or self._args.api_version or \
+ self._env_args.api_version or DEFAULT_DOCKER_API_VERSION
+ tls_hostname = host.get('tls_hostname') or def_tls_hostname or self._args.tls_hostname or \
+ self._env_args.tls_hostname or DEFAULT_TLS_HOSTNAME
+ tls_verify = host.get('tls_verify') or def_tls_verify or self._args.tls_verify or \
+ self._env_args.tls_verify or DEFAULT_TLS_VERIFY
+ tls = host.get('tls') or def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
+ ssl_version = host.get('ssl_version') or def_ssl_version or self._args.ssl_version or \
+ self._env_args.ssl_version
+
+ cert_path = host.get('cert_path') or def_cert_path or self._args.cert_path or \
+ self._env_args.cert_path
+ if cert_path and cert_path == self._env_args.cert_path:
+ cert_path = os.path.join(cert_path, 'cert.pem')
+
+ cacert_path = host.get('cacert_path') or def_cacert_path or self._args.cacert_path or \
+ self._env_args.cert_path
+ if cacert_path and cacert_path == self._env_args.cert_path:
+ cacert_path = os.path.join(cacert_path, 'ca.pem')
+
+ key_path = host.get('key_path') or def_key_path or self._args.key_path or \
+ self._env_args.cert_path
+ if key_path and key_path == self._env_args.cert_path:
+ key_path = os.path.join(key_path, 'key.pem')
+
+ timeout = host.get('timeout') or def_timeout or self._args.timeout or self._env_args.timeout or \
+ DEFAULT_TIMEOUT_SECONDS
+ default_ip = host.get('default_ip') or def_ip or self._env_args.default_ip or \
+ self._args.default_ip_address or DEFAULT_IP
+ default_ssh_port = host.get('private_ssh_port') or def_ssh_port or self._args.private_ssh_port or \
+ DEFAULT_SSH_PORT
+ host_dict = dict(
+ docker_host=docker_host,
+ api_version=api_version,
+ tls=tls,
+ tls_verify=tls_verify,
+ tls_hostname=tls_hostname,
+ cert_path=cert_path,
+ cacert_path=cacert_path,
+ key_path=key_path,
+ ssl_version=ssl_version,
+ timeout=timeout,
+ default_ip=default_ip,
+ default_ssh_port=default_ssh_port,
+ )
+ hosts.append(host_dict)
+ else:
+ # use default definition
+ docker_host = def_host or self._args.docker_host or self._env_args.docker_host or DEFAULT_DOCKER_HOST
+ api_version = def_version or self._args.api_version or self._env_args.api_version or \
+ DEFAULT_DOCKER_API_VERSION
+ tls_hostname = def_tls_hostname or self._args.tls_hostname or self._env_args.tls_hostname or \
+ DEFAULT_TLS_HOSTNAME
+ tls_verify = def_tls_verify or self._args.tls_verify or self._env_args.tls_verify or DEFAULT_TLS_VERIFY
+ tls = def_tls or self._args.tls or self._env_args.tls or DEFAULT_TLS
+ ssl_version = def_ssl_version or self._args.ssl_version or self._env_args.ssl_version
+
+ cert_path = def_cert_path or self._args.cert_path or self._env_args.cert_path
+ if cert_path and cert_path == self._env_args.cert_path:
+ cert_path = os.path.join(cert_path, 'cert.pem')
+
+ cacert_path = def_cacert_path or self._args.cacert_path or self._env_args.cert_path
+ if cacert_path and cacert_path == self._env_args.cert_path:
+ cacert_path = os.path.join(cacert_path, 'ca.pem')
+
+ key_path = def_key_path or self._args.key_path or self._env_args.cert_path
+ if key_path and key_path == self._env_args.cert_path:
+ key_path = os.path.join(key_path, 'key.pem')
+
+ timeout = def_timeout or self._args.timeout or self._env_args.timeout or DEFAULT_TIMEOUT_SECONDS
+ default_ip = def_ip or self._env_args.default_ip or self._args.default_ip_address or DEFAULT_IP
+ default_ssh_port = def_ssh_port or self._args.private_ssh_port or DEFAULT_SSH_PORT
+ host_dict = dict(
+ docker_host=docker_host,
+ api_version=api_version,
+ tls=tls,
+ tls_verify=tls_verify,
+ tls_hostname=tls_hostname,
+ cert_path=cert_path,
+ cacert_path=cacert_path,
+ key_path=key_path,
+ ssl_version=ssl_version,
+ timeout=timeout,
+ default_ip=default_ip,
+ default_ssh_port=default_ssh_port,
+ )
+ hosts.append(host_dict)
+ self.log("hosts: ")
+ self.log(hosts, pretty_print=True)
+ return hosts
+
+ def _parse_config_file(self):
+ config = dict()
+ config_file = DEFAULT_DOCKER_CONFIG_FILE
+
+ if self._args.config_file:
+ config_file = self._args.config_file
+ elif self._env_args.config_file:
+ config_file = self._env_args.config_file
+
+ config_file = os.path.abspath(config_file)
+
+ if os.path.isfile(config_file):
+ with open(config_file) as f:
+ try:
+ config = yaml.safe_load(f.read())
+ except Exception as exc:
+ self.fail("Error: parsing %s - %s" % (config_file, str(exc)))
+ else:
+ msg = "Error: config file given by {} does not exist - " + config_file
+ if self._args.config_file:
+ self.fail(msg.format('command line argument'))
+ elif self._env_args.config_file:
+ self.fail(msg.format(DOCKER_ENV_ARGS.get('config_file')))
+ else:
+ self.log(msg.format('DEFAULT_DOCKER_CONFIG_FILE'))
+ return config
+
+ def log(self, msg, pretty_print=False):
+ if self._args.debug:
+ log(msg, pretty_print)
+
+ def fail(self, msg):
+ fail(msg)
+
+ def _parse_env_args(self):
+ args = EnvArgs()
+ for key, value in DOCKER_ENV_ARGS.items():
+ if os.environ.get(value):
+ val = os.environ.get(value)
+ if val in BOOLEANS_TRUE:
+ val = True
+ if val in BOOLEANS_FALSE:
+ val = False
+ setattr(args, key, val)
+ return args
+
+ def _parse_cli_args(self):
+ # Parse command line arguments
+
+ parser = argparse.ArgumentParser(
+ description='Return Ansible inventory for one or more Docker hosts.')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List all containers (default: True)')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Send debug messages to STDOUT')
+ parser.add_argument('--host', action='store',
+ help='Only get information for a specific container.')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty print JSON output (default: False)')
+ parser.add_argument('--config-file', action='store', default=None,
+ help="Name of the config file to use. Default is %s" % (DEFAULT_DOCKER_CONFIG_FILE))
+ parser.add_argument('--docker-host', action='store', default=None,
+ help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
+ % (DEFAULT_DOCKER_HOST))
+ parser.add_argument('--tls-hostname', action='store', default=None,
+ help="Host name to expect in TLS certs. Defaults to %s" % DEFAULT_TLS_HOSTNAME)
+ parser.add_argument('--api-version', action='store', default=None,
+ help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
+ parser.add_argument('--timeout', action='store', default=None,
+ help="Docker connection timeout in seconds. Defaults to %s"
+ % (DEFAULT_TIMEOUT_SECONDS))
+ parser.add_argument('--cacert-path', action='store', default=None,
+ help="Path to the TLS certificate authority pem file.")
+ parser.add_argument('--cert-path', action='store', default=None,
+ help="Path to the TLS certificate pem file.")
+ parser.add_argument('--key-path', action='store', default=None,
+ help="Path to the TLS encryption key pem file.")
+ parser.add_argument('--ssl-version', action='store', default=None,
+ help="TLS version number")
+ parser.add_argument('--tls', action='store_true', default=None,
+ help="Use TLS. Defaults to %s" % (DEFAULT_TLS))
+ parser.add_argument('--tls-verify', action='store_true', default=None,
+ help="Verify TLS certificates. Defaults to %s" % (DEFAULT_TLS_VERIFY))
+ parser.add_argument('--private-ssh-port', action='store', default=None,
+ help="Default private container SSH Port. Defaults to %s" % (DEFAULT_SSH_PORT))
+ parser.add_argument('--default-ip-address', action='store', default=None,
+ help="Default container SSH IP address. Defaults to %s" % (DEFAULT_IP))
+ return parser.parse_args()
+
+ def _json_format_dict(self, data, pretty_print=False):
+ # format inventory data for output
+ if pretty_print:
+ return json.dumps(data, sort_keys=True, indent=4)
+ else:
+ return json.dumps(data)
+
+
+def main():
+
+ if not HAS_DOCKER_PY:
+ fail("Failed to import docker-py. Try `pip install docker-py` - %s" % (HAS_DOCKER_ERROR))
+
+ DockerInventory().run()
+
+
+main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.yml b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.yml
new file mode 100644
index 00000000..97239392
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/docker.yml
@@ -0,0 +1,74 @@
+# This is the configuration file for the Docker inventory script: docker.py.
+#
+# You can define the following in this file:
+#
+# defaults
+# Defines a default connection. Defaults will be taken from this and applied to any values not provided
+# for a host defined in the hosts list.
+#
+# hosts
+# If you wish to get inventory from more than one Docker host, define a hosts list.
+#
+# For the default host and each host in the hosts list define the following attributes:
+#
+# host:
+# description: The URL or Unix socket path used to connect to the Docker API.
+# required: yes
+#
+# tls:
+# description: Connect using TLS without verifying the authenticity of the Docker host server.
+# default: false
+# required: false
+#
+# tls_verify:
+# description: Connect using TLS and verify the authenticity of the Docker host server.
+# default: false
+# required: false
+#
+# cert_path:
+# description: Path to the client's TLS certificate file.
+# default: null
+# required: false
+#
+# cacert_path:
+# description: Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+# default: null
+# required: false
+#
+# key_path:
+# description: Path to the client's TLS key file.
+# default: null
+# required: false
+#
+# version:
+# description: The Docker API version.
+# required: false
+# default: will be supplied by the docker-py module.
+#
+# timeout:
+# description: The amount of time in seconds to wait on an API response.
+# required: false
+# default: 60
+#
+# default_ip:
+# description: The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+# '0.0.0.0'.
+# required: false
+# default: 127.0.0.1
+#
+# private_ssh_port:
+# description: The port containers use for SSH
+# required: false
+# default: 22
+
+#defaults:
+# host: unix:///var/run/docker.sock
+# private_ssh_port: 22
+# default_ip: 127.0.0.1
+
+#hosts:
+# - host: tcp://10.45.5.16:4243
+# private_ssh_port: 2022
+# default_ip: 172.16.3.45
+# - host: tcp://localhost:4243
+# private_ssh_port: 2029
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/fleet.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/fleet.py
new file mode 100644
index 00000000..cc9537e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/fleet.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+"""
+fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
+returns them under the host group 'coreos'
+"""
+
+# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Thanks to the vagrant.py inventory script for giving me the basic structure
+# of this.
+#
+
+import sys
+import subprocess
+import re
+import string
+from optparse import OptionParser
+import json
+
+# Options
+# ------------------------------
+
+parser = OptionParser(usage="%prog [options] --list | --host <machine>")
+parser.add_option('--list', default=False, dest="list", action="store_true",
+ help="Produce a JSON consumable grouping of servers in your fleet")
+parser.add_option('--host', default=None, dest="host",
+ help="Generate additional host specific details for given host for Ansible")
+(options, args) = parser.parse_args()
+
+#
+# helper functions
+#
+
+
+def get_ssh_config():
+ configs = []
+ for box in list_running_boxes():
+ config = get_a_ssh_config(box)
+ configs.append(config)
+ return configs
+
+
+# list all the running instances in the fleet
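+# Each output line of `fleetctl list-machines` looks roughly like (illustrative):
+#   76f34679-...    10.0.0.1    role=node
+# The regex below extracts the second whitespace-separated column (the IP) and
+# skips the header row, whose second column is the literal string "IP".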
+def list_running_boxes():
+ boxes = []
+ for line in subprocess.check_output(["fleetctl", "list-machines"]).decode('utf-8').split('\n'):
+ matcher = re.search(r"[^\s]+[\s]+([^\s]+).+", line)
+ if matcher and matcher.group(1) != "IP":
+ boxes.append(matcher.group(1))
+
+ return boxes
+
+
+def get_a_ssh_config(box_name):
+ config = {}
+ config['Host'] = box_name
+ config['ansible_ssh_user'] = 'core'
+ config['ansible_python_interpreter'] = '/opt/bin/python'
+ return config
+
+
+# List out servers that fleet has running
+# ------------------------------
+if options.list:
+ ssh_config = get_ssh_config()
+ hosts = {'coreos': []}
+
+ for data in ssh_config:
+ hosts['coreos'].append(data['Host'])
+
+ print(json.dumps(hosts))
+ sys.exit(1)
+
+# Get out the host details
+# ------------------------------
+elif options.host:
+ result = {}
+ ssh_config = get_ssh_config()
+
+ details = list(filter(lambda x: (x['Host'] == options.host), ssh_config))
+ if len(details) > 0:
+ # pass through the port, in case it's non standard.
+ result = details[0]
+
+ print(json.dumps(result))
+ sys.exit(1)
+
+
+# Print out help
+# ------------------------------
+else:
+ parser.print_help()
+ sys.exit(1)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.ini
new file mode 100644
index 00000000..d1579638
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.ini
@@ -0,0 +1,200 @@
+# Foreman inventory (https://github.com/theforeman/foreman_ansible_inventory)
+#
+# This script can be used as an Ansible dynamic inventory.
+# The connection parameters are set up via *foreman.ini*
+# This is how the script finds the configuration file, in
+# order of discovery.
+#
+# * `/etc/ansible/foreman.ini`
+# * Current directory of your inventory script.
+# * `FOREMAN_INI_PATH` environment variable.
+#
+# ## Variables and Parameters
+#
+# The data returned from Foreman for each host is stored in a foreman
+# hash so they're available as *host_vars* along with the parameters
+# of the host and its hostgroups:
+#
+# "foo.example.com": {
+# "foreman": {
+# "architecture_id": 1,
+# "architecture_name": "x86_64",
+# "build": false,
+# "build_status": 0,
+# "build_status_label": "Installed",
+# "capabilities": [
+# "build",
+# "image"
+# ],
+# "compute_profile_id": 4,
+# "hostgroup_name": "webtier/myapp",
+# "id": 70,
+# "image_name": "debian8.1",
+# ...
+# "uuid": "50197c10-5ebb-b5cf-b384-a1e203e19e77"
+# },
+# "foreman_params": {
+# "testparam1": "foobar",
+# "testparam2": "small",
+# ...
+# }
+#
+# and could therefore be used in Ansible like:
+#
+# - debug: msg="From Foreman host {{ foreman['uuid'] }}"
+#
+# Which yields
+#
+# TASK [test_foreman : debug] ****************************************************
+# ok: [foo.example.com] => {
+# "msg": "From Foreman host 50190bd1-052a-a34a-3c9c-df37a39550bf"
+# }
+#
+# ## Automatic Ansible groups
+#
+# The inventory will provide a set of groups, by default prefixed by
+# 'foreman_'. If you want to customize this prefix, change the
+# group_prefix option in /etc/ansible/foreman.ini. The rest of this
+# guide will assume the default prefix of 'foreman'
+#
+# The hostgroup, location, organization, content view, and lifecycle
+# environment of each host are created as Ansible groups with a
+# foreman_<grouptype> prefix, all lowercase and with problematic characters
+# removed. So e.g. the foreman hostgroup
+#
+# myapp / webtier / datacenter1
+#
+# would turn into the Ansible group:
+#
+# foreman_hostgroup_myapp_webtier_datacenter1
+#
+# If the parameter want_hostcollections is set to true, the
+# collections each host is in are created as Ansible groups with a
+# foreman_hostcollection prefix, all lowercase and with problematic
+# characters removed. So e.g. the Foreman host collection
+#
+# Patch Window Thursday
+#
+# would turn into the Ansible group:
+#
+# foreman_hostcollection_patchwindowthursday
+#
+# If the parameter host_filters is set, it will be used as the
+# "search" parameter for the /api/v2/hosts call. This can be used to
+# restrict the list of returned hosts, as shown below.
+#
+# Furthermore Ansible groups can be created on the fly using the
+# *group_patterns* variable in *foreman.ini* so that you can build up
+# hierarchies using parameters on the hostgroup and host variables.
+#
+# Let's assume you have a host that is built using this nested hostgroup:
+#
+# myapp / webtier / datacenter1
+#
+# and each of the hostgroups defines a parameter, respectively:
+#
+# myapp: app_param = myapp
+# webtier: tier_param = webtier
+# datacenter1: dc_param = datacenter1
+#
+# The host is also in a subnet called "mysubnet" and provisioned via an image;
+# then *group_patterns* like:
+#
+# [ansible]
+# group_patterns = ["{app_param}-{tier_param}-{dc_param}",
+# "{app_param}-{tier_param}",
+# "{app_param}",
+# "{subnet_name}-{provision_method}"]
+#
+# would put the host into the additional Ansible groups:
+#
+# - myapp-webtier-datacenter1
+# - myapp-webtier
+# - myapp
+# - mysubnet-image
+#
+# by recursively resolving the hostgroups, getting the parameter keys
+# and values, and doing a Python *string.format()*-like replacement on
+# it.
+#
+[foreman]
+url = http://localhost:3000/
+user = foreman
+password = secret
+ssl_verify = True
+
+# Foreman 1.24 introduces a new reports API to improve performance of the inventory script.
+# Note: This requires foreman_ansible plugin installed.
+# Set to False if you want to use the old API. Defaults to True.
+
+use_reports_api = True
+
+# Retrieve only hosts from the organization "Web Engineering".
+# host_filters = organization="Web Engineering"
+
+# Retrieve only hosts from the organization "Web Engineering" that are
+# also in the host collection "Apache Servers".
+# host_filters = organization="Web Engineering" and host_collection="Apache Servers"
+
+# Foreman Inventory report related configuration options.
+# Configs that default to True :
+# want_organization , want_location, want_ipv4, want_host_group, want_subnet, want_smart_proxies, want_facts
+# Configs that default to False :
+# want_ipv6, want_subnet_v6, want_content_facet_attributes, want_host_params
+
+[report]
+# want_organization = True
+# want_location = True
+# want_ipv4 = True
+# want_ipv6 = False
+# want_host_group = True
+# want_subnet = True
+# want_subnet_v6 = False
+# want_smart_proxies = True
+# want_content_facet_attributes = False
+# want_host_params = False
+
+# use this config to determine if facts are to be fetched in the report and stored on the hosts.
+# want_facts = False
+
+# Upon receiving a request to return an inventory report, Foreman schedules a report generation job.
+# The script then polls the report_data endpoint repeatedly to check whether the job is complete and retrieves the data.
+# poll_interval defines the polling interval, in seconds, between two consecutive calls to the report_data endpoint.
+# Defaults to 10 seconds.
+
+# poll_interval = 10
+
+[ansible]
+group_patterns = ["{app}-{tier}-{color}",
+ "{app}-{color}",
+ "{app}",
+ "{tier}"]
+group_prefix = foreman_
+
+# Whether to fetch facts from Foreman and store them on the host
+want_facts = True
+
+# Whether to create Ansible groups for host collections. Only tested
+# with Katello (Red Hat Satellite). Disabled by default to not break
+# the script for stand-alone Foreman.
+want_hostcollections = False
+
+# Whether to interpret global parameter values as JSON (if possible, else
+# take them as is). Only tested with Katello (Red Hat Satellite).
+# This allows defining list and dictionary (and more complicated) variables
+# by entering them as JSON strings in Foreman parameters.
+# Disabled by default as the change would else not be backward compatible.
+rich_params = False
+
+# Whether to populate the ansible_ssh_host variable to explicitly specify the
+# connection target. Only tested with Katello (Red Hat Satellite).
+# If the foreman 'ip' fact exists then the ansible_ssh_host variable is populated
+# to permit connections where DNS resolution fails.
+want_ansible_ssh_host = False
+
+[cache]
+path = .
+max_age = 60
+
+# Whether to scan foreman to add recently created hosts in inventory cache
+scan_new_hosts = True
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.py
new file mode 100644
index 00000000..f2e729b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/foreman.py
@@ -0,0 +1,651 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+#
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>,
+# Daniel Lobato Garcia <dlobatog@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# This is somewhat based on cobbler inventory
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import argparse
+import copy
+import os
+import re
+import sys
+from time import time, sleep
+from collections import defaultdict
+from distutils.version import LooseVersion, StrictVersion
+
+# 3rd party imports
+import requests
+if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ print('This script requires python-requests 1.1 as a minimum version')
+ sys.exit(1)
+
+from requests.auth import HTTPBasicAuth
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+
+def json_format_dict(data, pretty=False):
+ """Converts a dict to a JSON object and dumps it as a formatted string"""
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+class ForemanInventory(object):
+
+ def __init__(self):
+ self.inventory = defaultdict(list) # A list of groups and the hosts in that group
+ self.cache = dict() # Details about hosts in the inventory
+ self.params = dict() # Params of each host
+ self.facts = dict() # Facts of each host
+ self.hostgroups = dict() # host groups
+ self.hostcollections = dict() # host collections
+ self.session = None # Requests session
+ self.config_paths = [
+ "/etc/ansible/foreman.ini",
+ os.path.dirname(os.path.realpath(__file__)) + '/foreman.ini',
+ ]
+ env_value = os.environ.get('FOREMAN_INI_PATH')
+ if env_value is not None:
+ self.config_paths.append(os.path.expanduser(os.path.expandvars(env_value)))
+
+ def read_settings(self):
+ """Reads the settings from the foreman.ini file"""
+
+ config = ConfigParser.SafeConfigParser()
+ config.read(self.config_paths)
+
+ # Foreman API related
+ try:
+ self.foreman_url = config.get('foreman', 'url')
+ self.foreman_user = config.get('foreman', 'user')
+ self.foreman_pw = config.get('foreman', 'password', raw=True)
+ self.foreman_ssl_verify = config.getboolean('foreman', 'ssl_verify')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError) as e:
+ print("Error parsing configuration: %s" % e, file=sys.stderr)
+ return False
+
+ # Inventory Report Related
+ try:
+ self.foreman_use_reports_api = config.getboolean('foreman', 'use_reports_api')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.foreman_use_reports_api = True
+
+ try:
+ self.want_organization = config.getboolean('report', 'want_organization')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_organization = True
+
+ try:
+ self.want_location = config.getboolean('report', 'want_location')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_location = True
+
+ try:
+ self.want_IPv4 = config.getboolean('report', 'want_ipv4')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_IPv4 = True
+
+ try:
+ self.want_IPv6 = config.getboolean('report', 'want_ipv6')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_IPv6 = False
+
+ try:
+ self.want_host_group = config.getboolean('report', 'want_host_group')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_host_group = True
+
+ try:
+ self.want_host_params = config.getboolean('report', 'want_host_params')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_host_params = False
+
+ try:
+ self.want_subnet = config.getboolean('report', 'want_subnet')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_subnet = True
+
+ try:
+ self.want_subnet_v6 = config.getboolean('report', 'want_subnet_v6')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_subnet_v6 = False
+
+ try:
+ self.want_smart_proxies = config.getboolean('report', 'want_smart_proxies')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_smart_proxies = True
+
+ try:
+ self.want_content_facet_attributes = config.getboolean('report', 'want_content_facet_attributes')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_content_facet_attributes = False
+
+ try:
+ self.report_want_facts = config.getboolean('report', 'want_facts')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.report_want_facts = True
+
+ try:
+ self.poll_interval = config.getint('report', 'poll_interval')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.poll_interval = 10
+
+ # Ansible related
+ try:
+ group_patterns = config.get('ansible', 'group_patterns')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ group_patterns = "[]"
+
+ self.group_patterns = json.loads(group_patterns)
+
+ try:
+ self.group_prefix = config.get('ansible', 'group_prefix')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.group_prefix = "foreman_"
+
+ try:
+ self.want_facts = config.getboolean('ansible', 'want_facts')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_facts = True
+
+ self.want_facts = self.want_facts and self.report_want_facts
+
+ try:
+ self.want_hostcollections = config.getboolean('ansible', 'want_hostcollections')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_hostcollections = False
+
+ try:
+ self.want_ansible_ssh_host = config.getboolean('ansible', 'want_ansible_ssh_host')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.want_ansible_ssh_host = False
+
+ # Do we want parameters to be interpreted if possible as JSON? (no by default)
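+        # For example (illustrative value): with rich_params enabled, a host
+        # parameter whose raw value is '["eu", "us"]' is exposed to Ansible as
+        # the list ['eu', 'us'] instead of the literal string.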
+ try:
+ self.rich_params = config.getboolean('ansible', 'rich_params')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.rich_params = False
+
+ try:
+ self.host_filters = config.get('foreman', 'host_filters')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.host_filters = None
+
+ # Cache related
+ try:
+ cache_path = os.path.expanduser(config.get('cache', 'path'))
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ cache_path = '.'
+ (script, ext) = os.path.splitext(os.path.basename(__file__))
+ self.cache_path_cache = cache_path + "/%s.cache" % script
+ self.cache_path_inventory = cache_path + "/%s.index" % script
+ self.cache_path_params = cache_path + "/%s.params" % script
+ self.cache_path_facts = cache_path + "/%s.facts" % script
+ self.cache_path_hostcollections = cache_path + "/%s.hostcollections" % script
+ try:
+ self.cache_max_age = config.getint('cache', 'max_age')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.cache_max_age = 60
+ try:
+ self.scan_new_hosts = config.getboolean('cache', 'scan_new_hosts')
+ except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ self.scan_new_hosts = False
+
+ return True
+
+ def parse_cli_args(self):
+ """Command line argument processing"""
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on foreman')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to foreman (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.auth = HTTPBasicAuth(self.foreman_user, self.foreman_pw)
+ self.session.verify = self.foreman_ssl_verify
+ return self.session
+
+ def _get_json(self, url, ignore_errors=None, params=None):
+ if params is None:
+ params = {}
+ params['per_page'] = 250
+
+ page = 1
+ results = []
+ s = self._get_session()
+ while True:
+ params['page'] = page
+ ret = s.get(url, params=params)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+            # /hosts/:id has no 'results' key
+ if 'results' not in json:
+ return json
+ # Facts are returned as dict in results not list
+ if isinstance(json['results'], dict):
+ return json['results']
+            # The list of all hosts is returned paginated
+ results = results + json['results']
+ if len(results) >= json['subtotal']:
+ break
+ page += 1
+ if len(json['results']) == 0:
+ print("Did not make any progress during loop. "
+ "expected %d got %d" % (json['total'], len(results)),
+ file=sys.stderr)
+ break
+ return results
+
+ def _use_inventory_report(self):
+ if not self.foreman_use_reports_api:
+ return False
+ status_url = "%s/api/v2/status" % self.foreman_url
+ result = self._get_json(status_url)
+ foreman_version = (LooseVersion(result.get('version')) >= LooseVersion('1.24.0'))
+ return foreman_version
+
+ def _fetch_params(self):
+ options, params = ("no", "yes"), dict()
+ params["Organization"] = options[self.want_organization]
+ params["Location"] = options[self.want_location]
+ params["IPv4"] = options[self.want_IPv4]
+ params["IPv6"] = options[self.want_IPv6]
+ params["Facts"] = options[self.want_facts]
+ params["Host Group"] = options[self.want_host_group]
+ params["Host Collections"] = options[self.want_hostcollections]
+ params["Subnet"] = options[self.want_subnet]
+ params["Subnet v6"] = options[self.want_subnet_v6]
+ params["Smart Proxies"] = options[self.want_smart_proxies]
+ params["Content Attributes"] = options[self.want_content_facet_attributes]
+ params["Host Parameters"] = options[self.want_host_params]
+ if self.host_filters:
+ params["Hosts"] = self.host_filters
+ return params
+
+ def _post_request(self):
+ url = "%s/ansible/api/v2/ansible_inventories/schedule" % self.foreman_url
+ session = self._get_session()
+ params = {'input_values': self._fetch_params()}
+ ret = session.post(url, json=params)
+ if not ret:
+ raise Exception("Error scheduling inventory report on foreman. Please check foreman logs!")
+ url = "{0}/{1}".format(self.foreman_url, ret.json().get('data_url'))
+ response = session.get(url)
+ while response:
+ if response.status_code != 204:
+ break
+ else:
+ sleep(self.poll_interval)
+ response = session.get(url)
+ if not response:
+ raise Exception("Error receiving inventory report from foreman. Please check foreman logs!")
+ else:
+ return response.json()
+
+ def _get_hosts(self):
+ url = "%s/api/v2/hosts" % self.foreman_url
+
+ params = {}
+ if self.host_filters:
+ params['search'] = self.host_filters
+
+ return self._get_json(url, params=params)
+
+ def _get_host_data_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _get_facts_by_id(self, hid):
+ url = "%s/api/v2/hosts/%s/facts" % (self.foreman_url, hid)
+ return self._get_json(url)
+
+ def _resolve_params(self, host_params):
+ """Convert host params to dict"""
+ params = {}
+
+ for param in host_params:
+ name = param['name']
+ if self.rich_params:
+ try:
+ params[name] = json.loads(param['value'])
+ except ValueError:
+ params[name] = param['value']
+ else:
+ params[name] = param['value']
+
+ return params
+
+ def _get_facts(self, host):
+ """Fetch all host facts of the host"""
+ if not self.want_facts:
+ return {}
+
+ ret = self._get_facts_by_id(host['id'])
+ if len(ret.values()) == 0:
+ facts = {}
+ elif len(ret.values()) == 1:
+ facts = list(ret.values())[0]
+ else:
+ raise ValueError("More than one set of facts returned for '%s'" % host)
+ return facts
+
+ def write_to_cache(self, data, filename):
+ """Write data in JSON format to a file"""
+ json_data = json_format_dict(data, True)
+        with open(filename, 'w') as cache:
+            cache.write(json_data)
+
+ def _write_cache(self):
+ self.write_to_cache(self.cache, self.cache_path_cache)
+ self.write_to_cache(self.inventory, self.cache_path_inventory)
+ self.write_to_cache(self.params, self.cache_path_params)
+ self.write_to_cache(self.facts, self.cache_path_facts)
+ self.write_to_cache(self.hostcollections, self.cache_path_hostcollections)
+
+ def to_safe(self, word):
+ '''Converts 'bad' characters in a string to underscores
+ so they can be used as Ansible groups
+
+ >>> ForemanInventory.to_safe("foo-bar baz")
+ 'foo_barbaz'
+ '''
+ regex = r"[^A-Za-z0-9\_]"
+ return re.sub(regex, "_", word.replace(" ", ""))
+
+ def update_cache(self, scan_only_new_hosts=False):
+ """Make calls to foreman and save the output in a cache"""
+ use_inventory_report = self._use_inventory_report()
+ if use_inventory_report:
+ self._update_cache_inventory(scan_only_new_hosts)
+ else:
+ self._update_cache_host_api(scan_only_new_hosts)
+
+ def _update_cache_inventory(self, scan_only_new_hosts):
+ self.groups = dict()
+ self.hosts = dict()
+ try:
+ inventory_report_response = self._post_request()
+ except Exception:
+ self._update_cache_host_api(scan_only_new_hosts)
+ return
+ host_data = json.loads(inventory_report_response)
+ for host in host_data:
+ if not(host) or (host["name"] in self.cache.keys() and scan_only_new_hosts):
+ continue
+ dns_name = host['name']
+
+ host_params = host.pop('host_parameters', {})
+ fact_list = host.pop('facts', {})
+ content_facet_attributes = host.get('content_attributes', {}) or {}
+
+ # Create ansible groups for hostgroup
+ group = 'host_group'
+ val = host.get(group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ # Create ansible groups for environment, location and organization
+ for group in ['environment', 'location', 'organization']:
+ val = host.get('%s' % group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ for group in ['lifecycle_environment', 'content_view']:
+ val = content_facet_attributes.get('%s_name' % group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ params = host_params
+
+ # Ansible groups by parameters in host groups and Foreman host
+ # attributes.
+ groupby = dict()
+ for k, v in params.items():
+ groupby[k] = self.to_safe(to_text(v))
+
+            # The names of the ansible groups are given by group_patterns:
+ for pattern in self.group_patterns:
+ try:
+ key = pattern.format(**groupby)
+ self.inventory[key].append(dns_name)
+ except KeyError:
+ pass # Host not part of this group
+
+ if self.want_hostcollections:
+ hostcollections = host.get('host_collections')
+
+ if hostcollections:
+ # Create Ansible groups for host collections
+ for hostcollection in hostcollections:
+ safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection.lower()))
+ self.inventory[safe_key].append(dns_name)
+
+ self.hostcollections[dns_name] = hostcollections
+
+ self.cache[dns_name] = host
+ self.params[dns_name] = params
+ self.facts[dns_name] = fact_list
+ self.inventory['all'].append(dns_name)
+ self._write_cache()
+
+ def _update_cache_host_api(self, scan_only_new_hosts):
+ """Make calls to foreman and save the output in a cache"""
+
+ self.groups = dict()
+ self.hosts = dict()
+
+ for host in self._get_hosts():
+ if host['name'] in self.cache.keys() and scan_only_new_hosts:
+ continue
+ dns_name = host['name']
+
+ host_data = self._get_host_data_by_id(host['id'])
+ host_params = host_data.get('all_parameters', {})
+
+ # Create ansible groups for hostgroup
+ group = 'hostgroup'
+ val = host.get('%s_title' % group) or host.get('%s_name' % group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ # Create ansible groups for environment, location and organization
+ for group in ['environment', 'location', 'organization']:
+ val = host.get('%s_name' % group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ for group in ['lifecycle_environment', 'content_view']:
+ val = host.get('content_facet_attributes', {}).get('%s_name' % group)
+ if val:
+ safe_key = self.to_safe('%s%s_%s' % (
+ to_text(self.group_prefix),
+ group,
+ to_text(val).lower()
+ ))
+ self.inventory[safe_key].append(dns_name)
+
+ params = self._resolve_params(host_params)
+
+ # Ansible groups by parameters in host groups and Foreman host
+ # attributes.
+ groupby = dict()
+ for k, v in params.items():
+ groupby[k] = self.to_safe(to_text(v))
+
+            # The names of the ansible groups are given by group_patterns:
+ for pattern in self.group_patterns:
+ try:
+ key = pattern.format(**groupby)
+ self.inventory[key].append(dns_name)
+ except KeyError:
+ pass # Host not part of this group
+
+ if self.want_hostcollections:
+ hostcollections = host_data.get('host_collections')
+
+ if hostcollections:
+ # Create Ansible groups for host collections
+ for hostcollection in hostcollections:
+ safe_key = self.to_safe('%shostcollection_%s' % (self.group_prefix, hostcollection['name'].lower()))
+ self.inventory[safe_key].append(dns_name)
+
+ self.hostcollections[dns_name] = hostcollections
+
+ self.cache[dns_name] = host
+ self.params[dns_name] = params
+ self.facts[dns_name] = self._get_facts(host)
+ self.inventory['all'].append(dns_name)
+ self._write_cache()
+
+ def is_cache_valid(self):
+ """Determines if the cache is still valid"""
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if (os.path.isfile(self.cache_path_inventory) and
+ os.path.isfile(self.cache_path_params) and
+ os.path.isfile(self.cache_path_facts)):
+ return True
+ return False
+
+ def load_inventory_from_cache(self):
+ """Read the index from the cache file sets self.index"""
+
+ with open(self.cache_path_inventory, 'r') as fp:
+ self.inventory = json.load(fp)
+
+ def load_params_from_cache(self):
+ """Read the index from the cache file sets self.index"""
+
+ with open(self.cache_path_params, 'r') as fp:
+ self.params = json.load(fp)
+
+ def load_facts_from_cache(self):
+ """Read the index from the cache file sets self.facts"""
+
+ if not self.want_facts:
+ return
+ with open(self.cache_path_facts, 'r') as fp:
+ self.facts = json.load(fp)
+
+ def load_hostcollections_from_cache(self):
+ """Read the index from the cache file sets self.hostcollections"""
+
+ if not self.want_hostcollections:
+ return
+ with open(self.cache_path_hostcollections, 'r') as fp:
+ self.hostcollections = json.load(fp)
+
+ def load_cache_from_cache(self):
+ """Read the cache from the cache file sets self.cache"""
+
+ with open(self.cache_path_cache, 'r') as fp:
+ self.cache = json.load(fp)
+
+ def get_inventory(self):
+ if self.args.refresh_cache or not self.is_cache_valid():
+ self.update_cache()
+ else:
+ self.load_inventory_from_cache()
+ self.load_params_from_cache()
+ self.load_facts_from_cache()
+ self.load_hostcollections_from_cache()
+ self.load_cache_from_cache()
+ if self.scan_new_hosts:
+ self.update_cache(True)
+
+ def get_host_info(self):
+ """Get variables about a specific host"""
+
+ if not self.cache or len(self.cache) == 0:
+            # Need to load the cache from disk
+ self.load_cache_from_cache()
+
+ if self.args.host not in self.cache:
+ # try updating the cache
+ self.update_cache()
+
+ if self.args.host not in self.cache:
+ # host might not exist anymore
+ return json_format_dict({}, True)
+
+ return json_format_dict(self.cache[self.args.host], True)
+
+ def _print_data(self):
+ data_to_print = ""
+ if self.args.host:
+ data_to_print += self.get_host_info()
+ else:
+ self.inventory['_meta'] = {'hostvars': {}}
+ for hostname in self.cache:
+ self.inventory['_meta']['hostvars'][hostname] = {
+ 'foreman': self.cache[hostname],
+ 'foreman_params': self.params[hostname],
+ }
+ if self.want_ansible_ssh_host and 'ip' in self.cache[hostname]:
+ self.inventory['_meta']['hostvars'][hostname]['ansible_ssh_host'] = self.cache[hostname]['ip']
+ if self.want_facts:
+ self.inventory['_meta']['hostvars'][hostname]['foreman_facts'] = self.facts[hostname]
+
+ data_to_print += json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def run(self):
+ # Read settings and parse CLI arguments
+ if not self.read_settings():
+ return False
+ self.parse_cli_args()
+ self.get_inventory()
+ self._print_data()
+ return True
+
+
+if __name__ == '__main__':
+ sys.exit(not ForemanInventory().run())
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/freeipa.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/freeipa.py
new file mode 100644
index 00000000..4a5bf678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/freeipa.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import argparse
+from distutils.version import LooseVersion
+import json
+import os
+import sys
+from ipalib import api, errors, __version__ as IPA_VERSION
+from ansible.module_utils.six import u
+
+
+def initialize():
+ '''
+ This function initializes the FreeIPA/IPA API. This function requires
+    no arguments. A kerberos key must be present in the user's keyring in
+    order for this to work. The IPA default configuration directory is /etc/ipa;
+    this path can be overridden with the IPA_CONFDIR environment variable.
+ '''
+
+ api.bootstrap(context='cli')
+
+ if not os.path.isdir(api.env.confdir):
+ print("WARNING: IPA configuration directory (%s) is missing. "
+ "Environment variable IPA_CONFDIR could be used to override "
+ "default path." % api.env.confdir)
+
+ if LooseVersion(IPA_VERSION) >= LooseVersion('4.6.2'):
+        # With ipalib < 4.6.0, 'server' and 'domain' have default values
+        # ('localhost:8888', 'example.com'); newer versions don't, and when
+        # DNS autodiscovery is broken one of jsonrpc_uri / xmlrpc_uri is
+        # required.
+        # ipalib 4.6.0 is unusable (https://pagure.io/freeipa/issue/7132),
+        # which is why 4.6.2 is explicitly tested.
+ if 'server' not in api.env or 'domain' not in api.env:
+ sys.exit("ERROR: ('jsonrpc_uri' or 'xmlrpc_uri') or 'domain' are not "
+ "defined in '[global]' section of '%s' nor in '%s'." %
+ (api.env.conf, api.env.conf_default))
+
+ api.finalize()
+ try:
+ api.Backend.rpcclient.connect()
+ except AttributeError:
+ # FreeIPA < 4.0 compatibility
+ api.Backend.xmlclient.connect()
+
+ return api
+
+
+def list_groups(api):
+ '''
+ This function prints a list of all host groups. This function requires
+ one argument, the FreeIPA/IPA API object.
+ '''
+
+ inventory = {}
+ hostvars = {}
+
+ result = api.Command.hostgroup_find(all=True)['result']
+
+ for hostgroup in result:
+ # Get direct and indirect members (nested hostgroups) of hostgroup
+ members = []
+
+ if 'member_host' in hostgroup:
+ members = [host for host in hostgroup['member_host']]
+ if 'memberindirect_host' in hostgroup:
+ members += (host for host in hostgroup['memberindirect_host'])
+ inventory[hostgroup['cn'][0]] = {'hosts': [host for host in members]}
+
+ for member in members:
+ hostvars[member] = {}
+
+ inventory['_meta'] = {'hostvars': hostvars}
+ inv_string = json.dumps(inventory, indent=1, sort_keys=True)
+ print(inv_string)
+
+ return None
+
+
+def parse_args():
+ '''
+ This function parses the arguments that were passed in via the command line.
+ This function expects no arguments.
+ '''
+
+ parser = argparse.ArgumentParser(description='Ansible FreeIPA/IPA '
+ 'inventory module')
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--list', action='store_true',
+ help='List active servers')
+ group.add_argument('--host', help='List details about the specified host')
+
+ return parser.parse_args()
+
+
+def get_host_attributes(api, host):
+ """
+    This function looks up the variables of a single host.
+    Args:
+        api: FreeIPA API object
+        host: name of the host to look up
+
+    Returns: JSON-encoded host variables if found, otherwise an empty dict
+ """
+ try:
+ result = api.Command.host_show(u(host))['result']
+ if 'usercertificate' in result:
+ del result['usercertificate']
+ return json.dumps(result, indent=1)
+    except errors.NotFound:
+ return {}
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ api = initialize()
+
+ if args.host:
+ print(get_host_attributes(api, args.host))
+ elif args.list:
+ list_groups(api)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.ini
new file mode 100644
index 00000000..af27a9c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.ini
@@ -0,0 +1,76 @@
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# The GCE inventory script has the following dependencies:
+# 1. A valid Google Cloud Platform account with Google Compute Engine
+# enabled. See https://cloud.google.com
+# 2. An OAuth2 Service Account flow should be enabled. This will generate
+# a private key file that the inventory script will use for API request
+# authorization. See https://developers.google.com/accounts/docs/OAuth2
+# 3. Convert the private key from PKCS12 to PEM format
+# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
+# > -nodes -nocerts | openssl rsa -out pkey.pem
+# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
+#
+# (See ansible/test/gce_tests.py comments for full install instructions)
+#
+# Author: Eric Johnson <erjohnso@google.com>
+# Contributors: John Roach <johnroach1985@gmail.com>
+
+[gce]
+# GCE Service Account configuration information can be stored in the
+# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
+# exist in your PYTHONPATH and be picked up automatically with an import
+# statement in the inventory script. However, you can specify an absolute
+# path to the secrets.py file with 'libcloud_secrets' parameter.
+# This option will be deprecated in a future release.
+libcloud_secrets =
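+# A hypothetical secrets.py sketch (the names GCE_PARAMS and GCE_KEYWORD_PARAMS
+# are what the inventory script imports; the values here are placeholders):
+#
+#   GCE_PARAMS = ('my-sa@my-project.iam.gserviceaccount.com', '/path/to/pkey.pem')
+#   GCE_KEYWORD_PARAMS = {'project': 'my-project', 'datacenter': 'us-central1-a'}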
+
+# If you are not going to use a 'secrets.py' file, you can set the necessary
+# authorization parameters here.
+# You can add multiple gce projects by using a comma-separated list. Make
+# sure that the service account used has permissions on said projects.
+gce_service_account_email_address =
+gce_service_account_pem_file_path =
+gce_project_id =
+gce_zone =
+
+# Filter inventory based on state. Leave undefined to return instances regardless of state.
+# example: Uncomment to only return inventory in the running or provisioning state
+#instance_states = RUNNING,PROVISIONING
+
+# Filter inventory based on instance tags. Leave undefined to return instances regardless of tags.
+# example: Uncomment to only return inventory with the http-server or https-server tag
+#instance_tags = http-server,https-server
+
+
+[inventory]
+# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
+# contain the instance internal or external address. Values may be either
+# 'internal' or 'external'. If 'external' is specified but no external instance
+# address exists, the internal address will be used.
+# The INVENTORY_IP_TYPE environment variable will override this value.
+inventory_ip_type =
+
+[cache]
+# directory in which cache should be created
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.py
new file mode 100644
index 00000000..05a93f48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/gce.py
@@ -0,0 +1,524 @@
+#!/usr/bin/env python
+
+# Copyright: (c) 2013, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+GCE external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests
+to Google Compute Engine via the libcloud library. Full install/configuration
+instructions for the gce* modules can be found in the comments of
+ansible/test/gce_tests.py.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the libcloud Node object:
+ - gce_uuid
+ - gce_id
+ - gce_image
+ - gce_machine_type
+ - gce_private_ip
+ - gce_public_ip
+ - gce_name
+ - gce_description
+ - gce_status
+ - gce_zone
+ - gce_tags
+ - gce_metadata
+ - gce_network
+ - gce_subnetwork
+
+When run in --list mode, instances are grouped by the following categories:
+ - zone:
+ zone group name examples are us-central1-b, europe-west1-a, etc.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - machine type:
+ types follow a pattern like n1-standard-4, g1-small, etc.
+ - running status:
+ group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
+ - image:
+ when using an ephemeral/scratch disk, this will be set to the image name
+ used when creating the instance (e.g. debian-7-wheezy-v20130816). when
+ your instance was created with a root persistent disk it will be set to
+ 'persistent_disk' since there is no current way to determine the image.
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the GCE inventory script to print out instance specific information
+ $ contrib/inventory/gce.py --host my_instance
+
+Author: Eric Johnson <erjohnso@google.com>
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>,
+ John Roach <johnroach1985@gmail.com>
+Version: 0.0.4
+'''
+
+try:
+ import pkg_resources
+except ImportError:
+ # Use pkg_resources to find the correct versions of libraries and set
+ # sys.path appropriately when there are multiversion installs. We don't
+ # fail here as there is code that better expresses the errors where the
+ # library is used.
+ pass
+
+USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
+USER_AGENT_VERSION = "v2"
+
+import sys
+import os
+import argparse
+
+from time import time
+
+from ansible.module_utils.six.moves import configparser
+
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
+import json
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ _ = Provider.GCE
+except Exception:
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+        ''' Determines if the cache file has expired, or if it is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
+
+
+class GceInventory(object):
+ def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.config = self.get_config()
+ self.drivers = self.get_gce_drivers()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {'use_cache': True}
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
+ sys.exit(0)
+
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a ConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
+ """
+ gce_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "gce.ini")
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = configparser.ConfigParser(defaults={
+ 'gce_service_account_email_address': '',
+ 'gce_service_account_pem_file_path': '',
+ 'gce_project_id': '',
+ 'gce_zone': '',
+ 'libcloud_secrets': '',
+ 'instance_tags': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
+ })
+ if 'gce' not in config.sections():
+ config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
+ config.read(gce_ini_path)
+
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Set the instance_tags filter, env var overrides config from file
+ # and cli param overrides all
+ if self.args.instance_tags:
+ self.instance_tags = self.args.instance_tags
+ else:
+ self.instance_tags = os.environ.get(
+ 'GCE_INSTANCE_TAGS', config.get('gce', 'instance_tags'))
+ if self.instance_tags:
+ self.instance_tags = self.instance_tags.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+        # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_drivers(self):
+ """Determine the GCE authorization settings and return a list of
+ libcloud drivers.
+ """
+ # Attempt to get GCE params from a configuration file, if one
+ # exists.
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
+ secrets_found = False
+
+ try:
+ import secrets
+ args = list(secrets.GCE_PARAMS)
+ kwargs = secrets.GCE_KEYWORD_PARAMS
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify libcloud secrets file as "
+ err += "/absolute/path/to/secrets.py"
+ sys.exit(err)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ import secrets
+ args = list(getattr(secrets, 'GCE_PARAMS', []))
+ kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except Exception:
+ pass
+
+ if not secrets_found:
+ args = [
+ self.config.get('gce', 'gce_service_account_email_address'),
+ self.config.get('gce', 'gce_service_account_pem_file_path')
+ ]
+ kwargs = {'project': self.config.get('gce', 'gce_project_id'),
+ 'datacenter': self.config.get('gce', 'gce_zone')}
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ args[0] = os.environ.get('GCE_EMAIL', args[0])
+ args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
+ args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1])
+
+ kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
+ kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter'])
+
+ gce_drivers = []
+ projects = kwargs['project'].split(',')
+ for project in projects:
+ kwargs['project'] = project
+ gce = get_driver(Provider.GCE)(*args, **kwargs)
+ gce.connection.user_agent_append(
+ '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
+ )
+ gce_drivers.append(gce)
+ return gce_drivers
+
+ def parse_env_zones(self):
+ '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this will be used to filter the results of the group_instances call'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(
+ description='Produce an Ansible Inventory file based on GCE')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all information about an instance')
+ parser.add_argument('--instance-tags', action='store',
+                            help='Only include instances with these tags, separated by commas')
+ parser.add_argument('--pretty', action='store_true', default=False,
+ help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def node_to_dict(self, inst):
+ md = {}
+
+ if inst is None:
+ return {}
+
+ if 'items' in inst.extra['metadata']:
+ for entry in inst.extra['metadata']['items']:
+ md[entry['key']] = entry['value']
+
+ net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ subnet = None
+ if 'subnetwork' in inst.extra['networkInterfaces'][0]:
+ subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
+        # default to external IP unless the user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
+ return {
+ 'gce_uuid': inst.uuid,
+ 'gce_id': inst.id,
+ 'gce_image': inst.image,
+ 'gce_machine_type': inst.size,
+ 'gce_private_ip': inst.private_ips[0],
+ 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
+ 'gce_name': inst.name,
+ 'gce_description': inst.extra['description'],
+ 'gce_status': inst.extra['status'],
+ 'gce_zone': inst.extra['zone'].name,
+ 'gce_tags': inst.extra['tags'],
+ 'gce_metadata': md,
+ 'gce_network': net,
+ 'gce_subnetwork': subnet,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': ssh_host
+ }
+
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
+ try:
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
+ except Exception as e:
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
+ while more_results:
+ for driver in self.drivers:
+ driver.connection.gce_params = params
+ all_nodes.extend(driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
+ def group_instances(self, zones=None):
+ '''Group all instances'''
+ groups = {}
+ meta = {}
+ meta["hostvars"] = {}
+
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
+ # This check filters on the desired instance tags defined in the
+ # config file with the instance_tags config option, env var GCE_INSTANCE_TAGS,
+ # or as the cli param --instance-tags.
+ #
+ # If the instance_tags list is _empty_ then _ALL_ instances are returned.
+ #
+ # If the instance_tags list is _populated_ then check the current
+ # instance tags against the instance_tags list. If the instance has
+ # at least one tag from the instance_tags list, it is returned.
+ if self.instance_tags and not set(self.instance_tags) & set(node.extra['tags']):
+ continue
+
+ name = node.name
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.extra['zone'].name
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups:
+ groups[zone].append(name)
+ else:
+ groups[zone] = [name]
+
+ tags = node.extra['tags']
+ for t in tags:
+ if t.startswith('group-'):
+ tag = t[6:]
+ else:
+ tag = 'tag_%s' % t
+ if tag in groups:
+ groups[tag].append(name)
+ else:
+ groups[tag] = [name]
+
+ net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ net = 'network_%s' % net
+ if net in groups:
+ groups[net].append(name)
+ else:
+ groups[net] = [name]
+
+ machine_type = node.size
+ if machine_type in groups:
+ groups[machine_type].append(name)
+ else:
+ groups[machine_type] = [name]
+
+ image = node.image or 'persistent_disk'
+ if image in groups:
+ groups[image].append(name)
+ else:
+ groups[image] = [name]
+
+ status = node.extra['status']
+ stat = 'status_%s' % status.lower()
+ if stat in groups:
+ groups[stat].append(name)
+ else:
+ groups[stat] = [name]
+
+ for private_ip in node.private_ips:
+ groups[private_ip] = [name]
+
+ if len(node.public_ips) >= 1:
+ for public_ip in node.public_ips:
+ groups[public_ip] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+if __name__ == '__main__':
+ GceInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.py
new file mode 100644
index 00000000..9e985a9e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+#
+# (c) 2018, Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import json
+import argparse
+
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiInventory
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import normalize_extattrs, flatten_extattrs
+
+
+CONFIG_FILES = [
+ os.environ.get('INFOBLOX_CONFIG_FILE', ''),
+ '/etc/ansible/infoblox.yaml',
+ '/etc/ansible/infoblox.yml'
+]
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('--list', action='store_true',
+ help='List host records from NIOS for use in Ansible')
+
+ parser.add_argument('--host',
+ help='List meta data about single host (not used)')
+
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
+ for config_file in CONFIG_FILES:
+ if os.path.exists(config_file):
+ break
+ else:
+ sys.stderr.write('unable to locate config file at /etc/ansible/infoblox.yaml\n')
+ sys.exit(-1)
+
+ try:
+ loader = DataLoader()
+ config = loader.load_from_file(config_file)
+ provider = config.get('provider') or {}
+ wapi = WapiInventory(provider)
+ except Exception as exc:
+ sys.stderr.write(to_text(exc))
+ sys.exit(-1)
+
+ if args.host:
+ host_filter = {'name': args.host}
+ else:
+ host_filter = {}
+
+    config_filters = config.get('filters') or {}
+
+ if config_filters.get('view') is not None:
+ host_filter['view'] = config_filters['view']
+
+ if config_filters.get('extattrs'):
+ extattrs = normalize_extattrs(config_filters['extattrs'])
+ else:
+ extattrs = {}
+
+ hostvars = {}
+ inventory = {
+ '_meta': {
+ 'hostvars': hostvars
+ }
+ }
+
+ return_fields = ['name', 'view', 'extattrs', 'ipv4addrs']
+
+ hosts = wapi.get_object('record:host',
+ host_filter,
+ extattrs=extattrs,
+ return_fields=return_fields)
+
+ if hosts:
+ for item in hosts:
+ view = item['view']
+ name = item['name']
+
+ if view not in inventory:
+ inventory[view] = {'hosts': []}
+
+ inventory[view]['hosts'].append(name)
+
+ hostvars[name] = {
+ 'view': view
+ }
+
+ if item.get('extattrs'):
+ for key, value in iteritems(flatten_extattrs(item['extattrs'])):
+ if key.startswith('ansible_'):
+ hostvars[name][key] = value
+ else:
+ if 'extattrs' not in hostvars[name]:
+ hostvars[name]['extattrs'] = {}
+ hostvars[name]['extattrs'][key] = value
+
+ sys.stdout.write(json.dumps(inventory, indent=4))
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.yaml b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.yaml
new file mode 100644
index 00000000..c1be5324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/infoblox.yaml
@@ -0,0 +1,24 @@
+---
+# This file provides the configuration information for the Infoblox dynamic
+# inventory script that is used to dynamically pull host information from NIOS.
+# This file should be copied to /etc/ansible/infoblox.yaml in order for the
+# dynamic script to find it.
+
+# Sets the provider arguments for authenticating to the Infoblox server to
+# retrieve inventory hosts. Provider arguments can also be set using
+# environment variables. Supported environment variables all start with
+# INFOBLOX_{{ name }}. For instance, to set the host provider value, the
+# environment variable would be INFOBLOX_HOST.
+provider:
+ host: <SERVER_IP>
+ username: <USERNAME>
+ password: <PASSWORD>
+
+# Filters allow the dynamic inventory script to restrict the set of hosts that
+# are returned from the Infoblox server.
+filters:
+ # restrict returned hosts by extensible attributes
+ extattrs: {}
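+  # for example (attribute name and value are site-specific placeholders):
+  # extattrs:
+  #   Site: Berlin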
+
+ # restrict returned hosts to a specified DNS view
+ view: null
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/jail.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/jail.py
new file mode 100644
index 00000000..a28b923b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/jail.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# (c) 2013, Michael Scherer <misc@zarb.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from subprocess import Popen, PIPE
+import sys
+import json
+
+result = {}
+result['all'] = {}
+
+pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
+result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
+result['all']['vars'] = {}
+result['all']['vars']['ansible_connection'] = 'jail'
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print(json.dumps({'ansible_connection': 'jail'}))
+else:
+ sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/landscape.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/landscape.py
new file mode 100644
index 00000000..8301e00b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/landscape.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
+#
+# This file is part of Ansible.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Dynamic inventory script which lets you use nodes discovered by Canonical's
+# Landscape (http://www.ubuntu.com/management/landscape-features).
+#
+# Requires the `landscape_api` Python module
+# See:
+# - https://landscape.canonical.com/static/doc/api/api-client-package.html
+# - https://landscape.canonical.com/static/doc/api/python-api.html
+#
+# Environment variables
+# ---------------------
+# - `LANDSCAPE_API_URI`
+# - `LANDSCAPE_API_KEY`
+# - `LANDSCAPE_API_SECRET`
+# - `LANDSCAPE_API_SSL_CA_FILE` (optional)
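+#
+# Example invocation (a sketch; the URI and keys are placeholders):
+#
+#   LANDSCAPE_API_URI=https://landscape.example.com/api/ \
+#   LANDSCAPE_API_KEY=<key> LANDSCAPE_API_SECRET=<secret> ./landscape.py --list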
+
+
+import argparse
+import collections
+import os
+import sys
+
+from landscape_api.base import API, HTTPError
+
+import json
+
+_key = 'landscape'
+
+
+class EnvironmentConfig(object):
+ uri = os.getenv('LANDSCAPE_API_URI')
+ access_key = os.getenv('LANDSCAPE_API_KEY')
+ secret_key = os.getenv('LANDSCAPE_API_SECRET')
+ ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE')
+
+
+def _landscape_client():
+ env = EnvironmentConfig()
+ return API(
+ uri=env.uri,
+ access_key=env.access_key,
+ secret_key=env.secret_key,
+ ssl_ca_file=env.ssl_ca_file)
+
+
+def get_landscape_members_data():
+ return _landscape_client().get_computers()
+
+
+def get_nodes(data):
+ return [node['hostname'] for node in data]
+
+
+def get_groups(data):
+ groups = collections.defaultdict(list)
+
+ for node in data:
+ for value in node['tags']:
+ groups[value].append(node['hostname'])
+
+ return groups
+
+
+def get_meta(data):
+ meta = {'hostvars': {}}
+ for node in data:
+ meta['hostvars'][node['hostname']] = {'tags': node['tags']}
+ return meta
+
+
+def print_list():
+ data = get_landscape_members_data()
+ nodes = get_nodes(data)
+ groups = get_groups(data)
+ meta = get_meta(data)
+ inventory_data = {_key: nodes, '_meta': meta}
+ inventory_data.update(groups)
+ print(json.dumps(inventory_data))
+
+
+def print_host(host):
+ data = get_landscape_members_data()
+ meta = get_meta(data)
+ print(json.dumps(meta['hostvars'][host]))
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script reading from landscape cluster')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from landscape cluster'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/libcloud.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/libcloud.ini
new file mode 100644
index 00000000..7592c41c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/libcloud.ini
@@ -0,0 +1,15 @@
+# Ansible Apache Libcloud Generic inventory script
+
+[driver]
+provider = CLOUDSTACK
+host =
+path =
+secure = True
+verify_ssl_cert = True
+
+key =
+secret =
+
+[cache]
+cache_path=/path/to/your/cache
+cache_max_age=60
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.ini
new file mode 100644
index 00000000..c925d970
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.ini
@@ -0,0 +1,18 @@
+# Ansible Linode external inventory script settings
+#
+
+[linode]
+
+# API calls to Linode are slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-Linode.cache
+# - ansible-Linode.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 300
+
+# If set to true use the hosts public ip in the dictionary instead of the label
+use_public_ip = false
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.py
new file mode 100644
index 00000000..2972725d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/linode.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+
+'''
+Linode external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+Linode using the Chube library.
+
+NOTE: This script assumes Ansible is being executed where Chube is already
+installed and has a valid config at ~/.chube. If not, run:
+
+ pip install chube
+ echo -e "---\napi_key: <YOUR API KEY GOES HERE>" > ~/.chube
+
+For more details, see: https://github.com/exosite/chube
+
+NOTE: By default, this script also assumes that the Linodes in your account all have
+labels that correspond to hostnames that are in your resolver search path.
+Your resolver search path resides in /etc/hosts.
+Optionally, if you would like to use the host's public IP instead of its label, use
+the following setting in linode.ini:
+
+ use_public_ip = true
+
+When run against a specific host, this script returns the following variables:
+
+ - api_id
+ - datacenter_id
+ - datacenter_city (lowercase city name of data center, e.g. 'tokyo')
+ - label
+ - display_group
+ - create_dt
+ - total_hd
+ - total_xfer
+ - total_ram
+ - status
+ - public_ip (The first public IP found)
+ - private_ip (The first private IP found, or empty string if none)
+ - alert_cpu_enabled
+ - alert_cpu_threshold
+ - alert_diskio_enabled
+ - alert_diskio_threshold
+ - alert_bwin_enabled
+ - alert_bwin_threshold
+ - alert_bwout_enabled
+ - alert_bwout_threshold
+ - alert_bwquota_enabled
+ - alert_bwquota_threshold
+ - backup_weekly_daily
+ - backup_window
+ - watchdog
+
+Peter Sankauskas did most of the legwork here with his EC2 inventory script; I
+just adapted that for Linode.
+'''
+
+# (c) 2013, Dan Slimmon
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+# Standard imports
+import os
+import re
+import sys
+import argparse
+from time import time
+
+import json
+
+try:
+ from chube import load_chube_config
+ from chube import api as chube_api
+ from chube.datacenter import Datacenter
+ from chube.linode_obj import Linode
+except Exception:
+ try:
+ # remove local paths and other stuff that may
+ # cause an import conflict, as chube is sensitive
+ # to name collisions on importing
+ old_path = sys.path
+ sys.path = [d for d in sys.path if d not in ('', os.getcwd(), os.path.dirname(os.path.realpath(__file__)))]
+
+ from chube import load_chube_config
+ from chube import api as chube_api
+ from chube.datacenter import Datacenter
+ from chube.linode_obj import Linode
+
+ sys.path = old_path
+ except Exception as e:
+ raise Exception("could not import chube")
+
+load_chube_config()
+
+# Imports for ansible
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+
+class LinodeInventory(object):
+ def _empty_inventory(self):
+ return {"_meta": {"hostvars": {}}}
+
+ def __init__(self):
+ """Main execution path."""
+ # Inventory grouped by display group
+ self.inventory = self._empty_inventory()
+ # Index of label to Linode ID
+ self.index = {}
+ # Local cache of Datacenter objects populated by populate_datacenter_cache()
+ self._datacenter_cache = None
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Cache
+ if self.args.refresh_cache:
+ self.do_api_calls_update_cache()
+ elif not self.is_cache_valid():
+ self.do_api_calls_update_cache()
+
+ # Data to print
+ if self.args.host:
+ data_to_print = self.get_host_info()
+ elif self.args.list:
+ # Display list of nodes for inventory
+ if len(self.inventory) == 1:
+ data_to_print = self.get_inventory_from_cache()
+ else:
+ data_to_print = self.json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def is_cache_valid(self):
+ """Determines if the cache file has expired, or if it is still valid."""
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_index):
+ return True
+ return False
+
+ def read_settings(self):
+ """Reads the settings from the .ini file."""
+ config = ConfigParser.SafeConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')
+
+ # Cache related
+ cache_path = config.get('linode', 'cache_path')
+ self.cache_path_cache = cache_path + "/ansible-linode.cache"
+ self.cache_path_index = cache_path + "/ansible-linode.index"
+ self.cache_max_age = config.getint('linode', 'cache_max_age')
+ self.use_public_ip = config.getboolean('linode', 'use_public_ip')
+
+ def parse_cli_args(self):
+ """Command line argument processing"""
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List nodes (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific node')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def do_api_calls_update_cache(self):
+ """Do API calls, and save data in cache files."""
+ self.get_nodes()
+ self.write_to_cache(self.inventory, self.cache_path_cache)
+ self.write_to_cache(self.index, self.cache_path_index)
+
+ def get_nodes(self):
+ """Makes an Linode API call to get the list of nodes."""
+ try:
+ for node in Linode.search(status=Linode.STATUS_RUNNING):
+ self.add_node(node)
+ except chube_api.linode_api.ApiError as e:
+ sys.exit("Looks like Linode's API is down:\n %s" % e)
+
+ def get_node(self, linode_id):
+ """Gets details about a specific node."""
+ try:
+ return Linode.find(api_id=linode_id)
+ except chube_api.linode_api.ApiError as e:
+ sys.exit("Looks like Linode's API is down:\n%s" % e)
+
+ def populate_datacenter_cache(self):
+ """Creates self._datacenter_cache, containing all Datacenters indexed by ID."""
+ self._datacenter_cache = {}
+ dcs = Datacenter.search()
+ for dc in dcs:
+ self._datacenter_cache[dc.api_id] = dc
+
+ def get_datacenter_city(self, node):
+ """Returns a the lowercase city name of the node's data center."""
+ if self._datacenter_cache is None:
+ self.populate_datacenter_cache()
+ location = self._datacenter_cache[node.datacenter_id].location
+ location = location.lower()
+ location = location.split(",")[0]
+ return location
+
+ def add_node(self, node):
+ """Adds an node to the inventory and index."""
+ if self.use_public_ip:
+ dest = self.get_node_public_ip(node)
+ else:
+ dest = node.label
+
+ # Add to index
+ self.index[dest] = node.api_id
+
+ # Inventory: Group by node ID (always a group of 1)
+ self.inventory[node.api_id] = [dest]
+
+ # Inventory: Group by datacenter city
+ self.push(self.inventory, self.get_datacenter_city(node), dest)
+
+ # Inventory: Group by display group
+ self.push(self.inventory, node.display_group, dest)
+
+ # Inventory: Add a "linode" global tag group
+ self.push(self.inventory, "linode", dest)
+
+ # Add host info to hostvars
+ self.inventory["_meta"]["hostvars"][dest] = self._get_host_info(node)
+
+ def get_node_public_ip(self, node):
+ """Returns a the public IP address of the node"""
+ return [addr.address for addr in node.ipaddresses if addr.is_public][0]
+
+ def get_host_info(self):
+ """Get variables about a specific host."""
+
+ if len(self.index) == 0:
+ # Need to load index from cache
+ self.load_index_from_cache()
+
+ if self.args.host not in self.index:
+ # try updating the cache
+ self.do_api_calls_update_cache()
+ if self.args.host not in self.index:
+ # host might not exist anymore
+ return self.json_format_dict({}, True)
+
+ node_id = self.index[self.args.host]
+ node = self.get_node(node_id)
+
+ return self.json_format_dict(self._get_host_info(node), True)
+
+ def _get_host_info(self, node):
+ node_vars = {}
+ for direct_attr in [
+ "api_id",
+ "datacenter_id",
+ "label",
+ "display_group",
+ "create_dt",
+ "total_hd",
+ "total_xfer",
+ "total_ram",
+ "status",
+ "alert_cpu_enabled",
+ "alert_cpu_threshold",
+ "alert_diskio_enabled",
+ "alert_diskio_threshold",
+ "alert_bwin_enabled",
+ "alert_bwin_threshold",
+ "alert_bwout_enabled",
+ "alert_bwout_threshold",
+ "alert_bwquota_enabled",
+ "alert_bwquota_threshold",
+ "backup_weekly_daily",
+ "backup_window",
+ "watchdog"
+ ]:
+ node_vars[direct_attr] = getattr(node, direct_attr)
+
+ node_vars["datacenter_city"] = self.get_datacenter_city(node)
+ node_vars["public_ip"] = self.get_node_public_ip(node)
+
+ # Set the SSH host information, so these inventory items can be used if
+ # their labels aren't FQDNs
+ node_vars['ansible_ssh_host'] = node_vars["public_ip"]
+ node_vars['ansible_host'] = node_vars["public_ip"]
+
+ private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
+
+ if private_ips:
+ node_vars["private_ip"] = private_ips[0]
+
+ return node_vars
+
+ def push(self, my_dict, key, element):
+ """Pushed an element onto an array that may not have been defined in the dict."""
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def get_inventory_from_cache(self):
+ """Reads the inventory from the cache file and returns it as a JSON object."""
+ cache = open(self.cache_path_cache, 'r')
+ json_inventory = cache.read()
+ return json_inventory
+
+ def load_index_from_cache(self):
+ """Reads the index from the cache file and sets self.index."""
+ cache = open(self.cache_path_index, 'r')
+ json_index = cache.read()
+ self.index = json.loads(json_index)
+
+ def write_to_cache(self, data, filename):
+ """Writes data in JSON format to a file."""
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def to_safe(self, word):
+ """Escapes any characters that would be invalid in an ansible group name."""
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+ def json_format_dict(self, data, pretty=False):
+ """Converts a dict to a JSON object and dumps it as a formatted string."""
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
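+# Example invocations (illustrative; assumes the script is saved as linode.py
+# and that cache settings come from linode.ini in the same directory):
+#   ./linode.py --list
+#   ./linode.py --host <label or public IP, depending on use_public_ip>
+#   ./linode.py --refresh-cache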
+LinodeInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxc_inventory.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxc_inventory.py
new file mode 100644
index 00000000..00de15c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxc_inventory.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#
+# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
+# <florian@hastexo.com>
+# Based in part on:
+# libvirt_lxc.py, (c) 2013, Michael Scherer <misc@zarb.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""
+Ansible inventory script for LXC containers. Requires Python
+bindings for LXC API.
+
+In LXC, containers can be grouped by setting the lxc.group option,
+which may be found more than once in a container's
+configuration. So, we enumerate all containers, fetch their list
+of groups, and then build the dictionary in the way Ansible expects
+it.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import lxc
+import json
+
+
+def build_dict():
+ """Returns a dictionary keyed to the defined LXC groups. All
+ containers, including the ones not in any group, are included in the
+ "all" group."""
+ # Enumerate all containers, and list the groups they are in. Also,
+ # implicitly add every container to the 'all' group.
+ containers = dict([(c,
+ ['all'] +
+ (lxc.Container(c).get_config_item('lxc.group') or []))
+ for c in lxc.list_containers()])
+
+ # Extract the groups, flatten the list, and remove duplicates
+ groups = set(sum([g for g in containers.values()], []))
+
+ # Create a dictionary for each group (including the 'all' group)
+ return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
+ 'vars': {'ansible_connection': 'lxc'}}) for g in groups])
+
+
+def main(argv):
+ """Returns a JSON dictionary as expected by Ansible"""
+ result = build_dict()
+ if len(argv) == 2 and argv[1] == '--list':
+ json.dump(result, sys.stdout)
+ elif len(argv) == 3 and argv[1] == '--host':
+ json.dump({'ansible_connection': 'lxc'}, sys.stdout)
+ else:
+ print("Need an argument, either --list or --host <host>", file=sys.stderr)
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.ini
new file mode 100644
index 00000000..5398e7d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.ini
@@ -0,0 +1,13 @@
+# LXD external inventory script settings
+
+[lxd]
+
+# The default resource
+#resource = local:
+
+# The group name to add the hosts to
+#group = lxd
+
+# The connection type to return for these hosts - lxd hasn't been tested yet
+#connection = lxd
+connection = smart
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.py
new file mode 100644
index 00000000..8e8794eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/lxd.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+
+# (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2014, Hiroaki Nakamura <hnakamur@gmail.com>
+# (c) 2016, Andew Clarke <andrew@oscailte.org>
+#
+# This file is based on https://github.com/ansible/ansible/blob/devel/plugins/inventory/libvirt_lxc.py which is part of Ansible,
+# and https://github.com/hnakamur/lxc-ansible-playbooks/blob/master/provisioning/inventory-lxc.py
+#
+# NOTE, this file has some obvious limitations, improvements welcome
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
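+# Example invocations (illustrative; settings are read from lxd.ini in the
+# script directory, and the 'lxc' client binary must be available on PATH):
+#   ./lxd.py --list
+#   ./lxd.py --host mycontainer
+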
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+from subprocess import Popen, PIPE
+import distutils.spawn
+import sys
+import json
+
+from ansible.module_utils.six.moves import configparser
+
+# Set up defaults
+resource = 'local:'
+group = 'lxd'
+connection = 'lxd'
+hosts = {}
+result = {}
+
+# Read the settings from the lxd.ini file
+config = configparser.SafeConfigParser()
+config.read(os.path.dirname(os.path.realpath(__file__)) + '/lxd.ini')
+if config.has_option('lxd', 'resource'):
+ resource = config.get('lxd', 'resource')
+if config.has_option('lxd', 'group'):
+ group = config.get('lxd', 'group')
+if config.has_option('lxd', 'connection'):
+ connection = config.get('lxd', 'connection')
+
+# Ensure executable exists
+if distutils.spawn.find_executable('lxc'):
+
+ # Set up containers result and hosts array
+ result[group] = {}
+ result[group]['hosts'] = []
+
+ # Run the command and load json result
+ pipe = Popen(['lxc', 'list', resource, '--format', 'json'], stdout=PIPE, universal_newlines=True)
+ lxdjson = json.load(pipe.stdout)
+
+ # Iterate the json lxd output
+ for item in lxdjson:
+
+ # Check state and network
+ if 'state' in item and item['state'] is not None and 'network' in item['state']:
+ network = item['state']['network']
+
+ # Check for eth0 and addresses
+ if 'eth0' in network and 'addresses' in network['eth0']:
+ addresses = network['eth0']['addresses']
+
+ # Iterate addresses
+ for address in addresses:
+
+ # Only return inet family addresses
+ if 'family' in address and address['family'] == 'inet':
+ if 'address' in address:
+ ip = address['address']
+ name = item['name']
+
+ # Add the host to the results and the host array
+ result[group]['hosts'].append(name)
+ hosts[name] = ip
+
+ # Set the other containers result values
+ result[group]['vars'] = {}
+ result[group]['vars']['ansible_connection'] = connection
+
+# Process arguments
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ if sys.argv[2] == 'localhost':
+ print(json.dumps({'ansible_connection': 'local'}))
+ else:
+ if connection == 'lxd':
+ print(json.dumps({'ansible_connection': connection}))
+ else:
+ print(json.dumps({'ansible_connection': connection, 'ansible_host': hosts[sys.argv[2]]}))
+else:
+ print("Need an argument, either --list or --host <host>")
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt.ini
new file mode 100644
index 00000000..c401c0ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt.ini
@@ -0,0 +1,17 @@
+[mdt]
+
+# Set the MDT server to connect to
+server = localhost.example.com
+
+# Set the MDT Instance
+instance = EXAMPLEINSTANCE
+
+# Set the MDT database
+database = MDTDB
+
+# Configure login credentials
+user = local.domain\admin
+password = adminpassword
+
+[tower]
+groupname = mdt
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt_dynamic_inventory.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt_dynamic_inventory.py
new file mode 100644
index 00000000..6409e37f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/mdt_dynamic_inventory.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+# (c) 2016, Julian Barnett <jbarnett@tableau.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+MDT external inventory script
+=================================
+author: J Barnett 06/23/2016 01:15
+maintainer: J Barnett (github @jbarnett1981)
+'''
+
+import argparse
+import json
+import pymssql
+from ansible.module_utils.six.moves import configparser
+
+
+class MDTInventory(object):
+
+ def __init__(self):
+ ''' Main execution path '''
+ self.conn = None
+
+ # Initialize empty inventory
+ self.inventory = self._empty_inventory()
+
+ # Read CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Get Hosts
+ if self.args.list:
+ self.get_hosts()
+
+ # Get specific host vars
+ if self.args.host:
+ self.get_hosts(self.args.host)
+
+ def _connect(self, query):
+ '''
+ Connect to MDT and dump contents of dbo.ComputerIdentity database
+ '''
+ if not self.conn:
+ self.conn = pymssql.connect(server=self.mdt_server + "\\" + self.mdt_instance, user=self.mdt_user, password=self.mdt_password,
+ database=self.mdt_database)
+ cursor = self.conn.cursor()
+ cursor.execute(query)
+ self.mdt_dump = cursor.fetchall()
+ self.conn.close()
+
+ def get_hosts(self, hostname=False):
+ '''
+ Gets hosts from the MDT database
+ '''
+ if hostname:
+ query = ("SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role "
+ "FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID where t1.Description = '%s'" % hostname)
+ else:
+ query = 'SELECT t1.ID, t1.Description, t1.MacAddress, t2.Role FROM ComputerIdentity as t1 join Settings_Roles as t2 on t1.ID = t2.ID'
+ self._connect(query)
+
+ # Use the group name configured in Ansible Tower for this inventory
+ groupname = self.mdt_groupname
+
+ # Initialize empty host list
+ hostlist = []
+
+ # Parse through db dump and populate inventory
+ for hosts in self.mdt_dump:
+ self.inventory['_meta']['hostvars'][hosts[1]] = {'id': hosts[0], 'name': hosts[1], 'mac': hosts[2], 'role': hosts[3]}
+ hostlist.append(hosts[1])
+ self.inventory[groupname] = hostlist
+
+ # Print it all out
+ print(json.dumps(self.inventory, indent=2))
+
+ def _empty_inventory(self):
+ '''
+ Create empty inventory dictionary
+ '''
+ return {"_meta": {"hostvars": {}}}
+
+ def read_settings(self):
+ '''
+ Reads the settings from the mdt.ini file
+ '''
+ config = configparser.SafeConfigParser()
+ config.read('mdt.ini')
+
+ # MDT Server and instance and database
+ self.mdt_server = config.get('mdt', 'server')
+ self.mdt_instance = config.get('mdt', 'instance')
+ self.mdt_database = config.get('mdt', 'database')
+
+ # MDT Login credentials
+ if config.has_option('mdt', 'user'):
+ self.mdt_user = config.get('mdt', 'user')
+ if config.has_option('mdt', 'password'):
+ self.mdt_password = config.get('mdt', 'password')
+
+ # Group name in Tower
+ if config.has_option('tower', 'groupname'):
+ self.mdt_groupname = config.get('tower', 'groupname')
+
+ def parse_cli_args(self):
+ '''
+ Command line argument processing
+ '''
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on MDT')
+ parser.add_argument('--list', action='store_true', default=False, help='List instances')
+ parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+ self.args = parser.parse_args()
+
+
+if __name__ == "__main__":
+ # Run the script
+ MDTInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.ini
new file mode 100644
index 00000000..320f11f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.ini
@@ -0,0 +1,41 @@
+# Ansible Nagios external inventory script settings
+#
+# To get all available possibilities, check following URL:
+# http://www.naemon.org/documentation/usersguide/livestatus.html
+# https://mathias-kettner.de/checkmk_livestatus.html
+#
+
+[local]
+# Livestatus URI
+# Example for default naemon livestatus unix socket :
+# livestatus_uri=unix:/var/cache/naemon/live
+
+[remote]
+
+# default field name for host: name
+# Uncomment to override:
+# host_field=address
+#
+# default field group for host: groups
+# Uncomment to override:
+# group_field=state
+# default fields retrieved: address, alias, display_name, children, parents
+# To override, uncomment the following line
+# fields_to_retrieve=address,alias,display_name
+#
+# default variable prefix: livestatus_
+# To override, uncomment the following line
+# var_prefix=naemon_
+#
+# default filter: None
+#
+# Uncomment to override
+#
+# All hosts with state = OK
+# host_filter=state = 0
+# Warning: for the moment, you can use only one filter at a time. You cannot combine various conditions.
+#
+# All hosts in the Linux group
+# host_filter=groups >= Linux
+#
+livestatus_uri=tcp:192.168.66.137:6557
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.py
new file mode 100644
index 00000000..bdf9f673
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_livestatus.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+
+# (c) 2015, Yannig Perre <yannig.perre@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Nagios livestatus inventory script. Before using this script, please
+update the nagios_livestatus.ini file.
+
+Livestatus is a nagios/naemon/shinken module which lets you retrieve
+information stored in the monitoring core.
+
+This inventory script needs the livestatus API for Python. Please install it
+before using this script (apt/pip/yum/...).
+
+Checkmk livestatus: https://mathias-kettner.de/checkmk_livestatus.html
+Livestatus API: http://www.naemon.org/documentation/usersguide/livestatus.html
+'''
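+# Typical invocations (host name is illustrative; the script reads
+# nagios_livestatus.ini from its own directory):
+#   ./nagios_livestatus.py --list --pretty
+#   ./nagios_livestatus.py --host webserver01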
+
+import os
+import re
+import argparse
+import sys
+
+from ansible.module_utils.six.moves import configparser
+import json
+
+try:
+ from mk_livestatus import Socket
+except ImportError:
+ sys.exit("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
+
+
+class NagiosLivestatusInventory(object):
+
+ def parse_ini_file(self):
+ config = configparser.SafeConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_livestatus.ini')
+ for section in config.sections():
+ if not config.has_option(section, 'livestatus_uri'):
+ continue
+
+ # If fields_to_retrieve is not set, using default fields
+ fields_to_retrieve = self.default_fields_to_retrieve
+ if config.has_option(section, 'fields_to_retrieve'):
+ fields_to_retrieve = [field.strip() for field in config.get(section, 'fields_to_retrieve').split(',')]
+ fields_to_retrieve = tuple(fields_to_retrieve)
+
+ # default section values
+ section_values = {
+ 'var_prefix': 'livestatus_',
+ 'host_filter': None,
+ 'host_field': 'name',
+ 'group_field': 'groups'
+ }
+ for key, value in section_values.items():
+ if config.has_option(section, key):
+ section_values[key] = config.get(section, key).strip()
+
+ # Retrieving livestatus string connection
+ livestatus_uri = config.get(section, 'livestatus_uri')
+ backend_definition = None
+
+ # Local unix socket
+ unix_match = re.match('unix:(.*)', livestatus_uri)
+ if unix_match is not None:
+ backend_definition = {'connection': unix_match.group(1)}
+
+ # Remote tcp connection
+ tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
+ if tcp_match is not None:
+ backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
+
+ # No valid livestatus_uri => exiting
+ if backend_definition is None:
+ raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri)
+
+ # Updating backend_definition with current value
+ backend_definition['name'] = section
+ backend_definition['fields'] = fields_to_retrieve
+ for key, value in section_values.items():
+ backend_definition[key] = value
+
+ self.backends.append(backend_definition)
+
+ def parse_options(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--host', nargs=1)
+ parser.add_argument('--list', action='store_true')
+ parser.add_argument('--pretty', action='store_true')
+ self.options = parser.parse_args()
+
+ def add_host(self, hostname, group):
+ if group not in self.result:
+ self.result[group] = {}
+ self.result[group]['hosts'] = []
+ if hostname not in self.result[group]['hosts']:
+ self.result[group]['hosts'].append(hostname)
+
+ def query_backend(self, backend, host=None):
+ '''Query a livestatus backend'''
+ hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
+
+ if backend['host_filter'] is not None:
+ hosts_request = hosts_request.filter(backend['host_filter'])
+
+ if host is not None:
+ hosts_request = hosts_request.filter('name = ' + host[0])
+
+ hosts_request._columns += backend['fields']
+
+ hosts = hosts_request.call()
+ for host in hosts:
+ hostname = host[backend['host_field']]
+ hostgroups = host[backend['group_field']]
+ if not isinstance(hostgroups, list):
+ hostgroups = [hostgroups]
+ self.add_host(hostname, 'all')
+ self.add_host(hostname, backend['name'])
+ for group in hostgroups:
+ self.add_host(hostname, group)
+ for field in backend['fields']:
+ var_name = backend['var_prefix'] + field
+ if hostname not in self.result['_meta']['hostvars']:
+ self.result['_meta']['hostvars'][hostname] = {}
+ self.result['_meta']['hostvars'][hostname][var_name] = host[field]
+
+ def __init__(self):
+
+ self.defaultgroup = 'group_all'
+ self.default_fields_to_retrieve = ('address', 'alias', 'display_name', 'childs', 'parents')
+ self.backends = []
+ self.options = None
+
+ self.parse_ini_file()
+ self.parse_options()
+
+ self.result = {}
+ self.result['_meta'] = {}
+ self.result['_meta']['hostvars'] = {}
+ self.json_indent = None
+ if self.options.pretty:
+ self.json_indent = 2
+
+ if len(self.backends) == 0:
+ sys.exit("Error: Livestatus configuration is missing. See nagios_livestatus.ini.")
+
+ for backend in self.backends:
+ self.query_backend(backend, self.options.host)
+
+ if self.options.host:
+ print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
+ elif self.options.list:
+ print(json.dumps(self.result, indent=self.json_indent))
+ else:
+ sys.exit("usage: --list or --host HOSTNAME [--pretty]")
+
+
+NagiosLivestatusInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.ini
new file mode 100644
index 00000000..1e133a29
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.ini
@@ -0,0 +1,10 @@
+# Ansible Nagios external inventory script settings
+#
+
+[ndo]
+# NDO database URI
+# Make sure that data is returned as strings and not bytes if using python 3.
+# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
+# for supported databases and URI format.
+# Example for mysqlclient module :
+database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.py
new file mode 100644
index 00000000..ffd5500f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nagios_ndo.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Nagios NDO external inventory script.
+========================================
+
+Returns hosts and hostgroups from Nagios NDO.
+
+Configuration is read from `nagios_ndo.ini`.
+"""
+
+import os
+import argparse
+import sys
+from ansible.module_utils.six.moves import configparser
+import json
+
+try:
+ from sqlalchemy import text
+ from sqlalchemy.engine import create_engine
+except ImportError:
+ sys.exit("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
+
+
+class NagiosNDOInventory(object):
+
+ def read_settings(self):
+ config = configparser.SafeConfigParser()
+ config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
+ if config.has_option('ndo', 'database_uri'):
+ self.ndo_database_uri = config.get('ndo', 'database_uri')
+
+ def read_cli(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--host', nargs=1)
+ parser.add_argument('--list', action='store_true')
+ self.options = parser.parse_args()
+
+ def get_hosts(self):
+ engine = create_engine(self.ndo_database_uri)
+ connection = engine.connect()
+ select_hosts = text("SELECT display_name \
+ FROM nagios_hosts")
+ select_hostgroups = text("SELECT alias \
+ FROM nagios_hostgroups")
+ select_hostgroup_hosts = text("SELECT h.display_name \
+ FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
+ WHERE hgm.hostgroup_id = hg.hostgroup_id \
+ AND hgm.host_object_id = h.host_object_id \
+ AND hg.alias =:hostgroup_alias")
+
+ hosts = connection.execute(select_hosts)
+ self.result['all']['hosts'] = [host['display_name'] for host in hosts]
+
+ for hostgroup in connection.execute(select_hostgroups):
+ hostgroup_alias = hostgroup['alias']
+ self.result[hostgroup_alias] = {}
+ hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
+ self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
+
+ def __init__(self):
+
+ self.defaultgroup = 'group_all'
+ self.ndo_database_uri = None
+ self.options = None
+
+ self.read_settings()
+ self.read_cli()
+
+ self.result = {}
+ self.result['all'] = {}
+ self.result['all']['hosts'] = []
+ self.result['_meta'] = {}
+ self.result['_meta']['hostvars'] = {}
+
+ if self.ndo_database_uri:
+ self.get_hosts()
+ if self.options.host:
+ print(json.dumps({}))
+ elif self.options.list:
+ print(json.dumps(self.result))
+ else:
+ sys.exit("usage: --list or --host HOSTNAME")
+ else:
+ sys.exit("Error: Database configuration is missing. See nagios_ndo.ini.")
+
+
+NagiosNDOInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.py
new file mode 100644
index 00000000..1394e3e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python
+
+'''
+nsot
+====
+
+Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
+
+Features
+--------
+
+* Define host groups in the form of NSoT device attribute criteria
+
+* All parameters defined by the spec as of 2015-09-05 are supported.
+
+ + ``--list``: Returns JSON hash of host groups -> hosts and top-level
+ ``_meta`` -> ``hostvars`` which correspond to all device attributes.
+
+ Group vars can be specified in the YAML configuration, noted below.
+
+ + ``--host <hostname>``: Returns JSON hash where every item is a device
+ attribute.
+
+* In addition to all attributes assigned to the resource being returned, the
+ script will also append ``site_id`` and ``id`` as facts to utilize.
+
+
+Configuration
+-------------
+
+Since it'd be annoying and failure-prone to guess where your configuration
+file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
+
+This file should adhere to the YAML spec. Each top-level variable must be a
+desired Ansible group name hashed with a single 'query' item that defines the
+NSoT attribute query.
+
+Queries follow the normal NSoT query syntax, `shown here`_
+
+.. _shown here: https://github.com/dropbox/pynsot#set-queries
+
+.. code:: yaml
+
+ routers:
+ query: 'deviceType=ROUTER'
+ vars:
+ a: b
+ c: d
+
+ juniper_fw:
+ query: 'deviceType=FIREWALL manufacturer=JUNIPER'
+
+ not_f10:
+ query: '-manufacturer=FORCE10'
+
+The inventory will automatically use your ``.pynsotrc`` like the normal pynsot
+CLI would, so make sure that's configured appropriately.
+
+.. note::
+
+ Attributes I'm showing above are influenced by ones that the Trigger
+ project likes. As is the spirit of NSoT, use whichever attributes work best
+ for your workflow.
+
+If the config file is blank or absent, the following default groups will be
+created:
+
+* ``routers``: deviceType=ROUTER
+* ``switches``: deviceType=SWITCH
+* ``firewalls``: deviceType=FIREWALL
+
+These are likely not useful for everyone so please use the configuration. :)
+
+.. note::
+
+ By default, resources will only be returned for whatever default site is
+ set in your ``~/.pynsotrc``.
+
+ If you want to specify, add an extra key under the group for ``site: n``.
+
+Output Examples
+---------------
+
+Here are some examples shown from just calling the command directly::
+
+ $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
+ {
+ "routers": {
+ "hosts": [
+ "test1.example.com"
+ ],
+ "vars": {
+ "cool_level": "very",
+ "group": "routers"
+ }
+ },
+ "firewalls": {
+ "hosts": [
+ "test2.example.com"
+ ],
+ "vars": {
+ "cool_level": "enough",
+ "group": "firewalls"
+ }
+ },
+ "_meta": {
+ "hostvars": {
+ "test2.example.com": {
+ "make": "SRX",
+ "site_id": 1,
+ "id": 108
+ },
+ "test1.example.com": {
+ "make": "MX80",
+ "site_id": 1,
+ "id": 107
+ }
+ }
+ },
+ "rtr_and_fw": {
+ "hosts": [
+ "test1.example.com",
+ "test2.example.com"
+ ],
+ "vars": {}
+ }
+ }
+
+
+ $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
+ {
+ "make": "MX80",
+ "site_id": 1,
+ "id": 107
+ }
+
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import os
+import pkg_resources
+import argparse
+import json
+import yaml
+from textwrap import dedent
+from pynsot.client import get_api_client
+from pynsot.app import HttpServerError
+from click.exceptions import UsageError
+
+from ansible.module_utils.six import string_types
+
+
+def warning(*objs):
+ print("WARNING: ", *objs, file=sys.stderr)
+
+
+class NSoTInventory(object):
+ '''NSoT client object for gathering inventory'''
+
+ def __init__(self):
+ self.config = dict()
+ config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
+ if config_env:
+ try:
+ config_file = os.path.abspath(config_env)
+ except IOError: # If file non-existent, use default config
+ self._config_default()
+ except Exception as e:
+ sys.exit('%s\n' % e)
+
+ with open(config_file) as f:
+ try:
+ self.config.update(yaml.safe_load(f))
+ except TypeError: # If empty file, use default config
+ warning('Empty config file')
+ self._config_default()
+ except Exception as e:
+ sys.exit('%s\n' % e)
+ else: # Use defaults if env var missing
+ self._config_default()
+ self.groups = self.config.keys()
+ self.client = get_api_client()
+ self._meta = {'hostvars': dict()}
+
+ def _config_default(self):
+ default_yaml = '''
+ ---
+ routers:
+ query: deviceType=ROUTER
+ switches:
+ query: deviceType=SWITCH
+ firewalls:
+ query: deviceType=FIREWALL
+ '''
+ self.config = yaml.safe_load(dedent(default_yaml))
+
+ def do_list(self):
+ '''Direct callback for when ``--list`` is provided
+
+ Relies on the configuration generated from init to run
+ _inventory_group()
+ '''
+ inventory = dict()
+ for group, contents in self.config.items():
+ group_response = self._inventory_group(group, contents)
+ inventory.update(group_response)
+ inventory.update({'_meta': self._meta})
+ return json.dumps(inventory)
+
+ def do_host(self, host):
+ return json.dumps(self._hostvars(host))
+
+ def _hostvars(self, host):
+ '''Return dictionary of all device attributes
+
+ Depending on the number of devices in NSoT, this could be rather slow since
+ it has to request every device resource and filter through them.
+ '''
+ device = [i for i in self.client.devices.get()
+ if host in i['hostname']][0]
+ attributes = device['attributes']
+ attributes.update({'site_id': device['site_id'], 'id': device['id']})
+ return attributes
+
+ def _inventory_group(self, group, contents):
+ '''Takes a group and returns inventory for it as dict
+
+ :param group: Group name
+ :type group: str
+ :param contents: The contents of the group's YAML config
+ :type contents: dict
+
+ contents param should look like::
+
+ {
+ 'query': 'xx',
+ 'vars':
+ 'a': 'b'
+ }
+
+ Will return something like::
+
+ { group: {
+ hosts: [],
+ vars: {},
+ }
+ '''
+ query = contents.get('query')
+ hostvars = contents.get('vars', dict())
+ site = contents.get('site', dict())
+ obj = {group: dict()}
+ obj[group]['hosts'] = []
+ obj[group]['vars'] = hostvars
+ try:
+ assert isinstance(query, string_types)
+ except Exception:
+ sys.exit('ERR: Group queries must be a single string\n'
+ ' Group: %s\n'
+ ' Query: %s\n' % (group, query)
+ )
+ try:
+ if site:
+ site = self.client.sites(site)
+ devices = site.devices.query.get(query=query)
+ else:
+ devices = self.client.devices.query.get(query=query)
+ except HttpServerError as e:
+ if '500' in str(e.response):
+ _site = 'Correct site id?'
+ _attr = 'Queried attributes actually exist?'
+ questions = _site + '\n' + _attr
+ sys.exit('ERR: 500 from server.\n%s' % questions)
+ else:
+ raise
+ except UsageError:
+ sys.exit('ERR: Could not connect to server. Running?')
+
+ # Would do a list comprehension here, but would like to save code/time
+ # and also acquire attributes in this step
+ for host in devices:
+ # Iterate through each device that matches query, assign hostname
+ # to the group's hosts array and then use this single iteration as
+ # a chance to update self._meta which will be used in the final
+ # return
+ hostname = host['hostname']
+ obj[group]['hosts'].append(hostname)
+ attributes = host['attributes']
+ attributes.update({'site_id': host['site_id'], 'id': host['id']})
+ self._meta['hostvars'].update({hostname: attributes})
+
+ return obj
+
+
+def parse_args():
+ desc = __doc__.splitlines()[4] # Just to avoid being redundant
+
+ # Establish parser with options and error out if no action provided
+ parser = argparse.ArgumentParser(
+ description=desc,
+ conflict_handler='resolve',
+ )
+
+ # Arguments
+ #
+ # Currently accepting (--list | -l) and (--host | -h)
+ # These must not be allowed together
+ parser.add_argument(
+ '--list', '-l',
+ help='Print JSON object containing hosts to STDOUT',
+ action='store_true',
+ dest='list_', # Avoiding syntax highlighting for list
+ )
+
+ parser.add_argument(
+ '--host', '-h',
+ help='Print JSON object containing hostvars for <host>',
+ action='store',
+ )
+ args = parser.parse_args()
+
+ if not args.list_ and not args.host: # Require at least one option
+ parser.exit(status=1, message='No action requested')
+
+ if args.list_ and args.host: # Do not allow multiple options
+ parser.exit(status=1, message='Too many actions requested')
+
+ return args
+
+
+def main():
+ '''Set up argument handling and callback routing'''
+ args = parse_args()
+ client = NSoTInventory()
+
+ # Callback condition
+ if args.list_:
+ print(client.do_list())
+ elif args.host:
+ print(client.do_host(args.host))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.yaml b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.yaml
new file mode 100644
index 00000000..ebddbc82
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/nsot.yaml
@@ -0,0 +1,22 @@
+---
+juniper_routers:
+ query: 'deviceType=ROUTER manufacturer=JUNIPER'
+ vars:
+ group: juniper_routers
+ netconf: true
+ os: junos
+
+cisco_asa:
+ query: 'manufacturer=CISCO deviceType=FIREWALL'
+ vars:
+ group: cisco_asa
+ routed_vpn: false
+ stateful: true
+
+old_cisco_asa:
+ query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+'
+ vars:
+ old_nat: true
+
+not_f10:
+ query: '-manufacturer=FORCE10'
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openshift.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openshift.py
new file mode 100644
index 00000000..85ea00cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openshift.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+# (c) 2013, Michael Scherer <misc@zarb.org>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+inventory: openshift
+short_description: Openshift gears external inventory script
+description:
+ - Generates inventory of Openshift gears using the REST interface
+ - this permits reusing playbooks to set up an Openshift gear
+author: Michael Scherer
+'''
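+# Example invocation (illustrative; credentials are read from
+# ~/.openshift/express.conf or the ANSIBLE_OPENSHIFT_* environment variables):
+#   ANSIBLE_OPENSHIFT_BROKER=openshift.example.com ./openshift.py --list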
+
+import json
+import os
+import os.path
+import sys
+import StringIO
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+configparser = None
+
+
+def get_from_rhc_config(variable):
+ global configparser
+ CONF_FILE = os.path.expanduser('~/.openshift/express.conf')
+ if os.path.exists(CONF_FILE):
+ if not configparser:
+ ini_str = '[root]\n' + open(CONF_FILE, 'r').read()
+ configparser = ConfigParser.SafeConfigParser()
+ configparser.readfp(StringIO.StringIO(ini_str))
+ try:
+ return configparser.get('root', variable)
+ except ConfigParser.NoOptionError:
+ return None
+
+
+def get_config(env_var, config_var):
+ result = os.getenv(env_var)
+ if not result:
+ result = get_from_rhc_config(config_var)
+ if not result:
+ sys.exit("failed=True msg='missing %s'" % env_var)
+ return result
+
+
+def get_json_from_api(url, username, password):
+ headers = {'Accept': 'application/json; version=1.5'}
+ response = open_url(url, headers=headers, url_username=username, url_password=password)
+ return json.loads(response.read())['data']
+
+
+username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin')
+password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password')
+broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server')
+
+
+response = get_json_from_api(broker_url + '/domains', username, password)
+
+response = get_json_from_api("%s/domains/%s/applications" %
+ (broker_url, response[0]['id']), username, password)
+
+result = {}
+for app in response:
+
+ # ssh://520311404832ce3e570000ff@blog-johndoe.example.org
+ (user, host) = app['ssh_url'][6:].split('@')
+ app_name = host.split('-')[0]
+
+ result[app_name] = {}
+ result[app_name]['hosts'] = []
+ result[app_name]['hosts'].append(host)
+ result[app_name]['vars'] = {}
+ result[app_name]['vars']['ansible_ssh_user'] = user
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print(json.dumps({}))
+else:
+ print("Need an argument, either --list or --host <host>")
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openvz.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openvz.py
new file mode 100644
index 00000000..95eec839
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/openvz.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# openvz.py
+#
+# Copyright 2014 jordonr <jordon@beamsyn.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Inspired by libvirt_lxc.py inventory script
+# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
+#
+# Groups are determined by the description field of openvz guests
+# multiple groups can be separated by commas: webserver,dbserver
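+#
+# For example (hypothetical guest), a description of "webserver,dbserver"
+# places that guest in both the "webserver" and "dbserver" groups; guests
+# with no description fall back to the default "vzguest" group.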
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from subprocess import Popen, PIPE
+import sys
+import json
+
+
+# List openvz hosts
+vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
+# Add openvz hosts to the inventory and Add "_meta" trick
+inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
+# default group, when description not defined
+default_group = ['vzguest']
+
+
+def get_guests():
+ # Loop through vzhosts
+ for h in vzhosts:
+ # SSH to vzhost and get the list of guests in json
+ pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
+
+ # Load Json info of guests
+ json_data = json.loads(pipe.stdout.read())
+
+ # loop through guests
+ for j in json_data:
+ # Add information to host vars
+ inventory['_meta']['hostvars'][j['hostname']] = {
+ 'ctid': j['ctid'],
+ 'veid': j['veid'],
+ 'vpsid': j['vpsid'],
+ 'private_path': j['private'],
+ 'root_path': j['root'],
+ 'ip': j['ip']
+ }
+
+ # determine group from guest description
+ if j['description'] is not None:
+ groups = j['description'].split(",")
+ else:
+ groups = default_group
+
+ # add guest to inventory
+ for g in groups:
+ if g not in inventory:
+ inventory[g] = {'hosts': []}
+
+ inventory[g]['hosts'].append(j['hostname'])
+
+ return inventory
+
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ inv_json = get_guests()
+ print(json.dumps(inv_json, sort_keys=True))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print(json.dumps({}))
+else:
+ print("Need an argument, either --list or --host <host>")
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.ini
new file mode 100644
index 00000000..d9aaf8a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.ini
@@ -0,0 +1,35 @@
+# Copyright 2013 Google Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# Author: Josha Inglis <jinglis@iix.net> based on the gce.ini by Eric Johnson <erjohnso@google.com>
+
+[ovirt]
+# For ovirt.py script, which can be used with Python SDK version 3
+# Connection configuration information can be stored in a 'secrets.py'
+# file. Ideally, the 'secrets.py' file will already exist in your PYTHONPATH
+# and be picked up automatically with an import statement in the inventory
+# script. However, you can specify an absolute path to the secrets.py file
+# with the 'ovirt_api_secrets' parameter.
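+#
+# A minimal 'secrets.py' (hypothetical) would define something like:
+#   OVIRT_KEYWORD_PARAMS = {'url': 'https://engine.example.com/api',
+#                           'username': 'admin@internal',
+#                           'password': 'secret'}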
+ovirt_api_secrets =
+
+# If you are not going to use a 'secrets.py' file, you can set the necessary
+# authorization parameters here.
+ovirt_url =
+ovirt_username =
+ovirt_password =
+ovirt_ca_file =
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.py
new file mode 100644
index 00000000..04f7fc58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+# Copyright 2015 IIX Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+ovirt external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API requests to
+oVirt via the ovirt-engine-sdk-python library.
+
+When run against a specific host, this script returns the following variables
+based on the data obtained from the ovirt_sdk Node object:
+ - ovirt_uuid
+ - ovirt_id
+ - ovirt_image
+ - ovirt_machine_type
+ - ovirt_ips
+ - ovirt_name
+ - ovirt_description
+ - ovirt_status
+ - ovirt_zone
+ - ovirt_tags
+ - ovirt_stats
+
+When run in --list mode, instances are grouped by the following categories:
+
+ - zone:
+ zone group name.
+ - instance tags:
+ An entry is created for each tag. For example, if you have two instances
+ with a common tag called 'foo', they will both be grouped together under
+ the 'tag_foo' name.
+ - network name:
+ the name of the network is appended to 'network_' (e.g. the 'default'
+ network will result in a group named 'network_default')
+ - running status:
+ group name prefixed with 'status_' (e.g. status_up, status_down,..)
+
+Examples:
+ Execute uname on all instances in the us-central1-a zone
+ $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
+
+ Use the ovirt inventory script to print out instance specific information
+ $ contrib/inventory/ovirt.py --host my_instance
+
+Author: Josha Inglis <jinglis@iix.net> based on the gce.py by Eric Johnson <erjohnso@google.com>
+Version: 0.0.1
+"""
+
+USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
+USER_AGENT_VERSION = "v1"
+
+import sys
+import os
+import argparse
+from collections import defaultdict
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+import json
+
+try:
+ # noinspection PyUnresolvedReferences
+ from ovirtsdk.api import API
+ # noinspection PyUnresolvedReferences
+ from ovirtsdk.xml import params
+except ImportError:
+ print("ovirt inventory script requires ovirt-engine-sdk-python")
+ sys.exit(1)
+
+
+class OVirtInventory(object):
+ def __init__(self):
+ # Read settings and parse CLI arguments
+ self.args = self.parse_cli_args()
+ self.driver = self.get_ovirt_driver()
+
+ # Just display data for specific host
+ if self.args.host:
+ print(self.json_format_dict(
+ self.node_to_dict(self.get_instance(self.args.host)),
+ pretty=self.args.pretty
+ ))
+ sys.exit(0)
+
+ # Otherwise, assume user wants all instances grouped
+ print(
+ self.json_format_dict(
+ data=self.group_instances(),
+ pretty=self.args.pretty
+ )
+ )
+ sys.exit(0)
+
+ @staticmethod
+ def get_ovirt_driver():
+ """
+ Determine the ovirt authorization settings and return an ovirt_sdk driver.
+
+ :rtype : ovirtsdk.api.API
+ """
+ kwargs = {}
+
+ ovirt_ini_default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
+ ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
+
+ # Create a ConfigParser.
+ # This provides empty defaults to each key, so that environment
+ # variable configuration (as opposed to INI configuration) is able
+ # to work.
+ config = ConfigParser.SafeConfigParser(defaults={
+ 'ovirt_url': '',
+ 'ovirt_username': '',
+ 'ovirt_password': '',
+ 'ovirt_api_secrets': '',
+ })
+ if 'ovirt' not in config.sections():
+ config.add_section('ovirt')
+ config.read(ovirt_ini_path)
+
+ # Attempt to get ovirt params from a configuration file, if one
+ # exists.
+ secrets_path = config.get('ovirt', 'ovirt_api_secrets')
+ secrets_found = False
+ try:
+ # noinspection PyUnresolvedReferences,PyPackageRequirements
+ import secrets
+
+ kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
+ secrets_found = True
+ except ImportError:
+ pass
+
+ if not secrets_found and secrets_path:
+ if not secrets_path.endswith('secrets.py'):
+ err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
+ print(err)
+ sys.exit(1)
+ sys.path.append(os.path.dirname(secrets_path))
+ try:
+ # noinspection PyUnresolvedReferences,PyPackageRequirements
+ import secrets
+
+ kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
+ except ImportError:
+ pass
+ if not secrets_found:
+ kwargs = {
+ 'url': config.get('ovirt', 'ovirt_url'),
+ 'username': config.get('ovirt', 'ovirt_username'),
+ 'password': config.get('ovirt', 'ovirt_password'),
+ }
+
+ # If the appropriate environment variables are set, they override
+ # other configuration; process those into our args and kwargs.
+ kwargs['url'] = os.environ.get('OVIRT_URL', kwargs['url'])
+ kwargs['username'] = next(val for val in [os.environ.get('OVIRT_EMAIL'), os.environ.get('OVIRT_USERNAME'), kwargs['username']] if val is not None)
+ kwargs['password'] = next(val for val in [os.environ.get('OVIRT_PASS'), os.environ.get('OVIRT_PASSWORD'), kwargs['password']] if val is not None)
+
+ # Retrieve and return the ovirt driver.
+ return API(insecure=True, **kwargs)
+
+ @staticmethod
+ def parse_cli_args():
+ """
+ Command line argument processing
+
+ :rtype : argparse.Namespace
+ """
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
+ parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+ parser.add_argument('--host', action='store', help='Get all information about an instance')
+ parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
+ return parser.parse_args()
+
+ def node_to_dict(self, inst):
+ """
+ :type inst: params.VM
+ """
+ if inst is None:
+ return {}
+
+ inst.get_custom_properties()
+ ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
+ if inst.get_guest_info() is not None else []
+ stats = {}
+ for stat in inst.get_statistics().list():
+ stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
+
+ return {
+ 'ovirt_uuid': inst.get_id(),
+ 'ovirt_id': inst.get_id(),
+ 'ovirt_image': inst.get_os().get_type(),
+ 'ovirt_machine_type': self.get_machine_type(inst),
+ 'ovirt_ips': ips,
+ 'ovirt_name': inst.get_name(),
+ 'ovirt_description': inst.get_description(),
+ 'ovirt_status': inst.get_status().get_state(),
+ 'ovirt_zone': inst.get_cluster().get_id(),
+ 'ovirt_tags': self.get_tags(inst),
+ 'ovirt_stats': stats,
+ # Hosts don't have a public name, so we add an IP
+ 'ansible_ssh_host': ips[0] if len(ips) > 0 else None
+ }
+
+ @staticmethod
+ def get_tags(inst):
+ """
+ :type inst: params.VM
+ """
+ return [x.get_name() for x in inst.get_tags().list()]
+
+ def get_machine_type(self, inst):
+ inst_type = inst.get_instance_type()
+ if inst_type:
+ return self.driver.instancetypes.get(id=inst_type.id).name
+
+ # noinspection PyBroadException,PyUnusedLocal
+ def get_instance(self, instance_name):
+ """Gets details about a specific instance """
+ try:
+ return self.driver.vms.get(name=instance_name)
+ except Exception as e:
+ return None
+
+ def group_instances(self):
+ """Group all instances"""
+ groups = defaultdict(list)
+ meta = {"hostvars": {}}
+
+ for node in self.driver.vms.list():
+ assert isinstance(node, params.VM)
+ name = node.get_name()
+
+ meta["hostvars"][name] = self.node_to_dict(node)
+
+ zone = node.get_cluster().get_name()
+ groups[zone].append(name)
+
+ tags = self.get_tags(node)
+ for t in tags:
+ tag = 'tag_%s' % t
+ groups[tag].append(name)
+
+ nets = [x.get_name() for x in node.get_nics().list()]
+ for net in nets:
+ net = 'network_%s' % net
+ groups[net].append(name)
+
+ status = node.get_status().get_state()
+ stat = 'status_%s' % status.lower()
+ if stat in groups:
+ groups[stat].append(name)
+ else:
+ groups[stat] = [name]
+
+ groups["_meta"] = meta
+
+ return groups
+
+ @staticmethod
+ def json_format_dict(data, pretty=False):
+ """ Converts a dict to a JSON object and dumps it as a formatted
+ string """
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+OVirtInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt4.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt4.py
new file mode 100644
index 00000000..afff18db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ovirt4.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+oVirt dynamic inventory script
+=================================
+
+Generates a dynamic inventory file for oVirt.
+
+The script will return the following attributes for each virtual machine:
+ - id
+ - name
+ - host
+ - cluster
+ - status
+ - description
+ - fqdn
+ - os_type
+ - template
+ - tags
+ - statistics
+ - devices
+
+When run in --list mode, virtual machines are grouped by the following categories:
+ - cluster
+ - tag
+ - status
+
+ Note: If a virtual machine has more than one tag, it will appear in each of
+ the corresponding tag groups.
+
+Examples:
+ # Execute update of system on webserver virtual machine:
+
+ $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
+
+ # Get webserver virtual machine information:
+
+ $ contrib/inventory/ovirt4.py --host webserver
+
+Author: Ondra Machacek (@machacekondra)
+"""
+
+import argparse
+import os
+import sys
+
+from collections import defaultdict
+
+from ansible.module_utils.six.moves import configparser
+
+import json
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
+ sys.exit(1)
+
+
+def parse_args():
+ """
+ Create command line parser for oVirt dynamic inventory script.
+ """
+ parser = argparse.ArgumentParser(
+ description='Ansible dynamic inventory script for oVirt.',
+ )
+ parser.add_argument(
+ '--list',
+ action='store_true',
+ default=True,
+ help='Get data of all virtual machines (default: True).',
+ )
+ parser.add_argument(
+ '--host',
+ help='Get data of virtual machines running on specified host.',
+ )
+ parser.add_argument(
+ '--pretty',
+ action='store_true',
+ default=False,
+ help='Pretty format (default: False).',
+ )
+ return parser.parse_args()
+
+
+def create_connection():
+ """
+ Create a connection to oVirt engine API.
+ """
+ # Get the path of the configuration file, by default use
+ # 'ovirt.ini' file in script directory:
+ default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ 'ovirt.ini',
+ )
+ config_path = os.environ.get('OVIRT_INI_PATH', default_path)
+
+ # Create parser and add ovirt section if it doesn't exist:
+ config = configparser.SafeConfigParser(
+ defaults={
+ 'ovirt_url': os.environ.get('OVIRT_URL'),
+ 'ovirt_username': os.environ.get('OVIRT_USERNAME'),
+ 'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
+ 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
+ }
+ )
+ if not config.has_section('ovirt'):
+ config.add_section('ovirt')
+ config.read(config_path)
+
+ # Create a connection with options defined in ini file:
+ return sdk.Connection(
+ url=config.get('ovirt', 'ovirt_url'),
+ username=config.get('ovirt', 'ovirt_username'),
+ password=config.get('ovirt', 'ovirt_password', raw=True),
+ ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
+ insecure=not config.get('ovirt', 'ovirt_ca_file'),
+ )
+
+
+def get_dict_of_struct(connection, vm):
+ """
+ Transform SDK Vm Struct type to Python dictionary.
+ """
+ if vm is None:
+ return dict()
+
+ vms_service = connection.system_service().vms_service()
+ clusters_service = connection.system_service().clusters_service()
+ vm_service = vms_service.vm_service(vm.id)
+ devices = vm_service.reported_devices_service().list()
+ tags = vm_service.tags_service().list()
+ stats = vm_service.statistics_service().list()
+ labels = vm_service.affinity_labels_service().list()
+ groups = clusters_service.cluster_service(
+ vm.cluster.id
+ ).affinity_groups_service().list()
+
+ return {
+ 'id': vm.id,
+ 'name': vm.name,
+ 'host': connection.follow_link(vm.host).name if vm.host else None,
+ 'cluster': connection.follow_link(vm.cluster).name,
+ 'status': str(vm.status),
+ 'description': vm.description,
+ 'fqdn': vm.fqdn,
+ 'os_type': vm.os.type,
+ 'template': connection.follow_link(vm.template).name,
+ 'tags': [tag.name for tag in tags],
+ 'affinity_labels': [label.name for label in labels],
+ 'affinity_groups': [
+ group.name for group in groups
+ if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
+ ],
+ 'statistics': dict(
+ (stat.name, stat.values[0].datum) for stat in stats if stat.values
+ ),
+ 'devices': dict(
+ (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
+ ),
+ 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
+ }
+
+
+def get_data(connection, vm_name=None):
+ """
+ Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
+ """
+ vms_service = connection.system_service().vms_service()
+ clusters_service = connection.system_service().clusters_service()
+
+ if vm_name:
+ vm = vms_service.list(search='name=%s' % vm_name) or [None]
+ data = get_dict_of_struct(
+ connection=connection,
+ vm=vm[0],
+ )
+ else:
+ vms = dict()
+ data = defaultdict(list)
+ for vm in vms_service.list():
+ name = vm.name
+ vm_service = vms_service.vm_service(vm.id)
+ cluster_service = clusters_service.cluster_service(vm.cluster.id)
+
+ # Add vm to vms dict:
+ vms[name] = get_dict_of_struct(connection, vm)
+
+ # Add vm to cluster group:
+ cluster_name = connection.follow_link(vm.cluster).name
+ data['cluster_%s' % cluster_name].append(name)
+
+ # Add vm to tag group:
+ tags_service = vm_service.tags_service()
+ for tag in tags_service.list():
+ data['tag_%s' % tag.name].append(name)
+
+ # Add vm to status group:
+ data['status_%s' % vm.status].append(name)
+
+ # Add vm to affinity group:
+ for group in cluster_service.affinity_groups_service().list():
+ if vm.name in [
+ v.name for v in connection.follow_link(group.vms)
+ ]:
+ data['affinity_group_%s' % group.name].append(vm.name)
+
+ # Add vm to affinity label group:
+ affinity_labels_service = vm_service.affinity_labels_service()
+ for label in affinity_labels_service.list():
+ data['affinity_label_%s' % label.name].append(name)
+
+ data["_meta"] = {
+ 'hostvars': vms,
+ }
+
+ return data
+
+
+def main():
+ args = parse_args()
+ connection = create_connection()
+
+ print(
+ json.dumps(
+ obj=get_data(
+ connection=connection,
+ vm_name=args.host,
+ ),
+ sort_keys=args.pretty,
+ indent=args.pretty * 2,
+ )
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.ini
new file mode 100644
index 00000000..6dcc027b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.ini
@@ -0,0 +1,53 @@
+# Ansible Packet.net external inventory script settings
+#
+
+[packet]
+
+# Packet projects to get info for. Set this to 'all' to get info for all
+# projects in Packet and merge the results together. Alternatively, set
+# this to a comma separated list of projects. E.g. 'project-1,project-3,project-4'
+projects = all
+projects_exclude =
+
+# By default, Packet devices in all states are returned. Specify
+# the device states to return as a comma-separated list.
+# device_states = active, inactive, queued, provisioning
+
+# Number of items per page to retrieve from the Packet API at a time
+items_per_page = 999
+
+# API calls to Packet are costly. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-packet.cache
+# - ansible-packet.index
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
+
+# Organize groups into a nested hierarchy instead of a flat namespace.
+nested_groups = False
+
+# Replace dashes in group names with underscores to avoid issues with Ansible
+replace_dash_in_groups = True
+
+# The packet inventory output can become very large. To manage its size,
+# configure which groups should be created.
+group_by_device_id = True
+group_by_hostname = True
+group_by_facility = True
+group_by_project = True
+group_by_operating_system = True
+group_by_plan_type = True
+group_by_tags = True
+group_by_tag_none = True
+
+# If you only want to include hosts that match a certain regular expression
+# pattern_include = staging-*
+
+# If you want to exclude any hosts that match a certain regular expression
+# pattern_exclude = staging-*
+
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.py
new file mode 100644
index 00000000..196e2686
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/packet_net.py
@@ -0,0 +1,496 @@
+#!/usr/bin/env python
+
+'''
+Packet.net external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API request to
+Packet.net using the Packet library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variable needed for the Packet API token has already been set:
+    export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
+
+This script also assumes there is a packet_net.ini file alongside it. To specify a
+different path to packet_net.ini, define the PACKET_NET_INI_PATH environment variable:
+
+ export PACKET_NET_INI_PATH=/path/to/my_packet_net.ini
+
+'''
+
+# (c) 2016, Peter Sankauskas
+# (c) 2017, Tomas Karasek
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+import sys
+import os
+import argparse
+import re
+from time import time
+
+from ansible.module_utils import six
+from ansible.module_utils.six.moves import configparser
+
+try:
+ import packet
+except ImportError as e:
+ sys.exit("failed=True msg='`packet-python` library required for this script'")
+
+import traceback
+
+
+import json
+
+
+ini_section = 'packet'
+
+
+class PacketInventory(object):
+
+ def _empty_inventory(self):
+ return {"_meta": {"hostvars": {}}}
+
+ def __init__(self):
+ ''' Main execution path '''
+
+        # Inventory grouped by device IDs, hostnames, projects, facilities,
+        # operating systems, plans, and tags
+ self.inventory = self._empty_inventory()
+
+ # Index of hostname (address) to device ID
+ self.index = {}
+
+ # Read settings and parse CLI arguments
+ self.parse_cli_args()
+ self.read_settings()
+
+ # Cache
+ if self.args.refresh_cache:
+ self.do_api_calls_update_cache()
+ elif not self.is_cache_valid():
+ self.do_api_calls_update_cache()
+
+ # Data to print
+ if self.args.host:
+ data_to_print = self.get_host_info()
+
+ elif self.args.list:
+ # Display list of devices for inventory
+ if self.inventory == self._empty_inventory():
+ data_to_print = self.get_inventory_from_cache()
+ else:
+ data_to_print = self.json_format_dict(self.inventory, True)
+
+ print(data_to_print)
+
+ def is_cache_valid(self):
+        ''' Determines whether the cache file has expired or is still valid '''
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_index):
+ return True
+
+ return False
+
+ def read_settings(self):
+ ''' Reads the settings from the packet_net.ini file '''
+ if six.PY3:
+ config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
+
+ _ini_path_raw = os.environ.get('PACKET_NET_INI_PATH')
+
+ if _ini_path_raw:
+ packet_ini_path = os.path.expanduser(os.path.expandvars(_ini_path_raw))
+ else:
+ packet_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'packet_net.ini')
+ config.read(packet_ini_path)
+
+        # Items per page; the default can be overridden in the ini file
+        self.items_per_page = 999
+        if config.has_option(ini_section, 'items_per_page'):
+            self.items_per_page = config.getint(ini_section, 'items_per_page')
+
+ # Instance states to be gathered in inventory. Default is all of them.
+ packet_valid_device_states = [
+ 'active',
+ 'inactive',
+ 'queued',
+ 'provisioning'
+ ]
+ self.packet_device_states = []
+ if config.has_option(ini_section, 'device_states'):
+ for device_state in config.get(ini_section, 'device_states').split(','):
+ device_state = device_state.strip()
+ if device_state not in packet_valid_device_states:
+ continue
+ self.packet_device_states.append(device_state)
+ else:
+ self.packet_device_states = packet_valid_device_states
+
+ # Cache related
+ cache_dir = os.path.expanduser(config.get(ini_section, 'cache_path'))
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+
+ self.cache_path_cache = cache_dir + "/ansible-packet.cache"
+ self.cache_path_index = cache_dir + "/ansible-packet.index"
+ self.cache_max_age = config.getint(ini_section, 'cache_max_age')
+
+ # Configure nested groups instead of flat namespace.
+ if config.has_option(ini_section, 'nested_groups'):
+ self.nested_groups = config.getboolean(ini_section, 'nested_groups')
+ else:
+ self.nested_groups = False
+
+ # Replace dash or not in group names
+ if config.has_option(ini_section, 'replace_dash_in_groups'):
+ self.replace_dash_in_groups = config.getboolean(ini_section, 'replace_dash_in_groups')
+ else:
+ self.replace_dash_in_groups = True
+
+ # Configure which groups should be created.
+ group_by_options = [
+ 'group_by_device_id',
+ 'group_by_hostname',
+ 'group_by_facility',
+ 'group_by_project',
+ 'group_by_operating_system',
+ 'group_by_plan_type',
+ 'group_by_tags',
+ 'group_by_tag_none',
+ ]
+ for option in group_by_options:
+ if config.has_option(ini_section, option):
+ setattr(self, option, config.getboolean(ini_section, option))
+ else:
+ setattr(self, option, True)
+
+ # Do we need to just include hosts that match a pattern?
+ try:
+ pattern_include = config.get(ini_section, 'pattern_include')
+ if pattern_include and len(pattern_include) > 0:
+ self.pattern_include = re.compile(pattern_include)
+ else:
+ self.pattern_include = None
+ except configparser.NoOptionError:
+ self.pattern_include = None
+
+ # Do we need to exclude hosts that match a pattern?
+ try:
+ pattern_exclude = config.get(ini_section, 'pattern_exclude')
+ if pattern_exclude and len(pattern_exclude) > 0:
+ self.pattern_exclude = re.compile(pattern_exclude)
+ else:
+ self.pattern_exclude = None
+ except configparser.NoOptionError:
+ self.pattern_exclude = None
+
+ # Projects
+ self.projects = []
+ configProjects = config.get(ini_section, 'projects')
+        configProjects_exclude = [
+            name.strip() for name in config.get(ini_section, 'projects_exclude').split(',') if name.strip()
+        ]
+ if (configProjects == 'all'):
+ for projectInfo in self.get_projects():
+ if projectInfo.name not in configProjects_exclude:
+ self.projects.append(projectInfo.name)
+ else:
+ self.projects = configProjects.split(",")
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List Devices (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific device')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def do_api_calls_update_cache(self):
+        ''' Do API calls for each configured project, and save data in cache files '''
+
+ for projectInfo in self.get_projects():
+ if projectInfo.name in self.projects:
+ self.get_devices_by_project(projectInfo)
+
+ self.write_to_cache(self.inventory, self.cache_path_cache)
+ self.write_to_cache(self.index, self.cache_path_index)
+
+ def connect(self):
+ ''' create connection to api server'''
+ token = os.environ.get('PACKET_API_TOKEN')
+ if token is None:
+ raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
+ manager = packet.Manager(auth_token=token)
+ return manager
+
+ def get_projects(self):
+ '''Makes a Packet API call to get the list of projects'''
+
+ params = {
+ 'per_page': self.items_per_page
+ }
+
+ try:
+ manager = self.connect()
+ projects = manager.list_projects(params=params)
+ return projects
+ except Exception as e:
+ traceback.print_exc()
+ self.fail_with_error(e, 'getting Packet projects')
+
+ def get_devices_by_project(self, project):
+        ''' Makes a Packet API call to get the list of devices in a particular
+        project '''
+
+ params = {
+ 'per_page': self.items_per_page
+ }
+
+ try:
+ manager = self.connect()
+ devices = manager.list_devices(project_id=project.id, params=params)
+
+ for device in devices:
+ self.add_device(device, project)
+
+ except Exception as e:
+ traceback.print_exc()
+ self.fail_with_error(e, 'getting Packet devices')
+
+ def fail_with_error(self, err_msg, err_operation=None):
+ '''log an error to std err for ansible-playbook to consume and exit'''
+ if err_operation:
+ err_msg = 'ERROR: "{err_msg}", while: {err_operation}\n'.format(
+ err_msg=err_msg, err_operation=err_operation)
+ sys.stderr.write(err_msg)
+ sys.exit(1)
+
+ def get_device(self, device_id):
+ manager = self.connect()
+
+ device = manager.get_device(device_id)
+ return device
+
+ def add_device(self, device, project):
+ ''' Adds a device to the inventory and index, as long as it is
+ addressable '''
+
+ # Only return devices with desired device states
+ if device.state not in self.packet_device_states:
+ return
+
+ # Select the best destination address. Only include management
+ # addresses as non-management (elastic) addresses need manual
+ # host configuration to be routable.
+ # See https://help.packet.net/article/54-elastic-ips.
+ dest = None
+ for ip_address in device.ip_addresses:
+ if ip_address['public'] is True and \
+ ip_address['address_family'] == 4 and \
+ ip_address['management'] is True:
+ dest = ip_address['address']
+
+ if not dest:
+ # Skip devices we cannot address (e.g. private VPC subnet)
+ return
+
+ # if we only want to include hosts that match a pattern, skip those that don't
+ if self.pattern_include and not self.pattern_include.match(device.hostname):
+ return
+
+ # if we need to exclude hosts that match a pattern, skip those
+ if self.pattern_exclude and self.pattern_exclude.match(device.hostname):
+ return
+
+ # Add to index
+ self.index[dest] = [project.id, device.id]
+
+ # Inventory: Group by device ID (always a group of 1)
+ if self.group_by_device_id:
+ self.inventory[device.id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'devices', device.id)
+
+ # Inventory: Group by device name (hopefully a group of 1)
+ if self.group_by_hostname:
+ self.push(self.inventory, device.hostname, dest)
+ if self.nested_groups:
+                self.push_group(self.inventory, 'hostnames', device.hostname)
+
+ # Inventory: Group by project
+ if self.group_by_project:
+ self.push(self.inventory, project.name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'projects', project.name)
+
+ # Inventory: Group by facility
+ if self.group_by_facility:
+ self.push(self.inventory, device.facility['code'], dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, project.name, device.facility['code'])
+
+ # Inventory: Group by OS
+ if self.group_by_operating_system:
+ self.push(self.inventory, device.operating_system.slug, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'operating_systems', device.operating_system.slug)
+
+ # Inventory: Group by plan type
+ if self.group_by_plan_type:
+ self.push(self.inventory, device.plan['slug'], dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'plans', device.plan['slug'])
+
+ # Inventory: Group by tag keys
+ if self.group_by_tags:
+ for k in device.tags:
+ key = self.to_safe("tag_" + k)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+
+ # Global Tag: devices without tags
+ if self.group_by_tag_none and len(device.tags) == 0:
+ self.push(self.inventory, 'tag_none', dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', 'tag_none')
+
+ # Global Tag: tag all Packet devices
+ self.push(self.inventory, 'packet', dest)
+
+ self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
+
+ def get_host_info_dict_from_device(self, device):
+ device_vars = {}
+ for key in vars(device):
+ value = getattr(device, key)
+ key = self.to_safe('packet_' + key)
+
+ # Handle complex types
+ if key == 'packet_state':
+ device_vars[key] = device.state or ''
+ elif key == 'packet_hostname':
+ device_vars[key] = value
+ elif isinstance(value, (int, bool)):
+ device_vars[key] = value
+ elif isinstance(value, six.string_types):
+ device_vars[key] = value.strip()
+ elif value is None:
+ device_vars[key] = ''
+ elif key == 'packet_facility':
+ device_vars[key] = value['code']
+ elif key == 'packet_operating_system':
+ device_vars[key] = value.slug
+ elif key == 'packet_plan':
+ device_vars[key] = value['slug']
+ elif key == 'packet_tags':
+ for k in value:
+ key = self.to_safe('packet_tag_' + k)
+ device_vars[key] = k
+ else:
+ pass
+ # print key
+ # print type(value)
+ # print value
+
+ return device_vars
+
+ def get_host_info(self):
+ ''' Get variables about a specific host '''
+
+ if len(self.index) == 0:
+ # Need to load index from cache
+ self.load_index_from_cache()
+
+ if self.args.host not in self.index:
+ # try updating the cache
+ self.do_api_calls_update_cache()
+ if self.args.host not in self.index:
+ # host might not exist anymore
+ return self.json_format_dict({}, True)
+
+ (project_id, device_id) = self.index[self.args.host]
+
+ device = self.get_device(device_id)
+ return self.json_format_dict(self.get_host_info_dict_from_device(device), True)
+
+ def push(self, my_dict, key, element):
+ ''' Push an element onto an array that may not have been defined in
+ the dict '''
+ group_info = my_dict.setdefault(key, [])
+ if isinstance(group_info, dict):
+ host_list = group_info.setdefault('hosts', [])
+ host_list.append(element)
+ else:
+ group_info.append(element)
+
+ def push_group(self, my_dict, key, element):
+ ''' Push a group as a child of another group. '''
+ parent_group = my_dict.setdefault(key, {})
+ if not isinstance(parent_group, dict):
+ parent_group = my_dict[key] = {'hosts': parent_group}
+ child_groups = parent_group.setdefault('children', [])
+ if element not in child_groups:
+ child_groups.append(element)
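+
+    # Illustrative sketch (names and addresses are placeholders) of what
+    # push() and push_group() build when nested_groups is enabled:
+    #
+    #   "projects": {"children": ["my-project"]},
+    #   "my-project": {"hosts": ["198.51.100.10"], "children": ["ewr1"]},
+    #
+    # With nested_groups disabled, each group stays a flat list of addresses,
+    # e.g. "my-project": ["198.51.100.10"].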
+
+ def get_inventory_from_cache(self):
+ ''' Reads the inventory from the cache file and returns it as a JSON
+ object '''
+
+ cache = open(self.cache_path_cache, 'r')
+ json_inventory = cache.read()
+ return json_inventory
+
+ def load_index_from_cache(self):
+        ''' Reads the index from the cache file and sets self.index '''
+
+ cache = open(self.cache_path_index, 'r')
+ json_index = cache.read()
+ self.index = json.loads(json_index)
+
+ def write_to_cache(self, data, filename):
+ ''' Writes data in JSON format to a file '''
+
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+ def uncammelize(self, key):
+ temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
+
+ def to_safe(self, word):
+ ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+ regex = r"[^A-Za-z0-9\_"
+ if not self.replace_dash_in_groups:
+ regex += r"\-"
+ return re.sub(regex + "]", "_", word)
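+
+    # For example (illustrative), with replace_dash_in_groups enabled
+    # to_safe('tag_my-web tier') returns 'tag_my_web_tier'; with it disabled,
+    # dashes are preserved and only the space is replaced: 'tag_my-web_tier'.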
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+PacketInventory()
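+
+# Illustrative invocations (the token value is a placeholder):
+#
+#   export PACKET_API_TOKEN=abc123
+#   ./packet_net.py --list              # full inventory, served from cache when valid
+#   ./packet_net.py --refresh-cache --list
+#   ./packet_net.py --host 198.51.100.10
+#
+# --host expects the address used as the inventory hostname, i.e. the device's
+# public management IPv4 address selected in add_device().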
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/proxmox.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/proxmox.py
new file mode 100644
index 00000000..21969341
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/proxmox.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Updated 2016 by Matt Harris <matthaeus.harris@gmail.com>
+#
+# Added support for Proxmox VE 4.x
+# Added support for using the Notes field of a VM to define groups and variables:
+# A well-formatted JSON object in the Notes field will be added to the _meta
+# section for that VM. In addition, the "groups" key of this JSON object may be
+# used to specify group membership:
+#
+# { "groups": ["utility", "databases"], "a": false, "b": true }
+
+import json
+import os
+import sys
+from optparse import OptionParser
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+from ansible.module_utils.urls import open_url
+
+
+class ProxmoxNodeList(list):
+ def get_names(self):
+ return [node['node'] for node in self]
+
+
+class ProxmoxVM(dict):
+ def get_variables(self):
+ variables = {}
+ for key, value in iteritems(self):
+ variables['proxmox_' + key] = value
+ return variables
+
+
+class ProxmoxVMList(list):
+ def __init__(self, data=None):
+ data = [] if data is None else data
+
+ for item in data:
+ self.append(ProxmoxVM(item))
+
+ def get_names(self):
+ return [vm['name'] for vm in self if vm['template'] != 1]
+
+ def get_by_name(self, name):
+ results = [vm for vm in self if vm['name'] == name]
+ return results[0] if len(results) > 0 else None
+
+ def get_variables(self):
+ variables = {}
+ for vm in self:
+ variables[vm['name']] = vm.get_variables()
+
+ return variables
+
+
+class ProxmoxPoolList(list):
+ def get_names(self):
+ return [pool['poolid'] for pool in self]
+
+
+class ProxmoxPool(dict):
+ def get_members_name(self):
+ return [member['name'] for member in self['members'] if member['template'] != 1]
+
+
+class ProxmoxAPI(object):
+ def __init__(self, options):
+ self.options = options
+ self.credentials = None
+
+ if not options.url:
+ raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
+ elif not options.username:
+ raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
+ elif not options.password:
+ raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
+
+ def auth(self):
+ request_path = '{0}api2/json/access/ticket'.format(self.options.url)
+
+ request_params = urlencode({
+ 'username': self.options.username,
+ 'password': self.options.password,
+ })
+
+ data = json.load(open_url(request_path, data=request_params))
+
+ self.credentials = {
+ 'ticket': data['data']['ticket'],
+ 'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
+ }
+
+ def get(self, url, data=None):
+ request_path = '{0}{1}'.format(self.options.url, url)
+
+ headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
+ request = open_url(request_path, data=data, headers=headers)
+
+ response = json.load(request)
+ return response['data']
+
+ def nodes(self):
+ return ProxmoxNodeList(self.get('api2/json/nodes'))
+
+ def vms_by_type(self, node, type):
+ return ProxmoxVMList(self.get('api2/json/nodes/{0}/{1}'.format(node, type)))
+
+ def vm_description_by_type(self, node, vm, type):
+ return self.get('api2/json/nodes/{0}/{1}/{2}/config'.format(node, type, vm))
+
+ def node_qemu(self, node):
+ return self.vms_by_type(node, 'qemu')
+
+ def node_qemu_description(self, node, vm):
+ return self.vm_description_by_type(node, vm, 'qemu')
+
+ def node_lxc(self, node):
+ return self.vms_by_type(node, 'lxc')
+
+ def node_lxc_description(self, node, vm):
+ return self.vm_description_by_type(node, vm, 'lxc')
+
+ def pools(self):
+ return ProxmoxPoolList(self.get('api2/json/pools'))
+
+ def pool(self, poolid):
+ return ProxmoxPool(self.get('api2/json/pools/{0}'.format(poolid)))
+
+
+def main_list(options):
+ results = {
+ 'all': {
+ 'hosts': [],
+ },
+ '_meta': {
+ 'hostvars': {},
+ }
+ }
+
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ for node in proxmox_api.nodes().get_names():
+ qemu_list = proxmox_api.node_qemu(node)
+ results['all']['hosts'] += qemu_list.get_names()
+ results['_meta']['hostvars'].update(qemu_list.get_variables())
+ lxc_list = proxmox_api.node_lxc(node)
+ results['all']['hosts'] += lxc_list.get_names()
+ results['_meta']['hostvars'].update(lxc_list.get_variables())
+
+ for vm in results['_meta']['hostvars']:
+ vmid = results['_meta']['hostvars'][vm]['proxmox_vmid']
+ try:
+ type = results['_meta']['hostvars'][vm]['proxmox_type']
+ except KeyError:
+ type = 'qemu'
+ try:
+ description = proxmox_api.vm_description_by_type(node, vmid, type)['description']
+ except KeyError:
+ description = None
+
+ try:
+ metadata = json.loads(description)
+ except TypeError:
+ metadata = {}
+ except ValueError:
+ metadata = {
+ 'notes': description
+ }
+
+ if 'groups' in metadata:
+ # print metadata
+ for group in metadata['groups']:
+ if group not in results:
+ results[group] = {
+ 'hosts': []
+ }
+ results[group]['hosts'] += [vm]
+
+ results['_meta']['hostvars'][vm].update(metadata)
+
+ # pools
+ for pool in proxmox_api.pools().get_names():
+ results[pool] = {
+ 'hosts': proxmox_api.pool(pool).get_members_name(),
+ }
+
+ return results
+
+
+def main_host(options):
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ for node in proxmox_api.nodes().get_names():
+ qemu_list = proxmox_api.node_qemu(node)
+ qemu = qemu_list.get_by_name(options.host)
+ if qemu:
+ return qemu.get_variables()
+
+ return {}
+
+
+def main():
+ parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
+ parser.add_option('--list', action="store_true", default=False, dest="list")
+ parser.add_option('--host', dest="host")
+ parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
+ parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
+ parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
+ parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
+ (options, args) = parser.parse_args()
+
+ if options.list:
+ data = main_list(options)
+ elif options.host:
+ data = main_host(options)
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+ indent = None
+ if options.pretty:
+ indent = 2
+
+ print(json.dumps(data, indent=indent))
+
+
+if __name__ == '__main__':
+ main()
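+
+# Illustrative invocations (URL and credentials are placeholders; note that the
+# URL needs a trailing slash because API paths are appended to it directly):
+#
+#   ./proxmox.py --url https://pve.example.com:8006/ --username root@pam \
+#       --password secret --list --pretty
+#
+# or, using the environment variables read as defaults above:
+#
+#   export PROXMOX_URL=https://pve.example.com:8006/
+#   export PROXMOX_USERNAME=root@pam
+#   export PROXMOX_PASSWORD=secret
+#   ./proxmox.py --host my-vm-name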
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rackhd.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rackhd.py
new file mode 100644
index 00000000..9b4372f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rackhd.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import requests
+import argparse
+
+RACKHD_URL = 'http://localhost:8080'
+
+
+class RackhdInventory(object):
+ def __init__(self, nodeids):
+ self._inventory = {}
+ for nodeid in nodeids:
+ self._load_inventory_data(nodeid)
+ inventory = {}
+ for (nodeid, info) in self._inventory.items():
+ inventory[nodeid] = (self._format_output(nodeid, info))
+ print(json.dumps(inventory))
+
+ def _load_inventory_data(self, nodeid):
+ info = {}
+ info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
+ info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
+
+ results = {}
+ for (key, url) in info.items():
+ r = requests.get(url, verify=False)
+ results[key] = r.text
+ self._inventory[nodeid] = results
+
+    def _format_output(self, nodeid, info):
+        # Initialise the result so a KeyError below cannot leave it undefined
+        output = {}
+        try:
+ node_info = json.loads(info['lookup'])
+ ipaddress = ''
+ if len(node_info) > 0:
+ ipaddress = node_info[0]['ipAddress']
+ output = {'hosts': [ipaddress], 'vars': {}}
+ for (key, result) in info.items():
+ output['vars'][key] = json.loads(result)
+ output['vars']['ansible_ssh_user'] = 'monorail'
+ except KeyError:
+ pass
+ return output
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--host')
+ parser.add_argument('--list', action='store_true')
+ return parser.parse_args()
+
+
+try:
+    # Check if the RackHD URL (e.g. 10.1.1.45:8080) is specified in the environment
+ RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
+except Exception:
+ # use default values
+ pass
+
+# Use the nodeid specified in the environment to limit the data returned
+# or return data for all available nodes
+nodeids = []
+
+if (parse_args().host):
+ try:
+ nodeids += parse_args().host.split(',')
+ RackhdInventory(nodeids)
+ except Exception:
+ pass
+if (parse_args().list):
+ try:
+ url = RACKHD_URL + '/api/common/nodes'
+ r = requests.get(url, verify=False)
+ data = json.loads(r.text)
+ for entry in data:
+ if entry['type'] == 'compute':
+ nodeids.append(entry['id'])
+ RackhdInventory(nodeids)
+ except Exception:
+ pass
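+
+# Illustrative invocations (URL and node ids are placeholders):
+#
+#   export RACKHD_URL=10.1.1.45:8080    # 'http://' is prepended by the script
+#   ./rackhd.py --list                  # inventories all nodes of type 'compute'
+#   ./rackhd.py --host 5542b78c130198aa0f1243c7,5542b78c130198aa0f1243c8
+#
+# --host accepts a single node id or a comma-separated list of node ids.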
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.ini
new file mode 100644
index 00000000..15948e7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.ini
@@ -0,0 +1,66 @@
+# Ansible Rackspace external inventory script settings
+#
+
+[rax]
+
+# Environment Variable: RAX_CREDS_FILE
+#
+# An optional configuration that points to a pyrax-compatible credentials
+# file.
+#
+# If not supplied, rax.py will look for a credentials file
+# at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
+# and therefore requires a file formatted per the SDK's specifications.
+#
+# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+# creds_file = ~/.rackspace_cloud_credentials
+
+# Environment Variable: RAX_REGION
+#
+# An optional environment variable to narrow inventory search
+# scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
+# datacenter) and optionally accepts a comma-separated list.
+# regions = IAD,ORD,DFW
+
+# Environment Variable: RAX_ENV
+#
+# A configuration that will use an environment as configured in
+# ~/.pyrax.cfg, see
+# https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+# env = prod
+
+# Environment Variable: RAX_META_PREFIX
+# Default: meta
+#
+# A configuration that changes the prefix used for meta key/value groups.
+# For compatibility with ec2.py set to "tag"
+# meta_prefix = meta
+
+# Environment Variable: RAX_ACCESS_NETWORK
+# Default: public
+#
+# A configuration that will tell the inventory script to use a specific
+# server network to determine the ansible_ssh_host value. If no address
+# is found, ansible_ssh_host will not be set. Accepts a comma-separated
+# list of network names, the first found wins.
+# access_network = public
+
+# Environment Variable: RAX_ACCESS_IP_VERSION
+# Default: 4
+#
+# A configuration related to "access_network" that will attempt to
+# determine the ansible_ssh_host value for either IPv4 or IPv6. If no
+# address is found, ansible_ssh_host will not be set.
+# Acceptable values are: 4 or 6. Values other than 4 or 6
+# will be ignored, and 4 will be used. Accepts a comma separated list,
+# the first found wins.
+# access_ip_version = 4
+
+# Environment Variable: RAX_CACHE_MAX_AGE
+# Default: 600
+#
+# A configuration that changes the behavior of the inventory cache.
+# Inventory listings younger than this many seconds will be returned from
+# the cache instead of making a full request for all inventory. Setting
+# this value to 0 will force a full request.
+# cache_max_age = 600
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.py
new file mode 100644
index 00000000..0cac0f00
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rax.py
@@ -0,0 +1,460 @@
+#!/usr/bin/env python
+
+# (c) 2013, Jesse Keating <jesse.keating@rackspace.com>,
+# Paul Durivage <paul.durivage@rackspace.com>,
+# Matt Martz <matt@sivel.net>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""
+Rackspace Cloud Inventory
+
+Authors:
+    Jesse Keating <jesse.keating@rackspace.com>,
+ Paul Durivage <paul.durivage@rackspace.com>,
+ Matt Martz <matt@sivel.net>
+
+
+Description:
+ Generates inventory that Ansible can understand by making API request to
+ Rackspace Public Cloud API
+
+ When run against a specific host, this script returns variables similar to:
+ rax_os-ext-sts_task_state
+ rax_addresses
+ rax_links
+ rax_image
+ rax_os-ext-sts_vm_state
+ rax_flavor
+ rax_id
+ rax_rax-bandwidth_bandwidth
+ rax_user_id
+ rax_os-dcf_diskconfig
+ rax_accessipv4
+ rax_accessipv6
+ rax_progress
+ rax_os-ext-sts_power_state
+ rax_metadata
+ rax_status
+ rax_updated
+ rax_hostid
+ rax_name
+ rax_created
+ rax_tenant_id
+ rax_loaded
+
+Configuration:
+ rax.py can be configured using a rax.ini file or via environment
+    variables. The rax.ini file should live in the same directory alongside
+ this script.
+
+ The section header for configuration values related to this
+ inventory plugin is [rax]
+
+ [rax]
+ creds_file = ~/.rackspace_cloud_credentials
+ regions = IAD,ORD,DFW
+ env = prod
+ meta_prefix = meta
+ access_network = public
+ access_ip_version = 4
+
+ Each of these configurations also has a corresponding environment variable.
+ An environment variable will override a configuration file value.
+
+ creds_file:
+ Environment Variable: RAX_CREDS_FILE
+
+ An optional configuration that points to a pyrax-compatible credentials
+ file.
+
+ If not supplied, rax.py will look for a credentials file
+ at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK,
+ and therefore requires a file formatted per the SDK's specifications.
+
+ https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+
+ regions:
+ Environment Variable: RAX_REGION
+
+ An optional environment variable to narrow inventory search
+ scope. If used, needs a value like ORD, DFW, SYD (a Rackspace
+ datacenter) and optionally accepts a comma-separated list.
+
+ environment:
+ Environment Variable: RAX_ENV
+
+ A configuration that will use an environment as configured in
+ ~/.pyrax.cfg, see
+ https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md
+
+ meta_prefix:
+ Environment Variable: RAX_META_PREFIX
+ Default: meta
+
+ A configuration that changes the prefix used for meta key/value groups.
+ For compatibility with ec2.py set to "tag"
+
+ access_network:
+ Environment Variable: RAX_ACCESS_NETWORK
+ Default: public
+
+ A configuration that will tell the inventory script to use a specific
+ server network to determine the ansible_ssh_host value. If no address
+ is found, ansible_ssh_host will not be set. Accepts a comma-separated
+ list of network names, the first found wins.
+
+ access_ip_version:
+ Environment Variable: RAX_ACCESS_IP_VERSION
+ Default: 4
+
+ A configuration related to "access_network" that will attempt to
+ determine the ansible_ssh_host value for either IPv4 or IPv6. If no
+ address is found, ansible_ssh_host will not be set.
+ Acceptable values are: 4 or 6. Values other than 4 or 6
+ will be ignored, and 4 will be used. Accepts a comma-separated list,
+ the first found wins.
+
+Examples:
+ List server instances
+ $ RAX_CREDS_FILE=~/.raxpub rax.py --list
+
+ List servers in ORD datacenter only
+ $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list
+
+ List servers in ORD and DFW datacenters
+ $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list
+
+ Get server details for server named "server.example.com"
+ $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com
+
+ Use the instance private IP to connect (instead of public IP)
+ $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list
+"""
+
+import os
+import re
+import sys
+import argparse
+import warnings
+import collections
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+import json
+
+try:
+ import pyrax
+ from pyrax.utils import slugify
+except ImportError:
+ sys.exit('pyrax is required for this module')
+
+from time import time
+
+from ansible.constants import get_config
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.module_utils.six import text_type
+
+NON_CALLABLES = (text_type, str, bool, dict, int, list, type(None))
+
+
+def load_config_file():
+ p = ConfigParser.ConfigParser()
+ config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ 'rax.ini')
+ try:
+ p.read(config_file)
+ except ConfigParser.Error:
+ return None
+ else:
+ return p
+
+
+def rax_slugify(value):
+ return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
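+# For example, rax_slugify('OS-EXT-STS:task_state') returns
+# 'rax_os-ext-sts_task_state', which is how the rax_* variable names listed in
+# the docstring above are derived from server attributes.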
+
+def to_dict(obj):
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
+ key = rax_slugify(key)
+ instance[key] = value
+
+ return instance
+
+
+def host(regions, hostname):
+ hostvars = {}
+
+ for region in regions:
+ # Connect to the region
+ cs = pyrax.connect_to_cloudservers(region=region)
+ for server in cs.servers.list():
+ if server.name == hostname:
+ for key, value in to_dict(server).items():
+ hostvars[key] = value
+
+ # And finally, add an IP address
+ hostvars['ansible_ssh_host'] = server.accessIPv4
+ print(json.dumps(hostvars, sort_keys=True, indent=4))
+
+
+def _list_into_cache(regions):
+ groups = collections.defaultdict(list)
+ hostvars = collections.defaultdict(dict)
+ images = {}
+ cbs_attachments = collections.defaultdict(dict)
+
+ prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta')
+
+ try:
+ # Ansible 2.3+
+ networks = get_config(p, 'rax', 'access_network',
+ 'RAX_ACCESS_NETWORK', 'public', value_type='list')
+ except TypeError:
+ # Ansible 2.2.x and below
+ # pylint: disable=unexpected-keyword-arg
+ networks = get_config(p, 'rax', 'access_network',
+ 'RAX_ACCESS_NETWORK', 'public', islist=True)
+ try:
+ try:
+ # Ansible 2.3+
+ ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
+ 'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
+ except TypeError:
+ # Ansible 2.2.x and below
+ # pylint: disable=unexpected-keyword-arg
+ ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
+ 'RAX_ACCESS_IP_VERSION', 4, islist=True))
+ except Exception:
+ ip_versions = [4]
+ else:
+ ip_versions = [v for v in ip_versions if v in [4, 6]]
+ if not ip_versions:
+ ip_versions = [4]
+
+ # Go through all the regions looking for servers
+ for region in regions:
+ # Connect to the region
+ cs = pyrax.connect_to_cloudservers(region=region)
+ if cs is None:
+ warnings.warn(
+ 'Connecting to Rackspace region "%s" has caused Pyrax to '
+ 'return None. Is this a valid region?' % region,
+ RuntimeWarning)
+ continue
+ for server in cs.servers.list():
+ # Create a group on region
+ groups[region].append(server.name)
+
+ # Check if group metadata key in servers' metadata
+ group = server.metadata.get('group')
+ if group:
+ groups[group].append(server.name)
+
+ for extra_group in server.metadata.get('groups', '').split(','):
+ if extra_group:
+ groups[extra_group].append(server.name)
+
+ # Add host metadata
+ for key, value in to_dict(server).items():
+ hostvars[server.name][key] = value
+
+ hostvars[server.name]['rax_region'] = region
+
+ for key, value in iteritems(server.metadata):
+ groups['%s_%s_%s' % (prefix, key, value)].append(server.name)
+
+ groups['instance-%s' % server.id].append(server.name)
+ groups['flavor-%s' % server.flavor['id']].append(server.name)
+
+ # Handle boot from volume
+ if not server.image:
+ if not cbs_attachments[region]:
+ cbs = pyrax.connect_to_cloud_blockstorage(region)
+ for vol in cbs.list():
+ if boolean(vol.bootable, strict=False):
+ for attachment in vol.attachments:
+ metadata = vol.volume_image_metadata
+ server_id = attachment['server_id']
+ cbs_attachments[region][server_id] = {
+ 'id': metadata['image_id'],
+ 'name': slugify(metadata['image_name'])
+ }
+ image = cbs_attachments[region].get(server.id)
+ if image:
+ server.image = {'id': image['id']}
+ hostvars[server.name]['rax_image'] = server.image
+ hostvars[server.name]['rax_boot_source'] = 'volume'
+ images[image['id']] = image['name']
+ else:
+ hostvars[server.name]['rax_boot_source'] = 'local'
+
+ try:
+ imagegroup = 'image-%s' % images[server.image['id']]
+ groups[imagegroup].append(server.name)
+ groups['image-%s' % server.image['id']].append(server.name)
+ except KeyError:
+ try:
+ image = cs.images.get(server.image['id'])
+ except cs.exceptions.NotFound:
+ groups['image-%s' % server.image['id']].append(server.name)
+ else:
+ images[image.id] = image.human_id
+ groups['image-%s' % image.human_id].append(server.name)
+ groups['image-%s' % server.image['id']].append(server.name)
+
+ # And finally, add an IP address
+ ansible_ssh_host = None
+ # use accessIPv[46] instead of looping address for 'public'
+ for network_name in networks:
+ if ansible_ssh_host:
+ break
+ if network_name == 'public':
+ for version_name in ip_versions:
+ if ansible_ssh_host:
+ break
+ if version_name == 6 and server.accessIPv6:
+ ansible_ssh_host = server.accessIPv6
+ elif server.accessIPv4:
+ ansible_ssh_host = server.accessIPv4
+ if not ansible_ssh_host:
+ addresses = server.addresses.get(network_name, [])
+ for address in addresses:
+ for version_name in ip_versions:
+ if ansible_ssh_host:
+ break
+ if address.get('version') == version_name:
+ ansible_ssh_host = address.get('addr')
+ break
+ if ansible_ssh_host:
+ hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host
+
+ if hostvars:
+ groups['_meta'] = {'hostvars': hostvars}
+
+ with open(get_cache_file_path(regions), 'w') as cache_file:
+ json.dump(groups, cache_file)
+
+
+def get_cache_file_path(regions):
+ regions_str = '.'.join([reg.strip().lower() for reg in regions])
+ ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp')
+ if not os.path.exists(ansible_tmp_path):
+ os.makedirs(ansible_tmp_path)
+ return os.path.join(ansible_tmp_path,
+ 'ansible-rax-%s-%s.cache' % (
+ pyrax.identity.username, regions_str))
+
+
+def _list(regions, refresh_cache=True):
+ cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
+ 'RAX_CACHE_MAX_AGE', 600))
+
+ if (not os.path.exists(get_cache_file_path(regions)) or
+ refresh_cache or
+ (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
+        # Cache file doesn't exist, is older than cache_max_age, or a refresh was requested
+ _list_into_cache(regions)
+
+ with open(get_cache_file_path(regions), 'r') as cache_file:
+ groups = json.load(cache_file)
+ print(json.dumps(groups, sort_keys=True, indent=4))
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud '
+ 'inventory module')
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--list', action='store_true',
+ help='List active servers')
+ group.add_argument('--host', help='List details about the specific host')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+                        help=('Force refresh of cache, making API requests to '
+                              'Rackspace (default: False - use cache files)'))
+ return parser.parse_args()
+
+
+def setup():
+ default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials')
+
+ env = get_config(p, 'rax', 'environment', 'RAX_ENV', None)
+ if env:
+ pyrax.set_environment(env)
+
+ keyring_username = pyrax.get_setting('keyring_username')
+
+ # Attempt to grab credentials from environment first
+ creds_file = get_config(p, 'rax', 'creds_file',
+ 'RAX_CREDS_FILE', None)
+ if creds_file is not None:
+ creds_file = os.path.expanduser(creds_file)
+ else:
+ # But if that fails, use the default location of
+ # ~/.rackspace_cloud_credentials
+ if os.path.isfile(default_creds_file):
+ creds_file = default_creds_file
+ elif not keyring_username:
+ sys.exit('No value in environment variable %s and/or no '
+ 'credentials file at %s'
+ % ('RAX_CREDS_FILE', default_creds_file))
+
+ identity_type = pyrax.get_setting('identity_type')
+ pyrax.set_setting('identity_type', identity_type or 'rackspace')
+
+ region = pyrax.get_setting('region')
+
+ try:
+ if keyring_username:
+ pyrax.keyring_auth(keyring_username, region=region)
+ else:
+ pyrax.set_credential_file(creds_file, region=region)
+ except Exception as e:
+ sys.exit("%s: %s" % (e, e.message))
+
+ regions = []
+ if region:
+ regions.append(region)
+ else:
+ try:
+ # Ansible 2.3+
+ region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
+ value_type='list')
+ except TypeError:
+ # Ansible 2.2.x and below
+ # pylint: disable=unexpected-keyword-arg
+ region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
+ islist=True)
+
+ for region in region_list:
+ region = region.strip().upper()
+ if region == 'ALL':
+ regions = pyrax.regions
+ break
+ elif region not in pyrax.regions:
+ sys.exit('Unsupported region %s' % region)
+ elif region not in regions:
+ regions.append(region)
+
+ return regions
+
+
+def main():
+ args = parse_args()
+ regions = setup()
+ if args.list:
+ _list(regions, refresh_cache=args.refresh_cache)
+ elif args.host:
+ host(regions, args.host)
+ sys.exit(0)
+
+
+p = load_config_file()
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rhv.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rhv.py
new file mode 100644
index 00000000..afff18db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rhv.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+oVirt dynamic inventory script
+=================================
+
+Generates dynamic inventory file for oVirt.
+
+The script will return the following attributes for each virtual machine:
+ - id
+ - name
+ - host
+ - cluster
+ - status
+ - description
+ - fqdn
+ - os_type
+ - template
+ - tags
+ - statistics
+ - devices
+
+When run in --list mode, virtual machines are grouped by the following categories:
+ - cluster
+ - tag
+ - status
+
+ Note: If a virtual machine has more than one tag, it will appear in all of the
+ corresponding tag groups.
+
+Examples:
+ # Execute update of system on webserver virtual machine:
+
+ $ ansible -i contrib/inventory/ovirt4.py webserver -m yum -a "name=* state=latest"
+
+ # Get webserver virtual machine information:
+
+ $ contrib/inventory/ovirt4.py --host webserver
+
+Author: Ondra Machacek (@machacekondra)
+"""
+
+import argparse
+import os
+import sys
+
+from collections import defaultdict
+
+from ansible.module_utils.six.moves import configparser
+
+import json
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ print('oVirt inventory script requires ovirt-engine-sdk-python >= 4.0.0')
+ sys.exit(1)
+
+
+def parse_args():
+ """
+ Create command line parser for oVirt dynamic inventory script.
+ """
+ parser = argparse.ArgumentParser(
+ description='Ansible dynamic inventory script for oVirt.',
+ )
+ parser.add_argument(
+ '--list',
+ action='store_true',
+ default=True,
+ help='Get data of all virtual machines (default: True).',
+ )
+ parser.add_argument(
+ '--host',
+ help='Get data of virtual machines running on specified host.',
+ )
+ parser.add_argument(
+ '--pretty',
+ action='store_true',
+ default=False,
+ help='Pretty format (default: False).',
+ )
+ return parser.parse_args()
+
+
+def create_connection():
+ """
+ Create a connection to oVirt engine API.
+ """
+ # Get the path of the configuration file, by default use
+ # 'ovirt.ini' file in script directory:
+ default_path = os.path.join(
+ os.path.dirname(os.path.realpath(__file__)),
+ 'ovirt.ini',
+ )
+ config_path = os.environ.get('OVIRT_INI_PATH', default_path)
+
+ # Create parser and add ovirt section if it doesn't exist:
+ config = configparser.SafeConfigParser(
+ defaults={
+ 'ovirt_url': os.environ.get('OVIRT_URL'),
+ 'ovirt_username': os.environ.get('OVIRT_USERNAME'),
+ 'ovirt_password': os.environ.get('OVIRT_PASSWORD'),
+ 'ovirt_ca_file': os.environ.get('OVIRT_CAFILE', ''),
+ }
+ )
+ if not config.has_section('ovirt'):
+ config.add_section('ovirt')
+ config.read(config_path)
+
+ # Create a connection with options defined in ini file:
+ return sdk.Connection(
+ url=config.get('ovirt', 'ovirt_url'),
+ username=config.get('ovirt', 'ovirt_username'),
+ password=config.get('ovirt', 'ovirt_password', raw=True),
+ ca_file=config.get('ovirt', 'ovirt_ca_file') or None,
+ insecure=not config.get('ovirt', 'ovirt_ca_file'),
+ )
+
+
+def get_dict_of_struct(connection, vm):
+ """
+ Transform SDK Vm Struct type to Python dictionary.
+ """
+ if vm is None:
+ return dict()
+
+ vms_service = connection.system_service().vms_service()
+ clusters_service = connection.system_service().clusters_service()
+ vm_service = vms_service.vm_service(vm.id)
+ devices = vm_service.reported_devices_service().list()
+ tags = vm_service.tags_service().list()
+ stats = vm_service.statistics_service().list()
+ labels = vm_service.affinity_labels_service().list()
+ groups = clusters_service.cluster_service(
+ vm.cluster.id
+ ).affinity_groups_service().list()
+
+ return {
+ 'id': vm.id,
+ 'name': vm.name,
+ 'host': connection.follow_link(vm.host).name if vm.host else None,
+ 'cluster': connection.follow_link(vm.cluster).name,
+ 'status': str(vm.status),
+ 'description': vm.description,
+ 'fqdn': vm.fqdn,
+ 'os_type': vm.os.type,
+ 'template': connection.follow_link(vm.template).name,
+ 'tags': [tag.name for tag in tags],
+ 'affinity_labels': [label.name for label in labels],
+ 'affinity_groups': [
+ group.name for group in groups
+ if vm.name in [vm.name for vm in connection.follow_link(group.vms)]
+ ],
+ 'statistics': dict(
+ (stat.name, stat.values[0].datum) for stat in stats if stat.values
+ ),
+ 'devices': dict(
+ (device.name, [ip.address for ip in device.ips]) for device in devices if device.ips
+ ),
+ 'ansible_host': next((device.ips[0].address for device in devices if device.ips), None)
+ }
+
+
+def get_data(connection, vm_name=None):
+ """
+ Obtain data of `vm_name` if specified, otherwise obtain data of all vms.
+ """
+ vms_service = connection.system_service().vms_service()
+ clusters_service = connection.system_service().clusters_service()
+
+ if vm_name:
+ vm = vms_service.list(search='name=%s' % vm_name) or [None]
+ data = get_dict_of_struct(
+ connection=connection,
+ vm=vm[0],
+ )
+ else:
+ vms = dict()
+ data = defaultdict(list)
+ for vm in vms_service.list():
+ name = vm.name
+ vm_service = vms_service.vm_service(vm.id)
+ cluster_service = clusters_service.cluster_service(vm.cluster.id)
+
+ # Add vm to vms dict:
+ vms[name] = get_dict_of_struct(connection, vm)
+
+ # Add vm to cluster group:
+ cluster_name = connection.follow_link(vm.cluster).name
+ data['cluster_%s' % cluster_name].append(name)
+
+ # Add vm to tag group:
+ tags_service = vm_service.tags_service()
+ for tag in tags_service.list():
+ data['tag_%s' % tag.name].append(name)
+
+ # Add vm to status group:
+ data['status_%s' % vm.status].append(name)
+
+ # Add vm to affinity group:
+ for group in cluster_service.affinity_groups_service().list():
+ if vm.name in [
+ v.name for v in connection.follow_link(group.vms)
+ ]:
+ data['affinity_group_%s' % group.name].append(vm.name)
+
+ # Add vm to affinity label group:
+ affinity_labels_service = vm_service.affinity_labels_service()
+ for label in affinity_labels_service.list():
+ data['affinity_label_%s' % label.name].append(name)
+
+ data["_meta"] = {
+ 'hostvars': vms,
+ }
+
+ return data
+
+
+def main():
+ args = parse_args()
+ connection = create_connection()
+
+ print(
+ json.dumps(
+ obj=get_data(
+ connection=connection,
+ vm_name=args.host,
+ ),
+ sort_keys=args.pretty,
+ indent=args.pretty * 2,
+ )
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.ini
new file mode 100644
index 00000000..748b3d21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.ini
@@ -0,0 +1,35 @@
+# Rudder external inventory script settings
+#
+
+[rudder]
+
+# Your Rudder server API URL, typically:
+# https://rudder.local/rudder/api
+uri = https://rudder.local/rudder/api
+
+# By default, Rudder uses a self-signed certificate. Set this to True
+# to disable certificate validation.
+disable_ssl_certificate_validation = True
+
+# Your Rudder API token, created in the Web interface.
+token = aaabbbccc
+
+# Rudder API version to use, use "latest" for latest available
+# version.
+version = latest
+
+# Property to use as group name in the output.
+# Can generally be "id" or "displayName".
+group_name = displayName
+
+# Fail if there are two groups with the same name or two hosts with the
+# same hostname in the output.
+fail_if_name_collision = True
+
+# We cache the results of Rudder API in a local file
+cache_path = /tmp/ansible-rudder.cache
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# Set to 0 to disable cache.
+cache_max_age = 500
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.py
new file mode 100644
index 00000000..9a65aca9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/rudder.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015, Normation SAS
+#
+# Inspired by the EC2 inventory plugin:
+# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+######################################################################
+
+'''
+Rudder external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API request to
+a Rudder server. This script is compatible with Rudder 2.10 or later.
+
+The output JSON includes all your Rudder groups, containing the hostnames of
+their nodes. Groups and nodes have a variable called rudder_group_id and
+rudder_node_id respectively, which is the Rudder internal id of the item and
+allows you to identify it uniquely. Host variables also include your node
+properties, which are key => value pairs set by the API and specific to each node.
+
+This script assumes there is a rudder.ini file alongside it. To specify a
+different path to rudder.ini, define the RUDDER_INI_PATH environment variable:
+
+ export RUDDER_INI_PATH=/path/to/my_rudder.ini
+
+You have to configure your Rudder server information, either in rudder.ini or
+by overriding it with environment variables:
+
+ export RUDDER_API_VERSION='latest'
+ export RUDDER_API_TOKEN='my_token'
+ export RUDDER_API_URI='https://rudder.local/rudder/api'
+'''
+
+
+import sys
+import os
+import re
+import argparse
+import httplib2 as http
+from time import time
+from ansible.module_utils import six
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+import json
+
+
+class RudderInventory(object):
+ def __init__(self):
+ ''' Main execution path '''
+
+ # Empty inventory by default
+ self.inventory = {}
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Create connection
+ self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation)
+
+ # Cache
+ if self.args.refresh_cache:
+ self.update_cache()
+ elif not self.is_cache_valid():
+ self.update_cache()
+ else:
+ self.load_cache()
+
+ data_to_print = {}
+
+ if self.args.host:
+ data_to_print = self.get_host_info(self.args.host)
+ elif self.args.list:
+ data_to_print = self.get_list_info()
+
+ print(self.json_format_dict(data_to_print, True))
+
+ def read_settings(self):
+ ''' Reads the settings from the rudder.ini file '''
+ if six.PY2:
+ config = configparser.SafeConfigParser()
+ else:
+ config = configparser.ConfigParser()
+ rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini')
+ rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path)))
+ config.read(rudder_ini_path)
+
+ self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token'))
+ self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version'))
+ self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri'))
+
+ self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation')
+ self.group_name = config.get('rudder', 'group_name')
+ self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision')
+
+ self.cache_path = config.get('rudder', 'cache_path')
+ self.cache_max_age = config.getint('rudder', 'cache_max_age')
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+ def is_cache_valid(self):
+        ''' Determines whether the cache file has expired or is still valid '''
+
+ if os.path.isfile(self.cache_path):
+ mod_time = os.path.getmtime(self.cache_path)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ return True
+
+ return False
+
+ def load_cache(self):
+        ''' Reads the cache file and sets self.inventory '''
+
+        with open(self.cache_path, 'r') as cache:
+            json_cache = cache.read()
+
+        try:
+            self.inventory = json.loads(json_cache)
+        except ValueError:
+            self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache')
+
+ def write_cache(self):
+ ''' Writes data in JSON format to a file '''
+
+ json_data = self.json_format_dict(self.inventory, True)
+        with open(self.cache_path, 'w') as cache:
+            cache.write(json_data)
+
+ def get_nodes(self):
+ ''' Gets the nodes list from Rudder '''
+
+ path = '/nodes?select=nodeAndPolicyServer'
+ result = self.api_call(path)
+
+ nodes = {}
+
+ for node in result['data']['nodes']:
+ nodes[node['id']] = {}
+ nodes[node['id']]['hostname'] = node['hostname']
+ if 'properties' in node:
+ nodes[node['id']]['properties'] = node['properties']
+ else:
+ nodes[node['id']]['properties'] = []
+
+ return nodes
+
+ def get_groups(self):
+ ''' Gets the groups list from Rudder '''
+
+ path = '/groups'
+ result = self.api_call(path)
+
+ groups = {}
+
+ for group in result['data']['groups']:
+ groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])}
+
+ return groups
+
+ def update_cache(self):
+ ''' Fetches the inventory information from Rudder and creates the inventory '''
+
+ nodes = self.get_nodes()
+ groups = self.get_groups()
+
+ inventory = {}
+
+ for group in groups:
+ # Check for name collision
+ if self.fail_if_name_collision:
+ if groups[group]['name'] in inventory:
+ self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups')
+ # Add group to inventory
+ inventory[groups[group]['name']] = {}
+ inventory[groups[group]['name']]['hosts'] = []
+ inventory[groups[group]['name']]['vars'] = {}
+ inventory[groups[group]['name']]['vars']['rudder_group_id'] = group
+ for node in groups[group]['hosts']:
+ # Add node to group
+ inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname'])
+
+ properties = {}
+
+ for node in nodes:
+ # Check for name collision
+ if self.fail_if_name_collision:
+ if nodes[node]['hostname'] in properties:
+ self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts')
+ # Add node properties to inventory
+ properties[nodes[node]['hostname']] = {}
+ properties[nodes[node]['hostname']]['rudder_node_id'] = node
+ for node_property in nodes[node]['properties']:
+ properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value']
+
+ inventory['_meta'] = {}
+ inventory['_meta']['hostvars'] = properties
+
+ self.inventory = inventory
+
+ if self.cache_max_age > 0:
+ self.write_cache()
+
+ def get_list_info(self):
+ ''' Gets inventory information from local cache '''
+
+ return self.inventory
+
+ def get_host_info(self, hostname):
+ ''' Gets information about a specific host from local cache '''
+
+ if hostname in self.inventory['_meta']['hostvars']:
+ return self.inventory['_meta']['hostvars'][hostname]
+ else:
+ return {}
+
+ def api_call(self, path):
+ ''' Performs an API request '''
+
+ headers = {
+ 'X-API-Token': self.token,
+ 'X-API-Version': self.version,
+ 'Content-Type': 'application/json;charset=utf-8'
+ }
+
+ target = urlparse(self.uri + path)
+ method = 'GET'
+ body = ''
+
+ try:
+ response, content = self.conn.request(target.geturl(), method, body, headers)
+ except Exception:
+ self.fail_with_error('Error connecting to Rudder server')
+
+ try:
+ data = json.loads(content)
+        except ValueError:
+ self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response')
+
+ return data
+
+ def fail_with_error(self, err_msg, err_operation=None):
+        ''' Logs an error to stderr for ansible-playbook to consume, then exits '''
+ if err_operation:
+ err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+ err_msg=err_msg, err_operation=err_operation)
+ sys.stderr.write(err_msg)
+ sys.exit(1)
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+ def to_safe(self, word):
+ ''' Converts 'bad' characters in a string to underscores so they can be
+ used as Ansible variable names '''
+
+ return re.sub(r'[^A-Za-z0-9\_]', '_', word)
+
+
+# Run the script
+RudderInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.ini
new file mode 100644
index 00000000..99615a12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.ini
@@ -0,0 +1,37 @@
+# Ansible dynamic inventory script for Scaleway cloud provider
+#
+
+[compute]
+# Fetch inventory for regions. If not defined will read the SCALEWAY_REGION environment variable
+#
+# regions = all
+# regions = ams1
+# regions = par1, ams1
+regions = par1
+
+
+# Define a Scaleway token to perform required queries on the API
+# in order to generate inventory output.
+#
+[auth]
+# Token to authenticate with Scaleway's API.
+# If not defined will read the SCALEWAY_TOKEN environment variable
+#
+api_token = mysecrettoken
+
+
+# To avoid making excessive calls to the Scaleway API you can define a
+# cache for the plugin output. Within the time defined in seconds, the latest
+# output will be reused. After that time, the cache will be refreshed.
+#
+[cache]
+cache_max_age = 60
+cache_dir = '~/.ansible/tmp'
+
+
+[defaults]
+# You may want to use only public IP addresses or private IP addresses.
+# You can set the public_ip_only option to get public IPs only.
+# If not defined, it defaults to retrieving private IP addresses.
+#
+public_ip_only = false
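+
+
+# Example (values are placeholders): as noted above, the API token and region can
+# also be supplied through the environment when this file leaves them unset:
+#
+#   SCALEWAY_TOKEN=mysecrettoken SCALEWAY_REGION=par1 ./scaleway.py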
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.py
new file mode 100644
index 00000000..f68eb128
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/scaleway.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+'''
+External inventory script for Scaleway
+======================================
+
+Shamelessly copied from an existing inventory script.
+
+This script generates an inventory that Ansible can understand by making API requests to the Scaleway API.
+
+Requires the 'requests' Python library; make sure it is installed before using this script (pip install requests, see https://pypi.org/project/requests/).
+
+Before using this script you may want to modify the scaleway.ini config file.
+
+This script generates an Ansible hosts file with these host groups:
+
+<hostname>: Defines the host itself, with Scaleway's hostname as the group name.
+<tag>: Contains all hosts which have "<tag>" as a tag.
+<region>: Contains all hosts which are in the "<region>" region.
+all: Contains all hosts defined in Scaleway.
+'''
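+
+# A hedged sketch (hypothetical names and addresses) of the inventory structure
+# generate_inv_from_api() below builds: a 'scaleway' group plus one group per
+# region and per tag, each listing hostnames as children, and one entry per host
+# holding its IP:
+#
+#   {"scaleway": {"children": ["web-01"], "hosts": []},
+#    "par1": {"children": ["web-01"], "hosts": []},
+#    "production": {"children": ["web-01"], "hosts": []},
+#    "web-01": ["10.1.2.3"]}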
+
+# (c) 2017, Paul B. <paul@bonaud.fr>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import os
+import requests
+from ansible.module_utils import six
+from ansible.module_utils.six.moves import configparser
+import sys
+import time
+import traceback
+
+import json
+
+EMPTY_GROUP = {
+ 'children': [],
+ 'hosts': []
+}
+
+
+class ScalewayAPI:
+ REGIONS = ['par1', 'ams1']
+
+ def __init__(self, auth_token, region):
+ self.session = requests.session()
+ self.session.headers.update({
+ 'User-Agent': 'Ansible Python/%s' % (sys.version.split(' ')[0])
+ })
+ self.session.headers.update({
+ 'X-Auth-Token': auth_token.encode('latin1')
+ })
+ self.base_url = 'https://cp-%s.scaleway.com' % (region)
+
+ def servers(self):
+ raw = self.session.get('/'.join([self.base_url, 'servers']))
+
+ try:
+ response = raw.json()
+ return self.get_resource('servers', response, raw)
+ except ValueError:
+ return []
+
+ def get_resource(self, resource, response, raw):
+ raw.raise_for_status()
+
+ if resource in response:
+ return response[resource]
+ else:
+ raise ValueError(
+ "Resource %s not found in Scaleway API response" % (resource))
+
+
+def env_or_param(env_key, param=None, fallback=None):
+ env_value = os.environ.get(env_key)
+
+ if (param, env_value) == (None, None):
+ return fallback
+ elif env_value is not None:
+ return env_value
+ else:
+ return param
+
+
+def save_cache(data, config):
+ ''' saves item to cache '''
+ dpath = config.get('cache', 'cache_dir')
+ try:
+ cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'w')
+ cache.write(json.dumps(data))
+ cache.close()
+ except IOError as e:
+ pass # not really sure what to do here
+
+
+def get_cache(cache_item, config):
+ ''' returns cached item '''
+ dpath = config.get('cache', 'cache_dir')
+ inv = {}
+ try:
+ cache = open('/'.join([dpath, 'scaleway_ansible_inventory.json']), 'r')
+ inv = cache.read()
+ cache.close()
+ except IOError as e:
+ pass # not really sure what to do here
+
+ return inv
+
+
+def cache_available(config):
+ ''' checks if we have a 'fresh' cache available for item requested '''
+
+ if config.has_option('cache', 'cache_dir'):
+ dpath = config.get('cache', 'cache_dir')
+
+ try:
+ existing = os.stat(
+ '/'.join([dpath, 'scaleway_ansible_inventory.json']))
+ except OSError:
+ return False
+
+ if config.has_option('cache', 'cache_max_age'):
+ maxage = config.get('cache', 'cache_max_age')
+ else:
+ maxage = 60
+ if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
+ return True
+
+ return False
+
+
+def generate_inv_from_api(config):
+    try:
+        # build the inventory locally rather than relying on the module-level name
+        inventory = {}
+        inventory['scaleway'] = copy.deepcopy(EMPTY_GROUP)
+
+ auth_token = None
+ if config.has_option('auth', 'api_token'):
+ auth_token = config.get('auth', 'api_token')
+ auth_token = env_or_param('SCALEWAY_TOKEN', param=auth_token)
+ if auth_token is None:
+ sys.stderr.write('ERROR: missing authentication token for Scaleway API')
+ sys.exit(1)
+
+ if config.has_option('compute', 'regions'):
+ regions = config.get('compute', 'regions')
+ if regions == 'all':
+ regions = ScalewayAPI.REGIONS
+ else:
+ regions = map(str.strip, regions.split(','))
+ else:
+ regions = [
+ env_or_param('SCALEWAY_REGION', fallback='par1')
+ ]
+
+ for region in regions:
+ api = ScalewayAPI(auth_token, region)
+
+ for server in api.servers():
+ hostname = server['hostname']
+ if config.has_option('defaults', 'public_ip_only') and config.getboolean('defaults', 'public_ip_only'):
+ ip = server['public_ip']['address']
+ else:
+ ip = server['private_ip']
+ for server_tag in server['tags']:
+ if server_tag not in inventory:
+ inventory[server_tag] = copy.deepcopy(EMPTY_GROUP)
+ inventory[server_tag]['children'].append(hostname)
+ if region not in inventory:
+ inventory[region] = copy.deepcopy(EMPTY_GROUP)
+ inventory[region]['children'].append(hostname)
+ inventory['scaleway']['children'].append(hostname)
+ inventory[hostname] = []
+ inventory[hostname].append(ip)
+
+ return inventory
+ except Exception:
+ # Return empty hosts output
+ traceback.print_exc()
+ return {'scaleway': {'hosts': []}, '_meta': {'hostvars': {}}}
+
+
+def get_inventory(config):
+    ''' Reads the inventory from cache or the Scaleway API '''
+
+    if cache_available(config):
+        # the cache file already holds serialized JSON
+        return get_cache('scaleway_ansible_inventory.json', config)
+
+    inv = generate_inv_from_api(config)
+    save_cache(inv, config)
+    return json.dumps(inv)
+
+
+if __name__ == '__main__':
+ inventory = {}
+
+ # Read config
+ if six.PY3:
+ config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
+ for configfilename in [os.path.abspath(sys.argv[0]).rsplit('.py')[0] + '.ini', 'scaleway.ini']:
+ if os.path.exists(configfilename):
+ config.read(configfilename)
+ break
+
+ if cache_available(config):
+ inventory = get_cache('scaleway_ansible_inventory.json', config)
+ else:
+ inventory = get_inventory(config)
+
+ # return to ansible
+ sys.stdout.write(str(inventory))
+ sys.stdout.flush()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/serf.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/serf.py
new file mode 100644
index 00000000..df917ef5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/serf.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# (c) 2015, Marc Abramowitz <marca@surveymonkey.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Dynamic inventory script which lets you use nodes discovered by Serf
+# (https://serfdom.io/).
+#
+# Requires the `serfclient` Python module from
+# https://pypi.org/project/serfclient/
+#
+# Environment variables
+# ---------------------
+# - `SERF_RPC_ADDR`
+# - `SERF_RPC_AUTH`
+#
+# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
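+#
+# Example invocations (illustrative; the RPC address and host name are
+# placeholders):
+#   $ SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
+#   $ ./serf.py --host node1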
+
+import argparse
+import collections
+import os
+import sys
+
+# https://pypi.org/project/serfclient/
+from serfclient import SerfClient, EnvironmentConfig
+
+import json
+
+_key = 'serf'
+
+
+def _serf_client():
+ env = EnvironmentConfig()
+ return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
+
+
+def get_serf_members_data():
+ return _serf_client().members().body['Members']
+
+
+def get_nodes(data):
+ return [node['Name'] for node in data]
+
+
+def get_groups(data):
+ groups = collections.defaultdict(list)
+
+ for node in data:
+ for key, value in node['Tags'].items():
+ groups[value].append(node['Name'])
+
+ return groups
+
+
+def get_meta(data):
+ meta = {'hostvars': {}}
+ for node in data:
+ meta['hostvars'][node['Name']] = node['Tags']
+ return meta
+
+
+def print_list():
+ data = get_serf_members_data()
+ nodes = get_nodes(data)
+ groups = get_groups(data)
+ meta = get_meta(data)
+ inventory_data = {_key: nodes, '_meta': meta}
+ inventory_data.update(groups)
+ print(json.dumps(inventory_data))
+
+
+def print_host(host):
+ data = get_serf_members_data()
+ meta = get_meta(data)
+ print(json.dumps(meta['hostvars'][host]))
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script reading from serf cluster')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from serf cluster'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/softlayer.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/softlayer.py
new file mode 100644
index 00000000..03f9820a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/softlayer.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+"""
+SoftLayer external inventory script.
+
+The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
+You have a few different options for configuring your username and api_key. You can pass
+environment variables (SL_USERNAME and SL_API_KEY). You can also write an INI file at
+~/.softlayer or /etc/softlayer.conf. For more information see the SL API documentation at:
+- https://softlayer-python.readthedocs.io/en/latest/config_file.html
+
+The SoftLayer Python client has a built-in command for saving this configuration
+file: `sl config setup`.
+"""
+
+# Copyright (C) 2014 AJ Bourg <aj@ajbourg.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# I found the structure of the ec2.py script very helpful as an example
+# as I put this together. Thanks to whoever wrote that script!
+#
+
+import SoftLayer
+import re
+import argparse
+import itertools
+import json
+
+
+class SoftLayerInventory(object):
+ common_items = [
+ 'id',
+ 'globalIdentifier',
+ 'hostname',
+ 'domain',
+ 'fullyQualifiedDomainName',
+ 'primaryBackendIpAddress',
+ 'primaryIpAddress',
+ 'datacenter',
+ 'tagReferences',
+ 'userData.value',
+ ]
+
+ vs_items = [
+ 'lastKnownPowerState.name',
+ 'powerState',
+ 'maxCpu',
+ 'maxMemory',
+ 'activeTransaction.transactionStatus[friendlyName,name]',
+ 'status',
+ ]
+
+ hw_items = [
+ 'hardwareStatusId',
+ 'processorPhysicalCoreAmount',
+ 'memoryCapacity',
+ ]
+
+ def _empty_inventory(self):
+ return {"_meta": {"hostvars": {}}}
+
+ def __init__(self):
+ '''Main path'''
+
+ self.inventory = self._empty_inventory()
+
+ self.parse_options()
+
+ if self.args.list:
+ self.get_all_servers()
+ print(self.json_format_dict(self.inventory, True))
+ elif self.args.host:
+ self.get_all_servers()
+ print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
+
+ def to_safe(self, word):
+ '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
+
+ return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
+
+ def push(self, my_dict, key, element):
+ '''Push an element onto an array that may not have been defined in the dict'''
+
+ if key in my_dict:
+ my_dict[key].append(element)
+ else:
+ my_dict[key] = [element]
+
+ def parse_options(self):
+ '''Parse all the arguments from the CLI'''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
+ parser.add_argument('--list', action='store_true', default=False,
+ help='List instances (default: False)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific instance')
+ self.args = parser.parse_args()
+
+ def json_format_dict(self, data, pretty=False):
+ '''Converts a dict to a JSON object and dumps it as a formatted string'''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+ def process_instance(self, instance, instance_type="virtual"):
+ '''Populate the inventory dictionary with any instance information'''
+
+ # only want active instances
+ if 'status' in instance and instance['status']['name'] != 'Active':
+ return
+
+ # and powered on instances
+ if 'powerState' in instance and instance['powerState']['name'] != 'Running':
+ return
+
+ # 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
+ if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
+ return
+
+ # if there's no IP address, we can't reach it
+ if 'primaryIpAddress' not in instance:
+ return
+
+ instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
+
+ dest = instance['primaryIpAddress']
+
+ instance['tags'] = list()
+ for tag in instance['tagReferences']:
+ instance['tags'].append(tag['tag']['name'])
+
+ del instance['tagReferences']
+
+ self.inventory["_meta"]["hostvars"][dest] = instance
+
+ # Inventory: group by memory
+ if 'maxMemory' in instance:
+ self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
+ elif 'memoryCapacity' in instance:
+ self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
+
+ # Inventory: group by cpu count
+ if 'maxCpu' in instance:
+ self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
+ elif 'processorPhysicalCoreAmount' in instance:
+ self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
+
+ # Inventory: group by datacenter
+ self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
+
+ # Inventory: group by hostname
+ self.push(self.inventory, self.to_safe(instance['hostname']), dest)
+
+ # Inventory: group by FQDN
+ self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
+
+ # Inventory: group by domain
+ self.push(self.inventory, self.to_safe(instance['domain']), dest)
+
+ # Inventory: group by type (hardware/virtual)
+ self.push(self.inventory, instance_type, dest)
+
+ for tag in instance['tags']:
+ self.push(self.inventory, tag, dest)
+
+ def get_virtual_servers(self):
+ '''Get all the CCI instances'''
+ vs = SoftLayer.VSManager(self.client)
+ mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
+ instances = vs.list_instances(mask=mask)
+
+ for instance in instances:
+ self.process_instance(instance)
+
+ def get_physical_servers(self):
+ '''Get all the hardware instances'''
+ hw = SoftLayer.HardwareManager(self.client)
+ mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
+ instances = hw.list_hardware(mask=mask)
+
+ for instance in instances:
+ self.process_instance(instance, 'hardware')
+
+ def get_all_servers(self):
+ self.client = SoftLayer.Client()
+ self.get_virtual_servers()
+ self.get_physical_servers()
+
+
+SoftLayerInventory()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.ini b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.ini
new file mode 100644
index 00000000..5433c422
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.ini
@@ -0,0 +1,16 @@
+# Put this ini-file in the same directory as spacewalk.py
+# Command line options have precedence over options defined in here.
+
+[spacewalk]
+# To limit the script on one organization in spacewalk, uncomment org_number
+# and fill in the organization ID:
+# org_number=2
+
+# To prefix the group names with the organization ID set prefix_org_name=true.
+# This is convenient when org_number is not set and you have the same group names
+# in multiple organizations within spacewalk.
+# The prefix is "org_number-".
+prefix_org_name=false
+
+# Default cache_age for files created with spacewalk-report is 300 seconds.
+cache_age=300
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.py
new file mode 100644
index 00000000..b3b8cf8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/spacewalk.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+
+"""
+Spacewalk external inventory script
+===================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+This script is dependent upon the spacewalk-reports package being installed
+on the same machine. It is basically a CSV-to-JSON converter from the
+output of "spacewalk-report system-groups-systems|inventory".
+
+Tested with Ansible 1.9.2 and spacewalk 2.3
+"""
+#
+# Author:: Jon Miller <jonEbird@gmail.com>
+# Copyright:: Copyright (c) 2013, Jon Miller
+#
+# Extended for support of multiple organizations and
+# adding the "_meta" dictionary to --list output by
+# Bernhard Lichtinger <bernhard.lichtinger@lrz.de> 2015
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
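+#
+# Example invocations (illustrative; the full set of options is defined below):
+#   $ ./spacewalk.py --list
+#   $ ./spacewalk.py --list --human -o 2
+#   $ ./spacewalk.py --host host01.example.com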
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import os
+import time
+from optparse import OptionParser
+import subprocess
+import json
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import configparser as ConfigParser
+
+
+base_dir = os.path.dirname(os.path.realpath(__file__))
+default_ini_file = os.path.join(base_dir, "spacewalk.ini")
+
+SW_REPORT = '/usr/bin/spacewalk-report'
+CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
+CACHE_AGE = 300 # 5min
+INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
+
+
+# Sanity check
+if not os.path.exists(SW_REPORT):
+ print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr)
+ sys.exit(1)
+
+# Pre-startup work
+if not os.path.exists(CACHE_DIR):
+ os.mkdir(CACHE_DIR)
+ os.chmod(CACHE_DIR, 0o2775)
+
+# Helper functions
+# ------------------------------
+
+
+def spacewalk_report(name):
+ """Yield a dictionary form of each CSV output produced by the specified
+ spacewalk-report
+ """
+ cache_filename = os.path.join(CACHE_DIR, name)
+ if not os.path.exists(cache_filename) or \
+ (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE:
+ # Update the cache
+ fh = open(cache_filename, 'w')
+ p = subprocess.Popen([SW_REPORT, name], stdout=fh)
+ p.wait()
+ fh.close()
+
+ with open(cache_filename, 'r') as f:
+ lines = f.readlines()
+ keys = lines[0].strip().split(',')
+ # add 'spacewalk_' prefix to the keys
+ keys = ['spacewalk_' + key for key in keys]
+ for line in lines[1:]:
+ values = line.strip().split(',')
+ if len(keys) == len(values):
+ yield dict(zip(keys, values))
+
+
+# Options
+# ------------------------------
+
+parser = OptionParser(usage="%prog [options] --list | --host <machine>")
+parser.add_option('--list', default=False, dest="list", action="store_true",
+ help="Produce a JSON consumable grouping of servers for Ansible")
+parser.add_option('--host', default=None, dest="host",
+ help="Generate additional host specific details for given host for Ansible")
+parser.add_option('-H', '--human', dest="human",
+ default=False, action="store_true",
+ help="Produce a friendlier version of either server list or host detail")
+parser.add_option('-o', '--org', default=None, dest="org_number",
+ help="Limit to spacewalk organization number")
+parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true",
+ help="Prefix the group name with the organization number")
+(options, args) = parser.parse_args()
+
+
+# read spacewalk.ini if present
+# ------------------------------
+if os.path.exists(INI_FILE):
+ config = ConfigParser.SafeConfigParser()
+ config.read(INI_FILE)
+ if config.has_option('spacewalk', 'cache_age'):
+ CACHE_AGE = config.get('spacewalk', 'cache_age')
+ if not options.org_number and config.has_option('spacewalk', 'org_number'):
+ options.org_number = config.get('spacewalk', 'org_number')
+ if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
+ options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
+
+
+# Generate dictionary for mapping group_id to org_id
+# ------------------------------
+org_groups = {}
+try:
+ for group in spacewalk_report('system-groups'):
+ org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id']
+
+except (OSError) as e:
+ print('Problem executing the command "%s system-groups": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
+ sys.exit(2)
+
+
+# List out the known servers from Spacewalk
+# ------------------------------
+if options.list:
+
+    # to build the "_meta" group with hostvars, first create a dictionary for later use
+ host_vars = {}
+ try:
+ for item in spacewalk_report('inventory'):
+ host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
+
+ except (OSError) as e:
+ print('Problem executing the command "%s inventory": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
+ sys.exit(2)
+
+ groups = {}
+ meta = {"hostvars": {}}
+ try:
+ for system in spacewalk_report('system-groups-systems'):
+ # first get org_id of system
+ org_id = org_groups[system['spacewalk_group_id']]
+
+            # should we add the org_id as a prefix to the group name:
+ if options.prefix_org_name:
+ prefix = org_id + "-"
+ group_name = prefix + system['spacewalk_group_name']
+ else:
+ group_name = system['spacewalk_group_name']
+
+ # if we are limited to one organization:
+ if options.org_number:
+ if org_id == options.org_number:
+ if group_name not in groups:
+ groups[group_name] = set()
+
+ groups[group_name].add(system['spacewalk_server_name'])
+ if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
+ meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
+ # or we list all groups and systems:
+ else:
+ if group_name not in groups:
+ groups[group_name] = set()
+
+ groups[group_name].add(system['spacewalk_server_name'])
+ if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
+ meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
+
+ except (OSError) as e:
+ print('Problem executing the command "%s system-groups-systems": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
+ sys.exit(2)
+
+ if options.human:
+ for group, systems in iteritems(groups):
+ print('[%s]\n%s\n' % (group, '\n'.join(systems)))
+ else:
+ final = dict([(k, list(s)) for k, s in iteritems(groups)])
+ final["_meta"] = meta
+ print(json.dumps(final))
+ # print(json.dumps(groups))
+ sys.exit(0)
+
+
+# Return detailed information concerning the spacewalk server
+# ------------------------------
+elif options.host:
+
+ host_details = {}
+ try:
+ for system in spacewalk_report('inventory'):
+ if system['spacewalk_hostname'] == options.host:
+ host_details = system
+ break
+
+ except (OSError) as e:
+ print('Problem executing the command "%s inventory": %s' %
+ (SW_REPORT, str(e)), file=sys.stderr)
+ sys.exit(2)
+
+ if options.human:
+ print('Host: %s' % options.host)
+ for k, v in iteritems(host_details):
+ print(' %s: %s' % (k, '\n '.join(v.split(';'))))
+ else:
+ print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
+ sys.exit(0)
+
+else:
+
+ parser.print_help()
+ sys.exit(1)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ssh_config.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ssh_config.py
new file mode 100644
index 00000000..ad56a53e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/ssh_config.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+# (c) 2014, Tomas Karasek <tomas.karasek@digile.fi>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
+#
+# There were some issues with various Paramiko versions. I took a deeper look
+# and tested heavily. Now, ansible parses this alright with Paramiko versions
+# 1.7.2 to 1.15.2.
+#
+# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts
+# with their alias, rather than with the IP or hostname. It takes advantage
+# of the ansible_ssh_{host,port,user,private_key_file} variables.
+#
+# If you have in your .ssh/config:
+# Host git
+# HostName git.domain.org
+# User tkarasek
+# IdentityFile /home/tomk/keys/thekey
+#
+# You can do
+# $ ansible git -m ping
+#
+# Example invocation:
+# ssh_config.py --list
+# ssh_config.py --host <alias>
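+#
+# With the .ssh/config entry shown above, the --list output's hostvars would look
+# roughly like this (a sketch, not verbatim output):
+#   "git": {"ansible_ssh_host": "git.domain.org",
+#           "ansible_ssh_user": "tkarasek",
+#           "ansible_ssh_private_key_file": "/home/tomk/keys/thekey"}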
+
+import argparse
+import os.path
+import sys
+
+import json
+
+import paramiko
+
+from ansible.module_utils.common._collections_compat import MutableSequence
+
+SSH_CONF = '~/.ssh/config'
+
+_key = 'ssh_config'
+
+_ssh_to_ansible = [('user', 'ansible_ssh_user'),
+ ('hostname', 'ansible_ssh_host'),
+ ('identityfile', 'ansible_ssh_private_key_file'),
+ ('port', 'ansible_ssh_port')]
+
+
+def get_config():
+ if not os.path.isfile(os.path.expanduser(SSH_CONF)):
+ return {}
+ with open(os.path.expanduser(SSH_CONF)) as f:
+ cfg = paramiko.SSHConfig()
+ cfg.parse(f)
+ ret_dict = {}
+ for d in cfg._config:
+ if isinstance(d['host'], MutableSequence):
+ alias = d['host'][0]
+ else:
+ alias = d['host']
+ if ('?' in alias) or ('*' in alias):
+ continue
+ _copy = dict(d)
+ del _copy['host']
+ if 'config' in _copy:
+ ret_dict[alias] = _copy['config']
+ else:
+ ret_dict[alias] = _copy
+ return ret_dict
+
+
+def print_list():
+ cfg = get_config()
+ meta = {'hostvars': {}}
+ for alias, attributes in cfg.items():
+ tmp_dict = {}
+ for ssh_opt, ans_opt in _ssh_to_ansible:
+ if ssh_opt in attributes:
+ # If the attribute is a list, just take the first element.
+ # Private key is returned in a list for some reason.
+ attr = attributes[ssh_opt]
+ if isinstance(attr, MutableSequence):
+ attr = attr[0]
+ tmp_dict[ans_opt] = attr
+ if tmp_dict:
+ meta['hostvars'][alias] = tmp_dict
+
+ print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta}))
+
+
+def print_host(host):
+ cfg = get_config()
+ print(json.dumps(cfg[host]))
+
+
+def get_args(args_list):
+ parser = argparse.ArgumentParser(
+ description='ansible inventory script parsing .ssh/config')
+ mutex_group = parser.add_mutually_exclusive_group(required=True)
+ help_list = 'list all hosts from .ssh/config inventory'
+ mutex_group.add_argument('--list', action='store_true', help=help_list)
+ help_host = 'display variables for a host'
+ mutex_group.add_argument('--host', help=help_host)
+ return parser.parse_args(args_list)
+
+
+def main(args_list):
+
+ args = get_args(args_list)
+ if args.list:
+ print_list()
+ if args.host:
+ print_host(args.host)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.py
new file mode 100644
index 00000000..2c6bb37c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016, Hugh Ma <hugh.ma@flextronics.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Stacki inventory script
+# Configure stacki.yml with proper auth information and place in the following:
+# - ../inventory/stacki.yml
+# - /etc/stacki/stacki.yml
+# - /etc/ansible/stacki.yml
+# The stacki.yml file can contain entries for authentication information
+# regarding the Stacki front-end node.
+#
+# use_hostnames uses the hostname rather than the interface IP for the connection
+#
+#
+
+"""
+Example Usage:
+ List Stacki Nodes
+    $ ./stacki.py --list
+
+
+Example Configuration:
+---
+stacki:
+ auth:
+ stacki_user: admin
+ stacki_password: abc12345678910
+ stacki_endpoint: http://192.168.200.50/stack
+use_hostnames: false
+"""
+
+import argparse
+import os
+import sys
+import yaml
+from distutils.version import StrictVersion
+
+import json
+
+try:
+ import requests
+except Exception:
+ sys.exit('requests package is required for this inventory script')
+
+
+CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
+
+
+def stack_auth(params):
+ endpoint = params['stacki_endpoint']
+ auth_creds = {'USERNAME': params['stacki_user'],
+ 'PASSWORD': params['stacki_password']}
+
+ client = requests.session()
+ client.get(endpoint)
+
+ init_csrf = client.cookies['csrftoken']
+
+ header = {'csrftoken': init_csrf, 'X-CSRFToken': init_csrf,
+ 'Content-type': 'application/x-www-form-urlencoded'}
+
+ login_endpoint = endpoint + "/login"
+
+ login_req = client.post(login_endpoint, data=auth_creds, headers=header)
+
+ csrftoken = login_req.cookies['csrftoken']
+ sessionid = login_req.cookies['sessionid']
+
+ auth_creds.update(CSRFTOKEN=csrftoken, SESSIONID=sessionid)
+
+ return client, auth_creds
+
+
+def stack_build_header(auth_creds):
+ header = {'csrftoken': auth_creds['CSRFTOKEN'],
+ 'X-CSRFToken': auth_creds['CSRFTOKEN'],
+ 'sessionid': auth_creds['SESSIONID'],
+ 'Content-type': 'application/json'}
+
+ return header
+
+
+def stack_host_list(endpoint, header, client):
+
+ stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
+ headers=header)
+ return json.loads(stack_r.json())
+
+
+def stack_net_list(endpoint, header, client):
+
+ stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
+ headers=header)
+ return json.loads(stack_r.json())
+
+
+def format_meta(hostdata, intfdata, config):
+ use_hostnames = config['use_hostnames']
+ meta = dict(all=dict(hosts=list()),
+ frontends=dict(hosts=list()),
+ backends=dict(hosts=list()),
+ _meta=dict(hostvars=dict()))
+
+ # Iterate through list of dicts of hosts and remove
+ # environment key as it causes conflicts
+ for host in hostdata:
+ del host['environment']
+ meta['_meta']['hostvars'][host['host']] = host
+ meta['_meta']['hostvars'][host['host']]['interfaces'] = list()
+
+ # @bbyhuy to improve readability in next iteration
+
+ for intf in intfdata:
+ if intf['host'] in meta['_meta']['hostvars']:
+ meta['_meta']['hostvars'][intf['host']]['interfaces'].append(intf)
+ if intf['default'] is True:
+ meta['_meta']['hostvars'][intf['host']]['ansible_host'] = intf['ip']
+ if not use_hostnames:
+ meta['all']['hosts'].append(intf['ip'])
+ if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
+ meta['backends']['hosts'].append(intf['ip'])
+ else:
+ meta['frontends']['hosts'].append(intf['ip'])
+ else:
+ meta['all']['hosts'].append(intf['host'])
+ if meta['_meta']['hostvars'][intf['host']]['appliance'] != 'frontend':
+ meta['backends']['hosts'].append(intf['host'])
+ else:
+ meta['frontends']['hosts'].append(intf['host'])
+ return meta
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(description='Stacki Inventory Module')
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument('--list', action='store_true',
+ help='List active hosts')
+ group.add_argument('--host', help='List details about the specific host')
+
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+
+ if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
+ sys.exit('requests>=2.4.3 is required for this inventory script')
+
+ try:
+ config_files = CONFIG_FILES
+ config_files.append(os.path.dirname(os.path.realpath(__file__)) + '/stacki.yml')
+ config = None
+ for cfg_file in config_files:
+ if os.path.isfile(cfg_file):
+ stream = open(cfg_file, 'r')
+ config = yaml.safe_load(stream)
+ break
+ if not config:
+ sys.stderr.write("No config file found at {0}\n".format(config_files))
+ sys.exit(1)
+ client, auth_creds = stack_auth(config['stacki']['auth'])
+ header = stack_build_header(auth_creds)
+ host_list = stack_host_list(config['stacki']['auth']['stacki_endpoint'], header, client)
+ intf_list = stack_net_list(config['stacki']['auth']['stacki_endpoint'], header, client)
+ final_meta = format_meta(host_list, intf_list, config)
+ print(json.dumps(final_meta, indent=4))
+ except Exception as e:
+        sys.stderr.write('%s\n' % str(e))
+ sys.exit(1)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.yml b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.yml
new file mode 100644
index 00000000..2e31c72c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/stacki.yml
@@ -0,0 +1,7 @@
+---
+stacki:
+ auth:
+ stacki_user: admin
+ stacki_password: GhYgWut1hfGbbnstmbW3m-bJbeME-3EvC20rF1LHrDM
+ stacki_endpoint: http://192.168.200.50/stack
+use_hostnames: false \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vagrant.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vagrant.py
new file mode 100644
index 00000000..74db0212
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vagrant.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+"""
+Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
+returns it under the host group 'vagrant'
+
+Example Vagrant configuration using this script:
+
+ config.vm.provision :ansible do |ansible|
+ ansible.playbook = "./provision/your_playbook.yml"
+ ansible.inventory_path = "./provision/inventory/vagrant.py"
+ ansible.verbose = true
+ end
+"""
+
+# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
+# 2015 Igor Khomyakov <homyakov@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Thanks to the spacewalk.py inventory script for giving me the basic structure
+# of this.
+#
+
+import sys
+import os.path
+import subprocess
+import re
+from paramiko import SSHConfig
+from optparse import OptionParser
+from collections import defaultdict
+import json
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves import StringIO
+
+
+_group = 'vagrant' # a default group
+_ssh_to_ansible = [('user', 'ansible_user'),
+ ('hostname', 'ansible_host'),
+ ('identityfile', 'ansible_ssh_private_key_file'),
+ ('port', 'ansible_port')]
+
+# Options
+# ------------------------------
+
+parser = OptionParser(usage="%prog [options] --list | --host <machine>")
+parser.add_option('--list', default=False, dest="list", action="store_true",
+ help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
+parser.add_option('--host', default=None, dest="host",
+ help="Generate additional host specific details for given host for Ansible")
+(options, args) = parser.parse_args()
+
+#
+# helper functions
+#
+
+
+# get the ssh config for every running box, as a dictionary keyed by box name.
+def get_ssh_config():
+ return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
+
+
+# list all the running boxes
+def list_running_boxes():
+
+ output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
+
+ boxes = []
+
+ for line in output:
+ matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
+ if matcher:
+ boxes.append(matcher.group(1))
+
+ return boxes
+
+
+# get the ssh config for a single box
+def get_a_ssh_config(box_name):
+ """Gives back a map of all the machine's ssh configurations"""
+
+ output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
+ config = SSHConfig()
+ config.parse(StringIO(output))
+ host_config = config.lookup(box_name)
+
+ # man 5 ssh_config:
+ # > It is possible to have multiple identity files ...
+ # > all these identities will be tried in sequence.
+ for id in host_config['identityfile']:
+ if os.path.isfile(id):
+ host_config['identityfile'] = id
+
+ return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
+
+
+# List out servers that vagrant has running
+# ------------------------------
+if options.list:
+ ssh_config = get_ssh_config()
+ meta = defaultdict(dict)
+
+ for host in ssh_config:
+ meta['hostvars'][host] = ssh_config[host]
+
+ print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
+ sys.exit(0)
+
+# Get out the host details
+# ------------------------------
+elif options.host:
+ print(json.dumps(get_a_ssh_config(options.host)))
+ sys.exit(0)
+
+# Print out help
+# ------------------------------
+else:
+ parser.print_help()
+ sys.exit(0)
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vbox.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vbox.py
new file mode 100644
index 00000000..110ead14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/vbox.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from subprocess import Popen, PIPE
+
+import json
+
+
+class SetEncoder(json.JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, set):
+ return list(obj)
+ return json.JSONEncoder.default(self, obj)
+
+
+VBOX = "VBoxManage"
+
+
+def get_hosts(host=None):
+
+ returned = {}
+ try:
+ if host:
+ p = Popen([VBOX, 'showvminfo', host], stdout=PIPE)
+ else:
+ returned = {'all': set(), '_metadata': {}}
+ p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
+ except Exception:
+ sys.exit(1)
+
+ hostvars = {}
+ prevkey = pref_k = ''
+
+ for line in p.stdout.readlines():
+
+ try:
+ k, v = line.split(':', 1)
+ except Exception:
+ continue
+
+ if k == '':
+ continue
+
+ v = v.strip()
+ if k.startswith('Name'):
+ if v not in hostvars:
+ curname = v
+ hostvars[curname] = {}
+ try: # try to get network info
+ x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE)
+ ipinfo = x.stdout.read()
+ if 'Value' in ipinfo:
+ a, ip = ipinfo.split(':', 1)
+ hostvars[curname]['ansible_ssh_host'] = ip.strip()
+ except Exception:
+ pass
+
+ continue
+
+ if not host:
+ if k == 'Groups':
+ for group in v.split('/'):
+ if group:
+ if group not in returned:
+ returned[group] = set()
+ returned[group].add(curname)
+ returned['all'].add(curname)
+ continue
+
+ pref_k = 'vbox_' + k.strip().replace(' ', '_')
+ if k.startswith(' '):
+ if prevkey not in hostvars[curname]:
+ hostvars[curname][prevkey] = {}
+ hostvars[curname][prevkey][pref_k] = v
+ else:
+ if v != '':
+ hostvars[curname][pref_k] = v
+
+ prevkey = pref_k
+
+ if not host:
+ returned['_metadata']['hostvars'] = hostvars
+ else:
+ returned = hostvars[host]
+ return returned
+
+
+if __name__ == '__main__':
+
+ inventory = {}
+ hostname = None
+
+ if len(sys.argv) > 1:
+ if sys.argv[1] == "--host":
+ hostname = sys.argv[2]
+
+ if hostname:
+ inventory = get_hosts(hostname)
+ else:
+ inventory = get_hosts()
+
+ sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder))
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/inventory/zone.py b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/zone.py
new file mode 100644
index 00000000..9020f9ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/inventory/zone.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from subprocess import Popen, PIPE
+import sys
+import json
+
+result = {}
+result['all'] = {}
+
+pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
+result['all']['hosts'] = []
+for l in pipe.stdout.readlines():
+ # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
+ s = l.split(':')
+ if s[1] != 'global':
+ result['all']['hosts'].append(s[1])
+
+result['all']['vars'] = {}
+result['all']['vars']['ansible_connection'] = 'zone'
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print(json.dumps({'ansible_connection': 'zone'}))
+else:
+ sys.stderr.write("Need an argument, either --list or --host <host>\n")
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/vault/__init__.py b/collections-debian-merged/ansible_collections/community/general/scripts/vault/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/vault/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.ini b/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.ini
new file mode 100644
index 00000000..d47f9762
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.ini
@@ -0,0 +1,10 @@
+[azure_keyvault] # Used with Azure KeyVault
+vault_name=django-keyvault
+secret_name=vaultpw
+secret_version=9k1e6c7367b33eac8ee241b3698009f3
+
+[azure] # Used by Dynamic Inventory
+group_by_resource_group=yes
+group_by_location=yes
+group_by_security_group=yes
+group_by_tag=yes \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.py b/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.py
new file mode 100644
index 00000000..c27418f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/vault/azure_vault.py
@@ -0,0 +1,595 @@
+#!/usr/bin/env python
+#
+# This script borrows a great deal of code from the azure_rm.py dynamic inventory script
+# that is packaged with Ansible. This can be found in the Ansible GitHub project at:
+# https://github.com/ansible/ansible/blob/devel/contrib/inventory/azure_rm.py
+#
+# The Azure Dynamic Inventory script was written by:
+# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
+# Chris Houseknecht, <house@redhat.com>
+# Altered/Added for Vault functionality:
+# Austin Hobbs, GitHub: @OxHobbs
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Ansible Vault Password with Azure Key Vault Secret Script
+=========================================================
+This script is designed to be used with Ansible Vault. It can be passed to the
+ansible-vault command as the vault password file, fetching the password from an
+Azure Key Vault secret. This script uses the Azure Python SDK. For instructions
+on installing the Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
+
+Authentication
+--------------
+The order of precedence is command line arguments, environment variables,
+and finally the [default] profile found in ~/.azure/credentials for all
+authentication parameters.
+
+If using a credentials file, it should be an ini formatted file with one or
+more sections, which we refer to as profiles. The script looks for a
+[default] section if a profile is not specified either on the command line
+or with an environment variable. The keys in a profile will match the
+list of command line arguments below.
+
+Via command line arguments or environment variables, specify a profile found
+in your ~/.azure/credentials file, or a service principal or Active Directory
+user.
+
+Command line arguments:
+ - profile
+ - client_id
+ - secret
+ - subscription_id
+ - tenant
+ - ad_user
+ - password
+ - cloud_environment
+ - adfs_authority_url
+ - vault-name
+ - secret-name
+ - secret-version
+
+Environment variables:
+ - AZURE_PROFILE
+ - AZURE_CLIENT_ID
+ - AZURE_SECRET
+ - AZURE_SUBSCRIPTION_ID
+ - AZURE_TENANT
+ - AZURE_AD_USER
+ - AZURE_PASSWORD
+ - AZURE_CLOUD_ENVIRONMENT
+ - AZURE_ADFS_AUTHORITY_URL
+ - AZURE_VAULT_NAME
+ - AZURE_VAULT_SECRET_NAME
+ - AZURE_VAULT_SECRET_VERSION
+
+
+Vault
+-----
+
+The order of precedence for Azure Key Vault secret information is the same:
+command line arguments, environment variables, and finally the azure_vault.ini
+file with the [azure_keyvault] section.
+
+azure_vault.ini (or azure_rm.ini if merged with Azure Dynamic Inventory Script)
+------------------------------------------------------------------------------
+As mentioned above, you can control execution using environment variables or a .ini file. A sample
+azure_vault.ini is included. The name of the .ini file is the basename of the inventory script (in this case
+'azure_vault') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
+a different path for the .ini file, define the AZURE_VAULT_INI_PATH environment variable:
+
+ export AZURE_VAULT_INI_PATH=/path/to/custom.ini
+ or
+ export AZURE_VAULT_INI_PATH=[same path as azure_rm.ini if merged]
+
+ __NOTE__: If using the azure_rm.py dynamic inventory script, it is possible to use the same .ini
+ file for both the azure_rm dynamic inventory and the azure_vault password file. Simply add a section
+ named [azure_keyvault] to the ini file with the following properties: vault_name, secret_name and
+ secret_version.
+
+Examples:
+---------
+ Validate the vault_pw script with Python
+ $ python azure_vault.py -n mydjangovault -s vaultpw -v 6b6w7f7252b44eac8ee726b3698009f3
+ $ python azure_vault.py --vault-name 'mydjangovault' --secret-name 'vaultpw' \
+ --secret-version 6b6w7f7252b44eac8ee726b3698009f3
+
+ Use with a playbook
+ $ ansible-playbook -i ./azure_rm.py my_playbook.yml --limit galaxy-qa --vault-password-file ./azure_vault.py
+
+
+Insecure Platform Warning
+-------------------------
+If you receive InsecurePlatformWarning from urllib3, install the
+requests security packages:
+
+ pip install requests[security]
+
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - Matt Davis (@nitzmahone)
+ - Austin Hobbs (@OxHobbs)
+
+Company: Ansible by Red Hat, Microsoft
+
+Version: 0.1.0
+'''
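+
+# A hedged sketch of a ~/.azure/credentials [default] profile as described in the
+# Authentication section above; every value below is a placeholder:
+#
+#   [default]
+#   subscription_id=00000000-0000-0000-0000-000000000000
+#   client_id=00000000-0000-0000-0000-000000000000
+#   secret=my-service-principal-secret
+#   tenant=00000000-0000-0000-0000-000000000000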
+
+import argparse
+import os
+import re
+import sys
+import inspect
+from azure.keyvault import KeyVaultClient
+
+from ansible.module_utils.six.moves import configparser as cp
+
+from os.path import expanduser
+import ansible.module_utils.six.moves.urllib.parse as urlparse
+
+HAS_AZURE = True
+HAS_AZURE_EXC = None
+HAS_AZURE_CLI_CORE = True
+CLIError = None
+
+try:
+ from msrestazure.azure_active_directory import AADTokenCredentials
+ from msrestazure.azure_exceptions import CloudError
+ from msrestazure.azure_active_directory import MSIAuthentication
+ from msrestazure import azure_cloud
+ from azure.mgmt.compute import __version__ as azure_compute_version
+ from azure.common import AzureMissingResourceHttpError, AzureHttpError
+ from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.resource.subscriptions import SubscriptionClient
+ from azure.mgmt.compute import ComputeManagementClient
+ from adal.authentication_context import AuthenticationContext
+except ImportError as exc:
+ HAS_AZURE_EXC = exc
+ HAS_AZURE = False
+
+try:
+ from azure.cli.core.util import CLIError
+ from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
+ from azure.common.cloud import get_cli_active_cloud
+except ImportError:
+ HAS_AZURE_CLI_CORE = False
+ CLIError = Exception
+
+try:
+ from ansible.release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+
+AZURE_CREDENTIAL_ENV_MAPPING = dict(
+ profile='AZURE_PROFILE',
+ subscription_id='AZURE_SUBSCRIPTION_ID',
+ client_id='AZURE_CLIENT_ID',
+ secret='AZURE_SECRET',
+ tenant='AZURE_TENANT',
+ ad_user='AZURE_AD_USER',
+ password='AZURE_PASSWORD',
+ cloud_environment='AZURE_CLOUD_ENVIRONMENT',
+ adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
+)
+
+AZURE_VAULT_SETTINGS = dict(
+ vault_name='AZURE_VAULT_NAME',
+ secret_name='AZURE_VAULT_SECRET_NAME',
+ secret_version='AZURE_VAULT_SECRET_VERSION',
+)
+
+AZURE_MIN_VERSION = "2.0.0"
+ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ansible_version)
+
+
+class AzureRM(object):
+
+ def __init__(self, args):
+ self._args = args
+ self._cloud_environment = None
+ self._compute_client = None
+ self._resource_client = None
+ self._network_client = None
+ self._adfs_authority_url = None
+ self._vault_client = None
+ self._resource = None
+
+ self.debug = False
+ if args.debug:
+ self.debug = True
+
+ self.credentials = self._get_credentials(args)
+ if not self.credentials:
+ self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
+ "or define a profile in ~/.azure/credentials.")
+
+ # if cloud_environment specified, look up/build Cloud object
+ raw_cloud_env = self.credentials.get('cloud_environment')
+ if not raw_cloud_env:
+ self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
+ else:
+ # try to look up "well-known" values via the name attribute on azure_cloud members
+ all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
+ matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
+ if len(matched_clouds) == 1:
+ self._cloud_environment = matched_clouds[0]
+ elif len(matched_clouds) > 1:
+ self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(
+ raw_cloud_env))
+ else:
+ if not urlparse.urlparse(raw_cloud_env).scheme:
+ self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format(
+ [x.name for x in all_clouds]))
+ try:
+ self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
+ except Exception as e:
+                    self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, str(e)))
+
+ if self.credentials.get('subscription_id', None) is None:
+ self.fail("Credentials did not include a subscription_id value.")
+ self.log("setting subscription_id")
+ self.subscription_id = self.credentials['subscription_id']
+
+ # get authentication authority
+ # for adfs, user could pass in authority or not.
+ # for others, use default authority from cloud environment
+ if self.credentials.get('adfs_authority_url'):
+ self._adfs_authority_url = self.credentials.get('adfs_authority_url')
+ else:
+ self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
+
+ # get resource from cloud environment
+ self._resource = self._cloud_environment.endpoints.active_directory_resource_id
+
+ if self.credentials.get('credentials'):
+ self.azure_credentials = self.credentials.get('credentials')
+ elif self.credentials.get('client_id') and self.credentials.get('secret') and self.credentials.get('tenant'):
+ self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
+ secret=self.credentials['secret'],
+ tenant=self.credentials['tenant'],
+ cloud_environment=self._cloud_environment)
+
+ elif self.credentials.get('ad_user') is not None and \
+ self.credentials.get('password') is not None and \
+ self.credentials.get('client_id') is not None and \
+ self.credentials.get('tenant') is not None:
+
+ self.azure_credentials = self.acquire_token_with_username_password(
+ self._adfs_authority_url,
+ self._resource,
+ self.credentials['ad_user'],
+ self.credentials['password'],
+ self.credentials['client_id'],
+ self.credentials['tenant'])
+
+ elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
+ tenant = self.credentials.get('tenant')
+ if not tenant:
+ tenant = 'common'
+ self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
+ self.credentials['password'],
+ tenant=tenant,
+ cloud_environment=self._cloud_environment)
+
+ else:
+ self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
+ "Credentials must include client_id, secret and tenant or ad_user and password, or "
+ "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, "
+ "or be logged in using AzureCLI.")
+
+ def log(self, msg):
+ if self.debug:
+ print(msg + u'\n')
+
+ def fail(self, msg):
+ raise Exception(msg)
+
+ def _get_profile(self, profile="default"):
+ path = expanduser("~")
+ path += "/.azure/credentials"
+ try:
+ config = cp.ConfigParser()
+ config.read(path)
+ except Exception as exc:
+ self.fail("Failed to access {0}. Check that the file exists and you have read "
+ "access. {1}".format(path, str(exc)))
+ credentials = dict()
+ for key in AZURE_CREDENTIAL_ENV_MAPPING:
+ try:
+ credentials[key] = config.get(profile, key, raw=True)
+ except Exception:
+ pass
+
+ if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
+ return credentials
+
+ return None
+
+ def _get_env_credentials(self):
+ env_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ env_credentials[attribute] = os.environ.get(env_variable, None)
+
+ if env_credentials['profile'] is not None:
+ credentials = self._get_profile(env_credentials['profile'])
+ return credentials
+
+ if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
+ return env_credentials
+
+ return None
+
+ def _get_azure_cli_credentials(self):
+ credentials, subscription_id = get_azure_cli_credentials()
+ cloud_environment = get_cli_active_cloud()
+
+ cli_credentials = {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id,
+ 'cloud_environment': cloud_environment
+ }
+ return cli_credentials
+
+ def _get_msi_credentials(self, subscription_id_param=None):
+ credentials = MSIAuthentication()
+ try:
+ # try to get the subscription in MSI to test whether MSI is enabled
+ subscription_client = SubscriptionClient(credentials)
+ subscription = next(subscription_client.subscriptions.list())
+ subscription_id = str(subscription.subscription_id)
+ return {
+ 'credentials': credentials,
+ 'subscription_id': subscription_id_param or subscription_id
+ }
+ except Exception as exc:
+ return None
+
+ def _get_credentials(self, params):
+ # Get authentication credentials.
+ # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
+
+ self.log('Getting credentials')
+
+ arg_credentials = dict()
+ for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
+ arg_credentials[attribute] = getattr(params, attribute)
+
+ # try module params
+ if arg_credentials['profile'] is not None:
+ self.log('Retrieving credentials with profile parameter.')
+ credentials = self._get_profile(arg_credentials['profile'])
+ return credentials
+
+ if arg_credentials['client_id'] is not None:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ if arg_credentials['ad_user'] is not None:
+ self.log('Received credentials from parameters.')
+ return arg_credentials
+
+ # try environment
+ env_credentials = self._get_env_credentials()
+ if env_credentials:
+ self.log('Received credentials from env.')
+ return env_credentials
+
+ # try default profile from ~./azure/credentials
+ default_credentials = self._get_profile()
+ if default_credentials:
+ self.log('Retrieved default profile credentials from ~/.azure/credentials.')
+ return default_credentials
+
+ msi_credentials = self._get_msi_credentials(arg_credentials.get('subscription_id'))
+ if msi_credentials:
+ self.log('Retrieved credentials from MSI.')
+ return msi_credentials
+
+ try:
+ if HAS_AZURE_CLI_CORE:
+ self.log('Retrieving credentials from AzureCLI profile')
+ cli_credentials = self._get_azure_cli_credentials()
+ return cli_credentials
+ except CLIError as ce:
+ self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
+
+ return None
+
+ def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
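+        # Build the authority URI (base authority plus tenant, when one is given) and use
+        # ADAL to obtain a username/password token, wrapped in AADTokenCredentials so the
+        # SDK management clients can consume it.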
+ authority_uri = authority
+
+ if tenant is not None:
+ authority_uri = authority + '/' + tenant
+
+ context = AuthenticationContext(authority_uri)
+ token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
+ return AADTokenCredentials(token_response)
+
+ def _register(self, key):
+ try:
+ # We have to perform the one-time registration here. Otherwise, we receive an error the first
+ # time we attempt to use the requested client.
+ resource_client = self.rm_client
+ resource_client.providers.register(key)
+ except Exception as exc:
+ self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
+ self.log("You might need to register {0} using an admin account".format(key))
+ self.log(("To register a provider using the Python CLI: "
+ "https://docs.microsoft.com/azure/azure-resource-manager/"
+ "resource-manager-common-deployment-errors#noregisteredproviderfound"))
+
+ def get_mgmt_svc_client(self, client_type, base_url, api_version):
+ client = client_type(self.azure_credentials,
+ self.subscription_id,
+ base_url=base_url,
+ api_version=api_version)
+ client.config.add_user_agent(ANSIBLE_USER_AGENT)
+ return client
+
+ def get_vault_client(self):
+ return KeyVaultClient(self.azure_credentials)
+
+ def get_vault_suffix(self):
+ return self._cloud_environment.suffixes.keyvault_dns
+
+ @property
+ def network_client(self):
+ self.log('Getting network client')
+ if not self._network_client:
+ self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-06-01')
+ self._register('Microsoft.Network')
+ return self._network_client
+
+ @property
+ def rm_client(self):
+ self.log('Getting resource manager client')
+ if not self._resource_client:
+ self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-05-10')
+ return self._resource_client
+
+ @property
+ def compute_client(self):
+ self.log('Getting compute client')
+ if not self._compute_client:
+ self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
+ self._cloud_environment.endpoints.resource_manager,
+ '2017-03-30')
+ self._register('Microsoft.Compute')
+ return self._compute_client
+
+ @property
+ def vault_client(self):
+ self.log('Getting the Key Vault client')
+ if not self._vault_client:
+ self._vault_client = self.get_vault_client()
+
+ return self._vault_client
+
+
+class AzureKeyVaultSecret:
+
+ def __init__(self):
+
+ self._args = self._parse_cli_args()
+
+ try:
+ rm = AzureRM(self._args)
+ except Exception as e:
+ sys.exit("{0}".format(str(e)))
+
+ self._get_vault_settings()
+
+ if self._args.vault_name:
+ self.vault_name = self._args.vault_name
+
+ if self._args.secret_name:
+ self.secret_name = self._args.secret_name
+
+ if self._args.secret_version:
+ self.secret_version = self._args.secret_version
+
+ self._vault_suffix = rm.get_vault_suffix()
+ self._vault_client = rm.vault_client
+
+ print(self.get_password_from_vault())
+
+ def _parse_cli_args(self):
+ parser = argparse.ArgumentParser(
+            description='Obtain the vault password used to secure your Ansible secrets'
+ )
+ parser.add_argument('-n', '--vault-name', action='store', help='Name of Azure Key Vault')
+ parser.add_argument('-s', '--secret-name', action='store',
+ help='Name of the secret stored in Azure Key Vault')
+ parser.add_argument('-v', '--secret-version', action='store',
+ help='Version of the secret to be retrieved')
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Send the debug messages to STDOUT')
+ parser.add_argument('--profile', action='store',
+ help='Azure profile contained in ~/.azure/credentials')
+ parser.add_argument('--subscription_id', action='store',
+ help='Azure Subscription Id')
+ parser.add_argument('--client_id', action='store',
+ help='Azure Client Id ')
+ parser.add_argument('--secret', action='store',
+ help='Azure Client Secret')
+ parser.add_argument('--tenant', action='store',
+ help='Azure Tenant Id')
+ parser.add_argument('--ad_user', action='store',
+ help='Active Directory User')
+ parser.add_argument('--password', action='store',
+ help='password')
+ parser.add_argument('--adfs_authority_url', action='store',
+ help='Azure ADFS authority url')
+ parser.add_argument('--cloud_environment', action='store',
+ help='Azure Cloud Environment name or metadata discovery URL')
+
+ return parser.parse_args()
+
+ def get_password_from_vault(self):
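+        # The vault URL is the vault name plus the cloud's Key Vault DNS suffix,
+        # e.g. https://myvault.vault.azure.net for the public cloud.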
+ vault_url = 'https://{0}{1}'.format(self.vault_name, self._vault_suffix)
+ secret = self._vault_client.get_secret(vault_url, self.secret_name, self.secret_version)
+ return secret.value
+
+ def _get_vault_settings(self):
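+        # Environment variables win only when every vault setting is present in the
+        # environment; otherwise fall back to whatever the .ini file provides.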
+ env_settings = self._get_vault_env_settings()
+ if None not in set(env_settings.values()):
+ for key in AZURE_VAULT_SETTINGS:
+ setattr(self, key, env_settings.get(key, None))
+ else:
+ file_settings = self._load_vault_settings()
+ if not file_settings:
+ return
+
+ for key in AZURE_VAULT_SETTINGS:
+ if file_settings.get(key):
+ setattr(self, key, file_settings.get(key))
+
+ def _get_vault_env_settings(self):
+ env_settings = dict()
+ for attribute, env_variable in AZURE_VAULT_SETTINGS.items():
+ env_settings[attribute] = os.environ.get(env_variable, None)
+ return env_settings
+
+ def _load_vault_settings(self):
+ basename = os.path.splitext(os.path.basename(__file__))[0]
+ default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
+ path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_VAULT_INI_PATH', default_path)))
+ config = None
+ settings = None
+ try:
+ config = cp.ConfigParser()
+ config.read(path)
+ except Exception:
+ pass
+
+ if config is not None:
+ settings = dict()
+ for key in AZURE_VAULT_SETTINGS:
+ try:
+ settings[key] = config.get('azure_keyvault', key, raw=True)
+ except Exception:
+ pass
+
+ return settings
+
+
+def main():
+ if not HAS_AZURE:
+ sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(
+ AZURE_MIN_VERSION, HAS_AZURE_EXC))
+
+ AzureKeyVaultSecret()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring-client.py b/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring-client.py
new file mode 100644
index 00000000..8332b228
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring-client.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2014, Matt Martz <matt@sivel.net>
+# (c) 2016, Justin Mayer <https://justinmayer.com/>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# =============================================================================
+#
+# This script is to be used with ansible-vault's --vault-id arg
+# to retrieve the vault password via your OS's native keyring application.
+#
+# This file *MUST* be saved with executable permissions. Otherwise, Ansible
+# will try to parse as a password file and display: "ERROR! Decryption failed"
+#
+# The `keyring` Python module is required: https://pypi.org/project/keyring/
+#
+# By default, this script will store the specified password in the keyring of
+# the user that invokes the script. To specify a user keyring, add a [vault]
+# section to your ansible.cfg file with a 'username' option. Example:
+#
+# [vault]
+# username = 'ansible-vault'
+#
+# In usage like:
+#
+# ansible-vault --vault-id keyring_id@contrib/vault/vault-keyring-client.py view some_encrypted_file
+#
+# --vault-id will call this script like:
+#
+# contrib/vault/vault-keyring-client.py --vault-id keyring_id
+#
+# That will retrieve the password from the user's keyring for the
+# keyring service 'keyring_id'. This is the equivalent of:
+#
+# keyring get keyring_id $USER
+#
+# If no vault-id name is specified on the ansible command line, the vault-keyring-client.py
+# script will be called without '--vault-id' and will default to the keyring service 'ansible'.
+# This is equivalent to:
+#
+# keyring get ansible $USER
+#
+# You can configure the `vault_password_file` option in ansible.cfg:
+#
+# [defaults]
+# ...
+# vault_password_file = /path/to/vault-keyring-client.py
+# ...
+#
+# To set your password, `cd` to your project directory and run:
+#
+# # will use default keyring service / vault-id of 'ansible'
+# /path/to/vault-keyring-client.py --set
+#
+# or to specify the keyring service / vault-id of 'my_ansible_secret':
+#
+# /path/to/vault-keyring-client.py --vault-id my_ansible_secret --set
+#
+# If you choose not to configure the path to `vault_password_file` in
+# ansible.cfg, your `ansible-playbook` command might look like:
+#
+# ansible-playbook --vault-id=keyring_id@/path/to/vault-keyring-client.py site.yml
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import argparse
+import sys
+import getpass
+import keyring
+
+from ansible.config.manager import ConfigManager
+
+KEYNAME_UNKNOWN_RC = 2
+
+
+def build_arg_parser():
+ parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
+
+ parser.add_argument('--vault-id', action='store', default=None,
+ dest='vault_id',
+ help='name of the vault secret to get from keyring')
+ parser.add_argument('--username', action='store', default=None,
+ help='the username whose keyring is queried')
+ parser.add_argument('--set', action='store_true', default=False,
+ dest='set_password',
+ help='set the password instead of getting it')
+ return parser
+
+
+def main():
+ config_manager = ConfigManager()
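+    # Defaults come from the [vault] section of ansible.cfg (when present); the
+    # command line arguments parsed below override them.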
+ username = config_manager.data.get_setting('vault.username')
+ if not username:
+ username = getpass.getuser()
+
+ keyname = config_manager.data.get_setting('vault.keyname')
+ if not keyname:
+ keyname = 'ansible'
+
+ arg_parser = build_arg_parser()
+ args = arg_parser.parse_args()
+
+ username = args.username or username
+ keyname = args.vault_id or keyname
+
+ # print('username: %s keyname: %s' % (username, keyname))
+
+ if args.set_password:
+ intro = 'Storing password in "{}" user keyring using key name: {}\n'
+ sys.stdout.write(intro.format(username, keyname))
+ password = getpass.getpass()
+ confirm = getpass.getpass('Confirm password: ')
+ if password == confirm:
+ keyring.set_password(keyname, username, password)
+ else:
+ sys.stderr.write('Passwords do not match\n')
+ sys.exit(1)
+ else:
+ secret = keyring.get_password(keyname, username)
+ if secret is None:
+ sys.stderr.write('vault-keyring-client could not find key="%s" for user="%s" via backend="%s"\n' %
+ (keyname, username, keyring.get_keyring().name))
+ sys.exit(KEYNAME_UNKNOWN_RC)
+
+ # print('secret: %s' % secret)
+ sys.stdout.write('%s\n' % secret)
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring.py b/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring.py
new file mode 100644
index 00000000..45188b12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/scripts/vault/vault-keyring.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (c) 2014, Matt Martz <matt@sivel.net>
+# (c) 2016, Justin Mayer <https://justinmayer.com/>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# =============================================================================
+#
+# This script is to be used with vault_password_file or --vault-password-file
+# to retrieve the vault password via your OS's native keyring application.
+#
+# This file *MUST* be saved with executable permissions. Otherwise, Ansible
+# will try to parse as a password file and display: "ERROR! Decryption failed"
+#
+# The `keyring` Python module is required: https://pypi.org/project/keyring/
+#
+# By default, this script will store the specified password in the keyring of
+# the user that invokes the script. To specify a user keyring, add a [vault]
+# section to your ansible.cfg file with a 'username' option. Example:
+#
+# [vault]
+# username = 'ansible-vault'
+#
+# Another optional setting is for the key name, which allows you to use this
+# script to handle multiple project vaults with different passwords:
+#
+# [vault]
+# keyname = 'ansible-vault-yourproject'
+#
+# You can configure the `vault_password_file` option in ansible.cfg:
+#
+# [defaults]
+# ...
+# vault_password_file = /path/to/vault-keyring.py
+# ...
+#
+# To set your password, `cd` to your project directory and run:
+#
+# python /path/to/vault-keyring.py set
+#
+# If you choose not to configure the path to `vault_password_file` in
+# ansible.cfg, your `ansible-playbook` command might look like:
+#
+# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import getpass
+import keyring
+
+from ansible.config.manager import ConfigManager, get_ini_config_value
+
+
+def main():
+ config = ConfigManager()
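+    # Read the optional [vault] username/keyname settings from ansible.cfg,
+    # falling back to the current user and the key name 'ansible'.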
+ username = get_ini_config_value(
+ config._parsers[config._config_file],
+ dict(section='vault', key='username')
+ ) or getpass.getuser()
+
+ keyname = get_ini_config_value(
+ config._parsers[config._config_file],
+ dict(section='vault', key='keyname')
+ ) or 'ansible'
+
+ if len(sys.argv) == 2 and sys.argv[1] == 'set':
+ intro = 'Storing password in "{}" user keyring using key name: {}\n'
+ sys.stdout.write(intro.format(username, keyname))
+ password = getpass.getpass()
+ confirm = getpass.getpass('Confirm password: ')
+ if password == confirm:
+ keyring.set_password(keyname, username, password)
+ else:
+ sys.stderr.write('Passwords do not match\n')
+ sys.exit(1)
+ else:
+ sys.stdout.write('{0}\n'.format(keyring.get_password(keyname,
+ username)))
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/shippable.yml b/collections-debian-merged/ansible_collections/community/general/shippable.yml
new file mode 100644
index 00000000..9961851e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/shippable.yml
@@ -0,0 +1,44 @@
+language: python
+
+env:
+ matrix:
+ - T=none
+
+matrix:
+ exclude:
+ - env: T=none
+ include:
+ - env: T=devel/sanity/1
+ - env: T=devel/sanity/2
+ - env: T=devel/sanity/3
+ - env: T=devel/sanity/4
+ - env: T=devel/sanity/extra
+
+ - env: T=2.10/sanity/1
+ - env: T=2.10/sanity/2
+ - env: T=2.10/sanity/3
+ - env: T=2.10/sanity/4
+
+ - env: T=2.9/sanity/1
+ - env: T=2.9/sanity/2
+ - env: T=2.9/sanity/3
+ - env: T=2.9/sanity/4
+
+branches:
+ except:
+ - "*-patch-*"
+ - "revert-*-*"
+ - "patchback/backports/*"
+
+build:
+ ci:
+ - tests/utils/shippable/timing.sh tests/utils/shippable/shippable.sh $T
+
+integrations:
+ notifications:
+ - integrationName: email
+ type: email
+ on_success: never
+ on_failure: never
+ on_start: never
+ on_pull_request: never
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/.gitignore b/collections-debian-merged/ansible_collections/community/general/tests/.gitignore
new file mode 100644
index 00000000..ea1472ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/.gitignore
@@ -0,0 +1 @@
+output/
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases
new file mode 100644
index 00000000..e6cab07d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases
@@ -0,0 +1,2 @@
+# No AIX LPAR available
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml
new file mode 100644
index 00000000..3bb3329b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml
@@ -0,0 +1,76 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Scan new devices.
+ aix_devices:
+ device: all
+ state: present
+
+- name: Scan new virtual devices (vio0).
+ aix_devices:
+ device: vio0
+ state: present
+
+- name: Removing IP alias from en0
+ aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2.
+ aix_devices:
+ device: ent2
+ state: absent
+
+- name: Put device en2 in Defined
+ aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent).
+ aix_devices:
+ device: ent4
+ state: absent
+
+- name: Put device en4 in Defined (nonexistent)
+ aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ aix_devices:
+ device: vscsi1
+ recursive: yes
+ state: absent
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ aix_devices:
+ device: en1
+ attributes:
+ mtu: 900
+ arp: off
+ state: present
+
+- name: Configure IP, netmask and set en1 up.
+ aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: present
+
+- name: Adding IP alias to en0
+ aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml
new file mode 100644
index 00000000..ed326d93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml
@@ -0,0 +1,125 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Unmounting /testfs
+ aix_filesystem:
+ filesystem: /testfs
+ state: unmounted
+
+- name: Removing /testfs
+ aix_filesystem:
+ filesystem: /testfs
+ state: absent
+
+- name: Creating a new file system
+ aix_filesystem:
+ filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+# This requires a host (nfshost) exporting an NFS share
+- name: Creating NFS filesystem from nfshost (Linux NFS server)
+ aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+ filesystem: /nfs/ftp
+ state: present
+
+# It requires a volume group named datavg (next three actions)
+- name: Creating a logical volume testlv (aix_lvol module)
+ aix_lvol:
+ vg: datavg
+ lv: testlv
+ size: 2G
+ state: present
+
+- name: Create filesystem in a previously defined logical volume
+ aix_filesystem:
+ device: testlv
+ filesystem: /testfs
+ state: present
+
+- name: Create an already existing filesystem using existing logical volume.
+ aix_filesystem:
+ vg: datavg
+ device: mksysblv
+ filesystem: /mksysb
+ state: present
+
+- name: Create a filesystem in a non-existing VG
+ aix_filesystem:
+ vg: nonexistvg
+ filesystem: /newlv
+ state: present
+
+- name: Resizing /mksysb to 1G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 1G
+ state: present
+
+- name: Resizing /mksysb to +512M
+ aix_filesystem:
+ filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to 11G (already done)
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Resizing /mksysb to +100G (not enough space)
+ aix_filesystem:
+ filesystem: /mksysb
+ size: +100G
+ state: present
+
+- name: Unmount filesystem /home/ftp
+ aix_filesystem:
+ filesystem: /home/ftp
+ state: unmounted
+
+- name: Remove NFS filesystem /home/ftp
+ aix_filesystem:
+ filesystem: /home/ftp
+ rm_mount_point: yes
+ state: absent
+
+- name: Mount filesystem /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ state: mounted
+
+- name: Remove mounted /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
+
+- name: Unmount /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ state: unmounted
+
+- name: Remove /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: yes
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/aliases
new file mode 100644
index 00000000..64c02f24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group3
+destructive
+needs/root
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml
new file mode 100644
index 00000000..3dc799df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml
@@ -0,0 +1,71 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis.bonicoli@libregerbil.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: 'setup: create a dummy alternative'
+ block:
+ - import_tasks: setup.yml
+
+ ##############
+ # Test parameters:
+ # link parameter present / absent ('with_link' variable)
+ # with / without alternatives defined in alternatives file ('with_alternatives' variable)
+ # auto / manual ('mode' variable)
+
+ - include_tasks: tests.yml
+ with_nested:
+ - [ True, False ] # with_link
+ - [ True, False ] # with_alternatives
+ - [ 'auto', 'manual' ] # mode
+ loop_control:
+ loop_var: test_conf
+
+ ##########
+ # Priority
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: True
+ mode: auto
+
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: False
+ mode: auto
+
+ # Test that path is checked: alternatives must fail when path is nonexistent
+ - import_tasks: path_is_checked.yml
+
+ always:
+ - include_tasks: remove_links.yml
+
+ - file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ alternatives_dir }}/dummy'
+
+ - file:
+ path: '/usr/bin/dummy{{ item }}'
+ state: absent
+ with_sequence: start=1 end=4
+ # *Disable tests on Fedora 24*
+ # Shippable Fedora 24 image provides chkconfig-1.7-2.fc24.x86_64 but not the
+ # latest available version (chkconfig-1.8-1.fc24.x86_64). update-alternatives
+ # in chkconfig-1.7-2 fails when /etc/alternatives/dummy link is missing,
+ # error is: 'failed to read link /usr/bin/dummy: No such file or directory'.
+ # Moreover Fedora 24 is no longer maintained.
+ when: ansible_distribution != 'Fedora' or ansible_distribution_major_version|int > 24
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml
new file mode 100644
index 00000000..ef0a3b47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml
@@ -0,0 +1,12 @@
+- name: Try with nonexistent path
+ alternatives:
+ name: dummy
+ path: '/non/existent/path/there'
+ link: '/usr/bin/dummy'
+ ignore_errors: True
+ register: alternative
+
+- name: Check previous task failed
+ assert:
+ that:
+ - 'alternative is failed'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml
new file mode 100644
index 00000000..690b0606
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml
@@ -0,0 +1,7 @@
+- name: remove links
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/alternatives/dummy
+ - /usr/bin/dummy
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml
new file mode 100644
index 00000000..7e4a4053
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml
@@ -0,0 +1,14 @@
+- include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
+ paths: ../vars
+- template:
+ src: dummy_command
+ dest: /usr/bin/dummy{{ item }}
+ owner: root
+ group: root
+ mode: '0755'
+ with_sequence: start=1 end=4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml
new file mode 100644
index 00000000..6a55c6ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml
@@ -0,0 +1,11 @@
+- template:
+ src: dummy_alternative
+ dest: '{{ alternatives_dir }}/dummy'
+ owner: root
+ group: root
+ mode: '0644'
+ when: with_alternatives or ansible_os_family != 'RedHat'
+- file:
+ path: '{{ alternatives_dir }}/dummy'
+ state: absent
+ when: not with_alternatives and ansible_os_family == 'RedHat'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml
new file mode 100644
index 00000000..e5cf2d99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml
@@ -0,0 +1,53 @@
+- debug:
+ msg: ' with_alternatives: {{ with_alternatives }}, mode: {{ mode }}'
+
+- block:
+ - name: set alternative (using link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: '/usr/bin/dummy'
+ register: alternative
+
+ - name: check expected command was executed
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
+ when: with_link
+
+- block:
+ - name: set alternative (without link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ register: alternative
+
+ - name: check expected command was executed
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
+ when: not with_link
+
+- name: execute dummy command
+ shell: dummy
+ register: cmd
+
+- name: check expected command was executed
+ assert:
+ that:
+ - 'cmd.stdout == "dummy" ~ item'
+
+- name: 'check mode (manual: alternatives file existed, it has been updated)'
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+ when: ansible_os_family != 'RedHat' or with_alternatives or item != 1
+
+- name: 'check mode (auto: alternatives file didn''t exist, it has been created)'
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"'
+ when: ansible_os_family == 'RedHat' and not with_alternatives and item == 1
+
+- name: check that alternative has been updated
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n' '{{ alternatives_dir }}/dummy'"
+ # priority doesn't seem updated
+ #command: "grep -Pzq '/bin/dummy{{ item }}\\n50' '{{ alternatives_dir }}/dummy'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml
new file mode 100644
index 00000000..e0400dfd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml
@@ -0,0 +1,15 @@
+- block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations:
+ # - first will use 'link currently absent',
+ # - second will receive 'link currently points to'
+ - include_tasks: test.yml
+ with_sequence: start=1 end=2
+ vars:
+ with_link: '{{ test_conf[0] }}'
+ with_alternatives: '{{ test_conf[1] }}'
+ mode: '{{ test_conf[2] }}'
+  # update-alternatives as shipped in Fedora 26 (1.10) and Red Hat 7.4 (1.8) doesn't provide
+  # the '--query' switch, so 'link' is mandatory for these distributions.
+ when: ansible_os_family != 'RedHat' or test_conf[0]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml
new file mode 100644
index 00000000..7e278175
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml
@@ -0,0 +1,23 @@
+- name: update dummy alternative
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: /usr/bin/dummy
+ priority: '{{ 60 + item|int }}'
+ register: alternative
+
+- name: execute dummy command
+ shell: dummy
+ register: cmd
+
+- name: check if link group is in manual mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+
+- name: check expected command was executed
+ assert:
+ that:
+ - 'alternative is changed'
+ - 'cmd.stdout == "dummy{{ item }}"'
+
+- name: check that alternative has been updated
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 60 + item|int }}' '{{ alternatives_dir }}/dummy'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative
new file mode 100644
index 00000000..5dce8add
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative
@@ -0,0 +1,12 @@
+{{ mode }}
+/usr/bin/dummy
+
+{% if with_alternatives %}
+/usr/bin/dummy1
+40
+/usr/bin/dummy2
+30
+
+{% else %}
+
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command
new file mode 100644
index 00000000..332d9fe1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo dummy{{ item }}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml
new file mode 100644
index 00000000..1e83283e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml
@@ -0,0 +1,2 @@
+---
+alternatives_dir: /var/lib/dpkg/alternatives/
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml
new file mode 100644
index 00000000..37664ddb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml
@@ -0,0 +1,2 @@
+---
+alternatives_dir: /var/lib/rpm/alternatives/
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml
new file mode 100644
index 00000000..d00123de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml
@@ -0,0 +1,2 @@
+---
+alternatives_dir: /var/lib/alternatives/
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases
new file mode 100644
index 00000000..0725da56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml
new file mode 100644
index 00000000..24ba4f27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml
@@ -0,0 +1,231 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: install apache via apt
+ apt:
+ name: "{{item}}"
+ state: present
+ when: "ansible_os_family == 'Debian'"
+ with_items:
+ - apache2
+ - libapache2-mod-evasive
+
+- name: install apache via zypper
+ community.general.zypper:
+ name: apache2
+ state: present
+ when: "ansible_os_family == 'Suse'"
+
+- name: disable userdir module
+ apache2_module:
+ name: userdir
+ state: absent
+ register: userdir_first_disable
+
+- name: disable userdir module, second run
+ apache2_module:
+ name: userdir
+ state: absent
+ register: disable
+
+- name: ensure apache2_module is idempotent
+ assert:
+ that:
+ - disable is not changed
+
+- name: enable userdir module
+ apache2_module:
+ name: userdir
+ state: present
+ register: enable
+
+- name: ensure changed on successful enable
+ assert:
+ that:
+ - enable is changed
+
+- name: enable userdir module, second run
+ apache2_module:
+ name: userdir
+ state: present
+ register: enabletwo
+
+- name: ensure apache2_module is idempotent
+ assert:
+ that:
+ - 'not enabletwo.changed'
+
+- name: disable userdir module, final run
+ apache2_module:
+ name: userdir
+ state: absent
+ register: disablefinal
+
+- name: ensure changed on successful disable
+ assert:
+ that:
+ - 'disablefinal.changed'
+
+- name: set userdir to original state
+ apache2_module:
+ name: userdir
+ state: present
+ when: userdir_first_disable is changed
+
+- name: ensure autoindex enabled
+ apache2_module:
+ name: autoindex
+ state: present
+
+- name: Debian/Ubuntu specific tests
+ when: "ansible_os_family == 'Debian'"
+ block:
+ - name: force disable of autoindex # bug #2499
+ apache2_module:
+ name: autoindex
+ state: absent
+ force: True
+
+ - name: reenable autoindex
+ apache2_module:
+ name: autoindex
+ state: present
+
+ - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635
+ apache2_module:
+ name: evasive
+ state: present
+
+ - name: disable evasive module
+ apache2_module:
+ name: evasive
+ state: absent
+
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ apache2_module:
+ name: dump_io
+ state: present
+ ignore_errors: True
+ register: enable_dumpio_wrong
+
+ - name: disable dump_io
+ apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
+
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_1
+
+ - name: ensure idempotency with identifier
+ apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_2
+
+ - name: disable dump_io
+ apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
+
+ - assert:
+ that:
+ - enable_dumpio_wrong is failed
+ - enable_dumpio_correct_1 is changed
+ - enable_dumpio_correct_2 is not changed
+
+ - name: disable mpm modules
+ apache2_module:
+ name: "{{ item }}"
+ state: absent
+ ignore_configcheck: True
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+  - name: enable mpm_event
+ apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: True
+ register: enabledmpmevent
+
+ - name: ensure changed mpm_event
+ assert:
+ that:
+ - 'enabledmpmevent.changed'
+
+ - name: switch between mpm_event and mpm_worker
+ apache2_module:
+ name: "{{ item.name }}"
+ state: "{{ item.state }}"
+ ignore_configcheck: True
+ with_items:
+ - name: mpm_event
+ state: absent
+ - name: mpm_worker
+ state: present
+
+ - name: ensure mpm_worker is already enabled
+ apache2_module:
+ name: mpm_worker
+ state: present
+ register: enabledmpmworker
+
+ - name: ensure mpm_worker unchanged
+ assert:
+ that:
+ - 'not enabledmpmworker.changed'
+
+ - name: try to disable all mpm modules with configcheck
+ apache2_module:
+ name: "{{item}}"
+ state: absent
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+ ignore_errors: yes
+ register: remove_with_configcheck
+
+  - name: ensure configcheck fails the task when run without mpm modules
+ assert:
+ that:
+ - "{{ item.failed }}"
+ with_items: "{{ remove_with_configcheck.results }}"
+
+ - name: try to disable all mpm modules without configcheck
+ apache2_module:
+ name: "{{item}}"
+ state: absent
+ ignore_configcheck: True
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+  - name: enable mpm_event to restore previous state
+ apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: True
+ register: enabledmpmevent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml
new file mode 100644
index 00000000..2ec30885
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+
+- name: test apache2_module on supported systems
+ block:
+ - name: get list of enabled modules
+ shell: apache2ctl -M | sort
+ register: modules_before
+ - name: include only on supported systems
+ include: actualtest.yml
+ always:
+ - name: get list of enabled modules
+ shell: apache2ctl -M | sort
+ register: modules_after
+ - debug: var=modules_before
+ - debug: var=modules_after
+ - name: ensure that all test modules are disabled again
+ assert:
+ that: modules_before.stdout == modules_after.stdout
+ when: ansible_os_family in ['Debian', 'Suse']
+ # centos/RHEL does not have a2enmod/a2dismod
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/aliases
new file mode 100644
index 00000000..db9bbd8c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/aliases
@@ -0,0 +1,4 @@
+needs/root
+shippable/posix/group2
+destructive
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt
new file mode 100644
index 00000000..5f34b0af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt
@@ -0,0 +1 @@
+bar.txt \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt
new file mode 100644
index 00000000..7c6ded14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt
@@ -0,0 +1 @@
+foo.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml
new file mode 100644
index 00000000..ca521ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml
new file mode 100644
index 00000000..1b114fd0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml
@@ -0,0 +1,368 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the archive module.
+# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make sure we start fresh
+
+- name: Ensure zip is present to create test archive (yum)
+ yum: name=zip state=latest
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: Ensure zip is present to create test archive (apt)
+ apt: name=zip state=latest
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: Install prerequisites for backports.lzma when using python2 (non OSX)
+ block:
+ - name: Set liblzma package name depending on the OS
+ set_fact:
+ liblzma_dev_package:
+ Debian: liblzma-dev
+ RedHat: xz-devel
+ Suse: xz-devel
+ - name: Ensure liblzma-dev is present to install backports-lzma
+ package: name={{ liblzma_dev_package[ansible_os_family] }} state=latest
+ when: ansible_os_family in liblzma_dev_package.keys()
+ when:
+ - ansible_python_version.split('.')[0] == '2'
+ - ansible_os_family != 'Darwin'
+
+- name: Install prerequisites for backports.lzma when using python2 (OSX)
+ block:
+ - name: Find brew binary
+ command: which brew
+ register: brew_which
+ - name: Get owner of brew binary
+ stat: path="{{ brew_which.stdout }}"
+ register: brew_stat
+ - name: "Install package"
+ homebrew:
+ name: xz
+ state: present
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile a package which takes a long time. Do not upgrade homebrew until a
+ # proper solution can be found
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: True
+ when:
+ - ansible_python_version.split('.')[0] == '2'
+ - ansible_os_family == 'Darwin'
+
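+# The xz format exercised below relies on the lzma module, which the Python 2 standard
+# library does not ship; the backports.lzma pip package is assumed to provide it, which is
+# why the liblzma development headers are installed above before the pip install.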
+- name: Ensure backports.lzma is present to create test archive (pip)
+ pip: name=backports.lzma state=latest
+ when: ansible_python_version.split('.')[0] == '2'
+ register: backports_lzma_pip
+
+- name: prep our files
+ copy: src={{ item }} dest={{output_dir}}/{{ item }}
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+
+- name: archive using gz
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_01.gz"
+ format: gz
+ register: archive_gz_result_01
+
+- debug: msg="{{ archive_gz_result_01 }}"
+
+- name: verify that the files were archived
+ file: path={{output_dir}}/archive_01.gz state=file
+
+- name: check if gz file exists and includes all text files
+ assert:
+ that:
+ - "{{ archive_gz_result_01.changed }}"
+ - "{{ 'archived' in archive_gz_result_01 }}"
+ - "{{ archive_gz_result_01['archived'] | length }} == 3"
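+# The assertions above rely on the module's 'archived' return value, which lists the files
+# rolled into the archive; three files (foo.txt, bar.txt, empty.txt) were copied in, hence
+# the expected length of 3. Embedding Jinja delimiters inside 'that:' works here, but newer
+# Ansible releases warn about it; a plainer form would be, for example:
+#   - archive_gz_result_01 is changed
+#   - "'archived' in archive_gz_result_01"
+#   - archive_gz_result_01['archived'] | length == 3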
+
+- name: archive using zip
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_01.zip"
+ format: zip
+ register: archive_zip_result_01
+
+- debug: msg="{{ archive_zip_result_01 }}"
+
+- name: verify that the files were archived
+ file: path={{output_dir}}/archive_01.zip state=file
+
+- name: check if zip file exists
+ assert:
+ that:
+ - "{{ archive_zip_result_01.changed }}"
+ - "{{ 'archived' in archive_zip_result_01 }}"
+ - "{{ archive_zip_result_01['archived'] | length }} == 3"
+
+- name: archive using bz2
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_01.bz2"
+ format: bz2
+ register: archive_bz2_result_01
+
+- debug: msg="{{ archive_bz2_result_01 }}"
+
+- name: verify that the files were archived
+ file: path={{output_dir}}/archive_01.bz2 state=file
+
+- name: check if bz2 file exists
+ assert:
+ that:
+ - "{{ archive_bz2_result_01.changed }}"
+ - "{{ 'archived' in archive_bz2_result_01 }}"
+ - "{{ archive_bz2_result_01['archived'] | length }} == 3"
+
+- name: archive using xz
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_01.xz"
+ format: xz
+ register: archive_xz_result_01
+
+- debug: msg="{{ archive_xz_result_01 }}"
+
+- name: verify that the files were archived
+ file: path={{output_dir}}/archive_01.xz state=file
+
+- name: check if xz file exists
+ assert:
+ that:
+ - "{{ archive_xz_result_01.changed }}"
+ - "{{ 'archived' in archive_xz_result_01 }}"
+ - "{{ archive_xz_result_01['archived'] | length }} == 3"
+
+- name: archive and set mode to 0600
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_02.gz"
+ format: gz
+ mode: "u+rwX,g-rwx,o-rwx"
+ register: archive_gz_result_02
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ output_dir }}/archive_02.gz"
+ register: archive_02_gz_stat
+
+- debug: msg="{{ archive_02_gz_stat}}"
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "archive_02_gz_stat.changed == False"
+ - "archive_02_gz_stat.stat.mode == '0600'"
+ - "'archived' in archive_gz_result_02"
+ - "{{ archive_gz_result_02['archived'] | length }} == 3"
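+# The symbolic mode "u+rwX,g-rwx,o-rwx" is expected to resolve to 0600 here: the archive is a
+# regular, non-executable file, so the capital X adds no execute bit for the owner, while the
+# group and other bits are stripped entirely.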
+
+- name: remove our gz
+ file: path="{{ output_dir }}/archive_02.gz" state=absent
+
+
+- name: archive and set mode to 0600
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_02.zip"
+ format: zip
+ mode: "u+rwX,g-rwx,o-rwx"
+ register: archive_zip_result_02
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ output_dir }}/archive_02.zip"
+ register: archive_02_zip_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "archive_02_zip_stat.changed == False"
+ - "archive_02_zip_stat.stat.mode == '0600'"
+ - "'archived' in archive_zip_result_02"
+ - "{{ archive_zip_result_02['archived']| length}} == 3"
+
+- name: remove our zip
+ file: path="{{ output_dir }}/archive_02.zip" state=absent
+
+
+- name: archive and set mode to 0600
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_02.bz2"
+ format: bz2
+ mode: "u+rwX,g-rwx,o-rwx"
+ register: archive_bz2_result_02
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ output_dir }}/archive_02.bz2"
+ register: archive_02_bz2_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "archive_02_bz2_stat.changed == False"
+ - "archive_02_bz2_stat.stat.mode == '0600'"
+ - "'archived' in archive_bz2_result_02"
+ - "{{ archive_bz2_result_02['archived']| length}} == 3"
+
+- name: remove our bz2
+ file: path="{{ output_dir }}/archive_02.bz2" state=absent
+
+- name: archive and set mode to 0600
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/archive_02.xz"
+ format: xz
+ mode: "u+rwX,g-rwx,o-rwx"
+ register: archive_xz_result_02
+
+- name: Test that the file modes were changed
+ stat:
+ path: "{{ output_dir }}/archive_02.xz"
+ register: archive_02_xz_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - "archive_02_xz_stat.changed == False"
+ - "archive_02_xz_stat.stat.mode == '0600'"
+ - "'archived' in archive_xz_result_02"
+ - "{{ archive_xz_result_02['archived']| length}} == 3"
+
+- name: remove our xz
+ file: path="{{ output_dir }}/archive_02.xz" state=absent
+
+- name: archive multiple files as list
+ archive:
+ path:
+ - "{{ output_dir }}/empty.txt"
+ - "{{ output_dir }}/foo.txt"
+ - "{{ output_dir }}/bar.txt"
+ dest: "{{ output_dir }}/archive_list.gz"
+ format: gz
+ register: archive_gz_list_result
+
+- name: verify that the files were archived
+ file: path={{output_dir}}/archive_list.gz state=file
+
+- name: check if gz file exists and includes all text files
+ assert:
+ that:
+ - "{{ archive_gz_list_result.changed }}"
+ - "{{ 'archived' in archive_gz_list_result }}"
+ - "{{ archive_gz_list_result['archived'] | length }} == 3"
+
+- name: remove our gz
+ file: path="{{ output_dir }}/archive_list.gz" state=absent
+
+- name: test that a gz archive can contain non-ascii filenames
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz"
+ format: gz
+ register: nonascii_result_0
+
+- name: Check that file is really there
+ stat:
+ path: "{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz"
+ register: nonascii_stat0
+
+- name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result_0.changed == true"
+ - "nonascii_stat0.stat.exists == true"
+
+- name: remove nonascii test
+ file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.tar.gz" state=absent
+
+- name: test that a bz2 archive can contain non-ascii filenames
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2"
+ format: bz2
+ register: nonascii_result_1
+
+- name: Check that file is really there
+ stat:
+ path: "{{ output_dir }}/test-archive-nonascii-くらとみ.bz2"
+ register: nonascii_stat_1
+
+- name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result_1.changed == true"
+ - "nonascii_stat_1.stat.exists == true"
+
+- name: remove nonascii test
+ file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.bz2" state=absent
+
+- name: test that an xz archive can contain non-ascii filenames
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz"
+ format: xz
+ register: nonascii_result_1
+
+- name: Check that file is really there
+ stat:
+ path: "{{ output_dir }}/test-archive-nonascii-くらとみ.xz"
+ register: nonascii_stat_1
+
+- name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result_1.changed == true"
+ - "nonascii_stat_1.stat.exists == true"
+
+- name: remove nonascii test
+ file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.xz" state=absent
+
+- name: test that a zip archive can contain non-ascii filenames
+ archive:
+ path: "{{ output_dir }}/*.txt"
+ dest: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip"
+ format: zip
+ register: nonascii_result_2
+
+- name: Check that file is really there
+ stat:
+ path: "{{ output_dir }}/test-archive-nonascii-くらとみ.zip"
+ register: nonascii_stat_2
+
+- name: Assert that nonascii tests succeeded
+ assert:
+ that:
+ - "nonascii_result_2.changed == true"
+ - "nonascii_stat_2.stat.exists == true"
+
+- name: remove nonascii test
+ file: path="{{ output_dir }}/test-archive-nonascii-くらとみ.zip" state=absent
+
+- name: Remove backports.lzma if previously installed (pip)
+ pip: name=backports.lzma state=absent
+ when: backports_lzma_pip is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml
new file mode 100644
index 00000000..ada2e761
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml
@@ -0,0 +1,5 @@
+---
+all:
+ hosts:
+ testhost:
+ ansible_connection: local
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml
new file mode 100644
index 00000000..1b178f93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
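+# Overview of this helper role: each entry in the caller-supplied 'tests' list is rendered
+# into a temporary playbook, executed with 'ansible-playbook' under that test's environment,
+# and its stdout is compared line by line against 'expected_output'. When an expected entry
+# is itself a list, any one of its members is accepted for that line; any mismatch is
+# collected into 'differences', which the final assert requires to be empty.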
+- block:
+ - name: Create temporary playbook files
+ tempfile:
+ state: file
+ suffix: temp
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: temporary_playbook_files
+
+ - name: Set temporary playbook file content
+ copy:
+ content: "{{ test.playbook }}"
+ dest: "{{ temporary_playbook_files.results[test_idx].path }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ index_var: test_idx
+ label: "{{ test.name }}"
+
+ - name: Collect outputs
+ command: "ansible-playbook -i {{ inventory }} {{ playbook }}"
+ environment: "{{ test.environment }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: outputs
+ changed_when: false
+ vars:
+ inventory: "{{ role_path }}/inventory.yml"
+ playbook: "
+ {%- for result in temporary_playbook_files.results -%}
+ {%- if result.test.name == test.name -%}
+ {{- result.path -}}
+ {%- endif -%}
+ {%- endfor -%}"
+
+ - name: Assert test output equals expected output
+ assert:
+ that: result.output.differences | length == 0
+ loop: "{{ results }}"
+ loop_control:
+ loop_var: result
+ label: "{{ result.name }}"
+ register: assertions
+ vars:
+ results: >-
+ {%- set results = [] -%}
+ {%- for result in outputs.results -%}
+ {%- set differences = [] -%}
+ {%- for i in range([result.test.expected_output | count, result.stdout_lines | count] | max) -%}
+ {%- set line = "line_%s" | format(i+1) -%}
+ {%- set test_line = result.stdout_lines[i] | default(none) -%}
+ {%- set expected_lines = result.test.expected_output[i] | default(none) -%}
+ {%- if expected_lines is not string and expected_lines is not none -%}
+ {%- if test_line not in expected_lines -%}
+ {{- differences.append({
+ line: {
+ 'expected_one_of': expected_lines,
+ 'got': test_line }}) -}}
+ {%- endif -%}
+ {%- else -%}
+ {%- if expected_lines != test_line -%}
+ {{- differences.append({
+ line: {
+ 'expected': expected_lines,
+ 'got': test_line }}) -}}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- results.append({
+ 'name': result.test.name,
+ 'output': {
+ 'differences': differences,
+ 'expected': result.test.expected_output,
+ 'got': result.stdout_lines }}) -}}
+ {%- endfor -%}
+ {{- results -}}
+
+ always:
+ - name: Remove temporary playbooks
+ file:
+ path: "{{ temporary_file.path }}"
+ state: absent
+ loop: "{{ temporary_playbook_files.results }}"
+ loop_control:
+ loop_var: temporary_file
+ label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases
new file mode 100644
index 00000000..252d6ceb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+needs/target/callback
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml
new file mode 100644
index 00000000..d087e452
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml
@@ -0,0 +1,457 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: Not using diy callback options
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_start_msg callback using environment variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG: "Sample output Sample playbook message"
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "Sample output Sample playbook message",
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_play_start_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - name: Sample play name
+ hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_play_start_msg: Sample output {{ ansible_callback_diy.play.name }}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "Sample output Sample play name",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_task_start_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "Sample output Sample task name",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_task_start_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "Sample output Sample task name",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_failed_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ failed_when: true
+ ignore_errors: true
+ vars:
+ ansible_callback_diy_runner_on_failed_msg: Sample output Sample failure message
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Sample failure message",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
+ ]
+
+ - name: Set runner_on_skipped_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ when: false
+ vars:
+ ansible_callback_diy_runner_on_skipped_msg: Sample output Skipped {{ ansible_callback_diy.task.name }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Skipped Sample task name",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_item_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ vars:
+ ansible_callback_diy_runner_item_on_ok_msg: Sample output Looping {{ ansible_callback_diy.result.output.msg }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Looping sample debug msg sample item 1",
+ "Sample output Looping sample debug msg sample item 2",
+ "Sample output Looping sample debug msg sample item 3",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_item_on_failed_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ failed_when: item == 'sample item 2'
+ ignore_errors: true
+ vars:
+ ansible_callback_diy_runner_item_on_failed_msg: Sample output Looping sample failure message
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => (item=sample item 1) => {",
+ " \"msg\": \"sample debug msg sample item 1\"",
+ "}",
+ "Sample output Looping sample failure message",
+ "ok: [testhost] => (item=sample item 3) => {",
+ " \"msg\": \"sample debug msg sample item 3\"",
+ "}",
+ [
+ # Apparently a bug was fixed in Ansible, as before it ran through with "All items completed"
+ "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}",
+ "fatal: [testhost]: FAILED! => {\"msg\": \"One or more items failed\"}",
+ ],
+ "...ignoring",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
+ ]
+
+ - name: Set runner_item_on_skipped_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ when: item != 'sample item 2'
+ vars:
+ ansible_callback_diy_runner_item_on_skipped_msg: Sample output Looping Skipped {{ ansible_callback_diy.result.output.item }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => (item=sample item 1) => {",
+ " \"msg\": \"sample debug msg sample item 1\"",
+ "}",
+ "Sample output Looping Skipped sample item 2",
+ "ok: [testhost] => (item=sample item 3) => {",
+ " \"msg\": \"sample debug msg sample item 3\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_stats_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_stats_msg: |+2
+ Sample output stats
+ ===============================
+ {% for key in ansible_callback_diy.stats | sort %}
+ {% set color_one = "" %}
+ {% set color_two = "" %}
+ {% if ansible_callback_diy.stats[key] %}
+ {% if key == 'ok' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'changed' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'processed' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'skipped' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% else %}
+ {% set prefix = "" %}
+ {% set suffix = "" %}
+ {% endif %}
+ {{ color_one }}{{ "%s%s%s" | format(prefix,key,suffix) }}{{ color_two }}: {{ ansible_callback_diy.stats[key] | to_nice_yaml }}
+ {% endif %}
+ {% endfor %}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ " Sample output stats",
+ "===============================",
+ " ok : testhost: 1",
+ "",
+ " processed : testhost: 1"
+ ]
+
+ - name: Suppress output on playbook_on_task_start_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ''
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Suppress output on runner_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: ''
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_ok_msg_color using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
+ ansible_callback_diy_runner_on_ok_msg_color: blue
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml
new file mode 100644
index 00000000..8015726e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml
@@ -0,0 +1,4 @@
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - ping:
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh
new file mode 100755
index 00000000..af4a9746
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# ANSIBLE_CALLBACK_WHITELIST has been deprecated in ansible-base 2.11, ANSIBLE_CALLBACKS_ENABLED should be used
+export ANSIBLE_CALLBACK_WHITELIST="community.general.log_plays,${ANSIBLE_CALLBACK_WHITELIST:-}"
+export ANSIBLE_CALLBACKS_ENABLED="community.general.log_plays,${ANSIBLE_CALLBACKS_ENABLED:-}"
+
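+# The log_plays callback is expected to write one log file per inventory host under
+# ANSIBLE_LOG_FOLDER (hence the check for "${ANSIBLE_LOG_FOLDER}/localhost" below), and to
+# fail with a callback error when that path already exists as a regular file.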
+# run play, should create log and dir if needed
+export ANSIBLE_LOG_FOLDER="logit"
+ansible-playbook ping_log.yml -v "$@"
+[[ -f "${ANSIBLE_LOG_FOLDER}/localhost" ]]
+
+# now force it to fail
+export ANSIBLE_LOG_FOLDER="logit.file"
+touch "${ANSIBLE_LOG_FOLDER}"
+ansible-playbook ping_log.yml -v "$@" 2>&1| grep 'Failure using method (v2_runner_on_ok) in callback plugin'
+[[ ! -f "${ANSIBLE_LOG_FOLDER}/localhost" ]]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases
new file mode 100644
index 00000000..c6864963
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+needs/target/callback
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml
new file mode 100644
index 00000000..21b43717
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml
@@ -0,0 +1,60 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: Basic run
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => ",
+ " msg: sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+ - name: Test umlauts in multiline
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Umlaut output
+ debug:
+ msg: "äöü\néêè\nßï☺"
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Umlaut output] ***********************************************************",
+ "ok: [testhost] => ",
+ " msg: |-",
+ " äöü",
+ " éêè",
+ " ßï☺",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases
new file mode 100644
index 00000000..1a7dd323
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
new file mode 100644
index 00000000..faa36656
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: test cloud-init
+ # TODO: check for a workaround
+ # install 'cloud-init'' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf
+ # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of
+ # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib
+ # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838
+ # Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions
+ # (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+ block:
+ - name: setup install cloud-init
+ package:
+ name:
+ - cloud-init
+ - udev
+
+ - name: setup run cloud-init
+ service:
+ name: cloud-init-local
+ state: restarted
+
+ - name: test gather cloud-init facts in check mode
+ cloud_init_data_facts:
+ check_mode: yes
+ register: result
+ - name: verify test gather cloud-init facts in check mode
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
+
+ - name: test gather cloud-init facts
+ cloud_init_data_facts:
+ register: result
+ - name: verify test gather cloud-init facts
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/aliases
new file mode 100644
index 00000000..136c05e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/aliases
@@ -0,0 +1 @@
+hidden
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test.sh
new file mode 100755
index 00000000..4e7aa8dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default and C locale.
+
+ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml
new file mode 100644
index 00000000..a662e576
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml
@@ -0,0 +1,43 @@
+- hosts: "{{ target_hosts }}"
+ gather_facts: no
+ serial: 1
+ tasks:
+
+ ### raw with unicode arg and output
+
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+ - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+
+ ### copy local file with unicode filename and content
+
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+ ### fetch remote file with unicode filename and content
+
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+ ### remove local and remote temp files
+
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+ ### test wait_for_connection plugin
+ - ansible.builtin.wait_for_connection:
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases
new file mode 100644
index 00000000..c460dc37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases
@@ -0,0 +1,3 @@
+needs/root
+shippable/posix/group3
+skip/macos # Skipped due to limitation of macOS 10.15 SIP, please read https://github.com/ansible-collections/community.general/issues/1017#issuecomment-755088895
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
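+# For example, when this script lives in the connection_chroot target directory, the
+# expression above yields group=chroot.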
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory
new file mode 100644
index 00000000..db13a110
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory
@@ -0,0 +1,7 @@
+[chroot]
+chroot-pipelining ansible_ssh_pipelining=true
+chroot-no-pipelining ansible_ssh_pipelining=false
+[chroot:vars]
+ansible_host=/
+ansible_connection=community.general.chroot
+ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/aliases
new file mode 100644
index 00000000..33b258da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/aliases
@@ -0,0 +1,2 @@
+non_local
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/runme.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/test_connection.inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/test_connection.inventory
new file mode 100644
index 00000000..7cef1cbd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_docker/test_connection.inventory
@@ -0,0 +1,6 @@
+[docker]
+docker-pipelining ansible_ssh_pipelining=true
+docker-no-pipelining ansible_ssh_pipelining=false
+[docker:vars]
+ansible_host=ubuntu-latest
+ansible_connection=community.general.docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory
new file mode 100644
index 00000000..466f7776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory
@@ -0,0 +1,7 @@
+[jail]
+jail-pipelining ansible_ssh_pipelining=true
+jail-no-pipelining ansible_ssh_pipelining=false
+[jail:vars]
+ansible_host=freebsd_10_2
+ansible_connection=community.general.jail
+ansible_python_interpreter=/usr/local/bin/python
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory
new file mode 100644
index 00000000..5e3e3c3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory
@@ -0,0 +1,17 @@
+[lxc]
+lxc-pipelining ansible_ssh_pipelining=true
+lxc-no-pipelining ansible_ssh_pipelining=false
+[lxc:vars]
+# 1. install lxc
+# 2. install python2-lxc
+# $ pip install git+https://github.com/lxc/python2-lxc.git
+# 3. create container:
+# $ sudo lxc-create -t download -n centos-7-amd64 -- -d centos -r 7 -a amd64
+# 4. start container:
+# $ sudo lxc-start -n centos-7-amd64 -d
+# 5. run test:
+# $ sudo -E make test_connection_lxc
+# 6. stop container
+# $ sudo lxc-stop -n centos-7-amd64
+ansible_host=centos-7-amd64
+ansible_connection=community.general.lxc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases
new file mode 100644
index 00000000..33b258da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases
@@ -0,0 +1,2 @@
+non_local
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory
new file mode 100644
index 00000000..73ab0656
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory
@@ -0,0 +1,6 @@
+[lxd]
+lxd-pipelining ansible_ssh_pipelining=true
+lxd-no-pipelining ansible_ssh_pipelining=false
+[lxd:vars]
+ansible_host=centos-7-amd64
+ansible_connection=community.general.lxd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases
new file mode 100644
index 00000000..f5e09799
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases
@@ -0,0 +1,2 @@
+needs/target/connection
+hidden
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh
new file mode 100755
index 00000000..d3976ff3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/aliases
new file mode 100644
index 00000000..cc24e932
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group2
+destructive
+skip/aix
+skip/macos # cannot simply create binaries in system locations on newer macOS versions
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml
new file mode 100644
index 00000000..f4c99a2a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - setup_pkg_mgr
+ - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml
new file mode 100644
index 00000000..a5490ec6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml
@@ -0,0 +1,162 @@
+- name: list sessions
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - "'sessions' in result"
+
+- name: create a session
+ consul_session:
+ state: present
+ name: testsession
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result['name'] == 'testsession'
+ - "'session_id' in result"
+
+- set_fact:
+ session_id: "{{ result['session_id'] }}"
+
+- name: list sessions after creation
+ consul_session:
+ state: list
+ register: result
+
+- set_fact:
+ session_count: "{{ result['sessions'] | length }}"
+
+- assert:
+ that:
+ - result is changed
+ # selectattr not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (set_fact/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'match', '^' ~ session_id ~ '$') | first)['Name'] == 'testsession'
+
+- name: search created session
+ set_fact:
+ test_session_found: True
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+
+- name: ensure session was created
+ assert:
+ that:
+ - test_session_found|default(False)
+
+- name: fetch info about a session
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: ensure 'id' parameter is required when state=info
+ consul_session:
+ state: info
+ name: test
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result is failed
+
+- name: ensure unknown scheme fails
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ scheme: non_existent
+ register: result
+ ignore_errors: True
+
+- assert:
+ that:
+ - result is failed
+
+- when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: ensure SSL certificate is checked
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ register: result
+ ignore_errors: True
+
+ - name: previous task should fail since certificate is not known
+ assert:
+ that:
+ - result is failed
+ - "'certificate verify failed' in result.msg"
+
+ - name: ensure SSL certificate isn't checked when validate_certs is disabled
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ validate_certs: False
+ register: result
+
+ - name: previous task should succeed since certificate isn't checked
+ assert:
+ that:
+ - result is changed
+
+ - name: ensure a secure connection is possible
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ environment:
+ REQUESTS_CA_BUNDLE: '{{ remote_dir }}/cert.pem'
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+- name: delete a session
+ consul_session:
+ state: absent
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: list sessions after deletion
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ # selectattr and equalto not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (command/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'equalto', session_id) | list | length) == 0
+
+- name: search deleted session
+ command: echo 'session found'
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+ register: search_deleted
+
+- name: ensure session was deleted
+ assert:
+ that:
+ - search_deleted is skipped # each iteration is skipped
+ - search_deleted is not changed # and then unchanged
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml
new file mode 100644
index 00000000..4de2d332
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml
@@ -0,0 +1,87 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install Consul and test
+ vars:
+ consul_version: 1.5.0
+ consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip
+ consul_cmd: '{{ output_dir }}/consul'
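+    # Illustrative example: on a 64-bit Linux host (ansible_system=Linux, consul_arch=amd64),
+    # consul_uri resolves to
+    # https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_1.5.0_linux_amd64.zip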
+ block:
+ - name: register pyOpenSSL version
+ command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)'''
+ register: pyopenssl_version
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+ - name: Install python-consul
+ pip:
+ name: python-consul
+ register: result
+ until: result is success
+ - when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ output_dir }}/privatekey.pem'
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.openssl_certificate:
+ path: '{{ output_dir }}/cert.pem'
+ csr_path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - set_fact:
+ consul_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ consul_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+ - name: Download consul binary
+ unarchive:
+ src: '{{ consul_uri }}'
+ dest: '{{ output_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+ - vars:
+ remote_dir: '{{ echo_output_dir.stdout }}'
+ block:
+ - command: echo {{ output_dir }}
+ register: echo_output_dir
+ - name: Create configuration file
+ template:
+ src: consul_config.hcl.j2
+ dest: '{{ output_dir }}/consul_config.hcl'
+ - name: Start Consul (dev mode enabled)
+ shell: nohup {{ consul_cmd }} agent -dev -config-file {{ output_dir }}/consul_config.hcl </dev/null >/dev/null 2>&1 &
+ - name: Create some data
+ command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}'
+ loop:
+ - 1
+ - 2
+ - 3
+ - import_tasks: consul_session.yml
+ always:
+ - name: Kill consul process
+ shell: kill $(cat {{ output_dir }}/consul.pid)
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2
new file mode 100644
index 00000000..9af06f02
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2
@@ -0,0 +1,13 @@
+# {{ ansible_managed }}
+server = true
+pid_file = "{{ remote_dir }}/consul.pid"
+ports {
+ http = 8500
+ {% if pyopenssl_version.stdout is version('0.15', '>=') %}
+ https = 8501
+ {% endif %}
+}
+{% if pyopenssl_version.stdout is version('0.15', '>=') %}
+key_file = "{{ remote_dir }}/privatekey.pem"
+cert_file = "{{ remote_dir }}/cert.pem"
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml
new file mode 100644
index 00000000..a22230ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml
@@ -0,0 +1 @@
+cron_config_path: /etc/cron.d
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml
new file mode 100644
index 00000000..2d2436a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_cron
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml
new file mode 100644
index 00000000..4e95258e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml
@@ -0,0 +1,114 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create EMAIL cron var
+ cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+ register: create_cronvar1
+
+- name: Create EMAIL cron var again
+ cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+ register: create_cronvar2
+
+- name: Check cron var value
+ shell: crontab -l -u root | grep -c EMAIL=doug@ansibmod.con.com
+ register: varcheck1
+
+- name: Modify EMAIL cron var
+ cronvar:
+ name: EMAIL
+ value: jane@ansibmod.con.com
+ register: create_cronvar3
+
+- name: Check cron var value again
+ shell: crontab -l -u root | grep -c EMAIL=jane@ansibmod.con.com
+ register: varcheck2
+
+- name: Remove EMAIL cron var
+ cronvar:
+ name: EMAIL
+ state: absent
+ register: remove_cronvar1
+
+- name: Remove EMAIL cron var again
+ cronvar:
+ name: EMAIL
+ state: absent
+ register: remove_cronvar2
+
+- name: Check cron var value again
+ shell: crontab -l -u root | grep -c EMAIL
+ register: varcheck3
+ failed_when: varcheck3.rc == 0
+
+- name: Add cron var to custom file
+ cronvar:
+ name: TESTVAR
+ value: somevalue
+ cron_file: cronvar_test
+ register: custom_cronfile1
+
+- name: Add cron var to custom file again
+ cronvar:
+ name: TESTVAR
+ value: somevalue
+ cron_file: cronvar_test
+ register: custom_cronfile2
+
+- name: Check cron var value in custom file
+ command: grep -c TESTVAR=somevalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck1
+
+- name: Change cron var in custom file
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ register: custom_cronfile3
+
+- name: Check cron var value in custom file
+ command: grep -c TESTVAR=newvalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck2
+
+- name: Remove cron var from custom file
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ state: absent
+ register: custom_remove_cronvar1
+
+- name: Remove cron var from custom file again
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ state: absent
+ register: custom_remove_cronvar2
+
+- name: Check cron var value
+ command: grep -c TESTVAR=newvalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck3
+ failed_when: custom_varcheck3.rc == 0
+
+- name: Ensure cronvar tasks did the right thing
+ assert:
+ that:
+ - create_cronvar1 is changed
+ - create_cronvar2 is not changed
+ - create_cronvar3 is changed
+ - remove_cronvar1 is changed
+ - remove_cronvar2 is not changed
+ - varcheck1.stdout == '1'
+ - varcheck2.stdout == '1'
+ - varcheck3.stdout == '0'
+ - custom_remove_cronvar1 is changed
+ - custom_remove_cronvar2 is not changed
+ - custom_varcheck1.stdout == '1'
+ - custom_varcheck2.stdout == '1'
+ - custom_varcheck3.stdout == '0'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml
new file mode 100644
index 00000000..a61ab2a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml
@@ -0,0 +1,154 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: record the output directory
+ set_fact: deploy_helper_test_root={{output_dir}}/deploy_helper_test_root
+
+- name: State=query with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=query
+- name: Assert State=query with default parameters
+ assert:
+ that:
+ - "'project_path' in deploy_helper"
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/current'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/releases'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/shared'"
+ - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
+ - "'previous_release' in deploy_helper"
+ - "'previous_release_path' in deploy_helper"
+ - "'new_release' in deploy_helper"
+ - "'new_release_path' in deploy_helper"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release }}'"
+
+- name: State=query with relative overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query
+- name: Assert State=query with relative overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with absolute overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query
+- name: Assert State=query with absolute overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with overridden unfinished_filename
+ deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query
+- name: Assert State=query with overridden unfinished_filename
+ assert:
+ that:
+ - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
+
+# Remove the root folder just in case it exists
+- file: path={{ deploy_helper_test_root }} state=absent
+
+- name: State=present with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=present
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with default parameters
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- name: State=finalize with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=finalize
+- stat: path={{ deploy_helper.current_path }}
+ register: current_path
+- stat: path={{ deploy_helper.current_path }}/DEPLOY_UNFINISHED
+ register: current_path_unfinished_filename
+- name: Assert State=finalize with default parameters
+ assert:
+ that:
+ - "current_path.stat.islnk"
+ - "deploy_helper.new_release_path in current_path.stat.lnk_source"
+ - "not current_path_unfinished_filename.stat.exists"
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=finalize with default parameters (clean=true checks)
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "releases_count.stdout|trim == '6'"
+- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query
+- name: Assert State=finalize with default parameters (previous_release checks)
+ assert:
+ that:
+ - "deploy_helper.new_release == deploy_helper.previous_release"
+
+- name: State=absent with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=absent
+- stat: path={{ deploy_helper_test_root }}
+ register: project_path
+- name: Assert State=absent with default parameters
+ assert:
+ that:
+ - "not project_path.stat.exists"
+
+- debug: msg="Clearing all release data and facts ---------"
+
+- name: State=present with shared_path set to False
+ deploy_helper: path={{ deploy_helper_test_root }} state=present shared_path=''
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with shared_path set to False
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "not shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- shell: "ls {{ deploy_helper_test_root }}/releases | wc -l"
+ register: before_releases_count
+- name: State=clean with keep_releases=3
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=clean keep_releases=3
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=clean with keep_releases=3
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "before_releases_count.stdout|trim == '6'"
+ - "releases_count.stdout|trim == '3'"
+
+# Remove the root folder
+- file: path={{ deploy_helper_test_root }} state=absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/aliases
new file mode 100644
index 00000000..fadc88e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group3
+skip/osx
+skip/macos
+skip/freebsd
+skip/aix
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/main.yml
new file mode 100644
index 00000000..ea3fbde5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_docker_config.yml
+ when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')
+
+- fail: msg="docker / docker-py version is too old to run docker_config tests!"
+ when: not(docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/test_docker_config.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/test_docker_config.yml
new file mode 100644
index 00000000..8220e8f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_config/tasks/test_docker_config.yml
@@ -0,0 +1,139 @@
+---
+- block:
+ - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
+
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+ - name: Parameter name should be required
+ docker_config:
+ state: present
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when called with no name
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Test parameters
+ docker_config:
+ name: foo
+ state: present
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when called with no data
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is present but all of the following are missing: data"'
+
+ - name: Create config
+ docker_config:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: Create variable config_id
+ set_fact:
+ config_id: "{{ output.config_id }}"
+
+ - name: Inspect config
+ command: "docker config inspect {{ config_id }}"
+ register: inspect
+ ignore_errors: yes
+
+ - debug: var=inspect
+
+ - name: assert config creation succeeded
+ assert:
+ that:
+ - "'db_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create config again
+ docker_config:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: assert create config is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Create config again (base64)
+ docker_config:
+ name: db_password
+ data: b3BlbnNlc2FtZSE=
+ data_is_b64: true
+ state: present
+ register: output
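+  # Illustrative note: "b3BlbnNlc2FtZSE=" is the base64 encoding of "opensesame!",
+  # i.e. the same payload as the previous task, so the following assert expects no change.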
+
+ - name: assert create config (base64) is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Update config
+ docker_config:
+ name: db_password
+ data: newpassword!
+ state: present
+ register: output
+
+ - name: assert config was updated
+ assert:
+ that:
+ - output.changed
+ - output.config_id != config_id
+
+ - name: Remove config
+ docker_config:
+ name: db_password
+ state: absent
+
+ - name: Check that config is removed
+ command: "docker config inspect {{ config_id }}"
+ register: output
+ ignore_errors: yes
+
+ - name: assert config was removed
+ assert:
+ that:
+ - output.failed
+
+ - name: Remove config
+ docker_config:
+ name: db_password
+ state: absent
+ register: output
+
+ - name: assert remove config is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ always:
+ - name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/aliases
new file mode 100644
index 00000000..aec97db2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group5
+skip/osx
+skip/macos
+skip/freebsd
+skip/aix
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/files/env-file b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/files/env-file
new file mode 100644
index 00000000..b15f1b64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/files/env-file
@@ -0,0 +1,2 @@
+TEST3=val3
+TEST4=val4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py
new file mode 100644
index 00000000..fb2a5dd9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py
@@ -0,0 +1,34 @@
+# (c) 2020, Felix Fontein <felix@fontein.de>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.compat import ipaddress
+
+
+def _normalize_ipaddr(ipaddr):
+ return ipaddress.ip_address(ipaddr).compressed
+
+
+class FilterModule(object):
+ """ IP address and network manipulation filters """
+
+ def filters(self):
+ return {
+ 'normalize_ipaddr': _normalize_ipaddr,
+ }
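+
+# Illustrative usage (assumption, not documented upstream): in a playbook the filter
+# can be applied as "{{ some_ipv6_address | normalize_ipaddr }}"; for example
+#   _normalize_ipaddr('2001:0db8::0001')  ->  '2001:db8::1'
+# because ipaddress.ip_address(...).compressed drops leading zeros and collapses zero runs.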
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/main.yml
new file mode 100644
index 00000000..338e0b1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random container name prefix
+ set_fact:
+ cname_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+ dnetworks: []
+
+- debug:
+ msg: "Using container name prefix {{ cname_prefix }}"
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ with_items: "{{ cnames }}"
+ diff: no
+ - name: "Make sure all networks are removed"
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: yes
+ with_items: "{{ dnetworks }}"
+ when: docker_py_version is version('1.10.0', '>=')
+ diff: no
+
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+
+- fail: msg="docker / docker-py version is too old to run all docker_container tests!"
+ when: not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/comparisons.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/comparisons.yml
new file mode 100644
index 00000000..fadf4dc3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/comparisons.yml
@@ -0,0 +1,463 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-comparisons' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## value ###########################################################
+####################################################################
+
+- name: value
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.com
+ register: value_1
+
+- name: value (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ force_kill: yes
+ comparisons:
+ hostname: ignore
+ register: value_2
+
+- name: value (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ force_kill: yes
+ comparisons:
+ hostname: strict
+ register: value_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - value_1 is changed
+ - value_2 is not changed
+ - value_3 is changed
+
+####################################################################
+## list ############################################################
+####################################################################
+
+- name: list
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: list_1
+
+- name: list (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 9.9.9.9
+ force_kill: yes
+ comparisons:
+ dns_servers: ignore
+ register: list_2
+
+- name: list (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 9.9.9.9
+ force_kill: yes
+ comparisons:
+ dns_servers: strict
+ register: list_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - list_1 is changed
+ - list_2 is not changed
+ - list_3 is changed
+
+####################################################################
+## set #############################################################
+####################################################################
+
+- name: set
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ register: set_1
+
+- name: set (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ - "1012"
+ force_kill: yes
+ comparisons:
+ groups: ignore
+ register: set_2
+
+- name: set (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ - "1012"
+ force_kill: yes
+ comparisons:
+ groups: allow_more_present
+ register: set_3
+
+- name: set (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1012"
+ force_kill: yes
+ comparisons:
+ groups: allow_more_present
+ register: set_4
+
+- name: set (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1012"
+ force_kill: yes
+ comparisons:
+ groups: strict
+ register: set_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - set_1 is changed
+ - set_2 is not changed
+ - set_3 is changed
+ - set_4 is not changed
+ - set_5 is changed
+
+####################################################################
+## set(dict) #######################################################
+####################################################################
+
+- name: set(dict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ register: set_dict_1
+
+- name: set(dict) (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: yes
+ comparisons:
+ devices: ignore
+ register: set_dict_2
+
+- name: set(dict) (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: yes
+ comparisons:
+ devices: allow_more_present
+ register: set_dict_3
+
+- name: set(dict) (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: yes
+ comparisons:
+ devices: allow_more_present
+ register: set_dict_4
+
+- name: set(dict) (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: yes
+ comparisons:
+ devices: strict
+ register: set_dict_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - set_dict_1 is changed
+ - set_dict_2 is not changed
+ - set_dict_3 is changed
+ - set_dict_4 is not changed
+ - set_dict_5 is changed
+
+####################################################################
+## dict ############################################################
+####################################################################
+
+- name: dict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: dict_1
+
+- name: dict (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ labels: ignore
+ register: dict_2
+
+- name: dict (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ labels: allow_more_present
+ register: dict_3
+
+- name: dict (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ labels: allow_more_present
+ register: dict_4
+
+- name: dict (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ labels: strict
+ register: dict_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - dict_1 is changed
+ - dict_2 is not changed
+ - dict_3 is changed
+ - dict_4 is not changed
+ - dict_5 is changed
+
+####################################################################
+## wildcard ########################################################
+####################################################################
+
+- name: Pull {{ docker_test_image_hello_world }} image to make sure wildcard_2 test succeeds
+ # If the image isn't there, it will pull it and return 'changed'.
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ source: pull
+
+- name: wildcard
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.com
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ register: wildcard_1
+
+- name: wildcard (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 2
+ labels:
+ ansible.test.1: hello
+ ansible.test.4: ignore
+ force_kill: yes
+ comparisons:
+ '*': ignore
+ register: wildcard_2
+
+- name: wildcard (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ '*': strict
+ register: wildcard_3
+
+- name: wildcard (no change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: yes
+ comparisons:
+ '*': strict
+ register: wildcard_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - wildcard_1 is changed
+ - wildcard_2 is not changed
+ - wildcard_3 is changed
+ - wildcard_4 is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/compatibility.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/compatibility.yml
new file mode 100644
index 00000000..de8758aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/compatibility.yml
@@ -0,0 +1,118 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-hi' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## container_default_behavior: compatibility #######################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ check_mode: yes
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ state: started
+ container_default_behavior: compatibility
+ check_mode: yes
+ register: start_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
+
+####################################################################
+## container_default_behavior: no_defaults #########################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ check_mode: yes
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ check_mode: yes
+ register: start_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/image-ids.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/image-ids.yml
new file mode 100644
index 00000000..ff4a97a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/image-ids.yml
@@ -0,0 +1,141 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-iid' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+- name: Pull images
+ docker_image:
+ name: "{{ image }}"
+ source: pull
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ loop_control:
+ loop_var: image
+
+- name: Get image ID of {{ docker_test_image_hello_world }} and {{ docker_test_image_alpine }} images
+ docker_image_info:
+ name:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ register: image_info
+
+- assert:
+ that:
+ - image_info.images | length == 2
+
+- name: Print image IDs
+ debug:
+ msg: "{{ docker_test_image_hello_world }}: {{ image_info.images[0].Id }}; {{ docker_test_image_alpine }}: {{ image_info.images[1].Id }}"
+
+- name: Create container with {{ docker_test_image_hello_world }} image via ID
+ docker_container:
+ image: "{{ image_info.images[0].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: create_1
+
+- name: Create container with {{ docker_test_image_hello_world }} image via ID (idempotent)
+ docker_container:
+ image: "{{ image_info.images[0].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: create_2
+
+- name: Create container with {{ docker_test_image_alpine }} image via ID
+ docker_container:
+ image: "{{ image_info.images[1].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: create_3
+
+- name: Create container with {{ docker_test_image_alpine }} image via ID (idempotent)
+ docker_container:
+ image: "{{ image_info.images[1].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: create_4
+
+- name: Untag image
+ # Image will not be deleted since the container still uses it
+ docker_image:
+ name: "{{ docker_test_image_alpine }}"
+ force_absent: yes
+ state: absent
+
+- name: Create container with {{ docker_test_image_alpine }} image via name (check mode, will pull, same image)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: present
+ register: create_5
+ check_mode: yes
+
+- name: Create container with {{ docker_test_image_alpine }} image via name (will pull, same image)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: present
+ register: create_6
+
+- name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is changed
+ - create_4 is not changed
+ - create_5 is changed
+ - create_6 is changed
+ - create_6.container.Image == image_info.images[1].Id
+ - create_6.container.Id == create_4.container.Id # make sure container wasn't recreated
+
+- name: Create container with {{ docker_test_image_digest_base }} image via old digest
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: digest_1
+
+- name: Create container with {{ docker_test_image_digest_base }} image via old digest (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: digest_2
+
+- name: Update container with {{ docker_test_image_digest_base }} image via new digest
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v2 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: digest_3
+
+- name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - digest_1 is changed
+ - digest_2 is not changed
+ - digest_3 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml
new file mode 100644
index 00000000..dbb3967f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml
@@ -0,0 +1,445 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-mounts' }}"
+ cname_h1: "{{ cname_prefix ~ '-mounts-h1' }}"
+ cname_h2: "{{ cname_prefix ~ '-mounts-h2' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1, cname_h2] }}"
+
+####################################################################
+## keep_volumes ####################################################
+####################################################################
+
+# TODO: - keep_volumes
+
+####################################################################
+## mounts ##########################################################
+####################################################################
+
+- name: mounts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: no
+ register: mounts_1
+ ignore_errors: yes
+
+- name: mounts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: no
+ - source: /tmp
+ target: /tmp
+ type: bind
+ register: mounts_2
+ ignore_errors: yes
+
+- name: mounts (less mounts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ register: mounts_3
+ ignore_errors: yes
+
+- name: mounts (more mounts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /tmp
+ target: /somewhereelse
+ type: bind
+ read_only: yes
+ force_kill: yes
+ register: mounts_4
+ ignore_errors: yes
+
+- name: mounts (different modes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /tmp
+ target: /somewhereelse
+ type: bind
+ read_only: no
+ force_kill: yes
+ register: mounts_5
+ ignore_errors: yes
+
+- name: mounts (endpoint collision)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /home
+ target: /x
+ type: bind
+ - source: /etc
+ target: /x
+ type: bind
+ read_only: no
+ force_kill: yes
+ register: mounts_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - mounts_1 is changed
+ - mounts_2 is not changed
+ - mounts_3 is not changed
+ - mounts_4 is changed
+ - mounts_5 is changed
+ - mounts_6 is failed
+ - "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg"
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg"
+ - "'Minimum version required is 2.6.0 ' in mounts_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts + volumes ################################################
+####################################################################
+
+- name: mounts + volumes
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: yes
+ volumes:
+ - /tmp:/tmp
+ register: mounts_volumes_1
+ ignore_errors: yes
+
+- name: mounts + volumes (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: yes
+ volumes:
+ - /tmp:/tmp
+ register: mounts_volumes_2
+ ignore_errors: yes
+
+- name: mounts + volumes (switching)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ read_only: no
+ volumes:
+ - /:/whatever:ro
+ force_kill: yes
+ register: mounts_volumes_3
+ ignore_errors: yes
+
+- name: mounts + volumes (collision, should fail)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ read_only: no
+ volumes:
+ - /tmp:/tmp
+ force_kill: yes
+ register: mounts_volumes_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - mounts_volumes_1 is changed
+ - mounts_volumes_2 is not changed
+ - mounts_volumes_3 is changed
+ - mounts_volumes_4 is failed
+ - "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg"
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_volumes_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in mounts_1.msg"
+ - "'Minimum version required is 2.6.0 ' in mounts_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## volume_driver ###################################################
+####################################################################
+
+- name: volume_driver
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: local
+ state: started
+ register: volume_driver_1
+
+- name: volume_driver (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: local
+ state: started
+ register: volume_driver_2
+
+- name: volume_driver (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: /
+ state: started
+ force_kill: yes
+ register: volume_driver_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - volume_driver_1 is changed
+ - volume_driver_2 is not changed
+ - volume_driver_3 is changed
+
+####################################################################
+## volumes #########################################################
+####################################################################
+
+- name: volumes
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/:/whatever:rw,z"
+ - "/anon:rw"
+ register: volumes_1
+
+- name: volumes (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/:/whatever:rw,z"
+ - "/tmp:/tmp"
+ - "/anon:rw"
+ register: volumes_2
+
+- name: volumes (less volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ register: volumes_3
+
+- name: volumes (more volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/tmp:/somewhereelse:ro,Z"
+ force_kill: yes
+ register: volumes_4
+
+- name: volumes (different modes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/tmp:/somewhereelse:ro"
+ force_kill: yes
+ register: volumes_5
+
+- name: volumes (collision)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/etc:/tmp"
+ - "/home:/tmp:ro"
+ force_kill: yes
+ register: volumes_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - volumes_1 is changed
+ - volumes_1.container.Config.Volumes | length == 1
+ - volumes_1.container.Config.Volumes['/anon:rw'] | length == 0
+ - volumes_2 is not changed
+ - volumes_3 is not changed
+ - volumes_4 is changed
+ - not volumes_4.container.Config.Volumes
+ - volumes_5 is changed
+ - volumes_6 is failed
+ - "'The mount point \"/tmp\" appears twice in the volumes option' in volumes_6.msg"
+
+####################################################################
+## volumes_from ####################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ volumes:
+ - "{{ '/tmp:/tmp' if container_name == cname_h1 else '/:/whatever:ro' }}"
+ loop:
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ loop_control:
+ loop_var: container_name
+
+- name: volumes_from
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h1 }}"
+ register: volumes_from_1
+
+- name: volumes_from (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h1 }}"
+ register: volumes_from_2
+
+- name: volumes_from (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h2 }}"
+ force_kill: yes
+ register: volumes_from_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ loop_control:
+ loop_var: container_name
+ diff: no
+
+- assert:
+ that:
+ - volumes_from_1 is changed
+ - volumes_from_2 is not changed
+ - volumes_from_3 is changed
+
+####################################################################
+####################################################################
+####################################################################
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/network.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/network.yml
new file mode 100644
index 00000000..9ef33643
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/network.yml
@@ -0,0 +1,747 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-network' }}"
+ cname_h1: "{{ cname_prefix ~ '-network-h1' }}"
+ nname_1: "{{ cname_prefix ~ '-network-1' }}"
+ nname_2: "{{ cname_prefix ~ '-network-2' }}"
+ nname_3: "{{ cname_prefix ~ '-network-3' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1] }}"
+ dnetworks: "{{ dnetworks + [nname_1, nname_2, nname_3] }}"
+
+- name: Create networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: present
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ loop_control:
+ loop_var: network_name
+ when: docker_py_version is version('1.10.0', '>=')
+
+- set_fact:
+ subnet_ipv4_base: 10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }}
+ subnet_ipv6_base: fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}
+ # If netaddr were installed on the controller, one could do:
+ # subnet_ipv4: "10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }}.0/24"
+ # subnet_ipv6: "fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}::/64"
+
+- set_fact:
+ subnet_ipv4: "{{ subnet_ipv4_base }}.0/24"
+ subnet_ipv6: "{{ subnet_ipv6_base }}::/64"
+ nname_3_ipv4_2: "{{ subnet_ipv4_base }}.2"
+ nname_3_ipv4_3: "{{ subnet_ipv4_base }}.3"
+ nname_3_ipv4_4: "{{ subnet_ipv4_base }}.4"
+ nname_3_ipv6_2: "{{ subnet_ipv6_base }}::2"
+ nname_3_ipv6_3: "{{ subnet_ipv6_base }}::3"
+ nname_3_ipv6_4: "{{ subnet_ipv6_base }}::4"
+ # If netaddr were installed on the controller, one could do:
+ # nname_3_ipv4_2: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(2) }}"
+ # nname_3_ipv4_3: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(3) }}"
+ # nname_3_ipv4_4: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(4) }}"
+ # nname_3_ipv6_2: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(2) }}"
+ # nname_3_ipv6_3: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(3) }}"
+ # nname_3_ipv6_4: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(4) }}"
+
+- debug:
+ msg: "Chose random IPv4 subnet {{ subnet_ipv4 }} and random IPv6 subnet {{ subnet_ipv6 }}"
+
+- name: Create network with fixed IPv4 and IPv6 subnets
+ docker_network:
+ name: "{{ nname_3 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: "{{ subnet_ipv4 }}"
+ - subnet: "{{ subnet_ipv6 }}"
+ state: present
+ when: docker_py_version is version('1.10.0', '>=')
+
+####################################################################
+## network_mode ####################################################
+####################################################################
+
+- name: network_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: host
+ register: network_mode_1
+
+- name: network_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: host
+ register: network_mode_2
+
+- name: network_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: none
+ force_kill: yes
+ register: network_mode_3
+
+- name: network_mode (container mode setup)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname_h1 }}"
+ state: started
+ register: cname_h1_id
+
+- name: network_mode (container mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: "container:{{ cname_h1_id.container.Id }}"
+ force_kill: yes
+ register: network_mode_4
+
+- name: network_mode (container mode idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: "container:{{ cname_h1 }}"
+ register: network_mode_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: no
+
+- assert:
+ that:
+ - network_mode_1 is changed
+ - network_mode_1.container.HostConfig.NetworkMode == 'host'
+ - network_mode_2 is not changed
+ - network_mode_2.container.HostConfig.NetworkMode == 'host'
+ - network_mode_3 is changed
+ - network_mode_3.container.HostConfig.NetworkMode == 'none'
+ - network_mode_4 is changed
+ - network_mode_4.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
+ - network_mode_5 is not changed
+ - network_mode_5.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
+
+####################################################################
+## networks, purge_networks for networks_cli_compatible=no #########
+####################################################################
+
+- block:
+ - name: networks_cli_compatible=no, networks w/o purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: no
+ register: networks_1
+
+ - name: networks_cli_compatible=no, networks w/o purge_networks (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: no
+ register: networks_2
+
+ - name: networks_cli_compatible=no, networks, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ purge_networks: yes
+ networks:
+ - name: bridge
+ - name: "{{ nname_1 }}"
+ networks_cli_compatible: no
+ force_kill: yes
+ register: networks_3
+
+ - name: networks_cli_compatible=no, networks, purge_networks (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ purge_networks: yes
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: bridge
+ networks_cli_compatible: no
+ register: networks_4
+
+ - name: networks_cli_compatible=no, networks (less networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: bridge
+ networks_cli_compatible: no
+ register: networks_5
+
+ - name: networks_cli_compatible=no, networks, purge_networks (less networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ purge_networks: yes
+ networks:
+ - name: bridge
+ networks_cli_compatible: no
+ force_kill: yes
+ register: networks_6
+
+ - name: networks_cli_compatible=no, networks, purge_networks (more networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ purge_networks: yes
+ networks:
+ - name: bridge
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: no
+ force_kill: yes
+ register: networks_7
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - assert:
+ that:
+ # networks_1 has networks 'default'/'bridge', nname_1, nname_2
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 3
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks"
+ # networks_2 has networks 'default'/'bridge', nname_1, nname_2
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 3
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ - nname_2 in networks_2.container.NetworkSettings.Networks
+ - "'default' in networks_2.container.NetworkSettings.Networks or 'bridge' in networks_2.container.NetworkSettings.Networks"
+ # networks_3 has networks 'bridge', nname_1
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_3.container.NetworkSettings.Networks
+ - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
+ # networks_4 has networks 'bridge', nname_1
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_4.container.NetworkSettings.Networks
+ - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
+ # networks_5 has networks 'bridge', nname_1
+ - networks_5 is not changed
+ - networks_5.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_5.container.NetworkSettings.Networks
+ - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks"
+ # networks_6 has networks 'bridge'
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
+ # networks_7 has networks 'bridge', nname_2
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks | length == 2
+ - nname_2 in networks_7.container.NetworkSettings.Networks
+ - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
+
+ when: docker_py_version is version('1.10.0', '>=')
+
+####################################################################
+## networks for networks_cli_compatible=yes ########################
+####################################################################
+
+- block:
+ - name: networks_cli_compatible=yes, networks specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ aliases:
+ - alias1
+ - alias2
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: yes
+ register: networks_1
+
+ - name: networks_cli_compatible=yes, networks specified (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: yes
+ register: networks_2
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - name: networks_cli_compatible=yes, empty networks list specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ register: networks_3
+
+ - name: networks_cli_compatible=yes, empty networks list specified (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ register: networks_4
+
+ - name: networks_cli_compatible=yes, empty networks list specified, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ purge_networks: yes
+ force_kill: yes
+ register: networks_5
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - name: networks_cli_compatible=yes, networks not specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: yes
+ force_kill: yes
+ register: networks_6
+
+ - name: networks_cli_compatible=yes, networks not specified (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: yes
+ register: networks_7
+
+ - name: networks_cli_compatible=yes, networks not specified, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: yes
+ purge_networks: yes
+ force_kill: yes
+ register: networks_8
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - debug: var=networks_3
+
+ - assert:
+ that:
+ # networks_1 has networks nname_1, nname_2
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ # networks_2 has networks nname_1, nname_2
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ - nname_2 in networks_2.container.NetworkSettings.Networks
+ # networks_3 has networks 'bridge'
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
+ # networks_4 has networks 'bridge'
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
+ # networks_5 has no networks
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks | length == 0
+ # networks_6 has networks 'bridge'
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
+ # networks_7 has networks 'bridge'
+ - networks_7 is not changed
+ - networks_7.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
+ # networks_8 has no networks
+ - networks_8 is changed
+ - networks_8.container.NetworkSettings.Networks | length == 0
+
+ when: docker_py_version is version('1.10.0', '>=')
+
+####################################################################
+## networks with comparisons #######################################
+####################################################################
+
+- block:
+ - name: create container with one network
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ networks_cli_compatible: yes
+ register: networks_1
+
+ - name: different networks, comparisons=ignore
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: yes
+ comparisons:
+ networks: ignore
+ register: networks_2
+
+ - name: less networks, comparisons=ignore
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ comparisons:
+ networks: ignore
+ register: networks_3
+
+ - name: less networks, comparisons=allow_more_present
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ comparisons:
+ networks: allow_more_present
+ register: networks_4
+
+ - name: different networks, comparisons=allow_more_present
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: yes
+ comparisons:
+ networks: allow_more_present
+ force_kill: yes
+ register: networks_5
+
+ - name: different networks, comparisons=strict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: yes
+ comparisons:
+ networks: strict
+ force_kill: yes
+ register: networks_6
+
+ - name: less networks, comparisons=strict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: yes
+ comparisons:
+ networks: strict
+ force_kill: yes
+ register: networks_7
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - assert:
+ that:
+ # networks_1 has networks nname_1
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ # networks_2 has networks nname_1
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ # networks_3 has networks nname_1
+ - networks_3 is not changed
+ - networks_3.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_3.container.NetworkSettings.Networks
+ # networks_4 has networks nname_1
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_4.container.NetworkSettings.Networks
+ # networks_5 has networks nname_1, nname_2
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_5.container.NetworkSettings.Networks
+ - nname_2 in networks_5.container.NetworkSettings.Networks
+ # networks_6 has networks nname_2
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - nname_2 in networks_6.container.NetworkSettings.Networks
+ # networks_7 has no networks
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks | length == 0
+
+ when: docker_py_version is version('1.10.0', '>=')
+
+####################################################################
+## networks with IP address ########################################
+####################################################################
+
+- block:
+ - name: create container (stopped) with one network and fixed IP
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_2 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: yes
+ register: networks_1
+
+ - name: create container (stopped) with one network and fixed IP (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_2 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: yes
+ register: networks_2
+
+ - name: create container (stopped) with one network and fixed IP (different IPv4)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_3 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: yes
+ register: networks_3
+
+ - name: create container (stopped) with one network and fixed IP (different IPv6)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_3 }}"
+ ipv6_address: "{{ nname_3_ipv6_3 }}"
+ networks_cli_compatible: yes
+ register: networks_4
+
+ - name: create container (started) with one network and fixed IP
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: networks_5
+
+ - name: create container (started) with one network and fixed IP (different IPv4)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_3 }}"
+ networks_cli_compatible: yes
+ force_kill: yes
+ register: networks_6
+
+ - name: create container (started) with one network and fixed IP (different IPv6)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_4 }}"
+ networks_cli_compatible: yes
+ force_kill: yes
+ register: networks_7
+
+ - name: create container (started) with one network and fixed IP (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_4 }}"
+ networks_cli_compatible: yes
+ register: networks_8
+
+ - name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+ - assert:
+ that:
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_4 is changed
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3
+ - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_8 is not changed
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+
+ when: docker_py_version is version('1.10.0', '>=')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ force: yes
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ - "{{ nname_3 }}"
+ loop_control:
+ loop_var: network_name
+ when: docker_py_version is version('1.10.0', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/options.yml
new file mode 100644
index 00000000..ae468273
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/options.yml
@@ -0,0 +1,3816 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-options' }}"
+ cname_h1: "{{ cname_prefix ~ '-options-h1' }}"
+ cname_h2: "{{ cname_prefix ~ '-options-h2' }}"
+ cname_h3: "{{ cname_prefix ~ '-options-h3' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1, cname_h2, cname_h3] }}"
+
+####################################################################
+## auto_remove #####################################################
+####################################################################
+
+- name: auto_remove
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "echo"'
+ name: "{{ cname }}"
+ state: started
+ auto_remove: yes
+ register: auto_remove_1
+ ignore_errors: yes
+
+- name: Give container 1 second to be sure it terminated
+ pause:
+ seconds: 1
+
+- name: auto_remove (verify)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: auto_remove_2
+ ignore_errors: yes
+
+- assert:
+ that:
+ - auto_remove_1 is changed
+ - auto_remove_2 is not changed
+ when: docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - auto_remove_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in auto_remove_1.msg"
+ - "'Minimum version required is 2.1.0 ' in auto_remove_1.msg"
+ when: docker_py_version is version('2.1.0', '<')
+
+####################################################################
+## blkio_weight ####################################################
+####################################################################
+
+- name: blkio_weight
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 123
+ register: blkio_weight_1
+
+- name: blkio_weight (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 123
+ register: blkio_weight_2
+
+- name: blkio_weight (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 234
+ force_kill: yes
+ register: blkio_weight_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - blkio_weight_1 is changed
+ - "blkio_weight_2 is not changed or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in blkio_weight_2.warnings"
+ - blkio_weight_3 is changed
+
+####################################################################
+## cap_drop, capabilities ##########################################
+####################################################################
+
+- name: capabilities, cap_drop
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_1
+
+- name: capabilities, cap_drop (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_2
+
+- name: capabilities, cap_drop (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities: []
+ cap_drop:
+ - all
+ register: capabilities_3
+
+- name: capabilities, cap_drop (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - setgid
+ cap_drop:
+ - all
+ force_kill: yes
+ register: capabilities_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - capabilities_1 is changed
+ - capabilities_2 is not changed
+ - capabilities_3 is not changed
+ - capabilities_4 is changed
+
+####################################################################
+## command #########################################################
+####################################################################
+
+- name: command
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_1
+
+- name: command (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_2
+
+- name: command (less parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: command_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - command_1 is changed
+ - command_2 is not changed
+ - command_3 is changed
+
+####################################################################
+## cpu_period ######################################################
+####################################################################
+
+- name: cpu_period
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 90000
+ state: started
+ register: cpu_period_1
+
+- name: cpu_period (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 90000
+ state: started
+ register: cpu_period_2
+
+- name: cpu_period (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 50000
+ state: started
+ force_kill: yes
+ register: cpu_period_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpu_period_1 is changed
+ - cpu_period_2 is not changed
+ - cpu_period_3 is changed
+
+####################################################################
+## cpu_quota #######################################################
+####################################################################
+
+- name: cpu_quota
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 150000
+ state: started
+ register: cpu_quota_1
+
+- name: cpu_quota (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 150000
+ state: started
+ register: cpu_quota_2
+
+- name: cpu_quota (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 50000
+ state: started
+ force_kill: yes
+ register: cpu_quota_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpu_quota_1 is changed
+ - cpu_quota_2 is not changed
+ - cpu_quota_3 is changed
+
+####################################################################
+## cpu_shares ######################################################
+####################################################################
+
+- name: cpu_shares
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 900
+ state: started
+ register: cpu_shares_1
+
+- name: cpu_shares (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 900
+ state: started
+ register: cpu_shares_2
+
+- name: cpu_shares (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 1100
+ state: started
+ force_kill: yes
+ register: cpu_shares_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpu_shares_1 is changed
+ - cpu_shares_2 is not changed
+ - cpu_shares_3 is changed
+
+####################################################################
+## cpuset_cpus #####################################################
+####################################################################
+
+- name: cpuset_cpus
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "0"
+ state: started
+ register: cpuset_cpus_1
+
+- name: cpuset_cpus (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "0"
+ state: started
+ register: cpuset_cpus_2
+
+- name: cpuset_cpus (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "1"
+ state: started
+ force_kill: yes
+ # This will fail if the system the test is run on doesn't have
+ # multiple CPUs/cores available.
+ ignore_errors: yes
+ register: cpuset_cpus_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpuset_cpus_1 is changed
+ - cpuset_cpus_2 is not changed
+ - cpuset_cpus_3 is failed or cpuset_cpus_3 is changed
+
+####################################################################
+## cpuset_mems #####################################################
+####################################################################
+
+- name: cpuset_mems
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "0"
+ state: started
+ register: cpuset_mems_1
+
+- name: cpuset_mems (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "0"
+ state: started
+ register: cpuset_mems_2
+
+- name: cpuset_mems (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "1"
+ state: started
+ force_kill: yes
+ # This will fail if the system the test is run on doesn't have
+ # multiple MEMs available.
+ ignore_errors: yes
+ register: cpuset_mems_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpuset_mems_1 is changed
+ - cpuset_mems_2 is not changed
+ - cpuset_mems_3 is failed or cpuset_mems_3 is changed
+
+####################################################################
+## cpus ############################################################
+####################################################################
+
+- name: cpus
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1
+ state: started
+ ignore_errors: yes
+ register: cpus_1
+
+- name: cpus (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1
+ state: started
+ ignore_errors: yes
+ register: cpus_2
+
+- name: cpus (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1.5
+ state: started
+ force_kill: yes
+ # This will fail if the system the test is run on doesn't have
+ # multiple CPUs/cores available.
+ ignore_errors: yes
+ register: cpus_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - cpus_1 is changed
+ - cpus_2 is not changed and cpus_2 is not failed
+ - cpus_3 is failed or cpus_3 is changed
+ when: docker_py_version is version('2.3.0', '>=')
+- assert:
+ that:
+ - cpus_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in cpus_1.msg"
+ - "'Minimum version required is 2.3.0 ' in cpus_1.msg"
+ when: docker_py_version is version('2.3.0', '<')
+
+####################################################################
+## debug ###########################################################
+####################################################################
+
+- name: debug (create)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ debug: yes
+ register: debug_1
+
+- name: debug (start)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ debug: yes
+ register: debug_2
+
+- name: debug (stop)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ force_kill: yes
+ debug: yes
+ register: debug_3
+
+- name: debug (absent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ debug: yes
+ force_kill: yes
+ register: debug_4
+
+- assert:
+ that:
+ - debug_1 is changed
+ - debug_2 is changed
+ - debug_3 is changed
+ - debug_4 is changed
+
+####################################################################
+## detach, cleanup #################################################
+####################################################################
+
+- name: detach without cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: no
+ register: detach_no_cleanup
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_no_cleanup_cleanup
+ diff: no
+
+- name: detach with cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: no
+ cleanup: yes
+ register: detach_cleanup
+
+- name: cleanup (unnecessary)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_cleanup_cleanup
+ diff: no
+
+- name: detach with auto_remove and cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: no
+ auto_remove: yes
+ cleanup: yes
+ register: detach_auto_remove
+ ignore_errors: yes
+
+- name: cleanup (unnecessary)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_auto_remove_cleanup
+ diff: no
+
+- assert:
+ that:
+ # NOTE that 'Output' sometimes fails to contain the correct output
+ # of hello-world. We don't know why this happens, but it happens
+ # often enough to be annoying. That's why we disable this for now,
+ # and simply test that 'Output' is contained in the result.
+ - "'Output' in detach_no_cleanup.container"
+ # - "'Hello from Docker!' in detach_no_cleanup.container.Output"
+ - detach_no_cleanup_cleanup is changed
+ - "'Output' in detach_cleanup.container"
+ # - "'Hello from Docker!' in detach_cleanup.container.Output"
+ - detach_cleanup_cleanup is not changed
+- assert:
+ that:
+ - "'Cannot retrieve result as auto_remove is enabled' == detach_auto_remove.container.Output"
+ - detach_auto_remove_cleanup is not changed
+ when: docker_py_version is version('2.1.0', '>=')
+
+####################################################################
+## devices #########################################################
+####################################################################
+
+- name: devices
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ register: devices_1
+
+- name: devices (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/random:/dev/virt-random:rwm"
+ register: devices_2
+
+- name: devices (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ register: devices_3
+
+- name: devices (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: yes
+ register: devices_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - devices_1 is changed
+ - devices_2 is not changed
+ - devices_3 is not changed
+ - devices_4 is changed
+
+####################################################################
+## device_read_bps #################################################
+####################################################################
+
+- name: device_read_bps
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 20M
+ - path: /dev/urandom
+ rate: 10K
+ register: device_read_bps_1
+ ignore_errors: yes
+
+- name: device_read_bps (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/urandom
+ rate: 10K
+ - path: /dev/random
+ rate: 20M
+ register: device_read_bps_2
+ ignore_errors: yes
+
+- name: device_read_bps (lesser entries)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 20M
+ register: device_read_bps_3
+ ignore_errors: yes
+
+- name: device_read_bps (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 10M
+ - path: /dev/urandom
+ rate: 5K
+ force_kill: yes
+ register: device_read_bps_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - device_read_bps_1 is changed
+ - device_read_bps_2 is not changed
+ - device_read_bps_3 is not changed
+ - device_read_bps_4 is changed
+ when: docker_py_version is version('1.9.0', '>=')
+- assert:
+ that:
+ - device_read_bps_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in device_read_bps_1.msg"
+ - "'Minimum version required is 1.9.0 ' in device_read_bps_1.msg"
+ when: docker_py_version is version('1.9.0', '<')
+
+####################################################################
+## device_read_iops ################################################
+####################################################################
+
+- name: device_read_iops
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 10
+ - path: /dev/urandom
+ rate: 20
+ register: device_read_iops_1
+ ignore_errors: yes
+
+- name: device_read_iops (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/urandom
+ rate: "20"
+ - path: /dev/random
+ rate: 10
+ register: device_read_iops_2
+ ignore_errors: yes
+
+- name: device_read_iops (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 10
+ register: device_read_iops_3
+ ignore_errors: yes
+
+- name: device_read_iops (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 30
+ - path: /dev/urandom
+ rate: 50
+ force_kill: yes
+ register: device_read_iops_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - device_read_iops_1 is changed
+ - device_read_iops_2 is not changed
+ - device_read_iops_3 is not changed
+ - device_read_iops_4 is changed
+ when: docker_py_version is version('1.9.0', '>=')
+- assert:
+ that:
+ - device_read_iops_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in device_read_iops_1.msg"
+ - "'Minimum version required is 1.9.0 ' in device_read_iops_1.msg"
+ when: docker_py_version is version('1.9.0', '<')
+
+####################################################################
+## device_write_bps and device_write_iops ##########################
+####################################################################
+
+- name: device_write_bps and device_write_iops
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 10M
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 30
+ register: device_write_limit_1
+ ignore_errors: yes
+
+- name: device_write_bps and device_write_iops (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 10M
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 30
+ register: device_write_limit_2
+ ignore_errors: yes
+
+ - name: device_write_bps and device_write_iops (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 20K
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 100
+ force_kill: yes
+ register: device_write_limit_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - device_write_limit_1 is changed
+ - device_write_limit_2 is not changed
+ - device_write_limit_3 is changed
+ when: docker_py_version is version('1.9.0', '>=')
+- assert:
+ that:
+ - device_write_limit_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in device_write_limit_1.msg"
+ - "'Minimum version required is 1.9.0 ' in device_write_limit_1.msg"
+ when: docker_py_version is version('1.9.0', '<')
+
+####################################################################
+## device_requests #################################################
+####################################################################
+
+- name: device_requests
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_requests: []
+ register: device_requests_1
+ ignore_errors: yes
+
+- name: device_requests (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_requests: []
+ register: device_requests_2
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - device_requests_1 is changed
+ - device_requests_2 is not changed
+ when: docker_py_version is version('4.3.0', '>=') and docker_api_version is version('1.40', '>=')
+- assert:
+ that:
+ - device_requests_1 is failed
+ - |
+ (('version is ' ~ docker_py_version ~ ' ') in device_requests_1.msg and 'Minimum version required is 4.3.0 ' in device_requests_1.msg) or
+ (('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg)
+ when: docker_py_version is version('4.3.0', '<') or docker_api_version is version('1.40', '<')
+
+####################################################################
+## dns_opts ########################################################
+####################################################################
+
+- name: dns_opts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ - rotate
+ register: dns_opts_1
+ ignore_errors: yes
+
+- name: dns_opts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - rotate
+ - "timeout:10"
+ register: dns_opts_2
+ ignore_errors: yes
+
+- name: dns_opts (less resolv.conf options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ register: dns_opts_3
+ ignore_errors: yes
+
+- name: dns_opts (more resolv.conf options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ - no-check-names
+ force_kill: yes
+ register: dns_opts_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - dns_opts_1 is changed
+ - dns_opts_2 is not changed
+ - dns_opts_3 is not changed
+ - dns_opts_4 is changed
+ when: docker_py_version is version('1.10.0', '>=')
+- assert:
+ that:
+ - dns_opts_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in dns_opts_1.msg"
+ - "'Minimum version required is 1.10.0 ' in dns_opts_1.msg"
+ when: docker_py_version is version('1.10.0', '<')
+
+####################################################################
+## dns_search_domains ##############################################
+####################################################################
+
+- name: dns_search_domains
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.com
+ - example.org
+ register: dns_search_domains_1
+
+- name: dns_search_domains (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.com
+ - example.org
+ register: dns_search_domains_2
+
+- name: dns_search_domains (different order)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.org
+ - example.com
+ force_kill: yes
+ register: dns_search_domains_3
+
+- name: dns_search_domains (changed elements)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - ansible.com
+ - example.com
+ force_kill: yes
+ register: dns_search_domains_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - dns_search_domains_1 is changed
+ - dns_search_domains_2 is not changed
+ - dns_search_domains_3 is changed
+ - dns_search_domains_4 is changed
+
+####################################################################
+## dns_servers #####################################################
+####################################################################
+
+- name: dns_servers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_servers_1
+
+- name: dns_servers (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_servers_2
+
+- name: dns_servers (changed order)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 8.8.8.8
+ - 1.1.1.1
+ force_kill: yes
+ register: dns_servers_3
+
+- name: dns_servers (changed elements)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 8.8.8.8
+ - 9.9.9.9
+ force_kill: yes
+ register: dns_servers_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - dns_servers_1 is changed
+ - dns_servers_2 is not changed
+ - dns_servers_3 is changed
+ - dns_servers_4 is changed
+
+####################################################################
+## domainname ######################################################
+####################################################################
+
+- name: domainname
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ domainname: example.com
+ state: started
+ register: domainname_1
+
+- name: domainname (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ domainname: example.com
+ state: started
+ register: domainname_2
+
+- name: domainname (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ domainname: example.org
+ state: started
+ force_kill: yes
+ register: domainname_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - domainname_1 is changed
+ - domainname_2 is not changed
+ - domainname_3 is changed
+
+####################################################################
+## entrypoint ######################################################
+####################################################################
+
+- name: entrypoint
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_1
+
+- name: entrypoint (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_2
+
+- name: entrypoint (change order, should not be idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 10m'"
+ - "-v"
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: entrypoint_3
+
+- name: entrypoint (less parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: entrypoint_4
+
+- name: entrypoint (other parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 5m'"
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: entrypoint_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - entrypoint_1 is changed
+ - entrypoint_2 is not changed
+ - entrypoint_3 is changed
+ - entrypoint_4 is changed
+ - entrypoint_5 is changed
+
+####################################################################
+## env #############################################################
+####################################################################
+
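+# env values have to be strings; booleans and other YAML scalars are therefore
+# quoted ("False", "true", "yes"). The env_5 task below checks that an unquoted
+# value is rejected with a 'Non-string value found for env option.' error.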
+- name: env
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ TEST2: val2
+ TEST3: "False"
+ TEST4: "true"
+ TEST5: "yes"
+ register: env_1
+
+- name: env (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST2: val2
+ TEST1: val1
+ TEST5: "yes"
+ TEST3: "False"
+ TEST4: "true"
+ register: env_2
+
+- name: env (less environment variables)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ register: env_3
+
+- name: env (more environment variables)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ TEST3: val3
+ force_kill: yes
+ register: env_4
+
+- name: env (fail unwrapped values)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: true
+ force_kill: yes
+ register: env_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - env_1 is changed
+ - env_2 is not changed
+ - env_3 is not changed
+ - env_4 is changed
+ - env_5 is failed
+ - "('Non-string value found for env option.') in env_5.msg"
+
+####################################################################
+## env_file ########################################################
+####################################################################
+
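+# env_file reads environment variables from a file; the files/env-file fixture is
+# assumed to contain plain KEY=value lines (the usual Docker env-file format).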
+- name: env_file
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ role_path }}/files/env-file"
+ register: env_file_1
+
+- name: env_file (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ role_path }}/files/env-file"
+ register: env_file_2
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - env_file_1 is changed
+ - env_file_2 is not changed
+
+####################################################################
+## etc_hosts #######################################################
+####################################################################
+
+- name: etc_hosts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ etc_hosts:
+ example.com: 1.2.3.4
+ example.org: 4.3.2.1
+ register: etc_hosts_1
+
+- name: etc_hosts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ etc_hosts:
+ example.org: 4.3.2.1
+ example.com: 1.2.3.4
+ register: etc_hosts_2
+
+- name: etc_hosts (less hosts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ etc_hosts:
+ example.com: 1.2.3.4
+ register: etc_hosts_3
+
+- name: etc_hosts (more hosts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ etc_hosts:
+ example.com: 1.2.3.4
+ example.us: 1.2.3.5
+ force_kill: yes
+ register: etc_hosts_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - etc_hosts_1 is changed
+ - etc_hosts_2 is not changed
+ - etc_hosts_3 is not changed
+ - etc_hosts_4 is changed
+
+####################################################################
+## exposed_ports ###################################################
+####################################################################
+
+- name: exposed_ports
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ register: exposed_ports_1
+
+- name: exposed_ports (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ - "9001"
+ register: exposed_ports_2
+
+- name: exposed_ports (less ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ register: exposed_ports_3
+
+- name: exposed_ports (more ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ - "9003"
+ force_kill: yes
+ register: exposed_ports_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - exposed_ports_1 is changed
+ - exposed_ports_2 is not changed
+ - exposed_ports_3 is not changed
+ - exposed_ports_4 is changed
+
+####################################################################
+## force_kill ######################################################
+####################################################################
+
+# TODO: - force_kill
+
+####################################################################
+## groups ##########################################################
+####################################################################
+
+- name: groups
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_1
+
+- name: groups (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "5678"
+ - "1234"
+ register: groups_2
+
+- name: groups (less groups)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ register: groups_3
+
+- name: groups (more groups)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ - "2345"
+ force_kill: yes
+ register: groups_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - groups_1 is changed
+ - groups_2 is not changed
+ - groups_3 is not changed
+ - groups_4 is changed
+
+####################################################################
+## healthcheck #####################################################
+####################################################################
+
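+# healthcheck timeout/interval accept duration strings built from h/m/s/ms/us
+# components (for example "2s" or "0h0m2s3ms4us"), so they can be normalized to
+# what the Docker API expects.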
+- name: healthcheck
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ force_kill: yes
+ register: healthcheck_1
+ ignore_errors: yes
+
+- name: healthcheck (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ force_kill: yes
+ register: healthcheck_2
+ ignore_errors: yes
+
+- name: healthcheck (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 3s
+ interval: 0h1m2s3ms4us
+ retries: 3
+ force_kill: yes
+ register: healthcheck_3
+ ignore_errors: yes
+
+- name: healthcheck (no change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: healthcheck_4
+ ignore_errors: yes
+
+- name: healthcheck (disabled)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - NONE
+ force_kill: yes
+ register: healthcheck_5
+ ignore_errors: yes
+
+- name: healthcheck (disabled, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - NONE
+ force_kill: yes
+ register: healthcheck_6
+ ignore_errors: yes
+
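+# A plain string for healthcheck.test is commonly treated as a shell command
+# (CMD-SHELL) rather than an exec-style CMD list, so switching from the list form
+# above should report changed once (healthcheck_7) and then be idempotent (healthcheck_8).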
+- name: healthcheck (string in healthcheck test, changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test: "sleep 1"
+ force_kill: yes
+ register: healthcheck_7
+ ignore_errors: yes
+
+- name: healthcheck (string in healthcheck test, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test: "sleep 1"
+ force_kill: yes
+ register: healthcheck_8
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - healthcheck_1 is changed
+ - healthcheck_2 is not changed
+ - healthcheck_3 is changed
+ - healthcheck_4 is not changed
+ - healthcheck_5 is changed
+ - healthcheck_6 is not changed
+ - healthcheck_7 is changed
+ - healthcheck_8 is not changed
+ when: docker_py_version is version('2.0.0', '>=')
+- assert:
+ that:
+ - healthcheck_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in healthcheck_1.msg"
+ - "'Minimum version required is 2.0.0 ' in healthcheck_1.msg"
+ when: docker_py_version is version('2.0.0', '<')
+
+####################################################################
+## hostname ########################################################
+####################################################################
+
+- name: hostname
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ hostname: me.example.com
+ state: started
+ register: hostname_1
+
+- name: hostname (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ hostname: me.example.com
+ state: started
+ register: hostname_2
+
+- name: hostname (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ hostname: me.example.org
+ state: started
+ force_kill: yes
+ register: hostname_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - hostname_1 is changed
+ - hostname_2 is not changed
+ - hostname_3 is changed
+
+####################################################################
+## init ############################################################
+####################################################################
+
+- name: init
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ init: yes
+ state: started
+ register: init_1
+ ignore_errors: yes
+
+- name: init (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ init: yes
+ state: started
+ register: init_2
+ ignore_errors: yes
+
+- name: init (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ init: no
+ state: started
+ force_kill: yes
+ register: init_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - init_1 is changed
+ - init_2 is not changed
+ - init_3 is changed
+ when: docker_py_version is version('2.2.0', '>=')
+- assert:
+ that:
+ - init_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in init_1.msg"
+ - "'Minimum version required is 2.2.0 ' in init_1.msg"
+ when: docker_py_version is version('2.2.0', '<')
+
+####################################################################
+## interactive #####################################################
+####################################################################
+
+- name: interactive
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ interactive: yes
+ state: started
+ register: interactive_1
+
+- name: interactive (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ interactive: yes
+ state: started
+ register: interactive_2
+
+- name: interactive (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ interactive: no
+ state: started
+ force_kill: yes
+ register: interactive_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - interactive_1 is changed
+ - interactive_2 is not changed
+ - interactive_3 is changed
+
+####################################################################
+## image / ignore_image ############################################
+####################################################################
+
+- name: Pull {{ docker_test_image_hello_world }} image to make sure ignore_image test succeeds
+ # If the image isn't there, it will pull it and return 'changed'.
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ source: pull
+
+- name: image
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_1
+
+- name: image (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_2
+
+- name: ignore_image
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ ignore_image: yes
+ name: "{{ cname }}"
+ state: started
+ register: ignore_image
+
+- name: image change
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: image_change
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - image_1 is changed
+ - image_2 is not changed
+ - ignore_image is not changed
+ - image_change is changed
+
+####################################################################
+## ipc_mode ########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ ipc_mode: shareable
+ loop:
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+
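+# ipc_mode "container:<name>" joins the helper's IPC namespace; on newer Docker
+# daemons that only works if the helper was started with ipc_mode: shareable,
+# which is why the helper above sets it explicitly.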
+- name: ipc_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: "container:{{ cname_h1 }}"
+ # ipc_mode: shareable
+ register: ipc_mode_1
+
+- name: ipc_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: "container:{{ cname_h1 }}"
+ # ipc_mode: shareable
+ register: ipc_mode_2
+
+- name: ipc_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: private
+ force_kill: yes
+ register: ipc_mode_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: no
+
+- assert:
+ that:
+ - ipc_mode_1 is changed
+ - ipc_mode_2 is not changed
+ - ipc_mode_3 is changed
+
+####################################################################
+## kernel_memory ###################################################
+####################################################################
+
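+# kernel_memory may be rejected by the runtime (for example when kernel memory
+# accounting is disabled in the runc build), hence ignore_errors on these tasks and
+# the conditional assert at the end of this block.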
+- name: kernel_memory
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 8M
+ state: started
+ register: kernel_memory_1
+ ignore_errors: yes
+
+- name: kernel_memory (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 8M
+ state: started
+ register: kernel_memory_2
+ ignore_errors: yes
+
+- name: kernel_memory (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 6M
+ state: started
+ force_kill: yes
+ register: kernel_memory_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+ ignore_errors: yes
+
+- assert:
+ that:
+ - kernel_memory_1 is changed
+ - kernel_memory_2 is not changed
+ - kernel_memory_3 is changed
+ when: kernel_memory_1 is not failed or 'kernel memory accounting disabled in this runc build' not in kernel_memory_1.msg
+
+####################################################################
+## kill_signal #####################################################
+####################################################################
+
+# TODO: - kill_signal
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: labels_2
+
+- name: labels (less labels)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ register: labels_3
+
+- name: labels (more labels)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: yes
+ register: labels_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is not changed
+ - labels_4 is changed
+
+####################################################################
+## links ###########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ loop:
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ - "{{ cname_h3 }}"
+ loop_control:
+ loop_var: container_name
+
+- name: links
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ - "{{ cname_h2 }}:test2"
+ register: links_1
+
+- name: links (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h2 }}:test2"
+ - "{{ cname_h1 }}:test1"
+ register: links_2
+
+- name: links (less links)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ register: links_3
+
+- name: links (more links)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ - "{{ cname_h3 }}:test3"
+ force_kill: yes
+ register: links_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ - "{{ cname_h3 }}"
+ loop_control:
+ loop_var: container_name
+ diff: no
+
+- assert:
+ that:
+ - links_1 is changed
+ - links_2 is not changed
+ - links_3 is not changed
+ - links_4 is changed
+
+####################################################################
+## log_driver ######################################################
+####################################################################
+
+- name: log_driver
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ register: log_driver_1
+
+- name: log_driver (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ register: log_driver_2
+
+- name: log_driver (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: syslog
+ force_kill: yes
+ register: log_driver_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - log_driver_1 is changed
+ - log_driver_2 is not changed
+ - log_driver_3 is changed
+
+####################################################################
+## log_options #####################################################
+####################################################################
+
+- name: log_options
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ env: os,customer
+ max-file: 5
+ register: log_options_1
+
+- name: log_options (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ env: os,customer
+ labels: production_status
+ max-file: 5
+ register: log_options_2
+
+- name: log_options (less log options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ register: log_options_3
+
+- name: log_options (more log options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ max-size: 10m
+ force_kill: yes
+ register: log_options_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - log_options_1 is changed
+ - log_options_2 is not changed
+ - "'Non-string value found for log_options option \\'max-file\\'. The value is automatically converted to \\'5\\'. If this is not correct, or you want to
+avoid such warnings, please quote the value.' in log_options_2.warnings"
+ - log_options_3 is not changed
+ - log_options_4 is changed
+
+####################################################################
+## mac_address #####################################################
+####################################################################
+
+- name: mac_address
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:33
+ state: started
+ register: mac_address_1
+
+- name: mac_address (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:33
+ state: started
+ register: mac_address_2
+
+- name: mac_address (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:44
+ state: started
+ force_kill: yes
+ register: mac_address_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - mac_address_1 is changed
+ - mac_address_2 is not changed
+ - mac_address_3 is changed
+
+####################################################################
+## memory ##########################################################
+####################################################################
+
+- name: memory
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 64M
+ state: started
+ register: memory_1
+
+- name: memory (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 64M
+ state: started
+ register: memory_2
+
+- name: memory (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 48M
+ state: started
+ force_kill: yes
+ register: memory_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - memory_1 is changed
+ - memory_2 is not changed
+ - memory_3 is changed
+
+####################################################################
+## memory_reservation ##############################################
+####################################################################
+
+- name: memory_reservation
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 64M
+ state: started
+ register: memory_reservation_1
+
+- name: memory_reservation (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 64M
+ state: started
+ register: memory_reservation_2
+
+- name: memory_reservation (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 48M
+ state: started
+ force_kill: yes
+ register: memory_reservation_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - memory_reservation_1 is changed
+ - memory_reservation_2 is not changed
+ - memory_reservation_3 is changed
+
+####################################################################
+## memory_swap #####################################################
+####################################################################
+
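+# Docker treats memory_swap as the total of memory plus swap, so memory must be
+# set alongside it; -1 means unlimited swap, which is why the idempotency assert
+# below also accepts HostConfig.MemorySwap == -1 on hosts that cannot apply the limit.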
+- name: memory_swap
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 64M
+ state: started
+ debug: yes
+ register: memory_swap_1
+
+- name: memory_swap (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 64M
+ state: started
+ debug: yes
+ register: memory_swap_2
+
+- name: memory_swap (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 48M
+ state: started
+ force_kill: yes
+ debug: yes
+ register: memory_swap_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - memory_swap_1 is changed
+ # Sometimes (in particular during integration tests, maybe when not running
+ # on a proper VM), memory_swap cannot be set and will be -1 afterwards.
+ - memory_swap_2 is not changed or memory_swap_2.container.HostConfig.MemorySwap == -1
+ - memory_swap_3 is changed
+
+- debug: var=memory_swap_1
+ when: memory_swap_2 is changed
+- debug: var=memory_swap_2
+ when: memory_swap_2 is changed
+- debug: var=memory_swap_3
+ when: memory_swap_2 is changed
+
+####################################################################
+## memory_swappiness ###############################################
+####################################################################
+
+- name: memory_swappiness
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 40
+ state: started
+ register: memory_swappiness_1
+
+- name: memory_swappiness (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 40
+ state: started
+ register: memory_swappiness_2
+
+- name: memory_swappiness (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 60
+ state: started
+ force_kill: yes
+ register: memory_swappiness_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - memory_swappiness_1 is changed
+ - memory_swappiness_2 is not changed
+ - memory_swappiness_3 is changed
+
+####################################################################
+## oom_killer ######################################################
+####################################################################
+
+- name: oom_killer
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: yes
+ state: started
+ register: oom_killer_1
+
+- name: oom_killer (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: yes
+ state: started
+ register: oom_killer_2
+
+- name: oom_killer (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: no
+ state: started
+ force_kill: yes
+ register: oom_killer_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - oom_killer_1 is changed
+ - oom_killer_2 is not changed
+ - oom_killer_3 is changed
+
+####################################################################
+## oom_score_adj ###################################################
+####################################################################
+
+- name: oom_score_adj
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 5
+ state: started
+ register: oom_score_adj_1
+
+- name: oom_score_adj (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 5
+ state: started
+ register: oom_score_adj_2
+
+- name: oom_score_adj (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 7
+ state: started
+ force_kill: yes
+ register: oom_score_adj_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - oom_score_adj_1 is changed
+ - oom_score_adj_2 is not changed
+ - oom_score_adj_3 is changed
+
+####################################################################
+## output_logs #####################################################
+####################################################################
+
+# TODO: - output_logs
+
+####################################################################
+## paused ##########################################################
+####################################################################
+
+- name: paused
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: yes
+ force_kill: yes
+ register: paused_1
+
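+# The {% raw %}...{% endraw %} markers stop Ansible's Jinja2 templating from
+# interpreting Docker's Go-template braces in the inspect format string.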
+- name: inspect paused
+ command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}"
+ register: paused_2
+
+- name: paused (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: yes
+ force_kill: yes
+ register: paused_3
+
+- name: paused (continue)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: no
+ force_kill: yes
+ register: paused_4
+
+- name: inspect paused
+ command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}"
+ register: paused_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - paused_1 is changed
+ - 'paused_2.stdout == "paused true"'
+ - paused_3 is not changed
+ - paused_4 is changed
+ - 'paused_5.stdout == "running false"'
+
+####################################################################
+## pid_mode ########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname_h1 }}"
+ state: started
+ register: pid_mode_helper
+
+- name: pid_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: "container:{{ pid_mode_helper.container.Id }}"
+ register: pid_mode_1
+ ignore_errors: yes
+ # docker-py < 2.0 does not support "arbitrary" pid_mode values
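+ # With older docker-py only pid_mode: host is accepted, so pid_mode_1/pid_mode_2
+ # are expected to fail there while the host-based task below still succeeds
+ # (see the version-gated asserts at the end of this block).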
+
+- name: pid_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: "container:{{ cname_h1 }}"
+ register: pid_mode_2
+ ignore_errors: yes
+ # docker-py < 2.0 does not support "arbitrary" pid_mode values
+
+- name: pid_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: host
+ force_kill: yes
+ register: pid_mode_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: no
+
+- assert:
+ that:
+ - pid_mode_1 is changed
+ - pid_mode_2 is not changed
+ - pid_mode_3 is changed
+ when: docker_py_version is version('2.0.0', '>=')
+- assert:
+ that:
+ - pid_mode_1 is failed
+ - pid_mode_2 is failed
+ - pid_mode_3 is changed
+ when: docker_py_version is version('2.0.0', '<')
+
+####################################################################
+## pids_limit ######################################################
+####################################################################
+
+- name: pids_limit
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 10
+ register: pids_limit_1
+ ignore_errors: yes
+
+- name: pids_limit (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 10
+ register: pids_limit_2
+ ignore_errors: yes
+
+- name: pids_limit (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 20
+ force_kill: yes
+ register: pids_limit_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - pids_limit_1 is changed
+ - pids_limit_2 is not changed
+ - pids_limit_3 is changed
+ when: docker_py_version is version('1.10.0', '>=')
+- assert:
+ that:
+ - pids_limit_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in pids_limit_1.msg"
+ - "'Minimum version required is 1.10.0 ' in pids_limit_1.msg"
+ when: docker_py_version is version('1.10.0', '<')
+
+####################################################################
+## privileged ######################################################
+####################################################################
+
+- name: privileged
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: yes
+ state: started
+ register: privileged_1
+
+- name: privileged (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: yes
+ state: started
+ register: privileged_2
+
+- name: privileged (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: no
+ state: started
+ force_kill: yes
+ register: privileged_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - privileged_1 is changed
+ - privileged_2 is not changed
+ - privileged_3 is changed
+
+####################################################################
+## published_ports #################################################
+####################################################################
+
+- name: published_ports
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9001'
+ - '9002'
+ register: published_ports_1
+
+- name: published_ports (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9001'
+ register: published_ports_2
+
+- name: published_ports (less published_ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ register: published_ports_3
+
+- name: published_ports (more published_ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9003'
+ force_kill: yes
+ register: published_ports_4
+
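+# IPv6 bind addresses are written in brackets so the address colons do not clash
+# with the port separators; '%test' is a zone (interface) identifier, and the
+# follow-up task checks that such specs are idempotent (published_ports_6).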
+- name: published_ports (ports with IP addresses)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '127.0.0.1:9002:9002/tcp'
+ - '[::1]:9003:9003/tcp'
+ - '[fe80::1%test]:90:90/tcp'
+ force_kill: yes
+ register: published_ports_5
+
+- name: published_ports (ports with IP addresses, idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '127.0.0.1:9002:9002/tcp'
+ - '[::1]:9003:9003/tcp'
+ - '[fe80::1%test]:90:90/tcp'
+ register: published_ports_6
+
+- name: published_ports (no published ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports: []
+ comparisons:
+ published_ports: strict
+ force_kill: yes
+ register: published_ports_7
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is not changed
+ - published_ports_4 is changed
+ - published_ports_5 is changed
+ - published_ports_6 is not changed
+ - published_ports_7 is changed
+
+####################################################################
+## pull ############################################################
+####################################################################
+
+# TODO: - pull
+
+####################################################################
+## read_only #######################################################
+####################################################################
+
+- name: read_only
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: yes
+ state: started
+ register: read_only_1
+
+- name: read_only (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: yes
+ state: started
+ register: read_only_2
+
+- name: read_only (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: no
+ state: started
+ force_kill: yes
+ register: read_only_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - read_only_1 is changed
+ - read_only_2 is not changed
+ - read_only_3 is changed
+
+####################################################################
+## restart_policy ##################################################
+####################################################################
+
+- name: restart_policy
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: always
+ state: started
+ register: restart_policy_1
+
+- name: restart_policy (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: always
+ state: started
+ register: restart_policy_2
+
+- name: restart_policy (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: unless-stopped
+ state: started
+ force_kill: yes
+ register: restart_policy_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - restart_policy_1 is changed
+ - restart_policy_2 is not changed
+ - restart_policy_3 is changed
+
+####################################################################
+## restart_retries #################################################
+####################################################################
+
+- name: restart_retries
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 5
+ state: started
+ register: restart_retries_1
+
+- name: restart_retries (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 5
+ state: started
+ register: restart_retries_2
+
+- name: restart_retries (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 2
+ state: started
+ force_kill: yes
+ register: restart_retries_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - restart_retries_1 is changed
+ - restart_retries_2 is not changed
+ - restart_retries_3 is changed
+
+####################################################################
+## runtime #########################################################
+####################################################################
+
+- name: runtime
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ runtime: runc
+ state: started
+ register: runtime_1
+ ignore_errors: yes
+
+- name: runtime (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ runtime: runc
+ state: started
+ register: runtime_2
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - runtime_1 is changed
+ - runtime_2 is not changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - runtime_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in runtime_1.msg"
+ - "'Minimum version required is 2.4.0 ' in runtime_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## security_opts ###################################################
+####################################################################
+
+# In case some of the options stop working, here are some more
+# options which *currently* work with all integration test targets:
+# no-new-privileges
+# label:disable
+# label=disable
+# label:level:s0:c100,c200
+# label=level:s0:c100,c200
+# label:type:svirt_apache_t
+# label=type:svirt_apache_t
+# label:user:root
+# label=user:root
+# seccomp:unconfined
+# seccomp=unconfined
+# apparmor:docker-default
+# apparmor=docker-default
+
+- name: security_opts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "label:level:s0:c100,c200"
+ - "no-new-privileges"
+ register: security_opts_1
+
+- name: security_opts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "no-new-privileges"
+ - "label:level:s0:c100,c200"
+ register: security_opts_2
+
+- name: security_opts (less security options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "no-new-privileges"
+ register: security_opts_3
+
+- name: security_opts (more security options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "label:disable"
+ - "no-new-privileges"
+ force_kill: yes
+ register: security_opts_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - security_opts_1 is changed
+ - security_opts_2 is not changed
+ - security_opts_3 is not changed
+ - security_opts_4 is changed
+
+####################################################################
+## shm_size ########################################################
+####################################################################
+
+- name: shm_size
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 96M
+ state: started
+ register: shm_size_1
+
+- name: shm_size (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 96M
+ state: started
+ register: shm_size_2
+
+- name: shm_size (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 75M
+ state: started
+ force_kill: yes
+ register: shm_size_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - shm_size_1 is changed
+ - shm_size_2 is not changed
+ - shm_size_3 is changed
+
+####################################################################
+## stop_signal #####################################################
+####################################################################
+
+- name: stop_signal
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "30"
+ state: started
+ register: stop_signal_1
+
+- name: stop_signal (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "30"
+ state: started
+ register: stop_signal_2
+
+- name: stop_signal (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "9"
+ state: started
+ force_kill: yes
+ register: stop_signal_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - stop_signal_1 is changed
+ - stop_signal_2 is not changed
+ - stop_signal_3 is changed
+
+####################################################################
+## stop_timeout ####################################################
+####################################################################
+
+- name: stop_timeout
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 2
+ state: started
+ register: stop_timeout_1
+
+- name: stop_timeout (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 2
+ state: started
+ register: stop_timeout_2
+
+- name: stop_timeout (no change)
+ # stop_timeout changes are ignored by default
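+ # To act on a changed stop_timeout one could presumably opt in via the comparisons
+ # option (for example comparisons: {stop_timeout: strict}), similar to the strict
+ # published_ports comparison used further up.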
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 1
+ state: started
+ register: stop_timeout_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - stop_timeout_1 is changed
+ - stop_timeout_2 is not changed
+ - stop_timeout_3 is not changed
+
+####################################################################
+## sysctls #########################################################
+####################################################################
+
+# In case some of the options stop working, here are some more
+# options which *currently* work with all integration test targets:
+# net.ipv4.conf.default.log_martians: 1
+# net.ipv4.conf.default.secure_redirects: 0
+# net.ipv4.conf.default.send_redirects: 0
+# net.ipv4.conf.all.log_martians: 1
+# net.ipv4.conf.all.accept_redirects: 0
+# net.ipv4.conf.all.secure_redirects: 0
+# net.ipv4.conf.all.send_redirects: 0
+
+- name: sysctls
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ net.ipv4.ip_forward: 1
+ register: sysctls_1
+ ignore_errors: yes
+
+- name: sysctls (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.ip_forward: 1
+ net.ipv4.icmp_echo_ignore_all: 1
+ register: sysctls_2
+ ignore_errors: yes
+
+- name: sysctls (less sysctls)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ register: sysctls_3
+ ignore_errors: yes
+
+- name: sysctls (more sysctls)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ net.ipv6.conf.default.accept_redirects: 0
+ force_kill: yes
+ register: sysctls_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - sysctls_1 is changed
+ - sysctls_2 is not changed
+ - sysctls_3 is not changed
+ - sysctls_4 is changed
+ when: docker_py_version is version('1.10.0', '>=')
+- assert:
+ that:
+ - sysctls_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in sysctls_1.msg"
+ - "'Minimum version required is 1.10.0 ' in sysctls_1.msg"
+ when: docker_py_version is version('1.10.0', '<')
+
+####################################################################
+## tmpfs ###########################################################
+####################################################################
+
+- name: tmpfs
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ - "/test2:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_1
+
+- name: tmpfs (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test2:rw,noexec,nosuid,size=65536k"
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_2
+
+- name: tmpfs (less tmpfs)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_3
+
+- name: tmpfs (more tmpfs)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ - "/test3:rw,noexec,nosuid,size=65536k"
+ force_kill: yes
+ register: tmpfs_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - tmpfs_1 is changed
+ - tmpfs_2 is not changed
+ - tmpfs_3 is not changed
+ - tmpfs_4 is changed
+
+####################################################################
+## tty #############################################################
+####################################################################
+
+- name: tty
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: yes
+ state: started
+ register: tty_1
+
+- name: tty (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: yes
+ state: started
+ register: tty_2
+
+- name: tty (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: no
+ state: started
+ force_kill: yes
+ register: tty_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - tty_1 is changed
+ - tty_2 is not changed
+ - tty_3 is changed
+
+####################################################################
+## ulimits #########################################################
+####################################################################
+
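+# Each ulimit entry below uses the format <name>:<soft limit>:<hard limit>.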
+- name: ulimits
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ - "nproc:3:6"
+ register: ulimits_1
+
+- name: ulimits (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nproc:3:6"
+ - "nofile:1234:1234"
+ register: ulimits_2
+
+- name: ulimits (less ulimits)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ register: ulimits_3
+
+- name: ulimits (more ulimits)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ - "sigpending:100:200"
+ force_kill: yes
+ register: ulimits_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - ulimits_1 is changed
+ - ulimits_2 is not changed
+ - ulimits_3 is not changed
+ - ulimits_4 is changed
+
+####################################################################
+## user ############################################################
+####################################################################
+
+- name: user
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: nobody
+ state: started
+ register: user_1
+
+- name: user (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: nobody
+ state: started
+ register: user_2
+
+- name: user (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: root
+ state: started
+ force_kill: yes
+ register: user_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - user_1 is changed
+ - user_2 is not changed
+ - user_3 is changed
+
+####################################################################
+## userns_mode #####################################################
+####################################################################
+
+- name: userns_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: host
+ state: started
+ register: userns_mode_1
+ ignore_errors: yes
+
+- name: userns_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: host
+ state: started
+ register: userns_mode_2
+ ignore_errors: yes
+
+- name: userns_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: ""
+ state: started
+ force_kill: yes
+ register: userns_mode_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - userns_mode_1 is changed
+ - userns_mode_2 is not changed
+ - userns_mode_3 is changed
+ when: docker_py_version is version('1.10.0', '>=')
+- assert:
+ that:
+ - userns_mode_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in userns_mode_1.msg"
+ - "'Minimum version required is 1.10.0 ' in userns_mode_1.msg"
+ when: docker_py_version is version('1.10.0', '<')
+
+####################################################################
+## uts #############################################################
+####################################################################
+
+- name: uts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: host
+ state: started
+ register: uts_1
+ ignore_errors: yes
+
+- name: uts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: host
+ state: started
+ register: uts_2
+ ignore_errors: yes
+
+- name: uts (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: ""
+ state: started
+ force_kill: yes
+ register: uts_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - uts_1 is changed
+ - uts_2 is not changed
+ - uts_3 is changed
+ when: docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - uts_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in uts_1.msg"
+ - "'Minimum version required is 3.5.0 ' in uts_1.msg"
+ when: docker_py_version is version('3.5.0', '<')
+
+####################################################################
+## working_dir #####################################################
+####################################################################
+
+- name: working_dir
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /tmp
+ state: started
+ register: working_dir_1
+
+- name: working_dir (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /tmp
+ state: started
+ register: working_dir_2
+
+- name: working_dir (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /
+ state: started
+ force_kill: yes
+ register: working_dir_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - working_dir_1 is changed
+ - working_dir_2 is not changed
+ - working_dir_3 is changed
+
+####################################################################
+####################################################################
+####################################################################
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/ports.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/ports.yml
new file mode 100644
index 00000000..895cd236
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/ports.yml
@@ -0,0 +1,286 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-options' }}"
+ cname2: "{{ cname_prefix ~ '-options-h1' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname2] }}"
+
+####################################################################
+## published_ports: all ############################################
+####################################################################
+
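+# The special value 'all' publishes every exposed port to a random host port.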
+- name: published_ports -- all
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ published_ports:
+ - all
+ force_kill: yes
+ register: published_ports_1
+
+- name: published_ports -- all (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ published_ports:
+ - all
+ force_kill: yes
+ register: published_ports_2
+
+- name: published_ports -- all (writing out 'all')
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ published_ports:
+ - "9001"
+ - "9002"
+ force_kill: yes
+ register: published_ports_3
+
+- name: published_ports -- all (idempotency 2)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ published_ports:
+ - "9002"
+ - "9001"
+ force_kill: yes
+ register: published_ports_4
+
+- name: published_ports -- all (switching back to 'all')
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ published_ports:
+ - all
+ force_kill: yes
+ register: published_ports_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+ - published_ports_4 is not changed
+ - published_ports_5 is changed
+
+####################################################################
+## published_ports: port range #####################################
+####################################################################
+
+- name: published_ports -- port range
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9010-9050:9010-9050"
+ force_kill: yes
+ register: published_ports_1
+
+- name: published_ports -- port range (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9010-9050:9010-9050"
+ force_kill: yes
+ register: published_ports_2
+
+- name: published_ports -- port range (different range)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9020-9060:9020-9060"
+ force_kill: yes
+ register: published_ports_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+
+####################################################################
+## published_ports: one-element container port range ###############
+####################################################################
+
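+# Two containers are used here: with a host port range mapped to a single
+# container port, each container ends up bound to a different host port from
+# the range, and the module should still report no change on re-run.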
+- name: published_ports -- one-element container port range
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9050:9010"
+ force_kill: yes
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_1
+
+- name: published_ports -- one-element container port range (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9050:9010"
+ force_kill: yes
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_2
+
+- name: published_ports -- one-element container port range (different range)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9051:9010"
+ force_kill: yes
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ diff: no
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+
+####################################################################
+## published_ports: IPv6 addresses #################################
+####################################################################
+
+- name: published_ports -- IPv6
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "[::1]:9001:9001"
+ force_kill: yes
+ register: published_ports_1
+
+- name: published_ports -- IPv6 (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "[::1]:9001:9001"
+ force_kill: yes
+ register: published_ports_2
+
+- name: published_ports -- IPv6 (different IP)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "127.0.0.1:9001:9001"
+ force_kill: yes
+ register: published_ports_3
+
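+# Hostnames are not accepted as bind addresses for published_ports, so this
+# task is expected to fail (see the assert below).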
+- name: published_ports -- IPv6 (hostname)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "localhost:9001:9001"
+ force_kill: yes
+ register: published_ports_4
+ ignore_errors: yes
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+ - published_ports_4 is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml
new file mode 100644
index 00000000..d5150153
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml
@@ -0,0 +1,34 @@
+---
+# Regression test for https://github.com/ansible/ansible/pull/45700
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-45700' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+- name: Start container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+
+- name: Stop container with a lot of invalid options
+ docker_container:
+ name: "{{ cname }}"
+ force_kill: yes
+ # Some options with "invalid" values, which would
+ # have to be parsed. The values are "invalid" because
+ # the containers and networks listed here do not exist.
+ # This can happen because the networks are removed
+ # before the container is stopped (see
+ # https://github.com/ansible/ansible/issues/45486).
+ networks:
+ - name: "nonexistant-network-{{ (2**32) | random }}"
+ published_ports:
+ - '1:2'
+ - '3'
+ links:
+ - "nonexistant-container-{{ (2**32) | random }}:test"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/start-stop.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/start-stop.yml
new file mode 100644
index 00000000..ec9de7c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container/tasks/tests/start-stop.yml
@@ -0,0 +1,455 @@
+---
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-hi' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## Creation ########################################################
+####################################################################
+
+- name: Create container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: yes
+ register: create_1
+
+- name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: create_2
+
+- name: Create container (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: create_3
+
+- name: Create container (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: yes
+ register: create_4
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is changed
+ - create_3 is not changed
+ - create_4 is not changed
+
+####################################################################
+## Starting (after creation) #######################################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ check_mode: yes
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ check_mode: yes
+ register: start_4
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
+
+####################################################################
+## Present check for running container #############################
+####################################################################
+
+- name: Present check for running container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: yes
+ register: present_check_1
+
+- name: Present check for running container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: present_check_2
+
+- assert:
+ that:
+ - present_check_1 is not changed
+ - present_check_2 is not changed
+
+####################################################################
+## Starting (from scratch) #########################################
+####################################################################
+
+- name: Remove container (setup for starting from scratch)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+
+- name: Start container from scratch (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ check_mode: yes
+ register: start_scratch_1
+
+- name: Start container from scratch
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ register: start_scratch_2
+
+- name: Start container from scratch (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ register: start_scratch_3
+
+- name: Start container from scratch (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ check_mode: yes
+ register: start_scratch_4
+
+- assert:
+ that:
+ - start_scratch_1 is changed
+ - start_scratch_2 is changed
+ - start_scratch_3 is not changed
+ - start_scratch_4 is not changed
+
+####################################################################
+## Recreating ######################################################
+####################################################################
+
+- name: Recreating container (created)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ force_kill: yes
+ register: recreate_1
+
+- name: Recreating container (created, recreate, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: yes
+ state: present
+ force_kill: yes
+ register: recreate_2
+ check_mode: yes
+
+- name: Recreating container (created, recreate)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: yes
+ state: present
+ force_kill: yes
+ register: recreate_3
+
+- name: Recreating container (started)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: yes
+ register: recreate_4
+
+- name: Recreating container (started, recreate, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: yes
+ removal_wait_timeout: 10
+ state: started
+ force_kill: yes
+ register: recreate_5
+ check_mode: yes
+
+- name: Recreating container (started, recreate)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: yes
+ removal_wait_timeout: 10
+ state: started
+ force_kill: yes
+ register: recreate_6
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- debug: var=recreate_1
+- debug: var=recreate_3
+- debug: var=recreate_4
+- debug: var=recreate_6
+
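+# The container Id only changes when a recreate actually happened; check mode
+# runs report 'changed' but keep the existing container.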
+- assert:
+ that:
+ - recreate_2 is changed
+ - recreate_3 is changed
+ - recreate_4 is changed
+ - recreate_5 is changed
+ - recreate_6 is changed
+ - recreate_1.container.Id == recreate_2.container.Id
+ - recreate_1.container.Id != recreate_3.container.Id
+ - recreate_3.container.Id == recreate_4.container.Id
+ - recreate_4.container.Id == recreate_5.container.Id
+ - recreate_4.container.Id != recreate_6.container.Id
+
+####################################################################
+## Restarting ######################################################
+####################################################################
+
+- name: Restarting
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ stop_timeout: 1
+ volumes:
+ - /tmp/tmp
+ register: restart_1
+
+- name: Restarting (restart, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart: yes
+ state: started
+ stop_timeout: 1
+ force_kill: yes
+ register: restart_2
+ check_mode: yes
+
+- name: Restarting (restart)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart: yes
+ state: started
+ stop_timeout: 1
+ force_kill: yes
+ register: restart_3
+
+- name: Restarting (verify volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ stop_timeout: 1
+ volumes:
+ - /tmp/tmp
+ register: restart_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ diff: no
+
+- assert:
+ that:
+ - restart_1 is changed
+ - restart_2 is changed
+ - restart_3 is changed
+ - restart_1.container.Id == restart_3.container.Id
+ - restart_4 is not changed
+
+####################################################################
+## Stopping ########################################################
+####################################################################
+
+- name: Stop container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ check_mode: yes
+ register: stop_1
+
+- name: Stop container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ register: stop_2
+
+- name: Stop container (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ register: stop_3
+
+- name: Stop container (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ check_mode: yes
+ register: stop_4
+
+- assert:
+ that:
+ - stop_1 is changed
+ - stop_2 is changed
+ - stop_3 is not changed
+ - stop_4 is not changed
+
+####################################################################
+## Removing ########################################################
+####################################################################
+
+- name: Remove container (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ check_mode: yes
+ register: remove_1
+
+- name: Remove container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: remove_2
+
+- name: Remove container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: remove_3
+
+- name: Remove container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ check_mode: yes
+ register: remove_4
+
+- assert:
+ that:
+ - remove_1 is changed
+ - remove_2 is changed
+ - remove_3 is not changed
+ - remove_4 is not changed
+
+####################################################################
+## Removing (from running) #########################################
+####################################################################
+
+- name: Start container (setup for removing from running)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+
+- name: Remove container from running (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ check_mode: yes
+ register: remove_from_running_1
+
+- name: Remove container from running
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ register: remove_from_running_2
+
+- name: Remove container from running (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ register: remove_from_running_3
+
+- name: Remove container from running (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+ check_mode: yes
+ register: remove_from_running_4
+
+- assert:
+ that:
+ - remove_from_running_1 is changed
+ - remove_from_running_2 is changed
+ - remove_from_running_3 is not changed
+ - remove_from_running_4 is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/aliases
new file mode 100644
index 00000000..1ff45a23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/osx
+skip/macos
+skip/freebsd
+skip/aix
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/tasks/main.yml
new file mode 100644
index 00000000..8ecf2de3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_container_info/tasks/main.yml
@@ -0,0 +1,80 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random container name
+ set_fact:
+ cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure container is not there
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+
+ - name: Inspect a non-present container
+ docker_container_info:
+ name: "{{ cname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'container' in result"
+ - "result.container is none"
+
+ - name: Make sure container exists
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ force_kill: yes
+
+ - name: Inspect a present container
+ docker_container_info:
+ name: "{{ cname }}"
+ register: result
+ - name: Dump docker_container_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker inspect'"
+ command: docker inspect "{{ cname }}"
+ register: docker_inspect
+ ignore_errors: yes
+ - block:
+ - set_fact:
+ docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
+ - name: Dump docker inspect result
+ debug: var=docker_inspect_result
+ when: docker_inspect is not failed
+
+ - assert:
+ that:
+ - result.exists
+ - "'container' in result"
+ - "result.container"
+
+ - assert:
+ that:
+ - "result.container == docker_inspect_result[0]"
+ when: docker_inspect is not failed
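+ # If the docker CLI is too old to talk to the daemon, the comparison above is
+ # skipped and only the API version error message is checked.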
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_inspect.stderr"
+ when: docker_inspect is failed
+
+ always:
+ - name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_container_info tests!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/aliases
new file mode 100644
index 00000000..f1162af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/main.yml
new file mode 100644
index 00000000..1f0e10a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_host_info.yml
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_host_info tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/test_host_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/test_host_info.yml
new file mode 100644
index 00000000..ef2a58a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_host_info/tasks/test_host_info.yml
@@ -0,0 +1,296 @@
+---
+- name: Create random container/volume name
+ set_fact:
+ cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+ vname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+- debug:
+ msg: "Using container name '{{ cname }}' and volume name '{{ vname }}'"
+
+- block:
+ - name: Get info on Docker host
+ docker_host_info:
+ register: output
+
+ - name: assert reading docker host facts when docker is running
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+# Container and volume are created so that all lists are non-empty:
+# * container and volume lists are non-empty because of the created objects;
+# * image list is non-empty because the image of the container is there;
+# * network list is always non-empty (default networks).
+ - name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: container_output
+
+ - assert:
+ that:
+ - container_output is changed
+
+ - name: Create a volume
+ docker_volume:
+ name: "{{ vname }}"
+ register: volume_output
+
+ - assert:
+ that:
+ - volume_output is changed
+
+ - name: Get info on Docker host and list containers
+ docker_host_info:
+ containers: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list containers
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is not defined'
+
+ - name: Get info on Docker host and list containers with verbose output
+ docker_host_info:
+ containers: yes
+ verbose_output: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list containers with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is string'
+
+ - name: Get info on Docker host and list images
+ docker_host_info:
+ images: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list images
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list images with verbose output
+ docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list images with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is string'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list networks
+ docker_host_info:
+ networks: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list networks
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list networks with verbose output
+ docker_host_info:
+ networks: yes
+ verbose_output: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list networks with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is string'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list volumes
+ docker_host_info:
+ volumes: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list volumes
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list volumes with verbose output
+ docker_host_info:
+ volumes: yes
+ verbose_output: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list volumes with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is string'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and get disk usage
+ docker_host_info:
+ disk_usage: yes
+ register: output
+ ignore_errors: yes
+
+ - name: assert reading docker host facts when docker is running and get disk usage
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.BuilderSize is not defined'
+ when: docker_py_version is version('2.2.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.2.0 ' in output.msg"
+ when: docker_py_version is version('2.2.0', '<')
+
+ - name: Get info on Docker host and get disk usage with verbose output
+ docker_host_info:
+ disk_usage: yes
+ verbose_output: yes
+ register: output
+ ignore_errors: yes
+
+ - name: assert reading docker host facts when docker is running and get disk usage with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.BuilderSize is number'
+ when: docker_py_version is version('2.2.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.2.0 ' in output.msg"
+ when: docker_py_version is version('2.2.0', '<')
+
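+ # disk_usage needs docker-py >= 2.2.0 (see the asserts above), so it is only
+ # requested when that version is available.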
+ - name: Get info on Docker host, disk usage and get all lists together
+ docker_host_info:
+ volumes: yes
+ containers: yes
+ networks: yes
+ images: yes
+ disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}"
+ register: output
+
+ - name: assert reading docker host facts when docker is running, disk usage and get lists together
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is not defined'
+ - assert:
+ that:
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.BuilderSize is not defined'
+ when: docker_py_version is version('2.2.0', '>=')
+
+ - name: Get info on Docker host, disk usage and get all lists together with verbose output
+ docker_host_info:
+ volumes: yes
+ containers: yes
+ networks: yes
+ images: yes
+ disk_usage: "{{ docker_py_version is version('2.2.0', '>=') }}"
+ verbose_output: yes
+ register: output
+
+ - name: assert reading docker host facts when docker is running and get disk usage with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is string'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is string'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is string'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is string'
+ - assert:
+ that:
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.BuilderSize is number'
+ when: docker_py_version is version('2.2.0', '>=')
+
+ always:
+ - name: Delete container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: yes
+
+ - name: Delete volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/aliases
new file mode 100644
index 00000000..33b9cd98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group5
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/meta/main.yml
new file mode 100644
index 00000000..21d7a58f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker_registry
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/main.yml
new file mode 100644
index 00000000..2be493eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: test.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/test.yml
new file mode 100644
index 00000000..023f8ccd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/test.yml
@@ -0,0 +1,49 @@
+---
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+- name: Create image and container list
+ set_fact:
+ inames: []
+ cnames: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- name: Create files directory
+ file:
+ path: '{{ output_dir }}/files'
+ state: directory
+
+- name: Template files
+ template:
+ src: '{{ item }}'
+ dest: '{{ output_dir }}/files/{{ item }}'
+ loop:
+ - Dockerfile
+ - EtcHostsDockerfile
+ - MyDockerfile
+ - StagedDockerfile
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ inames }}"
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ with_items: "{{ cnames }}"
+
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image tests!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/basic.yml
new file mode 100644
index 00000000..23a0e148
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/basic.yml
@@ -0,0 +1,78 @@
+---
+####################################################################
+## basic ###########################################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ force_absent: yes
+ register: absent_1
+
+- name: Make sure image is not there (idempotency)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ register: absent_2
+
+- assert:
+ that:
+ - absent_2 is not changed
+
+- name: Make sure image is there
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: present
+ source: pull
+ register: present_1
+
+- name: Make sure image is there (idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: present
+ source: pull
+ register: present_2
+
+- assert:
+ that:
+ - present_1 is changed
+ - present_2 is not changed
+
+- name: Make sure tag is not there
+ docker_image:
+ name: "{{ docker_test_image_hello_world_base }}:alias"
+ state: absent
+
+- name: Tag image with alias
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ register: tag_1
+
+- name: Tag image with alias (idempotent)
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ register: tag_2
+
+- name: Tag image with alias (force, still idempotent)
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ force_tag: yes
+ register: tag_3
+
+- assert:
+ that:
+ - tag_1 is changed
+ - tag_2 is not changed
+ - tag_3 is not changed
+
+- name: Cleanup alias tag
+ docker_image:
+ name: "{{ docker_test_image_hello_world_base }}:alias"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/docker_image.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/docker_image.yml
new file mode 100644
index 00000000..f256f8d8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/docker_image.yml
@@ -0,0 +1,228 @@
+---
+- name: Registering image name
+ set_fact:
+ iname: "{{ name_prefix ~ '-options' }}"
+
+- name: Determining pushed image names
+ set_fact:
+ hello_world_image_base: "{{ registry_address }}/test/hello-world"
+ test_image_base: "{{ registry_address }}/test/{{ iname }}"
+
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname, test_image_base ~ ':latest', hello_world_image_base ~ ':latest', hello_world_image_base ~ ':newtag', hello_world_image_base ~ ':newtag2'] }}"
+
+####################################################################
+## interact with test registry #####################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: absent
+ force_absent: yes
+
+- name: Make sure we have {{ docker_test_image_hello_world }}
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ source: pull
+
+- name: Push image to test registry
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: yes
+ source: local
+ register: push_1
+
+- name: Push image to test registry (idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: yes
+ source: local
+ register: push_2
+
+- name: Push image to test registry (force, still idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: yes
+ source: local
+ force_tag: yes
+ register: push_3
+
+- assert:
+ that:
+ - push_1 is changed
+ - push_2 is not changed
+ - push_3 is not changed
+
+- name: Get facts of local image
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_1
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: absent
+ force_absent: yes
+
+- name: Get facts of local image (absent)
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_2
+
+- name: Pull image from test registry
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: present
+ source: pull
+ register: pull_1
+
+- name: Pull image from test registry (idempotency)
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: present
+ source: pull
+ register: pull_2
+
+- name: Get facts of local image (present)
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_3
+
+- assert:
+ that:
+ - pull_1 is changed
+ - pull_2 is not changed
+ - facts_1.images | length == 1
+ - facts_2.images | length == 0
+ - facts_3.images | length == 1
+
+- name: Tag different image with new tag
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ repository: "{{ hello_world_image_base }}:newtag"
+ push: no
+ source: pull
+
+- name: Push different image with new tag
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag
+ push: yes
+ source: local
+ register: push_1_different
+
+- name: Push different image with new tag (idempotent)
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag
+ push: yes
+ source: local
+ register: push_2_different
+
+- assert:
+ that:
+ - push_1_different is changed
+ - push_2_different is not changed
+
+- name: Tag same image with new tag
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ repository: "{{ hello_world_image_base }}:newtag2"
+ push: no
+ source: pull
+
+- name: Push same image with new tag
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag2
+ push: yes
+ source: local
+ register: push_1_same
+
+- name: Push same image with new tag (idempotent)
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag2
+ push: yes
+ source: local
+ register: push_2_same
+
+- assert:
+ that:
+ # NOTE: This should be:
+ # - push_1_same is changed
+ # Unfortunately docker does *NOT* report whether the tag already existed or not.
+ # Here are the logs returned by client.push() for both tasks (which are exactly the same):
+ # push_1_same:
+ # {"status": "The push refers to repository [localhost:32796/test/hello-world]"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"},
+ # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"},
+ # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}}
+ # push_2_same:
+ # {"status": "The push refers to repository [localhost:32796/test/hello-world]"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"},
+ # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"},
+ # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}}
+ - push_1_same is not changed
+ - push_2_same is not changed
+
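+# The assert above cannot tell whether 'newtag2' already pointed at this image, because
+# the push logs quoted in the comment are identical either way. A minimal sketch (not
+# executed by this test) of an out-of-band check, assuming the test registry behind
+# registry_address serves the plain, unauthenticated v2 HTTP API:
+#
+# - name: Query the manifest digest of newtag2 before pushing
+#   uri:
+#     url: "http://{{ registry_address }}/v2/test/hello-world/manifests/newtag2"
+#     method: HEAD
+#     headers:
+#       Accept: application/vnd.docker.distribution.manifest.v2+json
+#     status_code: [200, 404]
+#   register: manifest_before
+#
+# A 200 response whose Docker-Content-Digest header already matches the digest of the
+# image about to be pushed would mean the push cannot change anything on the registry.
+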
+####################################################################
+## repository ######################################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ test_image_base }}:latest"
+ state: absent
+ force_absent: yes
+
+- name: repository
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ pull: no
+ repository: "{{ test_image_base }}"
+ source: build
+ register: repository_1
+
+- name: repository (idempotent)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ pull: no
+ repository: "{{ test_image_base }}"
+ source: build
+ register: repository_2
+
+- assert:
+ that:
+ - repository_1 is changed
+ - repository_2 is not changed
+
+- name: Get facts of image
+ docker_image_info:
+ name: "{{ test_image_base }}:latest"
+ register: facts_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ test_image_base }}:latest"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - facts_1.images | length == 1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/old-options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/old-options.yml
new file mode 100644
index 00000000..2bf1ca4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/old-options.yml
@@ -0,0 +1,51 @@
+---
+- name: Registering image name
+ set_fact:
+ iname: "{{ name_prefix ~ '-old-options' }}"
+
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname]}}"
+
+####################################################################
+## build ###########################################################
+####################################################################
+
+- name: build with old-style options
+ docker_image:
+ name: "{{ iname }}"
+ path: "{{ output_dir }}/files"
+ dockerfile: Dockerfile
+ http_timeout: 60
+ nocache: yes
+ pull: no
+ rm: no
+ buildargs:
+ TEST1: val1
+ TEST2: val2
+ TEST3: "True"
+ container_limits:
+ memory: 7000000
+ memswap: 8000000
+ source: build
+ register: build
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- set_fact:
+ build_deprecations: "{{ build.deprecations | map(attribute='msg') | list }}"
+
+- assert:
+ that:
+ - '"Please specify build.container_limits instead of container_limits. The container_limits option has been renamed" in build_deprecations'
+ - '"Please specify build.dockerfile instead of dockerfile. The dockerfile option has been renamed" in build_deprecations'
+ - '"Please specify build.http_timeout instead of http_timeout. The http_timeout option has been renamed" in build_deprecations'
+ - '"Please specify build.nocache instead of nocache. The nocache option has been renamed" in build_deprecations'
+ - '"Please specify build.path instead of path. The path option has been renamed" in build_deprecations'
+ - '"Please specify build.pull instead of pull. The pull option has been renamed" in build_deprecations'
+ - '"Please specify build.rm instead of rm. The rm option has been renamed" in build_deprecations'
+ - '"Please specify build.args instead of buildargs. The buildargs option has been renamed" in build_deprecations'
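+
+# For reference, the new-style equivalent of the build task above nests these options
+# under "build", as the deprecation messages indicate (sketch only, not run here):
+#
+# - docker_image:
+#     name: "{{ iname }}"
+#     build:
+#       path: "{{ output_dir }}/files"
+#       dockerfile: Dockerfile
+#       http_timeout: 60
+#       nocache: yes
+#       pull: no
+#       rm: no
+#       args:
+#         TEST1: val1
+#         TEST2: val2
+#         TEST3: "True"
+#       container_limits:
+#         memory: 7000000
+#         memswap: 8000000
+#     source: build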
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/options.yml
new file mode 100644
index 00000000..b8630d34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/tasks/tests/options.yml
@@ -0,0 +1,337 @@
+---
+- name: Registering image name
+ set_fact:
+ iname: "{{ name_prefix ~ '-options' }}"
+ iname_1: "{{ name_prefix ~ '-options-1' }}"
+
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname, iname_1] }}"
+
+####################################################################
+## build.args ######################################################
+####################################################################
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- name: buildargs
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ args:
+ TEST1: val1
+ TEST2: val2
+ TEST3: "True"
+ pull: no
+ source: build
+ register: buildargs_1
+
+- name: buildargs (idempotency)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ args:
+ TEST1: val1
+ TEST2: val2
+ TEST3: "True"
+ pull: no
+ source: build
+ register: buildargs_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - buildargs_1 is changed
+ - buildargs_2 is not changed
+ when: docker_py_version is version('1.6.0', '>=')
+
+- assert:
+ that:
+ - buildargs_1 is failed
+ - buildargs_2 is failed
+ when: docker_py_version is version('1.6.0', '<')
+
+####################################################################
+## container_limits ################################################
+####################################################################
+
+- name: container_limits (Failed due to min memory limit)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ container_limits:
+ memory: 4000
+ pull: no
+ source: build
+ ignore_errors: yes
+ register: container_limits_1
+
+- name: container_limits
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ container_limits:
+ memory: 7000000
+ memswap: 8000000
+ pull: no
+ source: build
+ register: container_limits_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ # It *sometimes* happens that the first task does not fail.
+ # For now, we work around this by
+ # a) requiring that if it fails, the message must
+ # contain 'Minimum memory limit allowed is (4|6)MB', and
+ # b) requiring that either the first task, or the second
+ # task is changed, but not both.
+ - "not container_limits_1 is failed or ('Minimum memory limit allowed is ') in container_limits_1.msg"
+    - "(container_limits_1 is changed or container_limits_2 is changed) and not (container_limits_1 is changed and container_limits_2 is changed)"
+
+####################################################################
+## dockerfile ######################################################
+####################################################################
+
+- name: dockerfile
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: no
+ source: build
+ register: dockerfile_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - dockerfile_1 is changed
+ - "('FROM ' ~ docker_test_image_alpine) in dockerfile_1.stdout"
+ - dockerfile_1['image']['Config']['WorkingDir'] == '/newdata'
+
+####################################################################
+## force ###########################################################
+####################################################################
+
+- name: Build an image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ pull: no
+ source: build
+
+- name: force (changed)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: no
+ source: build
+ force_source: yes
+ register: force_1
+
+- name: force (unchanged)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: no
+ source: build
+ force_source: yes
+ register: force_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - force_1 is changed
+ - force_2 is not changed
+
+####################################################################
+## load path #######################################################
+####################################################################
+
+- name: Archive image
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ archive_path: "{{ output_dir }}/image.tar"
+ source: pull
+ register: archive_image
+
+- name: Create invalid archive
+ copy:
+ dest: "{{ output_dir }}/image-invalid.tar"
+ content: "this is not a valid image"
+
+- name: remove image
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ force_absent: yes
+
+- name: load image (changed)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ load_path: "{{ output_dir }}/image.tar"
+ source: load
+ register: load_image
+
+- name: load image (idempotency)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ load_path: "{{ output_dir }}/image.tar"
+ source: load
+ register: load_image_1
+
+- name: load image (wrong name)
+ docker_image:
+ name: foo:bar
+ load_path: "{{ output_dir }}/image.tar"
+ source: load
+ register: load_image_2
+ ignore_errors: true
+
+- name: load image (invalid image)
+ docker_image:
+ name: foo:bar
+ load_path: "{{ output_dir }}/image-invalid.tar"
+ source: load
+ register: load_image_3
+ ignore_errors: true
+
+- name: load image (invalid image, old API version)
+ docker_image:
+ name: foo:bar
+ load_path: "{{ output_dir }}/image-invalid.tar"
+ source: load
+ api_version: "1.22"
+ register: load_image_4
+
+- assert:
+ that:
+ - load_image is changed
+ - archive_image['image']['Id'] == load_image['image']['Id']
+ - load_image_1 is not changed
+ - load_image_2 is failed
+ - >-
+ "The archive did not contain image 'foo:bar'. Instead, found '" ~ docker_test_image_hello_world ~ "'." == load_image_2.msg
+ - load_image_3 is failed
+ - '"Detected no loaded images. Archive potentially corrupt?" == load_image_3.msg'
+ - load_image_4 is changed
+ - "'The API version of your Docker daemon is < 1.23, which does not return the image loading result from the Docker daemon. Therefore, we cannot verify whether the expected image was loaded, whether multiple images where loaded, or whether the load actually succeeded. You should consider upgrading your Docker daemon.' in load_image_4.warnings"
+
+####################################################################
+## path ############################################################
+####################################################################
+
+- name: Build image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ pull: no
+ source: build
+ register: path_1
+
+- name: Build image (idempotency)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ pull: no
+ source: build
+ register: path_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - path_1 is changed
+ - path_2 is not changed
+
+####################################################################
+## target ##########################################################
+####################################################################
+
+- name: Build multi-stage image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ dockerfile: "StagedDockerfile"
+ target: first
+ pull: no
+ source: build
+ register: dockerfile_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - dockerfile_2 is changed
+ - dockerfile_2.image.Config.WorkingDir == '/first'
+
+####################################################################
+## build.etc_hosts #################################################
+####################################################################
+
+- name: Build image with custom etc_hosts
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ output_dir }}/files"
+ dockerfile: "EtcHostsDockerfile"
+ pull: no
+ etc_hosts:
+ some-custom-host: "127.0.0.1"
+ source: build
+ register: path_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: yes
+
+- assert:
+ that:
+ - path_1 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/Dockerfile b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/Dockerfile
new file mode 100644
index 00000000..c5032944
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/Dockerfile
@@ -0,0 +1,3 @@
+FROM {{ docker_test_image_busybox }}
+ENV foo /bar
+WORKDIR ${foo}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/EtcHostsDockerfile b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/EtcHostsDockerfile
new file mode 100644
index 00000000..f1b54e3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/EtcHostsDockerfile
@@ -0,0 +1,3 @@
+FROM {{ docker_test_image_busybox }}
+# This should fail building if docker cannot resolve some-custom-host
+RUN ping -c1 some-custom-host
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/MyDockerfile b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/MyDockerfile
new file mode 100644
index 00000000..68bca8a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/MyDockerfile
@@ -0,0 +1,5 @@
+FROM {{ docker_test_image_alpine }}
+ENV INSTALL_PATH /newdata
+RUN mkdir -p $INSTALL_PATH
+
+WORKDIR $INSTALL_PATH
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/StagedDockerfile b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/StagedDockerfile
new file mode 100644
index 00000000..fc4623a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image/templates/StagedDockerfile
@@ -0,0 +1,7 @@
+FROM {{ docker_test_image_busybox }} AS first
+ENV dir /first
+WORKDIR ${dir}
+
+FROM {{ docker_test_image_busybox }} AS second
+ENV dir /second
+WORKDIR ${dir}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/aliases
new file mode 100644
index 00000000..f1162af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/tasks/main.yml
new file mode 100644
index 00000000..8b510c9c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_image_info/tasks/main.yml
@@ -0,0 +1,59 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Make sure image is not there
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ state: absent
+
+ - name: Inspect a non-available image
+ docker_image_info:
+ name: "{{ docker_test_image_alpine_different }}"
+ register: result
+
+ - assert:
+ that:
+ - "result.images|length == 0"
+
+ - name: Make sure images are there
+ docker_image:
+ name: "{{ item }}"
+ source: pull
+ state: present
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+
+ - name: Inspect an available image
+ docker_image_info:
+ name: "{{ docker_test_image_hello_world }}"
+ register: result
+
+ - assert:
+ that:
+ - "result.images|length == 1"
+ - "docker_test_image_hello_world in result.images[0].RepoTags"
+
+ - name: Inspect multiple images
+ docker_image_info:
+ name:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ register: result
+
+ - debug: var=result
+
+ - assert:
+ that:
+ - "result.images|length == 2"
+ - "docker_test_image_hello_world in result.images[0].RepoTags"
+ - "docker_test_image_alpine in result.images[1].RepoTags"
+
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image_info tests!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/aliases
new file mode 100644
index 00000000..70d2451b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/meta/main.yml
new file mode 100644
index 00000000..21d7a58f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker_registry
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/main.yml
new file mode 100644
index 00000000..115c31e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: test.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/test.yml
new file mode 100644
index 00000000..5a6f15fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/test.yml
@@ -0,0 +1,9 @@
+---
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image tests!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/tests/docker_login.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/tests/docker_login.yml
new file mode 100644
index 00000000..1c584c0f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_login/tasks/tests/docker_login.yml
@@ -0,0 +1,139 @@
+---
+- block:
+ - name: Log in with wrong password (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed_check
+ ignore_errors: yes
+ check_mode: yes
+
+ - name: Log in with wrong password
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed
+ ignore_errors: yes
+
+ - name: Make sure that login failed
+ assert:
+ that:
+ - login_failed_check is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
+ - login_failed is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
+
+ - name: Log in (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_1
+ check_mode: yes
+
+ - name: Log in
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_2
+
+ - name: Get permissions of ~/.docker/config.json
+ stat:
+ path: ~/.docker/config.json
+ register: login_2_stat
+
+ - name: Log in (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_3
+
+ - name: Log in (idempotent, check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_4
+ check_mode: yes
+
+ - name: Make sure that login worked
+ assert:
+ that:
+ - login_1 is changed
+ - login_2 is changed
+ - login_3 is not changed
+ - login_4 is not changed
+ - login_2_stat.stat.mode == '0600'
+
+ - name: Log in again with wrong password (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed_check
+ ignore_errors: yes
+ check_mode: yes
+
+ - name: Log in again with wrong password
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed
+ ignore_errors: yes
+
+ - name: Make sure that login failed again
+ assert:
+ that:
+ - login_failed_check is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
+ - login_failed is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
+
+ - name: Log out (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_1
+ check_mode: yes
+
+ - name: Log out
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_2
+
+ - name: Log out (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_3
+
+ - name: Log out (idempotent, check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_4
+ check_mode: yes
+
+  - name: Make sure that logout worked
+ assert:
+ that:
+ - logout_1 is changed
+ - logout_2 is changed
+ - logout_3 is not changed
+ - logout_4 is not changed
+
+ when: registry_frontend_address != 'n/a'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/aliases
new file mode 100644
index 00000000..33b9cd98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group5
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/main.yml
new file mode 100644
index 00000000..d5fcdb91
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+ dnetworks: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ loop: "{{ cnames }}"
+ - name: "Make sure all networks are removed"
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: yes
+ loop: "{{ dnetworks }}"
+
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=') # FIXME: find out API version!
+
+- fail: msg="Too old docker / docker-py version to run docker_network tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/basic.yml
new file mode 100644
index 00000000..e6290715
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/basic.yml
@@ -0,0 +1,134 @@
+---
+- name: Registering container and network names
+ set_fact:
+ cname_1: "{{ name_prefix ~ '-container-1' }}"
+ cname_2: "{{ name_prefix ~ '-container-2' }}"
+ cname_3: "{{ name_prefix ~ '-container-3' }}"
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+ nname_2: "{{ name_prefix ~ '-network-2' }}"
+- name: Registering container and network names
+ set_fact:
+ cnames: "{{ cnames + [cname_1, cname_2, cname_3] }}"
+ dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
+
+- name: Create containers
+ docker_container:
+ name: "{{ container_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: /bin/sleep 10m
+ state: started
+ loop:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ loop_control:
+ loop_var: container_name
+
+####################################################################
+
+- name: Create network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ register: networks_1
+
+- name: Connect network to containers 1
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ register: networks_2
+
+- name: Connect network to containers 1 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ register: networks_2_idem
+
+- name: Connect network to containers 1 and 2
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ register: networks_3
+
+- name: Connect network to containers 1 and 2 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ register: networks_3_idem
+
+- name: Connect network to container 3
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_3 }}"
+ appends: yes
+ register: networks_4
+
+- name: Connect network to container 3 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_3 }}"
+ appends: yes
+ register: networks_4_idem
+
+- name: Disconnect network from container 1
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ register: networks_5
+
+- name: Disconnect network from container 1 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ register: networks_5_idem
+
+- name: Cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is changed
+ - networks_2_idem is not changed
+ - networks_3 is changed
+ - networks_3_idem is not changed
+ - networks_4 is changed
+ - networks_4_idem is not changed
+ - networks_5 is changed
+ - networks_5_idem is not changed
+
+####################################################################
+
+- name: Delete containers
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: yes
+ loop:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ loop_control:
+ loop_var: container_name
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/ipam.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/ipam.yml
new file mode 100644
index 00000000..7405c3b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/ipam.yml
@@ -0,0 +1,398 @@
+---
+- name: Registering network names
+ set_fact:
+ nname_ipam_0: "{{ name_prefix ~ '-network-ipam-0' }}"
+ nname_ipam_1: "{{ name_prefix ~ '-network-ipam-1' }}"
+ nname_ipam_2: "{{ name_prefix ~ '-network-ipam-2' }}"
+ nname_ipam_3: "{{ name_prefix ~ '-network-ipam-3' }}"
+
+- name: Registering network names
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_ipam_0, nname_ipam_1, nname_ipam_2, nname_ipam_3] }}"
+
+
+#################### Deprecated ipam_config ####################
+
+- name: Create network with ipam_config and deprecated ipam_options (conflicting)
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ ipam_options:
+ subnet: 172.3.29.0/24
+ ipam_config:
+ - subnet: 172.3.29.0/24
+ register: network
+ ignore_errors: yes
+
+- assert:
+ that:
+ - network is failed
+ - "network.msg == 'parameters are mutually exclusive: ipam_config|ipam_options'"
+
+- name: Create network with deprecated custom IPAM options
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ ipam_options:
+ subnet: 172.3.29.0/24
+ gateway: 172.3.29.2
+ iprange: 172.3.29.0/26
+ aux_addresses:
+ host1: 172.3.29.3
+ host2: 172.3.29.4
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Create network with deprecated custom IPAM options (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ ipam_options:
+ subnet: 172.3.29.0/24
+ gateway: 172.3.29.2
+ iprange: 172.3.29.0/26
+ aux_addresses:
+ host1: 172.3.29.3
+ host2: 172.3.29.4
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Change of network created with deprecated custom IPAM options
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ ipam_options:
+ subnet: 172.3.28.0/24
+ gateway: 172.3.28.2
+ iprange: 172.3.28.0/26
+ aux_addresses:
+ host1: 172.3.28.3
+ register: network
+ diff: yes
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 4
+ - '"ipam_config[0].subnet" in network.diff.differences'
+ - '"ipam_config[0].gateway" in network.diff.differences'
+ - '"ipam_config[0].iprange" in network.diff.differences'
+ - '"ipam_config[0].aux_addresses" in network.diff.differences'
+
+- name: Remove gateway and iprange of network with deprecated custom IPAM options
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ ipam_options:
+ subnet: 172.3.28.0/24
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Cleanup network with deprecated custom IPAM options
+ docker_network:
+ name: "{{ nname_ipam_0 }}"
+ state: absent
+
+
+#################### IPv4 IPAM config ####################
+- name: Create network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Create network with custom IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Change of network created with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 172.3.28.0/24
+ gateway: 172.3.28.2
+ iprange: 172.3.28.0/26
+ aux_addresses:
+ host1: 172.3.28.3
+ register: network
+ diff: yes
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 4
+ - '"ipam_config[0].subnet" in network.diff.differences'
+ - '"ipam_config[0].gateway" in network.diff.differences'
+ - '"ipam_config[0].iprange" in network.diff.differences'
+ - '"ipam_config[0].aux_addresses" in network.diff.differences'
+
+- name: Remove gateway and iprange of network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 172.3.28.0/24
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Cleanup network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ state: absent
+
+
+#################### IPv6 IPAM config ####################
+
+- name: Create network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce0::/64
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Create network with IPv6 IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce0::/64
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Change subnet of network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+ register: network
+ diff: yes
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+ - network.diff.differences[0] == "ipam_config[0].subnet"
+
+- name: Change subnet of network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: "fdd1:ac8c:0557:7ce1::"
+ register: network
+ ignore_errors: yes
+
+- assert:
+ that:
+ - network is failed
+ - "network.msg == '\"fdd1:ac8c:0557:7ce1::\" is not a valid CIDR'"
+
+- name: Cleanup network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ state: absent
+
+
+#################### IPv4 and IPv6 network ####################
+
+- name: Create network with IPv6 and custom IPv4 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Change subnet order of network with IPv6 and custom IPv4 IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+ - subnet: 172.4.27.0/24
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Remove IPv6 from network with custom IPv4 and IPv6 IPAM config (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: no
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ register: network
+ diff: yes
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+ - network.diff.differences[0] == "enable_ipv6"
+
+- name: Cleanup network with IPv6 and custom IPv4 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+
+#################### multiple IPv4 networks ####################
+
+- block:
+ - name: Create network with two IPv4 IPAM configs
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: 172.4.28.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is changed
+
+ - name: Create network with two IPv4 IPAM configs (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 172.4.28.0/24
+ - subnet: 172.4.27.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is not changed
+
+ - name: Create network with two IPv4 IPAM configs (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: 172.4.29.0/24
+ register: network
+ diff: yes
+
+ - assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+
+ - name: Create network with one IPv4 IPAM config (no change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 172.4.29.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is not changed
+
+ - name: Cleanup network
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+ when: ansible_facts.virtualization_type != 'docker'
+
+
+#################### IPAM driver options ####################
+
+- name: Create network with IPAM driver options
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: b
+ register: network_1
+ ignore_errors: yes
+- name: Create network with IPAM driver options (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: b
+ diff: yes
+ register: network_2
+ ignore_errors: yes
+- name: Create network with IPAM driver options (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: c
+ diff: yes
+ register: network_3
+ ignore_errors: yes
+- name: Cleanup network
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+- assert:
+ that:
+ - network_1 is changed
+ - network_2 is not changed
+ - network_3 is changed
+ when: docker_py_version is version('2.0.0', '>=')
+- assert:
+ that:
+ - network_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in network_1.msg"
+ - "'Minimum version required is 2.0.0 ' in network_1.msg"
+ when: docker_py_version is version('2.0.0', '<')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/options.yml
new file mode 100644
index 00000000..ea073db3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/options.yml
@@ -0,0 +1,240 @@
+---
+- name: Registering network name
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+- name: Registering network name
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1] }}"
+
+####################################################################
+## internal ########################################################
+####################################################################
+
+- name: internal
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: yes
+ register: internal_1
+
+- name: internal (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: yes
+ register: internal_2
+
+- name: internal (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: no
+ register: internal_3
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+- assert:
+ that:
+ - internal_1 is changed
+ - internal_2 is not changed
+ - internal_3 is changed
+
+####################################################################
+## driver_options ##################################################
+####################################################################
+
+- name: driver_options
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'false'
+ register: driver_options_1
+
+- name: driver_options (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'false'
+ register: driver_options_2
+
+- name: driver_options (idempotency with string translation)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: False
+ register: driver_options_3
+
+- name: driver_options (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'true'
+ register: driver_options_4
+
+- name: driver_options (idempotency with string translation)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: True
+ register: driver_options_5
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+- assert:
+ that:
+ - driver_options_1 is changed
+ - driver_options_2 is not changed
+ - driver_options_3 is not changed
+ - driver_options_4 is changed
+ - driver_options_5 is not changed
+
+####################################################################
+## scope ###########################################################
+####################################################################
+
+- block:
+ - name: scope
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ scope: local
+ register: scope_1
+
+ - name: scope (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ scope: local
+ register: scope_2
+
+ - name: swarm
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+ # Driver change alongside scope is intentional - bridge doesn't appear to support anything but local, and overlay can't downgrade to local. Additionally, overlay reports as swarm for swarm OR global, so no change is reported in that case.
+ # Test output indicates that the scope is altered, at least, so manual inspection will be required to verify this going forward, unless we come up with a test driver that supports multiple scopes.
+ - name: scope (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ scope: swarm
+ register: scope_3
+
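+  # A possible manual verification of the scope change (sketch only, not run as part of
+  # this test): inspect the network and check the Scope reported by the daemon, e.g.
+  #
+  # - docker_network_info:
+  #     name: "{{ nname_1 }}"
+  #   register: scope_info
+  # - assert:
+  #     that:
+  #       - scope_info.network.Scope == 'swarm'
+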
+ - name: cleanup network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+ - assert:
+ that:
+ - scope_1 is changed
+ - scope_2 is not changed
+ - scope_3 is changed
+
+ always:
+ - name: cleanup swarm
+ docker_swarm:
+ state: absent
+ force: yes
+
+ # Requirements for docker_swarm
+ when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=')
+
+####################################################################
+## attachable ######################################################
+####################################################################
+
+- name: attachable
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: true
+ register: attachable_1
+ ignore_errors: yes
+
+- name: attachable (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: true
+ register: attachable_2
+ ignore_errors: yes
+
+- name: attachable (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: false
+ register: attachable_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+- assert:
+ that:
+ - attachable_1 is changed
+ - attachable_2 is not changed
+ - attachable_3 is changed
+ when: docker_py_version is version('2.0.0', '>=')
+- assert:
+ that:
+ - attachable_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in attachable_1.msg"
+ - "'Minimum version required is 2.0.0 ' in attachable_1.msg"
+ when: docker_py_version is version('2.0.0', '<')
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: labels_2
+
+- name: labels (less labels)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ register: labels_3
+
+- name: labels (more labels)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ register: labels_4
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is not changed
+ - labels_4 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/overlay.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/overlay.yml
new file mode 100644
index 00000000..832836aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/overlay.yml
@@ -0,0 +1,61 @@
+---
+- name: Registering network name
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+- name: Registering network name
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1] }}"
+
+####################################################################
+## overlay #########################################################
+####################################################################
+
+- block:
+ # Overlay networks require swarm initialization before they'll work
+ - name: swarm
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+ - name: overlay
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ driver_options:
+ com.docker.network.driver.overlay.vxlanid_list: "257"
+ register: overlay_1
+
+ - name: overlay (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ driver_options:
+ com.docker.network.driver.overlay.vxlanid_list: "257"
+ register: overlay_2
+
+ - name: overlay (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ register: overlay_3
+
+ - name: cleanup network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: yes
+
+ - assert:
+ that:
+ - overlay_1 is changed
+ - overlay_2 is not changed
+ - overlay_3 is changed
+
+ always:
+ - name: cleanup swarm
+ docker_swarm:
+ state: absent
+ force: yes
+
+ # Requirements for docker_swarm
+ when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.25', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/substring.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/substring.yml
new file mode 100644
index 00000000..bdee190e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network/tasks/tests/substring.yml
@@ -0,0 +1,37 @@
+---
+- name: Registering container and network names
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-foo' }}"
+ nname_2: "{{ name_prefix ~ '-network-foobar' }}"
+- name: Registering container and network names
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
+
+####################################################################
+
+- name: Create network (superstring)
+ docker_network:
+ name: "{{ nname_2 }}"
+ state: present
+ register: networks_1
+
+- name: Create network (substring)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ register: networks_2
+
+- name: Cleanup
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ loop_control:
+ loop_var: network_name
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/aliases
new file mode 100644
index 00000000..f1162af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/tasks/main.yml
new file mode 100644
index 00000000..70edcf6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_network_info/tasks/main.yml
@@ -0,0 +1,76 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random network name
+ set_fact:
+ nname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure network is not there
+ docker_network:
+ name: "{{ nname }}"
+ state: absent
+ force: yes
+
+ - name: Inspect a non-present network
+ docker_network_info:
+ name: "{{ nname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'network' in result"
+ - "result.network is none"
+
+ - name: Make sure network exists
+ docker_network:
+ name: "{{ nname }}"
+ state: present
+
+ - name: Inspect a present network
+ docker_network_info:
+ name: "{{ nname }}"
+ register: result
+ - name: Dump docker_network_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker network inspect'"
+ command: docker network inspect "{{ nname }}"
+ register: docker_inspect
+ ignore_errors: yes
+ - block:
+ - set_fact:
+ docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
+ - name: Dump docker inspect result
+ debug: var=docker_inspect_result
+ when: docker_inspect is not failed
+
+ - name: Cleanup
+ docker_network:
+ name: "{{ nname }}"
+ state: absent
+ force: yes
+
+ - assert:
+ that:
+ - result.exists
+ - "'network' in result"
+ - "result.network"
+
+ - assert:
+ that:
+ - "result.network == docker_inspect_result[0]"
+ when: docker_inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_inspect.stderr"
+ when: docker_inspect is failed
+
+ when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_network_info tests!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.21', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/aliases
new file mode 100644
index 00000000..78341e07
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/aliases
@@ -0,0 +1,12 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system. On VMs, we restart docker daemon
+ # after finishing the tests to minimize potential effects
+ # on other tests.
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/main.yml
new file mode 100644
index 00000000..a91efc59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Run the tests
+- block:
+ - include_tasks: test_node.yml
+
+ always:
+ - name: Cleanup (trying)
+ docker_swarm:
+ state: absent
+ force: true
+ diff: no
+ ignore_errors: yes
+
+ - name: Restart docker daemon
+ service:
+ name: docker
+ state: restarted
+ become: yes
+ - name: Wait for docker daemon to be fully restarted
+ command: docker ps
+ ignore_errors: yes
+
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
+ diff: no
+
+ when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_node tests!"
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/test_node.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/test_node.yml
new file mode 100644
index 00000000..bdc01afb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node/tasks/test_node.yml
@@ -0,0 +1,840 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_node_info when docker is not running in swarm mode
+ docker_node_info:
+ ignore_errors: yes
+ register: output
+
+  - name: assert failure when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+  - name: assert changed when creating a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_node_info when docker is running in swarm mode and as manager
+ docker_node_info:
+ register: output
+
+ - name: assert reading docker swarm node facts
+ assert:
+ that:
+ - 'output.nodes | length > 0'
+ - 'output.nodes[0].ID is string'
+
+ - name: Register node ID
+ set_fact:
+ nodeid: "{{ output.nodes[0].ID }}"
+
+####################################################################
+## Set node as swarm manager #######################################
+####################################################################
+
+ - name: Try to set node as manager (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ check_mode: yes
+ register: set_as_manager_1
+
+ - name: Try to set node as manager
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ register: set_as_manager_2
+
+ - name: Try to set node as manager (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ register: set_as_manager_3
+
+ - name: Try to set node as manager (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ check_mode: yes
+ register: set_as_manager_4
+
+  - name: assert that node role has not changed
+ assert:
+ that:
+ - 'set_as_manager_1 is not changed'
+ - 'set_as_manager_2 is not changed'
+ - 'set_as_manager_3 is not changed'
+ - 'set_as_manager_4 is not changed'
+ - 'set_as_manager_1.node.Spec.Role == "manager"'
+ - 'set_as_manager_2.node.Spec.Role == "manager"'
+ - 'set_as_manager_3.node.Spec.Role == "manager"'
+ - 'set_as_manager_4.node.Spec.Role == "manager"'
+
+####################################################################
+## Set node as swarm worker ########################################
+####################################################################
+
+ - name: Try to set node as worker (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: worker
+ check_mode: yes
+ register: set_as_worker_1
+
+ - name: Try to set node as worker
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: worker
+ ignore_errors: yes
+ register: set_as_worker_2
+
+ - name: assert that node cannot change role to worker
+ assert:
+ that:
+ - 'set_as_worker_1 is changed'
+ - 'set_as_worker_2 is failed'
+ - 'set_as_worker_2.msg | regex_search("attempting to demote the last manager of the swarm")'
+
+####################################################################
+## Set node as paused ##############################################
+####################################################################
+
+ - name: Try to set node availability as paused (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ check_mode: yes
+ register: set_as_paused_1
+
+ - name: Try to set node availability as paused
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ register: set_as_paused_2
+
+ - name: Try to set node availability as paused (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ register: set_as_paused_3
+
+ - name: Try to set node availability as paused (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ check_mode: yes
+ register: set_as_paused_4
+
+ - name: assert node changed availability to paused
+ assert:
+ that:
+ - 'set_as_paused_1 is changed'
+ - 'set_as_paused_2 is changed'
+ - 'set_as_paused_3 is not changed'
+ - 'set_as_paused_4 is not changed'
+ - 'set_as_paused_2.node.Spec.Availability == "pause"'
+
+####################################################################
+## Set node as drained #############################################
+####################################################################
+
+ - name: Try to set node availability as drained (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ check_mode: yes
+ register: output_drain_1
+
+ - name: Try to set node availability as drained
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ register: output_drain_2
+
+ - name: Try to set node availability as drained (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ register: output_drain_3
+
+ - name: Try to set node availability as drained (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ check_mode: yes
+ register: output_drain_4
+
+ - name: assert node changed availability to drained
+ assert:
+ that:
+ - 'output_drain_1 is changed'
+ - 'output_drain_2 is changed'
+ - 'output_drain_3 is not changed'
+ - 'output_drain_4 is not changed'
+ - 'output_drain_2.node.Spec.Availability == "drain"'
+
+
+####################################################################
+## Set node as active ##############################################
+####################################################################
+
+ - name: Try to set node availability as active (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ check_mode: yes
+ register: output_active_1
+
+ - name: Try to set node availability as active
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ register: output_active_2
+
+ - name: Try to set node availability as active (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ register: output_active_3
+
+ - name: Try to set node availability as active (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ check_mode: yes
+ register: output_active_4
+
+ - name: assert node changed availability to active
+ assert:
+ that:
+ - 'output_active_1 is changed'
+ - 'output_active_2 is changed'
+ - 'output_active_3 is not changed'
+ - 'output_active_4 is not changed'
+ - 'output_active_2.node.Spec.Availability == "active"'
+
+####################################################################
+## Add single label ###############################################
+####################################################################
+
+ - name: Try to add single label to swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ check_mode: yes
+ register: output_add_single_label_1
+
+ - name: Try to add single label to swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ register: output_add_single_label_2
+
+ - name: Try to add single label to swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ register: output_add_single_label_3
+
+ - name: Try to add single label to swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ check_mode: yes
+ register: output_add_single_label_4
+
+ - name: assert adding single label to swarm node
+ assert:
+ that:
+ - 'output_add_single_label_1 is changed'
+ - 'output_add_single_label_2 is changed'
+ - 'output_add_single_label_3 is not changed'
+ - 'output_add_single_label_4 is not changed'
+ - 'output_add_single_label_2.node.Spec.Labels | length == 1'
+ - 'output_add_single_label_2.node.Spec.Labels.label1 == "value1"'
+
+####################################################################
+## Add multiple labels #############################################
+####################################################################
+
+ - name: Try to add five labels to swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ check_mode: yes
+ register: output_add_multiple_labels_1
+
+ - name: Try to add five labels to swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ register: output_add_multiple_labels_2
+
+ - name: Try to add five labels to swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ register: output_add_multiple_labels_3
+
+ - name: Try to add five labels to swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ check_mode: yes
+ register: output_add_multiple_labels_4
+
+ - name: assert adding multiple labels to swarm node
+ assert:
+ that:
+ - 'output_add_multiple_labels_1 is changed'
+ - 'output_add_multiple_labels_2 is changed'
+ - 'output_add_multiple_labels_3 is not changed'
+ - 'output_add_multiple_labels_4 is not changed'
+ - 'output_add_multiple_labels_2.node.Spec.Labels | length == 6'
+ - 'output_add_multiple_labels_2.node.Spec.Labels.label1 == "value1"'
+ - 'output_add_multiple_labels_2.node.Spec.Labels.label6 == "value6"'
+
+####################################################################
+## Update label value ##############################################
+####################################################################
+
+ - name: Update value of existing label (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ check_mode: yes
+ register: output_update_label_1
+
+ - name: Update value of existing label
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ register: output_update_label_2
+
+ - name: Update value of existing label (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ register: output_update_label_3
+
+ - name: Update value of existing label (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ check_mode: yes
+ register: output_update_label_4
+
+ - name: assert updating single label assigned to swarm node
+ assert:
+ that:
+ - 'output_update_label_1 is changed'
+ - 'output_update_label_2 is changed'
+ - 'output_update_label_3 is not changed'
+ - 'output_update_label_4 is not changed'
+ - 'output_update_label_2.node.Spec.Labels | length == 6'
+ - 'output_update_label_2.node.Spec.Labels.label1 == "value1111"'
+ - 'output_update_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Update multiple labels values ###################################
+####################################################################
+
+ - name: Update value of multiple existing label (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ check_mode: yes
+ register: output_update_labels_1
+
+ - name: Update value of multiple existing label
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ register: output_update_labels_2
+
+ - name: Update value of multiple existing label (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ register: output_update_labels_3
+
+ - name: Update value of multiple existing label (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ check_mode: yes
+ register: output_update_labels_4
+
+ - name: assert updating multiple labels assigned to swarm node
+ assert:
+ that:
+ - 'output_update_labels_1 is changed'
+ - 'output_update_labels_2 is changed'
+ - 'output_update_labels_3 is not changed'
+ - 'output_update_labels_4 is not changed'
+ - 'output_update_labels_2.node.Spec.Labels | length == 6'
+ - 'output_update_labels_2.node.Spec.Labels.label1 == "value1111"'
+ - 'output_update_labels_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_update_labels_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove single label #############################################
+####################################################################
+
+ - name: Try to remove single existing label from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ check_mode: yes
+ register: output_remove_label_1
+
+ - name: Try to remove single existing label from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ register: output_remove_label_2
+
+ - name: Try to remove single existing label from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ register: output_remove_label_3
+
+ - name: Try to remove single existing label from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ check_mode: yes
+ register: output_remove_label_4
+
+ - name: assert removing single label from swarm node
+ assert:
+ that:
+ - 'output_remove_label_1 is changed'
+ - 'output_remove_label_2 is changed'
+ - 'output_remove_label_3 is not changed'
+ - 'output_remove_label_4 is not changed'
+ - 'output_remove_label_2.node.Spec.Labels | length == 5'
+ - '"label1" not in output_remove_label_2.node.Spec.Labels'
+ - 'output_remove_label_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
+
+
+####################################################################
+## Remove single not assigned to swarm label #######################
+####################################################################
+
+ - name: Try to remove single non-existing label from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ check_mode: yes
+ register: output_remove_nonexist_label_1
+
+ - name: Try to remove single non-existing label from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ register: output_remove_nonexist_label_2
+
+ - name: Try to remove single non-existing label from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ register: output_remove_nonexist_label_3
+
+ - name: Try to remove single non-existing label from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ check_mode: yes
+ register: output_remove_nonexist_label_4
+
+ - name: assert removing single non-existing label from swarm node
+ assert:
+ that:
+ - 'output_remove_nonexist_label_1 is not changed'
+ - 'output_remove_nonexist_label_2 is not changed'
+ - 'output_remove_nonexist_label_3 is not changed'
+ - 'output_remove_nonexist_label_4 is not changed'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels | length == 5'
+ - '"label1" not in output_remove_nonexist_label_2.node.Spec.Labels'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove multiple labels ##########################################
+####################################################################
+
+ - name: Try to remove two existing labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ check_mode: yes
+ register: output_remove_label_1
+
+ - name: Try to remove two existing labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ register: output_remove_label_2
+
+ - name: Try to remove two existing labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ register: output_remove_label_3
+
+ - name: Try to remove two existing labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ check_mode: yes
+ register: output_remove_label_4
+
+ - name: assert removing multiple labels from swarm node
+ assert:
+ that:
+ - 'output_remove_label_1 is changed'
+ - 'output_remove_label_2 is changed'
+ - 'output_remove_label_3 is not changed'
+ - 'output_remove_label_4 is not changed'
+ - 'output_remove_label_2.node.Spec.Labels | length == 3'
+ - '"label1" not in output_remove_label_2.node.Spec.Labels'
+ - '"label2" not in output_remove_label_2.node.Spec.Labels'
+ - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove multiple labels, mix assigned and not assigned ##########
+####################################################################
+
+  - name: Try to remove mix of existing and non-existing labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ check_mode: yes
+ register: output_remove_mix_labels_1
+
+  - name: Try to remove mix of existing and non-existing labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ register: output_remove_mix_labels_2
+
+  - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ register: output_remove_mix_labels_3
+
+  - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ check_mode: yes
+ register: output_remove_mix_labels_4
+
+ - name: assert removing mix of existing and non-existing labels from swarm node
+ assert:
+ that:
+ - 'output_remove_mix_labels_1 is changed'
+ - 'output_remove_mix_labels_2 is changed'
+ - 'output_remove_mix_labels_3 is not changed'
+ - 'output_remove_mix_labels_4 is not changed'
+ - 'output_remove_mix_labels_2.node.Spec.Labels | length == 2'
+ - '"label1" not in output_remove_mix_labels_2.node.Spec.Labels'
+ - '"label4" not in output_remove_mix_labels_2.node.Spec.Labels'
+ - 'output_remove_mix_labels_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Add and remove labels ###########################################
+####################################################################
+
+ - name: Try to add and remove nonoverlapping labels at the same time (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ check_mode: yes
+ register: output_add_del_labels_1
+
+ - name: Try to add and remove nonoverlapping labels at the same time
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ register: output_add_del_labels_2
+
+ - name: Try to add and remove nonoverlapping labels at the same time (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ register: output_add_del_labels_3
+
+ - name: Try to add and remove nonoverlapping labels at the same time (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ check_mode: yes
+ register: output_add_del_labels_4
+
+ - name: assert adding and removing nonoverlapping labels from swarm node
+ assert:
+ that:
+ - 'output_add_del_labels_1 is changed'
+ - 'output_add_del_labels_2 is changed'
+ - 'output_add_del_labels_3 is not changed'
+ - 'output_add_del_labels_4 is not changed'
+ - 'output_add_del_labels_2.node.Spec.Labels | length == 3'
+ - '"label5" not in output_add_del_labels_2.node.Spec.Labels'
+ - 'output_add_del_labels_2.node.Spec.Labels.label8 == "value8"'
+
+####################################################################
+## Add and remove labels with label in both lists ##################
+####################################################################
+
+ - name: Try to add or update and remove overlapping labels at the same time (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ check_mode: yes
+    register: output_add_del_overlap_labels_1
+
+ - name: Try to add or update and remove overlapping labels at the same time
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+    register: output_add_del_overlap_labels_2
+
+ - name: Try to add or update and remove overlapping labels at the same time (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+    register: output_add_del_overlap_labels_3
+
+ - name: Try to add or update and remove overlapping labels at the same time (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ check_mode: yes
+    register: output_add_del_overlap_labels_4
+
+ - name: assert adding or updating and removing overlapping labels from swarm node
+ assert:
+ that:
+        - 'output_add_del_overlap_labels_1 is changed'
+        - 'output_add_del_overlap_labels_2 is changed'
+        - 'output_add_del_overlap_labels_3 is not changed'
+        - 'output_add_del_overlap_labels_4 is not changed'
+        - 'output_add_del_overlap_labels_2.node.Spec.Labels | length == 3'
+        - '"label7" not in output_add_del_overlap_labels_2.node.Spec.Labels'
+        - 'output_add_del_overlap_labels_2.node.Spec.Labels.label6 == "value6666"'
+        - 'output_add_del_overlap_labels_2.node.Spec.Labels.label22 == "value22"'
+
+####################################################################
+## Replace labels #############################################
+####################################################################
+
+ - name: Replace labels on swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ check_mode: yes
+ register: output_replace_labels_1
+
+ - name: Replace labels on swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ register: output_replace_labels_2
+
+ - name: Replace labels on swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ register: output_replace_labels_3
+
+ - name: Replace labels on swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ check_mode: yes
+ register: output_replace_labels_4
+
+ - name: assert replacing labels from swarm node
+ assert:
+ that:
+ - 'output_replace_labels_1 is changed'
+ - 'output_replace_labels_2 is changed'
+ - 'output_replace_labels_3 is not changed'
+ - 'output_replace_labels_4 is not changed'
+ - 'output_replace_labels_2.node.Spec.Labels | length == 2'
+ - '"label6" not in output_replace_labels_2.node.Spec.Labels'
+ - 'output_replace_labels_2.node.Spec.Labels.label12 == "value12"'
+
+####################################################################
+## Remove all labels #############################################
+####################################################################
+
+ - name: Remove all labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ check_mode: yes
+ register: output_remove_labels_1
+
+ - name: Remove all labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ register: output_remove_labels_2
+
+ - name: Remove all labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ register: output_remove_labels_3
+
+ - name: Remove all labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ check_mode: yes
+ register: output_remove_labels_4
+
+  - name: assert removing all labels from swarm node
+ assert:
+ that:
+ - 'output_remove_labels_1 is changed'
+ - 'output_remove_labels_2 is changed'
+ - 'output_remove_labels_3 is not changed'
+ - 'output_remove_labels_4 is not changed'
+ - 'output_remove_labels_2.node.Spec.Labels | length == 0'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/aliases
new file mode 100644
index 00000000..5c691cdf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/main.yml
new file mode 100644
index 00000000..7855f7e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/main.yml
@@ -0,0 +1,11 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_node_info.yml
+ # Maximum of 1.24 (docker API version for docker_node_info) and 1.25 (docker API version for docker_swarm) is 1.25
+ when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_node_info tests!"
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/test_node_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/test_node_info.yml
new file mode 100644
index 00000000..3ee5549b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_node_info/tasks/test_node_info.yml
@@ -0,0 +1,88 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_node_info when docker is not running in swarm mode
+ docker_node_info:
+ ignore_errors: yes
+ register: output
+
+  - name: assert failure when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+  - name: assert changed when creating a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_node_info when docker is running in swarm mode and as manager
+ docker_node_info:
+ register: output
+
+ - name: assert reading docker swarm node facts
+ assert:
+ that:
+ - 'output.nodes | length > 0'
+ - 'output.nodes[0].ID is string'
+
+ - name: Try to get docker_node_info using the self parameter
+ docker_node_info:
+ self: yes
+ register: output
+
+  - name: assert reading swarm facts when using the self parameter
+ assert:
+ that:
+ - 'output.nodes | length == 1'
+ - 'output.nodes[0].ID is string'
+
+ - name: Get local docker node name
+ set_fact:
+ localnodename: "{{ output.nodes[0].Description.Hostname }}"
+
+
+ - name: Try to get docker_node_info using the local node name as parameter
+ docker_node_info:
+ name: "{{ localnodename }}"
+ register: output
+
+  - name: assert reading swarm facts and using node filter (local node name)
+ assert:
+ that:
+ - 'output.nodes | length == 1'
+ - 'output.nodes[0].ID is string'
+
+ - name: Create random name
+ set_fact:
+ randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_node_info using random node name as parameter
+ docker_node_info:
+ name: "{{ randomnodename }}"
+ register: output
+
+  - name: assert reading swarm facts and using node filter (random node name)
+ assert:
+ that:
+ - 'output.nodes | length == 0'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/aliases
new file mode 100644
index 00000000..f1162af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/tasks/main.yml
new file mode 100644
index 00000000..16c4aa05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_prune/tasks/main.yml
@@ -0,0 +1,68 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random names
+ set_fact:
+ cname: "{{ 'ansible-container-%0x' % ((2**32) | random) }}"
+ nname: "{{ 'ansible-network-%0x' % ((2**32) | random) }}"
+ vname: "{{ 'ansible-volume-%0x' % ((2**32) | random) }}"
+
+- block:
+ # Create objects to be pruned
+ - docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ state: present
+ register: container
+ - docker_network:
+ name: "{{ nname }}"
+ state: present
+ register: network
+ - docker_volume:
+ name: "{{ vname }}"
+ state: present
+ register: volume
+
+ # Prune objects
+ - docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: "{{ docker_py_version is version('3.3.0', '>=') }}"
+ register: result
+
+ # Analyze result
+ - debug: var=result
+ - assert:
+ that:
+ # containers
+ - container.container.Id in result.containers
+ - "'containers_space_reclaimed' in result"
+ # images
+ - "'images_space_reclaimed' in result"
+ # networks
+ - network.network.Name in result.networks
+ # volumes
+ - volume.volume.Name in result.volumes
+ - "'volumes_space_reclaimed' in result"
+ # builder_cache
+ - "'builder_cache_space_reclaimed' in result or docker_py_version is version('3.3.0', '<')"
+ - "'builder_cache_space_reclaimed' not in result or docker_py_version is version('3.3.0', '>=')"
+
+ # Test with filters
+ - docker_prune:
+ images: yes
+ images_filters:
+ dangling: true
+ register: result
+
+ - debug: var=result
+
+ when: docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_prune tests!"
+ when: not(docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/aliases
new file mode 100644
index 00000000..1bb02f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/main.yml
new file mode 100644
index 00000000..68d3df8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_secrets.yml
+ when: docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_secrets tests!"
+ when: not(docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/test_secrets.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/test_secrets.yml
new file mode 100644
index 00000000..2c078488
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_secret/tasks/test_secrets.yml
@@ -0,0 +1,124 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+ - name: Parameter name should be required
+ docker_secret:
+ state: present
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when called with no name
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Test parameters
+ docker_secret:
+ name: foo
+ state: present
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when called with no data
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is present but all of the following are missing: data"'
+
+ - name: Create secret
+ docker_secret:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: Create variable secret_id
+ set_fact:
+ secret_id: "{{ output.secret_id }}"
+
+ - name: Inspect secret
+ command: "docker secret inspect {{ secret_id }}"
+ register: inspect
+ ignore_errors: yes
+
+ - debug: var=inspect
+
+ - name: assert secret creation succeeded
+ assert:
+ that:
+ - "'db_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create secret again
+ docker_secret:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: assert create secret is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Create secret again (base64)
+ docker_secret:
+ name: db_password
+ data: b3BlbnNlc2FtZSE=
+ data_is_b64: true
+ state: present
+ register: output
+
+ - name: assert create secret (base64) is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Update secret
+ docker_secret:
+ name: db_password
+ data: newpassword!
+ state: present
+ register: output
+
+ - name: assert secret was updated
+ assert:
+ that:
+ - output.changed
+ - output.secret_id != secret_id
+
+ - name: Remove secret
+ docker_secret:
+ name: db_password
+ state: absent
+
+ - name: Check that secret is removed
+ command: "docker secret inspect {{ secret_id }}"
+ register: output
+ ignore_errors: yes
+
+ - name: assert secret was removed
+ assert:
+ that:
+ - output.failed
+
+ always:
+ - name: Remove Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/aliases
new file mode 100644
index 00000000..1bb02f66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/main.yml
new file mode 100644
index 00000000..5ed63f3e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/test_stack.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/test_stack.yml
new file mode 100644
index 00000000..d1f332c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/tasks/test_stack.yml
@@ -0,0 +1,113 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+  - name: Install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create a stack without name
+ register: output
+ docker_stack:
+ state: present
+ ignore_errors: yes
+
+ - name: assert failure when name not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Create a stack without compose
+ register: output
+ docker_stack:
+ name: test_stack
+ ignore_errors: yes
+
+ - name: assert failure when compose not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "compose parameter must be a list containing at least one element"'
+
+ - name: Ensure stack is absent
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: Template compose files
+ template:
+ src: "{{item}}"
+ dest: "{{output_dir}}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{output_dir}}/stack_compose_base.yml"
+
+ - name: assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ # FIXME: updating the stack prevents leaving the swarm on Shippable
+ #- name: Update stack with YAML
+ # register: output
+ # docker_stack:
+ # state: present
+ # name: test_stack
+ # compose:
+ # - "{{stack_compose_base}}"
+ # - "{{stack_compose_overrides}}"
+ #
+ #- name: assert test_stack correctly changed on update with yaml
+ # assert:
+ # that:
+ # - output is changed
+ # - output.stack_spec_diff == stack_update_expected_diff
+
+ - name: Delete stack
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: assert delete of existing stack returns changed
+ assert:
+ that:
+ - output is changed
+
+ - name: Delete stack again
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: assert state=absent idempotency
+ assert:
+ that:
+ - output is not changed
+
+ always:
+ - name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_base.yml
new file mode 100644
index 00000000..b5162d68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_base.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..1b81c71b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/vars/main.yml
new file mode 100644
index 00000000..7751c86e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack/vars/main.yml
@@ -0,0 +1,15 @@
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/aliases
new file mode 100644
index 00000000..46d1424a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/main.yml
new file mode 100644
index 00000000..b499f80d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack_info.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml
new file mode 100644
index 00000000..a2a39727
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml
@@ -0,0 +1,75 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Get docker_stack_info when docker is not running in swarm mode
+ docker_stack_info:
+ ignore_errors: true
+ register: output
+
+  - name: Assert failure when swarm is not running
+ assert:
+ that:
+ - 'output is failed'
+ - '"Error running docker stack" in output.msg'
+
+ - name: Create a swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address}}"
+
+  - name: Get docker_stack_info when docker is running and no stack is available
+ docker_stack_info:
+ register: output
+
+ - name: Assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results | length == 0'
+
+ - name: Template compose files
+ template:
+ src: "{{ item }}"
+ dest: "{{ output_dir }}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{ output_dir }}/stack_compose_base.yml"
+
+ - name: Assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ - name: Get docker_stack_info when docker is running
+ docker_stack_info:
+ register: output
+
+  - name: Assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results[0].Name == "test_stack"'
+ - 'output.results[0].Orchestrator == "Swarm"'
+ - 'output.results[0].Services == "1"'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml
new file mode 100644
index 00000000..b5162d68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..1b81c71b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/vars/main.yml
new file mode 100644
index 00000000..7751c86e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_info/vars/main.yml
@@ -0,0 +1,15 @@
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/aliases
new file mode 100644
index 00000000..46d1424a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/main.yml
new file mode 100644
index 00000000..0990e90b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack_task_info.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack_task_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml
new file mode 100644
index 00000000..88e9eca3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml
@@ -0,0 +1,84 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Get docker_stack_info when docker is not running in swarm mode
+ docker_stack_info:
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called while swarm is not running
+ assert:
+ that:
+ - 'output is failed'
+ - '"Error running docker stack" in output.msg'
+
+ - name: Create a swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address }}"
+
+ - name: Get docker_stack_info when docker is running and no stack available
+ docker_stack_info:
+ register: output
+
+ - name: Assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results | length == 0'
+
+ - name: Template compose files
+ template:
+ src: "{{ item }}"
+ dest: "{{ output_dir }}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{ output_dir }}/stack_compose_base.yml"
+
+ - name: Assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ - name: Wait a bit to make sure stack is running
+ pause:
+ seconds: 5
+
+ - name: Get docker_stack_info when docker is running
+ docker_stack_info:
+ register: output
+
+ - name: Get docker_stack_task_info for the first stack
+ docker_stack_task_info:
+ name: "{{ output.results[0].Name }}"
+ register: output
+
+ - name: assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results[0].DesiredState == "Running"'
+ - 'output.results[0].Image == docker_test_image_busybox'
+ - 'output.results[0].Name == "test_stack_busybox.1"'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml
new file mode 100644
index 00000000..b5162d68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..1b81c71b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml
@@ -0,0 +1,5 @@
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/vars/main.yml
new file mode 100644
index 00000000..7751c86e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_stack_task_info/vars/main.yml
@@ -0,0 +1,15 @@
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/aliases
new file mode 100644
index 00000000..b06411b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/aliases
@@ -0,0 +1,13 @@
+shippable/posix/group2
+disabled # See: https://github.com/ansible/ansible/issues/61815
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system. On VMs, we restart docker daemon
+ # after finishing the tests to minimize potential effects
+ # on other tests.
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/meta/main.yml
new file mode 100644
index 00000000..078660a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - setup_docker
+ - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/cleanup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/cleanup.yml
new file mode 100644
index 00000000..3e19bfcd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/cleanup.yml
@@ -0,0 +1,35 @@
+- name: CLEANUP | Leave Docker Swarm
+ docker_swarm:
+ state: absent
+ force: true
+ ignore_errors: yes
+ register: leave_swarm
+
+- name: CLEANUP | Kill Docker and cleanup
+ when: leave_swarm is failed
+ block:
+ - name: CLEANUP | Kill docker daemon
+ command: systemctl kill -s 9 docker
+ become: yes
+
+ - name: CLEANUP | Clear out /var/lib/docker
+ shell: rm -rf /var/lib/docker/*
+ args:
+ warn: no
+
+ - name: CLEANUP | Start docker daemon
+ service:
+ name: docker
+ state: started
+ become: yes
+
+ - name: CLEANUP | Wait for docker daemon to be fully started
+ command: docker ps
+ register: result
+ until: result is success
+ retries: 10
+
+ - name: CLEANUP | Leave Docker Swarm
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/main.yml
new file mode 100644
index 00000000..597b71a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/main.yml
@@ -0,0 +1,23 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Run Docker Swarm tests
+ when:
+ - docker_py_version is version('1.10.0', '>=')
+ - docker_api_version is version('1.25', '>=')
+
+ block:
+ - include_tasks: "{{ item }}"
+ with_fileglob:
+ - 'tests/*.yml'
+
+ always:
+ - import_tasks: cleanup.yml
+
+- fail:
+ msg: "Too old docker / docker-py version to run docker_swarm tests!"
+ when:
+ - not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>='))
+ - (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/run-test.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/run-test.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/basic.yml
new file mode 100644
index 00000000..4e9005fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/basic.yml
@@ -0,0 +1,158 @@
+- debug:
+ msg: Running tests/basic.yml
+
+####################################################################
+## Errors ##########################################################
+####################################################################
+- name: Test parameters with state=join
+ docker_swarm:
+ state: join
+ ignore_errors: yes
+ register: output
+
+- name: assert failure when called with state=join and no remote_addrs, join_token
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is join but all of the following are missing: remote_addrs, join_token"'
+
+- name: Test parameters with state=remove
+ docker_swarm:
+ state: remove
+ ignore_errors: yes
+ register: output
+
+- name: assert failure when called with state=remove and no node_id
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is remove but all of the following are missing: node_id"'
+
+####################################################################
+## Creation ########################################################
+####################################################################
+
+- name: Create a Swarm cluster (check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ diff: yes
+ register: output_2
+
+- name: Create a Swarm cluster (idempotent)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ diff: yes
+ register: output_3
+
+- name: Create a Swarm cluster (idempotent, check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: Create a Swarm cluster (force re-create)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ force: yes
+ diff: yes
+ register: output_5
+
+- name: Create a Swarm cluster (force re-create, check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ force: yes
+ check_mode: yes
+ diff: yes
+ register: output_6
+
+- name: assert changed when creating a new swarm cluster
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output_2.swarm_facts.JoinTokens.Manager'
+ - 'output_2.swarm_facts.JoinTokens.Worker'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## Removal #########################################################
+####################################################################
+
+- name: Remove a Swarm cluster (check mode)
+ docker_swarm:
+ state: absent
+ force: true
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
+ diff: yes
+ register: output_2
+
+- name: Remove a Swarm cluster (idempotent)
+ docker_swarm:
+ state: absent
+ force: true
+ diff: yes
+ register: output_3
+
+- name: Remove a Swarm cluster (idempotent, check mode)
+ docker_swarm:
+ state: absent
+ force: true
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: assert changed when removing a swarm cluster
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Node has left the swarm cluster"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+- include_tasks: cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml
new file mode 100644
index 00000000..6f9f1090
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml
@@ -0,0 +1,114 @@
+- debug:
+ msg: Running tests/options-ca.yml
+- name: options-ca
+ when: pyopenssl_version.stdout is version('0.15', '>=') or cryptography_version.stdout is version('1.6', '>=')
+ block:
+ - name: Generate privatekey
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.openssl_privatekey:
+ path: '{{ output_dir }}/ansible_{{ key }}.key'
+ size: 2048
+ mode: '0666'
+ - name: Generate CSR
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.openssl_csr:
+ path: '{{ output_dir }}/ansible_{{ key }}.csr'
+ privatekey_path: '{{ output_dir }}/ansible_{{ key }}.key'
+ basic_constraints:
+ - CA:TRUE
+ key_usage:
+ - keyCertSign
+ - name: Generate self-signed certificate
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.openssl_certificate:
+ path: '{{ output_dir }}/ansible_{{ key }}.pem'
+ privatekey_path: '{{ output_dir }}/ansible_{{ key }}.key'
+ csr_path: '{{ output_dir }}/ansible_{{ key }}.csr'
+ provider: selfsigned
+ - name: signing_ca_cert and signing_ca_key (check mode)
+ docker_swarm:
+ advertise_addr: '{{ansible_default_ipv4.address | default(''127.0.0.1'')}}'
+ state: present
+ signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.pem'') }}'
+ signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.key'') }}'
+ timeout: 120
+ check_mode: true
+ diff: true
+ register: output_1
+ ignore_errors: true
+ - name: signing_ca_cert and signing_ca_key
+ docker_swarm:
+ advertise_addr: '{{ansible_default_ipv4.address | default(''127.0.0.1'')}}'
+ state: present
+ signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.pem'') }}'
+ signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key1.key'') }}'
+ timeout: 120
+ diff: true
+ register: output_2
+ ignore_errors: true
+ - name: Private key
+ debug: msg="{{ lookup('file', output_dir ~ '/ansible_key1.key') }}"
+ - name: Cert
+ debug: msg="{{ lookup('file', output_dir ~ '/ansible_key1.pem') }}"
+ - docker_swarm_info: null
+ register: output
+ ignore_errors: true
+ - debug: var=output
+ - name: signing_ca_cert and signing_ca_key (change, check mode)
+ docker_swarm:
+ state: present
+ signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.pem'') }}'
+ signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.key'') }}'
+ timeout: 120
+ check_mode: true
+ diff: true
+ register: output_5
+ ignore_errors: true
+ - name: signing_ca_cert and signing_ca_key (change)
+ docker_swarm:
+ state: present
+ signing_ca_cert: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.pem'') }}'
+ signing_ca_key: '{{ lookup(''file'', output_dir ~ ''/ansible_key2.key'') }}'
+ timeout: 120
+ diff: true
+ register: output_6
+ ignore_errors: true
+ - name: assert signing_ca_cert and signing_ca_key
+ assert:
+ that:
+ - output_1 is changed
+ - 'output_1.actions[0] | regex_search("New Swarm cluster created: ")'
+ - output_1.diff.before is defined
+ - output_1.diff.after is defined
+ - output_2 is changed
+ - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")'
+ - output_2.diff.before is defined
+ - output_2.diff.after is defined
+ - output_5 is changed
+ - output_5.actions[0] == "Swarm cluster updated"
+ - output_5.diff.before is defined
+ - output_5.diff.after is defined
+ - output_6 is changed
+ - output_6.actions[0] == "Swarm cluster updated"
+ - output_6.diff.before is defined
+ - output_6.diff.after is defined
+ when: docker_py_version is version('2.6.0', '>=')
+ - assert:
+ that:
+ - output_1 is failed
+ - ('version is ' ~ docker_py_version ~ ' ') in output_1.msg
+ - '''Minimum version required is 2.6.0 '' in output_1.msg'
+ when: docker_py_version is version('2.6.0', '<')
+ - include_tasks: cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options.yml
new file mode 100644
index 00000000..c23ab3a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/options.yml
@@ -0,0 +1,1158 @@
+- debug:
+ msg: Running tests/options.yml
+
+- name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ name: default
+ diff: yes
+
+####################################################################
+## autolock_managers ###############################################
+####################################################################
+
+- name: autolock_managers (check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: yes
+ check_mode: yes
+ diff: yes
+ register: output_1
+ ignore_errors: yes
+
+- name: autolock_managers
+ docker_swarm:
+ state: present
+ autolock_managers: yes
+ diff: yes
+ register: output_2
+ ignore_errors: yes
+
+- name: autolock_managers (idempotent)
+ docker_swarm:
+ state: present
+ autolock_managers: yes
+ diff: yes
+ register: output_3
+ ignore_errors: yes
+
+- name: autolock_managers (idempotent, check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: yes
+ check_mode: yes
+ diff: yes
+ register: output_4
+ ignore_errors: yes
+
+- name: autolock_managers (change, check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: no
+ check_mode: yes
+ diff: yes
+ register: output_5
+ ignore_errors: yes
+
+- name: autolock_managers (change)
+ docker_swarm:
+ state: present
+ autolock_managers: no
+ diff: yes
+ register: output_6
+ ignore_errors: yes
+
+- name: autolock_managers (force new swarm)
+ docker_swarm:
+ state: present
+ force: yes
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ autolock_managers: yes
+ diff: yes
+ register: output_7
+ ignore_errors: yes
+
+- name: assert autolock_managers changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+
+- name: assert UnlockKey in swarm_facts
+ assert:
+ that:
+ - 'output_2.swarm_facts.UnlockKey'
+ - 'output_3.swarm_facts.UnlockKey is none'
+ - 'output_6.swarm_facts.UnlockKey is none'
+ - 'output_7.swarm_facts.UnlockKey'
+ when: docker_py_version is version('2.7.0', '>=')
+
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## ca_force_rotate #################################################
+####################################################################
+
+- name: ca_force_rotate (check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ check_mode: yes
+ diff: yes
+ register: output_1
+ ignore_errors: yes
+
+- name: ca_force_rotate
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ diff: yes
+ register: output_2
+ ignore_errors: yes
+
+- name: ca_force_rotate (idempotent)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ diff: yes
+ register: output_3
+ ignore_errors: yes
+
+- name: ca_force_rotate (idempotent, check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ check_mode: yes
+ diff: yes
+ register: output_4
+ ignore_errors: yes
+
+- name: ca_force_rotate (change, check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 0
+ check_mode: yes
+ diff: yes
+ register: output_5
+ ignore_errors: yes
+
+- name: ca_force_rotate (change)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 0
+ diff: yes
+ register: output_6
+ ignore_errors: yes
+
+- name: assert ca_force_rotate changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dispatcher_heartbeat_period #####################################
+####################################################################
+
+- name: dispatcher_heartbeat_period (check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: dispatcher_heartbeat_period
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ diff: yes
+ register: output_2
+
+- name: dispatcher_heartbeat_period (idempotent)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ diff: yes
+ register: output_3
+
+- name: dispatcher_heartbeat_period (idempotent, check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: dispatcher_heartbeat_period (change, check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 23
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: dispatcher_heartbeat_period (change)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 23
+ diff: yes
+ register: output_6
+
+- name: assert dispatcher_heartbeat_period changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## election_tick ###################################################
+####################################################################
+
+- name: election_tick (check mode)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: election_tick
+ docker_swarm:
+ state: present
+ election_tick: 20
+ diff: yes
+ register: output_2
+
+- name: election_tick (idempotent)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ diff: yes
+ register: output_3
+
+- name: election_tick (idempotent, check mode)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: election_tick (change, check mode)
+ docker_swarm:
+ state: present
+ election_tick: 5
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: election_tick (change)
+ docker_swarm:
+ state: present
+ election_tick: 5
+ diff: yes
+ register: output_6
+
+- name: assert election_tick changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## heartbeat_tick ##################################################
+####################################################################
+
+- name: heartbeat_tick (check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: heartbeat_tick
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ diff: yes
+ register: output_2
+
+- name: heartbeat_tick (idempotent)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ diff: yes
+ register: output_3
+
+- name: heartbeat_tick (idempotent, check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: heartbeat_tick (change, check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 3
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: heartbeat_tick (change)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 3
+ diff: yes
+ register: output_6
+
+- name: assert heartbeat_tick changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## keep_old_snapshots ##############################################
+####################################################################
+- name: keep_old_snapshots (check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: keep_old_snapshots
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ diff: yes
+ register: output_2
+
+- name: keep_old_snapshots (idempotent)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ diff: yes
+ register: output_3
+
+- name: keep_old_snapshots (idempotent, check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: keep_old_snapshots (change, check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 2
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: keep_old_snapshots (change)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 2
+ diff: yes
+ register: output_6
+
+- name: assert keep_old_snapshots changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## labels ##########################################################
+####################################################################
+- name: labels (check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ check_mode: yes
+ diff: yes
+ register: output_1
+ ignore_errors: yes
+
+- name: labels
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ diff: yes
+ register: output_2
+ ignore_errors: yes
+
+- name: labels (idempotent)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ diff: yes
+ register: output_3
+ ignore_errors: yes
+
+- name: labels (idempotent, check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ check_mode: yes
+ diff: yes
+ register: output_4
+ ignore_errors: yes
+
+- name: labels (change, check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ check_mode: yes
+ diff: yes
+ register: output_5
+ ignore_errors: yes
+
+- name: labels (change)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ diff: yes
+ register: output_6
+ ignore_errors: yes
+
+- name: labels (not specifying, check mode)
+ docker_swarm:
+ state: present
+ check_mode: yes
+ diff: yes
+ register: output_7
+ ignore_errors: yes
+
+- name: labels (not specifying)
+ docker_swarm:
+ state: present
+ diff: yes
+ register: output_8
+ ignore_errors: yes
+
+- name: labels (idempotency, check that labels are still there)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ diff: yes
+ register: output_9
+ ignore_errors: yes
+
+- name: labels (empty, check mode)
+ docker_swarm:
+ state: present
+ labels: {}
+ check_mode: yes
+ diff: yes
+ register: output_10
+ ignore_errors: yes
+
+- name: labels (empty)
+ docker_swarm:
+ state: present
+ labels: {}
+ diff: yes
+ register: output_11
+ ignore_errors: yes
+
+- name: labels (empty, idempotent, check mode)
+ docker_swarm:
+ state: present
+ labels: {}
+ check_mode: yes
+ diff: yes
+ register: output_12
+ ignore_errors: yes
+
+- name: labels (empty, idempotent)
+ docker_swarm:
+ state: present
+ labels: {}
+ diff: yes
+ register: output_13
+ ignore_errors: yes
+
+- name: assert labels changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ - 'output_7 is not changed'
+ - 'output_7.actions[0] == "No modification"'
+ - 'output_7.diff.before is defined'
+ - 'output_7.diff.after is defined'
+ - 'output_8 is not changed'
+ - 'output_8.actions[0] == "No modification"'
+ - 'output_8.diff.before is defined'
+ - 'output_8.diff.after is defined'
+ - 'output_9 is not changed'
+ - 'output_9.actions[0] == "No modification"'
+ - 'output_9.diff.before is defined'
+ - 'output_9.diff.after is defined'
+ - 'output_10 is changed'
+ - 'output_10.actions[0] == "Swarm cluster updated"'
+ - 'output_10.diff.before is defined'
+ - 'output_10.diff.after is defined'
+ - 'output_11 is changed'
+ - 'output_11.actions[0] == "Swarm cluster updated"'
+ - 'output_11.diff.before is defined'
+ - 'output_11.diff.after is defined'
+ - 'output_12 is not changed'
+ - 'output_12.actions[0] == "No modification"'
+ - 'output_12.diff.before is defined'
+ - 'output_12.diff.after is defined'
+ - 'output_13 is not changed'
+ - 'output_13.actions[0] == "No modification"'
+ - 'output_13.diff.before is defined'
+ - 'output_13.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## log_entries_for_slow_followers ##################################
+####################################################################
+- name: log_entries_for_slow_followers (check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: log_entries_for_slow_followers
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ diff: yes
+ register: output_2
+
+- name: log_entries_for_slow_followers (idempotent)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ diff: yes
+ register: output_3
+
+- name: log_entries_for_slow_followers (idempotent, check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: log_entries_for_slow_followers (change, check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 23
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: log_entries_for_slow_followers (change)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 23
+ diff: yes
+ register: output_6
+
+- name: assert log_entries_for_slow_followers changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## name ############################################################
+####################################################################
+- name: name (idempotent, check mode)
+ docker_swarm:
+ state: present
+ name: default
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: name (idempotent)
+ docker_swarm:
+ state: present
+ name: default
+ diff: yes
+ register: output_2
+
+# The name 'default' is hardcoded in docker swarm. Trying to change
+# it causes a failure. This might change in the future, so we also
+# accept a change for this test.
+- name: name (change, should fail)
+ docker_swarm:
+ state: present
+ name: foobar
+ diff: yes
+ register: output_3
+ ignore_errors: yes
+
+- name: assert name changes
+ assert:
+ that:
+ - 'output_1 is not changed'
+ - 'output_1.actions[0] == "No modification"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is not changed'
+ - 'output_2.actions[0] == "No modification"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is failed or output_3 is changed'
+
+####################################################################
+## node_cert_expiry ################################################
+####################################################################
+- name: node_cert_expiry (check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: node_cert_expiry
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ diff: yes
+ register: output_2
+
+- name: node_cert_expiry (idempotent)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ diff: yes
+ register: output_3
+
+- name: node_cert_expiry (idempotent, check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: node_cert_expiry (change, check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 8766000000000000
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: node_cert_expiry (change)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 8766000000000000
+ diff: yes
+ register: output_6
+
+- name: assert node_cert_expiry changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## rotate_manager_token ############################################
+####################################################################
+- name: rotate_manager_token (true, check mode)
+ docker_swarm:
+ state: present
+ rotate_manager_token: yes
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: rotate_manager_token (true)
+ docker_swarm:
+ state: present
+ rotate_manager_token: yes
+ diff: yes
+ register: output_2
+
+- name: rotate_manager_token (false, idempotent)
+ docker_swarm:
+ state: present
+ rotate_manager_token: no
+ diff: yes
+ register: output_3
+
+- name: rotate_manager_token (false, check mode)
+ docker_swarm:
+ state: present
+ rotate_manager_token: no
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: assert rotate_manager_token changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+####################################################################
+## rotate_worker_token #############################################
+####################################################################
+- name: rotate_worker_token (true, check mode)
+ docker_swarm:
+ state: present
+ rotate_worker_token: yes
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: rotate_worker_token (true)
+ docker_swarm:
+ state: present
+ rotate_worker_token: yes
+ diff: yes
+ register: output_2
+
+- name: rotate_worker_token (false, idempotent)
+ docker_swarm:
+ state: present
+ rotate_worker_token: no
+ diff: yes
+ register: output_3
+
+- name: rotate_worker_token (false, check mode)
+ docker_swarm:
+ state: present
+ rotate_worker_token: no
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: assert rotate_worker_token changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+####################################################################
+## snapshot_interval ###############################################
+####################################################################
+- name: snapshot_interval (check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: snapshot_interval
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ diff: yes
+ register: output_2
+
+- name: snapshot_interval (idempotent)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ diff: yes
+ register: output_3
+
+- name: snapshot_interval (idempotent, check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: snapshot_interval (change, check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 54321
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: snapshot_interval (change)
+ docker_swarm:
+ state: present
+ snapshot_interval: 54321
+ diff: yes
+ register: output_6
+
+- name: assert snapshot_interval changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## task_history_retention_limit ####################################
+####################################################################
+- name: task_history_retention_limit (check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ check_mode: yes
+ diff: yes
+ register: output_1
+
+- name: task_history_retention_limit
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ diff: yes
+ register: output_2
+
+- name: task_history_retention_limit (idempotent)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ diff: yes
+ register: output_3
+
+- name: task_history_retention_limit (idempotent, check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ check_mode: yes
+ diff: yes
+ register: output_4
+
+- name: task_history_retention_limit (change, check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 7
+ check_mode: yes
+ diff: yes
+ register: output_5
+
+- name: task_history_retention_limit (change)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 7
+ diff: yes
+ register: output_6
+
+- name: assert task_history_retention_limit changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+- include_tasks: cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml
new file mode 100644
index 00000000..a900953e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml
@@ -0,0 +1,90 @@
+- debug:
+ msg: Running tests/remote-addr-pool.yml
+
+####################################################################
+## default_addr_pool ###############################################
+####################################################################
+
+- name: default_addr_pool
+ docker_swarm:
+ state: present
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: yes
+ register: output_1
+ ignore_errors: yes
+
+- name: default_addr_pool (idempotent)
+ docker_swarm:
+ state: present
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: yes
+ register: output_2
+ ignore_errors: yes
+
+- name: assert default_addr_pool
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_2 is not changed'
+ - 'output_2.swarm_facts.DefaultAddrPool == ["2.0.0.0/16"]'
+ when:
+ - docker_api_version is version('1.39', '>=')
+ - docker_py_version is version('4.0.0', '>=')
+
+- name: assert default_addr_pool failed when unsupported
+ assert:
+ that:
+ - 'output_1 is failed'
+ - "'Minimum version required' in output_1.msg"
+ when: docker_api_version is version('1.39', '<') or
+ docker_py_version is version('4.0.0', '<')
+
+####################################################################
+## subnet_size #####################################################
+####################################################################
+- name: Leave swarm
+ docker_swarm:
+ state: absent
+ force: yes
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: yes
+
+- name: subnet_size
+ docker_swarm:
+ state: present
+ force: yes
+ subnet_size: 26
+ diff: yes
+ register: output_1
+ ignore_errors: yes
+
+- name: subnet_size (idempotent)
+ docker_swarm:
+ state: present
+ subnet_size: 26
+ diff: yes
+ register: output_2
+ ignore_errors: yes
+
+- name: assert subnet_size
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_2 is not changed'
+ - 'output_2.swarm_facts.SubnetSize == 26'
+ when:
+ - docker_api_version is version('1.39', '>=')
+ - docker_py_version is version('4.0.0', '>=')
+
+- name: assert subnet_size failed when unsupported
+ assert:
+ that:
+ - output_1 is failed
+ - "'Minimum version required' in output_1.msg"
+ when: docker_api_version is version('1.39', '<') or
+ docker_py_version is version('4.0.0', '<')
+
+- include_tasks: cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/aliases
new file mode 100644
index 00000000..46d1424a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/main.yml
new file mode 100644
index 00000000..1421701f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/main.yml
@@ -0,0 +1,11 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_swarm_info.yml
+ # Maximum of 1.24 (docker API version for docker_swarm_info) and 1.25 (docker API version for docker_swarm) is 1.25
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_info tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml
new file mode 100644
index 00000000..349d7cc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml
@@ -0,0 +1,190 @@
+---
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_swarm_info when docker is not running in swarm mode
+ docker_swarm_info:
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when called while swarm is not in use or not run on a manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == false'
+ - 'output.docker_swarm_manager == false'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: assert changed when creating a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_swarm_info when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ register: output
+
+ - name: assert reading docker swarm facts
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and list of nodes when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ nodes: yes
+ register: output
+
+ - name: assert reading swarm facts with list of nodes option
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes[0].ID is string'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Get local docker node name
+ set_fact:
+ localnodename: "{{ output.nodes[0].Hostname }}"
+
+
+ - name: Try to get docker_swarm_info and verbose list of nodes when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: output
+
+ - name: assert reading swarm facts with list of nodes and verbose output options
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes[0].ID is string'
+ - 'output.nodes[0].CreatedAt'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and list of nodes with filters providing existing node name
+ docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: "{{ localnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (existing node name)
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes | length == 1'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Create random name
+ set_fact:
+ randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_swarm_info and list of nodes with filters providing non-existing random node name
+ docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: "{{ randomnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (non-existing random node name)
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes | length == 0'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and swarm_unlock_key on a swarm that is not locked
+ docker_swarm_info:
+ unlock_key: yes
+ register: output
+ ignore_errors: yes
+
+ - name: assert reading swarm facts and non-existing swarm unlock key
+ assert:
+ that:
+ - 'output.swarm_unlock_key is none'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ when: docker_py_version is version('2.7.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.7.0 ' in output.msg"
+ when: docker_py_version is version('2.7.0', '<')
+
+ - name: Update swarm cluster to be locked
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ autolock_managers: true
+ register: autolock_managers_update_output
+ ignore_errors: yes
+
+ - name: Try to get docker_swarm_info and swarm_unlock_key
+ docker_swarm_info:
+ unlock_key: yes
+ register: output
+ ignore_errors: yes
+
+ - name: assert reading swarm facts and swarm unlock key
+ assert:
+ that:
+ - 'output.swarm_unlock_key is string'
+ - 'output.swarm_unlock_key == autolock_managers_update_output.swarm_facts.UnlockKey'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ when: docker_py_version is version('2.7.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.7.0 ' in output.msg"
+ when: docker_py_version is version('2.7.0', '<')
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
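The assertions in this file exercise the flags docker_swarm_info returns on every call (can_talk_to_docker, docker_swarm_active, docker_swarm_manager) alongside swarm_facts. A minimal sketch of how a playbook outside the test harness might branch on those same flags; the task names and the debug message are illustrative assumptions:

- name: Collect swarm facts
  docker_swarm_info:
  register: swarm_info
  ignore_errors: yes

- name: Act only on a reachable swarm manager
  debug:
    msg: "Swarm ID is {{ swarm_info.swarm_facts.ID }}"
  when:
    - swarm_info.can_talk_to_docker
    - swarm_info.docker_swarm_active
    - swarm_info.docker_swarm_manager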
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/aliases
new file mode 100644
index 00000000..c37e2707
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group5
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-1 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-1
new file mode 100644
index 00000000..b15f1b64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-1
@@ -0,0 +1,2 @@
+TEST3=val3
+TEST4=val4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-2
new file mode 100644
index 00000000..eff99aca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/files/env-file-2
@@ -0,0 +1,2 @@
+TEST3=val5
+TEST5=val5
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/main.yml
new file mode 100644
index 00000000..4cb69597
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/main.yml
@@ -0,0 +1,80 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+ service_names: []
+ network_names: []
+ config_names: []
+ secret_names: []
+ volume_names: []
+
+- debug:
+ msg: "Using container name prefix {{ name_prefix }}"
+
+# Run the tests
+- block:
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: Make sure all services are removed
+ docker_swarm_service:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ service_names }}"
+ ignore_errors: yes
+
+ - name: Make sure all networks are removed
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: yes
+ loop: "{{ network_names }}"
+ ignore_errors: yes
+
+ - name: Make sure all configs are removed
+ docker_config:
+ name: "{{ item }}"
+ state: absent
+ force: yes
+ loop: "{{ config_names }}"
+ ignore_errors: yes
+
+ - name: Make sure all volumes are removed
+ docker_volume:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ volume_names }}"
+ ignore_errors: yes
+
+ - name: Make sure all secrets are removed
+ docker_secret:
+ name: "{{ item }}"
+ state: absent
+ force: yes
+ loop: "{{ secret_names }}"
+ ignore_errors: yes
+
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: yes
+ ignore_errors: yes
+ # Maximum of 1.24 (docker API version for docker_swarm_service) and 1.25 (docker API version for docker_swarm) is 1.25
+ when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service tests!"
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml
new file mode 100644
index 00000000..ad4d5695
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml
@@ -0,0 +1,413 @@
+---
+
+- name: Registering service and config names
+ set_fact:
+ service_name: "{{ name_prefix ~ '-configs' }}"
+ config_name_1: "{{ name_prefix ~ '-configs-1' }}"
+ config_name_2: "{{ name_prefix ~ '-configs-2' }}"
+
+- name: Registering config names
+ set_fact:
+ config_names: "{{ config_names + [config_name_1, config_name_2] }}"
+
+- docker_config:
+ name: "{{ config_name_1 }}"
+ data: "hello"
+ state: present
+ register: "config_result_1"
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- docker_config:
+ name: "{{ config_name_2 }}"
+ data: "test"
+ state: present
+ register: "config_result_2"
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+####################################################################
+## configs #########################################################
+####################################################################
+
+- name: configs
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_1
+ ignore_errors: yes
+
+- name: configs (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_2
+ ignore_errors: yes
+
+- name: configs (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_3
+ ignore_errors: yes
+
+- name: configs (add idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_id: "{{ config_result_2.config_id|default('') }}"
+ config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_4
+ ignore_errors: yes
+
+- name: configs (add idempotency no id)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_5
+ ignore_errors: yes
+
+- name: configs (add idempotency no id and re-ordered)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_6
+ ignore_errors: yes
+
+- name: configs (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs: []
+ register: configs_7
+ ignore_errors: yes
+
+- name: configs (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs: []
+ register: configs_8
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is not changed
+ - configs_5 is not changed
+ - configs_6 is not changed
+ - configs_7 is changed
+ - configs_8 is not changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## configs (uid) ###################################################
+####################################################################
+
+- name: configs (uid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1000
+ register: configs_1
+ ignore_errors: yes
+
+- name: configs (uid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1000
+ register: configs_2
+ ignore_errors: yes
+
+- name: configs (uid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1002
+ register: configs_3
+ ignore_errors: yes
+
+- name: configs (uid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: "1001"
+ register: configs_4
+ ignore_errors: yes
+
+- name: configs (uid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: "1001"
+ register: configs_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is changed
+ - configs_5 is not changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+
+####################################################################
+## configs (gid) ###################################################
+####################################################################
+
+- name: configs (gid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1000
+ register: configs_1
+ ignore_errors: yes
+
+- name: configs (gid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1000
+ register: configs_2
+ ignore_errors: yes
+
+- name: configs (gid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1002
+ register: configs_3
+ ignore_errors: yes
+
+- name: configs (gid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: "1001"
+ register: configs_4
+ ignore_errors: yes
+
+- name: configs (gid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: "1001"
+ register: configs_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is changed
+ - configs_5 is not changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## configs (mode) ##################################################
+####################################################################
+
+- name: configs (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0600
+ register: configs_1
+ ignore_errors: yes
+
+- name: configs (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0600
+ register: configs_2
+ ignore_errors: yes
+
+- name: configs (mode change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0777
+ register: configs_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete configs
+ docker_config:
+ name: "{{ config_name }}"
+ state: absent
+ force: yes
+ loop:
+ - "{{ config_name_1 }}"
+ - "{{ config_name_2 }}"
+ loop_control:
+ loop_var: config_name
+ ignore_errors: yes
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
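The configs tests above verify that a service definition stays idempotent whether configs are referenced by config_id, by config_name alone, or in a different order. A minimal sketch of the same wiring outside the test harness, assuming a hypothetical config and service name (my_app_config, my_app) and relying on docker_config returning config_id as asserted above:

- name: Create a config
  docker_config:
    name: my_app_config
    data: "hello"
    state: present
  register: my_config

- name: Attach the config to a service
  docker_swarm_service:
    name: my_app
    image: "{{ docker_test_image_alpine }}"
    configs:
      - config_id: "{{ my_config.config_id }}"
        config_name: my_app_config
        filename: /etc/my_app.conf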
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml
new file mode 100644
index 00000000..79014dd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml
@@ -0,0 +1,158 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-logging' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## logging.driver ##################################################
+####################################################################
+
+- name: logging.driver
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ register: logging_driver_1
+
+- name: logging.driver (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ register: logging_driver_2
+
+- name: log_driver (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ log_driver: json-file
+ register: logging_driver_2b
+
+- name: logging.driver (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: syslog
+ register: logging_driver_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - logging_driver_1 is changed
+ - logging_driver_2 is not changed
+ - logging_driver_2b is not changed
+ - logging_driver_3 is changed
+
+####################################################################
+## logging.options #################################################
+####################################################################
+
+- name: logging_options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ labels: production_status
+ env: os,customer
+ register: logging_options_1
+
+- name: logging_options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ env: os,customer
+ labels: production_status
+ register: logging_options_2
+
+- name: log_driver_options (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ log_driver: json-file
+ log_driver_options:
+ env: os,customer
+ labels: production_status
+ register: logging_options_2b
+
+- name: logging_options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ env: os,customer
+ labels: production_status
+ max-file: "1"
+ register: logging_options_3
+
+- name: logging_options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options: {}
+ register: logging_options_4
+
+- name: logging_options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options: {}
+ register: logging_options_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - logging_options_1 is changed
+ - logging_options_2 is not changed
+ - logging_options_2b is not changed
+ - logging_options_3 is changed
+ - logging_options_4 is changed
+ - logging_options_5 is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml
new file mode 100644
index 00000000..7d24e089
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml
@@ -0,0 +1,113 @@
+---
+- block:
+ - name: Create a swarm service without name
+ register: output
+ docker_swarm_service:
+ state: present
+ ignore_errors: yes
+
+ - name: assert failure when name not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Remove a non-existing service
+ register: output
+ docker_swarm_service:
+ state: absent
+ name: non_existing_service
+
+ - name: assert output not changed when deleting non-existing service
+ assert:
+ that:
+ - output is not changed
+
+ - name: create sample service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ endpoint_mode: dnsrr
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: no
+ args:
+ - sleep
+ - "3600"
+
+ - name: assert sample service is created
+ assert:
+ that:
+ - output is changed
+
+ - name: change service args
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: no
+ args:
+ - sleep
+ - "1800"
+
+ - name: assert service args are correct
+ assert:
+ that:
+ - output.swarm_service.args == ['sleep', '1800']
+
+ - name: set service mode to global
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: no
+ endpoint_mode: vip
+ mode: global
+ args:
+ - sleep
+ - "1800"
+
+ - name: assert service mode change caused service rebuild
+ assert:
+ that:
+ - output.rebuilt
+
+ - name: add published ports to service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: no
+ mode: global
+ args:
+ - sleep
+ - "1800"
+ endpoint_mode: vip
+ publish:
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+
+ - name: fake image key as it is not predictable
+ set_fact:
+ ansible_docker_service_output: "{{ output.swarm_service|combine({'image': docker_test_image_busybox}) }}"
+
+ - name: assert service matches expectations
+ assert:
+ that:
+ - ansible_docker_service_output == service_expected_output
+
+ - name: delete sample service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ state: absent
+
+ - name: assert service deletion returns changed
+ assert:
+ that:
+ - output is success
+ - output is changed
+ when: docker_api_version is version('1.24', '>=') and docker_py_version is version('3.0.0', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
new file mode 100644
index 00000000..441547d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
@@ -0,0 +1,601 @@
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-mounts' }}"
+ volume_name_1: "{{ name_prefix ~ '-volume-1' }}"
+ volume_name_2: "{{ name_prefix ~ '-volume-2' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+ volume_names: "{{ volume_names + [volume_name_1, volume_name_2] }}"
+
+- docker_volume:
+ name: "{{ volume_name }}"
+ state: present
+ loop:
+ - "{{ volume_name_1 }}"
+ - "{{ volume_name_2 }}"
+ loop_control:
+ loop_var: volume_name
+
+####################################################################
+## mounts ##########################################################
+####################################################################
+
+- name: mounts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_1
+
+- name: mounts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_2
+
+- name: mounts (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ - source: "/tmp/"
+ target: "/tmp/{{ volume_name_2 }}"
+ type: "bind"
+ register: mounts_3
+
+- name: mounts (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp/"
+ target: "/tmp/{{ volume_name_2 }}"
+ type: "bind"
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_4
+
+- name: mounts (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts: []
+ register: mounts_5
+
+- name: mounts (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts: []
+ register: mounts_6
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_1 is changed
+ - mounts_2 is not changed
+ - mounts_3 is changed
+ - mounts_4 is not changed
+ - mounts_5 is changed
+ - mounts_6 is not changed
+
+####################################################################
+## mounts.readonly #################################################
+####################################################################
+
+- name: mounts.readonly
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: true
+ register: mounts_readonly_1
+
+
+- name: mounts.readonly (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: true
+ register: mounts_readonly_2
+
+- name: mounts.readonly (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: false
+ register: mounts_readonly_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_readonly_1 is changed
+ - mounts_readonly_2 is not changed
+ - mounts_readonly_3 is changed
+
+####################################################################
+## mounts.propagation ##############################################
+####################################################################
+
+- name: mounts.propagation
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "slave"
+ register: mounts_propagation_1
+
+
+- name: mounts.propagation (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "slave"
+ register: mounts_propagation_2
+
+- name: mounts.propagation (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "rprivate"
+ register: mounts_propagation_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_propagation_1 is changed
+ - mounts_propagation_2 is not changed
+ - mounts_propagation_3 is changed
+
+####################################################################
+## mounts.labels ##################################################
+####################################################################
+
+- name: mounts.labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ my-other-label: hello-mars
+ register: mounts_labels_1
+
+
+- name: mounts.labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ my-other-label: hello-mars
+ register: mounts_labels_2
+
+- name: mounts.labels (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ register: mounts_labels_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_labels_1 is changed
+ - mounts_labels_2 is not changed
+ - mounts_labels_3 is changed
+
+####################################################################
+## mounts.no_copy ##################################################
+####################################################################
+
+- name: mounts.no_copy
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: true
+ register: mounts_no_copy_1
+
+
+- name: mounts.no_copy (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: true
+ register: mounts_no_copy_2
+
+- name: mounts.no_copy (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: false
+ register: mounts_no_copy_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_no_copy_1 is changed
+ - mounts_no_copy_2 is not changed
+ - mounts_no_copy_3 is changed
+
+####################################################################
+## mounts.driver_config ############################################
+####################################################################
+
+- name: mounts.driver_config
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "nfs"
+ options:
+ addr: "127.0.0.1"
+ register: mounts_driver_config_1
+
+- name: mounts.driver_config
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "nfs"
+ options:
+ addr: "127.0.0.1"
+ register: mounts_driver_config_2
+
+- name: mounts.driver_config
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "local"
+ register: mounts_driver_config_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_driver_config_1 is changed
+ - mounts_driver_config_2 is not changed
+ - mounts_driver_config_3 is changed
+
+####################################################################
+## mounts.tmpfs_size ###############################################
+####################################################################
+
+- name: mounts.tmpfs_size
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "50M"
+ register: mounts_tmpfs_size_1
+ ignore_errors: yes
+
+- name: mounts.tmpfs_size (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "50M"
+ register: mounts_tmpfs_size_2
+ ignore_errors: yes
+
+- name: mounts.tmpfs_size (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "25M"
+ register: mounts_tmpfs_size_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_tmpfs_size_1 is changed
+ - mounts_tmpfs_size_2 is not changed
+ - mounts_tmpfs_size_3 is changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_tmpfs_size_1 is failed
+ - "'Minimum version required' in mounts_tmpfs_size_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.tmpfs_mode ###############################################
+####################################################################
+
+- name: mounts.tmpfs_mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0444
+ register: mounts_tmpfs_mode_1
+ ignore_errors: yes
+
+- name: mounts.tmpfs_mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0444
+ register: mounts_tmpfs_mode_2
+ ignore_errors: yes
+
+- name: mounts.tmpfs_mode (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0777
+ register: mounts_tmpfs_mode_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_tmpfs_mode_1 is changed
+ - mounts_tmpfs_mode_2 is not changed
+ - mounts_tmpfs_mode_3 is changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_tmpfs_mode_1 is failed
+ - "'Minimum version required' in mounts_tmpfs_mode_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.source ###################################################
+####################################################################
+
+- name: mounts.source (empty for tmpfs)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: ""
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_1
+ ignore_errors: yes
+
+- name: mounts.source (empty for tmpfs idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: ""
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_2
+ ignore_errors: yes
+
+- name: mounts.source (not specified for tmpfs idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mounts_tmpfs_source_1 is changed
+ - mounts_tmpfs_source_2 is not changed
+ - mounts_tmpfs_source_3 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_tmpfs_source_1 is failed
+ - "'Minimum version required' in mounts_tmpfs_source_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete volumes
+ docker_volume:
+ name: "{{ volume_name }}"
+ state: absent
+ loop:
+ - "{{ volume_name_1 }}"
+ - "{{ volume_name_2 }}"
+ loop_control:
+ loop_var: volume_name
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml
new file mode 100644
index 00000000..26a822b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml
@@ -0,0 +1,450 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-networks' }}"
+ network_name_1: "{{ name_prefix ~ '-network-1' }}"
+ network_name_2: "{{ name_prefix ~ '-network-2' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+ network_names: "{{ network_names + [network_name_1, network_name_2] }}"
+
+- docker_network:
+ name: "{{ network_name }}"
+ driver: "overlay"
+ state: present
+ loop:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ loop_control:
+ loop_var: network_name
+
+#####################################################################
+## networks #########################################################
+#####################################################################
+
+- name: networks
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ register: networks_1
+
+- name: networks (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ register: networks_2
+
+- name: networks (dict idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ register: networks_3
+
+- name: networks (change more)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_4
+
+- name: networks (change more idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_5
+
+- name: networks (change more dict idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ - name: "{{ network_name_2 }}"
+ register: networks_6
+
+- name: networks (change more mixed idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_7
+
+- name: networks (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ - name: "{{ network_name_1 }}"
+ register: networks_8
+
+- name: networks (change less)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ register: networks_9
+
+- name: networks (change less idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ register: networks_10
+
+- name: networks (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks: []
+ register: networks_11
+
+- name: networks (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks: []
+ register: networks_12
+
+- name: networks (unknown network)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "idonotexist"
+ register: networks_13
+ ignore_errors: yes
+
+- name: networks (missing dict key name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - foo: "bar"
+ register: networks_14
+ ignore_errors: yes
+
+- name: networks (invalid list type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - [1, 2, 3]
+ register: networks_15
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is not changed
+ - networks_3 is not changed
+ - networks_4 is changed
+ - networks_5 is not changed
+ - networks_6 is not changed
+ - networks_7 is not changed
+ - networks_8 is not changed
+ - networks_9 is changed
+ - networks_10 is not changed
+ - networks_11 is changed
+ - networks_12 is not changed
+ - networks_13 is failed
+ - '"Could not find a network named: ''idonotexist''" in networks_13.msg'
+ - networks_14 is failed
+ - "'\"name\" is required when networks are passed as dictionaries.' in networks_14.msg"
+ - networks_15 is failed
+ - "'Only a list of strings or dictionaries are allowed to be passed as networks' in networks_15.msg"
+
+- assert:
+ that:
+ - networks_4.rebuilt == false
+ - networks_7.rebuilt == false
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=')
+
+- assert:
+ that:
+ - networks_4.rebuilt == true
+ - networks_7.rebuilt == true
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<')
+
+####################################################################
+## networks.aliases ################################################
+####################################################################
+
+- name: networks.aliases
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ - "alias2"
+ register: networks_aliases_1
+
+- name: networks.aliases (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ - "alias2"
+ register: networks_aliases_2
+
+- name: networks.aliases (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias2"
+ - "alias1"
+ register: networks_aliases_3
+
+- name: networks.aliases (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ register: networks_aliases_4
+
+- name: networks.aliases (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases: []
+ register: networks_aliases_5
+
+- name: networks.aliases (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases: []
+ register: networks_aliases_6
+
+- name: networks.aliases (invalid type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - [1, 2, 3]
+ register: networks_aliases_7
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - networks_aliases_1 is changed
+ - networks_aliases_2 is not changed
+ - networks_aliases_3 is not changed
+ - networks_aliases_4 is changed
+ - networks_aliases_5 is changed
+ - networks_aliases_6 is not changed
+ - networks_aliases_7 is failed
+ - "'Only strings are allowed as network aliases' in networks_aliases_7.msg"
+
+####################################################################
+## networks.options ################################################
+####################################################################
+
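+# Driver options are compared as a dict: changed or removed keys report a change,
+# a non-dict value fails validation, and an empty dict clears the options.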
+- name: networks.options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hello
+ register: networks_options_1
+
+- name: networks.options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hello
+ register: networks_options_2
+
+- name: networks.options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hej
+ register: networks_options_3
+
+- name: networks.options (change less)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ register: networks_options_4
+
+- name: networks.options (invalid type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: [1, 2, 3]
+ register: networks_options_5
+ ignore_errors: yes
+
+- name: networks.options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: {}
+ register: networks_options_6
+
+- name: networks.options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: {}
+ register: networks_options_7
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - networks_options_1 is changed
+ - networks_options_2 is not changed
+ - networks_options_3 is changed
+ - networks_options_4 is changed
+ - networks_options_5 is failed
+ - "'Only dict is allowed as network options' in networks_options_5.msg"
+ - networks_options_6 is changed
+ - networks_options_7 is not changed
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ force: yes
+ loop:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ loop_control:
+ loop_var: network_name
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml
new file mode 100644
index 00000000..df0a5938
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml
@@ -0,0 +1,1878 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-options' }}"
+
+- name: Registering service name list
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## args ############################################################
+####################################################################
+
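+# Each option below follows the same pattern: apply a value, re-apply it to show
+# idempotency, change it, remove the service, and assert on the registered results.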
+- name: args
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ args:
+ - sleep
+ - "3600"
+ register: args_1
+
+- name: args (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ args:
+ - sleep
+ - "3600"
+ register: args_2
+
+- name: args (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ args:
+ - sleep
+ - "3400"
+ register: args_3
+
+- name: args (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ args: []
+ register: args_4
+
+- name: args (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ args: []
+ register: args_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - args_1 is changed
+ - args_2 is not changed
+ - args_3 is changed
+ - args_4 is changed
+ - args_5 is not changed
+
+####################################################################
+## command #########################################################
+####################################################################
+
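+# command accepts either a string or a list; the string form and the equivalent
+# list form must compare as unchanged, while non-string values fail validation.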
+- name: command
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: command_1
+
+- name: command (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: command_2
+
+- name: command (fewer parameters)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -c "sleep 10m"'
+ register: command_3
+
+- name: command (as list)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "sleep 10m"
+ register: command_4
+
+- name: command (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: []
+ register: command_5
+
+- name: command (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: []
+ register: command_6
+
+- name: command (string failure)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: yes
+ register: command_7
+ ignore_errors: yes
+
+- name: command (list failure)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command:
+ - "/bin/sh"
+ - yes
+ register: command_8
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - command_1 is changed
+ - command_2 is not changed
+ - command_3 is changed
+ - command_4 is not changed
+ - command_5 is changed
+ - command_6 is not changed
+ - command_7 is failed
+ - command_8 is failed
+
+####################################################################
+## container_labels ################################################
+####################################################################
+
+- name: container_labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "2"
+ register: container_labels_1
+
+- name: container_labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "2"
+ register: container_labels_2
+
+- name: container_labels (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "3"
+ register: container_labels_3
+
+- name: container_labels (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels: {}
+ register: container_labels_4
+
+- name: container_labels (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels: {}
+ register: container_labels_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - container_labels_1 is changed
+ - container_labels_2 is not changed
+ - container_labels_3 is changed
+ - container_labels_4 is changed
+ - container_labels_5 is not changed
+
+####################################################################
+## dns #############################################################
+####################################################################
+
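+# dns depends on the Docker API/SDK versions, so these tasks ignore errors and the
+# assertions are split: new enough setups check change detection, older ones expect
+# a 'Minimum version required' failure.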
+- name: dns
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_1
+ ignore_errors: yes
+
+- name: dns (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_2
+ ignore_errors: yes
+
+- name: dns (changed order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 8.8.8.8
+ - 1.1.1.1
+ register: dns_3
+ ignore_errors: yes
+
+- name: dns (changed elements)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 8.8.8.8
+ - 9.9.9.9
+ register: dns_4
+ ignore_errors: yes
+
+- name: dns (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns: []
+ register: dns_5
+ ignore_errors: yes
+
+- name: dns (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns: []
+ register: dns_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - dns_1 is changed
+ - dns_2 is not changed
+ - dns_3 is changed
+ - dns_4 is changed
+ - dns_5 is changed
+ - dns_6 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_1 is failed
+ - "'Minimum version required' in dns_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dns_options #####################################################
+####################################################################
+
+- name: dns_options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - rotate
+ register: dns_options_1
+ ignore_errors: yes
+
+- name: dns_options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - rotate
+ register: dns_options_2
+ ignore_errors: yes
+
+- name: dns_options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - no-check-names
+ register: dns_options_3
+ ignore_errors: yes
+
+- name: dns_options (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - no-check-names
+ - "timeout:10"
+ register: dns_options_4
+ ignore_errors: yes
+
+- name: dns_options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options: []
+ register: dns_options_5
+ ignore_errors: yes
+
+- name: dns_options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options: []
+ register: dns_options_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - dns_options_1 is changed
+ - dns_options_2 is not changed
+ - dns_options_3 is changed
+ - dns_options_4 is not changed
+ - dns_options_5 is changed
+ - dns_options_6 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_options_1 is failed
+ - "'Minimum version required' in dns_options_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dns_search ######################################################
+####################################################################
+
+- name: dns_search
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.com
+ - example.org
+ register: dns_search_1
+ ignore_errors: yes
+
+- name: dns_search (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.com
+ - example.org
+ register: dns_search_2
+ ignore_errors: yes
+
+- name: dns_search (different order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.org
+ - example.com
+ register: dns_search_3
+ ignore_errors: yes
+
+- name: dns_search (changed elements)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - ansible.com
+ - example.com
+ register: dns_search_4
+ ignore_errors: yes
+
+- name: dns_search (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search: []
+ register: dns_search_5
+ ignore_errors: yes
+
+- name: dns_search (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search: []
+ register: dns_search_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - dns_search_1 is changed
+ - dns_search_2 is not changed
+ - dns_search_3 is changed
+ - dns_search_4 is changed
+ - dns_search_5 is changed
+ - dns_search_6 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_search_1 is failed
+ - "'Minimum version required' in dns_search_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## endpoint_mode ###################################################
+####################################################################
+
+- name: endpoint_mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "dnsrr"
+ register: endpoint_mode_1
+ ignore_errors: yes
+
+- name: endpoint_mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "dnsrr"
+ register: endpoint_mode_2
+ ignore_errors: yes
+
+- name: endpoint_mode (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "vip"
+ register: endpoint_mode_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - endpoint_mode_1 is changed
+ - endpoint_mode_2 is not changed
+ - endpoint_mode_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('3.0.0', '>=')
+- assert:
+ that:
+ - endpoint_mode_1 is failed
+ - "'Minimum version required' in endpoint_mode_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('3.0.0', '<')
+
+####################################################################
+## env #############################################################
+####################################################################
+
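+# env may be given as a list of KEY=value strings or as a dict; both spellings of
+# the same environment are idempotent, ordering is ignored, and non-string values
+# or entries without '=' fail.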
+- name: env
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST1=val1"
+ - "TEST2=val2"
+ register: env_1
+
+- name: env (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ TEST1: val1
+ TEST2: val2
+ register: env_2
+
+- name: env (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST1=val1"
+ - "TEST2=val3"
+ register: env_3
+
+- name: env (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST2=val3"
+ - "TEST1=val1"
+ register: env_4
+
+- name: env (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env: []
+ register: env_5
+
+- name: env (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ env: []
+ register: env_6
+
+- name: env (fail on non-string value)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env:
+ TEST1: true
+ register: env_7
+ ignore_errors: yes
+
+- name: env (fail on invalidly formatted string)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env:
+ - "TEST1=val3"
+ - "TEST2"
+ register: env_8
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - env_1 is changed
+ - env_2 is not changed
+ - env_3 is changed
+ - env_4 is not changed
+ - env_5 is changed
+ - env_6 is not changed
+ - env_7 is failed
+ - env_8 is failed
+
+####################################################################
+## env_files #######################################################
+####################################################################
+
+- name: env_files
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files:
+ - "{{ role_path }}/files/env-file-1"
+ register: env_file_1
+
+- name: env_files (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files:
+ - "{{ role_path }}/files/env-file-1"
+ register: env_file_2
+
+- name: env_files (more items)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files:
+ - "{{ role_path }}/files/env-file-1"
+ - "{{ role_path }}/files/env-file-2"
+ register: env_file_3
+
+- name: env_files (order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files:
+ - "{{ role_path }}/files/env-file-2"
+ - "{{ role_path }}/files/env-file-1"
+ register: env_file_4
+
+- name: env_files (multiple idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files:
+ - "{{ role_path }}/files/env-file-2"
+ - "{{ role_path }}/files/env-file-1"
+ register: env_file_5
+
+- name: env_files (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files: []
+ register: env_file_6
+
+- name: env_files (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ env_files: []
+ register: env_file_7
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - env_file_1 is changed
+ - env_file_2 is not changed
+ - env_file_3 is changed
+ - env_file_4 is changed
+ - env_file_5 is not changed
+ - env_file_6 is changed
+ - env_file_7 is not changed
+
+###################################################################
+## force_update ###################################################
+###################################################################
+
+- name: force_update
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ args:
+ - sleep
+ - "3600"
+ force_update: yes
+ register: force_update_1
+ ignore_errors: yes
+
+- name: force_update (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ args:
+ - sleep
+ - "3600"
+ force_update: yes
+ register: force_update_2
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - force_update_1 is changed
+ - force_update_2 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - force_update_1 is failed
+ - "'Minimum version required' in force_update_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<')
+
+####################################################################
+## groups ##########################################################
+####################################################################
+
+- name: groups
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_1
+ ignore_errors: yes
+
+- name: groups (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_2
+ ignore_errors: yes
+
+- name: groups (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "5678"
+ - "1234"
+ register: groups_3
+ ignore_errors: yes
+
+- name: groups (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ register: groups_4
+ ignore_errors: yes
+
+- name: groups (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups: []
+ register: groups_5
+ ignore_errors: yes
+
+- name: groups (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups: []
+ register: groups_6
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - groups_1 is changed
+ - groups_2 is not changed
+ - groups_3 is not changed
+ - groups_4 is changed
+ - groups_5 is changed
+ - groups_6 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - groups_1 is failed
+ - "'Minimum version required' in groups_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## healthcheck #####################################################
+####################################################################
+
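+# healthcheck comparison normalises the test command ("1" and 1 are equal), a test
+# of [NONE] disables the check, and a plain string test is idempotent on re-apply.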
+- name: healthcheck
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - "1"
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ start_period: 20s
+ register: healthcheck_1
+ ignore_errors: yes
+
+- name: healthcheck (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ start_period: 20s
+ register: healthcheck_2
+ ignore_errors: yes
+
+- name: healthcheck (changed)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - "1"
+ timeout: 3s
+ interval: 0h1m2s3ms4us
+ retries: 3
+ register: healthcheck_3
+ ignore_errors: yes
+
+- name: healthcheck (disabled)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - NONE
+ register: healthcheck_4
+ ignore_errors: yes
+
+- name: healthcheck (disabled, idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - NONE
+ register: healthcheck_5
+ ignore_errors: yes
+
+- name: healthcheck (string in healthcheck test, changed)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test: "sleep 1"
+ register: healthcheck_6
+ ignore_errors: yes
+
+- name: healthcheck (string in healthcheck test, idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test: "sleep 1"
+ register: healthcheck_7
+ ignore_errors: yes
+
+- name: healthcheck (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck: {}
+ register: healthcheck_8
+ ignore_errors: yes
+
+- name: healthcheck (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck: {}
+ register: healthcheck_9
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - healthcheck_1 is changed
+ - healthcheck_2 is not changed
+ - healthcheck_3 is changed
+ - healthcheck_4 is changed
+ - healthcheck_5 is not changed
+ - healthcheck_6 is changed
+ - healthcheck_7 is not changed
+ - healthcheck_8 is changed
+ - healthcheck_9 is not changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - healthcheck_1 is failed
+ - "'Minimum version required' in healthcheck_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.6.0', '<')
+
+###################################################################
+## hostname #######################################################
+###################################################################
+
+- name: hostname
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.com
+ register: hostname_1
+ ignore_errors: yes
+
+- name: hostname (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.com
+ register: hostname_2
+ ignore_errors: yes
+
+- name: hostname (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.org
+ register: hostname_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - hostname_1 is changed
+ - hostname_2 is not changed
+ - hostname_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.2.0', '>=')
+- assert:
+ that:
+ - hostname_1 is failed
+ - "'Minimum version required' in hostname_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.2.0', '<')
+
+###################################################################
+## hosts ##########################################################
+###################################################################
+
+- name: hosts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ example.org: 4.3.2.1
+ register: hosts_1
+ ignore_errors: yes
+
+- name: hosts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ example.org: 4.3.2.1
+ register: hosts_2
+ ignore_errors: yes
+
+- name: hosts (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ register: hosts_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - hosts_1 is changed
+ - hosts_2 is not changed
+ - hosts_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - hosts_1 is failed
+ - "'Minimum version required' in hosts_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.6.0', '<')
+
+
+###################################################################
+## image ##########################################################
+###################################################################
+
+- name: image
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: image_1
+
+- name: image (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: image_2
+
+- name: image (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine_different }}"
+ register: image_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - image_1 is changed
+ - image_2 is not changed
+ - image_3 is changed
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ register: labels_2
+
+- name: labels (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ test_3: "3"
+ register: labels_3
+
+- name: labels (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels: {}
+ register: labels_4
+
+- name: labels (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels: {}
+ register: labels_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is changed
+ - labels_4 is changed
+ - labels_5 is not changed
+
+###################################################################
+## mode ###########################################################
+###################################################################
+
+- name: mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "replicated"
+ replicas: 1
+ register: mode_1
+
+- name: mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "replicated"
+ replicas: 1
+ register: mode_2
+
+- name: mode (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "global"
+ replicas: 1
+ register: mode_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - mode_1 is changed
+ - mode_2 is not changed
+ - mode_3 is changed
+
+####################################################################
+## stop_grace_period ###############################################
+####################################################################
+
+- name: stop_grace_period
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 60s
+ register: stop_grace_period_1
+
+- name: stop_grace_period (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 60s
+ register: stop_grace_period_2
+
+- name: stop_grace_period (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 1m30s
+ register: stop_grace_period_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - stop_grace_period_1 is changed
+ - stop_grace_period_2 is not changed
+ - stop_grace_period_3 is changed
+
+####################################################################
+## stop_signal #####################################################
+####################################################################
+
+- name: stop_signal
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "30"
+ register: stop_signal_1
+ ignore_errors: yes
+
+- name: stop_signal (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "30"
+ register: stop_signal_2
+ ignore_errors: yes
+
+- name: stop_signal (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "9"
+ register: stop_signal_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - stop_signal_1 is changed
+ - stop_signal_2 is not changed
+ - stop_signal_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - stop_signal_1 is failed
+ - "'Minimum version required' in stop_signal_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## publish #########################################################
+####################################################################
+
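+# Published ports are compared as an unordered set, the protocol defaults to tcp
+# when omitted, and the same port may be published for tcp and udp at once; the
+# service info lookup below confirms both mappings exist.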
+- name: publish
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ - protocol: udp
+ published_port: 60002
+ target_port: 60002
+ register: publish_1
+ ignore_errors: yes
+
+- name: publish (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60002
+ target_port: 60002
+ - published_port: 60001
+ target_port: 60001
+ register: publish_2
+ ignore_errors: yes
+
+- name: publish (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ register: publish_3
+ ignore_errors: yes
+
+- name: publish (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ mode: host
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ register: publish_4
+ ignore_errors: yes
+
+- name: publish (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ mode: host
+ register: publish_5
+ ignore_errors: yes
+
+- name: publish (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish: []
+ register: publish_6
+ ignore_errors: yes
+
+- name: publish (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish: []
+ register: publish_7
+ ignore_errors: yes
+
+- name: publish (same port with both protocols)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ register: publish_8
+ ignore_errors: yes
+- name: gather service info
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: publish_8_info
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - publish_1 is changed
+ - publish_2 is not changed
+ - publish_3 is changed
+ - publish_4 is changed
+ - publish_5 is not changed
+ - publish_6 is changed
+ - publish_7 is not changed
+ - publish_8 is changed
+ - (publish_8_info.service.Endpoint.Ports | length) == 2
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('3.0.0', '>=')
+- assert:
+ that:
+ - publish_1 is failed
+ - "'Minimum version required' in publish_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('3.0.0', '<')
+
+###################################################################
+## read_only ######################################################
+###################################################################
+
+- name: read_only
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: true
+ register: read_only_1
+ ignore_errors: yes
+
+- name: read_only (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: true
+ register: read_only_2
+ ignore_errors: yes
+
+- name: read_only (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: false
+ register: read_only_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - read_only_1 is changed
+ - read_only_2 is not changed
+ - read_only_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - read_only_1 is failed
+ - "'Minimum version required' in read_only_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<')
+
+###################################################################
+## replicas #######################################################
+###################################################################
+
+- name: replicas
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 2
+ register: replicas_1
+
+- name: replicas (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 2
+ register: replicas_2
+
+- name: replicas (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 3
+ register: replicas_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - replicas_1 is changed
+ - replicas_2 is not changed
+ - replicas_3 is changed
+
+###################################################################
+# resolve_image ###################################################
+###################################################################
+
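+# resolve_image requires Docker SDK for Python 3.2.0 and API 1.30; on older
+# versions enabling it must fail with the versioned error checked below.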
+- name: resolve_image (false)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: false
+ register: resolve_image_1
+
+- name: resolve_image (false idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: false
+ register: resolve_image_2
+
+- name: resolve_image (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: true
+ register: resolve_image_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - resolve_image_1 is changed
+ - resolve_image_2 is not changed
+ - resolve_image_3 is changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('3.2.0', '>=')
+- assert:
+ that:
+ - resolve_image_1 is changed
+ - resolve_image_2 is not changed
+ - resolve_image_3 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in resolve_image_3.msg"
+ - "'Minimum version required is 3.2.0 ' in resolve_image_3.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('3.2.0', '<')
+
+###################################################################
+# tty #############################################################
+###################################################################
+
+- name: tty
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: yes
+ register: tty_1
+ ignore_errors: yes
+
+- name: tty (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: yes
+ register: tty_2
+ ignore_errors: yes
+
+- name: tty (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: no
+ register: tty_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - tty_1 is changed
+ - tty_2 is not changed
+ - tty_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - tty_1 is failed
+ - "'Minimum version required' in tty_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<')
+
+###################################################################
+## user ###########################################################
+###################################################################
+
+- name: user
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "operator"
+ register: user_1
+
+- name: user (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "operator"
+ register: user_2
+
+- name: user (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "root"
+ register: user_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - user_1 is changed
+ - user_2 is not changed
+ - user_3 is changed
+
+####################################################################
+## working_dir #####################################################
+####################################################################
+
+- name: working_dir
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ working_dir: /tmp
+ register: working_dir_1
+
+- name: working_dir (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ working_dir: /tmp
+ register: working_dir_2
+
+- name: working_dir (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ working_dir: /
+ register: working_dir_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - working_dir_1 is changed
+ - working_dir_2 is not changed
+ - working_dir_3 is changed
+
+####################################################################
+## init ############################################################
+####################################################################
+
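+# init requires Docker API 1.37; older daemons must reject it with a
+# minimum-version error.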
+- name: init
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ init: true
+ register: init_1
+
+- name: init (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ init: true
+ register: init_2
+
+- name: init (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ init: false
+ register: init_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - init_1 is changed
+ - init_2 is not changed
+ - init_3 is changed
+ when: docker_api_version is version('1.37', '>=')
+
+- assert:
+ that:
+ - init_1 is failed
+ - "('version is ' ~ docker_api_version ~ '. Minimum version required is 1.37') in init_1.msg"
+ when: docker_api_version is version('1.37', '<')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml
new file mode 100644
index 00000000..e614f6a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml
@@ -0,0 +1,214 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-placement' }}"
+
+- name: Registering service name list
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+
+####################################################################
+## placement.preferences ###########################################
+####################################################################
+
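+# placement.preferences is gated on API 1.27 / SDK 2.4.0; the tasks cover spread
+# preferences, idempotency, change detection and clearing the list.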
+- name: placement.preferences
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test"
+ register: placement_preferences_1
+ ignore_errors: yes
+
+- name: placement.preferences (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test"
+ register: placement_preferences_2
+ ignore_errors: yes
+
+- name: placement.preferences (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test2"
+ register: placement_preferences_3
+ ignore_errors: yes
+
+- name: placement.preferences (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences: []
+ register: placement_preferences_4
+ ignore_errors: yes
+
+- name: placement.preferences (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences: []
+ register: placement_preferences_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - placement_preferences_1 is changed
+ - placement_preferences_2 is not changed
+ - placement_preferences_3 is changed
+ - placement_preferences_4 is changed
+ - placement_preferences_5 is not changed
+ when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - placement_preferences_1 is failed
+ - "'Minimum version required' in placement_preferences_1.msg"
+ when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## placement.constraints ###########################################
+####################################################################
+
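+# placement.constraints also accepts the older top-level 'constraints' option;
+# both spellings must be idempotent and ordering is ignored.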
+- name: placement.constraints
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == manager"
+ register: constraints_1
+ ignore_errors: yes
+
+- name: placement.constraints (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == manager"
+ register: constraints_2
+ ignore_errors: yes
+
+- name: constraints (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ constraints:
+ - "node.role == manager"
+ register: constraints_2b
+ ignore_errors: yes
+
+- name: placement.constraints (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == worker"
+ register: constraints_3
+ ignore_errors: yes
+
+- name: placement.constraints (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == worker"
+ - "node.label != non_existent_label"
+ register: constraints_4
+ ignore_errors: yes
+
+- name: placement.constraints (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.label != non_existent_label"
+ - "node.role == worker"
+ register: constraints_5
+ ignore_errors: yes
+
+- name: placement.constraints (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints: []
+ register: constraints_6
+ ignore_errors: yes
+
+- name: placement.constraints (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints: []
+ register: constraints_7
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - constraints_1 is changed
+ - constraints_2 is not changed
+ - constraints_2b is not changed
+ - constraints_3 is changed
+ - constraints_4 is changed
+ - constraints_5 is not changed
+ - constraints_6 is changed
+ - constraints_7 is not changed
+ when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - constraints_1 is failed
+ - "'Minimum version required' in constraints_1.msg"
+ when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml
new file mode 100644
index 00000000..30141a24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml
@@ -0,0 +1,230 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-resources' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## limits.cpus #####################################################
+####################################################################
+
+- name: limits.cpus
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 1
+ register: limit_cpu_1
+
+- name: limits.cpus (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 1
+ register: limit_cpu_2
+
+- name: limit_cpu (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limit_cpu: 1
+ register: limit_cpu_2b
+
+- name: limits.cpus (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 0.5
+ register: limit_cpu_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - limit_cpu_1 is changed
+ - limit_cpu_2 is not changed
+ - limit_cpu_2b is not changed
+ - limit_cpu_3 is changed
+
+###################################################################
+## limits.memory ##################################################
+###################################################################
+
+- name: limits.memory
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ memory: 64M
+ register: limit_memory_1
+
+- name: limits.memory (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ memory: 64M
+ register: limit_memory_2
+
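+# 67108864 bytes == 64 MiB, the same value as limits.memory: 64M above, so the
+# legacy limit_memory option must report no change.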
+- name: limit_memory (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limit_memory: "67108864"
+ register: limit_memory_2b
+
+- name: limits.memory (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ limit_memory: 32M
+ register: limit_memory_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - limit_memory_1 is changed
+ - limit_memory_2 is not changed
+ - limit_memory_2b is not changed
+ - limit_memory_3 is changed
+
+###################################################################
+## reservations.cpus ##############################################
+###################################################################
+
+- name: reserve_cpu
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 1
+ register: reserve_cpu_1
+
+- name: reserve_cpu (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 1
+ register: reserve_cpu_2
+
+- name: reserve_cpu (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reserve_cpu: 1
+ register: reserve_cpu_2b
+
+- name: reserve_cpu (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 0.5
+ register: reserve_cpu_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - reserve_cpu_1 is changed
+ - reserve_cpu_2 is not changed
+ - reserve_cpu_2b is not changed
+ - reserve_cpu_3 is changed
+
+###################################################################
+## reservations.memory ############################################
+###################################################################
+
+- name: reservations.memory
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ memory: 64M
+ register: reserve_memory_1
+
+- name: reservations.memory (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reserve_memory: 64M
+ register: reserve_memory_2
+
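+# 67108864 bytes == 64 MiB, matching reservations.memory: 64M above, so no change is expected.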
+- name: reserve_memory (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reserve_memory: "67108864"
+ register: reserve_memory_2b
+
+- name: reservations.memory (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ reserve_memory: 32M
+ register: reserve_memory_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - reserve_memory_1 is changed
+ - reserve_memory_2 is not changed
+ - reserve_memory_2b is not changed
+ - reserve_memory_3 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml
new file mode 100644
index 00000000..18bcbb5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml
@@ -0,0 +1,233 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-restart_config' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+###################################################################
+## restart_config.condition #######################################
+###################################################################
+
+- name: restart_config.condition
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "on-failure"
+ register: restart_policy_1
+
+- name: restart_config.condition (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "on-failure"
+ register: restart_policy_2
+
+- name: restart_policy (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_policy: "on-failure"
+ register: restart_policy_2b
+
+- name: restart_config.condition (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "any"
+ register: restart_policy_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - restart_policy_1 is changed
+ - restart_policy_2 is not changed
+ - restart_policy_2b is not changed
+ - restart_policy_3 is changed
+
+###################################################################
+## restart_config.max_attempts ####################################
+###################################################################
+
+- name: restart_config.max_attempts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 1
+ register: restart_policy_attempts_1
+
+- name: restart_config.max_attempts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 1
+ register: restart_policy_attempts_2
+
+- name: restart_policy_attempts (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_policy_attempts: 1
+ register: restart_policy_attempts_2b
+
+- name: restart_config.max_attempts (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 2
+ register: restart_policy_attempts_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - restart_policy_attempts_1 is changed
+ - restart_policy_attempts_2 is not changed
+ - restart_policy_attempts_2b is not changed
+ - restart_policy_attempts_3 is changed
+
+###################################################################
+## restart_config.delay ###########################################
+###################################################################
+
+- name: restart_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 5s
+ register: restart_policy_delay_1
+
+- name: restart_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 5s
+ register: restart_policy_delay_2
+
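+# 5000000000 nanoseconds == 5s, matching restart_config.delay above, so no change is expected.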
+- name: restart_policy_delay (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_policy_delay: 5000000000
+ register: restart_policy_delay_2b
+
+- name: restart_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 10s
+ register: restart_policy_delay_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - restart_policy_delay_1 is changed
+ - restart_policy_delay_2 is not changed
+ - restart_policy_delay_2b is not changed
+ - restart_policy_delay_3 is changed
+
+###################################################################
+## restart_config.window ##########################################
+###################################################################
+
+- name: restart_config.window
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 10s
+ register: restart_policy_window_1
+
+- name: restart_config.window (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 10s
+ register: restart_policy_window_2
+
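+# 10000000000 nanoseconds == 10s, matching restart_config.window above, so no change is expected.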
+- name: restart_policy_window (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_policy_window: 10000000000
+ register: restart_policy_window_2b
+
+- name: restart_config.window (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 20s
+ register: restart_policy_window_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - restart_policy_window_1 is changed
+ - restart_policy_window_2 is not changed
+ - restart_policy_window_2b is not changed
+ - restart_policy_window_3 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml
new file mode 100644
index 00000000..8d97d7e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml
@@ -0,0 +1,339 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-rollback_config' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
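+# All rollback_config suboptions require Docker API 1.28+ (1.29+ for order) and
+# docker SDK 3.5.0+; every task therefore uses ignore_errors, and the asserts are
+# split by version, expecting a 'Minimum version required' failure on older setups.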
+###################################################################
+## rollback_config.delay ############################################
+###################################################################
+
+- name: rollback_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 5s
+ register: rollback_config_delay_1
+ ignore_errors: yes
+
+- name: rollback_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 5s
+ register: rollback_config_delay_2
+ ignore_errors: yes
+
+- name: rollback_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 12s
+ register: rollback_config_delay_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_delay_1 is changed
+ - rollback_config_delay_2 is not changed
+ - rollback_config_delay_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_delay_1 is failed
+ - "'Minimum version required' in rollback_config_delay_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.failure_action ###################################
+###################################################################
+
+- name: rollback_config.failure_action
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "pause"
+ register: rollback_config_failure_action_1
+ ignore_errors: yes
+
+- name: rollback_config.failure_action (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "pause"
+ register: rollback_config_failure_action_2
+ ignore_errors: yes
+
+- name: rollback_config.failure_action (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "continue"
+ register: rollback_config_failure_action_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_failure_action_1 is changed
+ - rollback_config_failure_action_2 is not changed
+ - rollback_config_failure_action_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_failure_action_1 is failed
+ - "'Minimum version required' in rollback_config_failure_action_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.max_failure_ratio ################################
+###################################################################
+
+- name: rollback_config.max_failure_ratio
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.25
+ register: rollback_config_max_failure_ratio_1
+ ignore_errors: yes
+
+- name: rollback_config.max_failure_ratio (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.25
+ register: rollback_config_max_failure_ratio_2
+ ignore_errors: yes
+
+- name: rollback_config.max_failure_ratio (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.50
+ register: rollback_config_max_failure_ratio_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_max_failure_ratio_1 is changed
+ - rollback_config_max_failure_ratio_2 is not changed
+ - rollback_config_max_failure_ratio_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_max_failure_ratio_1 is failed
+ - "'Minimum version required' in rollback_config_max_failure_ratio_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+# rollback_config.monitor ###########################################
+###################################################################
+
+- name: rollback_config.monitor
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 10s
+ register: rollback_config_monitor_1
+ ignore_errors: yes
+
+- name: rollback_config.monitor (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 10s
+ register: rollback_config_monitor_2
+ ignore_errors: yes
+
+- name: rollback_config.monitor (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 60s
+ register: rollback_config_monitor_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_monitor_1 is changed
+ - rollback_config_monitor_2 is not changed
+ - rollback_config_monitor_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_monitor_1 is failed
+ - "'Minimum version required' in rollback_config_monitor_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+# rollback_config.order #############################################
+###################################################################
+
+- name: rollback_config.order
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "start-first"
+ register: rollback_config_order_1
+ ignore_errors: yes
+
+- name: rollback_config.order (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "start-first"
+ register: rollback_config_order_2
+ ignore_errors: yes
+
+- name: rollback_config.order (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "stop-first"
+ register: rollback_config_order_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_order_1 is changed
+ - rollback_config_order_2 is not changed
+ - rollback_config_order_3 is changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_order_1 is failed
+ - "'Minimum version required' in rollback_config_order_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.parallelism ######################################
+###################################################################
+
+- name: rollback_config.parallelism
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 2
+ register: rollback_config_parallelism_1
+ ignore_errors: yes
+
+- name: rollback_config.parallelism (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 2
+ register: rollback_config_parallelism_2
+ ignore_errors: yes
+
+- name: rollback_config.parallelism (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 1
+ register: rollback_config_parallelism_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - rollback_config_parallelism_1 is changed
+ - rollback_config_parallelism_2 is not changed
+ - rollback_config_parallelism_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_parallelism_1 is failed
+ - "'Minimum version required' in rollback_config_parallelism_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml
new file mode 100644
index 00000000..bcd1f269
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml
@@ -0,0 +1,411 @@
+---
+
+- name: Registering service and secret names
+ set_fact:
+ service_name: "{{ name_prefix ~ '-secrets' }}"
+ secret_name_1: "{{ name_prefix ~ '-secret-1' }}"
+ secret_name_2: "{{ name_prefix ~ '-secret-2' }}"
+
+- name: Registering secret names
+ set_fact:
+ secret_names: "{{ secret_names + [secret_name_1, secret_name_2] }}"
+
+- docker_secret:
+ name: "{{ secret_name_1 }}"
+ data: "secret1"
+ state: "present"
+ register: "secret_result_1"
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
+
+- docker_secret:
+ name: "{{ secret_name_2 }}"
+ data: "secret2"
+ state: "present"
+ register: "secret_result_2"
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
+
+####################################################################
+## secrets #########################################################
+####################################################################
+
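+# secret_result_*.secret_id is only set when the docker_secret tasks above actually ran;
+# default('') keeps templating from failing on setups where they were skipped, in which
+# case the service task itself is expected to fail the version check.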
+- name: secrets
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_1
+ ignore_errors: yes
+
+- name: secrets (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_2
+ ignore_errors: yes
+
+- name: secrets (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_3
+ ignore_errors: yes
+
+- name: secrets (add idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_id: "{{ secret_result_2.secret_id|default('') }}"
+ secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_4
+ ignore_errors: yes
+
+- name: secrets (add idempotency no id)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_5
+ ignore_errors: yes
+
+- name: secrets (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_6
+ ignore_errors: yes
+
+- name: secrets (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets: []
+ register: secrets_7
+ ignore_errors: yes
+
+- name: secrets (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets: []
+ register: secrets_8
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is not changed
+ - secrets_5 is not changed
+ - secrets_6 is not changed
+ - secrets_7 is changed
+ - secrets_8 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (uid) ###################################################
+####################################################################
+
+- name: secrets (uid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1000
+ register: secrets_1
+ ignore_errors: yes
+
+- name: secrets (uid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1000
+ register: secrets_2
+ ignore_errors: yes
+
+- name: secrets (uid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1002
+ register: secrets_3
+ ignore_errors: yes
+
+- name: secrets (uid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: "1001"
+ register: secrets_4
+ ignore_errors: yes
+
+- name: secrets (uid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: "1001"
+ register: secrets_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is changed
+ - secrets_5 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (gid) ###################################################
+####################################################################
+
+- name: secrets (gid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1001
+ register: secrets_1
+ ignore_errors: yes
+
+- name: secrets (gid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1001
+ register: secrets_2
+ ignore_errors: yes
+
+- name: secrets (gid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1002
+ register: secrets_3
+ ignore_errors: yes
+
+- name: secrets (gid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: "1003"
+ register: secrets_4
+ ignore_errors: yes
+
+- name: secrets (gid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: "1003"
+ register: secrets_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is changed
+ - secrets_5 is not changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (mode) ##################################################
+####################################################################
+
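+# mode is the octal permission mode of the secret file mounted into the containers.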
+- name: secrets (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0600
+ register: secrets_1
+ ignore_errors: yes
+
+- name: secrets (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0600
+ register: secrets_2
+ ignore_errors: yes
+
+- name: secrets (mode change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0777
+ register: secrets_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete secrets
+ docker_secret:
+ name: "{{ secret_name }}"
+ state: absent
+ force: yes
+ loop:
+ - "{{ secret_name_1 }}"
+ - "{{ secret_name_2 }}"
+ loop_control:
+ loop_var: secret_name
+ ignore_errors: yes
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml
new file mode 100644
index 00000000..4bc8a2cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml
@@ -0,0 +1,408 @@
+---
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-update_config' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+###################################################################
+## update_config.delay ############################################
+###################################################################
+
+- name: update_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 5s
+ register: update_delay_1
+
+- name: update_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 5s
+ register: update_delay_2
+
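+# 5000000000 nanoseconds == 5s, matching update_config.delay above, so no change is expected.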
+- name: update_delay (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_delay: 5000000000
+ register: update_delay_2b
+
+- name: update_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 12s
+ register: update_delay_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_delay_1 is changed
+ - update_delay_2 is not changed
+ - update_delay_2b is not changed
+ - update_delay_3 is changed
+
+###################################################################
+## update_config.failure_action ###################################
+###################################################################
+
+- name: update_config.failure_action
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "pause"
+ register: update_failure_action_1
+
+- name: update_config.failure_action (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "pause"
+ register: update_failure_action_2
+
+- name: update_failure_action (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_failure_action: "pause"
+ register: update_failure_action_2b
+
+- name: update_config.failure_action (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "continue"
+ register: update_failure_action_3
+
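+# failure_action: rollback needs a newer API/SDK than pause/continue, hence
+# ignore_errors here and the separate version-gated asserts below.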
+- name: update_config.failure_action (rollback)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "rollback"
+ register: update_failure_action_4
+ ignore_errors: yes
+
+- name: update_config.failure_action (rollback idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_failure_action: "rollback"
+ register: update_failure_action_5
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_failure_action_1 is changed
+ - update_failure_action_2 is not changed
+ - update_failure_action_2b is not changed
+ - update_failure_action_3 is changed
+
+- assert:
+ that:
+ - update_failure_action_4 is changed
+ - update_failure_action_5 is not changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+
+- assert:
+ that:
+ - update_failure_action_4 is failed
+ - "'Minimum version required' in update_failure_action_4.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## update_config.max_failure_ratio ################################
+###################################################################
+
+- name: update_config.max_failure_ratio
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.25
+ register: update_max_failure_ratio_1
+ ignore_errors: yes
+
+- name: update_config.max_failure_ratio (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.25
+ register: update_max_failure_ratio_2
+ ignore_errors: yes
+
+- name: update_max_failure_ratio (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_max_failure_ratio: 0.25
+ register: update_max_failure_ratio_2b
+ ignore_errors: yes
+
+- name: update_config.max_failure_ratio (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.50
+ register: update_max_failure_ratio_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_max_failure_ratio_1 is changed
+ - update_max_failure_ratio_2 is not changed
+ - update_max_failure_ratio_2b is not changed
+ - update_max_failure_ratio_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - update_max_failure_ratio_1 is failed
+ - "'Minimum version required' in update_max_failure_ratio_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<')
+
+###################################################################
+# update_config.monitor ###########################################
+###################################################################
+
+- name: update_config.monitor
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 10s
+ register: update_monitor_1
+ ignore_errors: yes
+
+- name: update_config.monitor (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 10s
+ register: update_monitor_2
+ ignore_errors: yes
+
+- name: update_monitor (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_monitor: 10s
+ register: update_monitor_2b
+ ignore_errors: yes
+
+- name: update_config.monitor (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 60s
+ register: update_monitor_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_monitor_1 is changed
+ - update_monitor_2 is not changed
+ - update_monitor_2b is not changed
+ - update_monitor_3 is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - update_monitor_1 is failed
+ - "'Minimum version required' in update_monitor_1.msg"
+ when: docker_api_version is version('1.25', '<') or docker_py_version is version('2.1.0', '<')
+
+###################################################################
+# update_config.order #############################################
+###################################################################
+
+- name: update_config.order
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "start-first"
+ register: update_order_1
+ ignore_errors: yes
+
+- name: update_config.order (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "start-first"
+ register: update_order_2
+ ignore_errors: yes
+
+- name: update_order (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_order: "start-first"
+ register: update_order_2b
+ ignore_errors: yes
+
+- name: update_config.order (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "stop-first"
+ register: update_order_3
+ ignore_errors: yes
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_order_1 is changed
+ - update_order_2 is not changed
+ - update_order_2b is not changed
+ - update_order_3 is changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=')
+- assert:
+ that:
+ - update_order_1 is failed
+ - "'Minimum version required' in update_order_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<')
+
+###################################################################
+## update_config.parallelism ######################################
+###################################################################
+
+- name: update_config.parallelism
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 2
+ register: update_parallelism_1
+
+- name: update_config.parallelism (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 2
+ register: update_parallelism_2
+
+- name: update_parallelism (idempotency, old name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_parallelism: 2
+ register: update_parallelism_2b
+
+- name: update_config.parallelism (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: no
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 1
+ register: update_parallelism_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: no
+
+- assert:
+ that:
+ - update_parallelism_1 is changed
+ - update_parallelism_2 is not changed
+ - update_parallelism_2b is not changed
+ - update_parallelism_3 is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/vars/main.yml
new file mode 100644
index 00000000..8ec7ffeb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service/vars/main.yml
@@ -0,0 +1,54 @@
+---
+
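+# Expected return values for the test service; presumably compared against the
+# module's actual output by the option tests in this target.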
+service_expected_output:
+ args: [sleep, '1800']
+ configs: null
+ constraints: null
+ container_labels: null
+ command: null
+ dns: null
+ dns_options: null
+ dns_search: null
+ endpoint_mode: vip
+ env: null
+ force_update: null
+ groups: null
+ healthcheck: null
+ healthcheck_disabled: null
+ hostname: null
+ hosts: null
+ image: "{{ docker_test_image_busybox }}"
+ labels: null
+ limit_cpu: null
+ limit_memory: null
+ log_driver: null
+ log_driver_options: null
+ mode: global
+ mounts: null
+ networks: null
+ secrets: null
+ stop_grace_period: null
+ stop_signal: null
+ placement_preferences: null
+ publish:
+ - {mode: null, protocol: tcp, published_port: 60001, target_port: 60001}
+ - {mode: null, protocol: udp, published_port: 60001, target_port: 60001}
+ read_only: null
+ replicas: null
+ reserve_cpu: null
+ reserve_memory: null
+ restart_policy: null
+ restart_policy_attempts: null
+ restart_policy_delay: null
+ restart_policy_window: null
+ rollback_config: null
+ tty: null
+ update_delay: null
+ update_failure_action: null
+ update_max_failure_ratio: null
+ update_monitor: null
+ update_order: null
+ update_parallelism: null
+ user: null
+ working_dir: null
+ init: null
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/aliases
new file mode 100644
index 00000000..5c691cdf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/aliases
@@ -0,0 +1,9 @@
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/main.yml
new file mode 100644
index 00000000..8350e901
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_docker_swarm_service_info.yml
+ when: docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.24', '>=')
+
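+# The failure is suppressed on CentOS 6, where the required docker-py version is
+# assumed to be unavailable.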
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!"
+ when: not(docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.24', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
new file mode 100644
index 00000000..2b5b882c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
@@ -0,0 +1,83 @@
+---
+
+- name: Generate service base name
+ set_fact:
+ service_base_name: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+- name: Registering service names
+ set_fact:
+ service_name: "{{ service_base_name ~ '-1' }}"
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_swarm_service_info when docker is not running in swarm mode
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ ignore_errors: yes
+ register: output
+
+ - name: assert failure when swarm is not in use or not run on a manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: Create services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+
+ - name: Try to get docker_swarm_service_info for a single service
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: output
+
+  - name: assert reading service info
+ assert:
+ that:
+ - 'output.exists == true'
+ - 'output.service.ID is string'
+ - 'output.service.Spec.Name == service_name'
+
+ - name: Create random name
+ set_fact:
+ random_service_name: "{{ 'random-service-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_swarm_service_info using random service name as parameter
+ docker_swarm_service_info:
+ name: "{{ random_service_name }}"
+ register: output
+
+  - name: assert reading service info
+ assert:
+ that:
+ - 'output.service is none'
+ - 'output.exists == false'
+
+ always:
+ - name: Remove services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ ignore_errors: yes
+
+ - name: Remove swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ # Maximum of 1.24 (docker API version for docker_swarm_service_info) and 1.25 (docker API version for docker_swarm) is 1.25
+ when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!"
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/aliases
new file mode 100644
index 00000000..f1162af5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/main.yml
new file mode 100644
index 00000000..04baaadb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+ vnames: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all volumes are removed"
+ docker_volume:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ vnames }}"
+
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=') # FIXME: find out API version!
+
+- fail: msg="Too old docker / docker-py version to run docker_volume tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/run-test.yml
new file mode 100644
index 00000000..a2999370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/run-test.yml
@@ -0,0 +1,3 @@
+---
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/tests/basic.yml
new file mode 100644
index 00000000..7ee56261
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume/tasks/tests/basic.yml
@@ -0,0 +1,177 @@
+---
+- name: Registering volume name
+ set_fact:
+ vname: "{{ name_prefix ~ '-basic' }}"
+- name: Registering volume names
+ set_fact:
+ vnames: "{{ vnames + [vname] }}"
+
+####################################################################
+## basic ###########################################################
+####################################################################
+
+- name: Create a volume
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_1
+
+- name: Create a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_2
+
+- name: "Create a volume (recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: options-changed
+ register: create_3
+
+- name: "Create a volume (recreate: always)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: always
+ register: create_4
+
+- name: Remove a volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_1
+
+- name: Remove a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_2
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is not changed
+ - create_4 is changed
+ - absent_1 is changed
+ - absent_2 is not changed
+
+####################################################################
+## driver_options ##################################################
+####################################################################
+
+- name: Create a volume with options
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_1
+
+- name: Create a volume with options (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_2
+
+- name: Create a volume with options (changed)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ register: driver_options_3
+
+- name: "Create a volume with options (changed, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ recreate: options-changed
+ register: driver_options_4
+
+- name: Cleanup
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+
+- assert:
+ that:
+ - driver_options_1 is changed
+ - driver_options_2 is not changed
+ - driver_options_3 is not changed
+ - driver_options_4 is changed
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: Create a volume with labels
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: driver_labels_1
+
+- name: Create a volume with labels (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: driver_labels_2
+
+- name: Create a volume with labels (less)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ register: driver_labels_3
+
+- name: "Create a volume with labels (less, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ recreate: options-changed
+ register: driver_labels_4
+
+- name: Create a volume with labels (more)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ register: driver_labels_5
+
+- name: "Create a volume with labels (more, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ recreate: options-changed
+ register: driver_labels_6
+
+- name: Cleanup
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+
+- assert:
+ that:
+ - driver_labels_1 is changed
+ - driver_labels_2 is not changed
+ - driver_labels_3 is not changed
+ - driver_labels_4 is not changed
+ - driver_labels_5 is not changed
+ - driver_labels_6 is changed
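The driver_options in the tasks above are handed to Docker's local volume driver, so type/device/o here are tmpfs-style mount options (size, uid). A minimal sketch of consuming such a volume from a container, reusing the alpine test image variable that appears elsewhere in these targets; the container name, command and mount path are purely illustrative and not part of the test suite:

- name: Mount the tmpfs-backed volume into a throwaway container
  docker_container:
    name: ansible-test-volume-consumer   # illustrative name
    image: "{{ docker_test_image_alpine }}"
    command: sleep 60
    state: started
    volumes:
      - "{{ vname }}:/cache"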
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/aliases
new file mode 100644
index 00000000..e91c4ea4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/tasks/main.yml
new file mode 100644
index 00000000..30f71e32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/docker_volume_info/tasks/main.yml
@@ -0,0 +1,74 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random volume name
+ set_fact:
+ cname: "{{ 'ansible-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure volume is not there
+ docker_volume:
+ name: "{{ cname }}"
+ state: absent
+
+ - name: Inspect a non-present volume
+ docker_volume_info:
+ name: "{{ cname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'volume' in result"
+ - "result.volume is none"
+
+ - name: Make sure volume exists
+ docker_volume:
+ name: "{{ cname }}"
+
+ - name: Inspect a present volume
+ docker_volume_info:
+ name: "{{ cname }}"
+ register: result
+ - name: Dump docker_volume_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker volume inspect'"
+ command: docker volume inspect "{{ cname }}"
+ register: docker_volume_inspect
+ ignore_errors: yes
+ - block:
+ - set_fact:
+ docker_volume_inspect_result: "{{ docker_volume_inspect.stdout | from_json }}"
+ - name: Dump docker volume inspect result
+ debug: var=docker_volume_inspect_result
+ when: docker_volume_inspect is not failed
+
+ - name: Cleanup
+ docker_volume:
+ name: "{{ cname }}"
+ state: absent
+
+ - assert:
+ that:
+ - result.exists
+ - "'volume' in result"
+ - "result.volume"
+
+ - assert:
+ that:
+ - "result.volume == docker_volume_inspect_result[0]"
+ when: docker_volume_inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_volume_inspect.stderr"
+ when: docker_volume_inspect is failed
+
+ # Requirements for docker_volume
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.24', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_volume_info tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.24', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
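As the assertions above rely on, docker_volume_info returns an exists boolean plus a volume dictionary (none when the volume is absent). A minimal usage sketch outside the test harness, assuming a volume named data that may or may not be present; Mountpoint is the field name reported by Docker's volume inspect output:

- name: Inspect a possibly missing volume
  docker_volume_info:
    name: data
  register: data_volume

- name: Report the mountpoint only when the volume exists
  debug:
    msg: "{{ data_volume.volume.Mountpoint }}"
  when: data_volume.exists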
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases
new file mode 100644
index 00000000..b47e4a60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
+skip/rhel
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml
new file mode 100644
index 00000000..6e71f1bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: "include tasks for Debian family"
+ include_tasks: prepare.yml
+ when: ansible_pkg_mgr == "apt"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml
new file mode 100644
index 00000000..f30d14a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml
@@ -0,0 +1,39 @@
+---
+- name: "set variables for the entire playbook"
+ set_fact:
+ foobarrc: "{{ foobarrc }}"
+ foobarrc_ansible: "{{ foobarrc }}.ansible"
+ foobarrc_distrib: "{{ foobarrc }}.distrib"
+ foobarrc_oldtext: "# foobar configuration file\n# Please refer to the documentation for details\n"
+ foobarrc_oldsha1: "e1c54c36d2fd1b8d67d1826e49b95ac8c0f24c0a"
+ foobarrc_newtext: "# Custom foobar configuration file\nFOO=bar\nBAR=foo"
+ foobarrc_newsha1: "3fe6c890519fb48e27c1b0e3e37afb11357d5cac"
+ vars:
+ foobarrc: "/etc/foobarrc"
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ become: yes
+
+- name: "remove test files"
+ file:
+ path: "{{ dpkg_divert_item }}"
+ state: absent
+ loop:
+ - "{{ foobarrc_ansible }}"
+ - "{{ foobarrc_distrib }}"
+ loop_control:
+ loop_var: dpkg_divert_item
+ become: yes
+
+
+- block:
+ - name: "include tasks to perform basic tests (create, remove, update)"
+ include_tasks: tests/01-basic.yml
+
+ - name: "include tasks to perform other tests (rename)"
+ include_tasks: tests/02-rename.yml
+ become: yes
+ diff: yes
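The foobarrc_oldsha1 and foobarrc_newsha1 facts set above are the expected SHA-1 checksums of the two fixture texts, which the stat assertions in the test files compare against. If the fixture texts are ever edited, the checksums can be recomputed with Ansible's hash filter; a minimal sketch (an ad-hoc debug task, not part of the test flow):

- name: Recompute the fixture checksums from the fixture texts
  debug:
    msg:
      - "old: {{ foobarrc_oldtext | hash('sha1') }}"
      - "new: {{ foobarrc_newtext | hash('sha1') }}"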
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml
new file mode 100644
index 00000000..c23db91d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml
@@ -0,0 +1,287 @@
+---
+################################################################################
+# TEST 01: state=present
+
+- name: "create foobarrc for tests"
+ copy:
+ dest: "{{ foobarrc }}"
+ content: "{{ foobarrc_oldtext }}"
+
+
+- name: "divert foobarrc (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_0
+ check_mode: true
+
+- name: "divert foobarrc (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_1
+
+
+- name: "divert foobarrc (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_2
+
+- name: "divert foobarrc (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_3
+ check_mode: true
+
+
+# Ensure that 'rename' has no effect when state is not changed
+
+- name: "divert foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ rename: yes
+ register: diversion_4
+
+- name: "divert foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ rename: yes
+ register: diversion_5
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "assert that results of test 01 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - diversion_6.stat.exists
+ - diversion_6.stat.checksum == foobarrc_oldsha1
+ - not diversion_7.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 02: state=absent
+
+- name: "remove diversion for foobarrc (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_0
+ check_mode: true
+
+- name: "remove diversion for foobarrc (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_1
+
+
+- name: "remove diversion for foobarrc (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_2
+
+- name: "remove diversion for foobarrc (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 02 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 03: holder=ansible
+
+- name: "create foobarrc diversion with defaults"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+
+
+- name: "update foobarrc diversion holder (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_0
+ check_mode: yes
+
+- name: "update foobarrc diversion holder (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_1
+
+
+- name: "update foobarrc diversion holder (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_2
+
+- name: "update foobarrc diversion holder (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_3
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 03 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+
+
+################################################################################
+# TEST 04: divert=/etc/foobarrc.ansible
+
+- name: "create foobarrc diversion with defaults"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+
+
+- name: "update foobarrc divert path (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_0
+ check_mode: yes
+
+- name: "update foobarrc divert path (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_1
+
+
+- name: "update foobarrc divert path (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_2
+
+- name: "update foobarrc divert path (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_3
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.ansible (must not exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_5
+
+- name: "assert that results of test 04 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml
new file mode 100644
index 00000000..69a46a8a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml
@@ -0,0 +1,380 @@
+---
+################################################################################
+# TEST 05: rename=yes, state=present
+
+- name: "create diversion for foobarrc (check mode, rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_0
+ check_mode: yes
+
+- name: "create diversion for foobarrc (rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_1
+
+
+- name: "create diversion for foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_2
+
+- name: "create diversion for foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_3
+ check_mode: yes
+
+
+# Get results
+
+- name: "stat foobarrc (must not exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 05 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - not diversion_4.stat.exists
+ - diversion_5.stat.exists
+ - diversion_5.stat.checksum == foobarrc_oldsha1
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 06: rename=yes, state=absent
+
+- name: "remove diversion for foobarrc (check mode, rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ state: absent
+ register: diversion_0
+ check_mode: yes
+
+- name: "remove diversion for foobarrc (rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ state: absent
+ register: diversion_1
+
+
+- name: "remove diversion for foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ state: absent
+ register: diversion_2
+
+- name: "remove diversion for foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ state: absent
+ register: diversion_3
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 06 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 07: rename=yes, force=yes, state=present
+
+- name: "create foobarrc.distrib for tests"
+ copy:
+ dest: "{{ foobarrc_distrib }}"
+ content: "{{ foobarrc_oldtext }}"
+
+
+- name: "create diversion for foobarrc (check mode, rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_0
+ ignore_errors: yes
+ check_mode: yes
+
+- name: "create diversion for foobarrc (rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ register: diversion_1
+ ignore_errors: yes
+
+
+- name: "create diversion for foobarrc (check mode, force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ force: yes
+ register: diversion_2
+ check_mode: yes
+
+- name: "create diversion for foobarrc (force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ force: yes
+ register: diversion_3
+
+
+- name: "create diversion for foobarrc (force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ force: yes
+ register: diversion_4
+
+- name: "create diversion for foobarrc (check mode, force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: yes
+ force: yes
+ register: diversion_5
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must not exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "assert that results of test 07 are as expected"
+ assert:
+ that:
+ - diversion_0 is failed
+ - diversion_1 is failed
+ - diversion_2 is changed
+ - diversion_3 is changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - not diversion_6.stat.exists
+ - diversion_7.stat.exists
+ - diversion_7.stat.checksum == foobarrc_oldsha1
+ - diversion_0 == diversion_1
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 08: state=present, update an existing divert path
+
+- name: "create foobarrc with new contents for tests"
+ copy:
+ dest: "{{ foobarrc }}"
+ content: "{{ foobarrc_newtext }}"
+
+
+- name: "create diversion for foobarrc (check mode, update divert path, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_0
+ check_mode: yes
+
+- name: "create diversion for foobarrc (update divert path, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_1
+
+
+- name: "create diversion for foobarrc (update divert path, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_2
+
+- name: "create diversion for foobarrc (check mode, update divert path, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_3
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.ansible (must exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_5
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_6
+
+- name: "assert that results of test 08 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_newsha1
+ - diversion_5.stat.exists
+ - diversion_5.stat.checksum == foobarrc_oldsha1
+ - not diversion_6.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: yes
+
+
+################################################################################
+# TEST 09: rename=yes, force=yes, state=absent
+
+- name: "remove diversion for foobarrc (check mode, rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ register: diversion_0
+ ignore_errors: yes
+ check_mode: yes
+
+- name: "remove diversion for foobarrc (rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ register: diversion_1
+ ignore_errors: yes
+
+
+- name: "remove diversion for foobarrc (check mode, force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ force: yes
+ register: diversion_2
+ check_mode: yes
+
+- name: "remove diversion for foobarrc (force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ force: yes
+ register: diversion_3
+
+
+- name: "remove diversion for foobarrc (force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ force: yes
+ register: diversion_4
+
+- name: "remove diversion for foobarrc (check mode, force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: yes
+ force: yes
+ register: diversion_5
+ check_mode: yes
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "stat foobarrc.ansible (must not exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_8
+
+- name: "assert that results of test 09 are as expected"
+ assert:
+ that:
+ - diversion_0 is failed
+ - diversion_1 is failed
+ - diversion_2 is changed
+ - diversion_3 is changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - diversion_6.stat.exists
+ - diversion_6.stat.checksum == foobarrc_oldsha1
+ - not diversion_7.stat.exists
+ - not diversion_8.stat.exists
+ - diversion_0 == diversion_1
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/aliases
new file mode 100644
index 00000000..949f4250
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/aliases
@@ -0,0 +1,8 @@
+shippable/posix/group1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/python2.6 # installing etcd3 python module will fail on python < 2.7
+disabled # see https://github.com/ansible-collections/community.general/issues/322
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml
new file mode 100644
index 00000000..48987c54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_etcd3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml
new file mode 100644
index 00000000..a5f1c78a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the etcd3 module
+# (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+
+- name: run_tests for supported distros
+ include_tasks: run_tests.yml
+ when:
+ - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml
new file mode 100644
index 00000000..66b53830
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml
@@ -0,0 +1,94 @@
+---
+# test code for the etcd3 module
+# (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+# Integration tests
+- name: Check mode, show need change
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_chktst
+ check_mode: true
+
+- name: Change to new value
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_chgtst
+
+- name: Idempotency test, show unchanged.
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_idmptnttst
+
+- name: Idempotency test in check mode, show unchanged
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_idmptntchktst
+ check_mode: true
+
+- name: Check mode, show need removal of key
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_chktst
+ check_mode: true
+
+- name: Remove foo key
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_chgtst
+
+- name: Idempotency test in check mode, show unchanged
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_idmptnttst
+ check_mode: true
+
+- name: Idempotency test, show unchanged
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_idmptntchktst
+
+- name: Check that the results are as expected
+ assert:
+ that:
+ - _etcd3_prst_chktst is changed
+ - _etcd3_prst_chgtst is changed
+ - _etcd3_prst_idmptnttst is not changed
+ - _etcd3_prst_idmptntchktst is not changed
+ - _etcd3_absnt_chktst is changed
+ - _etcd3_absnt_chgtst is changed
+ - _etcd3_absnt_idmptnttst is not changed
+ - _etcd3_absnt_idmptntchktst is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/aliases
new file mode 100644
index 00000000..1c80472f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml
new file mode 100644
index 00000000..764b98b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml
@@ -0,0 +1,23 @@
+---
+tested_filesystems:
+ # key: fstype
+ # fssize: size (Mo)
+ # grow: True if resizefs is supported
+ # Other minimal sizes:
+ # - XFS: 20Mo
+  #   - Btrfs: 150Mo (50Mo when "--metadata single" is used and 100Mo when on newer Fedora versions)
+ # - f2fs:
+  #     - 1.2.0 requires at least 116Mo
+ # - 1.7.0 requires at least 30Mo
+ # - 1.10.0 requires at least 38Mo
+ # - resizefs asserts when initial fs is smaller than 60Mo and seems to require 1.10.0
+ ext4: {fssize: 10, grow: True}
+ ext4dev: {fssize: 10, grow: True}
+ ext3: {fssize: 10, grow: True}
+ ext2: {fssize: 10, grow: True}
+ xfs: {fssize: 20, grow: False} # grow requires a mounted filesystem
+ btrfs: {fssize: 150, grow: False} # grow not implemented
+ vfat: {fssize: 20, grow: True}
+ ocfs2: {fssize: '{{ ocfs2_fssize }}', grow: False} # grow not implemented
+ f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'}
+ lvm: {fssize: 20, grow: True}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml
new file mode 100644
index 00000000..7853656a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml
new file mode 100644
index 00000000..e49861e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml
@@ -0,0 +1,33 @@
+---
+- name: 'Create a "disk" file'
+ command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}'
+
+- vars:
+ dev: '{{ image_file }}'
+ block:
+ - when: fstype == 'lvm'
+ block:
+ - name: 'Create a loop device for LVM'
+ command: 'losetup --show -f {{ dev }}'
+ register: loop_device_cmd
+
+ - set_fact:
+ dev: "{{ loop_device_cmd.stdout }}"
+
+ - include_tasks: '{{ action }}.yml'
+
+ always:
+ - name: 'Detach loop device used for LVM'
+ command: 'losetup -d {{ dev }}'
+ args:
+ removes: '{{ dev }}'
+ when: fstype == 'lvm'
+
+ - name: 'Clean correct device for LVM'
+ set_fact:
+ dev: '{{ image_file }}'
+ when: fstype == 'lvm'
+
+ - file:
+ name: '{{ image_file }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml
new file mode 100644
index 00000000..688a4462
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml
@@ -0,0 +1,87 @@
+- name: filesystem creation
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs_result
+
+- assert:
+ that:
+ - 'fs_result is changed'
+ - 'fs_result is success'
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid
+
+- name: "Check that filesystem isn't created if force isn't used"
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs2_result
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid2
+
+- assert:
+ that:
+ - 'not (fs2_result is changed)'
+ - 'fs2_result is success'
+ - 'uuid.stdout == uuid2.stdout'
+
+- name: Check that filesystem is recreated if force is used
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ force: yes
+ register: fs3_result
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid3
+
+- assert:
+ that:
+ - 'fs3_result is changed'
+ - 'fs3_result is success'
+ - 'uuid.stdout != uuid3.stdout'
+
+
+- when: 'grow|bool and (fstype != "vfat" or resize_vfat)'
+ block:
+ - name: increase fake device
+ shell: 'dd if=/dev/zero bs=1M count=1 >> {{ image_file }}'
+
+ - name: Resize loop device for LVM
+ command: losetup -c {{ dev }}
+ when: fstype == 'lvm'
+
+ - name: Expand filesystem
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ resizefs: yes
+ register: fs4_result
+
+ - command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid4
+
+ - assert:
+ that:
+ - 'fs4_result is changed'
+ - 'fs4_result is success'
+ - 'uuid3.stdout == uuid4.stdout' # unchanged
+
+- when:
+ - (grow | bool and (fstype != "vfat" or resize_vfat)) or
+ (fstype == "xfs" and ansible_system == "Linux" and
+ ansible_distribution not in ["CentOS", "Ubuntu"])
+ block:
+ - name: Check that resizefs does nothing if device size is not changed
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ resizefs: yes
+ register: fs5_result
+
+ - assert:
+ that:
+ - 'fs5_result is not changed'
+ - 'fs5_result is succeeded'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml
new file mode 100644
index 00000000..44e8c49f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- debug:
+ msg: '{{ role_name }}'
+- debug:
+ msg: '{{ role_path|basename }}'
+- import_tasks: setup.yml
+
+- include_vars: "{{ lookup('first_found', search) }}"
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - 'default.yml'
+ paths:
+ - '../vars/'
+
+- include_tasks: create_device.yml
+ vars:
+ image_file: '{{ remote_tmp_dir }}/img'
+ fstype: '{{ item.0.key }}'
+ fssize: '{{ item.0.value.fssize }}'
+ grow: '{{ item.0.value.grow }}'
+ action: '{{ item.1 }}'
+ when:
+ - 'not (item.0.key == "btrfs" and ansible_system == "FreeBSD")' # btrfs not available on FreeBSD
+ # On Ubuntu trusty, blkid is unable to identify filesystem smaller than 256Mo, see
+ # https://www.kernel.org/pub/linux/utils/util-linux/v2.21/v2.21-ChangeLog
+ # https://anonscm.debian.org/cgit/collab-maint/pkg-util-linux.git/commit/?id=04f7020eadf31efc731558df92daa0a1c336c46c
+ - 'not (item.0.key == "btrfs" and (ansible_distribution == "Ubuntu" and ansible_distribution_release == "trusty"))'
+ - 'not (item.0.key == "btrfs" and (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version("8", ">=")))'
+ - 'not (item.0.key == "lvm" and ansible_system == "FreeBSD")' # LVM not available on FreeBSD
+ - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])' # Tests use losetup which can not be used inside unprivileged container
+ - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")' # ocfs2 only available on Debian based distributions
+ - 'not (item.0.key == "f2fs" and ansible_system == "FreeBSD")'
+ # f2fs-tools package not available with RHEL/CentOS
+ - 'not (item.0.key == "f2fs" and ansible_distribution in ["CentOS", "RedHat"])'
+ # On Ubuntu trusty, blkid (2.20.1) is unable to identify F2FS filesystem. blkid handles F2FS since v2.23, see:
+ # https://mirrors.edge.kernel.org/pub/linux/utils/util-linux/v2.23/v2.23-ReleaseNotes
+ - 'not (item.0.key == "f2fs" and ansible_distribution == "Ubuntu" and ansible_distribution_version is version("14.04", "<="))'
+ - 'not (item.1 == "overwrite_another_fs" and ansible_system == "FreeBSD")'
+
+ - 'not (item.1 == "remove_fs" and ansible_system == "FreeBSD")' # util-linux not available on FreeBSD
+ # On CentOS 6 shippable containers, wipefs seems unable to remove vfat signatures
+ - 'not (item.1 == "remove_fs" and item.0.key == "vfat" and ansible_distribution == "CentOS" and
+ ansible_distribution_version is version("7.0", "<"))'
+
+ # The xfsprogs package on newer versions of OpenSUSE (15+) require Python 3, we skip this on our Python 2 container
+ # OpenSUSE 42.3 Python2 and the other py3 containers are not affected so we will continue to run that
+ - 'not (item.0.key == "xfs" and ansible_os_family == "Suse" and ansible_python.version.major == 2 and ansible_distribution_major_version|int != 42)'
+ loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml
new file mode 100644
index 00000000..671d9b0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml
@@ -0,0 +1,40 @@
+---
+- name: 'Recreate "disk" file'
+ command: 'dd if=/dev/zero of={{ image_file }} bs=1M count={{ fssize }}'
+
+- name: 'Create a swap filesystem'
+ command: 'mkswap {{ dev }}'
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid
+
+- name: "Check that an existing filesystem (not handled by this module) isn't overwritten when force isn't used"
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs_result
+ ignore_errors: True
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid2
+
+- assert:
+ that:
+ - 'fs_result is failed'
+ - 'uuid.stdout == uuid2.stdout'
+
+- name: "Check that an existing filesystem (not handled by this module) is overwritten when force is used"
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ force: yes
+ register: fs_result2
+
+- command: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ register: uuid3
+
+- assert:
+ that:
+ - 'fs_result2 is successful'
+ - 'fs_result2 is changed'
+ - 'uuid2.stdout != uuid3.stdout'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml
new file mode 100644
index 00000000..7d1ca2a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml
@@ -0,0 +1,98 @@
+---
+# We assume 'create_fs' tests have passed.
+
+- name: filesystem creation
+ filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+
+- name: get filesystem UUID with 'blkid'
+ command:
+ cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ changed_when: false
+ register: blkid_ref
+
+- name: Assert that a filesystem exists on top of the device
+ assert:
+ that:
+ - blkid_ref.stdout | length > 0
+
+
+# Test check_mode first
+- name: filesystem removal (check mode)
+ filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+ check_mode: yes
+
+- name: get filesystem UUID with 'blkid' (should remain the same)
+ command:
+ cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ changed_when: false
+ register: blkid
+
+- name: Assert that the state changed but the filesystem still exists
+ assert:
+ that:
+ - wipefs is changed
+ - blkid.stdout == blkid_ref.stdout
+
+# Do it
+- name: filesystem removal
+ filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+
+- name: get filesystem UUID with 'blkid' (should be empty)
+ command:
+ cmd: 'blkid -c /dev/null -o value -s UUID {{ dev }}'
+ changed_when: false
+ failed_when: false
+ register: blkid
+
+- name: Assert that the state changed and the device has no filesystem
+ assert:
+ that:
+ - wipefs is changed
+ - blkid.stdout | length == 0
+ - blkid.rc == 2
+
+# Do it again
+- name: filesystem removal (idempotency)
+ filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+
+- name: Assert that the state did not change
+ assert:
+ that:
+ - wipefs is not changed
+
+# and again
+- name: filesystem removal (idempotency, check mode)
+ filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+ check_mode: yes
+
+- name: Assert that the state did not change
+ assert:
+ that:
+ - wipefs is not changed
+
+
+# Also test removal of a filesystem on a nonexistent device
+- name: filesystem removal (nonexistent device)
+ filesystem:
+ dev: '/dev/unexistent_device'
+ state: absent
+ register: wipefs
+
+- name: Assert that the state did not change
+ assert:
+ that:
+ - wipefs is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml
new file mode 100644
index 00000000..82fe7c54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml
@@ -0,0 +1,97 @@
+---
+- name: install filesystem tools
+ package:
+ name: '{{ item }}'
+ state: present
+ # xfsprogs on OpenSUSE requires Python 3, skip this for our newer Py2 OpenSUSE builds
+ when: not (item == 'xfsprogs' and ansible_os_family == 'Suse' and ansible_python.version.major == 2 and ansible_distribution_major_version|int != 42)
+ with_items:
+ - e2fsprogs
+ - xfsprogs
+
+- block:
+ - name: install btrfs progs
+ package:
+ name: btrfs-progs
+ state: present
+ when:
+ - ansible_os_family != 'Suse'
+ - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<='))
+ - ansible_system != "FreeBSD"
+ - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>='))
+
+ - name: install btrfs progs (Ubuntu <= 16.04)
+ package:
+ name: btrfs-tools
+ state: present
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<=')
+
+ - name: install btrfs progs (OpenSuse)
+ package:
+ name: '{{ item }}'
+ state: present
+ when: ansible_os_family == 'Suse'
+ with_items:
+ - python{{ ansible_python.version.major }}-xml
+ - btrfsprogs
+
+ - name: install ocfs2 (Debian)
+ package:
+ name: ocfs2-tools
+ state: present
+ when: ansible_os_family == 'Debian'
+
+ - when:
+ - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora'
+ - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', '>=')
+ - ansible_system != "FreeBSD"
+ block:
+ - name: install f2fs
+ package:
+ name: f2fs-tools
+ state: present
+
+ - name: fetch f2fs version
+ command: mkfs.f2fs /dev/null
+ ignore_errors: yes
+ register: mkfs_f2fs
+
+ - set_fact:
+ f2fs_version: '{{ mkfs_f2fs.stdout | regex_search("F2FS-tools: mkfs.f2fs Ver:.*") | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}'
+
+ - name: install dosfstools and lvm2 (Linux)
+ package:
+ name: '{{ item }}'
+ with_items:
+ - dosfstools
+ - lvm2
+ when: ansible_system == 'Linux'
+
+- block:
+ - name: install fatresize
+ package:
+ name: fatresize
+ state: present
+ - command: fatresize --help
+ register: fatresize
+ - set_fact:
+ fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}'
+ when:
+ - ansible_system == 'Linux'
+ - ansible_os_family != 'Suse'
+ - ansible_os_family != 'RedHat' or (ansible_distribution == 'CentOS' and ansible_distribution_version is version('7.0', '=='))
+
+- command: mke2fs -V
+ register: mke2fs
+
+- set_fact:
+ # mke2fs 1.43.6 (29-Aug-2017)
+ e2fsprogs_version: '{{ mke2fs.stderr_lines[0] | regex_search("[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?") }}'
+
+- set_fact:
+ # http://e2fsprogs.sourceforge.net/e2fsprogs-release.html#1.43
+ # Mke2fs no longer complains if the user tries to create a file system
+ # using the entire block device.
+ force_creation: "{{ e2fsprogs_version is version('1.43', '<') }}"
+ # Earlier versions have a segfault bug
+ resize_vfat: "{{ fatresize_version|default('0.0') is version('1.0.4', '>=') }}"
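The regex chains above keep only the dotted version number from the tools' output; the sample quoted in the comment ('mke2fs 1.43.6 (29-Aug-2017)') therefore yields '1.43.6'. A minimal sketch of that extraction applied to the sample string; the sample_line variable is illustrative:

- name: Illustrate the e2fsprogs version extraction
  vars:
    sample_line: "mke2fs 1.43.6 (29-Aug-2017)"
  assert:
    that:
      - 'sample_line | regex_search("[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?") == "1.43.6"'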
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml
new file mode 100644
index 00000000..e2ead0bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml
@@ -0,0 +1,3 @@
+---
+ocfs2_fssize: 108
+f2fs_fssize: 116
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml
new file mode 100644
index 00000000..85b052d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml
@@ -0,0 +1,2 @@
+---
+ocfs2_fssize: 20
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases
new file mode 100644
index 00000000..f04737b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml
new file mode 100644
index 00000000..871962e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: test dict_kv filter
+ assert:
+ that:
+ - "('value' | community.general.dict_kv('key')) == {'key': 'value'}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases
new file mode 100644
index 00000000..f49c8c7f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/python2.7 # jc only supports python3.x
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh
new file mode 100755
index 00000000..17c5beca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+
+pip install jc
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
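The comment above matters because filter plugins such as community.general.jc run on the controller, so the jc library must be importable by the controller's Python before the filter task runs. A minimal pre-flight sketch (illustrative only, not part of this diff; it assumes the virtualenv created by virtualenv.sh is the active controller interpreter):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Confirm jc is importable by the controller's Python
      command: "{{ ansible_playbook_python }} -c 'import jc'"
      changed_when: false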
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml
new file mode 100644
index 00000000..c624d120
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - { role: filter_jc }
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml
new file mode 100644
index 00000000..3cae22d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: test jc key/value parser
+ assert:
+ that:
+ - "('key1=value1\nkey2=value2' | community.general.jc('kv')) == {'key1': 'value1', 'key2': 'value2'}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh
new file mode 100755
index 00000000..a0db5e54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+
+pip install jmespath
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml
new file mode 100644
index 00000000..68f6372c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml
@@ -0,0 +1,3 @@
+- hosts: localhost
+ roles:
+ - { role: filter_json_query }
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml
new file mode 100644
index 00000000..2cc3e12d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Test json_query filter
+ assert:
+ that:
+ - "users | community.general.json_query('[*].hosts[].host') == ['host_a', 'host_b', 'host_c', 'host_d']"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml
new file mode 100644
index 00000000..36964115
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml
@@ -0,0 +1,11 @@
+users:
+ - name: steve
+ hosts:
+ - host: host_a
+ password: abc
+ - host: host_b
+ - name: bill
+ hosts:
+ - host: host_c
+ password: default
+ - host: host_d
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases
new file mode 100644
index 00000000..1603f435
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml
new file mode 100644
index 00000000..782b6e5c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml
@@ -0,0 +1,64 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for filters
+# Copyright: (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- set_fact:
+ output_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
+
+- name: Test random_mac filter bad argument type
+ debug:
+ var: "0 | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: yes
+
+- name: Verify random_mac filter showed a bad argument type error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value type (.*int.*) for random_mac .*')"
+
+- name: Test random_mac filter bad argument value
+ debug:
+ var: "'dummy' | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: yes
+
+- name: Verify random_mac filter showed a bad argument value error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: .* not hexa byte')"
+
+- name: Test random_mac filter prefix too big
+ debug:
+ var: "'00:00:00:00:00:00' | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: yes
+
+- name: Verify random_mac filter showed a prefix too big error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: 5 colon.* separated items max')"
+
+- name: Verify random_mac filter
+ assert:
+ that:
+ - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac"
+
+- name: Verify random_mac filter with seed
+ assert:
+ that:
+ - "'00:00:00' | community.general.random_mac(seed='test') == '00:00:00' | community.general.random_mac(seed='test')"
+ - "'00:00:00' | community.general.random_mac(seed='test') != '00:00:00' | community.general.random_mac(seed='another_test')"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/aliases
new file mode 100644
index 00000000..f04737b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml
new file mode 100644
index 00000000..b3e8a55d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml
@@ -0,0 +1,104 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: test to_milliseconds filter
+ assert:
+ that:
+ - "('1000ms' | community.general.to_milliseconds) == 1000"
+ - "('1s' | community.general.to_milliseconds) == 1000"
+ - "('1m' | community.general.to_milliseconds) == 60000"
+
+- name: test to_seconds filter
+ assert:
+ that:
+ - "('1000msecs' | community.general.to_seconds) == 1"
+ - "('1ms' | community.general.to_seconds) == 0.001"
+ - "('12m' | community.general.to_seconds) == 720"
+ - "('300minutes' | community.general.to_seconds) == 18000"
+ - "('3h 12m' | community.general.to_seconds) == 11520"
+ - "('2days 3hours 12mins 15secs' | community.general.to_seconds) == 184335"
+ - "('2d -2d -12s' | community.general.to_seconds) == -12"
+
+- name: test to_minutes filter
+ assert:
+ that:
+ - "('30s' | community.general.to_minutes) == 0.5"
+ - "('12m' | community.general.to_minutes) == 12"
+ - "('3h 72m' | community.general.to_minutes) == 252"
+ - "('300s' | community.general.to_minutes) == 5"
+
+- name: test to_hours filter
+ assert:
+ that:
+ - "('30m' | community.general.to_hours) == 0.5"
+ - "('3h 119m 61s' | community.general.to_hours) > 5"
+
+- name: test to_days filter
+ assert:
+ that:
+ - "('1year' | community.general.to_days) == 365"
+ - "('1week' | community.general.to_days) == 7"
+ - "('2weeks' | community.general.to_days) == 14"
+ - "('1mo' | community.general.to_days) == 30"
+ - "('1mo' | community.general.to_days(month=28)) == 28"
+
+- name: test to_weeks filter
+ assert:
+ that:
+ - "('1y' | community.general.to_weeks | int) == 52"
+ - "('7d' | community.general.to_weeks) == 1"
+ - "('1mo' | community.general.to_weeks(month=28)) == 4"
+
+- name: test to_months filter
+ assert:
+ that:
+ - "('30d' | community.general.to_months) == 1"
+ - "('1year' | community.general.to_months | int) == 12"
+ - "('5years' | community.general.to_months(month=30, year=360)) == 60"
+ - "('1years' | community.general.to_months(month=2, year=34)) == 17"
+
+- name: test to_years filter
+ assert:
+ that:
+ - "('365d' | community.general.to_years | int) == 1"
+ - "('12mo' | community.general.to_years | round(0, 'ceil')) == 1"
+ - "('24mo' | community.general.to_years(month=30, year=360)) == 2"
+
+- name: test fail unknown unit
+ debug:
+ msg: "{{ '1s' | community.general.to_time_unit('lightyears') }}"
+ ignore_errors: yes
+ register: res
+
+- name: verify test fail unknown unit
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() can not convert to the following unit: lightyears' in res.msg"
+
+- name: test fail unknown string
+ debug:
+ msg: "{{ '1 s' | community.general.to_time_unit('s') }}"
+ ignore_errors: yes
+ register: res
+
+- name: verify test fail unknown string
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() can not interpret following string' in res.msg"
+
+- name: test fail unknown kwarg
+ debug:
+ msg: "{{ '1s' | community.general.to_time_unit('s', second=23) }}"
+ ignore_errors: yes
+ register: res
+
+- name: verify test fail unknown kwarg
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() got unknown keyword arguments' in res.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/aliases
new file mode 100644
index 00000000..59e306f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/aliases
@@ -0,0 +1,9 @@
+unsupported
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
+needs/privileged
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml
new file mode 100644
index 00000000..065f10df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml
@@ -0,0 +1,193 @@
+# - Tests with absent flatpak --------------------------------------------------
+
+# state=present on absent flatpak
+
+- name: Test addition of absent flatpak (check mode)
+ flatpak:
+ name: org.gnome.Characters
+ remote: flathub
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "Adding an absent flatpak shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak (check mode)
+ flatpak:
+ name: org.gnome.Characters
+ remote: flathub
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: Verify non-existent idempotency of addition of absent flatpak test result (check mode)
+ assert:
+ that:
+ - "double_addition_result.changed == true"
+ msg: |
+ Adding an absent flatpak a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent on absent flatpak
+
+- name: Test removal of absent flatpak (check mode)
+ flatpak:
+ name: org.gnome.Characters
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == false"
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+# state=present with url on absent flatpak
+
+- name: Test addition of absent flatpak with url (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ remote: flathub
+ state: present
+ register: url_addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak with url test result (check mode)
+ assert:
+ that:
+ - "url_addition_result.changed == true"
+ msg: "Adding an absent flatpak from URL shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak with url (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ remote: flathub
+ state: present
+ register: double_url_addition_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of addition of absent flatpak with url test
+ result (check mode)
+ assert:
+ that:
+ - "double_url_addition_result.changed == true"
+ msg: |
+ Adding an absent flatpak from URL a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent with url on absent flatpak
+
+- name: Test removal of absent flatpak with url not doing anything (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ state: absent
+ register: url_removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak with url test result (check mode)
+ assert:
+ that:
+ - "url_removal_result.changed == false"
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+
+# - Tests with present flatpak -------------------------------------------------
+
+# state=present on present flatpak
+
+- name: Test addition of present flatpak (check mode)
+ flatpak:
+ name: org.gnome.Calculator
+ remote: flathub
+ state: present
+ register: addition_present_result
+ check_mode: true
+
+- name: Verify addition test result of present flatpak (check mode)
+ assert:
+ that:
+ - "addition_present_result.changed == false"
+ msg: "Adding an present flatpak shall mark module execution as not changed"
+
+# state=absent on present flatpak
+
+- name: Test removal of present flatpak (check mode)
+ flatpak:
+ name: org.gnome.Calculator
+ state: absent
+ register: removal_present_result
+ check_mode: true
+
+- name: Verify removal of present flatpak test result (check mode)
+ assert:
+ that:
+ - "removal_present_result.changed == true"
+ msg: "Removing a present flatpak shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal (check mode)
+ flatpak:
+ name: org.gnome.Calculator
+ state: absent
+ register: double_removal_present_result
+ check_mode: true
+
+- name: Verify non-existent idempotency of removal (check mode)
+ assert:
+ that:
+ - "double_removal_present_result.changed == true"
+ msg: |
+ Removing a present flatpak a second time shall still mark module execution
+ as changed in check mode
+
+# state=present with url on present flatpak
+
+- name: Test addition with url of present flatpak (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref
+ remote: flathub
+ state: present
+ register: url_addition_present_result
+ check_mode: true
+
+- name: Verify addition with url of present flatpak test result (check mode)
+ assert:
+ that:
+ - "url_addition_present_result.changed == false"
+ msg: "Adding a present flatpak from URL shall mark module execution as not changed"
+
+# state=absent with url on present flatpak
+
+- name: Test removal with url of present flatpak (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref
+ state: absent
+ register: url_removal_present_result
+ check_mode: true
+
+- name: Verify removal with url of present flatpak test result (check mode)
+ assert:
+ that:
+ - "url_removal_present_result.changed == true"
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+- name: Test non-existent idempotency of removal with url of present flatpak (check mode)
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Calculator.flatpakref
+ remote: flathub
+ state: absent
+ register: double_url_removal_present_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of removal with url of present
+ flatpak test result (check mode)
+ assert:
+ that:
+ - "double_url_removal_present_result.changed == true"
+ msg: Removing a present flatpak a second time shall still mark module execution as changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml
new file mode 100644
index 00000000..45f9ecd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml
@@ -0,0 +1,61 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# (c) 2018, Ansible Project
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak:
+ name: org.gnome.Characters
+ remote: flathub
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - "executable_override_result.failed == true"
+ - "executable_override_result.changed == false"
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
+ when: |
+ ansible_distribution in ('Fedora', 'Ubuntu')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml
new file mode 100644
index 00000000..2dfa33a0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml
@@ -0,0 +1,35 @@
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+ become: true
+ when: ansible_distribution == 'Fedora'
+- block:
+ - name: Activate flatpak ppa on Ubuntu
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+ become: true
+ when: ansible_distribution == 'Ubuntu'
+- name: Enable flathub for user
+ flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+- name: Enable flathub for system
+ flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: system
+- name: Add flatpak for testing check mode on present flatpak
+ flatpak:
+ name: org.gnome.Calculator
+ remote: flathub
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml
new file mode 100644
index 00000000..20d864a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml
@@ -0,0 +1,115 @@
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak:
+ name: org.gnome.Characters
+ remote: flathub
+ state: present
+ method: "{{ method }}"
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak:
+ name: org.gnome.Characters
+ remote: flathub
+ state: present
+ method: "{{ method }}"
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - "double_addition_result.changed == false"
+ msg: "state=present shall not do anything when flatpak is already present"
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak:
+ name: org.gnome.Characters
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak:
+ name: org.gnome.Characters
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - "double_removal_result.changed == false"
+ msg: "state=absent shall not do anything when flatpak is not present"
+
+# state=present with url as name
+
+- name: Test addition with url - {{ method }}
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ remote: flathub
+ state: present
+ method: "{{ method }}"
+ register: url_addition_result
+
+- name: Verify addition with url test result - {{ method }}
+ assert:
+ that:
+ - "url_addition_result.changed == true"
+ msg: "state=present with url as name shall add flatpak when absent"
+
+- name: Test idempotency of addition with url - {{ method }}
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ remote: flathub
+ state: present
+ method: "{{ method }}"
+ register: double_url_addition_result
+
+- name: Verify idempotency of addition with url test result - {{ method }}
+ assert:
+ that:
+ - "double_url_addition_result.changed == false"
+ msg: "state=present with url as name shall not do anything when flatpak is already present"
+
+# state=absent with url as name
+
+- name: Test removal with url - {{ method }}
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ state: absent
+ method: "{{ method }}"
+ register: url_removal_result
+
+- name: Verify removal with url test result - {{ method }}
+ assert:
+ that:
+ - "url_removal_result.changed == true"
+ msg: "state=absent with url as name shall remove flatpak when present"
+
+- name: Test idempotency of removal with url - {{ method }}
+ flatpak:
+ name: https://flathub.org/repo/appstream/org.gnome.Characters.flatpakref
+ state: absent
+ method: "{{ method }}"
+ register: double_url_removal_result
+
+- name: Verify idempotency of removal with url test result - {{ method }}
+ assert:
+ that:
+ - "double_url_removal_result.changed == false"
+ msg: "state=absent with url as name shall not do anything when flatpak is not present"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases
new file mode 100644
index 00000000..39291d43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases
@@ -0,0 +1,8 @@
+shippable/posix/group3
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml
new file mode 100644
index 00000000..314f77eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - prepare_tests
+ - setup_flatpak_remote
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml
new file mode 100644
index 00000000..7ce89a8c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml
@@ -0,0 +1,101 @@
+# - Tests with absent flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "Adding an absent flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of addition of absent flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_addition_result.changed == true"
+ msg: |
+ Adding an absent flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent
+
+- name: Test removal of absent flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == false"
+ msg: "Removing an absent flatpak remote shall mark module execution as not changed"
+
+
+# - Tests with present flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "addition_result.changed == false"
+ msg: "Adding a present flatpak remote shall mark module execution as not changed"
+
+# state=absent
+
+- name: Test removal of present flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "Removing a present flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: double_removal_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of removal of present flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - "double_removal_result.changed == true"
+ msg: |
+ Removing a present flatpak remote a second time shall still mark module execution
+ as changed in check mode
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml
new file mode 100644
index 00000000..aa2219e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml
@@ -0,0 +1,62 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# (c) 2018, Ansible Project
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak_remote:
+ name: irrelevant
+ flatpakrepo_url: irrelevant
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - "executable_override_result.failed == true"
+ - "executable_override_result.changed == false"
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
+ when: |
+ ansible_distribution == 'Fedora' or
+ ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml
new file mode 100644
index 00000000..65e060f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml
@@ -0,0 +1,22 @@
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+ when: ansible_distribution == 'Fedora'
+- block:
+ - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ when: ansible_lsb.major_release | int < 18
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+ when: ansible_distribution == 'Ubuntu'
+- name: Install flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml
new file mode 100644
index 00000000..9570f623
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml
@@ -0,0 +1,72 @@
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - "addition_result.changed == true"
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - "double_addition_result.changed == false"
+ msg: "state=present shall not do anything when flatpak is already present"
+
+- name: Test updating remote url does not do anything - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: https://a.different/repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: url_update_result
+
+- name: Verify updating remote url does not do anything - {{ method }}
+ assert:
+ that:
+ - "url_update_result.changed == false"
+ msg: "Trying to update the URL of an existing flatpak remote shall not do anything"
+
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - "removal_result.changed == true"
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - "double_removal_result.changed == false"
+ msg: "state=absent shall not do anything when flatpak is not present"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/aliases
new file mode 100644
index 00000000..1ef4c361
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml
new file mode 100644
index 00000000..ca521ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml
new file mode 100644
index 00000000..ce64364d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml
@@ -0,0 +1,180 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the gem module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'default.yml'
+ paths: '../vars'
+
+- name: Install dependencies for test
+ package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ test_packages }}"
+ when: ansible_distribution != "MacOSX"
+
+- name: Install a gem
+ gem:
+ name: gist
+ state: present
+ register: install_gem_result
+ ignore_errors: yes
+
+# When running as root on Fedora, '--install-dir' is set in the OS defaults, which is
+# incompatible with '--user-install'; we ignore this error for this case only.
+- name: fail if failed to install gem
+ fail:
+ msg: "failed to install gem: {{ install_gem_result.msg }}"
+ when:
+ - install_gem_result is failed
+ - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" not in install_gem_result.msg)
+
+- block:
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: not install_gem_result is failed
+
+# install gem in --no-user-install
+- block:
+ - name: Install a gem with --no-user-install
+ gem:
+ name: gist
+ state: present
+ user_install: no
+ register: install_gem_result
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: ansible_user_uid == 0
+
+# Check custom gem directory
+- name: Install gem in a custom directory with incorrect options
+ gem:
+ name: gist
+ state: present
+ install_dir: "{{ output_dir }}/gems"
+ ignore_errors: yes
+ register: install_gem_fail_result
+
+- debug:
+ var: install_gem_fail_result
+ tags: debug
+
+- name: Ensure previous task failed
+ assert:
+ that:
+ - install_gem_fail_result is failed
+ - install_gem_fail_result.msg == 'install_dir requires user_install=false'
+
+- name: Install a gem in a custom directory
+ gem:
+ name: gist
+ state: present
+ user_install: no
+ install_dir: "{{ output_dir }}/gems"
+ register: install_gem_result
+
+- name: Find gems in custom directory
+ find:
+ paths: "{{ output_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
+
+- name: Ensure gem was installed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_search.files[0].path is search('gist-[0-9.]+')
+ ignore_errors: yes
+
+- name: Remove a gem in a custom directory
+ gem:
+ name: gist
+ state: absent
+ user_install: no
+ install_dir: "{{ output_dir }}/gems"
+ register: install_gem_result
+
+- name: Find gems in custom directory
+ find:
+ paths: "{{ output_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
+
+- name: Ensure gem was removed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_search.files | length == 0
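The earlier note about root on Fedora refers to the distribution's RubyGems defaults injecting '--install-dir', which RubyGems refuses to combine with '--user-install'. A minimal sketch of what that tolerated failure would look like (illustrative only, not part of this diff; it assumes a root session on a Fedora host and reuses the gist gem from the tasks above):

- name: Attempt a user install as root on Fedora (expected to hit the conflict)
  gem:
    name: gist
    state: present
    user_install: yes
  register: conflict_result
  ignore_errors: yes

- name: Show the RubyGems conflict message, if any
  debug:
    msg: "{{ conflict_result.msg | default('gem install succeeded') }}"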
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml
new file mode 100644
index 00000000..84e0b483
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml
@@ -0,0 +1,3 @@
+test_packages:
+ - "devel/ruby-gems"
+ - "ruby"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml
new file mode 100644
index 00000000..c044d109
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml
@@ -0,0 +1,2 @@
+test_packages:
+ - "rubygems"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml
new file mode 100644
index 00000000..7d6e61ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml
@@ -0,0 +1 @@
+test_packages: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/aliases
new file mode 100644
index 00000000..757c9966
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig
new file mode 100644
index 00000000..989aa1c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig
@@ -0,0 +1,2 @@
+[http]
+ proxy = foo \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
new file mode 100644
index 00000000..09a6beee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
@@ -0,0 +1,16 @@
+---
+- import_tasks: setup_no_value.yml
+
+- name: testing exclusion between state and list_all parameters
+ git_config:
+ list_all: true
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- name: assert git_config failed
+ assert:
+ that:
+ - result is failed
+ - "result.msg == 'parameters are mutually exclusive: list_all|state'"
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml
new file mode 100644
index 00000000..149a9b2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml
@@ -0,0 +1,25 @@
+---
+- import_tasks: setup_no_value.yml
+
+- name: setting value without state
+ git_config:
+ name: "{{ option_name }}"
+ value: "{{ option_value }}"
+ scope: "{{ option_scope }}"
+ register: set_result
+
+- name: getting value without state
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert set changed and value is correct
+ assert:
+ that:
+ - set_result.changed == true
+ - set_result.diff.before == "\n"
+ - set_result.diff.after == option_value + "\n"
+ - get_result.changed == false
+ - get_result.config_value == option_value
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml
new file mode 100644
index 00000000..59f3c9c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml
@@ -0,0 +1,27 @@
+---
+- import_tasks: setup_no_value.yml
+
+- name: setting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ value: "{{ option_value }}"
+ scope: "{{ option_scope }}"
+ state: present
+ register: set_result
+
+- name: getting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: present
+ register: get_result
+
+- name: assert set changed and value is correct with state=present
+ assert:
+ that:
+ - set_result.changed == true
+ - set_result.diff.before == "\n"
+ - set_result.diff.after == option_value + "\n"
+ - get_result.changed == false
+ - get_result.config_value == option_value
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml
new file mode 100644
index 00000000..36eee370
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the git_config module
+
+- name: setup
+ import_tasks: setup.yml
+
+- block:
+ # testing parameters exclusion: state and list_all
+ - import_tasks: exclusion_state_list-all.yml
+ # testing get/set option without state
+ - import_tasks: get_set_no_state.yml
+ # testing get/set option with state=present
+ - import_tasks: get_set_state_present.yml
+ # testing state=absent without value to delete
+ - import_tasks: unset_no_value.yml
+ # testing state=absent with value to delete
+ - import_tasks: unset_value.yml
+ # testing state=absent with value to delete and a defined value parameter
+ - import_tasks: precedence_between_unset_and_value.yml
+ # testing state=absent with check mode
+ - import_tasks: unset_check_mode.yml
+ when: git_installed is succeeded and git_version.stdout is version(git_version_supporting_includes, ">=")
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
new file mode 100644
index 00000000..24ef2920
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
@@ -0,0 +1,25 @@
+---
+- import_tasks: setup_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ value: bar
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed and deleted value
+ assert:
+ that:
+ - unset_result.changed == true
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == ''
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml
new file mode 100644
index 00000000..85e168fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml
@@ -0,0 +1,11 @@
+---
+- name: verify that git is installed so this test can continue
+ command: which git
+ register: git_installed
+ ignore_errors: yes
+
+- name: get git version, only versions newer than {{ git_version_supporting_includes }} have the includes option
+ shell: "git --version | grep 'git version' | sed 's/git version //'"
+ register: git_version
+ ignore_errors: yes
+... \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml
new file mode 100644
index 00000000..01a2c973
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml
@@ -0,0 +1,8 @@
+---
+# ------
+# set up: delete the gitconfig file
+- name: set up without value
+ file:
+ path: ~/.gitconfig
+ state: absent
+... \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml
new file mode 100644
index 00000000..f5e05654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml
@@ -0,0 +1,8 @@
+---
+# ------
+# set up: set gitconfig with a value
+- name: set up with value
+ copy:
+ src: gitconfig
+ dest: ~/.gitconfig
+... \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml
new file mode 100644
index 00000000..c8fe00c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml
@@ -0,0 +1,25 @@
+---
+- import_tasks: setup_value.yml
+
+- name: unsetting value with check mode
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ check_mode: yes
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed but did not delete value
+ assert:
+ that:
+ - unset_result.changed == true
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == option_value
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml
new file mode 100644
index 00000000..71568e3a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml
@@ -0,0 +1,23 @@
+---
+- import_tasks: setup_no_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unsetting did not change anything
+ assert:
+ that:
+ - unset_result.changed == false
+ - unset_result.msg == 'no setting to unset'
+ - get_result.config_value == ''
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml
new file mode 100644
index 00000000..a2308156
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml
@@ -0,0 +1,24 @@
+---
+- import_tasks: setup_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed and deleted value
+ assert:
+ that:
+ - unset_result.changed == true
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == ''
+...
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml
new file mode 100644
index 00000000..545110a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml
@@ -0,0 +1,6 @@
+---
+git_version_supporting_includes: 1.7.10
+option_name: http.proxy
+option_value: 'foo'
+option_scope: global
+... \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/aliases
new file mode 100644
index 00000000..a4c92ef8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/aliases
@@ -0,0 +1,2 @@
+destructive
+shippable/posix/group1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml
new file mode 100644
index 00000000..24266128
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml
@@ -0,0 +1,37 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the github_issue module.
+#
+# Copyright: (c) 2017-2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: Check if GitHub issue is closed or not
+ github_issue:
+ organization: "{{ organization }}"
+ repo: "{{ repo }}"
+ issue: "{{ issue }}"
+ action: get_status
+ register: get_status_0002
+
+- assert:
+ that:
+ - "{{ get_status_0002.changed == True }}"
+ - "{{ get_status_0002.issue_status == 'closed' }}"
+
+- name: Check the status of a non-existent GitHub issue
+ github_issue:
+ organization: "{{ organization }}"
+ repo: "{{ repo }}"
+ issue: "{{ non_existent_issue }}"
+ action: get_status
+ register: get_status_0003
+ ignore_errors: True
+
+- assert:
+ that:
+ - "{{ get_status_0003.changed == False }}"
+ - "{{ get_status_0003.failed == True }}"
+ - "{{ 'Failed' in get_status_0003.msg }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml
new file mode 100644
index 00000000..52546d3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml
@@ -0,0 +1,6 @@
+---
+
+issue: 23642
+non_existent_issue: 1111111
+organization: ansible
+repo: ansible
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml
new file mode 100644
index 00000000..4e475919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml
@@ -0,0 +1,2 @@
+gitlab_project_name: ansible_test_project
+gitlab_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnTYY7CYk1F/wBklpdRxudxN6KeXgfhutkiCigSfPhe ansible_test"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml
new file mode 100644
index 00000000..ba82e378
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml
@@ -0,0 +1,41 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Clean up deploy key for {{ gitlab_project_name }}
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key }}"
+ state: absent
+
+
+- name: Add deploy key to {{ gitlab_project_name }}
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "root/{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key }}"
+ state: present
+ register: deploy_key_status
+
+- assert:
+ that:
+ - deploy_key_status is changed
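This target stops after the first successful add; a hedged sketch of the idempotency re-run used by the neighbouring gitlab_group and gitlab_hook targets would look like the following, reusing the parameters above (not part of the target):

- name: Re-add the same deploy key (idempotency sketch)
  gitlab_deploy_key:
    login_token: "{{ gitlab_login_token }}"
    project: "root/{{ gitlab_project_name }}"
    server_url: "{{ gitlab_host }}"
    title: "{{ gitlab_project_name }}"
    key: "{{ gitlab_deploy_key }}"
    state: present
  register: deploy_key_status_again

- assert:
    that:
      - deploy_key_status_again is not changed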
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml
new file mode 100644
index 00000000..630926d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml
@@ -0,0 +1 @@
+gitlab_group: ansible_test_project
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml
new file mode 100644
index 00000000..34444134
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml
@@ -0,0 +1,74 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up GitLab Group
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: present
+ register: gitlab_group_state
+
+- name: Test group created
+ assert:
+ that:
+ - gitlab_group_state is changed
+
+
+- name: Create GitLab Group (idempotency test)
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: present
+ register: gitlab_group_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - gitlab_group_state_again is not changed
+
+- name: Clean up GitLab Group for Description Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group for Description Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ description: My Test Group
+ state: present
+ register: gitlab_group_state_desc
+
+- name: Test group created with Description
+ assert:
+ that:
+ - gitlab_group_state_desc.group.description == "My Test Group"
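A hedged sketch of a trailing cleanup step, mirroring the cleanup task at the top of this file; it is not part of the target and assumes the group created above still exists:

- name: Remove the test group again (cleanup sketch)
  gitlab_group:
    api_url: "{{ gitlab_host }}"
    validate_certs: false
    api_token: "{{ gitlab_login_token }}"
    name: ansible_test_group
    path: ansible_test_group
    state: absent
  register: gitlab_group_removed

- name: Removing an existing group should report a change
  assert:
    that:
      - gitlab_group_removed is changed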
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases
new file mode 100644
index 00000000..89aea537
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases
@@ -0,0 +1 @@
+unsupported \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml
new file mode 100644
index 00000000..4d4f1168
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml
@@ -0,0 +1,30 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for gitlab_group_members module
+#
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+- name: Install required library
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Add a user to a GitLab group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ username }}'
+ access_level: '{{ gitlab_access_level }}'
+ state: present
+
+- name: Remove a user from a GitLab group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ username }}'
+ state: absent \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml
new file mode 100644
index 00000000..7f68893c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml
@@ -0,0 +1,5 @@
+gitlab_server_url: https://gitlabserver.example.com
+gitlab_api_access_token: 126hngbscx890cv09b
+gitlab_group_name: groupname1
+username: username1
+gitlab_access_level: developer \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml
new file mode 100644
index 00000000..d3b6eb4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml
@@ -0,0 +1,585 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: purge all variables for check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: True
+
+- name: add a variable value in check_mode
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: yes
+ register: gitlab_group_variable_state
+
+- name: check_mode state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: apply add value from check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: test new format
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be not changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: change protected attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: True
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert protected attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: False
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: change masked attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert masked attribute by not mentioning it
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert masked attribute again by not mentioning it (idempotent)
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be not changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: set both (masked and protected) attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ protected: True
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: set again both (masked and protected) attribute (idempotent)
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ protected: True
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: revert both (masked and protected) attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: False
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: change a variable value in check_mode again
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: yes
+ register: gitlab_group_variable_state
+
+- name: check_mode state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: apply again the value change from check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: purge all variables at the beginning
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: True
+
+- name: set two test variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_group_variable_state
+
+- name: set two test variables state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 2
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: re-set two test variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_group_variable_state
+
+- name: re-set two test variables state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 2
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: edit one variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: edit one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+ - gitlab_group_variable_state.group_variable.updated[0] == "ACCESS_KEY_ID"
+
+- name: append one variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 1
+ - gitlab_group_variable_state.group_variable.untouched|length == 2
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.added[0] == "some"
+
+- name: re-set all variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ SECRET_ACCESS_KEY: 321cba
+ some: value
+ register: gitlab_group_variable_state
+
+- name: re-set all variables state must not be changed
+ assert:
+ that:
+ - not gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 3
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: set one variable and purge all others
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: True
+ register: gitlab_group_variable_state
+
+- name: set one variable and purge all others; state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 2
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: only one variable is left
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: only one variable is left state must not be changed
+ assert:
+ that:
+ - not gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.untouched[0] == "some"
+
+- name: test integer values
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: 42
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: only one variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+
+- name: test float values
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: 42.23
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: only one variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+
+- name: delete the last left variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ state: absent
+ vars:
+ some: value
+ register: gitlab_group_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 1
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.removed[0] == "some"
+
+- name: add one variable with variable_type file
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 1
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.added[0] == "my_test_var"
+
+- name: change variable_type attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert variable_type attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: delete the variable_type file variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ state: absent
+ vars:
+ my_test_var: my_test_value
+ register: gitlab_group_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 1
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.removed[0] == "my_test_var"
+
+- name: set complete page and purge existing ones
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ page1_var01: value
+ page1_var02: value
+ page1_var03: value
+ page1_var04: value
+ page1_var05: value
+ page1_var06: value
+ page1_var07: value
+ page1_var08: value
+ page1_var09: value
+ page1_var10: value
+ page1_var11: value
+ page1_var12: value
+ page1_var13: value
+ page1_var14: value
+ page1_var15: value
+ page1_var16: value
+ page1_var17: value
+ page1_var18: value
+ page1_var19: value
+ page1_var20: value
+ purge: True
+ register: gitlab_group_variable_state
+
+- name: complete page added state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 20
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+
+- name: set complete page and keep existing ones
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ page2_var01: value
+ page2_var02: value
+ page2_var03: value
+ page2_var04: value
+ page2_var05: value
+ page2_var06: value
+ page2_var07: value
+ page2_var08: value
+ page2_var09: value
+ page2_var10: value
+ page2_var11: value
+ page2_var12: value
+ page2_var13: value
+ page2_var14: value
+ page2_var15: value
+ page2_var16: value
+ page2_var17: value
+ page2_var18: value
+ page2_var19: value
+ page2_var20: value
+ purge: False
+ register: gitlab_group_variable_state
+
+- name: existing page untouched state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 20
+ - gitlab_group_variable_state.group_variable.untouched|length == 20
+
+- name: check that no variables are left
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: True
+ register: gitlab_group_variable_state
+
+- name: check that no variables are untouched state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 40
+ - gitlab_group_variable_state.group_variable.updated|length == 0
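For reference, the dict form exercised above combines value, masked, protected and variable_type per variable; a compact sketch follows (the variable name and value are placeholders and the task is not part of the target):

- name: Dict-form variable sketch (placeholder name and value)
  gitlab_group_variable:
    api_url: "{{ gitlab_host }}"
    api_token: "{{ gitlab_login_token }}"
    group: "{{ gitlab_group_name }}"
    purge: false
    vars:
      EXAMPLE_TOKEN:
        value: example-value
        masked: true
        protected: true
        variable_type: env_var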
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml
new file mode 100644
index 00000000..eda34a26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml
@@ -0,0 +1,2 @@
+gitlab_project_name: ansible_test_project
+gitlab_hook_url: http://gitlab.example.com/hook
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml
new file mode 100644
index 00000000..0ef6cb5c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml
@@ -0,0 +1,72 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Clean up GitLab hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+
+- name: Create GitLab Hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_hook_state
+
+- name: Test hook created
+ assert:
+ that:
+ - gitlab_hook_state is changed
+
+
+- name: Create GitLab Hook (idempotency test)
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_hook_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - gitlab_hook_state_again is not changed
+
+- name: Remove GitLab hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ register: gitlab_hook_state_absent
+
+- name: Assert hook has been removed
+ assert:
+ that:
+ - gitlab_hook_state_absent is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml
new file mode 100644
index 00000000..4e475919
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml
@@ -0,0 +1,2 @@
+gitlab_project_name: ansible_test_project
+gitlab_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnTYY7CYk1F/wBklpdRxudxN6KeXgfhutkiCigSfPhe ansible_test"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml
new file mode 100644
index 00000000..0a36d388
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml
@@ -0,0 +1,45 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: absent
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_project_state
+
+
+- assert:
+ that:
+ - gitlab_project_state is changed
+
+- name: Create {{ gitlab_project_name }} (Test idempotency)
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_project_state_again
+
+
+- assert:
+ that:
+ - gitlab_project_state_again is not changed
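A hedged sketch of a trailing cleanup, mirroring the clean-up task at the top of this file; it is not part of the target:

- name: Remove {{ gitlab_project_name }} again (cleanup sketch)
  gitlab_project:
    server_url: "{{ gitlab_host }}"
    validate_certs: False
    login_token: "{{ gitlab_login_token }}"
    name: "{{ gitlab_project_name }}"
    state: absent
  register: gitlab_project_removed

- assert:
    that:
      - gitlab_project_removed is changed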
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml
new file mode 100644
index 00000000..2b29619f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml
@@ -0,0 +1,584 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: purge all variables for check_mode test
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: True
+
+- name: add a variable value in check_mode
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: yes
+ register: gitlab_project_variable_state
+
+- name: check_mode state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: apply add value from check_mode test
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: test new format
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be not changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: change protected attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: True
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert protected attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: False
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: change masked attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert masked attribute by not mentioning it
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert masked attribute again by not mentioning it (idempotent)
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be not changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: set both (masked and protected) attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ protected: True
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: set again both (masked and protected) attribute (idempotent)
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: True
+ protected: True
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: revert both (masked and protected) attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: False
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: change a variable value in check_mode again
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: yes
+ register: gitlab_project_variable_state
+
+- name: check_mode state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: apply again the value change from check_mode test
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: purge all variables at the beginning
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: True
+
+- name: set two test variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_project_variable_state
+
+- name: set two test variables state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 2
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: re-set two test variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_project_variable_state
+
+- name: re-set two test variables state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 2
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: edit one variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: edit one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+ - gitlab_project_variable_state.project_variable.updated[0] == "ACCESS_KEY_ID"
+
+- name: append one variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 1
+ - gitlab_project_variable_state.project_variable.untouched|length == 2
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.added[0] == "some"
+
+- name: re-set all variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ SECRET_ACCESS_KEY: 321cba
+ some: value
+ register: gitlab_project_variable_state
+
+- name: re-set all variables state must not be changed
+ assert:
+ that:
+ - not gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 3
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: set one variable and purge all others
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: True
+ register: gitlab_project_variable_state
+
+- name: set one variable and purge all others; state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 2
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: only one variable is left
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: only one variable is left state must not be changed
+ assert:
+ that:
+ - not gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.untouched[0] == "some"
+
+- name: test integer values
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: 42
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: only one variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+
+- name: test float values
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: 42.23
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: only one variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+
+- name: delete the last left variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ vars:
+ some: value
+ register: gitlab_project_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 1
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.removed[0] == "some"
+
+- name: add one variable with variable_type file
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 1
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.added[0] == "my_test_var"
+
+- name: change variable_type attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert variable_type attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: delete the variable_type file variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ vars:
+ my_test_var: my_test_value
+ register: gitlab_project_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 1
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.removed[0] == "my_test_var"
+
+- name: set complete page and purge existing ones
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ page1_var01: value
+ page1_var02: value
+ page1_var03: value
+ page1_var04: value
+ page1_var05: value
+ page1_var06: value
+ page1_var07: value
+ page1_var08: value
+ page1_var09: value
+ page1_var10: value
+ page1_var11: value
+ page1_var12: value
+ page1_var13: value
+ page1_var14: value
+ page1_var15: value
+ page1_var16: value
+ page1_var17: value
+ page1_var18: value
+ page1_var19: value
+ page1_var20: value
+ purge: True
+ register: gitlab_project_variable_state
+
+- name: complete page added state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 20
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+
+- name: set a second complete page of variables and keep existing ones
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ page2_var01: value
+ page2_var02: value
+ page2_var03: value
+ page2_var04: value
+ page2_var05: value
+ page2_var06: value
+ page2_var07: value
+ page2_var08: value
+ page2_var09: value
+ page2_var10: value
+ page2_var11: value
+ page2_var12: value
+ page2_var13: value
+ page2_var14: value
+ page2_var15: value
+ page2_var16: value
+ page2_var17: value
+ page2_var18: value
+ page2_var19: value
+ page2_var20: value
+ purge: False
+ register: gitlab_project_variable_state
+
+- name: existing page untouched and state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 20
+ - gitlab_project_variable_state.project_variable.untouched|length == 20
+
+- name: purge all remaining variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: True
+ register: gitlab_project_variable_state
+
+- name: check that all variables were removed and state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 40
+ - gitlab_project_variable_state.project_variable.updated|length == 0
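Note: the two 20-variable blocks above appear to exercise pagination in the GitLab variables API (which returns results in pages of 20 by default), so purge has to walk every page. The tasks also assume that gitlab_host and gitlab_login_token are supplied by the test environment rather than by role defaults. A hypothetical extra-vars sketch for running this target locally (all values are placeholders):

gitlab_host: https://gitlab.example.com                            # placeholder URL
gitlab_login_token: "{{ lookup('env', 'GITLAB_API_TOKEN') }}"      # placeholder source
gitlab_project_name: ansible_test_project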
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml
new file mode 100644
index 00000000..bed980a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml
@@ -0,0 +1,3 @@
+gitlab_project_name: ansible_test_project
+gitlab_hook_url: http://gitlab.example.com/hook
+gitlab_runner_name: ansible_test_runner
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml
new file mode 100644
index 00000000..42b41820
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml
@@ -0,0 +1,73 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: False
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Cleanup GitLab runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: absent
+
+- name: Create GitLab Runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: present
+ register: gitlab_runner_state
+
+- name: Assert runner has been created
+ assert:
+ that:
+ - gitlab_runner_state is changed
+
+
+#### COMMENTED AS MODULE WILL UPDATE THE RUNNER IF EXISTS. TO BE DISCUSSED ####
+# - name: Create GitLab Runner ( Idempotency test )
+# gitlab_runner:
+# server_url: "{{ gitlab_host }}"
+# validate_certs: false
+# login_token: "{{ gitlab_login_token }}"
+# description: "{{ gitlab_runner_name }}"
+# registration_token: "{{ gitlab_runner_registration_token }}"
+# state: present
+# register: gitlab_runner_state_again
+
+# - name: Test module is idempotent
+# assert:
+# that:
+# - gitlab_runner_state_again is not changed
+
+- name: Remove GitLab Runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: absent
+ register: gitlab_runner_state_absent
+
+- name: Assert runner has been removed
+ assert:
+ that:
+ - gitlab_runner_state_absent is changed
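Note: these runner tasks reference gitlab_runner_registration_token, which is not defined in the defaults file above and is expected to come from the test environment. A hypothetical way to provide it for a local run (the environment variable name is a placeholder):

gitlab_runner_registration_token: "{{ lookup('env', 'GITLAB_RUNNER_REGISTRATION_TOKEN') }}"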
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases
new file mode 100644
index 00000000..ef7ed2e3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+gitlab/ci
+disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml
new file mode 100644
index 00000000..a6755cf4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml
@@ -0,0 +1,3 @@
+gitlab_user: ansible_test_user
+gitlab_user_pass: Secr3tPassw00rd
+gitlab_user_email: root@localhost
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml
new file mode 100644
index 00000000..6cbcd14c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml
@@ -0,0 +1,250 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ name: ansible_test_user
+ username: ansible_test_user
+ password: Secr3tPassw00rd
+ email: root@localhost
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: absent
+
+
+- name: Create gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check user has been created correctly
+ assert:
+ that:
+ - gitlab_user_state is changed
+
+- name: Create gitlab user again
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: root@localhost
+ name: ansible_test_user
+ username: ansible_test_user
+ password: Secr3tPassw00rd
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state_again
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state_again is not changed
+ - gitlab_user_state_again.user.is_admin == False
+
+
+- name: Update User Test => Make User Admin
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: true
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check if user is admin now
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.is_admin == True
+
+- name: Update User Test => Make User Admin (Again)
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: true
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state is not changed
+ - gitlab_user_state.user.is_admin == True
+
+- name: Update User Test => Remove Admin Rights
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: false
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check if user is not admin anymore
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.is_admin == False
+
+
+- name: Update User Test => Try Changing Mail without Confirmation Skipping
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: True
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that email is unchanged (changing it only works when skipping confirmation)
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == gitlab_user_email
+
+- name: Update User Test => Change Mail with Confirmation Skip
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that mail has changed now
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == 'foo@bar.baz'
+
+- name: Update User Test => Change Mail with Confirmation Skip (Again)
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state is not changed
+ - gitlab_user_state.user.email == 'foo@bar.baz'
+
+- name: Update User Test => Revert to original Mail Address
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: False
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that reverting mail back to original has worked
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == gitlab_user_email
+
+
+- name: Update User Test => Change User Password
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: False
+
+  # note: the only way to check whether a password really is what it is
+  #       expected to be is to use it for login, so we use it here instead
+  #       of the default token, assuming that a user can always change
+  #       their own password
+ api_username: "{{ gitlab_user }}"
+ api_password: "{{ gitlab_user_pass }}"
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: new-super-password
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state
+ assert:
+ that:
+      # note: there is no way to determine whether a password has actually
+      #       changed, so this task can only ever report changed (yellow) or
+      #       ok (green); we decided on always green for now
+ - gitlab_user_state is not changed
+
+- name: Update User Test => Reset User Password
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: False
+
+ api_username: "{{ gitlab_user }}"
+ api_password: new-super-password
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state (Again)
+ assert:
+ that:
+ - gitlab_user_state is not changed
+
+- name: Update User Test => Check that password was reset
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: False
+
+ api_username: "{{ gitlab_user }}"
+ api_password: "{{ gitlab_user_pass }}"
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state (Reset)
+ assert:
+ that:
+ - gitlab_user_state is not changed
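Note: as the comments above explain, the only way to prove a password is correct is to authenticate with it, which the tasks do by passing api_username/api_password to the module itself. An equivalent stand-alone check is sketched below; it is not part of the original test and assumes the GitLab instance has the OAuth resource owner password credentials grant enabled:

- name: verify the user password by requesting an OAuth token (sketch only)
  uri:
    url: "{{ gitlab_host }}/oauth/token"
    method: POST
    body_format: json
    body:
      grant_type: password
      username: "{{ gitlab_user }}"
      password: "{{ gitlab_user_pass }}"
    validate_certs: false
    status_code: 200
  register: oauth_login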
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/aliases
new file mode 100644
index 00000000..2f2db5bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/python3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml
new file mode 100644
index 00000000..ca521ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml
new file mode 100644
index 00000000..40aba5e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml
@@ -0,0 +1,83 @@
+- name: get the default python version
+ command: "{{ ansible_python_interpreter }} -V"
+ register: default_python_version
+
+- name: find the default python
+ command: which python
+ register: which_python
+
+- name: find the default pip
+ command: which pip
+ register: which_pip
+
+- name: preserve the default python
+ command: cp -av "{{ which_python.stdout }}" "{{ which_python.stdout }}.default"
+
+- name: preserve the default pip
+ command: cp -av "{{ which_pip.stdout }}" "{{ which_pip.stdout }}.default"
+
+# using the apt module prevents autoremove from working, so call apt-get via shell instead
+- name: install mercurial (apt)
+ shell: apt-get -y update && apt-get -y install mercurial
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: install mercurial (dnf)
+ dnf:
+ name: mercurial
+ when: ansible_facts.pkg_mgr == 'dnf'
+
+- name: install mercurial (yum)
+ yum:
+ name: mercurial
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: install mercurial (pkgng)
+ package:
+ name: mercurial
+ when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng']
+
+- name: install mercurial (zypper)
+ package:
+ name: mercurial
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+- name: preserve the updated python
+ command: cp -av "{{ which_python.stdout }}" "{{ which_python.stdout }}.updated"
+
+- name: preserve the updated pip
+ command: cp -av "{{ which_pip.stdout }}" "{{ which_pip.stdout }}.updated"
+
+- name: locate mercurial
+ command: which hg
+ register: which_hg
+
+- name: get the mercurial interpreter
+ command: head -n 1 "{{ which_hg.stdout }}"
+ register: hg_interpreter
+
+- name: stat the mercurial interpreter
+ stat:
+ path: "{{ hg_interpreter.stdout[2:] }}"
+ register: stat_hg_interpreter
+
+- name: bypass the mercurial python interpreter symlink (if needed)
+ lineinfile:
+ path: "{{ which_hg.stdout }}"
+ regexp: "^#!.*$"
+ line: "#!{{ stat_hg_interpreter.stat.lnk_source }}"
+ when: stat_hg_interpreter.stat.islnk
+
+- name: restore the default python
+ command: cp -av "{{ which_python.stdout }}.default" "{{ which_python.stdout }}"
+
+- name: restore the default pip
+ command: cp -av "{{ which_pip.stdout }}.default" "{{ which_pip.stdout }}"
+
+- name: get the current python version
+ command: "{{ ansible_python_interpreter }} -V"
+ register: current_python_version
+
+- name: verify the python version has not changed
+ assert:
+ that:
+ - default_python_version.stdout == current_python_version.stdout
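Note: the install tasks read the first line of the hg script (its shebang) and strip the leading "#!" with hg_interpreter.stdout[2:]. When that interpreter path is a symlink, the lineinfile task rewrites the shebang to the symlink's target, so mercurial keeps running under the Python it was installed against even after the default python binary is restored. A small illustrative task, not part of the original role, showing what the slice produces:

- name: show the interpreter path extracted from the shebang (illustration only)
  debug:
    msg: "shebang '{{ hg_interpreter.stdout }}' points at '{{ hg_interpreter.stdout[2:] }}'"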
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml
new file mode 100644
index 00000000..4d7efca5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml
@@ -0,0 +1,44 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the hg module
+# Copyright: (c) 2014, James Tanner <tanner.jc@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: determine if mercurial is already installed
+ command: which hg
+ register: has_hg
+ ignore_errors: yes
+
+- name: warn if the underlying system is not capable of running these tests
+ debug:
+ msg: >-
+ The mercurial client is not able to check out Bitbucket repositories as per the changes mentioned here:
+ https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01 . Therefore these tests are skipped.
+ when: (ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04") or ansible_python_version is version("2.7.9", "<")
+
+- block:
+ - name: install mercurial
+ include_tasks: install.yml
+ when: has_hg is failed
+
+ - name: test mercurial
+ include_tasks: run-tests.yml
+
+ - name: uninstall mercurial
+ include_tasks: uninstall.yml
+ when: has_hg is failed
+
+ # As per the bitbucket changes in https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01 , this
+ # test will fail under certain circumstances, to avoid false positives, we skip these tests under the following
+ # circumstances:
+ #
+ # - The ubuntu 14.04 image used on shippable runs python 2.7.6, so we skip explicitly for this image.
+ # - When ansible_python_version is not 2.7.9 or higher, mercurial is likely to also run using this same (old)
+ # python version, which causes issues as per the link above.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04")
+ - ansible_python_version is version("2.7.9", ">=")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml
new file mode 100644
index 00000000..775b2978
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml
@@ -0,0 +1,84 @@
+# test code for the hg module
+# Copyright: (c) 2018, Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+- name: set where to extract the repo
+ set_fact:
+ checkout_dir: "{{ output_dir }}/hg_project_test"
+
+- name: set what repo to use
+ set_fact:
+ repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test"
+
+- name: clean out the output_dir
+ shell: rm -rf {{ output_dir }}/*
+
+- name: verify that mercurial is installed so this test can continue
+ shell: which hg
+
+- name: initial checkout
+ hg:
+ repo: "{{ repo }}"
+ dest: "{{ checkout_dir }}"
+ register: hg_result
+
+- debug: var=hg_result
+
+- shell: ls {{ checkout_dir }}
+
+- name: verify information about the initial clone
+ assert:
+ that:
+ - "'before' in hg_result"
+ - "'after' in hg_result"
+ - "not hg_result.before"
+ - "hg_result.changed"
+
+- name: repeated checkout
+ hg:
+ repo: "{{ repo }}"
+ dest: "{{ checkout_dir }}"
+ register: hg_result2
+
+- debug: var=hg_result2
+
+- name: check for tags
+ stat:
+ path: "{{ checkout_dir }}/.hgtags"
+ register: tags
+
+- name: check for the branch file
+ stat:
+ path: "{{ checkout_dir }}/.hg/branch"
+ register: branches
+
+- debug: var=tags
+- debug: var=branches
+
+- name: assert presence of the tags and branch files
+ assert:
+ that:
+ - "tags.stat.isreg"
+ - "branches.stat.isreg"
+
+- name: verify that a re-clone is marked unchanged
+ assert:
+ that:
+ - "not hg_result2.changed"
+
+- name: Try to check out a non-existent repo
+ hg:
+ repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test_1"
+ clone: no
+ update: no
+ register: hg_result3
+ ignore_errors: true
+
+- name: Verify result of non-existent repo clone
+ assert:
+ that:
+ - hg_result3.msg
+ - "'abort: HTTP Error 404: Not Found' in hg_result3.msg"
+ - "not hg_result3.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml
new file mode 100644
index 00000000..305a2ffd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml
@@ -0,0 +1,48 @@
+- name: restore the updated python
+ command: mv "{{ which_python.stdout }}.updated" "{{ which_python.stdout }}"
+
+- name: restore the updated pip
+ command: mv "{{ which_pip.stdout }}.updated" "{{ which_pip.stdout }}"
+
+- name: restore the mercurial python interpreter symlink (if needed)
+ lineinfile:
+ path: "{{ which_hg.stdout }}"
+ regexp: "^#!.*$"
+ line: "#!{{ stat_hg_interpreter.stat.path }}"
+ when: stat_hg_interpreter.stat.islnk
+
+# using the apt module prevents autoremove from working, so call apt-get via shell instead
+- name: uninstall packages which were not originally installed (apt)
+ shell: apt-get -y remove mercurial && apt-get -y autoremove
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: uninstall packages which were not originally installed (dnf)
+ dnf:
+ name: mercurial
+ state: absent
+ autoremove: yes
+ when: ansible_facts.pkg_mgr == 'dnf'
+
+# the yum module does not have an autoremove parameter
+- name: uninstall packages which were not originally installed (yum)
+ shell: yum -y autoremove mercurial
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: uninstall packages which were not originally installed (pkgng)
+ package:
+ name: mercurial
+ state: absent
+ autoremove: yes
+ when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng']
+
+- name: uninstall packages which were not originally installed (zypper)
+ package:
+ name: mercurial
+ state: absent
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+- name: restore the default python
+ raw: mv "{{ which_python.stdout }}.default" "{{ which_python.stdout }}"
+
+- name: restore the default pip
+ raw: mv "{{ which_pip.stdout }}.default" "{{ which_pip.stdout }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/aliases
new file mode 100644
index 00000000..73611960
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/rhel
+skip/docker
+skip/python2.6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml
new file mode 100644
index 00000000..7eda2e32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml
@@ -0,0 +1,85 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the homebrew module.
+# Copyright: (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+- name: Find brew binary
+ command: which brew
+ register: brew_which
+ when: ansible_distribution in ['MacOSX']
+
+- name: Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+ when: ansible_distribution in ['MacOSX']
+
+#- name: Use ignored-pinned option while upgrading all
+# homebrew:
+# upgrade_all: yes
+# upgrade_options: ignore-pinned
+# become: yes
+# become_user: "{{ brew_stat.stat.pw_name }}"
+# register: upgrade_option_result
+# environment:
+# HOMEBREW_NO_AUTO_UPDATE: True
+
+#- assert:
+# that:
+# - upgrade_option_result.changed
+
+- name: Install xz package using homebrew
+ homebrew:
+ name: xz
+ state: present
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: xz_result
+
+- assert:
+ that:
+ - xz_result.changed
+
+- name: Install xz package using homebrew again
+ homebrew:
+ name: xz
+ state: present
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: xz_result
+
+- assert:
+ that:
+ - not xz_result.changed
+
+- name: Uninstall xz package using homebrew
+ homebrew:
+ name: xz
+ state: absent
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: xz_result
+
+- assert:
+ that:
+ - xz_result.changed
+
+- name: Uninstall xz package using homebrew again
+ homebrew:
+ name: xz
+ state: absent
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: xz_result
+
+- assert:
+ that:
+ - not xz_result.changed
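Note: Homebrew refuses to run as root, which is why the tasks above look up the owner of the brew binary and become that user. A hypothetical variant that combines this pattern with the HOMEBREW_NO_AUTO_UPDATE override used in the commented-out upgrade block:

- name: install a formula as the brew owner without auto-update (sketch only)
  homebrew:
    name: xz
    state: present
    update_homebrew: no
  become: yes
  become_user: "{{ brew_stat.stat.pw_name }}"
  environment:
    HOMEBREW_NO_AUTO_UPDATE: "1"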
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml
new file mode 100644
index 00000000..8c8ea2eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml
@@ -0,0 +1,315 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: create an EIP
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ state: present
+ register: eip
+- name: create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: disk
+- name: delete an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+#----------------------------------------------------------
+- name: create an instance (check mode)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: create an instance (idempotent)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ check_mode: yes
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create an instance that already exists
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete an instance (check mode)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete an instance (idempotent)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete an instance that does not exist
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: disk
+- name: delete an EIP
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ state: absent
+ register: eip
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml
new file mode 100644
index 00000000..79e67d0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml
@@ -0,0 +1,109 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+#----------------------------------------------------------
+- name: create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+# ------------------------------------------------------------
+- name: create a disk (check mode)
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+ check_mode: true
+- name: verify results of creating a disk in check mode
+ assert:
+ that:
+ result is changed
+# ----------------------------------------------------------------------------
+- name: create a disk that already exists
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a disk (check mode)
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+# ----------------------------------------------------------------------------
+- name: delete a disk that does not exist (check mode)
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.changed == false
+# ----------------------------------------------------------------------------
+- name: delete a disk that does not exist
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml
new file mode 100644
index 00000000..5c01cf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml
@@ -0,0 +1,101 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/huaweicloud/magic-modules
+#
+# ----------------------------------------------------------------------------
+# Pre-test setup
+- name: delete a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+#----------------------------------------------------------
+- name: create a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: create a vpc that already exists
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete a vpc that does not exist
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
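Note: only the hwc_network_vpc tasks pass the Huawei Cloud credentials (identity_endpoint, user, password, domain, project, region) explicitly; the other hwc targets rely on them being provided by the environment or by module defaults. A hypothetical play-level module_defaults sketch that would supply them once, with one entry per hwc module and all values as placeholders:

- hosts: localhost
  module_defaults:
    community.general.hwc_network_vpc:
      identity_endpoint: "https://iam.example.com:443/v3"
      user: "{{ lookup('env', 'HWC_USER') }}"
      password: "{{ lookup('env', 'HWC_PASSWORD') }}"
      domain: "{{ lookup('env', 'HWC_DOMAIN') }}"
      project: "{{ lookup('env', 'HWC_PROJECT') }}"
      region: cn-north-1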
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml
new file mode 100644
index 00000000..180f8fad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml
@@ -0,0 +1,81 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: delete an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+#----------------------------------------------------------
+- name: create an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: create an SMN topic that already exists
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete an SMN topic that does not exist
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml
new file mode 100644
index 00000000..57de8324
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml
@@ -0,0 +1,186 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: port
+- name: delete an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+#----------------------------------------------------------
+- name: create an EIP (check mode)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: create an EIP (idempotent)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ check_mode: yes
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create an EIP that already exists
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete an EIP (check mode)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an EIP (idempotent)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ check_mode: yes
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete an EIP that does not exist
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: port
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml
new file mode 100644
index 00000000..2316a4b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml
@@ -0,0 +1,151 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: present
+ register: vpc1
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: present
+ register: vpc2
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+#----------------------------------------------------------
+- name: create a peering connect (check mode)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: create a peering connect (idempotent)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ check_mode: yes
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a peering connect that already exists
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a peering connect (check mode)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete a peering connect (idempotent)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a peering connect that does not exist
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: absent
+ register: vpc2
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: absent
+ register: vpc1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml
new file mode 100644
index 00000000..b7f28360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml
@@ -0,0 +1,137 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+#----------------------------------------------------------
+- name: create a port (check mode)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a port (idempotent)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a port that already exists
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a port (check mode)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a port (idempotent)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a port that does not exist
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml
new file mode 100644
index 00000000..efd6765c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml
@@ -0,0 +1,138 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: delete a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+#----------------------------------------------------------
+- name: create a private ip (check mode)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a private ip (idempotent)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a private ip that already exists
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a private ip (check mode)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a private ip (idempotent)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a private ip that does not exist
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml
new file mode 100644
index 00000000..b281000b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml
@@ -0,0 +1,155 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: present
+ register: vpc1
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: present
+ register: vpc2
+- name: create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: connect
+- name: delete a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+#----------------------------------------------------------
+- name: create a route (check mode)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ check_mode: yes
+ register: result
+- assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+# ----------------------------------------------------------
+- name: create a route (idempotent)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# -----------------------------------------------------------
+- name: create a route that already exists
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a route (check mode)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ check_mode: yes
+#----------------------------------------------------------
+- name: delete a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete a route (idempotent)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a route that does not exist
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: connect
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: absent
+ register: vpc2
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: absent
+ register: vpc1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml
new file mode 100644
index 00000000..6b21f8b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml
@@ -0,0 +1,87 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+#----------------------------------------------------------
+- name: create a security group (check mode)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: create a security group (idempotent)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a security group that already exists
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a security group (check mode)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+# ----------------------------------------------------------------------------
+- name: delete a security group that does not exist
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml
new file mode 100644
index 00000000..2d774101
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml
@@ -0,0 +1,162 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: sg
+- name: delete a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+#----------------------------------------------------------
+- name: create a security group rule (check mode)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a security group rule (idempotent)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a security group rule that already exists
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a security group rule (check mode)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a security group rule (idempotent)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a security group rule that does not exist
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: sg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml
new file mode 100644
index 00000000..3b3cf654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml
@@ -0,0 +1,148 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: delete a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: absent
+#----------------------------------------------------------
+- name: create a subnet (check mode)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ result is changed
+#----------------------------------------------------------
+- name: create a subnet (idempotent)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: present
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a subnet that already exists
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#----------------------------------------------------------
+- name: delete a subnet (check mode)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a subnet (idempotent)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: absent
+ check_mode: yes
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a subnet that does not exist
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: True
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result.failed == 0
+ - result.changed == false
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
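The hwc_vpc_* task files above all follow one sequence: a check-mode run that must predict a change without creating anything, a real run, a repeat run that must report no change, and the mirrored steps for deletion. Reduced to a sketch (reusing the hwc_vpc_subnet parameters already shown above; an illustration only, not an additional file in this diff), the essential check-mode assertion pair is:

    - name: create a subnet (check mode)
      hwc_vpc_subnet:
        vpc_id: "{{ vpc.id }}"
        cidr: "192.168.100.0/26"
        gateway_ip: "192.168.100.32"
        name: "ansible_network_subnet_test"
        state: present
      check_mode: yes
      register: result

    - name: check mode predicts a change and returns no resource id
      assert:
        that:
          - result is changed   # a change would be made
          - not result.id       # but nothing was actually created

Running the same module task again without check_mode and asserting that result.changed is false is what the (idempotent) steps above verify.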
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases
new file mode 100644
index 00000000..6fe31dda
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+disabled
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml
new file mode 100644
index 00000000..94111702
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_influxdb
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml
new file mode 100644
index 00000000..ad625367
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+- include: tests.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml
new file mode 100644
index 00000000..b980e290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml
@@ -0,0 +1,140 @@
+---
+
+- name: Install influxdb python module
+ pip: name=influxdb
+
+- name: Test add admin user in check mode
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ check_mode: true
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds with a change
+ assert:
+ that:
+ - add_admin_user.changed == true
+
+- name: Test add admin user
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds with a change
+ assert:
+ that:
+ - add_admin_user.changed == true
+
+- name: Test add admin user idempotence
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds without a change
+ assert:
+ that:
+ - add_admin_user.changed == false
+
+- name: Enable authentication and restart service
+ block:
+ - name: Enable authentication
+ lineinfile:
+ path: /etc/influxdb/influxdb.conf
+ regexp: 'auth-enabled ='
+ line: ' auth-enabled = true'
+
+ - name: Restart InfluxDB service
+ service: name=influxdb state=restarted
+
+- name: Test add user in check mode when authentication enabled
+ block:
+ - name: Add user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ check_mode: true
+ register: add_user_with_auth_enabled
+
+ - name: Check that adding user with enabled authentication succeeds with a change
+ assert:
+ that:
+ - add_user_with_auth_enabled.changed == true
+
+- name: Test add user when authentication enabled
+ block:
+ - name: Add user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ register: add_user_with_auth_enabled
+
+ - name: Check that adding user with enabled authentication succeeds with a change
+ assert:
+ that:
+ - add_user_with_auth_enabled.changed == true
+
+- name: Test add user when authentication enabled idempotence
+ block:
+ - name: Add the same user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ register: same_user
+
+ - name: Check that adding same user succeeds without a change
+ assert:
+ that:
+ - same_user.changed == false
+
+- name: Test change user password in check mode
+ block:
+ - name: Change user password
+ influxdb_user: user_name=user user_password=user2 login_username=admin login_password=admin
+ check_mode: true
+ register: change_password
+
+ - name: Check that password changing succeeds with a change
+ assert:
+ that:
+ - change_password.changed == true
+
+- name: Test change user password
+ block:
+ - name: Change user password
+ influxdb_user: user_name=user user_password=user2 login_username=admin login_password=admin
+ register: change_password
+
+ - name: Check that password changing succeeds with a change
+ assert:
+ that:
+ - change_password.changed == true
+
+- name: Test remove user in check mode
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ check_mode: true
+ register: remove_user
+
+ - name: Check that removing user succeeds with a change
+ assert:
+ that:
+ - remove_user.changed == true
+
+- name: Test remove user
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ register: remove_user
+
+ - name: Check that removing user succeeds with a change
+ assert:
+ that:
+ - remove_user.changed == true
+
+- name: Test remove user idempotence
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ register: remove_user
+
+ - name: Check that removing user succeeds without a change
+ assert:
+ that:
+ - remove_user.changed == false
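The influxdb_user tasks above use the inline key=value shorthand. For readability, the first admin-user task in block form would look like the sketch below (same module and parameters as above; shown only as an illustration, not part of the diff):

    - name: Add admin user
      community.general.influxdb_user:
        user_name: admin
        user_password: admin
        admin: true        # equivalent of admin=yes in the inline form
      register: add_admin_user

The remaining tasks translate the same way, with login_username and login_password added once authentication has been enabled.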
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml
new file mode 100644
index 00000000..9023b6f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml
@@ -0,0 +1,413 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for ini_file plugins
+# (c) 2017 Red Hat Inc.
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: record the output file path
+ set_fact: output_file={{ remote_tmp_dir }}/foo.ini
+
+- name: add "fav=lemonade" is in section "[drinks]" in specified file
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ value: lemonade
+ register: result1
+
+- name: verify ini_file 'changed' is true
+ assert:
+ that:
+ - result1.changed == True
+ - result1.msg == 'section and option added'
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ expected1: |
+
+ [drinks]
+ fav = lemonade
+ content1: "{{ output_content.content | b64decode }}"
+
+- name: Verify content of ini file is as expected
+ assert:
+ that:
+ - content1 == expected1
+
+- name: add "fav=lemonade" is in section "[drinks]" again
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ value: lemonade
+ register: result2
+
+- name: Ensure unchanged
+ assert:
+ that:
+ - result2.changed == False
+ - result2.msg == 'OK'
+
+- name: Ensure "beverage=coke" is in section "[drinks]"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ value: coke
+ register: result3
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ expected3: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ content3: "{{ output_content.content | b64decode }}"
+
+- name: assert 'changed' is true and content is OK
+ assert:
+ that:
+ - result3.changed == True
+ - result3.msg == 'option added'
+ - content3 == expected3
+
+- name: Remove option "beverage=coke"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ state: absent
+ register: result4
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: get ini file content
+ set_fact:
+ content4: "{{ output_content.content | b64decode }}"
+
+- name: assert changed and content is as expected
+ assert:
+ that:
+ - result4.changed == True
+ - result4.msg == 'option changed'
+ - content4 == expected1
+
+- name: remove section 'drinks'
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ state: absent
+ register: result5
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: get current ini file content
+ set_fact:
+ content5: "{{ output_content.content | b64decode }}"
+
+- name: assert changed and content is empty
+ assert:
+ that:
+ - result5.changed == True
+ - result5.msg == 'section removed'
+ - content5 == "\n"
+
+# allow_no_value
+
+- name: test allow_no_value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: yes
+ register: result6
+
+- name: assert section and option added
+ assert:
+ that:
+ - result6.changed == True
+ - result6.msg == 'section and option added'
+
+- name: test allow_no_value idempotency
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: yes
+ register: result6
+
+- name: assert 'changed' false
+ assert:
+ that:
+ - result6.changed == False
+ - result6.msg == 'OK'
+
+- name: test allow_no_value with loop
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: "{{ item.o }}"
+ value: "{{ item.v }}"
+ allow_no_value: yes
+ with_items:
+ - { o: "skip-name-resolve", v: null }
+ - { o: "max_connections", v: "500" }
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ content7: "{{ output_content.content | b64decode }}"
+ expected7: |
+
+ [mysqld]
+ skip-name
+ skip-name-resolve
+ max_connections = 500
+
+- name: Verify content of ini file is as expected
+ assert:
+ that:
+ - content7 == expected7
+
+- name: change option with no value to option with value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ value: myvalue
+ register: result8
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ content8: "{{ output_content.content | b64decode }}"
+ expected8: |
+
+ [mysqld]
+ skip-name = myvalue
+ skip-name-resolve
+ max_connections = 500
+
+- name: assert 'changed' and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result8.changed == True
+ - result8.msg == 'option changed'
+ - content8 == expected8
+
+- name: change option with value to option with no value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: yes
+ register: result9
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ content9: "{{ output_content.content | b64decode }}"
+ expected9: |
+
+ [mysqld]
+ skip-name
+ skip-name-resolve
+ max_connections = 500
+
+- name: assert 'changed' and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result9.changed == True
+ - result9.msg == 'option changed'
+ - content9 == expected9
+
+- name: Remove option with no value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name-resolve
+ state: absent
+ register: result10
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ content10: "{{ output_content.content | b64decode }}"
+ expected10: |
+
+ [mysqld]
+ skip-name
+ max_connections = 500
+
+- name: assert 'changed' and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result10.changed == True
+ - result10.msg == 'option changed'
+ - content10 == expected10
+
+- name: Clean test file
+ copy:
+ content: ""
+ dest: "{{ output_file }}"
+ force: yes
+
+- name: Ensure "beverage=coke" is created within no section
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ value: coke
+ register: result11
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ expected11: "beverage = coke\n\n"
+ content11: "{{ output_content.content | b64decode }}"
+
+- name: assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result11 is changed
+ - result11.msg == 'option added'
+ - content11 == expected11
+
+- name: Ensure "beverage=coke" is modified as "beverage=water" within no section
+ ini_file:
+ path: "{{ output_file }}"
+ option: beverage
+ value: water
+ section:
+ register: result12
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ expected12: "beverage = water\n\n"
+
+ content12: "{{ output_content.content | b64decode }}"
+
+- name: assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result12 is changed
+ - result12.msg == 'option changed'
+ - content12 == expected12
+
+- name: remove option 'beverage' outside any section
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ state: absent
+ register: result13
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: get current ini file content
+ set_fact:
+ content13: "{{ output_content.content | b64decode }}"
+
+- name: assert changed (no section)
+ assert:
+ that:
+ - result13 is changed
+ - result13.msg == 'option changed'
+ - content13 == "\n"
+
+- name: Check add option without section before existing section
+ block:
+ - name: Add option with section
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ value: water
+ - name: Add option without section
+ ini_file:
+ path: "{{ output_file }}"
+ section:
+ option: like
+ value: tea
+
+- name: read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: set expected content and get current ini file content
+ set_fact:
+ expected14: |
+ like = tea
+
+ [drinks]
+ beverage = water
+ content14: "{{ output_content.content | b64decode }}"
+
+- name: Verify content of ini file is as expected
+ assert:
+ that:
+ - content14 == expected14
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/aliases
new file mode 100644
index 00000000..7ed38ca3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/aliases
@@ -0,0 +1,11 @@
+disabled
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # We need SSH access to the VM and need to be able to let
+ # docker-machine install docker in the VM. This won't work
+ # with tests running in docker containers.
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/docker-machine b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/docker-machine
new file mode 100644
index 00000000..be5d00c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/docker-machine
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Mock Docker Machine wrapper for testing purposes
+
+[ "$MOCK_ERROR_IN" == "$1" ] && echo >&2 "Mock Docker Machine error" && exit 1
+case $1 in
+ env)
+ cat <<'EOF'
+export DOCKER_TLS_VERIFY="1"
+export DOCKER_HOST="tcp://134.209.204.160:2376"
+export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+export DOCKER_MACHINE_NAME="routinator"
+# Run this command to configure your shell:
+# eval $(docker-machine env --shell=bash routinator)
+EOF
+ ;;
+
+ *)
+ /usr/bin/docker-machine "$@"
+ ;;
+esac
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
new file mode 100644
index 00000000..80dd7971
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
@@ -0,0 +1 @@
+plugin: community.general.docker_machine
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
new file mode 100644
index 00000000..76b46580
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
@@ -0,0 +1,2 @@
+plugin: community.general.docker_machine
+daemon_env: require
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
new file mode 100644
index 00000000..8be33360
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
@@ -0,0 +1,2 @@
+plugin: community.general.docker_machine
+daemon_env: optional
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml
new file mode 100644
index 00000000..9f526fb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Setup docker
+ include_role:
+ name: setup_docker
+
+ # There seems to be no better way to install docker-machine. At least I couldn't find any packages for RHEL7/8.
+ - name: Download docker-machine binary
+ vars:
+ docker_machine_version: "0.16.1"
+ get_url:
+ url: "https://github.com/docker/machine/releases/download/v{{ docker_machine_version }}/docker-machine-{{ ansible_system }}-{{ ansible_userspace_architecture }}"
+ dest: /tmp/docker-machine
+ - name: Install docker-machine binary
+ command: install /tmp/docker-machine /usr/bin/docker-machine
+ become: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml
new file mode 100644
index 00000000..78042b62
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml
@@ -0,0 +1,11 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Request Docker Machine to use this machine as a generic VM
+ command: "docker-machine --debug create \
+ --driver generic \
+ --generic-ip-address=localhost \
+ --generic-ssh-key {{ lookup('env', 'HOME') }}/.ssh/id_rsa \
+ --generic-ssh-user root \
+ vm"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml
new file mode 100644
index 00000000..b272c094
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml
@@ -0,0 +1,6 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Request Docker Machine to remove this machine as a generic VM
+ command: "docker-machine rm vm -f"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..f76f01c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml
@@ -0,0 +1,50 @@
+- hosts: 127.0.0.1
+ gather_facts: no
+ tasks:
+ - name: sanity check Docker Machine output
+ vars:
+ dm_ls_format: !unsafe '{{.Name}} | {{.DriverName}} | {{.State}} | {{.URL}} | {{.Error}}'
+ success_regex: "^vm | [^|]+ | Running | tcp://.+ |$"
+ command: docker-machine ls --format '{{ dm_ls_format }}'
+ register: result
+ failed_when: result.rc != 0 or result.stdout is not match(success_regex)
+
+ - name: verify Docker Machine ip
+ command: docker-machine ip vm
+ register: result
+ failed_when: result.rc != 0 or result.stdout != hostvars['vm'].ansible_host
+
+ - name: verify Docker Machine env
+ command: docker-machine env --shell=sh vm
+ register: result
+
+ - debug: var=result.stdout
+
+ - assert:
+ that:
+ - "'DOCKER_TLS_VERIFY=\"{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}\"' in result.stdout"
+ - "'DOCKER_HOST=\"{{ hostvars['vm'].dm_DOCKER_HOST }}\"' in result.stdout"
+ - "'DOCKER_CERT_PATH=\"{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}\"' in result.stdout"
+ - "'DOCKER_MACHINE_NAME=\"{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}\"' in result.stdout"
+
+- hosts: vm
+ gather_facts: no
+ tasks:
+ - name: do something to verify that accept-new ssh setting was applied by the docker-machine inventory plugin
+ raw: uname -a
+ register: result
+
+ - debug: var=result.stdout
+
+- hosts: 127.0.0.1
+ gather_facts: no
+ environment:
+ DOCKER_CERT_PATH: "{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}"
+ DOCKER_HOST: "{{ hostvars['vm'].dm_DOCKER_HOST }}"
+ DOCKER_MACHINE_NAME: "{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}"
+ DOCKER_TLS_VERIFY: "{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}"
+ tasks:
+ - name: run a Docker container on the target Docker Machine host to verify that Docker daemon connection settings from the docker-machine inventory plugin work as expected
+ docker_container:
+ name: test
+ image: hello-world:latest
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/runme.sh
new file mode 100755
index 00000000..074e64fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/runme.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+SCRIPT_DIR=$(dirname "$0")
+
+echo "Who am I: $(whoami)"
+echo "Home: ${HOME}"
+echo "PWD: $(pwd)"
+echo "Script dir: ${SCRIPT_DIR}"
+
+# restrict Ansible just to our inventory plugin, to prevent inventory data being matched by the test but being provided
+# by some other dynamic inventory provider
+export ANSIBLE_INVENTORY_ENABLED=docker_machine
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+SAVED_PATH="$PATH"
+
+cleanup() {
+ PATH="${SAVED_PATH}"
+ echo "Cleanup"
+ ansible-playbook -i teardown.docker_machine.yml playbooks/teardown.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Pre-setup (install docker, docker-machine)"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/pre-setup.yml
+
+echo "Print docker-machine version"
+docker-machine --version
+
+echo "Check preconditions"
+# Host should NOT be known to Ansible before the test starts
+ansible-inventory -i inventory_1.docker_machine.yml --host vm >/dev/null && exit 1
+
+echo "Test that the docker_machine inventory plugin is being loaded"
+ANSIBLE_DEBUG=yes ansible-inventory -i inventory_1.docker_machine.yml --list | grep -F "Loading InventoryModule 'docker_machine'"
+
+echo "Setup"
+ansible-playbook playbooks/setup.yml
+
+echo "Test docker_machine inventory 1"
+ansible-playbook -i inventory_1.docker_machine.yml playbooks/test_inventory_1.yml
+
+echo "Activate Docker Machine mock"
+PATH=${SCRIPT_DIR}:$PATH
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=yes"
+ansible-inventory -i inventory_2.docker_machine.yml --list
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_2.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=yes"
+ansible-inventory -i inventory_3.docker_machine.yml --list
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_3.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Deactivate Docker Machine mock"
+PATH="${SAVED_PATH}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
new file mode 100644
index 00000000..ea2f203a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
@@ -0,0 +1,3 @@
+plugin: community.general.docker_machine
+daemon_env: skip
+running_required: false
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/aliases
new file mode 100644
index 00000000..78341e07
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/aliases
@@ -0,0 +1,12 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+destructive
+skip/docker # The tests sometimes make docker daemon unstable; hence,
+ # we skip all docker-based CI runs to avoid disrupting
+ # the whole CI system. On VMs, we restart docker daemon
+ # after finishing the tests to minimize potential effects
+ # on other tests.
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
new file mode 100644
index 00000000..172f17c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
@@ -0,0 +1,2 @@
+plugin: community.general.docker_swarm
+docker_host: unix://var/run/docker.sock
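With a minimal plugin configuration like the one above, the generated inventory can also be inspected by hand once a swarm is running. The commands below are illustrative only and not part of the test; they assume a local Docker daemon in swarm mode, as prepared by playbooks/swarm_setup.yml:

    # Show the group tree (leader/manager/worker/nonleaders) built by the plugin.
    ansible-inventory -i inventory_1.docker_swarm.yml --graph
    # Dump the full inventory, including per-host variables such as docker_swarm_node_attributes.
    ansible-inventory -i inventory_1.docker_swarm.yml --list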
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
new file mode 100644
index 00000000..3711ae60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
@@ -0,0 +1,4 @@
+plugin: community.general.docker_swarm
+docker_host: unix://var/run/docker.sock
+verbose_output: false
+include_host_uri: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/meta/main.yml
new file mode 100644
index 00000000..07da8c6d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
new file mode 100644
index 00000000..9cf87159
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
@@ -0,0 +1,18 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: yes
+ tasks:
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: yes
+
+ - name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
new file mode 100644
index 00000000..b97106f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
@@ -0,0 +1,15 @@
+---
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: yes
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
+
+ - name: Create a Swarm cluster
+ community.general.docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..600a89b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
@@ -0,0 +1,58 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which are in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Check for groups
+ assert:
+ that:
+ - "groups.manager | length > 0"
+ - "groups.worker | length >= 0"
+ - "groups.leader | length == 1"
+ run_once: yes
+
+ - name: List manager group
+ debug:
+ var: groups.manager
+ run_once: yes
+
+ - name: List worker group
+ debug:
+ var: groups.worker
+ run_once: yes
+
+ - name: List leader group
+ debug:
+ var: groups.leader
+ run_once: yes
+
+ - name: Print ansible_host per host
+ debug:
+ var: ansible_host
+
+ - name: Make sure docker_swarm_node_attributes is available
+ assert:
+ that:
+ - docker_swarm_node_attributes is defined
+ - name: Print docker_swarm_node_attributes per host
+ debug:
+ var: docker_swarm_node_attributes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml
new file mode 100644
index 00000000..b2a794d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml
@@ -0,0 +1,35 @@
+---
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: no
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which are in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Make sure docker_swarm_node_attributes is not available
+ assert:
+ that:
+ - docker_swarm_node_attributes is undefined
+ - name: Make sure ansible_host_uri is available
+ assert:
+ that:
+ - ansible_host_uri is defined
+ - name: Print ansible_host_uri
+ debug:
+ var: ansible_host_uri
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/runme.sh
new file mode 100755
index 00000000..b93d386a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_docker_swarm/runme.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook playbooks/swarm_cleanup.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
+
+echo "Test docker_swarm inventory 1"
+ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_swarm inventory 2"
+ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/constraints.txt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/constraints.txt
new file mode 100644
index 00000000..c44f44e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/constraints.txt
@@ -0,0 +1 @@
+setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py
new file mode 100755
index 00000000..f70ff8c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/inventory_diff.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+
+def check_hosts(contrib, plugin):
+ contrib_hosts = sorted(contrib['_meta']['hostvars'].keys())
+ plugin_hosts = sorted(plugin['_meta']['hostvars'].keys())
+ assert contrib_hosts == plugin_hosts
+ return contrib_hosts, plugin_hosts
+
+
+def check_groups(contrib, plugin):
+ contrib_groups = set(contrib.keys())
+ plugin_groups = set(plugin.keys())
+ missing_groups = contrib_groups.difference(plugin_groups)
+ if missing_groups:
+ print("groups: %s are missing from the plugin" % missing_groups)
+ assert not missing_groups
+ return contrib_groups, plugin_groups
+
+
+def check_host_vars(key, value, plugin, host):
+ # tags are a dict in the plugin
+ if key.startswith('ec2_tag'):
+ print('assert tag', key, value)
+ assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host'
+ btags = plugin['_meta']['hostvars'][host]['tags']
+ tagkey = key.replace('ec2_tag_', '')
+ assert tagkey in btags, '%s tag not in b file host tags' % tagkey
+ assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
+ else:
+ print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key))
+ assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host)
+ assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key])
+
+
+def main():
+ # a should be the source of truth (the expected output)
+ a = sys.argv[1]
+ # b should be the thing to check (the plugin output)
+ b = sys.argv[2]
+
+ with open(a, 'r') as f:
+ adata = json.loads(f.read())
+ with open(b, 'r') as f:
+ bdata = json.loads(f.read())
+
+ print(adata)
+ print(bdata)
+
+ # all hosts should be present obviously
+ ahosts, bhosts = check_hosts(adata, bdata)
+
+ # all groups should be present obviously
+ agroups, bgroups = check_groups(adata, bdata)
+
+ # check host vars can be reconstructed
+ for ahost in ahosts:
+ contrib_host_vars = adata['_meta']['hostvars'][ahost]
+ for key, value in contrib_host_vars.items():
+ check_host_vars(key, value, bdata, ahost)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/runme.sh
new file mode 100755
index 00000000..21517d5b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/runme.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+if [[ $(python --version 2>&1) =~ 2\.6 ]]
+ then
+ echo "Openshift client is not supported on Python 2.6"
+ exit 0
+fi
+
+set -eux
+
+uname -a
+if [[ $(uname -a) =~ FreeBSD\ 12\.0-RELEASE ]]
+ then
+ # On FreeBSD 12.0 images, upgrade setuptools to avoid error with multidict
+ # This is a bug in pip, which happens because the old setuptools from outside
+ # the venv leaks into the venv (https://github.com/pypa/pip/issues/6264).
+ # Since it is not fixed in latest pip (which is available in the venv), we
+ # need to upgrade setuptools outside the venv.
+ pip3 install --upgrade setuptools
+fi
+
+source virtualenv.sh
+python --version
+pip --version
+pip show setuptools
+pip install openshift -c constraints.txt
+
+./server.py &
+
+cleanup() {
+ kill -9 "$(jobs -p)"
+}
+
+trap cleanup INT TERM EXIT
+
+# Fake auth file
+mkdir -p ~/.kube/
+cat <<EOF > ~/.kube/config
+apiVersion: v1
+clusters:
+- cluster:
+ insecure-skip-tls-verify: true
+ server: http://localhost:12345
+ name: development
+contexts:
+- context:
+ cluster: development
+ user: developer
+ name: dev-frontend
+current-context: dev-frontend
+kind: Config
+preferences: {}
+users:
+- name: developer
+ user:
+ token: ZDNg7LzSlp8a0u0fht_tRnPMTOjxqgJGCyi_iy0ecUw
+EOF
+
+#################################################
+# RUN THE PLUGIN
+#################################################
+
+# run the plugin
+export ANSIBLE_INVENTORY_ENABLED=community.general.kubevirt
+export ANSIBLE_INVENTORY=test.kubevirt.yml
+
+cat << EOF > "$OUTPUT_DIR/test.kubevirt.yml"
+plugin: community.general.kubevirt
+connections:
+ - namespaces:
+ - default
+EOF
+
+ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i "$OUTPUT_DIR/test.kubevirt.yml" --list --output="$OUTPUT_DIR/plugin.out"
+
+#################################################
+# DIFF THE RESULTS
+#################################################
+
+./inventory_diff.py "$(pwd)/test.out" "$OUTPUT_DIR/plugin.out"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/server.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/server.py
new file mode 100755
index 00000000..cb56739a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/server.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+try:
+ from http.server import HTTPServer
+ from http.server import SimpleHTTPRequestHandler
+except ImportError:
+ from BaseHTTPServer import HTTPServer
+ from SimpleHTTPServer import SimpleHTTPRequestHandler
+
+from threading import Thread
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+
+class TestHandler(SimpleHTTPRequestHandler):
+ # Path handlers:
+ handlers = {}
+
+ def log_message(self, format, *args):
+ """
+ Empty method, so we don't mix output of HTTP server with tests
+ """
+ pass
+
+ def do_GET(self):
+ params = urlparse(self.path)
+
+ if params.path in self.handlers:
+ self.handlers[params.path](self)
+ else:
+ SimpleHTTPRequestHandler.do_GET(self)
+
+ def do_POST(self):
+ params = urlparse(self.path)
+
+ if params.path in self.handlers:
+ self.handlers[params.path](self)
+ else:
+ SimpleHTTPRequestHandler.do_POST(self)
+
+
+class TestServer(object):
+ # The host and port and path used by the embedded tests web server:
+ PORT = None
+
+ # The embedded web server:
+ _httpd = None
+ # Thread for http server:
+ _thread = None
+
+ def set_json_response(self, path, code, body):
+ def _handle_request(handler):
+ handler.send_response(code)
+ handler.send_header('Content-Type', 'application/json')
+ handler.end_headers()
+
+ data = json.dumps(body, ensure_ascii=False).encode('utf-8')
+ handler.wfile.write(data)
+
+ TestHandler.handlers[path] = _handle_request
+
+ def start_server(self, host='localhost'):
+ self._httpd = HTTPServer((host, 12345), TestHandler)
+ self._thread = Thread(target=self._httpd.serve_forever)
+ self._thread.start()
+
+ def stop_server(self):
+ self._httpd.shutdown()
+ self._thread.join()
+
+
+if __name__ == '__main__':
+ print(os.getpid())
+ server = TestServer()
+ server.start_server()
+ server.set_json_response(path="/version", code=200, body={})
+ server.set_json_response(path="/api", code=200, body={
+ "kind": "APIVersions", "versions": ["v1"], "serverAddressByClientCIDRs": [{"clientCIDR": "0.0.0.0/0", "serverAddress": "localhost:12345"}]
+ })
+ server.set_json_response(path="/api/v1", code=200, body={'resources': {}})
+ server.set_json_response(path="/apis", code=200, body={
+ "kind": "APIGroupList", "apiVersion": "v1",
+ "groups": [{
+ "name": "kubevirt.io", "versions": [{"groupVersion": "kubevirt.io/v1alpha3", "version": "v1alpha3"}],
+ "preferredVersion": {"groupVersion": "kubevirt.io/v1alpha3", "version": "v1alpha3"}
+ }]
+ })
+ server.set_json_response(
+ path="/apis/kubevirt.io/v1alpha3",
+ code=200,
+ body={
+ "kind": "APIResourceList", "apiVersion": "v1", "groupVersion": "kubevirt.io/v1alpha3",
+ "resources": [{
+ "name": "virtualmachineinstances", "singularName": "virtualmachineinstance",
+ "namespaced": True, "kind": "VirtualMachineInstance",
+ "verbs": ["delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"],
+ "shortNames":["vmi", "vmis"]
+ }]
+ }
+ )
+ server.set_json_response(
+ path="/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances",
+ code=200,
+ body={'apiVersion': 'kubevirt.io/v1alpha3',
+ 'items': [{'apiVersion': 'kubevirt.io/v1alpha3',
+ 'kind': 'VirtualMachineInstance',
+ 'metadata': {'annotations': {'ansible': '{"data1": "yes", "data2": "no"}'},
+ 'creationTimestamp': '2019-04-05T14:17:02Z',
+ 'generateName': 'myvm',
+ 'generation': 1,
+ 'labels': {'kubevirt.io/nodeName': 'localhost',
+ 'label': 'x',
+ 'vm.cnv.io/name': 'myvm'},
+ 'name': 'myvm',
+ 'namespace': 'default',
+ 'ownerReferences': [{'apiVersion': 'kubevirt.io/v1alpha3',
+ 'blockOwnerDeletion': True,
+ 'controller': True,
+ 'kind': 'VirtualMachine',
+ 'name': 'myvm',
+ 'uid': 'f78ebe62-5666-11e9-a214-0800279ffc6b'}],
+ 'resourceVersion': '1614085',
+ 'selfLink': '/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances/myvm',
+ 'uid': '7ba1b196-57ad-11e9-9e2e-0800279ffc6b'},
+ 'spec': {'domain': {'devices': {'disks': [{'disk': {'bus': 'virtio'},
+ 'name': 'containerdisk'},
+ {'disk': {'bus': 'virtio'}, 'name': 'ansiblecloudinitdisk'}],
+ 'interfaces': [{'bridge': {}, 'name': 'default'}]},
+ 'firmware': {'uuid': 'cdf77e9e-871b-5acb-a707-80ef3d4b9849'},
+ 'machine': {'type': ''},
+ 'resources': {'requests': {'memory': '64M'}}},
+ 'networks': [{'name': 'default', 'pod': {}}],
+ 'volumes': [{'containerDisk': {'image': 'kubevirt/cirros-container-disk-demo:v0.11.0'},
+ 'name': 'containerdisk'},
+ {'cloudInitNoCloud': {'userData': '#cloud-config\npassword: password\nchpasswd: { expire: False }'},
+ 'name': 'ansiblecloudinitdisk'}]},
+ 'status': {'conditions': [{'lastProbeTime': None,
+ 'lastTransitionTime': None,
+ 'status': 'True',
+ 'type': 'LiveMigratable'},
+ {'lastProbeTime': None,
+ 'lastTransitionTime': '2019-04-05T14:17:27Z',
+ 'status': 'True',
+ 'type': 'Ready'}],
+ 'interfaces': [{'ipAddress': '172.17.0.19',
+ 'mac': '02:42:ac:11:00:13',
+ 'name': 'default'}],
+ 'migrationMethod': 'BlockMigration',
+ 'nodeName': 'localhost',
+ 'phase': 'Running'}}],
+ 'kind': 'VirtualMachineInstanceList',
+ 'metadata': {'continue': '',
+ 'resourceVersion': '1614862',
+ 'selfLink': '/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances'}}
+ )
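The stub server above listens on localhost:12345, the same address written into the fake kubeconfig by runme.sh, so its canned responses can be probed manually while it is running. This is illustrative only and assumes curl is available:

    ./server.py &
    # The discovery endpoints and the mocked VirtualMachineInstance list are served as plain JSON.
    curl -s http://localhost:12345/apis
    curl -s http://localhost:12345/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances
    kill %1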
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/test.out b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/test.out
new file mode 100644
index 00000000..932aade0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/inventory_kubevirt/test.out
@@ -0,0 +1,61 @@
+{
+ "_meta": {
+ "hostvars": {
+ "default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b": {
+ "annotations": {
+ "ansible": "{\"data1\": \"yes\", \"data2\": \"no\"}"
+ },
+ "ansible_host": "172.17.0.19",
+ "data1": "yes",
+ "data2": "no",
+ "labels": {
+ "kubevirt.io/nodeName": "localhost",
+ "label": "x",
+ "vm.cnv.io/name": "myvm"
+ },
+ "object_type": "vm",
+ "resource_version": "1614085",
+ "uid": "7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "label_kubevirt_io_nodeName_localhost",
+ "label_label_x",
+ "label_vm_cnv_io_name_myvm",
+ "localhost_12345",
+ "ungrouped"
+ ]
+ },
+ "label_kubevirt_io_nodeName_localhost": {
+ "hosts": [
+ "default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
+ ]
+ },
+ "label_label_x": {
+ "hosts": [
+ "default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
+ ]
+ },
+ "label_vm_cnv_io_name_myvm": {
+ "hosts": [
+ "default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
+ ]
+ },
+ "localhost_12345": {
+ "children": [
+ "namespace_default"
+ ]
+ },
+ "namespace_default": {
+ "children": [
+ "namespace_default_vms"
+ ]
+ },
+ "namespace_default_vms": {
+ "hosts": [
+ "default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
+ ]
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml
new file mode 100644
index 00000000..4fbd5ab6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml
@@ -0,0 +1,46 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the ipify_facts
+# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+- debug: var=ansible_distribution
+- debug: var=ansible_distribution_version
+
+- set_fact:
+ validate_certs: false
+ when: (ansible_distribution == "MacOSX" and ansible_distribution_version == "10.11.1")
+
+- name: get information about current IP using ipify facts
+ ipify_facts:
+ timeout: 30
+ validate_certs: "{{ validate_certs }}"
+ register: external_ip
+ until: external_ip is successful
+ retries: 5
+ delay: 10
+
+- name: check if task was successful
+ assert:
+ that:
+ - "{{ external_ip.changed == false }}"
+ - "{{ external_ip['ansible_facts'] is defined }}"
+ - "{{ external_ip['ansible_facts']['ipify_public_ip'] is defined }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml
new file mode 100644
index 00000000..1ccaab46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml
@@ -0,0 +1,2 @@
+---
+validate_certs: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases
new file mode 100644
index 00000000..741b98f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group4
+skip/docker # kernel modules not loadable
+skip/freebsd # no iptables/netfilter (Linux specific)
+skip/osx # no iptables/netfilter (Linux specific)
+skip/macos # no iptables/netfilter (Linux specific)
+skip/aix # no iptables/netfilter (Linux specific)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml
new file mode 100644
index 00000000..5e9bf88e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: ensure iptables package is installed
+ package:
+ name:
+ - iptables
+ become: yes
+
+
+- name: include tasks
+ vars:
+ iptables_saved: "/tmp/test_iptables_state.saved"
+ iptables_tests: "/tmp/test_iptables_state.tests"
+
+ block:
+ - name: include tasks to perform basic tests (check_mode, async, idempotency)
+ include_tasks: tests/00-basic.yml
+
+ - name: include tasks to test tables handling
+ include_tasks: tests/01-tables.yml
+ when:
+ - xtables_lock is undefined
+
+ - name: include tasks to test rollbacks
+ include_tasks: tests/10-rollback.yml
+ when:
+ - xtables_lock is undefined
+ - ansible_connection in ['ssh', 'paramiko', 'smart']
+
+ become: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml
new file mode 100644
index 00000000..fcd259ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml
@@ -0,0 +1,316 @@
+---
+- name: "ensure our next backup is not there (file)"
+ file:
+ path: "{{ iptables_saved }}"
+ state: absent
+
+- name: "ensure our next rule is not there (iptables)"
+ iptables:
+ chain: OUTPUT
+ jump: ACCEPT
+ state: absent
+
+
+#
+# Basic checks about invalid param/value handling.
+#
+- name: "trigger error about invalid param"
+ iptables_state:
+ name: foobar
+ register: iptables_state
+ ignore_errors: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("Invalid options")
+ quiet: yes
+
+
+
+- name: "trigger error about missing param 'state'"
+ iptables_state:
+ path: foobar
+ register: iptables_state
+ ignore_errors: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("missing required arguments")
+ quiet: yes
+
+
+
+- name: "trigger error about missing param 'path'"
+ iptables_state:
+ state: saved
+ register: iptables_state
+ ignore_errors: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("missing required arguments")
+ quiet: yes
+
+
+
+- name: "trigger error about invalid value for param 'state'"
+ iptables_state:
+ path: foobar
+ state: present
+ register: iptables_state
+ ignore_errors: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("value of state must be one of")
+ quiet: yes
+
+
+#
+# Play with the current state first. We will create a file to store it in, but
+# no more. These tests are for:
+# - idempotency
+# - check_mode
+#
+- name: "save state (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+ check_mode: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: yes
+
+
+
+- name: "save state (must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: yes
+
+
+
+- name: "save state (idempotency, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: yes
+
+
+
+- name: "save state (check_mode, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+ check_mode: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: yes
+
+
+
+# We begin with 'state=restored' by restoring the current state on itself.
+# This at least ensures the file produced with state=saved is suitable for
+# state=restored.
+
+- name: "state=restored check_mode=true changed=false"
+ block:
+ - name: "restore state (check_mode, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: yes
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (must NOT report a change, warning about rollback & async)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
+
+
+
+- name: "change iptables state (iptables)"
+ iptables:
+ chain: OUTPUT
+ jump: ACCEPT
+
+
+
+- name: "state=restored changed=true"
+ block:
+ - name: "restore state (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: yes
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state != iptables_state.restored
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=true"
+ block:
+ - name: "restore state (must report a change, async, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state != iptables_state.restored
+ - iptables_state.applied
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (must NOT report a change, async, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (check_mode=yes, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: yes
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: yes
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: yes
+ register: xtables_lock
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml
new file mode 100644
index 00000000..e09a26a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml
@@ -0,0 +1,299 @@
+---
+- name: "ensure our next rule is not there (iptables)"
+ iptables:
+ table: nat
+ chain: INPUT
+ jump: ACCEPT
+ state: absent
+
+- name: "get state (table filter)"
+ iptables_state:
+ table: filter
+ state: saved
+ path: "{{ iptables_saved }}"
+ register: iptables_state
+ changed_when: false
+ check_mode: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is undefined
+ quiet: yes
+
+
+
+- name: "get state (table nat)"
+ iptables_state:
+ table: nat
+ state: saved
+ path: "{{ iptables_saved }}"
+ register: iptables_state
+ changed_when: false
+ check_mode: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.initial_state"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ quiet: yes
+
+
+
+- name: "save state (table filter)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: filter
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.saved"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' not in iptables_state.saved"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is undefined
+ quiet: yes
+
+
+
+- name: "save state (table nat)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: nat
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.saved"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.saved"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ quiet: yes
+
+
+
+- name: "save state (any table)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.saved"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.saved"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is defined
+ quiet: yes
+
+
+
+- name: "restore state (table nat, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ table: nat
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.restored"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.restored"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ - iptables_state is not changed
+ quiet: yes
+
+
+
+- name: "change NAT table (iptables)"
+ iptables:
+ table: nat
+ chain: INPUT
+ jump: ACCEPT
+ state: present
+
+
+
+- name: "restore state (table nat, must report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ table: nat
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.restored"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.restored"
+ - iptables_state.tables.nat is defined
+ - "'-A INPUT -j ACCEPT' in iptables_state.tables.nat"
+ - "'-A INPUT -j ACCEPT' not in iptables_state.restored"
+ - iptables_state.tables.filter is undefined
+ - iptables_state is changed
+ quiet: yes
+
+
+
+- name: "get security, raw and mangle tables states"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: "{{ item }}"
+ loop:
+ - security
+ - raw
+ - mangle
+ changed_when: false
+ check_mode: yes
+
+
+
+- name: "save state (any table)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'filter' in iptables_state.tables"
+ - "'*filter' in iptables_state.saved"
+ - "'mangle' in iptables_state.tables"
+ - "'*mangle' in iptables_state.saved"
+ - "'nat' in iptables_state.tables"
+ - "'*nat' in iptables_state.saved"
+ - "'raw' in iptables_state.tables"
+ - "'*raw' in iptables_state.saved"
+ - "'security' in iptables_state.tables"
+ - "'*security' in iptables_state.saved"
+ quiet: yes
+
+
+
+- name: "save filter table into a test file"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: filter
+ state: saved
+
+- name: "add a table header in comments (# *mangle)"
+ lineinfile:
+ path: "{{ iptables_tests }}"
+ line: "# *mangle"
+
+
+
+- name: "restore state (table filter, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: filter
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*mangle' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*raw' in iptables_state.initial_state"
+ - "'*security' in iptables_state.initial_state"
+ - "'filter' in iptables_state.tables"
+ - "'mangle' not in iptables_state.tables"
+ - "'nat' not in iptables_state.tables"
+ - "'raw' not in iptables_state.tables"
+ - "'security' not in iptables_state.tables"
+ - "'*filter' in iptables_state.restored"
+ - "'*mangle' not in iptables_state.restored"
+ - "'*nat' not in iptables_state.restored"
+ - "'*raw' not in iptables_state.restored"
+ - "'*security' not in iptables_state.restored"
+ - iptables_state is not changed
+ quiet: yes
+
+
+
+- name: "restore state (any table, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*mangle' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*raw' in iptables_state.initial_state"
+ - "'*security' in iptables_state.initial_state"
+ - "'filter' in iptables_state.tables"
+ - "'mangle' in iptables_state.tables"
+ - "'nat' in iptables_state.tables"
+ - "'raw' in iptables_state.tables"
+ - "'security' in iptables_state.tables"
+ - "'*filter' in iptables_state.restored"
+ - "'*mangle' in iptables_state.restored"
+ - "'*nat' in iptables_state.restored"
+ - "'*raw' in iptables_state.restored"
+ - "'*security' in iptables_state.restored"
+ - iptables_state is not changed
+ quiet: yes
+
+
+
+- name: "restore state (table mangle, must fail, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: mangle
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ ignore_errors: yes
+
+- name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is failed
+ - "iptables_state.msg == 'Table mangle to restore not defined in {{ iptables_tests }}'"
+ success_msg: >-
+ The previous error has been triggered by trying to restore a table
+ that is missing in the file provided to iptables-restore.
+ fail_msg: >-
+ The previous task should have failed due to a missing table (mangle)
+ in the file to restore iptables state from.
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml
new file mode 100644
index 00000000..1a9db290
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml
@@ -0,0 +1,199 @@
+---
+- name: "create a blocking ruleset with a DROP policy"
+ copy:
+ dest: "{{ iptables_tests }}"
+ content: |
+ *filter
+ :INPUT DROP
+ COMMIT
+
+
+
+- name: "restore state from the test file (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ check_mode: yes
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+
+
+
+- name: "fail to restore state from the test file"
+ block:
+ - name: "restore state from the test file (bad policies, expected error -> rollback)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+ The previous error has been triggered to test the rollback. If you
+ are reading this, it means that 1) the connection was lost right after
+ the bad rules were restored; 2) a rollback happened, so the bad rules
+ are not applied after all; 3) the module failed because it didn't reach
+ the wanted state, but at least the host is not lost.
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "try again, with a higher timeout (bad policies, same expected error)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ vars:
+ ansible_timeout: "{{ max_delay | d(300) }}"
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+ The previous error has been triggered to test the rollback. If you
+ are reading this, it means that 1) the connection was lost right after
+ the bad rules were restored; 2) a rollback happened, so the bad rules
+ are not applied after all; 3) the module failed because it didn't reach
+ the wanted state, but at least the host is not lost.
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "restore state from backup (must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+
+
+
+- name: "restore state from backup (mangle, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ table: mangle
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+
+
+
+- name: "create a blocking ruleset with a REJECT rule"
+ copy:
+ dest: "{{ iptables_tests }}"
+ content: |
+ *filter
+ -A INPUT -j REJECT
+ COMMIT
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "restore state from the test file (bad rules, expected error -> rollback)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+ The previous error has been triggered to test the rollback. If you
+ are reading this, it means that 1) the connection was lost right after
+ the bad rules were restored; 2) a rollback happened, so the bad rules
+ are not applied after all; 3) the module failed because it didn't reach
+ the wanted state, but at least the host is not lost.
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "try again, with a higher timeout (bad rules, same expected error)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ vars:
+ ansible_timeout: "{{ max_delay | d(300) }}"
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+ The previous error has been triggered to test the rollback. If you
+ are reading this, it means that 1) the connection was lost right after
+ the bad rules were restored; 2) a rollback happened, so the bad rules
+ are not applied after all; 3) the module failed because it didn't reach
+ the wanted state, but at least the host is not lost.
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases
new file mode 100644
index 00000000..bd8385ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases
@@ -0,0 +1,2 @@
+# There is no Ericsson IPWorks
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml
new file mode 100644
index 00000000..870f249e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml
@@ -0,0 +1,111 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for ipwcli_dns
+
+- name: variables username, password, container, tld must be set
+ fail:
+ msg: 'Please set the variables: username, password, container and tld.'
+ when: username is not defined or password is not defined or container is not defined or tld is not defined
+
+- name: add a new A record
+ ipwcli_dns:
+ dnsname: example.{{ tld }}
+ type: A
+ container: '{{ container }}'
+ address: 127.0.0.1
+ ttl: 100
+ username: '{{ username }}'
+ password: '{{ password }}'
+ register: result
+
+- name: assert the new A record is added
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+ - result.record == 'arecord example.{{ tld }} 127.0.0.1 -set ttl=100;container={{ container }}'
+
+- name: delete the A record
+ ipwcli_dns:
+ dnsname: example.{{ tld }}
+ type: A
+ container: '{{ container }}'
+ address: 127.0.0.1
+ ttl: 100
+ username: '{{ username }}'
+ password: '{{ password }}'
+ state: absent
+ register: result
+
+- name: assert the new A record is deleted
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+ - result.record == 'arecord example.{{ tld }} 127.0.0.1 -set ttl=100;container={{ container }}'
+
+- name: delete a non-existent SRV record
+ ipwcli_dns:
+ dnsname: _sip._tcp.test.example.{{ tld }}
+ type: SRV
+ container: '{{ container }}'
+ target: example.{{ tld }}
+ port: 5060
+ username: '{{ username }}'
+ password: '{{ password }}'
+ state: absent
+ register: result
+
+- name: assert the non-existent SRV record is unchanged
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+ - result.record ==
+ 'srvrecord _sip._tcp.test.example.{{ tld }} -set ttl=3600;container={{ container }};priority=10;weight=10;port=5060;target=example.{{ tld }}'
+
+- name: add an SRV record with weight > 65535, violating RFC 2782
+ ipwcli_dns:
+ dnsname: _sip._tcp.test.example.{{ tld }}
+ type: SRV
+ container: '{{ container }}'
+ ttl: 100
+ target: example.{{ tld }}
+ port: 5060
+ weight: 65536
+ username: '{{ username }}'
+ password: '{{ password }}'
+ register: result
+ ignore_errors: yes
+
+- name: assert the failure of the new SRV record
+ assert:
+ that:
+ - result is failed
+ - result is not changed
+ - "'Out of UINT16 range' in result.stderr"
+
+- name: add NAPTR record (check_mode)
+ ipwcli_dns:
+ dnsname: test.example.{{ tld }}
+ type: NAPTR
+ preference: 10
+ container: '{{ container }}'
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.{{ tld }}.'
+ flags: S
+ username: '{{ username }}'
+ password: '{{ password }}'
+ check_mode: yes
+ register: result
+
+- name: assert the NAPTR check_mode
+ assert:
+ that:
+ - result is not failed
+ - result is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/aliases
new file mode 100644
index 00000000..9d694de9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group1
+destructive
+skip/aix
+skip/python2.6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg
new file mode 100644
index 00000000..1c6d0d0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg
@@ -0,0 +1,56 @@
+#version=DEVEL
+# System authorization information
+auth --enableshadow --passalgo=sha512
+# Use CDROM installation media
+cdrom
+# Use graphical install
+graphical
+# Run the Setup Agent on first boot
+firstboot --enable
+ignoredisk --only-use=sda
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts='us'
+# System language
+lang en_US.UTF-8
+# Network information
+network --bootproto=dhcp --device=ens192 --ipv6=auto --no-activate
+network --hostname=localhost.localdomain
+# System services
+services --enabled="chronyd"
+# System timezone
+timezone America/New_York --isUtc
+# X Window System configuration information
+xconfig --startxonboot
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=sda
+autopart --type=lvm
+# Partition clearing information
+clearpart --none --initlabel
+#firewall --disable
+services --disabled=firewalld
+eula --agreed
+# Reboot when the install is finished.
+reboot
+
+%packages
+@^graphical-server-environment
+@base
+@core
+@desktop-debugging
+@dial-up
+@fonts
+@gnome-desktop
+@guest-agents
+@guest-desktop-agents
+@hardware-monitoring
+@input-methods
+@internet-browser
+@multimedia
+@print-client
+@x11
+chrony
+kexec-tools
+open-vm-tools-desktop
+%end
+%addon com_redhat_kdump --enable --reserve-mb='auto'
+%end
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg
new file mode 100644
index 00000000..1c6d0d0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg
@@ -0,0 +1,56 @@
+#version=DEVEL
+# System authorization information
+auth --enableshadow --passalgo=sha512
+# Use CDROM installation media
+cdrom
+# Use graphical install
+graphical
+# Run the Setup Agent on first boot
+firstboot --enable
+ignoredisk --only-use=sda
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts='us'
+# System language
+lang en_US.UTF-8
+# Network information
+network --bootproto=dhcp --device=ens192 --ipv6=auto --no-activate
+network --hostname=localhost.localdomain
+# System services
+services --enabled="chronyd"
+# System timezone
+timezone America/New_York --isUtc
+# X Window System configuration information
+xconfig --startxonboot
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=sda
+autopart --type=lvm
+# Partition clearing information
+clearpart --none --initlabel
+#firewall --disable
+services --disabled=firewalld
+eula --agreed
+# Reboot when the install is finished.
+reboot
+
+%packages
+@^graphical-server-environment
+@base
+@core
+@desktop-debugging
+@dial-up
+@fonts
+@gnome-desktop
+@guest-agents
+@guest-desktop-agents
+@hardware-monitoring
+@input-methods
+@internet-browser
+@multimedia
+@print-client
+@x11
+chrony
+kexec-tools
+open-vm-tools-desktop
+%end
+%addon com_redhat_kdump --enable --reserve-mb='auto'
+%end
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml
new file mode 100644
index 00000000..ca521ab1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml
new file mode 100644
index 00000000..de462767
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml
@@ -0,0 +1,154 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for iso_create module
+# Copyright: (c) 2020, Diane Wang (Tomorrow9) <dianew@vmware.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+- name: install pycdlib
+ pip:
+ name: pycdlib
+ # state: latest
+ register: install_pycdlib
+- debug: var=install_pycdlib
+
+- set_fact:
+ output_dir_test: '{{ output_dir }}/test_iso_create'
+
+# - include_tasks: prepare_dest_dir.yml
+
+- name: Test check mode
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test.iso"
+ interchange_level: 3
+ register: iso_result
+ check_mode: yes
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test.iso"
+ register: iso_file
+- debug: var=iso_file
+- assert:
+ that:
+ - iso_result is changed
+ - not iso_file.stat.exists
+
+- name: Create iso file with a specified file
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test.iso"
+ interchange_level: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
+
+- name: Create iso file with a specified file and folder
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ - "{{ role_path }}/files/test_dir"
+ dest_iso: "{{ output_dir_test }}/test1.iso"
+ interchange_level: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test1.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
+
+- name: Create iso file with volume identification string
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test2.iso"
+ vol_ident: "OEMDRV"
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test2.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
+
+- name: Create iso file with Rock Ridge extension
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test3.iso"
+ rock_ridge: "1.09"
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test3.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
+
+- name: Create iso file with Joliet extension
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test4.iso"
+ joliet: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test4.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
+
+- name: Create iso file with UDF enabled
+ iso_create:
+ src_files:
+ - "{{ role_path }}/files/test1.cfg"
+ dest_iso: "{{ output_dir_test }}/test5.iso"
+ udf: True
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_dir_test }}/test5.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+ - iso_file.stat.exists
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml
new file mode 100644
index 00000000..94c529d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml
@@ -0,0 +1,12 @@
+# Test code for iso_create module
+# Copyright: (c) 2020, Diane Wang (Tomorrow9) <dianew@vmware.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+- name: Make sure our testing sub-directory does not exist
+ file:
+ path: '{{ output_dir_test }}'
+ state: absent
+
+- name: Create our testing sub-directory
+ file:
+ path: '{{ output_dir_test }}'
+ state: directory
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases
new file mode 100644
index 00000000..0b484bba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+destructive
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso
new file mode 100644
index 00000000..d06ff73c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml
new file mode 100644
index 00000000..bdc4dfe0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml
@@ -0,0 +1,4 @@
+dependencies:
+ - setup_pkg_mgr
+ - prepare_tests
+ - setup_epel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml
new file mode 100644
index 00000000..a6bf95ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml
@@ -0,0 +1,66 @@
+# Test code for the iso_extract module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+# (c) 2017, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Gather facts
+ setup:
+ become: yes
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
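+# "action: {{ ansible_facts.pkg_mgr }}" dispatches the next task to whichever
+# package module matches the detected package manager (apt, dnf, pkgng, ...),
+# so one task installs the 7zip package named in the vars file loaded above.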
+- name: "{{ ansible_facts.os_family | upper }} | Install 7zip package"
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ iso_extract_7zip_package }}"
+ state: present
+ when: ansible_facts.distribution != 'MacOSX'
+
+- name: macOS
+ when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+ when: ansible_distribution in ['MacOSX']
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+ when: ansible_distribution in ['MacOSX']
+
+ - name: MACOS | Install 7zip package
+ homebrew:
+ name: p7zip
+ state: present
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile a package which takes a long time. Do not upgrade homebrew until a
+ # proper solution can be found
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: True
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml
new file mode 100644
index 00000000..1eb279a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml
@@ -0,0 +1,48 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the iso_extract module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+# (c) 2017, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- set_fact:
+ output_dir_test: '{{ output_dir }}/test_iso_extract'
+
+- name: Install 7zip
+ import_tasks: 7zip.yml
+
+- name: Prepare environment
+ import_tasks: prepare.yml
+
+- name: Test in normal mode
+ import_tasks: tests.yml
+ vars:
+ in_check_mode: no
+
+- name: Prepare environment
+ import_tasks: prepare.yml
+
+- name: Test in check-mode
+ import_tasks: tests.yml
+ vars:
+ in_check_mode: yes
+ check_mode: yes
+
+# FIXME - fill this in after figuring out how to allow mounts
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml
new file mode 100644
index 00000000..78c06ad5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml
@@ -0,0 +1,33 @@
+# Test code for the iso_extract module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+# (c) 2017, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Make sure our testing sub-directory does not exist
+ file:
+ path: '{{ output_dir_test }}'
+ state: absent
+
+- name: Create our testing sub-directory
+ file:
+ path: '{{ output_dir_test }}'
+ state: directory
+
+- name: copy the iso to the test dir
+ copy:
+ src: test.iso
+ dest: '{{ output_dir_test }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml
new file mode 100644
index 00000000..f9182ba6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml
@@ -0,0 +1,52 @@
+# Test code for the iso_extract module.
+# (c) 2017, James Tanner <tanner.jc@gmail.com>
+# (c) 2017, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: Extract the iso
+ iso_extract:
+ image: '{{ output_dir_test }}/test.iso'
+ dest: '{{ output_dir_test }}'
+ files:
+ - 1.txt
+ - 2.txt
+ register: iso_extract_test0
+
+- assert:
+ that:
+ - iso_extract_test0 is changed
+
+- name: Extract the iso again
+ iso_extract:
+ image: '{{ output_dir_test }}/test.iso'
+ dest: '{{ output_dir_test }}'
+ files:
+ - 1.txt
+ - 2.txt
+ register: iso_extract_test0_again
+
+- name: Test iso_extract_test0_again (normal mode)
+ assert:
+ that:
+ - iso_extract_test0_again is not changed
+ when: not in_check_mode
+
+- name: Test iso_extract_test0_again (check-mode)
+ assert:
+ that:
+ - iso_extract_test0_again is changed
+ when: in_check_mode
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml
new file mode 100644
index 00000000..f9e7ca1a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml
@@ -0,0 +1 @@
+iso_extract_7zip_package: p7zip
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml
new file mode 100644
index 00000000..aa925a79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml
@@ -0,0 +1 @@
+iso_extract_7zip_package: p7zip-plugins
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml
new file mode 100644
index 00000000..e525c8ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml
@@ -0,0 +1,3 @@
+# The 7z executable moved from p7zip to p7zip-full;
+# see https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/p7zip/p7zip.changes?expand=1
+iso_extract_7zip_package: p7zip-full
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml
new file mode 100644
index 00000000..219c4957
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml
@@ -0,0 +1 @@
+iso_extract_7zip_package: p7zip-full
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/aliases
new file mode 100644
index 00000000..49222305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml
new file mode 100644
index 00000000..22723ff1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+test_pkcs12_path: testpkcs.p12
+test_keystore_path: keystore.jks \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12
new file mode 100644
index 00000000..e0fee618
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml
new file mode 100644
index 00000000..1d18287a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_java_keytool
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml
new file mode 100644
index 00000000..e701836e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml
@@ -0,0 +1,60 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+- when: has_java_keytool
+ block:
+
+ - name: prep pkcs12 file
+ copy: src="{{ test_pkcs12_path }}" dest="{{output_dir}}/{{ test_pkcs12_path }}"
+
+ - name: import pkcs12
+ java_cert:
+ pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}"
+ pkcs12_password: changeit
+ pkcs12_alias: default
+ cert_alias: default
+ keystore_path: "{{output_dir}}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: yes
+ state: present
+ register: result_success
+ - name: verify success
+ assert:
+ that:
+ - result_success is successful
+
+ - name: import pkcs12 with wrong password
+ java_cert:
+ pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}"
+ pkcs12_password: wrong_pass
+ pkcs12_alias: default
+ cert_alias: default_new
+ keystore_path: "{{output_dir}}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: yes
+ state: present
+ ignore_errors: true
+ register: result_wrong_pass
+
+ - name: verify fail with wrong import password
+ assert:
+ that:
+ - result_wrong_pass is failed
+
+ - name: test fail on mutually exclusive params
+ java_cert:
+ cert_path: ca.crt
+ pkcs12_path: "{{output_dir}}/{{ test_pkcs12_path }}"
+ cert_alias: default
+ keystore_path: "{{output_dir}}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: yes
+ state: present
+ ignore_errors: true
+ register: result_excl_params
+ - name: verify failed exclusive params
+ assert:
+ that:
+ - result_excl_params is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases
new file mode 100644
index 00000000..49222305
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml
new file mode 100644
index 00000000..9bc23ac6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_java_keytool
+ - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml
new file mode 100644
index 00000000..2a8ad86e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml
@@ -0,0 +1,137 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+- when: has_java_keytool
+ block:
+ - name: Create private keys
+ community.crypto.openssl_privatekey:
+ path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ size: 2048 # this should work everywhere
+ # The following is more efficient, but might not work everywhere:
+ # type: ECC
+ # curve: secp384r1
+ cipher: "{{ 'auto' if item.passphrase is defined else omit }}"
+ passphrase: "{{ item.passphrase | default(omit) }}"
+ loop:
+ - name: cert
+ - name: cert-pw
+ passphrase: hunter2
+
+ - name: Create CSRs
+ community.crypto.openssl_csr:
+ path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}"
+ privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ privatekey_passphrase: "{{ item.passphrase | default(omit) }}"
+ commonName: "{{ item.commonName }}"
+ loop:
+ - name: cert
+ commonName: example.com
+ - name: cert-pw
+ passphrase: hunter2
+ commonName: example.com
+ - name: cert2
+ keyname: cert
+ commonName: example.org
+ - name: cert2-pw
+ keyname: cert-pw
+ passphrase: hunter2
+ commonName: example.org
+
+ - name: Create certificates
+ community.crypto.x509_certificate:
+ path: "{{ output_dir ~ '/' ~ item.name ~ '.pem' }}"
+ csr_path: "{{ output_dir ~ '/' ~ item.name ~ '.csr' }}"
+ privatekey_path: "{{ output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ privatekey_passphrase: "{{ item.passphrase | default(omit) }}"
+ provider: selfsigned
+ loop:
+ - name: cert
+ commonName: example.com
+ - name: cert-pw
+ passphrase: hunter2
+ commonName: example.com
+ - name: cert2
+ keyname: cert
+ commonName: example.org
+ - name: cert2-pw
+ keyname: cert-pw
+ passphrase: hunter2
+ commonName: example.org
+
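+ # The first key store task below defines YAML anchors (&create_key_store_data and
+ # &create_key_store_loop) that the later idempotency and change-detection steps
+ # reuse via *aliases, so every step runs with identical parameters unless it
+ # overrides them through a "<<:" merge.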
+ - name: Create a Java key store for the given certificates (check mode)
+ community.general.java_keystore: &create_key_store_data
+ name: example
+ certificate: "{{lookup('file', output_dir ~ '/' ~ item.name ~ '.pem') }}"
+ private_key: "{{lookup('file', output_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key') }}"
+ private_key_passphrase: "{{ item.passphrase | default(omit) }}"
+ password: changeit
+ dest: "{{ output_dir ~ '/' ~ item.name ~ '.jks' }}"
+ loop: &create_key_store_loop
+ - name: cert
+ - name: cert-pw
+ passphrase: hunter2
+ check_mode: yes
+ register: result_check
+
+ - name: Create a Java key store for the given certificates
+ community.general.java_keystore: *create_key_store_data
+ loop: *create_key_store_loop
+ register: result
+
+ - name: Create a Java key store for the given certificates (idempotency, check mode)
+ community.general.java_keystore: *create_key_store_data
+ loop: *create_key_store_loop
+ check_mode: yes
+ register: result_idem_check
+
+ - name: Create a Java key store for the given certificates (idempotency)
+ community.general.java_keystore: *create_key_store_data
+ loop: *create_key_store_loop
+ register: result_idem
+
+ - name: Create a Java key store for the given certificates (certificate changed, check mode)
+ community.general.java_keystore: *create_key_store_data
+ loop: &create_key_store_loop_new_certs
+ - name: cert2
+ keyname: cert
+ - name: cert2-pw
+ keyname: cert-pw
+ passphrase: hunter2
+ check_mode: yes
+ register: result_change_check
+
+ - name: Create a Java key store for the given certificates (certificate changed)
+ community.general.java_keystore: *create_key_store_data
+ loop: *create_key_store_loop_new_certs
+ register: result_change
+
+ - name: Create a Java key store for the given certificates (password changed, check mode)
+ community.general.java_keystore:
+ <<: *create_key_store_data
+ password: hunter2
+ loop: *create_key_store_loop_new_certs
+ check_mode: yes
+ register: result_pw_change_check
+ when: false # FIXME: module currently crashes
+
+ - name: Create a Java key store for the given certificates (password changed)
+ community.general.java_keystore:
+ <<: *create_key_store_data
+ password: hunter2
+ loop: *create_key_store_loop_new_certs
+ register: result_pw_change
+ when: false # FIXME: module currently crashes
+
+ - name: Validate results
+ assert:
+ that:
+ - result is changed
+ - result_check is changed
+ - result_idem is not changed
+ - result_idem_check is not changed
+ - result_change is changed
+ - result_change_check is changed
+ # - result_pw_change is changed # FIXME: module currently crashes
+ # - result_pw_change_check is changed # FIXME: module currently crashes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/aliases
new file mode 100644
index 00000000..506d60c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/aliases
@@ -0,0 +1,8 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml
new file mode 100644
index 00000000..00017865
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+- setup_wildfly_server
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml
new file mode 100644
index 00000000..9f9720a7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml
@@ -0,0 +1,236 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for jboss module.
+
+# helloworld.war (got from https://github.com/aeimer/java-example-helloworld-war/) license:
+# MIT License
+#
+# Copyright (c) 2017 Alex Eimer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# ===============================
+# Module's note section contains:
+# "- The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
+#
+# Also from https://docs.jboss.org/author/display/WFLY10/Application+deployment?_sscc=t
+# "Deployment content (for example, war, ear, jar, and sar files) can be placed
+# in the standalone/deployments directory of the WildFly distribution,
+# in order to be automatically deployed into the server runtime.
+# For this to work the deployment-scanner subsystem must be present.
+# The scanner periodically checks the contents of the deployments directory
+# and reacts to changes by updating the server."
+# Given the information above, a JBoss-compatible server must be installed and running for the full test suite.
+# We use the WildFly server, a free alternative, instead. See the setup_wildfly_server role for more information.
+
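+# Rough sketch of the marker-file lifecycle the checks below rely on (assumed
+# deployment-scanner behaviour, not verified here):
+#   war copied into the deployments directory  -> scanner picks it up
+#   successful deploy                           -> <deploy_dir>/helloworld-1.war.deployed
+#   undeploy (state=absent)                     -> <deploy_dir>/helloworld-1.war.undeployed
+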
+- vars:
+ war_file_1: 'helloworld-1.war'
+ war_file_1_path: '{{ wf_homedir }}/{{ war_file_1 }}'
+ fake_src_path: /fake/src
+ test_deployment: helloworld-1.war
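+ # task_parameters is merged into each task below via "<<: *task_parameters",
+ # so every step runs as the WildFly user ({{ wf_user }}) and registers its
+ # outcome as "result".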
+ task_parameters: &task_parameters
+ become_user: '{{ wf_user }}'
+ become: yes
+ register: result
+
+ block:
+ - name: Create test files
+ <<: *task_parameters
+ get_url:
+ url: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/jboss/{{ war_file_1 }}'
+ dest: '{{ wf_homedir }}'
+
+ ##################
+ # Start the tests:
+
+ # Test if state=present and not deployed, check_mode:
+ - name: jboss - deploy war in check_mode, the default deploy_path
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+
+ # Check
+ - name: check that nothing changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - "'is absent' in result.msg"
+
+ # Test if state=present and not deployed, actual mode:
+ - name: jboss - deploy war
+ <<: *task_parameters
+ jboss:
+ deployment: helloworld-1.war
+ deploy_path: '{{ deploy_dir }}'
+ src: '{{ war_file_1_path }}'
+
+ - assert:
+ that:
+ - result is changed
+
+ # Check
+ - name: check that the file is deployed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=present and deployed in check mode, try again:
+ - name: jboss - try again to deploy war in check_mode, war is deployed now
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ deploy_path: '{{ deploy_dir }}'
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test if state=present and deployed, try again:
+ - name: jboss - try again to deploy war in actual mode, war is deployed now
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ deploy_path: '{{ deploy_dir }}'
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Check
+ - name: check that nothing changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and deployed:
+ - name: jboss - undeploy war in check_mode, war is deployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: check that nothing actually changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and deployed:
+ - name: jboss - undeploy war in actual mode, war is deployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: check that file is undeployed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.undeployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and undeployed:
+ - name: jboss - undeploy war in check_mode, war is undeployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test if state=absent and undeployed:
+ - name: jboss - undeploy war in actual_mode, war is undeployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test fake src:
+ - name: jboss - test fake src
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ src: '{{ fake_src_path }}'
+ state: present
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'Source file {{ fake_src_path }} does not exist.' in result.msg"
+
+ # Test errors where state=present and src is not passed:
+ - name: jboss - must fail when state=present and src is not passed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ state: present
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'state is present but all of the following are missing: src' in result.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml
new file mode 100644
index 00000000..fb3860bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: jboss.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/aliases
new file mode 100644
index 00000000..a3778709
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/freebsd
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py
new file mode 100644
index 00000000..87a23fc4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import sys
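+
+# Minimal HTTP server used as a long-running daemon for the launchd tests.
+# Python 3 reads the listening port from the first CLI argument; the Python 2
+# branch lets SimpleHTTPServer.test() pick up the same argument.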
+
+if __name__ == '__main__':
+ if sys.version_info[0] >= 3:
+ import http.server
+ import socketserver
+ PORT = int(sys.argv[1])
+ Handler = http.server.SimpleHTTPRequestHandler
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ httpd.serve_forever()
+ else:
+ import mimetypes
+ mimetypes.init()
+ mimetypes.add_type('application/json', '.json')
+ import SimpleHTTPServer
+ SimpleHTTPServer.test()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/meta/main.yml
new file mode 100644
index 00000000..03924939
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/meta/main.yml
@@ -0,0 +1,4 @@
+---
+
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml
new file mode 100644
index 00000000..d014f224
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+- name: Test launchd module
+ block:
+ - name: Expect that launchctl exists
+ stat:
+ path: /bin/launchctl
+ register: launchctl_check
+ failed_when:
+ - not launchctl_check.stat.exists
+
+ - name: Run tests
+ include_tasks: test.yml
+ with_items:
+ - test_unknown
+ - test_start_stop
+ - test_restart
+ - test_unload
+ - test_reload
+ - test_runatload
+
+ when: ansible_os_family == 'Darwin'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml
new file mode 100644
index 00000000..1ec57bf6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml
@@ -0,0 +1,20 @@
+---
+
+- name: "[{{ item }}] Deploy test service configuration"
+ template:
+ src: "{{ launchd_service_name }}.plist.j2"
+ dest: "{{ launchd_plist_location }}"
+ become: yes
+
+- name: install the test daemon script
+ copy:
+ src: ansible_test_service.py
+ dest: /usr/local/sbin/ansible_test_service
+ mode: '755'
+
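+# The task below prepends a shebang pointing at the Python interpreter that
+# ansible-test is running with, so the daemon starts under a known-good Python;
+# the original "#!/usr/bin/env python" line then remains only as a comment.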
+- name: rewrite shebang in the test daemon script
+ lineinfile:
+ path: /usr/local/sbin/ansible_test_service
+ line: "#!{{ ansible_python_interpreter | realpath }}"
+ insertbefore: BOF
+ firstmatch: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml
new file mode 100644
index 00000000..50b0a36a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml
@@ -0,0 +1,27 @@
+---
+
+- name: "[{{ item }}] Unload service"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: yes
+ register: launchd_unloaded_result
+
+- name: "[{{ item }}] Validation"
+ assert:
+ that:
+ - launchd_unloaded_result is success
+ - launchd_unloaded_result.status.current_state == 'unloaded'
+ - launchd_unloaded_result.status.current_pid == '-'
+
+- name: "[{{ item }}] Remove test service configuration"
+ file:
+ path: "{{ launchd_plist_location }}"
+ state: absent
+ become: yes
+
+- name: "[{{ item }}] Remove test service server"
+ file:
+ path: "/usr/local/sbin/ansible_test_service"
+ state: absent
+ become: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml
new file mode 100644
index 00000000..211b051d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml
@@ -0,0 +1,8 @@
+---
+
+- name: "Running {{ item }}"
+ block:
+ - include_tasks: setup.yml
+ - include_tasks: "tests/{{ item }}.yml"
+ always:
+ - include_tasks: teardown.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml
new file mode 100644
index 00000000..fe2682ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml
@@ -0,0 +1,68 @@
+---
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service in check_mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result_check_mode"
+ check_mode: yes
+
+- name: "[{{ item }}] Assert that everything works in check mode"
+ assert:
+ that:
+ - test_1_launchd_start_result_check_mode is success
+ - test_1_launchd_start_result_check_mode is changed
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] The started service should run on port 21212"
+ wait_for:
+ port: 21212
+ delay: 5
+ timeout: 10
+
+- name: "[{{ item }}] Deploy a new test service configuration with a new port 21213"
+ template:
+ src: "modified.{{ launchd_service_name }}.plist.j2"
+ dest: "{{ launchd_plist_location }}"
+ become: yes
+
+- name: "[{{ item }}] When reloading the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: reloaded
+ become: yes
+ register: "test_1_launchd_reload_result"
+
+- name: "[{{ item }}] Validate that service was reloaded"
+ assert:
+ that:
+ - test_1_launchd_reload_result is success
+ - test_1_launchd_reload_result is changed
+ - test_1_launchd_reload_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_reload_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_reload_result.status.current_state == 'stopped'
+ - test_1_launchd_reload_result.status.current_pid == '-'
+
+- name: "[{{ item }}] Start the service with the new configuration..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] The started service should run on port 21213"
+ wait_for:
+ port: 21213
+ delay: 5
+ timeout: 10
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml
new file mode 100644
index 00000000..97677567
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml
@@ -0,0 +1,43 @@
+---
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] When restarting the service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: restarted
+ become: yes
+ register: "test_1_launchd_restart_result_check_mode"
+ check_mode: yes
+
+- name: "[{{ item }}] Validate that service was restarted in check mode"
+ assert:
+ that:
+ - test_1_launchd_restart_result_check_mode is success
+ - test_1_launchd_restart_result_check_mode is changed
+
+- name: "[{{ item }}] When restarting the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: restarted
+ become: yes
+ register: "test_1_launchd_restart_result"
+
+- name: "[{{ item }}] Validate that service was restarted"
+ assert:
+ that:
+ - test_1_launchd_restart_result is success
+ - test_1_launchd_restart_result is changed
+ - test_1_launchd_restart_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_restart_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_restart_result.status.current_state == 'started'
+ - test_1_launchd_restart_result.status.current_pid != '-'
+ - test_1_launchd_restart_result.status.status_code == '0'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml
new file mode 100644
index 00000000..08f21efc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml
@@ -0,0 +1,32 @@
+---
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service with RunAtLoad set to true..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ enabled: yes
+ become: yes
+ register: test_1_launchd_start_result
+
+- name: "[{{ item }}] Validate that service was started"
+ assert:
+ that:
+ - test_1_launchd_start_result is success
+ - test_1_launchd_start_result is changed
+ - test_1_launchd_start_result.status.previous_pid == '-'
+ - test_1_launchd_start_result.status.previous_state == 'unloaded'
+ - test_1_launchd_start_result.status.current_state == 'started'
+ - test_1_launchd_start_result.status.current_pid != '-'
+ - test_1_launchd_start_result.status.status_code == '0'
+
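+# The replace module in check mode is used as a "grep" here: if the plist
+# contains the RunAtLoad/true pair, the task would report a change, and
+# failed_when turns the absence of that change into a test failure.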
+- name: "[{{ item }}] Validate that RunAtLoad is set to true"
+ replace:
+ path: "{{ launchd_plist_location }}"
+ regexp: |
+ \s+<key>RunAtLoad</key>
+ \s+<true/>
+ replace: found_run_at_load
+ check_mode: yes
+ register: contents_would_have
+ failed_when: not contents_would_have is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml
new file mode 100644
index 00000000..b3cc380e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml
@@ -0,0 +1,112 @@
+---
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result_check_mode"
+ check_mode: yes
+
+
+- name: "[{{ item }}] Validate that service was started in check mode"
+ assert:
+ that:
+ - test_1_launchd_start_result_check_mode is success
+ - test_1_launchd_start_result_check_mode is changed
+
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] Validate that service was started"
+ assert:
+ that:
+ - test_1_launchd_start_result is success
+ - test_1_launchd_start_result is changed
+ - test_1_launchd_start_result.status.previous_pid == '-'
+ - test_1_launchd_start_result.status.previous_state == 'unloaded'
+ - test_1_launchd_start_result.status.current_state == 'started'
+ - test_1_launchd_start_result.status.current_pid != '-'
+ - test_1_launchd_start_result.status.status_code == '0'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a stopped service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: stopped
+ become: yes
+ register: "test_2_launchd_stop_result"
+
+- name: "[{{ item }}] Validate that service was stopped after it was started"
+ assert:
+ that:
+ - test_2_launchd_stop_result is success
+ - test_2_launchd_stop_result is changed
+ - test_2_launchd_stop_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_2_launchd_stop_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_2_launchd_stop_result.status.current_state == 'stopped'
+ - test_2_launchd_stop_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a stopped service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: stopped
+ become: yes
+ register: "test_3_launchd_stop_result"
+
+- name: "[{{ item }}] Validate that service can be stopped after being already stopped"
+ assert:
+ that:
+ - test_3_launchd_stop_result is success
+ - not test_3_launchd_stop_result is changed
+ - test_3_launchd_stop_result.status.previous_pid == '-'
+ - test_3_launchd_stop_result.status.previous_state == 'stopped'
+ - test_3_launchd_stop_result.status.current_state == 'stopped'
+ - test_3_launchd_stop_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_4_launchd_start_result"
+
+- name: "[{{ item }}] Validate that service was started..."
+ assert:
+ that:
+ - test_4_launchd_start_result is success
+ - test_4_launchd_start_result is changed
+ - test_4_launchd_start_result.status.previous_pid == '-'
+ - test_4_launchd_start_result.status.previous_state == 'stopped'
+ - test_4_launchd_start_result.status.current_state == 'started'
+ - test_4_launchd_start_result.status.current_pid != '-'
+ - test_4_launchd_start_result.status.status_code == '0'
+
+- name: "[{{ item }}] And when service is started again..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_5_launchd_start_result"
+
+- name: "[{{ item }}] Validate that service is still in the same state as before"
+ assert:
+ that:
+ - test_5_launchd_start_result is success
+ - not test_5_launchd_start_result is changed
+ - test_5_launchd_start_result.status.previous_pid == test_4_launchd_start_result.status.current_pid
+ - test_5_launchd_start_result.status.previous_state == test_4_launchd_start_result.status.current_state
+ - test_5_launchd_start_result.status.status_code == '0'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml
new file mode 100644
index 00000000..e005d87e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml
@@ -0,0 +1,11 @@
+---
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Expect that an error occurs when an unknown service is used."
+ launchd:
+ name: com.acme.unknownservice
+ state: started
+ register: result
+ failed_when:
+ - "'Unable to infer the path of com.acme.unknownservice service plist file and it was not found among active services' not in result.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml
new file mode 100644
index 00000000..b51a87fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml
@@ -0,0 +1,62 @@
+---
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: yes
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] When unloading the service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: yes
+ register: "test_1_launchd_unloaded_result_check_mode"
+ check_mode: yes
+
+- name: "[{{ item }}] Validate that service was unloaded in check mode"
+ assert:
+ that:
+ - test_1_launchd_unloaded_result_check_mode is success
+ - test_1_launchd_unloaded_result_check_mode is changed
+
+
+- name: "[{{ item }}] When unloading the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: yes
+ register: "test_1_launchd_unloaded_result"
+
+- name: "[{{ item }}] Validate that service was unloaded"
+ assert:
+ that:
+ - test_1_launchd_unloaded_result is success
+ - test_1_launchd_unloaded_result is changed
+ - test_1_launchd_unloaded_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_unloaded_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_unloaded_result.status.current_state == 'unloaded'
+ - test_1_launchd_unloaded_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given an unloaded service on an unloaded service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: yes
+ register: "test_2_launchd_unloaded_result"
+
+- name: "[{{ item }}] Validate that service did not change and is still unloaded"
+ assert:
+ that:
+ - test_2_launchd_unloaded_result is success
+ - not test_2_launchd_unloaded_result is changed
+ - test_2_launchd_unloaded_result.status.previous_pid == '-'
+ - test_2_launchd_unloaded_result.status.previous_state == 'unloaded'
+ - test_2_launchd_unloaded_result.status.current_state == 'unloaded'
+ - test_2_launchd_unloaded_result.status.current_pid == '-'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2
new file mode 100644
index 00000000..27affa3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>{{ launchd_service_name }}</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/local/sbin/ansible_test_service</string>
+ <string>21212</string>
+ </array>
+ </dict>
+</plist>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2
new file mode 100644
index 00000000..ac25cab0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>{{ launchd_service_name }}</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/local/sbin/ansible_test_service</string>
+ <string>21213</string>
+ </array>
+ </dict>
+</plist>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml
new file mode 100644
index 00000000..2d58be16
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml
@@ -0,0 +1,4 @@
+---
+
+launchd_service_name: launchd.test.service
+launchd_plist_location: /Library/LaunchDaemons/{{ launchd_service_name }}.plist
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases
new file mode 100644
index 00000000..589aeadc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml
new file mode 100644
index 00000000..093fafe4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_openldap
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml
new file mode 100644
index 00000000..f4a51013
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml
@@ -0,0 +1,11 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Run LDAP search module tests
+ block:
+ - include_tasks: "{{ item }}"
+ with_fileglob:
+ - 'tests/*.yml'
+ when: ansible_os_family in ['Ubuntu', 'Debian']
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/run-test.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/run-test.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml
new file mode 100644
index 00000000..824be4aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml
@@ -0,0 +1,20 @@
+- debug:
+ msg: Running tests/basic.yml
+
+####################################################################
+## Search ##########################################################
+####################################################################
+- name: Test simple search for a user
+ ldap_search:
+ dn: "ou=users,dc=example,dc=com"
+ scope: "onelevel"
+ filter: "(uid=ldaptest)"
+ ignore_errors: yes
+ register: output
+
+- name: assert that test LDAP user can be found
+ assert:
+ that:
+ - output is not failed
+ - output.results | length == 1
+ - output.results.0.displayName == "LDAP Test"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases
new file mode 100644
index 00000000..c72768af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group3
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml
new file mode 100644
index 00000000..906e82c6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml
@@ -0,0 +1,90 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test playbook for the listen_ports_facts module
+# Copyright: (c) 2019, Nathan Davison <ndavison85@gmail.com>
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: install netstat and netcat on deb
+ apt:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - net-tools
+ - netcat
+ when: ansible_os_family == "Debian"
+
+- name: install netstat and netcat on rh < 7
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - net-tools
+ - nc.x86_64
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: install netstat and netcat on rh >= 7
+ yum:
+ name: "{{ item }}"
+ state: latest
+ with_items:
+ - net-tools
+ - nmap-ncat
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7
+
+- name: start UDP server on port 5555
+ command: nc -u -l -p 5555
+ async: 1000
+ poll: 0
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: start UDP server on port 5555
+ command: nc -u -l 5555
+ async: 1000
+ poll: 0
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: start TCP server on port 5556
+ command: "nc -l -p 5556"
+ async: 1000
+ poll: 0
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: start TCP server on port 5556
+ command: "nc -l 5556"
+ async: 1000
+ poll: 0
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: Gather listening ports facts
+ listen_ports_facts:
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check for ansible_facts.udp_listen exists
+ assert:
+ that: ansible_facts.udp_listen is defined
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check for ansible_facts.tcp_listen exists
+ assert:
+ that: ansible_facts.tcp_listen is defined
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check TCP 5556 is in listening ports
+ assert:
+ that: 5556 in ansible_facts.tcp_listen | map(attribute='port') | sort | list
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: check UDP 5555 is in listening ports
+ assert:
+ that: 5555 in ansible_facts.udp_listen | map(attribute='port') | sort | list
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: kill all async commands
+ command: "kill -9 {{ item.pid }}"
+ loop: "{{ [tcp_listen, udp_listen]|flatten }}"
+ when: item.name == 'nc'
+ ignore_errors: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases
new file mode 100644
index 00000000..be6db963
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases
@@ -0,0 +1,4 @@
+destructive
+needs/root
+shippable/posix/group3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml
new file mode 100644
index 00000000..ae316977
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml
@@ -0,0 +1,94 @@
+- name: Is the locale we're going to test against installed?
+ shell: locale -a | grep pt_BR
+ register: initial_state
+ ignore_errors: True
+
+- name: Make sure the locale is not installed
+ locale_gen:
+ name: pt_BR
+ state: absent
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: cleaned
+ ignore_errors: True
+
+- name: Make sure the locale is not present
+ assert:
+ that:
+ - "cleaned.rc == 1"
+
+- name: Install the locale
+ locale_gen:
+ name: pt_BR
+ state: present
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: True
+
+- name: Make sure the locale is present and we say we installed it
+ assert:
+ that:
+ - "post_check_output.rc == 0"
+ - "output.changed"
+
+- name: Install the locale a second time
+ locale_gen:
+ name: pt_BR
+ state: present
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: True
+
+- name: Make sure the locale is present and we reported no change
+ assert:
+ that:
+ - "post_check_output.rc == 0"
+ - "not output.changed"
+
+- name: Remove the locale
+ locale_gen:
+ name: pt_BR
+ state: absent
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: True
+
+- name: Make sure the locale is absent and we reported a change
+ assert:
+ that:
+ - "post_check_output.rc == 1"
+ - "output.changed"
+
+- name: Remove the locale a second time
+ locale_gen:
+ name: pt_BR
+ state: absent
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: True
+
+- name: Make sure the locale is absent and we reported no change
+ assert:
+ that:
+ - "post_check_output.rc == 1"
+ - "not output.changed"
+
+# Cleanup
+- name: Reinstall the locale we tested against if it was initially installed
+ locale_gen:
+ name: pt_BR
+ state: present
+ when: initial_state.rc == 0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml
new file mode 100644
index 00000000..7ceb35fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml
@@ -0,0 +1,24 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include: 'locale_gen.yml'
+ when: ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases
new file mode 100644
index 00000000..07b87020
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml
new file mode 100644
index 00000000..6e563e0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml
@@ -0,0 +1,27 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Test cartesian lookup
+ debug: var=item
+ register: product
+ with_community.general.cartesian:
+ - - A
+ - B
+ - C
+ - - '1'
+ - '2'
+ - '3'
+- name: Verify cartesian lookup
+ assert:
+ that:
+ - product.results[0]['item'] == ["A", "1"]
+ - product.results[1]['item'] == ["A", "2"]
+ - product.results[2]['item'] == ["A", "3"]
+ - product.results[3]['item'] == ["B", "1"]
+ - product.results[4]['item'] == ["B", "2"]
+ - product.results[5]['item'] == ["B", "3"]
+ - product.results[6]['item'] == ["C", "1"]
+ - product.results[7]['item'] == ["C", "2"]
+ - product.results[8]['item'] == ["C", "3"]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases
new file mode 100644
index 00000000..25a1594d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases
@@ -0,0 +1,10 @@
+shippable/posix/group1
+destructive
+needs/file/tests/utils/constraints.txt
+needs/target/setup_etcd3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
+disabled # see https://github.com/ansible-collections/community.general/issues/322
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml
new file mode 100644
index 00000000..331ec312
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+
+ etcd3_prefix: '/keyprefix/'
+ etcd3_singlekey: '/singlekeypath'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml
new file mode 100644
index 00000000..e42f33ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml
@@ -0,0 +1,6 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Setup etcd3
+ import_role:
+ name: setup_etcd3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh
new file mode 100755
index 00000000..962201ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test_lookup_etcd3.yml -v "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml
new file mode 100644
index 00000000..3a0c149a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# lookup_etcd3 integration tests
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: put key/value pairs under an etcd prefix
+ etcd3:
+ key: "{{ etcd3_prefix }}foo{{ item }}"
+ value: "bar{{ item }}"
+ state: present
+ loop:
+ - 1
+ - 2
+ - 3
+
+- name: put a single key/values in etcd
+ etcd3:
+ key: "{{ etcd3_singlekey }}"
+ value: "foobar"
+ state: present
+
+- import_tasks: tests.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml
new file mode 100644
index 00000000..a1090b48
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml
@@ -0,0 +1,26 @@
+---
+# lookup_etcd3 integration tests
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- block:
+ - name: 'Fetch secrets using "etcd3" lookup'
+ set_fact:
+ etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
+ etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
+ key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
+
+ - name: 'Check etcd values'
+ assert:
+ msg: 'unexpected etcd3 values'
+ that:
+ - etcdoutkey1 is sequence
+ - etcdoutkey1 | length() == 3
+ - etcdoutkey1[0].value == 'bar1'
+ - etcdoutkey1[1].value == 'bar2'
+ - etcdoutkey1[2].value == 'bar3'
+ - etcdoutkey2 is sequence
+ - etcdoutkey2 | length() == 2
+ - etcdoutkey2.value == 'foobar'
+ - key_inexistent is sequence
+ - key_inexistent | length() == 0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml
new file mode 100644
index 00000000..583f2a6a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml
@@ -0,0 +1,6 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Test lookup etcd3
+ import_role:
+ name: lookup_etcd3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases
new file mode 100644
index 00000000..bc987654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases
@@ -0,0 +1,3 @@
+shippable/posix/group2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml
new file mode 100644
index 00000000..7d69c6df
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml
@@ -0,0 +1,19 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: test with_flattened
+ set_fact: '{{ item }}=flattened'
+ with_community.general.flattened:
+ - - a__
+ - - b__
+ - - c__
+ - d__
+- name: verify with_flattened results
+ assert:
+ that:
+ - a__ == 'flattened'
+ - b__ == 'flattened'
+ - c__ == 'flattened'
+ - d__ == 'flattened'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/aliases
new file mode 100644
index 00000000..dcc12d79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group2
+destructive
+needs/target/setup_openssl
+needs/file/tests/utils/constraints.txt
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
+skip/macos # FIXME seems to be always unstable
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml
new file mode 100644
index 00000000..73f6915c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+vault_gen_path: 'gen/testproject'
+vault_kv1_path: 'kv1/testproject'
+vault_kv2_path: 'kv2/data/testproject'
+vault_kv2_multi_path: 'kv2/data/testmulti'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_private.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_private.pem
new file mode 100644
index 00000000..61056a54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_private.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAnzyis1ZjfNB0bBgKFMSvvkTtwlvBsaJq7S5wA+kzeVOVpVWw
+kWdVha4s38XM/pa/yr47av7+z3VTmvDRyAHcaT92whREFpLv9cj5lTeJSibyr/Mr
+m/YtjCZVWgaOYIhwrXwKLqPr/11inWsAkfIytvHWTxZYEcXLgAXFuUuaS3uF9gEi
+NQwzGTU1v0FqkqTBr4B8nW3HCN47XUu0t8Y0e+lf4s4OxQawWD79J9/5d3Ry0vbV
+3Am1FtGJiJvOwRsIfVChDpYStTcHTCMqtvWbV6L11BWkpzGXSW4Hv43qa+GSYOD2
+QU68Mb59oSk2OB+BtOLpJofmbGEGgvmwyCI9MwIDAQABAoIBACiARq2wkltjtcjs
+kFvZ7w1JAORHbEufEO1Eu27zOIlqbgyAcAl7q+/1bip4Z/x1IVES84/yTaM8p0go
+amMhvgry/mS8vNi1BN2SAZEnb/7xSxbflb70bX9RHLJqKnp5GZe2jexw+wyXlwaM
++bclUCrh9e1ltH7IvUrRrQnFJfh+is1fRon9Co9Li0GwoN0x0byrrngU8Ak3Y6D9
+D8GjQA4Elm94ST3izJv8iCOLSDBmzsPsXfcCUZfmTfZ5DbUDMbMxRnSo3nQeoKGC
+0Lj9FkWcfmLcpGlSXTO+Ww1L7EGq+PT3NtRae1FZPwjddQ1/4V905kyQFLamAA5Y
+lSpE2wkCgYEAy1OPLQcZt4NQnQzPz2SBJqQN2P5u3vXl+zNVKP8w4eBv0vWuJJF+
+hkGNnSxXQrTkvDOIUddSKOzHHgSg4nY6K02ecyT0PPm/UZvtRpWrnBjcEVtHEJNp
+bU9pLD5iZ0J9sbzPU/LxPmuAP2Bs8JmTn6aFRspFrP7W0s1Nmk2jsm0CgYEAyH0X
++jpoqxj4efZfkUrg5GbSEhf+dZglf0tTOA5bVg8IYwtmNk/pniLG/zI7c+GlTc9B
+BwfMr59EzBq/eFMI7+LgXaVUsM/sS4Ry+yeK6SJx/otIMWtDfqxsLD8CPMCRvecC
+2Pip4uSgrl0MOebl9XKp57GoaUWRWRHqwV4Y6h8CgYAZhI4mh4qZtnhKjY4TKDjx
+QYufXSdLAi9v3FxmvchDwOgn4L+PRVdMwDNms2bsL0m5uPn104EzM6w1vzz1zwKz
+5pTpPI0OjgWN13Tq8+PKvm/4Ga2MjgOgPWQkslulO/oMcXbPwWC3hcRdr9tcQtn9
+Imf9n2spL/6EDFId+Hp/7QKBgAqlWdiXsWckdE1Fn91/NGHsc8syKvjjk1onDcw0
+NvVi5vcba9oGdElJX3e9mxqUKMrw7msJJv1MX8LWyMQC5L6YNYHDfbPF1q5L4i8j
+8mRex97UVokJQRRA452V2vCO6S5ETgpnad36de3MUxHgCOX3qL382Qx9/THVmbma
+3YfRAoGAUxL/Eu5yvMK8SAt/dJK6FedngcM3JEFNplmtLYVLWhkIlNRGDwkg3I5K
+y18Ae9n7dHVueyslrb6weq7dTkYDi3iOYRW8HRkIQh06wEdbxt0shTzAJvvCQfrB
+jg/3747WSsf/zBTcHihTRBdAv6OmdhV4/dD5YBfLAkLrd+mX7iE=
+-----END RSA PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_public.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_public.pem
new file mode 100644
index 00000000..12301e01
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/jwt_public.pem
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnzyis1ZjfNB0bBgKFMSv
+vkTtwlvBsaJq7S5wA+kzeVOVpVWwkWdVha4s38XM/pa/yr47av7+z3VTmvDRyAHc
+aT92whREFpLv9cj5lTeJSibyr/Mrm/YtjCZVWgaOYIhwrXwKLqPr/11inWsAkfIy
+tvHWTxZYEcXLgAXFuUuaS3uF9gEiNQwzGTU1v0FqkqTBr4B8nW3HCN47XUu0t8Y0
+e+lf4s4OxQawWD79J9/5d3Ry0vbV3Am1FtGJiJvOwRsIfVChDpYStTcHTCMqtvWb
+V6L11BWkpzGXSW4Hv43qa+GSYOD2QU68Mb59oSk2OB+BtOLpJofmbGEGgvmwyCI9
+MwIDAQAB
+-----END PUBLIC KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token.jwt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token.jwt
new file mode 100644
index 00000000..e38d1040
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token.jwt
@@ -0,0 +1 @@
+eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiJ0ZXN0Iiwic3ViIjoiaGFzaGlfdmF1bHRAdGVzdC5hbnNpYmxlLmNvbSIsIm5iZiI6MTYwNDgzNTEwMCwiZXhwIjozMjQ5OTA1MTM1OX0.NEWQR_Eicw8Fa9gU9HPY2M9Rp1czNTUKrICwKe7l1edaZNtgxhMGdyqnBsPrHL_dw1ZIwdvwVAioi8bEyIDEWICls0lzHwM169rrea3WEFrB5CP17A6DkvYL0cnOnGutbwUrXInPCRUfvRogIKEI-w8X-ris9LX2FBPKhXX1K3U0D8uYi5_9t8YWywTe0NkYvY-nTzMugK1MXMoBJ3fCksweJiDp6BOo3v9OU03MLgwgri2UdsqVb7WSk4XvWG-lmbiiSAWVf9BI3mecVDUHpYxbEqjv1HDG_wdX8zy1ZlAFbjp3kIpMlDVK1Q5nu_VPDzQrEvPdTnOzU36LE4UF-w
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token_invalid.jwt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token_invalid.jwt
new file mode 100644
index 00000000..aa608e6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/files/token_invalid.jwt
@@ -0,0 +1 @@
+eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOiIxMjM0IiwidXNlcl9jbGFpbSI6InVzZXJfY2xhaW0iLCJuYmYiOjE2MDQ4MzUxMDAsImV4cCI6MzI0OTkwNTEzNTl9.etc2WSH7kR3fHFlVt4wlBYFKNn7Z4DQcRVXUK4gGF-Q
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_setup.yml
new file mode 100644
index 00000000..34758813
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_setup.yml
@@ -0,0 +1,19 @@
+- name: 'Create an approle policy'
+ shell: "echo '{{ policy }}' | {{ vault_cmd }} policy write approle-policy-2 -"
+ vars:
+ policy: |
+ path "auth/approle/login" {
+ capabilities = [ "create", "read" ]
+ }
+
+- name: 'Enable the AppRole auth method'
+ command: '{{ vault_cmd }} auth enable approle'
+ register: enable_approle
+ failed_when: "enable_approle.rc!=0 and 'path is already in use' not in enable_approle.stderr"
+
+- name: 'Create a named role without secret id'
+ command: '{{ vault_cmd }} write auth/approle/role/test-role-2 policies="test-policy,approle-policy-2" bind_secret_id=false secret_id_bound_cidrs="0.0.0.0/0"'
+
+- name: 'Fetch the RoleID of the AppRole'
+ command: '{{ vault_cmd }} read -field=role_id auth/approle/role/test-role-2/role-id'
+ register: role_id_cmd_2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_test.yml
new file mode 100644
index 00000000..f78a12f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_secret_id_less_test.yml
@@ -0,0 +1,44 @@
+- vars:
+ role_id: '{{ role_id_cmd_2.stdout }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=approle role_id=' ~ role_id) }}"
+ secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle role_id=' ~ role_id) }}"
+
+ - name: 'Check secret values'
+ fail:
+ msg: 'unexpected secret values'
+ when: secret1['value'] != 'foo1' or secret2['value'] != 'foo2'
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle role_id=foobar') }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 auth_method=approle role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when non-existent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/non_existent_secret4 auth_method=approle role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml
new file mode 100644
index 00000000..9f4ce2da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_setup.yml
@@ -0,0 +1,21 @@
+- name: 'Create an approle policy'
+ command:
+ cmd: '{{ vault_cmd }} policy write approle-policy -'
+ stdin: |
+ path "auth/approle/login" {
+ capabilities = [ "create", "read" ]
+ }
+
+- name: 'Enable the AppRole auth method'
+ command: '{{ vault_cmd }} auth enable approle'
+
+- name: 'Create a named role'
+ command: '{{ vault_cmd }} write auth/approle/role/test-role policies="test-policy,approle-policy"'
+
+- name: 'Fetch the RoleID of the AppRole'
+ command: '{{ vault_cmd }} read -field=role_id auth/approle/role/test-role/role-id'
+ register: role_id_cmd
+
+- name: 'Get a SecretID issued against the AppRole'
+ command: '{{ vault_cmd }} write -field=secret_id -f auth/approle/role/test-role/secret-id'
+ register: secret_id_cmd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml
new file mode 100644
index 00000000..7bb0d83d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/approle_test.yml
@@ -0,0 +1,45 @@
+- vars:
+ role_id: '{{ role_id_cmd.stdout }}'
+ secret_id: '{{ secret_id_cmd.stdout }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+
+ - name: 'Check secret values'
+ fail:
+ msg: 'unexpected secret values'
+ when: secret1['value'] != 'foo1' or secret2['value'] != 'foo2'
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=toto role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when non-existent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/non_existent_secret4 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_setup.yml
new file mode 100644
index 00000000..68cc7ad9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_setup.yml
@@ -0,0 +1,18 @@
+- name: 'Enable the JWT auth method'
+ command: '{{ vault_cmd }} auth enable jwt'
+
+- name: 'Configure the JWT auth method'
+ command: '{{ vault_cmd }} write auth/jwt/config jwt_validation_pubkeys={{ jwt_public_key | quote }}'
+ vars:
+ jwt_public_key: '{{ lookup("file", "jwt_public.pem") }}'
+
+- name: 'Create a named role'
+ command:
+ cmd: '{{ vault_cmd }} write auth/jwt/role/test-role -'
+ stdin: |
+ {
+ "role_type": "jwt",
+ "policies": "test-policy",
+ "user_claim": "sub",
+ "bound_audiences": "test"
+ }
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_test.yml
new file mode 100644
index 00000000..262b4e74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/jwt_test.yml
@@ -0,0 +1,46 @@
+- vars:
+ role_id: test-role
+ jwt: '{{ lookup("file", "token.jwt") }}'
+ jwt_invalid: '{{ lookup("file", "token_invalid.jwt") }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=jwt jwt=' ~ jwt ~ ' role_id=' ~ role_id) }}"
+ secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=jwt jwt=' ~ jwt ~ ' role_id=' ~ role_id) }}"
+
+ - name: 'Check secret values'
+ fail:
+ msg: 'unexpected secret values'
+ when: secret1['value'] != 'foo1' or secret2['value'] != 'foo2'
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=jwt jwt=' ~ jwt_invalid ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 auth_method=jwt jwt=' ~ jwt ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when non-existent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/non_existent_secret4 auth_method=jwt jwt=' ~ jwt ~ ' role_id=' ~ role_id) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml
new file mode 100644
index 00000000..9d0eee18
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/main.yml
@@ -0,0 +1,188 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install Hashi Vault on controlled node and test
+ vars:
+ vault_version: '0.11.0'
+ vault_uri: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/lookup_hashi_vault/vault_{{ vault_version }}_{{ ansible_system | lower }}_{{ vault_arch }}.zip'
+ vault_cmd: '{{ local_temp_dir }}/vault'
+ block:
+ - name: Create a local temporary directory
+ tempfile:
+ state: directory
+ register: tempfile_result
+
+ - set_fact:
+ local_temp_dir: '{{ tempfile_result.path }}'
+
+ - when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ local_temp_dir }}/privatekey.pem'
+
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ local_temp_dir }}/csr.csr'
+ privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+
+ - name: Generate selfsigned certificate
+ community.crypto.openssl_certificate:
+ path: '{{ local_temp_dir }}/cert.pem'
+ csr_path: '{{ local_temp_dir }}/csr.csr'
+ privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ register: selfsigned_certificate
+
+ - name: 'Install unzip'
+ package:
+ name: unzip
+ when: ansible_distribution != "MacOSX" # unzip already installed
+
+ - assert:
+ # Linux: x86_64, FreeBSD: amd64
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - set_fact:
+ vault_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ vault_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+
+ - name: 'Download vault binary'
+ unarchive:
+ src: '{{ vault_uri }}'
+ dest: '{{ local_temp_dir }}'
+ remote_src: true
+
+ - environment:
+ # used by vault command
+ VAULT_DEV_ROOT_TOKEN_ID: '47542cbc-6bf8-4fba-8eda-02e0a0d29a0a'
+ block:
+ - name: 'Create configuration file'
+ template:
+ src: vault_config.hcl.j2
+ dest: '{{ local_temp_dir }}/vault_config.hcl'
+
+ - name: 'Start vault service'
+ environment:
+ VAULT_ADDR: 'http://localhost:8200'
+ block:
+ - name: 'Start vault server (dev mode enabled)'
+ shell: 'nohup {{ vault_cmd }} server -dev -config {{ local_temp_dir }}/vault_config.hcl </dev/null >/dev/null 2>&1 &'
+
+ - name: 'Create generic secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=gen generic'
+
+ - name: 'Create KV v1 secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=kv1 -version=1 kv'
+
+ - name: 'Create KV v2 secrets engine'
+ command: '{{ vault_cmd }} secrets enable -path=kv2 -version=2 kv'
+
+ - name: 'Create a test policy'
+ command:
+ cmd: '{{ vault_cmd }} policy write test-policy -'
+ stdin: |
+ path "{{ vault_gen_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_gen_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_gen_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+ path "{{ vault_kv1_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_kv1_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_kv1_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+ path "{{ vault_kv2_path }}/secret1" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_kv2_path }}/secret2" {
+ capabilities = ["read", "update"]
+ }
+ path "{{ vault_kv2_path }}/secret3" {
+ capabilities = ["deny"]
+ }
+ path "{{ vault_kv2_multi_path }}/secrets" {
+ capabilities = ["read"]
+ }
+ path "{{ vault_kv2_path }}/secret4" {
+ capabilities = ["read", "update"]
+ }
+
+ - name: 'Create generic secrets'
+ command: '{{ vault_cmd }} write {{ vault_gen_path }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3]
+
+ - name: 'Create KV v1 secrets'
+ command: '{{ vault_cmd }} kv put {{ vault_kv1_path }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3]
+
+ - name: 'Create KV v2 secrets'
+ command: '{{ vault_cmd }} kv put {{ vault_kv2_path | regex_replace("/data") }}/secret{{ item }} value=foo{{ item }}'
+ loop: [1, 2, 3, 4]
+
+ - name: 'Update KV v2 secret4 with new value to create version'
+ command: '{{ vault_cmd }} kv put {{ vault_kv2_path | regex_replace("/data") }}/secret4 value=foo5'
+
+ - name: 'Create multiple KV v2 secrets under one path'
+ command: '{{ vault_cmd }} kv put {{ vault_kv2_multi_path | regex_replace("/data") }}/secrets value1=foo1 value2=foo2 value3=foo3'
+
+ - name: setup approle auth
+ import_tasks: approle_setup.yml
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - name: setup approle secret_id_less auth
+ import_tasks: approle_secret_id_less_setup.yml
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - name: setup token auth
+ import_tasks: token_setup.yml
+
+ - name: setup jwt auth
+ import_tasks: jwt_setup.yml
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: approle
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: approle_secret_id_less
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: token
+
+ - import_tasks: tests.yml
+ vars:
+ auth_type: jwt
+ when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
+
+ always:
+ - name: 'Kill vault process'
+ shell: "kill $(cat {{ local_temp_dir }}/vault.pid)"
+ ignore_errors: true
+
+ always:
+ - name: 'Delete temp dir'
+ file:
+ path: '{{ local_temp_dir }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml
new file mode 100644
index 00000000..53ec6827
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/tests.yml
@@ -0,0 +1,76 @@
+- name: 'test {{ auth_type }} auth without SSL (lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=http://localhost:8200 '
+
+- name: 'test {{ auth_type }} auth without SSL (environment variable)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'http://localhost:8200'
+
+- when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: 'test {{ auth_type }} auth with certs (validation enabled, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=https://localhost:8201 ca_cert={{ local_temp_dir }}/cert.pem validate_certs=True '
+
+ - name: 'test {{ auth_type }} auth with certs (validation enabled, environment variables)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_CACERT: '{{ local_temp_dir }}/cert.pem'
+
+ - name: 'test {{ auth_type }} auth with certs (validation disabled, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ vars:
+ conn_params: 'url=https://localhost:8201 validate_certs=False '
+
+ - name: 'test {{ auth_type }} auth with certs (validation using env VAR, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_SKIP_VERIFY: 1
+
+ - name: 'test {{ auth_type }} auth with certs (validation using env VAR (True), lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_SKIP_VERIFY: True
+
+ - name: 'test {{ auth_type }} auth with certs (validation using env VAR (y), lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: ''
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_SKIP_VERIFY: y
+
+ - name: 'test {{ auth_type }} auth with certs (precedence of validate_certs over env VAR, lookup parameters)'
+ include_tasks: '{{ auth_type }}_test.yml'
+ args:
+ apply:
+ vars:
+ conn_params: 'validate_certs=False '
+ environment:
+ VAULT_ADDR: 'https://localhost:8201'
+ VAULT_SKIP_VERIFY: False
+
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml
new file mode 100644
index 00000000..d5ce2803
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_setup.yml
@@ -0,0 +1,3 @@
+- name: 'Create a test credential (token)'
+ command: '{{ vault_cmd }} token create -policy test-policy -field token'
+ register: user_token_cmd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml
new file mode 100644
index 00000000..8ad9c666
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/tasks/token_test.yml
@@ -0,0 +1,88 @@
+- vars:
+ user_token: '{{ user_token_cmd.stdout }}'
+ block:
+ - name: 'Fetch secrets using "hashi_vault" lookup'
+ set_fact:
+ gen_secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ gen_secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret2 token=' ~ user_token) }}"
+ kv1_secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ kv1_secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret2 token=' ~ user_token) }}"
+ kv2_secret1: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
+ kv2_secret2: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 token=' ~ user_token) }}"
+ kv2_secret4: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret4?version=2 token=' ~ user_token) }}"
+ kv2_secret2_as_raw: "{{ lookup('community.general.hashi_vault', vault_kv2_path ~ '/secret2 ' ~ conn_params, auth_method='token', token=user_token, return_format='raw') }}"
+ kv2_secrets_as_dict: "{{ lookup('community.general.hashi_vault', vault_kv2_multi_path ~ '/secrets ' ~ conn_params, auth_method='token', token=user_token) }}"
+ kv2_secrets_as_values: "{{ query('community.general.hashi_vault', vault_kv2_multi_path ~ '/secrets ' ~ conn_params, auth_method='token', token=user_token, return_format='values') }}"
+
+ - name: 'Check secret generic values'
+ fail:
+ msg: 'unexpected secret values'
+ when: gen_secret1['value'] != 'foo1' or gen_secret2['value'] != 'foo2'
+
+ - name: 'Check secret kv1 values'
+ fail:
+ msg: 'unexpected secret values'
+ when: kv1_secret1['value'] != 'foo1' or kv1_secret2['value'] != 'foo2'
+
+ - name: 'Check secret kv2 values'
+ fail:
+ msg: 'unexpected secret values'
+ when: kv2_secret1['value'] != 'foo1' or kv2_secret2['value'] != 'foo2' or kv2_secret4['value'] != 'foo5'
+
+ - name: 'Check kv2 secret raw return value'
+ fail:
+ msg: 'unexpected raw return value'
+ when: >-
+ 'data' not in kv2_secret2_as_raw
+ or 'data' not in kv2_secret2_as_raw['data']
+ or 'metadata' not in kv2_secret2_as_raw['data']
+
+ - name: "Check multiple secrets as dict"
+ fail:
+ msg: 'Return value was not dict or items do not match.'
+ when: (kv2_secrets_as_dict | type_debug != 'dict') or (kv2_secrets_as_dict['value{{ item }}'] != 'foo{{ item }}')
+ loop: [1, 2, 3]
+
+ - name: "Check multiple secrets as values"
+ fail:
+ msg: 'Return value was not list or items do not match.'
+ when: (kv2_secrets_as_values | type_debug != 'list') or ('foo{{ item }}' not in kv2_secrets_as_values)
+ loop: [1, 2, 3]
+
+ - name: "Check multiple secrets as dict"
+ fail:
+ msg: 'Return value was not dict or items do not match.'
+ when: (kv2_secrets_as_dict | type_debug != 'dict') or (kv2_secrets_as_dict['value{{ item }}'] != 'foo{{ item }}')
+ loop: [1, 2, 3]
+
+ - name: 'Failure expected when erroneous credentials are used'
+ vars:
+ secret_wrong_cred: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=token token=wrong_token') }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_wrong_cred }})'
+ register: test_wrong_cred
+ ignore_errors: true
+
+ - name: 'Failure expected when unauthorized secret is read'
+ vars:
+ secret_unauthorized: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 token=' ~ user_token) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_unauthorized }})'
+ register: test_unauthorized
+ ignore_errors: true
+
+ - name: 'Failure expected when nonexistent secret is read'
+ vars:
+ secret_inexistent: "{{ lookup('community.general.hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/non_existent_secret4 token=' ~ user_token) }}"
+ debug:
+ msg: 'Failure is expected ({{ secret_inexistent }})'
+ register: test_inexistent
+ ignore_errors: true
+
+ - name: 'Check expected failures'
+ assert:
+ msg: "an expected failure didn't occur"
+ that:
+ - test_wrong_cred is failed
+ - test_unauthorized is failed
+ - test_inexistent is failed
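For reference, the term string handed to the hashi_vault lookup in these tasks is simply conn_params concatenated with further space-separated key=value pairs (secret=..., auth_method=token, token=...); keyword arguments such as return_format='raw' can be mixed in as well. A minimal standalone sketch, where the URL, token and secret path are placeholder values for illustration only:

- name: Read a single KV secret with the hashi_vault lookup (sketch)
  vars:
    vault_url: 'https://localhost:8201'   # placeholder
    vault_token: 'my_token'               # placeholder
  set_fact:
    my_secret: "{{ lookup('community.general.hashi_vault',
                          'url=' ~ vault_url ~ ' validate_certs=False secret=secret/hello auth_method=token token=' ~ vault_token) }}"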
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2
new file mode 100644
index 00000000..effc90ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/lookup_hashi_vault/templates/vault_config.hcl.j2
@@ -0,0 +1,10 @@
+# {{ ansible_managed }}
+pid_file = "{{ local_temp_dir }}/vault.pid"
+{% if pyopenssl_version.stdout is version('0.15', '>=') %}
+listener "tcp" {
+ tls_key_file = "{{ local_temp_dir }}/privatekey.pem"
+ tls_cert_file = "{{ local_temp_dir }}/cert.pem"
+ tls_disable = false
+ address = "localhost:8201"
+}
+{% endif %}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/install_dependencies.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/install_dependencies.yml
new file mode 100644
index 00000000..d4c7e9a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/install_dependencies.yml
@@ -0,0 +1,19 @@
+- hosts: localhost
+ tasks:
+ - name: Install openssl
+ import_role:
+ name: setup_openssl
+
+ - name: "RedHat <= 7, select the last version compatible with requests 2.6.0 (this version doesn't support approle or jwt auth)"
+ set_fact:
+ hvac_package: 'hvac==0.2.5'
+ when: ansible_distribution == 'RedHat' and ansible_distribution_major_version is version('7', '<=')
+
+ - name: 'CentOS < 7, select last version compatible with Python 2.6'
+ set_fact:
+ hvac_package: 'hvac==0.5.0'
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is version('7', '<')
+
+ - name: 'Install hvac Python package'
+ pip:
+ name: "{{ hvac_package|default('hvac') }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml
new file mode 100644
index 00000000..8d3f3201
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/playbooks/test_lookup_hashi_vault.yml
@@ -0,0 +1,9 @@
+- hosts: localhost
+ tasks:
+ - name: register pyOpenSSL version
+ command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
+ register: pyopenssl_version
+
+ - name: Test lookup hashi_vault
+ import_role:
+ name: lookup_hashi_vault/lookup_hashi_vault
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/runme.sh
new file mode 100755
index 00000000..e5e0df34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_hashi_vault/runme.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+set -eux
+
+# First install pyOpenSSL, then test the lookup in a second playbook in order to
+# work around this error, which occurs on OS X 10.11 only:
+#
+# TASK [lookup_hashi_vault : test token auth with certs (validation enabled, lookup parameters)] ***
+# included: lookup_hashi_vault/tasks/token_test.yml for testhost
+#
+# TASK [lookup_hashi_vault : Fetch secrets using "hashi_vault" lookup] ***
+# From cffi callback <function _verify_callback at 0x106f995f0>:
+# Traceback (most recent call last):
+# File "/usr/local/lib/python2.7/site-packages/OpenSSL/SSL.py", line 309, in wrapper
+# _lib.X509_up_ref(x509)
+# AttributeError: 'module' object has no attribute 'X509_up_ref'
+# fatal: [testhost]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'hashi_vault'. Error was a <class 'requests.exceptions.SSLError'>, original message: HTTPSConnectionPool(host='localhost', port=8201): Max retries exceeded with url: /v1/auth/token/lookup-self (Caused by SSLError(SSLError(\"bad handshake: Error([('SSL routines', 'ssl3_get_server_certificate', 'certificate verify failed')],)\",),))"}
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook playbooks/install_dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook playbooks/test_lookup_hashi_vault.yml -v "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases
new file mode 100644
index 00000000..08496766
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group2
+destructive
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml
new file mode 100644
index 00000000..17622d96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Install LMDB Python package
+ pip:
+ name: lmdb
+ environment:
+ LMDB_PURE: "1"
+ - name: Setup test data
+ script: test_db.py
+ args:
+ executable: "{{ ansible_python.executable }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh
new file mode 100755
index 00000000..afdff7bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test.yml -v "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml
new file mode 100644
index 00000000..1fa0b615
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml
@@ -0,0 +1,26 @@
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: '{{ query(''community.general.lmdb_kv'', ''nl'', ''be'', ''lu'', db=''jp.mdb'') }}'
+ - debug:
+ var: item.1
+ loop: '{{ query(''community.general.lmdb_kv'', db=''jp.mdb'') }}'
+ - assert:
+ that:
+ - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
+ - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
+ - assert:
+ that:
+ - item.0 == 'nl'
+ - item.1 == 'Netherlands'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - n*
+ - assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py
new file mode 100644
index 00000000..f251b7f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py
@@ -0,0 +1,11 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import lmdb
+map_size = 1024 * 100
+env = lmdb.open('./jp.mdb', map_size=map_size)
+with env.begin(write=True) as txn:
+ txn.put('fr'.encode(), 'France'.encode())
+ txn.put('nl'.encode(), 'Netherlands'.encode())
+ txn.put('es'.encode(), 'Spain'.encode())
+ txn.put('be'.encode(), 'Belgium'.encode())
+ txn.put('lu'.encode(), 'Luxembourg'.encode())
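The script above seeds jp.mdb with five country-code keys that test.yml then reads back individually, by glob, or as the whole database. As a minimal sketch of that relationship (assuming jp.mdb sits in the playbook directory, as it does after the dependencies play runs the script), the query form returns a list of values for the requested keys, so this task would print ['Netherlands', 'Belgium'] for the data written above:

- name: Read selected keys from jp.mdb with the lmdb_kv lookup (sketch)
  debug:
    msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', db='jp.mdb') }}"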
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases
new file mode 100644
index 00000000..1c21d9b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group4
+destructive
+skip/aix
+skip/rhel
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml
new file mode 100644
index 00000000..882b4a35
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - include_tasks: package.yml
+ - include_tasks: tests.yml
+ when:
+ # The pass package is no longer available in EPEL, so only test on Fedora, OpenSUSE, FreeBSD, macOS, and Ubuntu
+ # https://lists.zx2c4.com/pipermail/password-store/2019-July/003689.html
+ - ansible_facts.distribution in ['FreeBSD', 'MacOSX', 'openSUSE Leap', 'Ubuntu']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml
new file mode 100644
index 00000000..8ec108e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml
@@ -0,0 +1,58 @@
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+- name: Install package
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ passwordstore_packages }}"
+ state: present
+ when: ansible_facts.pkg_mgr in ['apt', 'dnf', 'yum', 'pkgng', 'community.general.pkgng']
+
+- block:
+ # openSUSE Leap >= 15.0 doesn't include password-store in the main repo
+ - name: SUSE | Add security:privacy repo
+ template:
+ src: security-privacy.repo.j2
+ dest: /etc/zypp/repos.d/security:privacy.repo
+
+ - name: SUSE | Install package
+ package:
+ name: password-store
+ state: present
+ update_cache: yes
+ disable_gpg_check: yes
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+- name: Install on macOS
+ when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+
+ - name: MACOS | Install package
+ homebrew:
+ name:
+ - gnupg2
+ - pass
+ state: present
+ update_homebrew: no
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile a package which takes a long time. Do not upgrade homebrew until a
+ # proper solution can be found
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: True
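The macOS block above works around the fact that Homebrew refuses to run as root: it locates the brew binary, stats it to learn its owning user, and then becomes that user for the homebrew task. Reduced to its core, the pattern looks roughly like this (a sketch; the package name is illustrative):

- name: Find the brew binary (sketch)
  command: which brew
  register: brew_which

- name: Get the owner of the brew binary (sketch)
  stat:
    path: "{{ brew_which.stdout }}"
  register: brew_stat

- name: Install a package as the brew owner (sketch)
  homebrew:
    name: pass
    state: present
  become: yes
  become_user: "{{ brew_stat.stat.pw_name }}"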
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
new file mode 100644
index 00000000..d702edaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
@@ -0,0 +1,62 @@
+- name: Check name of gpg2 binary
+ command: which gpg2
+ register: gpg2_check
+ ignore_errors: true
+
+- name: Set gpg2 binary name
+ set_fact:
+ gpg2_bin: '{{ "gpg2" if gpg2_check is successful else "gpg" }}'
+
+- name: Stop gpg-agent so we can remove any locks on the GnuPG dir
+ command: gpgconf --kill gpg-agent
+ ignore_errors: yes
+
+- name: Remove previous password files and directory
+ file:
+ dest: "{{ item }}"
+ state: absent
+ loop:
+ - "~/.gnupg"
+ - "~/.password-store"
+
+# How to generate a new GPG key:
+# gpg2 --batch --gen-key input # See templates/input
+# gpg2 --list-secret-keys --keyid-format LONG
+# gpg2 --armor --export-secret-keys [key id]
+# # Get the fingerprint
+# gpg2 --fingerprint --keyid-format LONG | grep [key id] -A 1 | tail -1 | tr -d '[:space:]' | awk -F '=' '{print $2":6:"}'
+
+- name: Import GPG private key
+ shell: echo "{{ passwordstore_privkey }}" | {{ gpg2_bin }} --import --allow-secret-key-import -
+
+- name: Trust key
+ shell: echo "D3E1CC8934E97270CEB066023AF1BD3619AB496A:6:" | {{ gpg2_bin }} --import-ownertrust
+
+- name: Initialise passwordstore
+ command: pass init ansible-test
+
+- name: Create a password
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass length=8 create=yes') }}"
+
+- name: Fetch password from an existing file
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+
+- name: Verify password
+ assert:
+ that:
+ - readpass == newpass
+
+- name: Create a password with equal sign
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=yes') }}"
+
+- name: Fetch a password with equal sign
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal') }}"
+
+- name: Verify password
+ assert:
+ that:
+ - readpass == newpass
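As with hashi_vault, the passwordstore lookup packs its options into the term string as space-separated key=value pairs (length, create, userpass above), and a value containing an equals sign such as SimpleSample= is exactly what the last two tasks exercise. A minimal sketch, assuming an initialised password store for the current user; the entry name example/app is illustrative:

- name: Create or read a password store entry (sketch)
  set_fact:
    app_password: "{{ lookup('community.general.passwordstore', 'example/app length=12 create=yes') }}"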
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input
new file mode 100644
index 00000000..d639accd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input
@@ -0,0 +1,9 @@
+%echo Generating an Ansible Test PGP key
+Key-Type: RSA
+Key-Length: 4096
+Subkey-Type: RSA
+Subkey-Length: 4096
+Name-Real: ansible-test
+Expire-Date: 0
+%commit
+%echo done
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2
new file mode 100644
index 00000000..e698129a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2
@@ -0,0 +1,7 @@
+[security_privacy]
+name=Crypto applications and utilities (openSUSE_Leap_{{ ansible_distribution_version }})
+type=rpm-md
+baseurl=http://download.opensuse.org/repositories/security:/privacy/openSUSE_Leap_{{ ansible_distribution_version }}/
+gpgcheck=1
+gpgkey=http://download.opensuse.org/repositories/security:/privacy/openSUSE_Leap_{{ ansible_distribution_version }}/repodata/repomd.xml.key
+enabled=1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml
new file mode 100644
index 00000000..3d1c4d45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml
@@ -0,0 +1,2 @@
+passwordstore_packages:
+ - pass
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml
new file mode 100644
index 00000000..3d1c4d45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml
@@ -0,0 +1,2 @@
+passwordstore_packages:
+ - pass
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml
new file mode 100644
index 00000000..39e51fbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml
@@ -0,0 +1,3 @@
+passwordstore_packages:
+ - gnupg
+ - password-store
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml
new file mode 100644
index 00000000..be954d7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml
@@ -0,0 +1,117 @@
+passwordstore_privkey: |
+ -----BEGIN PGP PRIVATE KEY BLOCK-----
+ Version: GnuPG v2.0.22 (GNU/Linux)
+
+ lQcYBF0L9gUBEACrYsKIj/rXFQPURHz+YKg54BW6utIvwoF/CbQarc8iXoXfPZW4
+ wQnFaX+dLifkvX5f4xIUVD94qyMXT2oNg+HZXH2y7VwqBFeG9TrNqfgJsYTbTlgP
+ 0MOD3FtZkMy/6TrJyOzY7x7oHUwWY1S5YeBDBsRqWwhS5fIfHLGbZLwoHC/53mlP
+ ve0zL1u28y3Kh8JvYBSlcYrbMlbKQb1g0TSb8gVwq+Iv2CriEmHipUvLI02z02Ny
+ XnT0+rzOEEynaoF7zX1S0eoIwKz46/sen1L5YAYhLM16gq4WSquxUz8klff+jVD5
+ uLgUKzEkhBdBuTvWKjknk6Wu2aDPC6IQXUjm+5dIh+/IyD4SKPA+HY1QZjIF/6nH
+ KV43RRzB8YWkAEW/iQdKCXWyNz1o8zYiun/PxiJtgV2qpz4z5M+ANx1VCj31eMTE
+ A0exSnMLjbUknE3JX2bnBfTgbiKbeXQcL6FNrKIPJrwgNlP3g48JzY6aPT3Tej8Q
+ pd6DqamU0sQfoi2K8zs/Ltl+REocKqzQ9cz0dj7ykQqAMf+TunQfC3pY8yI7IKCr
+ YM2L3aJbiqLTp31dCfwxBSZJ+oalUzp2l91ugu1k1mjuik7ZJaflfBNEFWK9ig3v
+ qhv8FwdoPFNh8TC11btfU1wphcBl65pCaPff2s94/Gj2aQ0PrFYuhPtzqwARAQAB
+ AA//UMXH1yK8XrfJyTXKDv0w663fr9SykM3DyEKjfltiHtmbkF3u/VcFKuQQv9xr
+ 8tMYB0r2T1xxWYaWkDreSaZZP97mYviZEjhfo/xZjWpIuxDOA6nFuRZzvuaQqwKi
+ bOQXz9gBQDBaSZzdKkQAPyqQziYXVeS3ZJJ47Q7R6eGtB95ZAhM/YNSrQQ9V00CC
+ 2UvoaCNJN7vubGYqH0KiZUnT2JdU1wg7Hr9rXoa5WV77/K4Txeefm9xGlNrDNv7Z
+ kaGRiu6K3QiPmzZrjxlwjbsbGOqXmPULlmyWbW0dxAHu5WhEq9SgUEEtiFve2k3i
+ xBfvyny12SAt2t04e7wI0luUqnpQR8j3JPFXcCiFszIauMbXhIAd0QhRfS6/pRCf
+ iBRliFqvQpgmz9w8/enfQtBvyAwZLr3p2vk4OyRW/GLFnfkoKcCvgZtL5vNDPYJm
+ Y1koC+jAsiiFRmUvP9SLyNabVTANz5Hg/jZmcdh+OTWs1xdZl1JyjUOxV6n1n1pW
+ BPc0FaNvFS+wkx6Rp9DgryTP1oTD6wjacNljFh3A9LJ0DTnQgsghoi5ArBKnRP7R
+ 9i0DKUqywoukm+GQHoZlB6bglDBVc3wKZvtw17/SgD6GnKZ3zH+Y8yx3K3MI9wjT
+ Od1jMxQxzKWMxrv72mtzchm/utkubL5BpM5hn6fg32NEkxEIAMhc2f01fuv/UZ1i
+ zlkqXkcMzrd/+9+Mv53meLMJsW2biOwRF52ZXi3k9ulUwHB21FaAXeyOFhueKrh/
+ iKu5Hpydxruj0XCgMRArgvghPL4KLfhh54xvXGKxWw7B0IWkOnvELPikOl3h17cY
+ lQ5rN5mQtlxaqqrJKOxkseEFTvVJudZXZH9oArlVXO88HklDeEHtV4xjdiyvtFKg
+ qWUvo6oNT0LmpFdgstoKJ8H5gKiV3wfl2QJQxqWT40wUFVnNEAoBYC7PbHWajxmD
+ 7ZGoKE9o3ythg11D4rH23uTUFLd5Hc5xeQ2/+OhEKv4Qe0X+okv8/RpM94ObfsW9
+ HdQBsgMIANr6B/SzwhPn8eK0c6wrOYU/B/V370qgTBRpWgrNRCvtN8NuSJKOJRU/
+ qYm74dCsVxBfvUlEPRp9DEsE9djvwZLdIqOfn4amDoLZmYdMQ5LQCOaHlrnEx+ng
+ uHUklFUXIHDNcVRWzhrHm4KQWeB7RrCRL1nEimW/nhh8y++4TmxZQ1Zr2fdUWdMs
+ dSWryw3RE5nwDd7nW8+Wgm3TfS4jhhn3DcKFZxLzG1eo4ZaXoPa4j7zps3xFyBtF
+ KMPgrvAAxzqFwklQKjwXcthYUQ5OzXTt94m8VqOz0nZGoizaGBFRz1l1q9QQxTv4
+ BUI+2OeyfrzWIaKEu+9gsNbx/OfxmzkH/2quLKJj0FQ+aEYkeGXVtf2DsceQXB1l
+ QtBo/qvBWP2XYk6GzAfwjvI8ifEG4MzXCZxm5SKtQ8UljrCo2T6OArG2FK1FSJDX
+ UlQBXPLYWIJCkC9X8SB6UztPSXPoNS6Ktc0K5XFxvGzIDpxAE+zE4UAe3gWGaROe
+ bwxbuRb9/aHAEk6CU3mrgEUUqet+4qUNRpAzJdwYIN6nOAVghHWiVY4OoCSCgMYY
+ 4B9Aa9bVeDQH9S88X5ux3bDW1DPjiewNIYo+0z53gfY3HZeDfHFWD4iM6vaJjSVb
+ 65trGHyGhElkWub56Q3nHXPOxiAKeRBn3iL54wDNiGMlfw/tkoAkTuaNI7QMYW5z
+ aWJsZS10ZXN0iQI5BBMBAgAjBQJdC/YFAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwEC
+ HgECF4AACgkQOvG9NhmrSWo7YhAAhsp+j13R3bJkv/esJwaDa0dbdm1iJzKB9bkf
+ Bi10qMFmJUEDyHHKL9OBiAgSCKna5F4EuEFnaV9YPs1n6PVE+FX3m5UAfCULR6Qk
+ G064+nd25LWEjSJ3I85WHfJNz/fPr3fQvQNH67GEdTZIr7hrWeZHH1nnpGrZ6xx6
+ sVBxKMp3g8wNXey/DJSaDcry5cQ8cZW2RrUzmfXgcSlwAIVBkmHKA1UtgAGu1kq/
+ LzaCJus7ffMdUZd7IYAM5fIdnNEf0fi8/oKcWiv9TqynGJmu2AxjSUu9EG9idESu
+ bZtXZntM2ODbqepfQ0i44ew9Y3HQS8ciP8uhbQYFZuhbMQobXNIkvO6XA1cePSt2
+ Wh4qCh+ymk9u2qBqb4JkqORyOCJhLdOj6TGU0o9TQ8h0EqjB/Wx69ppA0GFQh5si
+ CG7GnwZhabgiPIxoCPQuhDPv+rXgFx5YiGofMddst9DFn0nR/fFx9hqaTuZ4iiuH
+ UzvqQAMGtIMxiOdJKSSI9emsNfQvXTMHjB+s6Cjiw7nF0+G2ciXlLTPbtTFePZVN
+ seDosuN6uMqwm8KNZVJpU0O0qXu5EdI7ptbOooSR7WZSJdQ+MWyr0vGYKKFGYkwp
+ jl/pDzXCA1d3AxK4Mzmb+KvFQvh+9X7VwI9Pgg4HHE5KeyX8wXhrvT2itPoznnC2
+ 33tCCZmdBxcEXQv2BQEQANtkIv93lunfgRUt7X4hJXT8rw/Y787b+xQ/FcApQnfd
+ 5Zg6pubrMPbOoIKFJG4vzNBSmXGSBmqGIdIYqT2eR9eBDoOv5Cl8tCQ+zNoC2V0Q
+ uCOLZV86hoakduHniCv8cKSbsG6mm5oFP61/82yJLlPUarT+EGSuWCR6W1pGC5WR
+ GElnE9VFpaQ5TZ8A3EBWky2YhdX7vOzbjP8x0Jd/3UFfpNd5gRnxfJLx8rrdKt20
+ xYxR4FPUbu9kQFZIyUr2kxNi30R1+oK4hcXbID6gqxt1oW5PWGkNOXYTY6r/Vv6D
+ zU4Bf4gngWc7hgwbtGRkv2jR8Zv3ZIUbo4ZwMAMMs3Un7RWjjEJkrtUzdaIdjtwM
+ jZIH7xMNz/NK748EB3uMKiIOkmWqHrWkU2aa86V8heuTg/AWluKFG6J+WHzzYnPE
+ pb+WbWbZi2PcIQlDY2EDQyluXN0YKdHeFRXdo5QllN+oZ54e0EVioYzpUlzyD4/m
+ sqfGS/ZF//r7EoTeIbrqBJDbEc9pjB3cphpWWHLxxbo42u27w+Gvda6B+1ad2bZX
+ lBp8tnQl2y5JtMPWW7kVZs5EBPS8OY5NRWqVAFPBg1OlnC6OYC2c1rW7tqZll0T0
+ UORR+zdhayYDtLZhJdD5QcSVLRe26jlyeT4920dOUvjI8ANiRSjSOx3wwcnnhtLt
+ ABEBAAEAD/jW435kO/7VlNYaqKG2qUDHFbljDFnXhCCp9CCZ19ADGJWKRei0xilv
+ lXQiY8cLJka2xjEFzMH8FOWLpBwg/qffrclJsz8IY90Oo3SDFcdSIM48Ao2IeQrL
+ Vswa+W2np9utX9GwkABZHEsC5hDIfpWiBq1+Glx0QVCUakSQZ4txNG1VeGE0xMM5
+ 1+bvlygk3KfMQVjV/31Ngr7YNzLZMaTGI6iSZbDOeHpMdDAMWBVkk2vrxUa01Z7T
+ XJ6n5SNFCb+FfZKyu9xjrdlZswgiT71JaC52ncE7JHjj7pnxI6lSIkc14MHJ2ugk
+ 9WiW84v9ybCyOvEsk2Uh+7BwPfLJCee7SIWcVses55mVUm0YNoU68isQfuuuF2+B
+ OwTaoFT5sDwGlE7exdgk7DyUYxIIB3aRTUfNYeAIVW2uR5GruOgTLUw54KPa1N7O
+ NAfiC4OAfc+s6KBTU/Reozdq6mewLEM0UBccEmBtWevet64p5TWleyaL1TtEPZlX
+ DnrkTXA/ZRTCteuSLwmMDpcFEYc3IcgZIQfvqtHO2cDJ67AjlsgvEDwTV65l1MnN
+ krYIgUh8yFMnFGZPO1jw3mRtuU0QottdPj14Xcn855GS2C4CZ31N3YJq7eR+j5Bh
+ SmXk6k5ys8eF/gw4rbEaBbXFTjw8eb1q7Nwus0W+0yrq4r9/J1fxCADkCFXD95jm
+ sddOHz0Dih6c3KEvpkiPLzITOSp5/nPyd3K7T3xx0L2sBQQt2QGT0zeJCwAB5uTE
+ uTb6AjKcvOn7KlPQQqP9O2mgHo7Zzwr/I69DkYLuLGNCsr0XGi5sn9aaDG0dpFqg
+ 2VzItHVTCefoVF5EOmOaDwB1QYunKwXy3vtgNBJ7LwJb4rm1TgNHsheT/rtUsVYP
+ Z7lAxcLGa2xNm215nf8fzmbjZnCfPEyRpMgqlN+YUzGiDJwfN/fwr8OgFnjDSqOL
+ htbqCiLv7vTbsw6TWIuKr21QVEc3Qcqu96v0dNXk6wr4PTnPmezswSK0+Dc2+5JZ
+ PBkYIkbE5eYxCAD2THuc0iiqX4ZFS8KhbjWvmng08xulj45BpHCkY8oRcaMCRD0c
+ AoEDZyPEfwfb8SMNb2NHzvYi8djM0uU5NzqURKUF22bAAbxkgPyBKp8CBECD0X50
+ O4HpJIYShbfUD/XWKLvxGxKYlrOvwHuUboPPnMqsaMLwezu0MoaUztbTlr/OZgA0
+ 8/hwiyXDcsrf7yg5BPfAZghC1OYEvhEiS43VefAPVv/bvwz3Aml0kQLH14eZxGxL
+ z7mb0qoZJ0Qkn36QQ2L9GcWYRfBDgUXcxfZm6JW+UZWuWMBAPMSaPoRd9yt+MMPC
+ QsmyUrBxT9vTLFysUIJYQThycucvO9Xud/19CADt0G5p1u8uRzKi5Qnbtd79V0v5
+ Obud6LRooXJNprYVLhPE6lr0aeeZK6QDIrmRrZqeByQ2kfY6XP6Tl4/9wZX2ajS/
+ 8GJGmv7HP6vqfJdrOQhnAjQkUYYm72C/eicAsm8e/fiOSrpf1ithyPKOArFqVyth
+ pVkAnteqNDABBNellUbqS//XUKnk/Cd2WWaGe4J/nIuj78MphBEzSO4CmWkdbH5G
+ CPyXdq6LEnDp1By3LNmZTwqbeMUtzVcaJVh6k4jW3OEL9h0PIWQ8Ol42+0Wn57Pn
+ 5vPzGwKaf7zFhJlLRCiCdJJl4QOjXo0jvQHBvelQ8NVpeCCAabWmR9OwldqOhJiJ
+ BD4EGAECAAkFAl0L9gUCGy4CKQkQOvG9NhmrSWrBXSAEGQECAAYFAl0L9gUACgkQ
+ kjr8nlUnTh1fBBAAqFbBzvU44nvoxYtBz0b3ZJ8TBCu8rCAwXEquvb+sXWYj52jh
+ ThW+ke24rX178JZDPu6f8EZwF98voZCDxU5XTexqMugTubVSMmTGbJ63YN99Nl5P
+ jjdwO4r1LaBJ7ef30YLT0ZIUI73dDTolQ0ENHxwtD1xnwx8JvXpmQdDJ9/HINQlU
+ HRSt2qWHRSgrutRLFlO7trWQZXIrUwjY3qgKJMPYc2tJqmfsoD6EeISyZOVOJ7m5
+ xgs1f7UgtbVrHYhQOxRiMIAbMDbukRKwlvp1af8R7e+EoFMIcewaObe6/PUTuOFL
+ 0VkCWoHBBKWAJQJ7vHmzW1tgyrDjchHSUAGMqZOEL84uOCWqMQ/6HCj/zaEqiOqg
+ fuoh54K05lKE5OOIBWITVGgqsT9tli29Lov9vJb2p4csN4kSrdKJpLCgP21V8Utk
+ ZWR1OgDhD7h40Eobpph4KauYoAZiAfu3cb4BzNhUAJ69fJ5lrOlKP1GLmYyQ5jfx
+ s73TDCNfj42OBeUCO6tncTSPXs/9P2FziynVLxkCT8cbVq1C4H87BO7TEW9FuxKJ
+ hLfpVGbt/yG1HrvGJ/kRPk0sXu2md9quWkh6qPHF5EThCOrlfbwLD5Pqvt0ZPZlR
+ xxMSRP9L9w09ZYO1Y7f6gegElTpEh/aFLq1jjUxm8h/cO6A9lJ3Bjxb/xmoKoBAA
+ ieGiHu3YgsN0jmxvlnf7GwB89BNajyH6D0tbdsH+OeSU8e6xItxpe6s5GfonWAt+
+ ngPutPSPhgS5AUx8GrxHGu3Sx+wmGvGKpsH+2Tu1ciUN34K/sfrzKjxCuvnpcBTd
+ rOSiEObnKnb6OI6sW329ZH4z/r5tVoetWr45xspc9IE8TVIuavOlJkuInX5/B3Sa
+ DPwAH/aQAYc71j7yDr7ezFzx07h+pH4ePeGsnfdKy6ZWoQ1mmM35j93ZhH8P8YCC
+ N8lklRhxvZhXkHWt4ns/QzT+QawW2sR8Kkha3ydzx9cEcalmNq7nG+QkwSlliazE
+ 6peVL6ga2H1+XM+1p/P/qCsvbLmRobWSyfMURKkL05iykNjnHOYno+A+NaM3IR12
+ uf5tWvNfiJpNXbUf8Yjh0ep73aZfF1kODIcLR1AHtVVt0Yc05XKEwFfv4kADi1Mh
+ Pp0s4MHRMjPCgPU1j2b5ulGZGZKlCECu3799bw8yb7n9Hpj42hL0ZOEsMdMHbCfd
+ 7eQUNNncVW/KDnwrJQAabr/771xSTauWDDdpEJEc2Mdx3m6e7doQvboYaKYvK4kg
+ x/Vi7pcHS1xQO0zC8BPPBq9tMyy5QGSybfVSvMPo+7nIsumZ9fvJwjjRHreZP4pa
+ nbE9Gt4CrEbWoo6jbcscycpQbduEhGtvwj8UFXe5z+M=
+ =o0Ig
+ -----END PGP PRIVATE KEY BLOCK-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/aliases
new file mode 100644
index 00000000..79f3300f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/aliases
@@ -0,0 +1,7 @@
+destructive
+needs/privileged
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml
new file mode 100644
index 00000000..56bc5546
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml
new file mode 100644
index 00000000..c9b92d00
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml
@@ -0,0 +1,22 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install required packages (Linux)
+ package:
+ name: lvm2
+ state: present
+ when: ansible_system == 'Linux'
+
+- name: Test lvg module
+ block:
+ - import_tasks: setup.yml
+
+ - import_tasks: test_indempotency.yml
+
+ - import_tasks: test_grow_reduce.yml
+
+ - import_tasks: test_pvresize.yml
+ always:
+ - import_tasks: teardown.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml
new file mode 100644
index 00000000..e63c2d64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml
@@ -0,0 +1,13 @@
+- name: "Create files to use as disk devices"
+ command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10"
+ with_sequence: 'count=2'
+
+- name: "Create a loop device for each file"
+ command: "losetup --show -f {{ remote_tmp_dir }}/img{{ item }}"
+ with_sequence: 'count=2'
+ register: loop_devices
+
+- name: "Assign names to the disks to work on"
+ set_fact:
+ loop_device1: "{{ loop_devices.results[0] }}"
+ loop_device2: "{{ loop_devices.results[1] }}"
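losetup --show -f prints the allocated loop device (for example /dev/loop0) on stdout, which is why the per-item results are registered and loop_device1.stdout / loop_device2.stdout are later passed to the pvs option of lvg. A condensed sketch of that wiring for a single device (remote_tmp_dir is provided by the setup_remote_tmp_dir dependency):

- name: Attach a backing file to a free loop device (sketch)
  command: "losetup --show -f {{ remote_tmp_dir }}/img1"
  register: loop_device

- name: Create a volume group on the loop device (sketch)
  lvg:
    vg: testvg
    pvs: "{{ loop_device.stdout }}"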
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml
new file mode 100644
index 00000000..ed662f1e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml
@@ -0,0 +1,17 @@
+- name: Remove test volume group
+ lvg:
+ vg: testvg
+ state: absent
+
+- name: Detach loop device
+ command: "losetup -d {{ item.stdout }}"
+ loop: "{{ loop_devices.results|default([]) }}"
+ when:
+ - item.stdout is defined
+ - item.stdout is match("/dev/.*")
+
+- name: Remove device files
+ file:
+ path: "{{ remote_tmp_dir }}/img{{ item }}"
+ state: absent
+ with_sequence: 'count={{ loop_devices.results|length }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml
new file mode 100644
index 00000000..1e988045
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml
@@ -0,0 +1,33 @@
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert that testvg spans only the first disk"
+ assert:
+ that:
+ - ansible_lvm.pvs[loop_device1.stdout].vg == "testvg"
+ - 'loop_device2.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device2.stdout].vg == ""'
+
+- name: "Extend to second disk AND reduce from the first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device2.stdout }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert that testvg spans only the second disk"
+ assert:
+ that:
+ - 'loop_device1.stdout not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device1.stdout].vg == ""'
+ - ansible_lvm.pvs[loop_device2.stdout].vg == "testvg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml
new file mode 100644
index 00000000..5007e56a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml
@@ -0,0 +1,15 @@
+- name: Create volume group on disk device
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: Create the volume group again to verify idempotence
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ register: repeat_vg_create
+
+- name: Do all assertions to verify expected results
+ assert:
+ that:
+ - repeat_vg_create is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml
new file mode 100644
index 00000000..9112f8bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml
@@ -0,0 +1,76 @@
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+
+- name: Gets current vg size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: Increase the size of the backing file
+ command: "dd if=/dev/zero bs=8MiB count=1 of={{ remote_tmp_dir }}/img1 conv=notrunc oflag=append"
+
+- name: "Reread size of file associated with loop_device1"
+ command: "losetup -c {{ loop_device1.stdout }}"
+
+- name: "Reruns lvg with pvresize:no"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ pvresize: no
+ register: cmd_result
+
+- assert:
+ that:
+ - cmd_result is not changed
+
+- name: Gets current vg size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is still 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: "Reruns lvg with pvresize:yes and check_mode:yes"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ pvresize: yes
+ check_mode: yes
+ register: cmd_result
+
+- name: Assert that the module reported the state as changed
+ assert:
+ that:
+ - cmd_result is changed
+
+- name: Gets current vg size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is still 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: "Reruns lvg with pvresize:yes"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1.stdout }}"
+ pvresize: yes
+
+- name: Gets current vg size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is now 16777216B
+ assert:
+ that:
+ - "'16777216B' == cmd_result.stdout"
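The size checks above follow from the setup: each backing file is created with 10 MiB (bs=1M count=10), and with LVM's default 4 MiB extent size plus metadata overhead that yields 8388608 B (two 4 MiB extents) of usable PV space. Appending another 8 MiB (oflag=append) and re-reading the device with losetup -c makes 16777216 B (four extents) available, but only once pvresize: yes lets lvg grow the physical volume. A condensed sketch of that final step, with the values as in the tasks above:

- name: Grow the PV after the backing device changed size (sketch)
  lvg:
    vg: testvg
    pvs: "{{ loop_device1.stdout }}"
    pvresize: yes

- name: Confirm the new PV size (sketch)
  shell: vgs -v testvg -o pv_size --noheading --units b | xargs
  register: vg_size

- assert:
    that:
      - vg_size.stdout == '16777216B'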
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt
new file mode 100644
index 00000000..44c18a24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIJAJyHQUcqSOQpMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV
+BAYTAkJFMRMwEQYDVQQIDApWbGFhbmRlcmVuMQ0wCwYDVQQHDARHZW50MQ4wDAYD
+VQQKDAVEYWdpdDELMAkGA1UECwwCSVQxHjAcBgNVBAMMFWxvY2FsaG9zdC5sb2Nh
+bGRvbWFpbjAeFw0xODExMjgxMjQ3MzlaFw0yODExMjUxMjQ3MzlaMG4xCzAJBgNV
+BAYTAkJFMRMwEQYDVQQIDApWbGFhbmRlcmVuMQ0wCwYDVQQHDARHZW50MQ4wDAYD
+VQQKDAVEYWdpdDELMAkGA1UECwwCSVQxHjAcBgNVBAMMFWxvY2FsaG9zdC5sb2Nh
+bGRvbWFpbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANLqBGgIF44U
+zRhNupGwSKAeTIXT4nXPIJKlIi1kTSQwtywQmBw6leBlvj1qwU73+nhqwSclIrYx
+3ltvrpKHAWG1jqqsExuLRaKRdWgx1YC2WPgZwYC0C+LkE8vs/Kl1v0HgPuPMkzeK
+hDctQfWOaykFOy0mB/BfP2vSVoEckffMlDjG/bHwNt7cG8BnqKd8e9VR+ZcBazFK
+bnKhht0ldR84Wbp+5wpuCr1R1R0ltdO2O+LACrXzvH9Kf0CGhKXGccwGpi43eXyK
+CDbubkGcLjg9Fo7kZ6uW5nU2vHJ1iDGnvUl8X96qKoOFU0EvBveCisc1bY433uG1
+NjEZ1xLPGK8CAwEAAaNQME4wHQYDVR0OBBYEFO6nDFzJBZBLJt4yza+VrUEOy3Zl
+MB8GA1UdIwQYMBaAFO6nDFzJBZBLJt4yza+VrUEOy3ZlMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQELBQADggEBALTq0ycKhEr/3KOsfKBup4bs5Oqv0x7ePaUNxyef
+JSyKTjD0gPY8YNAeNA7gU5XGjMr4h9cNpRmJ0TyfwWJxH4uK4d2p5k1ZpQWKv8jG
+4U9sZTQzkh8nqRBaEl94qsiCIRCllb6VveWbIGE6eqt4rT0V9l9fvbw+hSXdiYXT
+KkkX5VZxctV2OMkbP1mbOYIA22jqZKQiIvAVcMA6vSnlDAJKTi9/kw99/zjUQ9Jb
+8bF2gcnzAijJAWsCqf8hZVq9+pogptBd/bkKUCuTA4MACX5ppgQltkgX2mLrj6Ep
+Po2euqzUZREzKl2cUaP85m+8tClYk0Wjfm0RjxPRa8fgUfM=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key
new file mode 100644
index 00000000..48ddf644
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDS6gRoCBeOFM0Y
+TbqRsEigHkyF0+J1zyCSpSItZE0kMLcsEJgcOpXgZb49asFO9/p4asEnJSK2Md5b
+b66ShwFhtY6qrBMbi0WikXVoMdWAtlj4GcGAtAvi5BPL7Pypdb9B4D7jzJM3ioQ3
+LUH1jmspBTstJgfwXz9r0laBHJH3zJQ4xv2x8Dbe3BvAZ6infHvVUfmXAWsxSm5y
+oYbdJXUfOFm6fucKbgq9UdUdJbXTtjviwAq187x/Sn9AhoSlxnHMBqYuN3l8igg2
+7m5BnC44PRaO5GerluZ1NrxydYgxp71JfF/eqiqDhVNBLwb3gorHNW2ON97htTYx
+GdcSzxivAgMBAAECggEALDCRucYwQTmEexoWA94+aSXP/J4XLX23ImJs1bvVGccH
+KblUVV3E9x36DN4oIEZ+eOpNC8mRC0FJiDjPB643kOQ8PvAMlNHKRjRZt/nw9KW/
+4ENtMm0GrIQCzdAaY9ritoeoRYwgMBvadcEKt8seEpsg+eWk9izOmeWY8DYvMw6N
+hNu5zQLkMGTTqfDxkl7KnyKPhjW/++eUdgsTIA31/wHsJSiNR5Pkoy2fOVtNO7JN
+EghcKE3lYKKzOW6vg0LBY8xVQ4KMbCVgnYNI3MU9qpG2bYxy1hlWIrsjrt9PyRp8
+jDSKnLD4Zvv4L6gj2fhelES/YQ/055YyzG801Q+gUQKBgQDohqr5fwQj8Awrc0K/
+DodaIkVwXHkQAhSWidrqxZXmtn4ZsgDA3V82ZTIjWe2v7ES5U4jeYKGoUweCUodr
+PPT0IKEFmS2Fq1AZZx7Ry+ihA7gw6PV5DbG5DwyNYlhn6F6Bghl8pKAcXPGuwtgd
+BKXj7utEp57Q9ue3P00cGNokKQKBgQDoNNFMPnfv5UQp+K0A89cKW8q6sf93/ul4
+kjh72q/KbK57ouhWPNib3tJLvkl7P8S45nrUGQZtd6zLhU/6SzAnGGnNZ7gNAs3l
+SWidcmZDqIiIXh6BF4/4WxXMXJdhfrux9/O8Xk89v+EDAbLbN8jSrvy87+6mOmRM
+r/MAXToxFwKBgHpGbtxalbPMRKoIp33OxxB32yoWBreLUIZFIfC5THWRW8hpWYoS
+H0J8fpwmax5K0WzfZ6cBC6F3YAiBG6Mh3/IMwoAuJ8kV6D4jgwpx/vfE+/QEXSl2
+MRIOvtwObkzd3eyenIZ2D5g6rADphznjOtUcy21D8/kRDZLIX+U5kGTZAoGBAIYg
+/ETuUJlh9V3JJyXFtBFntFLjPo4x0Oq0i6v/RkvHO4JvN4WY4AYpT5Aw+oEW9KtZ
+dtnNGslgt49YEjqh886ha3wazVW2qPgozyUjT68FSth6hWRMF/19n7nMQiUu73x9
+nWzRjTQ+Aduav5WhQ39vVM5OSav7TrR9bgBn2ZVBAoGBAN4Hle/PIFzApQYQRIT0
+wPpOvEVx56+c70ZMvLv5UgmY2jLKZKFUV6oGGUZlJXfh1ZMnXShWY1pjvi/FnIIi
+AKDB9N17DE5AmpzuXFjU3YwXde98MjuUY03P3yaFQ4cXYryqgZxuMPgyGFM9vtMd
+WXFdvCtm0c3WMpPJSr9kgy6Q
+-----END PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py
new file mode 100644
index 00000000..01b257e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import asyncore
+import os.path
+import ssl
+import sys
+
+# Handle TLS and non-TLS support
+try:
+ import smtpd_tls
+ HAS_TLS = True
+except ImportError:
+ import smtpd
+ HAS_TLS = False
+ print('Library smtpd-tls is missing or not supported, hence starttls is NOT supported.')
+
+# Handle custom ports
+port = '25:465'
+if len(sys.argv) > 1:
+ port = sys.argv[1]
+ports = port.split(':')
+if len(ports) > 1:
+ port1, port2 = int(ports[0]), int(ports[1])
+else:
+ port1, port2 = int(port), None
+
+# Handle custom certificate
+basename = os.path.splitext(sys.argv[0])[0]
+certfile = basename + '.crt'
+if len(sys.argv) > 2:
+ certfile = sys.argv[2]
+
+# Handle custom key
+keyfile = basename + '.key'
+if len(sys.argv) > 3:
+ keyfile = sys.argv[3]
+
+try:
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+except AttributeError:
+ ssl_ctx = None
+ if HAS_TLS:
+ print('Python ssl library does not support SSLContext, hence starttls and TLS are not supported.')
+ import smtpd
+
+if HAS_TLS and ssl_ctx is not None:
+ print('Using %s and %s' % (certfile, keyfile))
+ ssl_ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
+
+ print('Start SMTP server on port', port1)
+ smtp_server1 = smtpd_tls.DebuggingServer(('127.0.0.1', port1), None, ssl_ctx=ssl_ctx, starttls=True)
+ if port2:
+ print('Start TLS SMTP server on port', port2)
+ smtp_server2 = smtpd_tls.DebuggingServer(('127.0.0.1', port2), None, ssl_ctx=ssl_ctx, starttls=False)
+else:
+ print('Start SMTP server on port', port1)
+ smtp_server1 = smtpd.DebuggingServer(('127.0.0.1', port1), None)
+ if port2:
+ print('WARNING: TLS is NOT supported on this system, not listening on port %s.' % port2)
+
+asyncore.loop()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml
new file mode 100644
index 00000000..714b662d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml
@@ -0,0 +1,90 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# TODO: Our current implementation does not handle SMTP authentication
+
+# NOTE: If the system does not support smtpd-tls (python 2.6 and older) we do basic tests
+- name: Attempt to install smtpd-tls
+ pip:
+ name: smtpd-tls
+ state: present
+ ignore_errors: yes
+ register: smtpd_tls
+
+- name: Install test smtpserver
+ copy:
+ src: '{{ item }}'
+ dest: '{{ output_dir }}/{{ item }}'
+ loop:
+ - smtpserver.py
+ - smtpserver.crt
+ - smtpserver.key
+
+# FIXME: Verifying the mail after it was sent would be nice
+# This would require either dumping the content or registering the async task output
+- name: Start test smtpserver
+ shell: '{{ ansible_python.executable }} {{ output_dir }}/smtpserver.py 10025:10465'
+ async: 30
+ poll: 0
+ register: smtpserver
+
+- name: Send a basic test-mail
+ mail:
+ port: 10025
+ subject: Test mail 1 (smtp)
+ secure: never
+
+- name: Send a test-mail with body and specific recipient
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 2 (smtp + body)
+ body: Test body 2
+ secure: never
+
+- name: Send a test-mail with attachment
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 3 (smtp + body + attachment)
+ body: Test body 3
+ attach: /etc/group
+ secure: never
+
+# NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+- name: Send a test-mail using starttls
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 4 (smtp + starttls + body + attachment)
+ body: Test body 4
+ attach: /etc/group
+ secure: starttls
+ ignore_errors: yes
+ register: starttls_support
+
+# NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+- name: Send a test-mail using TLS
+ mail:
+ port: 10465
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 5 (smtp + tls + body + attachment)
+ body: Test body 5
+ attach: /etc/group
+ secure: always
+ ignore_errors: yes
+ register: tls_support
+
+- fail:
+ msg: Sending mail using starttls failed.
+ when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
+
+- fail:
+ msg: Sending mail using TLS failed.
+ when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/aliases
new file mode 100644
index 00000000..57c4f1c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/aliases
@@ -0,0 +1,2 @@
+needs/root
+unsupported
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml
new file mode 100644
index 00000000..811bb211
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml
@@ -0,0 +1,156 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the mas module.
+# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+# Test preparation
+- name: Uninstall Rested to ensure consistent starting point
+ mas:
+ id: 421879749
+ state: absent
+ become: yes
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is uninstalled
+ assert:
+ that:
+ - install_status.stat.exists == false
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+# Installation
+- name: Check if Rested needs to be installed
+ mas:
+ id: 421879749
+ state: present
+ register: install_check
+ check_mode: yes
+
+- name: Ensure that the status would have changed
+ assert:
+ that:
+ - install_check is changed
+ - install_check.msg == "Installed 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is not yet installed
+ assert:
+ that:
+ - install_status.stat.exists == false
+
+- name: Install Rested
+ mas:
+ id: 421879749
+ state: present
+ register: install
+
+- name: Ensure that the status changed
+ assert:
+ that:
+ - install is changed
+ - install.msg == "Installed 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is installed
+ assert:
+ that:
+ - install_status.stat.exists == true
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+- name: Install Rested again
+ mas:
+ id: 421879749
+ state: present
+ register: install_again
+
+- name: Ensure that the status is unchanged (already installed)
+ assert:
+ that:
+ - install_again is not changed
+ - "'msg' not in install_again"
+
+# Uninstallation
+- name: Check if Rested needs to be uninstalled
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall_check
+ become: yes
+ check_mode: yes
+
+- name: Ensure that the status would have changed
+ assert:
+ that:
+ - uninstall_check is changed
+ - uninstall_check.msg == "Uninstalled 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is not yet uninstalled
+ assert:
+ that:
+ - install_status.stat.exists == true
+
+- name: Uninstall Rested
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall
+ become: yes
+
+- name: Ensure that the status changed
+ assert:
+ that:
+ - uninstall is changed
+ - uninstall.msg == "Uninstalled 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: uninstall_status
+
+- name: Ensure the app is uninstalled
+ assert:
+ that:
+ - uninstall_status.stat.exists == false
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+- name: Uninstall Rested again
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall_again
+ become: yes
+
+- name: Ensure that the status is unchanged (already uninstalled)
+ assert:
+ that:
+ - uninstall_again is not changed
+ - "'msg' not in uninstall_again"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml
new file mode 100644
index 00000000..73b314ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml
new file mode 100644
index 00000000..20b00b54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: request reload with invalid API key
+ memset_dns_reload:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request reload and poll
+ memset_dns_reload:
+ api_key: "{{ api_key }}"
+ poll: true
+ register: result
+
+- name: check reload succeeded
+ assert:
+ that:
+ - result is changed
+ - result is successful
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml
new file mode 100644
index 00000000..73b314ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml
new file mode 100644
index 00000000..6cc72818
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: query API with invalid API key
+ memset_memstore_info:
+ api_key: 'wa9aerahhie0eekee9iaphoorovooyia'
+ name: 'mstestyaa1'
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request memstore info
+ memset_memstore_info:
+ api_key: "{{ api_key }}"
+ name: 'mstestyaa1'
+ register: result
+
+- name: check the request succeeded
+ assert:
+ that:
+ - result is not changed
+ - result is successful
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml
new file mode 100644
index 00000000..73b314ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml
@@ -0,0 +1 @@
+--- \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml
new file mode 100644
index 00000000..1a8c776e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: query API with invalid API key
+ memset_server_info:
+ api_key: 'wa9aerahhie0eekee9iaphoorovooyia'
+ name: 'testyaa1'
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request server info
+ memset_server_info:
+ api_key: "{{ api_key }}"
+ name: 'testyaa1'
+ register: result
+
+- name: check the request succeeded
+ assert:
+ that:
+ - result is not changed
+ - result is successful
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml
new file mode 100644
index 00000000..abc61dd7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml
@@ -0,0 +1,121 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: create random string
+ set_fact:
+ zone_name: "{{ 65535 | random | string }}.ansible.example.com"
+
+- name: create zone with incorrect API key
+ memset_zone:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: test creating zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ check_mode: true
+ register: result
+
+- name: check if the zone would be created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ register: result
+
+- name: check the zone was created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create duplicate zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ register: result
+
+- name: ensure we can't create duplicate zones
+ assert:
+ that:
+ - result is not changed
+
+- name: test deleting zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ check_mode: true
+ register: result
+
+- name: check if the zone would be deleted
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete empty zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ force: false
+ register: result
+
+- name: check the zone was deleted
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create zone for deletion test
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ register: result
+
+- name: check the zone was created for the deletion test
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete zone with force
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ force: true
+ register: result
+
+- name: ensure force is respected
+ assert:
+ that:
+ - result is changed
+ - result is successful
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml
new file mode 100644
index 00000000..1f8f2eba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml
@@ -0,0 +1,2 @@
+---
+random_string: "baiqui8ci6miedoo9eivohJ0aixei7oo"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml
new file mode 100644
index 00000000..73b314ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml
@@ -0,0 +1 @@
+--- \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml
new file mode 100644
index 00000000..7edb8092
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml
@@ -0,0 +1,148 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: create domain with invalid API key
+ memset_zone_domain:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: create domain over 250 chars
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com'
+ zone: "{{ target_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: test domain length is validated
+ assert:
+ that:
+ - result is failed
+ - "'Zone domain must be less than 250 characters in length' in result.stderr"
+
+- name: create domain in non-existent zone
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "non-existent-zone"
+ ignore_errors: true
+ register: result
+
+- name: fail if zone does not exist
+ assert:
+ that:
+ - result is failed
+ - "'does not exist, cannot create domain.' in result.stderr"
+
+- name: create domain in non-unique zone
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ duplicate_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: fail if the zone is not unique
+ assert:
+ that:
+ - result is failed
+ - "'matches multiple zones, cannot create domain' in result.stderr"
+
+- name: test creating domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ check_mode: true
+ register: result
+
+- name: create domain with check mode
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: check the domain was created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create existing domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: check creating an existing domain does not change
+ assert:
+ that:
+ - result is not changed
+
+- name: test deleting domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ check_mode: true
+ register: result
+
+- name: delete domain with check mode
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: check the domain was deleted
+ assert:
+ that:
+ - result is changed
+
+- name: delete non-existent domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: check deleting a non-existent domain does not change
+ assert:
+ that:
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml
new file mode 100644
index 00000000..022e6a3c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml
@@ -0,0 +1,4 @@
+---
+test_domain: ansible.example.com
+target_zone: ansible-dns-zone
+duplicate_zone: ansible-dns-zone-dupe
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml
new file mode 100644
index 00000000..d8b245a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml
@@ -0,0 +1,230 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: create record with incorrect API key
+ memset_zone_record:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: create record in non-existent zone
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "a-non-existent-zone"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- name: assert that record is not created
+ assert:
+ that:
+ - "'DNS zone a-non-existent-zone does not exist.' in result.msg"
+ - result is not successful
+
+- name: create record in non-unique zone
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ duplicate_zone }}"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- name: assert that record is not created
+ assert:
+ that:
+ - "'ansible-dns-zone-dupe matches multiple zones.' in result.msg"
+ - result is not successful
+
+- name: create record with invalid priority
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: SRV
+ address: "0 5269 hostname.example.com"
+ record: "_jabber._tcp"
+ priority: 1001
+ ignore_errors: true
+ register: result
+
+- name: assert that priority was out of range
+ assert:
+ that:
+ - "'Priority must be in the range 0 > 999 (inclusive).' in result.msg"
+ - result is not successful
+
+- name: create record with address longer than 250 chars
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: CNAME
+ address: "aaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"
+ record: "aaa.ansible.com"
+ ignore_errors: true
+ register: result
+
+- name: assert that address was longer than allowed
+ assert:
+ that:
+ - "'Address must be less than 250 characters in length.' in result.msg"
+ - result is not successful
+
+- name: create record longer than 63 chars
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ ignore_errors: true
+ register: result
+
+- name: assert that record was longer than allowed
+ assert:
+ that:
+ - "'Record must be less than 63 characters in length.' in result.msg"
+ - result is not successful
+
+- name: create record which cannot have relative enabled
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ relative: true
+ ignore_errors: true
+ register: result
+
+- name: assert that setting relative failed
+ assert:
+ that:
+ - "'Relative is only valid for CNAME, MX, NS and SRV record types' in result.msg"
+ - result is not successful
+
+- name: test creating valid A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ check_mode: true
+ register: result
+
+- name: assert that result would have changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: actually create valid A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create valid SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: test deleting A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ check_mode: true
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: actually delete A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete non-existent SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml
new file mode 100644
index 00000000..ae0598bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml
@@ -0,0 +1,3 @@
+---
+test_zone: ansible-dns-record-tests
+duplicate_zone: ansible-dns-zone-dupe
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/aliases
new file mode 100644
index 00000000..547c800b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/aliases
@@ -0,0 +1,9 @@
+destructive
+needs/target/setup_epel
+shippable/posix/group2
+skip/osx
+skip/macos
+skip/freebsd
+skip/aix
+skip/python2.6 # python-daemon package used in integration tests requires >=2.7
+skip/rhel # FIXME
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml
new file mode 100644
index 00000000..71b22f44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml
@@ -0,0 +1,4 @@
+process_root: /opt/httpd_echo
+process_file: "{{ process_root }}/httpd_echo.py"
+process_venv: "{{ process_root }}/venv"
+process_run_cmd: "{{ process_venv }}/bin/python {{ process_file }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py
new file mode 100644
index 00000000..56147037
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py
@@ -0,0 +1,50 @@
+# (c) 2020, Simon Kelly <simongdkelly@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import daemon
+
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+ def write_to_output(stream, content):
+ stream.write(content)
+except ImportError:
+ from http.server import BaseHTTPRequestHandler, HTTPServer
+
+ def write_to_output(stream, content):
+ stream.write(bytes(content, "utf-8"))
+
+
+hostname = "localhost"
+server_port = 8082
+
+
+class EchoServer(BaseHTTPRequestHandler):
+ def do_GET(self):
+ self.send_response(200)
+ self.send_header("Content-type", "text/plain")
+ self.end_headers()
+ write_to_output(self.wfile, self.path)
+
+
+def run_webserver():
+ webServer = HTTPServer((hostname, server_port), EchoServer)
+ print("Server started http://%s:%s" % (hostname, server_port))
+
+ try:
+ webServer.serve_forever()
+ except KeyboardInterrupt:
+ pass
+
+ webServer.server_close()
+ print("Server stopped.")
+
+
+if __name__ == "__main__":
+ context = daemon.DaemonContext()
+
+ with context:
+ run_webserver()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml
new file mode 100644
index 00000000..3fb2e6e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml
@@ -0,0 +1,20 @@
+- name: "{{ reason }} ('up')"
+ command: "curl -sf http://localhost:8082/hello"
+ args:
+ warn: false
+ when: service_state == 'up'
+ register: curl_result
+ until: not curl_result.failed
+ retries: 5
+ delay: 1
+
+- name: "{{ reason }} ('down')"
+ command: "curl -sf http://localhost:8082/hello"
+ args:
+ warn: false
+ register: curl_result
+ failed_when: curl_result.rc == 0
+ when: service_state == 'down'
+ until: not curl_result.failed
+ retries: 5
+ delay: 1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml
new file mode 100644
index 00000000..447140e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml
@@ -0,0 +1,78 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+
+ - name: create required directories
+ become: yes
+ file:
+ path: "{{ item }}"
+ state: directory
+ loop:
+ - /var/lib/monit
+ - /var/run/monit
+ - "{{ process_root }}"
+
+ - name: install monit
+ become: yes
+ package:
+ name: monit
+ state: present
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
+
+ - name: monit config
+ become: yes
+ template:
+ src: "monitrc.j2"
+ dest: "{{ monitrc }}"
+
+ - name: copy process file
+ become: yes
+ copy:
+ src: httpd_echo.py
+ dest: "{{ process_file }}"
+
+ - name: install dependencies
+ pip:
+ name: "{{ item }}"
+ virtualenv: "{{ process_venv }}"
+ loop:
+ - setuptools==44
+ - python-daemon
+
+ - name: restart monit
+ become: yes
+ service:
+ name: monit
+ state: restarted
+
+ - include_tasks: test.yml
+
+ always:
+ - name: stop monit
+ become: yes
+ service:
+ name: monit
+ state: stopped
+
+ - name: uninstall monit
+ become: yes
+ package:
+ name: monit
+ state: absent
+
+ - name: remove process files
+ file:
+ path: "{{ process_root }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml
new file mode 100644
index 00000000..c36997fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml
@@ -0,0 +1,28 @@
+# order is important
+- import_tasks: test_reload_present.yml
+
+- import_tasks: test_state.yml
+ vars:
+ state: stopped
+ initial_state: up
+ expected_state: down
+
+- import_tasks: test_state.yml
+ vars:
+ state: started
+ initial_state: down
+ expected_state: up
+
+- import_tasks: test_state.yml
+ vars:
+ state: unmonitored
+ initial_state: up
+ expected_state: down
+
+- import_tasks: test_state.yml
+ vars:
+ state: monitored
+ initial_state: down
+ expected_state: up
+
+- import_tasks: test_errors.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml
new file mode 100644
index 00000000..4520fd8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml
@@ -0,0 +1,6 @@
+- name: Check that an error occurs when a wrong process name is used
+ monit:
+ name: missing
+ state: started
+ register: result
+ failed_when: result is not skip and (result is success or result is not failed)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml
new file mode 100644
index 00000000..31f37e74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml
@@ -0,0 +1,60 @@
+- name: reload monit when process is missing
+ monit:
+ name: httpd_echo
+ state: reloaded
+ register: result
+
+- name: check that state is changed
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: test process not present
+ monit:
+ name: httpd_echo
+ state: present
+ timeout: 5
+ register: result
+ failed_when: result is not skip and result is success
+
+- name: test monitor missing process
+ monit:
+ name: httpd_echo
+ state: monitored
+ register: result
+ failed_when: result is not skip and result is success
+
+- name: start process
+ shell: "{{ process_run_cmd }}"
+
+- import_tasks: check_state.yml
+ vars:
+ reason: verify service running
+ service_state: "up"
+
+- name: add process config
+ blockinfile:
+ path: "{{ monitrc }}"
+ block: |
+ check process httpd_echo with matching "httpd_echo"
+ start program = "{{ process_run_cmd }}"
+ stop program = "/bin/sh -c 'kill `pgrep -f httpd_echo`'"
+ if failed host localhost port 8082 then restart
+
+- name: restart monit
+ service:
+ name: monit
+ state: restarted
+
+- name: test process present again
+ monit:
+ name: httpd_echo
+ state: present
+ register: result
+
+- name: check that state is unchanged
+ assert:
+ that:
+ - result is success
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml
new file mode 100644
index 00000000..f78fbc55
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml
@@ -0,0 +1,33 @@
+- import_tasks: check_state.yml
+ vars:
+ reason: verify initial service state
+ service_state: "{{ initial_state }}"
+
+- name: change httpd_echo process state to {{ state }}
+ monit:
+ name: httpd_echo
+ state: "{{ state }}"
+ register: result
+
+- name: check that state changed
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- import_tasks: check_state.yml
+ vars:
+ reason: check service state after action
+ service_state: "{{ expected_state }}"
+
+- name: try change state again to {{ state }}
+ monit:
+ name: httpd_echo
+ state: "{{ state }}"
+ register: result
+
+- name: check that state is not changed
+ assert:
+ that:
+ - result is success
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2
new file mode 100644
index 00000000..aba574c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2
@@ -0,0 +1,13 @@
+set daemon 2
+set logfile /var/log/monit.log
+set idfile /var/lib/monit/id
+set statefile /var/lib/monit/state
+set pidfile /var/run/monit.pid
+
+set eventqueue
+ basedir /var/lib/monit/events
+ slots 100
+
+set httpd port 2812 and
+ use address localhost
+ allow localhost
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml
new file mode 100644
index 00000000..7b769cb4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml
@@ -0,0 +1 @@
+monitrc: "/etc/monit.conf"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml
new file mode 100644
index 00000000..cb76bac9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml
@@ -0,0 +1 @@
+monitrc: "/etc/monitrc"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml
new file mode 100644
index 00000000..cb76bac9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml
@@ -0,0 +1 @@
+monitrc: "/etc/monitrc"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml
new file mode 100644
index 00000000..5254ded9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml
@@ -0,0 +1 @@
+monitrc: "/etc/monit/monitrc"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/aliases
new file mode 100644
index 00000000..0a4db037
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/aliases
@@ -0,0 +1,7 @@
+notification/mqtt
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml
new file mode 100644
index 00000000..86f3d043
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_mosquitto
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml
new file mode 100644
index 00000000..74e06f32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: ubuntu.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_release != 'trusty'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml
new file mode 100644
index 00000000..71ff3e90
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml
@@ -0,0 +1,142 @@
+- name: Install pip packages
+ pip:
+ name: paho-mqtt>=1.4.0
+ state: present
+
+- name: Send a test message to the non-TLS endpoint
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo
+ qos: 1
+ client_id: me001
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+- name: Send a test message to TLS1.1 endpoint, no client version specified
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ port: 8883
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+- name: Send a test message to TLS1.2 endpoint, no client version specified
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ port: 8884
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+# TODO(Uncomment when TLS1.3 is supported in mosquitto and the Ubuntu version used)
+#
+# - name: Send a test message to TLS1.3 endpoint
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# port: 8885
+# register: result
+
+#- assert:
+# that:
+# - result is success
+
+- name: Send a message, client TLS1.1, server (required) TLS1.2 - Expected failure
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ tls_version: tlsv1.1
+ port: 8884
+ register: result
+ failed_when: result is success
+
+- assert:
+ that:
+ - result is success
+
+# TODO(Uncomment when TLS1.3 is supported in mosquitto and the Ubuntu version used)
+#
+# - name: Send a message, client TLS1.1, server (required) TLS1.3 - Expected failure
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# tls_version: tlsv1.1
+# port: 8885
+# register: result
+# failed_when: result is success
+
+# - assert:
+# that:
+# - result is success
+
+- name: Send a message, client TLS1.2, server (required) TLS1.1 - Expected failure
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ tls_version: tlsv1.2
+ port: 8883
+ register: result
+ failed_when: result is success
+
+- assert:
+ that:
+ - result is success
+
+# TODO(Uncomment when TLS1.3 is supported in mosquitto and the Ubuntu version used)
+#
+# - name: Send a message, client TLS1.2, server (required) TLS1.3 - Expected failure
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# tls_version: tlsv1.2
+# port: 8885
+# register: result
+# failed_when: result is success
+
+# - assert:
+# that:
+# - result is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/main.yml
new file mode 100644
index 00000000..aa6c4e7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_a_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/nios_a_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/nios_a_record_idempotence.yml
new file mode 100644
index 00000000..0bbf106c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_a_record/tasks/nios_a_record_idempotence.yml
@@ -0,0 +1,77 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup a_record
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an a_record
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider: "{{ nios_provider }}"
+ register: a_record_create1
+
+- name: recreate an a_record
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider: "{{ nios_provider }}"
+ register: a_record_create2
+
+- name: add a comment to an existing a_record
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: a_record_update1
+
+- name: add a comment to an existing a_record
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: a_record_update2
+
+- name: remove an a_record from the system
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: a_record_delete1
+
+- name: remove an a_record from the system
+ nios_a_record:
+ name: a.ansible.com
+ ipv4: 192.168.10.1
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: a_record_delete2
+
+- assert:
+ that:
+ - "a_record_create1.changed"
+ - "not a_record_create2.changed"
+ - "a_record_update1.changed"
+ - "not a_record_update2.changed"
+ - "a_record_delete1.changed"
+ - "not a_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/main.yml
new file mode 100644
index 00000000..87cd4ffa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_aaaa_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/nios_aaaa_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/nios_aaaa_record_idempotence.yml
new file mode 100644
index 00000000..2df01d34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_aaaa_record/tasks/nios_aaaa_record_idempotence.yml
@@ -0,0 +1,77 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup aaaa record
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an aaaa record
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_create1
+
+- name: recreate an aaaa record
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: present
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_create2
+
+- name: add a comment to an existing aaaa record
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_update1
+
+- name: add a comment to an existing aaaa record
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_update2
+
+- name: remove an aaaa record from the system
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_delete1
+
+- name: remove an aaaa record from the system
+ nios_aaaa_record:
+ name: aaaa.ansible.com
+ ipv6: 2001:0db8:85a3:0000:0000:8a2e:0370:7334
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: aaaa_record_delete2
+
+- assert:
+ that:
+ - "aaaa_record_create1.changed"
+ - "not aaaa_record_create2.changed"
+ - "aaaa_record_update1.changed"
+ - "not aaaa_record_update2.changed"
+ - "aaaa_record_delete1.changed"
+ - "not aaaa_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/main.yml
new file mode 100644
index 00000000..4c7612ad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_cname_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/nios_cname_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/nios_cname_record_idempotence.yml
new file mode 100644
index 00000000..f450c716
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_cname_record/tasks/nios_cname_record_idempotence.yml
@@ -0,0 +1,77 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup cname record
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create a cname record
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: cname_record_create1
+
+- name: recreate a cname record
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: cname_record_create2
+
+- name: add a comment to an existing cname record
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: cname_record_update1
+
+- name: add a comment to an existing cname record
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: cname_record_update2
+
+- name: remove a cname record from the system
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: cname_record_delete1
+
+- name: remove a cname record from the system
+ nios_cname_record:
+ name: cname.ansible.com
+ canonical: realhost.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: cname_record_delete2
+
+- assert:
+ that:
+ - "cname_record_create1.changed"
+ - "not cname_record_create2.changed"
+ - "cname_record_update1.changed"
+ - "not cname_record_update2.changed"
+ - "cname_record_delete1.changed"
+ - "not cname_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/defaults/main.yaml
new file mode 100644
index 00000000..ebf6ffc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: [] \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/main.yml
new file mode 100644
index 00000000..9dbc7f6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_dns_view_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/nios_dns_view_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/nios_dns_view_idempotence.yml
new file mode 100644
index 00000000..b9dc9fec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_dns_view/tasks/nios_dns_view_idempotence.yml
@@ -0,0 +1,58 @@
+- name: delete dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: configure a new dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_dns_create1
+
+- name: configure a new dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_dns_create2
+
+- name: update the comment for dns view
+ nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_dns_update1
+
+- name: update the comment for dns view
+ nios_dns_view:
+ name: ansible-dns
+ comment: this is an example comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_dns_update2
+
+- name: delete dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_dns_delete1
+
+- name: delete dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_dns_delete2
+
+- assert:
+ that:
+ - "nios_dns_create1.changed"
+ - "not nios_dns_create2.changed"
+ - "nios_dns_update1.changed"
+ - "not nios_dns_update2.changed"
+ - "nios_dns_delete1.changed"
+ - "not nios_dns_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/defaults/main.yaml
new file mode 100644
index 00000000..ebf6ffc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: [] \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/main.yml
new file mode 100644
index 00000000..18f1c6e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_host_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/nios_host_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/nios_host_record_idempotence.yml
new file mode 100644
index 00000000..636431ee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_host_record/tasks/nios_host_record_idempotence.yml
@@ -0,0 +1,126 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup ipv4 host record
+ nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an ipv4 host record
+ nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create1
+
+- name: recreate an ipv4 host record
+ nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create2
+
+- name: add a comment to an existing host record
+ nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_update1
+
+- name: add a comment to an existing host record
+ nios_host_record:
+ name: host.ansible.com
+ ipv4:
+ - address: 192.168.10.1
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_update2
+
+- name: remove a host record from the system
+ nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: ipv4_delete1
+
+- name: remove a host record from the system
+ nios_host_record:
+ name: host.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: ipv4_delete2
+
+- name: create an ipv4 host record bypassing DNS
+ nios_host_record:
+ name: host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create3
+
+- name: recreate an ipv4 host record bypassing DNS
+ nios_host_record:
+ name: host
+ ipv4:
+ - address: 192.168.10.1
+ dns: false
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create4
+
+- name: create an ipv4 host record via DHCP and MAC
+ nios_host_record:
+ name: host
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create5
+
+- name: recreate an ipv4 host record via DHCP and MAC
+ nios_host_record:
+ name: host
+ ipv4:
+ - address: 192.168.10.1
+ dhcp: true
+ mac: 00-80-C8-E3-4C-BD
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_create6
+
+- assert:
+ that:
+ - "ipv4_create1.changed"
+ - "not ipv4_create2.changed"
+ - "ipv4_update1.changed"
+ - "not ipv4_update2.changed"
+ - "ipv4_delete1.changed"
+ - "not ipv4_delete2.changed"
+ - "ipv4_create3.changed"
+ - "not ipv4_create4.changed"
+ - "ipv4_create5.changed"
+ - "not ipv4_create6.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/main.yml
new file mode 100644
index 00000000..cc4159f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_mx_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/nios_mx_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/nios_mx_record_idempotence.yml
new file mode 100644
index 00000000..65b1e8f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_mx_record/tasks/nios_mx_record_idempotence.yml
@@ -0,0 +1,84 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup mx record
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an mx record
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider: "{{ nios_provider }}"
+ register: mx_record_create1
+
+- name: recreate an mx record
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: present
+ provider: "{{ nios_provider }}"
+ register: mx_record_create2
+
+- name: add a comment to an existing mx record
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: mx_record_update1
+
+- name: add a comment to an existing mx record
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: mx_record_update2
+
+- name: remove an mx record from the system
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: mx_record_delete1
+
+- name: remove an mx record from the system
+ nios_mx_record:
+ name: ansible.com
+ mx: mailhost.ansible.com
+ preference: 0
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: mx_record_delete2
+
+- assert:
+ that:
+ - "mx_record_create1.changed"
+ - "not mx_record_create2.changed"
+ - "mx_record_update1.changed"
+ - "not mx_record_update2.changed"
+ - "mx_record_delete1.changed"
+ - "not mx_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/main.yml
new file mode 100644
index 00000000..2ba83279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_naptr_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/nios_naptr_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/nios_naptr_record_idempotence.yml
new file mode 100644
index 00000000..2303eff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_naptr_record/tasks/nios_naptr_record_idempotence.yml
@@ -0,0 +1,91 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup naptr record
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an naptr record
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: naptr_record_create1
+
+- name: recreate an naptr record
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: naptr_record_create2
+
+- name: add a comment to an existing naptr record
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: naptr_record_update1
+
+- name: add a comment to an existing naptr record
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: naptr_record_update2
+
+- name: remove an naptr record from the system
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: naptr_record_delete1
+
+- name: remove an naptr record from the system
+ nios_naptr_record:
+ name: '*.subscriber-100.ansiblezone.com'
+ order: 1000
+ preference: 10
+ replacement: replacement1.network.ansiblezone.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: naptr_record_delete2
+
+- assert:
+ that:
+ - "naptr_record_create1.changed"
+ - "not naptr_record_create2.changed"
+ - "naptr_record_update1.changed"
+ - "not naptr_record_update2.changed"
+ - "naptr_record_delete1.changed"
+ - "not naptr_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/defaults/main.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/defaults/main.yaml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/main.yml
new file mode 100644
index 00000000..6a980084
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_network_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/nios_network_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/nios_network_idempotence.yml
new file mode 100644
index 00000000..3b5dbc4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network/tasks/nios_network_idempotence.yml
@@ -0,0 +1,80 @@
+- name: cleanup a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: configure a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_create1
+
+- name: configure a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_create2
+
+#- assert:
+# that:
+# - "nios_ipv4_create1.changed"
+# - "not nios_ipv4_create2.changed"
+
+- name: set dhcp options for a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_update1
+
+- name: set dhcp options for a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ comment: this is a test comment
+ options:
+ - name: domain-name
+ value: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_update2
+
+- name: remove a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_remove1
+
+- name: remove a network ipv4
+ nios_network:
+ network: 192.168.10.0/24
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_ipv4_remove2
+
+- name: configure a network ipv6
+ nios_network:
+ network: fe80::/64
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_ipv6_create1
+
+- assert:
+ that:
+ - "nios_ipv4_create1.changed"
+ - "not nios_ipv4_create2.changed"
+ - "nios_ipv4_update1.changed"
+ - "not nios_ipv4_update2.changed"
+ - "nios_ipv4_remove1.changed"
+ - "not nios_ipv4_remove2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/defaults/main.yaml
new file mode 100644
index 00000000..ebf6ffc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/main.yml
new file mode 100644
index 00000000..be20b77a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_network_view_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/nios_network_view_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/nios_network_view_idempotence.yml
new file mode 100644
index 00000000..ad13e3a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_network_view/tasks/nios_network_view_idempotence.yml
@@ -0,0 +1,58 @@
+- name: cleanup a new network view
+ nios_network_view:
+ name: ansible
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: configure a new network view
+ nios_network_view:
+ name: ansible
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_create1
+
+- name: configure a new network view
+ nios_network_view:
+ name: ansible
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_create2
+
+- name: update the comment for network view
+ nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_update1
+
+- name: update the comment for network view
+ nios_network_view:
+ name: ansible
+ comment: this is an example comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_update2
+
+- name: remove the network view
+ nios_network_view:
+ name: ansible
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_delete1
+
+- name: remove the network view
+ nios_network_view:
+ name: ansible
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: nios_network_view_delete2
+
+- assert:
+ that:
+ - "nios_network_view_create1.changed"
+ - "not nios_network_view_create2.changed"
+ - "nios_network_view_update1.changed"
+ - "not nios_network_view_update2.changed"
+ - "nios_network_view_delete1.changed"
+ - "not nios_network_view_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/main.yml
new file mode 100644
index 00000000..15e8dad0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_ptr_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/nios_ptr_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/nios_ptr_record_idempotence.yml
new file mode 100644
index 00000000..a233d80f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_ptr_record/tasks/nios_ptr_record_idempotence.yml
@@ -0,0 +1,83 @@
+---
+
+- name: create an ipv4 ptr record
+ nios_ptr_record:
+ name: ptr.ansible.com
+ ptrdname: ptr.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ provider: "{{ nios_provider }}"
+ view: default
+ register: ipv4_ptr_create1
+
+- name: create the same ipv4 ptr record
+ nios_ptr_record:
+ name: ptr.ansible.com
+ ptrdname: ptr.ansible.com
+ ipv4: 192.168.10.1
+ state: present
+ view: default
+ provider: "{{ nios_provider }}"
+ register: ipv4_ptr_create2
+
+- name: add a comment to an existing ipv4 ptr record
+ nios_ptr_record:
+ name: ptr.ansible.com
+ ptrdname: ptr.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ view: default
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_ptr_update1
+
+- name: add the same comment to the same ipv4 ptr record
+ nios_ptr_record:
+ name: ptr.ansible.com
+ ptrdname: ptr.ansible.com
+ ipv4: 192.168.10.1
+ comment: this is a test comment
+ view: default
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv4_ptr_update2
+
+- name: remove a ptr record from the system
+ nios_ptr_record:
+ name: ptr.ansible.com
+ ptrdname: ptr.ansible.com
+ ipv4: 192.168.10.1
+ view: default
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: ipv4_ptr_delete1
+
+- name: remove the same ptr record from the system
+ nios_ptr_record:
+ ptrdname: ptr.ansible.com
+ name: ptr.ansible.com
+ ipv4: 192.168.10.1
+ view: default
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: ipv4_ptr_delete2
+
+- name: create an ipv6 ptr record
+ nios_ptr_record:
+ ptrdname: ptr6.ansible.com
+ name: ptr6.ansible.com
+ ipv6: "2002:8ac3:802d:1242:20d:60ff:fe38:6d16"
+ view: default
+ state: present
+ provider: "{{ nios_provider }}"
+ register: ipv6_ptr_create1
+
+- assert:
+ that:
+ - "ipv4_ptr_create1.changed"
+ - "not ipv4_ptr_create2.changed"
+ - "ipv4_ptr_update1.changed"
+ - "not ipv4_ptr_update2.changed"
+ - "ipv4_ptr_delete1.changed"
+ - "not ipv4_ptr_delete2.changed"
+ - "ipv6_ptr_create1.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/defaults/main.yaml
new file mode 100644
index 00000000..9ef5ba51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/meta/main.yaml
new file mode 100644
index 00000000..1b01a972
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/main.yml
new file mode 100644
index 00000000..25f4cc9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_srv_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/nios_srv_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/nios_srv_record_idempotence.yml
new file mode 100644
index 00000000..8f3dbfc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_srv_record/tasks/nios_srv_record_idempotence.yml
@@ -0,0 +1,98 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup srv record
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create an srv record
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider: "{{ nios_provider }}"
+ register: srv_record_create1
+
+- name: recreate an srv record
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: present
+ provider: "{{ nios_provider }}"
+ register: srv_record_create2
+
+- name: add a comment to an existing srv record
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: srv_record_update1
+
+- name: add a comment to an existing srv record
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ comment: this is a test comment
+ state: present
+ provider: "{{ nios_provider }}"
+ register: srv_record_update2
+
+- name: remove an srv record from the system
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: srv_record_delete1
+
+- name: remove an srv record from the system
+ nios_srv_record:
+ name: ansible.com
+ port: 5080
+ priority: 10
+ target: service1.ansible.com
+ weight: 10
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: srv_record_delete2
+
+- assert:
+ that:
+ - "srv_record_create1.changed"
+ - "not srv_record_create2.changed"
+ - "srv_record_update1.changed"
+ - "not srv_record_update2.changed"
+ - "srv_record_delete1.changed"
+ - "not srv_record_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/defaults/main.yaml
new file mode 100644
index 00000000..ebf6ffc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/main.yml
new file mode 100644
index 00000000..d5023c66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_txt_record_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/nios_txt_record_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/nios_txt_record_idempotence.yml
new file mode 100644
index 00000000..3b7357af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_txt_record/tasks/nios_txt_record_idempotence.yml
@@ -0,0 +1,80 @@
+- name: cleanup the parent object
+ nios_zone:
+ name: ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create the parent object
+ nios_zone:
+ name: ansible.com
+ state: present
+ provider: "{{ nios_provider }}"
+
+- name: cleanup txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: create txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ provider: "{{ nios_provider }}"
+ register: txt_create1
+
+- name: create txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ provider: "{{ nios_provider }}"
+ register: txt_create2
+
+- assert:
+ that:
+ - "txt_create1.changed"
+ - "not txt_create2.changed"
+
+- name: add a comment to an existing txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ comment: mycomment
+ provider: "{{ nios_provider }}"
+ register: txt_update1
+
+- name: add a comment to an existing txt record
+ nios_txt_record:
+ name: txt.ansible.com
+ text: mytext
+ state: present
+ comment: mycomment
+ provider: "{{ nios_provider }}"
+ register: txt_update2
+
+- name: remove a txt record from the system
+ nios_txt_record:
+ name: txt.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: txt_delete1
+
+- name: remove a txt record from the system
+ nios_txt_record:
+ name: txt.ansible.com
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: txt_delete2
+
+- assert:
+ that:
+ - "txt_create1.changed"
+ - "not txt_create2.changed"
+ - "txt_update1.changed"
+ - "not txt_update2.changed"
+ - "txt_delete1.changed"
+ - "not txt_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/aliases
new file mode 100644
index 00000000..b3138dc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/nios
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/defaults/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/defaults/main.yaml
new file mode 100644
index 00000000..ebf6ffc9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/defaults/main.yaml
@@ -0,0 +1,3 @@
+---
+testcase: "*"
+test_items: []
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/meta/main.yaml
new file mode 100644
index 00000000..9472935b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_nios_tests
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/main.yml
new file mode 100644
index 00000000..e346a3cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/main.yml
@@ -0,0 +1,6 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: nios_zone_idempotence.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/nios_zone_idempotence.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/nios_zone_idempotence.yml
new file mode 100644
index 00000000..03d40aaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nios_zone/tasks/nios_zone_idempotence.yml
@@ -0,0 +1,68 @@
+- name: cleanup dns view instance
+ nios_dns_view:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: cleanup test zone
+ nios_zone:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+
+- name: configure a zone on the system
+ nios_zone:
+ name: ansible-dns
+ state: present
+ provider: "{{ nios_provider }}"
+ register: zone_create1
+
+- name: configure a zone on the system
+ nios_zone:
+ name: ansible-dns
+ state: present
+ provider: "{{ nios_provider }}"
+ register: zone_create2
+
+- name: update the comment and ext attributes for an existing zone
+ nios_zone:
+ name: ansible-dns
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider: "{{ nios_provider }}"
+ register: zone_update1
+
+- name: update the comment and ext attributes for an existing zone
+ nios_zone:
+ name: ansible-dns
+ comment: this is an example comment
+ extattrs:
+ Site: west-dc
+ state: present
+ provider: "{{ nios_provider }}"
+ register: zone_update2
+
+- name: remove the dns zone
+ nios_zone:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: zone_delete1
+
+- name: remove the dns zone
+ nios_zone:
+ name: ansible-dns
+ state: absent
+ provider: "{{ nios_provider }}"
+ register: zone_delete2
+
+- assert:
+ that:
+ - "zone_create1.changed"
+ - "not zone_create2.changed"
+ - "zone_update1.changed"
+ - "not zone_update2.changed"
+ - "zone_delete1.changed"
+ - "not zone_delete2.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/aliases
new file mode 100644
index 00000000..3141aee6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/aliases
@@ -0,0 +1,6 @@
+shippable/posix/group2
+nomad_job_info
+destructive
+skip/aix
+skip/centos6
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl
new file mode 100644
index 00000000..abcc6854
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl
@@ -0,0 +1,396 @@
+# There can only be a single job definition per file. This job is named
+# "example" so it will create a job with the ID and Name "example".
+
+# The "job" stanza is the top-most configuration option in the job
+# specification. A job is a declarative specification of tasks that Nomad
+# should run. Jobs have a globally unique name and one or many task groups,
+# which are themselves collections of one or many tasks.
+#
+# For more information and examples on the "job" stanza, please see
+# the online documentation at:
+#
+#
+# https://www.nomadproject.io/docs/job-specification/job.html
+#
+job "example" {
+ # The "region" parameter specifies the region in which to execute the job.
+ # If omitted, this inherits the default region name of "global".
+ # region = "global"
+ #
+ # The "datacenters" parameter specifies the list of datacenters which should
+ # be considered when placing this task. This must be provided.
+ datacenters = ["dc1"]
+
+ # The "type" parameter controls the type of job, which impacts the scheduler's
+ # decision on placement. This configuration is optional and defaults to
+ # "service". For a full list of job types and their differences, please see
+ # the online documentation.
+ #
+ # For more information, please see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/jobspec/schedulers.html
+ #
+ type = "service"
+
+
+ # The "constraint" stanza defines additional constraints for placing this job,
+ # in addition to any resource or driver constraints. This stanza may be placed
+ # at the "job", "group", or "task" level, and supports variable interpolation.
+ #
+ # For more information and examples on the "constraint" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/constraint.html
+ #
+ # constraint {
+ # attribute = "${attr.kernel.name}"
+ # value = "linux"
+ # }
+
+ # The "update" stanza specifies the update strategy of task groups. The update
+ # strategy is used to control things like rolling upgrades, canaries, and
+ # blue/green deployments. If omitted, no update strategy is enforced. The
+ # "update" stanza may be placed at the job or task group level. When placed at the
+ # job, it applies to all groups within the job. When placed at both the job and
+ # group level, the stanzas are merged with the group's taking precedence.
+ #
+ # For more information and examples on the "update" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/update.html
+ #
+ update {
+ # The "max_parallel" parameter specifies the maximum number of updates to
+ # perform in parallel. In this case, this specifies to update a single task
+ # at a time.
+ max_parallel = 1
+
+ # The "min_healthy_time" parameter specifies the minimum time the allocation
+ # must be in the healthy state before it is marked as healthy and unblocks
+ # further allocations from being updated.
+ min_healthy_time = "10s"
+
+ # The "healthy_deadline" parameter specifies the deadline in which the
+ # allocation must be marked as healthy after which the allocation is
+ # automatically transitioned to unhealthy. Transitioning to unhealthy will
+ # fail the deployment and potentially roll back the job if "auto_revert" is
+ # set to true.
+ healthy_deadline = "3m"
+
+ # The "progress_deadline" parameter specifies the deadline in which an
+ # allocation must be marked as healthy. The deadline begins when the first
+ # allocation for the deployment is created and is reset whenever an allocation
+ # as part of the deployment transitions to a healthy state. If no allocation
+ # transitions to the healthy state before the progress deadline, the
+ # deployment is marked as failed.
+ progress_deadline = "10m"
+
+ # The "auto_revert" parameter specifies if the job should auto-revert to the
+ # last stable job on deployment failure. A job is marked as stable if all the
+ # allocations as part of its deployment were marked healthy.
+ auto_revert = false
+
+ # The "canary" parameter specifies that changes to the job that would result
+ # in destructive updates should create the specified number of canaries
+ # without stopping any previous allocations. Once the operator determines the
+ # canaries are healthy, they can be promoted which unblocks a rolling update
+ # of the remaining allocations at a rate of "max_parallel".
+ #
+ # Further, setting "canary" equal to the count of the task group allows
+ # blue/green deployments. When the job is updated, a full set of the new
+ # version is deployed and upon promotion the old version is stopped.
+ canary = 0
+ }
+ # The migrate stanza specifies the group's strategy for migrating off of
+ # draining nodes. If omitted, a default migration strategy is applied.
+ #
+ # For more information on the "migrate" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/migrate.html
+ #
+ migrate {
+ # Specifies the number of task groups that can be migrated at the same
+ # time. This number must be less than the total count for the group as
+ # (count - max_parallel) will be left running during migrations.
+ max_parallel = 1
+
+ # Specifies the mechanism by which allocation health is determined. The
+ # potential values are "checks" or "task_states".
+ health_check = "checks"
+
+ # Specifies the minimum time the allocation must be in the healthy state
+ # before it is marked as healthy and unblocks further allocations from being
+ # migrated. This is specified using a label suffix like "30s" or "15m".
+ min_healthy_time = "10s"
+
+ # Specifies the deadline in which the allocation must be marked as healthy
+ # after which the allocation is automatically transitioned to unhealthy. This
+ # is specified using a label suffix like "2m" or "1h".
+ healthy_deadline = "5m"
+ }
+ # The "group" stanza defines a series of tasks that should be co-located on
+ # the same Nomad client. Any task within a group will be placed on the same
+ # client.
+ #
+ # For more information and examples on the "group" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/group.html
+ #
+ group "cache" {
+ # The "count" parameter specifies the number of task groups that should
+ # be running under this group. This value must be non-negative and defaults
+ # to 1.
+ count = 1
+
+ # The "restart" stanza configures a group's behavior on task failure. If
+ # left unspecified, a default restart policy is used based on the job type.
+ #
+ # For more information and examples on the "restart" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/restart.html
+ #
+ restart {
+ # The number of attempts to run the job within the specified interval.
+ attempts = 2
+ interval = "30m"
+
+ # The "delay" parameter specifies the duration to wait before restarting
+ # a task after it has failed.
+ delay = "15s"
+
+ # The "mode" parameter controls what happens when a task has restarted
+ # "attempts" times within the interval. "delay" mode delays the next
+ # restart until the next interval. "fail" mode does not restart the task
+ # if "attempts" has been hit within the interval.
+ mode = "fail"
+ }
+
+ # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
+ # instead of a hard disk requirement. Clients using this stanza should
+ # not specify disk requirements in the resources stanza of the task. All
+ # tasks in this group will share the same ephemeral disk.
+ #
+ # For more information and examples on the "ephemeral_disk" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
+ #
+ ephemeral_disk {
+ # When sticky is true and the task group is updated, the scheduler
+ # will prefer to place the updated allocation on the same node and
+ # will migrate the data. This is useful for tasks that store data
+ # that should persist across allocation updates.
+ # sticky = true
+ #
+ # Setting migrate to true results in the allocation directory of a
+ # sticky allocation being migrated.
+ # migrate = true
+ #
+ # The "size" parameter specifies the size in MB of shared ephemeral disk
+ # between tasks in the group.
+ size = 300
+ }
+
+ # The "affinity" stanza enables operators to express placement preferences
+ # based on node attributes or metadata.
+ #
+ # For more information and examples on the "affinity" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/affinity.html
+ #
+ # affinity {
+ # attribute specifies the name of a node attribute or metadata
+ # attribute = "${node.datacenter}"
+
+
+ # value specifies the desired attribute value. In this example Nomad
+ # will prefer placement in the "us-west1" datacenter.
+ # value = "us-west1"
+
+
+ # weight can be used to indicate relative preference
+ # when the job has more than one affinity. It defaults to 50 if not set.
+ # weight = 100
+ # }
+
+
+ # The "spread" stanza allows operators to increase the failure tolerance of
+ # their applications by specifying a node attribute that allocations
+ # should be spread over.
+ #
+ # For more information and examples on the "spread" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/spread.html
+ #
+ # spread {
+ # attribute specifies the name of a node attribute or metadata
+ # attribute = "${node.datacenter}"
+
+
+ # targets can be used to define desired percentages of allocations
+ # for each targeted attribute value.
+ #
+ # target "us-east1" {
+ # percent = 60
+ # }
+ # target "us-west1" {
+ # percent = 40
+ # }
+ # }
+
+ # The "task" stanza creates an individual unit of work, such as a Docker
+ # container, web application, or batch processing.
+ #
+ # For more information and examples on the "task" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/task.html
+ #
+ task "redis" {
+ # The "driver" parameter specifies the task driver that should be used to
+ # run the task.
+ driver = "docker"
+
+ # The "config" stanza specifies the driver configuration, which is passed
+ # directly to the driver to start the task. The details of configurations
+ # are specific to each driver, so please see specific driver
+ # documentation for more information.
+ config {
+ image = "redis:3.2"
+
+ port_map {
+ db = 6379
+ }
+ }
+
+ # The "artifact" stanza instructs Nomad to download an artifact from a
+ # remote source prior to starting the task. This provides a convenient
+ # mechanism for downloading configuration files or data needed to run the
+ # task. It is possible to specify the "artifact" stanza multiple times to
+ # download multiple artifacts.
+ #
+ # For more information and examples on the "artifact" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/artifact.html
+ #
+ # artifact {
+ # source = "http://foo.com/artifact.tar.gz"
+ # options {
+ # checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
+ # }
+ # }
+
+
+ # The "logs" stanza instructs the Nomad client on how many log files and
+ # the maximum size of those logs files to retain. Logging is enabled by
+ # default, but the "logs" stanza allows for finer-grained control over
+ # the log rotation and storage configuration.
+ #
+ # For more information and examples on the "logs" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/logs.html
+ #
+ # logs {
+ # max_files = 10
+ # max_file_size = 15
+ # }
+
+ # The "resources" stanza describes the requirements a task needs to
+ # execute. Resource requirements include memory, network, cpu, and more.
+ # This ensures the task will execute on a machine that contains enough
+ # resource capacity.
+ #
+ # For more information and examples on the "resources" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/resources.html
+ #
+ resources {
+ cpu = 500 # 500 MHz
+ memory = 256 # 256MB
+
+ network {
+ mbits = 10
+ port "db" {}
+ }
+ }
+ # The "service" stanza instructs Nomad to register this task as a service
+ # in the service discovery engine, which is currently Consul. This will
+ # make the service addressable after Nomad has placed it on a host and
+ # port.
+ #
+ # For more information and examples on the "service" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/service.html
+ #
+ service {
+ name = "redis-cache"
+ tags = ["global", "cache"]
+ port = "db"
+
+ check {
+ name = "alive"
+ type = "tcp"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+
+ # The "template" stanza instructs Nomad to manage a template, such as
+ # a configuration file or script. This template can optionally pull data
+ # from Consul or Vault to populate runtime configuration data.
+ #
+ # For more information and examples on the "template" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/template.html
+ #
+ # template {
+ # data = "---\nkey: {{ key \"service/my-key\" }}"
+ # destination = "local/file.yml"
+ # change_mode = "signal"
+ # change_signal = "SIGHUP"
+ # }
+
+ # The "template" stanza can also be used to create environment variables
+ # for tasks that prefer those to config files. The task will be restarted
+ # when data pulled from Consul or Vault changes.
+ #
+ # template {
+ # data = "KEY={{ key \"service/my-key\" }}"
+ # destination = "local/file.env"
+ # env = true
+ # }
+
+ # The "vault" stanza instructs the Nomad client to acquire a token from
+ # a HashiCorp Vault server. The Nomad servers must be configured and
+ # authorized to communicate with Vault. By default, Nomad will inject
+ # the token into the job via an environment variable and make the token
+ # available to the "template" stanza. The Nomad client handles the renewal
+ # and revocation of the Vault token.
+ #
+ # For more information and examples on the "vault" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/vault.html
+ #
+ # vault {
+ # policies = ["cdn", "frontend"]
+ # change_mode = "signal"
+ # change_signal = "SIGHUP"
+ # }
+
+ # Controls the timeout between signalling a task that it will be killed
+ # and actually killing it. If not set, a default is used.
+ # kill_timeout = "20s"
+ }
+ }
+}
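The integration tasks added later in this diff submit this job file through the community.general.nomad_job module using a file lookup. A minimal sketch of that pattern, assuming a dev-mode agent listening on localhost without TLS (the same setup the tasks below use):

- name: Deploy the example job from job.hcl
  community.general.nomad_job:
    host: localhost
    use_ssl: false
    state: present
    content: "{{ lookup('file', 'job.hcl') }}"
  register: job_deployed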
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml
new file mode 100644
index 00000000..f4c99a2a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml
@@ -0,0 +1,4 @@
+---
+dependencies:
+ - setup_pkg_mgr
+ - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml
new file mode 100644
index 00000000..1e42e7b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml
@@ -0,0 +1,106 @@
+- name: Skip unsupported platforms
+ meta: end_play
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>=')
+
+- name: Install Nomad and test
+ vars:
+ nomad_version: 0.12.4
+ nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip
+ nomad_cmd: '{{ output_dir }}/nomad'
+ block:
+
+ - name: register pyOpenSSL version
+ command: '{{ ansible_python_interpreter }} -c ''import OpenSSL; print(OpenSSL.__version__)'''
+ register: pyopenssl_version
+
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+
+ - name: Install python-nomad
+ pip:
+ name: python-nomad
+ register: result
+ until: result is success
+
+ - name: Install jmespath
+ pip:
+ name: jmespath
+ register: result
+ until: result is success
+
+ - when: pyopenssl_version.stdout is version('0.15', '>=')
+ block:
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ output_dir }}/privatekey.pem'
+
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.openssl_certificate:
+ path: '{{ output_dir }}/cert.pem'
+ csr_path: '{{ output_dir }}/csr.csr'
+ privatekey_path: '{{ output_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
+
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+
+ - set_fact:
+ nomad_arch: '386'
+ when: ansible_architecture == 'i386'
+
+ - set_fact:
+ nomad_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+
+ - name: Download nomad binary
+ unarchive:
+ src: '{{ nomad_uri }}'
+ dest: '{{ output_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+
+ - vars:
+ remote_dir: '{{ echo_output_dir.stdout }}'
+ block:
+
+ - command: echo {{ output_dir }}
+ register: echo_output_dir
+
+ - name: Run integration tests
+ block:
+ - name: Start nomad (dev mode enabled)
+ shell: nohup {{ nomad_cmd }} agent -dev </dev/null >/dev/null 2>&1 &
+
+ - name: Wait for nomad to come up
+ wait_for:
+ host: localhost
+ port: 4646
+ delay: 10
+ timeout: 60
+
+ - import_tasks: nomad_job.yml
+ always:
+
+ - name: kill nomad
+ shell: pkill nomad
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml
new file mode 100644
index 00000000..f2137f4e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml
@@ -0,0 +1,108 @@
+---
+
+- name: run check deploy nomad job
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_check_deployed
+ check_mode: true
+
+- name: run create nomad job
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ force_start: true
+ register: job_deployed
+
+- name: get nomad job deployed
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ name: example
+ register: get_nomad_job
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- name: assert job is deployed and tasks are changed
+ assert:
+ that:
+ - job_check_deployed is changed
+ - job_deployed is changed
+ - get_nomad_job.result[0].ID == "example"
+ - list_nomad_jobs.result | length == 1
+
+- name: run check deploy job idempotence
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_check_deployed_idempotence
+ check_mode: true
+
+- name: run create nomad job idempotence
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deployed_idempotence
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- debug:
+ msg: "{{ list_nomad_jobs }}"
+
+- name: run check delete nomad job
+ nomad_job:
+ host: localhost
+ state: absent
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deleted_check
+ check_mode: true
+
+- name: run delete nomad job
+ nomad_job:
+ host: localhost
+ state: absent
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deleted
+
+- name: get job deleted
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ name: example
+ register: get_job_delete
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- debug:
+ msg: "{{ list_nomad_jobs }}"
+
+- name: assert idempotence
+ assert:
+ that:
+ - job_check_deployed_idempotence is not changed
+ - job_deployed_idempotence is not changed
+ - job_deleted_check is changed
+ - job_deleted is changed
+ - get_job_delete.result[0].Stop
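Because the setup tasks install jmespath, the registered nomad_job_info output can also be sliced with Ansible's json_query filter. A hedged sketch: the ID field is asserted above, while Status is an assumed field name from the Nomad jobs API and is not checked by these tasks:

- name: Show the status of the example job
  debug:
    msg: "{{ list_nomad_jobs.result | json_query('[?ID==`example`].Status') }}"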
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/aliases
new file mode 100644
index 00000000..e09dd445
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group2
+destructive
+skip/aix
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml
new file mode 100644
index 00000000..392c3590
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_gnutar
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml
new file mode 100644
index 00000000..ed5a16a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml
@@ -0,0 +1,40 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the npm module
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+# -------------------------------------------------------------
+# Setup steps
+
+# expand remote path
+- command: 'echo {{ output_dir }}'
+ register: echo
+- set_fact:
+ remote_dir: '{{ echo.stdout }}'
+
+- include_tasks: run.yml
+ vars:
+ nodejs_version: '{{ item }}'
+ nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+ with_items:
+ - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
+ - 8.0.0 # provides npm 5.0.0
+ - 8.2.0 # provides npm 5.3.0 (output change with this version)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml
new file mode 100644
index 00000000..53b374fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml
@@ -0,0 +1,2 @@
+- include_tasks: setup.yml
+- include_tasks: test.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml
new file mode 100644
index 00000000..4e0d908e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml
@@ -0,0 +1,6 @@
+- name: 'Download NPM'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/npm/{{ nodejs_path }}.tar.gz'
+ dest: '{{ output_dir }}'
+ remote_src: yes
+ creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml
new file mode 100644
index 00000000..ea2dd5b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml
@@ -0,0 +1,69 @@
+- name: 'Remove any node modules'
+ file:
+ path: '{{ remote_dir }}/node_modules'
+ state: absent
+
+- vars:
+ # sample: node-v8.2.0-linux-x64.tar.xz
+ node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin'
+ package: 'iconv-lite'
+ block:
+ - shell: npm --version
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_version
+
+ - debug:
+ var: npm_version.stdout
+
+ - name: 'Install simple package without dependency'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_install
+
+ - assert:
+ that:
+ - npm_install is success
+ - npm_install is changed
+
+ - name: 'Reinstall simple package without dependency'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_reinstall
+
+ - name: Check there is no change
+ assert:
+ that:
+ - npm_reinstall is success
+ - not (npm_reinstall is changed)
+
+ - name: 'Manually delete package'
+ file:
+ path: '{{ remote_dir }}/node_modules/{{ package }}'
+ state: absent
+
+ - name: 'reinstall simple package'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_fix_install
+
+ - name: Check result is changed and successful
+ assert:
+ that:
+ - npm_fix_install is success
+ - npm_fix_install is changed
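These tasks only exercise path-scoped installs into remote_dir. For contrast, a hedged sketch of a global install with the same module; the global option comes from the npm module's parameter list and is not used by the tests above:

- name: Install the same package globally (illustrative only)
  community.general.npm:
    name: iconv-lite
    executable: '{{ node_path }}/npm'
    global: yes
    state: present
  environment:
    PATH: '{{ node_path }}:{{ ansible_env.PATH }}'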
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/aliases
new file mode 100644
index 00000000..4ced5274
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group1
+skip/osx
+skip/macos
+skip/rhel8.0
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml
new file mode 100644
index 00000000..f6efa750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml
@@ -0,0 +1,27 @@
+---
+# defaults file for the odbc integration tests
+my_user: 'ansible_user'
+my_pass: 'md5d5e044ccd9b4b8adc89e8fed2eb0db8a'
+my_pass_decrypted: '6EjMk<hcX3<5(Yp?Xi5aQ8eS`a#Ni'
+dsn: "DRIVER={PostgreSQL};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+packages:
+ RedHat:
+ - postgresql-odbc
+ - unixODBC
+ - unixODBC-devel
+ - gcc
+ - gcc-c++
+ Debian:
+ - odbc-postgresql
+ - unixodbc
+ - unixodbc-dev
+ - gcc
+ - g++
+ Suse:
+ - psqlODBC
+ - unixODBC
+ - unixODBC-devel
+ - gcc
+ - gcc-c++
+ FreeBSD:
+ - unixODBC
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml
new file mode 100644
index 00000000..85b1dc7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml
new file mode 100644
index 00000000..ab808b92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml
@@ -0,0 +1,7 @@
+- name: "Install {{ ansible_os_family }} Libraries"
+ package:
+ name: "{{ packages[ansible_os_family] }}"
+
+- name: "Install pyodbc"
+ pip:
+ name: pyodbc
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml
new file mode 100644
index 00000000..88bf4076
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml
@@ -0,0 +1,149 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+#
+# Test for proper failures without pyodbc
+#
+# Some of the docker images already have pyodbc installed on them
+- include_tasks: no_pyodbc.yml
+ when: ansible_os_family != 'FreeBSD' and ansible_os_family != 'Suse' and ansible_os_family != 'Debian'
+
+#
+# Get pyodbc installed
+#
+- include_tasks: install_pyodbc.yml
+
+#
+# Test missing parameters & invalid DSN
+#
+- include_tasks: negative_tests.yml
+
+#
+# Setup DSN per env
+#
+- name: Changing DSN for Suse
+ set_fact:
+ dsn: "DRIVER={PSQL};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+ when: ansible_os_family == 'Suse'
+
+- name: Changing DSN for Debian
+ set_fact:
+ dsn: "DRIVER={PostgreSQL Unicode};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+ when: ansible_os_family == 'Debian'
+
+#
+# Set up the test database
+#
+- name: Create a user to run the tests with
+ postgresql_user:
+ name: "{{ my_user }}"
+ password: "{{ my_pass }}"
+ encrypted: 'yes'
+ role_attr_flags: "SUPERUSER"
+ db: postgres
+ become_user: "{{ pg_user }}"
+ become: True
+
+- name: Create a table
+ odbc:
+ dsn: "{{ dsn }}"
+ query: |
+ CREATE TABLE films (
+ code char(5) CONSTRAINT firstkey PRIMARY KEY,
+ title varchar(40) NOT NULL,
+ did integer NOT NULL,
+ date_prod date,
+ kind varchar(10),
+ len interval hour to minute
+ );
+ become_user: "{{ pg_user }}"
+ become: True
+ register: results
+
+- assert:
+ that:
+ - results is changed
+
+#
+# Insert records
+#
+- name: Insert a record without params
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "INSERT INTO films (code, title, did, date_prod, kind, len) VALUES ('asdfg', 'My First Movie', 1, '2019-01-12', 'SyFi', '02:00')"
+ become_user: "{{ pg_user }}"
+ become: True
+ register: results
+
+- assert:
+ that:
+ - results is changed
+
+- name: Insert a record with params
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "INSERT INTO films (code, title, did, date_prod, kind, len) VALUES (?, ?, ?, ?, ?, ?)"
+ params:
+ - 'qwert'
+ - 'My Second Movie'
+ - 2
+ - '2019-01-12'
+ - 'Comedy'
+ - '01:30'
+ become_user: "{{ pg_user }}"
+ become: True
+ register: results
+
+- assert:
+ that:
+ - results is changed
+ - results['row_count'] == -1
+ - results['results'] == []
+ - results['description'] == []
+
+#
+# Select data
+#
+- name: Perform select single row without params (do not coerce changed)
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "SELECT * FROM films WHERE code='asdfg'"
+ register: results
+
+- assert:
+ that:
+ - results is changed
+ - results is successful
+ - results.row_count == 1
+
+- name: Perform select multiple rows with params (coerce changed)
+ odbc:
+ dsn: "{{ dsn }}"
+ query: 'SELECT * FROM films WHERE code=? or code=?'
+ params:
+ - 'asdfg'
+ - 'qwert'
+ register: results
+ changed_when: False
+
+- assert:
+ that:
+ - results is not changed
+ - results is successful
+ - results.row_count == 2
+
+- name: Drop the table
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "DROP TABLE films"
+ register: results
+
+- assert:
+ that:
+ - results is successful
+ - results is changed
+ - results['row_count'] == -1
+ - results['results'] == []
+ - results['description'] == []
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml
new file mode 100644
index 00000000..a560b73c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml
@@ -0,0 +1,19 @@
+#
+# Missing params for the module
+# There is nothing you need to do here because the params are required
+#
+
+#
+# Invalid DSN in the module
+#
+- name: "Test with an invalid DSN"
+ odbc:
+ dsn: "t1"
+ query: "SELECT * FROM nothing"
+ register: results
+ ignore_errors: True
+
+- assert:
+ that:
+ - results is failed
+ - "'Failed to connect to DSN' in results.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml
new file mode 100644
index 00000000..ac66edd9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml
@@ -0,0 +1,11 @@
+- name: Testing the module without pyodbc
+ odbc:
+ dsn: "Test"
+ query: "SELECT * FROM nothing"
+ ignore_errors: True
+ register: results
+
+- assert:
+ that:
+ - results is failed
+ - "'Failed to import the required Python library (pyodbc) on' in results.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/aliases
new file mode 100644
index 00000000..1ff4e0b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/aliases
@@ -0,0 +1,2 @@
+cloud/opennebula
+shippable/cloud/group1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz
new file mode 100644
index 00000000..8b67b548
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml
new file mode 100644
index 00000000..86752df8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_opennebula \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml
new file mode 100644
index 00000000..a3cea768
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml
@@ -0,0 +1,240 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the one_host module
+
+
+# ENVIRONMENT PREPARATION
+
+- set_fact: test_number=0
+
+- name: "test_{{test_number}}: copy fixtures to test host"
+ copy:
+ src: testhost/tmp/opennebula-fixtures.json.gz
+ dest: /tmp
+ when:
+ - opennebula_test_fixture
+ - opennebula_test_fixture_replay
+
+
+# SETUP INITIAL TESTING CONDITION
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: ensure the tests hosts are absent"
+ one_host:
+ name: "{{ item }}"
+ state: absent
+ api_endpoint: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_token: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+# NOT EXISTING HOSTS
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: attempt to enable a host that does not exists"
+ one_host:
+ name: badhost
+ state: "{{item}}"
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{item}}"
+ ignore_errors: true
+ register: result
+ with_items:
+ - enabled
+ - disabled
+ - offline
+
+- name: "assert test_{{test_number}} failed"
+ assert:
+ that:
+ - result is failed
+ - result.results[0].msg == 'invalid host state ERROR'
+
+# ---
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: delete an unexisting host"
+ one_host:
+ name: badhost
+ state: absent
+ validate_certs: false
+ environment:
+ ONE_URL: "{{ opennebula_url }}"
+ ONE_USERNAME: "{{ opennebula_username }}"
+ ONE_PASSWORD: "{{ opennebula_password }}"
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# HOST ENABLEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+
+- name: "test_{{test_number}}: enable the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# TEMPLATE MANAGEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: setup template values on hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ template:
+ LABELS:
+ - test
+ - custom
+ TEST_VALUE: 2
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# ---
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: setup equivalent template values on hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ labels:
+ - test
+ - custom
+ attributes:
+ TEST_VALUE: "2"
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed == false
+
+# HOST DISABLEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: disable the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: disabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# HOST OFFLINE
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: offline the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: offline
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# TEARDOWN
+
+- name: fetch fixtures
+ fetch:
+ src: /tmp/opennebula-fixtures.json.gz
+ dest: targets/one_host/files
+ when:
+ - opennebula_test_fixture
+ - not opennebula_test_fixture_replay
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases
new file mode 100644
index 00000000..6e0b78e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases
@@ -0,0 +1,5 @@
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/rhel
+skip/docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml
new file mode 100644
index 00000000..af4667ce
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml
@@ -0,0 +1,253 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the osx_defaults module.
+# Copyright: (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+---
+- name: Check if name is required for present
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ state: present
+ register: missing_value
+ ignore_errors: yes
+
+- name: Test if state and value are required together
+ assert:
+ that:
+ - "'following are missing: value' in '{{ missing_value['msg'] }}'"
+
+- name: Change value of AppleMeasurementUnits to centimeter in check_mode
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeter
+ state: present
+ register: measure_task_check_mode
+ check_mode: yes
+
+- name: Test if AppleMeasurementUnits value is changed to Centimeters in check_mode
+ assert:
+ that:
+ - measure_task_check_mode.changed
+
+- name: Find the current value of AppleMeasurementUnits
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ state: list
+ register: apple_measure_value
+
+- debug:
+ msg: "{{ apple_measure_value['value'] }}"
+
+- set_fact:
+ new_value: "Centimeters"
+ when: apple_measure_value['value'] == 'Inches' or apple_measure_value['value'] == None
+
+- set_fact:
+ new_value: "Inches"
+ when: apple_measure_value['value'] == 'Centimeters'
+
+- name: Change value of AppleMeasurementUnits to {{ new_value }}
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: "{{ new_value }}"
+ state: present
+ register: change_value
+
+- name: Test if AppleMeasurementUnits value is changed to {{ new_value }}
+ assert:
+ that:
+ - change_value.changed
+
+- name: Again change value of AppleMeasurementUnits to {{ new_value }}
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: "{{ new_value }}"
+ state: present
+ register: change_value
+
+- name: Again test if AppleMeasurementUnits value is not changed to {{ new_value }}
+ assert:
+ that:
+ - not change_value.changed
+
+- name: Check a fake setting for delete operation
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: list
+ register: list_fake_value
+
+- debug:
+ msg: "{{ list_fake_value }}"
+
+- name: Check if fake value is listed
+ assert:
+ that:
+ - not list_fake_value.changed
+
+- name: Create a fake setting for delete operation
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: present
+ value: sample
+ register: present_fake_value
+
+- debug:
+ msg: "{{ present_fake_value }}"
+
+- name: Check if fake is created
+ assert:
+ that:
+ - present_fake_value.changed
+ when: present_fake_value.changed
+
+- name: List a fake setting
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: list
+ register: list_fake
+
+- debug:
+ msg: "{{ list_fake }}"
+
+- name: Delete a fake setting
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_task
+
+- debug:
+ msg: "{{ absent_task }}"
+
+- name: Check if fake setting is deleted
+ assert:
+ that:
+ - absent_task.changed
+ when: present_fake_value.changed
+
+- name: Try deleting a fake setting again
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_task
+
+- debug:
+ msg: "{{ absent_task }}"
+
+- name: Check if fake setting is not deleted
+ assert:
+ that:
+ - not absent_task.changed
+
+- name: Delete operation in check_mode
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_check_mode_task
+ check_mode: yes
+
+- debug:
+ msg: "{{ absent_check_mode_task }}"
+
+- name: Check delete operation with check mode
+ assert:
+ that:
+ - not absent_check_mode_task.changed
+
+
+- name: Use different data types and check if it works with them
+ osx_defaults:
+ domain: com.ansible.fake_values
+ key: "{{ item.key }}"
+ type: "{{ item.type }}"
+ value: "{{ item.value }}"
+ state: present
+ with_items: &data_type
+ - { type: 'int', value: 1, key: 'sample_int'}
+ - { type: 'integer', value: 1, key: 'sample_int_2'}
+ - { type: 'integer', value: -1, key: 'negative_int'}
+ - { type: 'bool', value: True, key: 'sample_bool'}
+ - { type: 'boolean', value: True, key: 'sample_bool_2'}
+ - { type: 'date', value: "2019-02-19 10:10:10", key: 'sample_date'}
+ - { type: 'float', value: 1.2, key: 'sample_float'}
+ - { type: 'string', value: 'sample', key: 'sample_string'}
+ - { type: 'array', value: ['1', '2'], key: 'sample_array'}
+ register: test_data_types
+
+- assert:
+ that: "{{ item.changed }}"
+ with_items: "{{ test_data_types.results }}"
+
+- name: Use different data types and delete them
+ osx_defaults:
+ domain: com.ansible.fake_values
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ type: "{{ item.type }}"
+ state: absent
+ with_items: *data_type
+ register: test_data_types
+
+- assert:
+ that: "{{ item.changed }}"
+ with_items: "{{ test_data_types.results }}"
+
+
+- name: Ensure test key does not exist
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ state: absent
+
+- name: add array value for the first time
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ value:
+ - 'Value with spaces'
+ type: array
+ array_add: yes
+ register: test_array_add
+
+- assert:
+ that: test_array_add.changed
+
+- name: add for the second time, should be skipped
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ value:
+ - 'Value with spaces'
+ type: array
+ array_add: yes
+ register: test_array_add
+
+- assert:
+ that: not test_array_add.changed
+
+- name: Clean up test key
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ state: absent
+ register: test_array_add
+
+- assert:
+ that: test_array_add.changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases
new file mode 100644
index 00000000..ad7ccf7a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases
@@ -0,0 +1 @@
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml
new file mode 100644
index 00000000..a6477ff1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml
@@ -0,0 +1,24 @@
+# Test code for pagerduty_user module
+#
+# Copyright: (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+- name: Install required library
+ pip:
+ name: pdpyras
+ state: present
+
+- name: Create a user account on PagerDuty
+ pagerduty_user:
+ access_token: '{{ pd_api_access_token }}'
+ pd_user: '{{ fullname }}'
+ pd_email: '{{ email }}'
+ pd_role: '{{ pd_role }}'
+ pd_teams: '{{ pd_teams }}'
+ state: present
+
+- name: Remove a user account from PagerDuty
+ pagerduty_user:
+ access_token: "{{ pd_api_access_token }}"
+ pd_user: "{{ fullname }}"
+ pd_email: "{{ email }}"
+ state: "absent" \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml
new file mode 100644
index 00000000..384d4b99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml
@@ -0,0 +1,5 @@
+pd_api_access_token: your_api_access_token
+fullname: User Name
+email: user@email.com
+pd_role: observer
+pd_teams: team1 \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/aliases
new file mode 100644
index 00000000..b5983214
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/aliases
@@ -0,0 +1 @@
+shippable/posix/group3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/files/obtainpid.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/files/obtainpid.sh
new file mode 100644
index 00000000..8e4f4d37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/files/obtainpid.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+"$1" 100 &
+echo "$!" > "$2"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml
new file mode 100644
index 00000000..4cc69163
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml
@@ -0,0 +1,69 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the pids module
+# Copyright: (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+- name: "Installing the psutil module"
+ pip:
+ name: psutil < 5.7.0
+ # Version 5.7.0 breaks on older pip versions. See https://github.com/ansible/ansible/pull/70667
+
+- name: "Checking the empty result"
+ pids:
+ name: "blahblah"
+ register: emptypids
+
+- name: "Verify that the list of Process IDs (PIDs) returned is empty"
+ assert:
+ that:
+ - emptypids is not changed
+ - emptypids.pids == []
+
+- name: "Picking a random process name"
+ set_fact:
+ random_name: some-random-long-name-{{ 99999999 | random }}
+
+- name: "finding the 'sleep' binary"
+ command: which sleep
+ register: find_sleep
+
+- name: "Copying 'sleep' binary"
+ copy:
+ src: "{{ find_sleep.stdout }}"
+ dest: "{{ output_dir }}/{{ random_name }}"
+ mode: "0777"
+
+- name: "Running the copy of 'sleep' binary"
+ command: "sh {{ role_path }}/files/obtainpid.sh '{{ output_dir }}/{{ random_name }}' '{{ output_dir }}/obtainpid.txt'"
+
+ async: 100
+ poll: 0
+
+- name: "Wait for one second to make sure that the sleep copy has actually been started"
+ pause:
+ seconds: 1
+
+- name: "Checking the process IDs (PIDs) of sleep binary"
+ pids:
+ name: "{{ random_name }}"
+ register: pids
+
+- name: "Checking that exact non-substring matches are required"
+ pids:
+ name: "{{ random_name[0:5] }}"
+ register: exactpidmatch
+
+- name: "Reading pid from the file"
+ slurp:
+ src: "{{ output_dir }}/obtainpid.txt"
+ register: newpid
+
+- name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console"
+ assert:
+ that:
+ - "pids.pids | join(' ') == newpid.content | b64decode | trim"
+ - "pids.pids | length > 0"
+ - "exactpidmatch.pids == []"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases
new file mode 100644
index 00000000..5e163ed7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases
@@ -0,0 +1,2 @@
+destructive
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml
new file mode 100644
index 00000000..f2bf44e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml
@@ -0,0 +1,116 @@
+# Test code for the pkgutil module
+
+# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+# CLEAN ENVIRONMENT
+- name: Remove CSWtop
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: originally_installed
+
+
+# ADD PACKAGE
+- name: Add package (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ check_mode: yes
+ register: cm_add_package
+
+- name: Verify cm_add_package
+ assert:
+ that:
+ - cm_add_package is changed
+
+- name: Add package (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ register: nm_add_package
+
+- name: Verify nm_add_package
+ assert:
+ that:
+ - nm_add_package is changed
+
+- name: Add package again (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ check_mode: yes
+ register: cm_add_package_again
+
+- name: Verify cm_add_package_again
+ assert:
+ that:
+ - cm_add_package_again is not changed
+
+- name: Add package again (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ register: nm_add_package_again
+
+- name: Verify nm_add_package_again
+ assert:
+ that:
+ - nm_add_package_again is not changed
+
+
+# REMOVE PACKAGE
+- name: Remove package (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ check_mode: yes
+ register: cm_remove_package
+
+- name: Verify cm_remove_package
+ assert:
+ that:
+ - cm_remove_package is changed
+
+- name: Remove package (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: nm_remove_package
+
+- name: Verify nm_remove_package
+ assert:
+ that:
+ - nm_remove_package is changed
+
+- name: Remove package again (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ check_mode: yes
+ register: cm_remove_package_again
+
+- name: Verify cm_remove_package_again
+ assert:
+ that:
+ - cm_remove_package_again is not changed
+
+- name: Remove package again (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: nm_remove_package_again
+
+- name: Verify nm_remove_package_again
+ assert:
+ that:
+ - nm_remove_package_again is not changed
+
+
+# RESTORE ENVIRONMENT
+- name: Reinstall CSWtop
+ pkgutil:
+ name: CSWtop
+ state: present
+ when: originally_installed is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/main.yml
new file mode 100644
index 00000000..359c5d3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_copy module
+- import_tasks: postgresql_copy_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml
new file mode 100644
index 00000000..cd9981e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_copy/tasks/postgresql_copy_initial.yml
@@ -0,0 +1,278 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# The file for testing postgresql_copy module.
+
+- vars:
+ test_table: acme
+ data_file_txt: /tmp/data.txt
+ data_file_csv: /tmp/data.csv
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ # Test preparation:
+ - name: postgresql_copy - create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+ - name text
+
+ # Insert the data:
+ - name: postgresql_copy - insert rows into test table
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "INSERT INTO {{ test_table }} (id, name) VALUES (1, 'first')"
+
+ - name: postgresql_copy - ensure that test data files don't exist
+ <<: *task_parameters
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ data_file_csv }}'
+ - '{{ data_file_txt }}'
+
+ # ##############
+ # Do main tests:
+
+ # check_mode - if it's OK, must always return changed=True:
+ - name: postgresql_copy - check_mode, copy test table content to data_file_txt
+ check_mode: yes
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+
+ # check that nothing changed after the previous step:
+ - name: postgresql_copy - check that data_file_txt doesn't exist
+ <<: *task_parameters
+ ignore_errors: yes
+ shell: head -n 1 '{{ data_file_txt }}'
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.rc == 1
+
+ # check_mode - if it's OK, must always return changed=True:
+ - name: postgresql_copy - check_mode, copy test table content from data_file_txt
+ check_mode: yes
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_from: '{{ data_file_txt }}'
+ dst: '{{ test_table }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+
+ # check that nothing changed after the previous step:
+ - name: postgresql_copy - check that the test table continues to have one row
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # check_mode - test must fail because test table doesn't exist:
+ - name: postgresql_copy - check_mode, copy non-existent table to data_file_txt
+ check_mode: yes
+ ignore_errors: yes
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: non_existent_table
+ trust_input: no
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.queries is not defined
+
+ - name: postgresql_copy - check trust_input
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ - name: postgresql_copy - copy test table data to data_file_txt
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_txt }}'
+ src: '{{ test_table }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" TO '{{ data_file_txt }}'"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '{{ data_file_txt }}'
+
+ # check the prev test
+ - name: postgresql_copy - check data_file_txt exists and not empty
+ <<: *task_parameters
+ shell: 'head -n 1 {{ data_file_txt }}'
+
+ - assert:
+ that:
+ - result.stdout == '1\tfirst'
+
+ # test different options and columns
+ - name: postgresql_copy - copy test table data to data_file_csv with options and columns
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_to: '{{ data_file_csv }}'
+ src: '{{ test_table }}'
+ columns:
+ - id
+ - name
+ options:
+ format: csv
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id,name) TO '{{ data_file_csv }}' (format csv)"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '{{ data_file_csv }}'
+
+ # check the prev test
+ - name: postgresql_copy - check data_file_csv exists and not empty
+ <<: *task_parameters
+ shell: 'head -n 1 {{ data_file_csv }}'
+
+ - assert:
+ that:
+ - result.stdout == '1,first'
+
+ - name: postgresql_copy - copy from data_file_csv to test table
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ copy_from: '{{ data_file_csv }}'
+ dst: '{{ test_table }}'
+ columns:
+ - id
+ - name
+ options:
+ format: csv
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id,name) FROM '{{ data_file_csv }}' (format csv)"]
+ - result.dst == '{{ test_table }}'
+ - result.src == '{{ data_file_csv }}'
+
+ - name: postgresql_copy - check that there are two rows in test table after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ - name: postgresql_copy - test program option, copy to program
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ src: '{{ test_table }}'
+ copy_to: '/bin/true'
+ program: yes
+ columns: id, name
+ options:
+ delimiter: '|'
+ trust_input: no
+ when: ansible_distribution != 'FreeBSD'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id, name) TO PROGRAM '/bin/true' (delimiter '|')"]
+ - result.src == '{{ test_table }}'
+ - result.dst == '/bin/true'
+ when: ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_copy - test program option, copy from program
+ <<: *task_parameters
+ postgresql_copy:
+ <<: *pg_parameters
+ dst: '{{ test_table }}'
+ copy_from: 'echo 1,first'
+ program: yes
+ columns: id, name
+ options:
+ delimiter: ','
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COPY \"{{ test_table }}\" (id, name) FROM PROGRAM 'echo 1,first' (delimiter ',')"]
+ - result.dst == '{{ test_table }}'
+ - result.src == 'echo 1,first'
+ when: ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_copy - check that there are three rows in test table after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM {{ test_table }} WHERE id = '1' AND name = 'first'"
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # clean up
+ - name: postgresql_copy - remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: postgresql_copy - remove test data files
+ <<: *task_parameters
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ data_file_csv }}'
+ - '{{ data_file_txt }}'
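The &task_parameters / <<: pairs used throughout the file above are plain YAML anchors and merge keys: the shared become/register/login settings are defined once under vars and merged into each task. A minimal standalone sketch of the same idea (the common name and the echo commands are illustrative only):

- vars:
    common: &common
      become: yes
      become_user: '{{ pg_user }}'
      register: result
  block:
    - name: First task inherits the shared keys
      <<: *common
      command: echo one

    - name: Second task reuses them unchanged
      <<: *common
      command: echo two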
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/aliases
new file mode 100644
index 00000000..7844fd87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group4
+postgresql_db
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/defaults/main.yml
new file mode 100644
index 00000000..766feeec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/defaults/main.yml
@@ -0,0 +1,11 @@
+db_name: 'ansible_db'
+db_user1: 'ansible.db.user1'
+db_user2: 'ansible.db.user2'
+tmp_dir: '/tmp'
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
+
+# To test trust_input parameter and
+# possibility to create a database with dots in its name
+db_name_with_dot: 'db.name'
+suspicious_db_name: '{{ db_name_with_dot }}"; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/main.yml
new file mode 100644
index 00000000..56b56aec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/main.yml
@@ -0,0 +1,36 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_db_session_role.yml
+
+# Initial tests of postgresql_db module:
+- import_tasks: postgresql_db_initial.yml
+
+# General tests:
+- import_tasks: postgresql_db_general.yml
+
+# Dump/restore tests for each dump format:
+- include_tasks: state_dump_restore.yml
+ vars:
+ test_fixture: user
+ file: '{{ loop_item }}'
+ loop:
+ - dbdata.sql
+ - dbdata.sql.gz
+ - dbdata.sql.bz2
+ - dbdata.sql.xz
+ - dbdata.tar
+ - dbdata.tar.gz
+ - dbdata.tar.bz2
+ - dbdata.tar.xz
+ - dbdata.pgc
+ loop_control:
+ loop_var: loop_item
+
+# Dump/restore tests with an admin login:
+- import_tasks: state_dump_restore.yml
+ vars:
+ file: dbdata.tar
+ test_fixture: admin
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml
new file mode 100644
index 00000000..6a178bea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_general.yml
@@ -0,0 +1,152 @@
+- become_user: '{{ pg_user }}'
+ become: true
+ vars:
+ db_tablespace: bar
+ tblspc_location: /ssd
+ db_name: acme
+ block_parameters:
+ become_user: '{{ pg_user }}'
+ become: true
+ task_parameters:
+ register: result
+ pg_parameters:
+ login_user: '{{ pg_user }}'
+ block:
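+  # Prepare a tablespace on disk, then exercise the tablespace option of postgresql_db.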
+ - name: postgresql_db - drop dir for test tablespace
+ become: true
+ become_user: root
+ file:
+ path: '{{ tblspc_location }}'
+ state: absent
+ ignore_errors: true
+ - name: postgresql_db - disable selinux
+ become: true
+ become_user: root
+ shell: setenforce 0
+ ignore_errors: true
+ - name: postgresql_db - create dir for test tablespace
+ become: true
+ become_user: root
+ file:
+ path: '{{ tblspc_location }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+  - name: postgresql_db - create a new tablespace
+ postgresql_tablespace:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ db_tablespace }}'
+ location: '{{ tblspc_location }}'
+ - register: result
+ name: postgresql_db_tablespace - Create DB with tablespace option in check mode
+ check_mode: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is changed
+ - register: result
+    name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 0 because nothing has actually changed
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 0
+ - register: result
+ name: postgresql_db_tablespace - Create DB with tablespace option
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" TABLESPACE "{{ db_tablespace }}"']
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - The same DB with tablespace option again
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: '{{ db_tablespace }}'
+ - assert:
+ that:
+ - result is not changed
+ - register: result
+ name: postgresql_db_tablespace - Change tablespace in check_mode
+ check_mode: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: pg_default
+ - assert:
+ that:
+ - result is changed
+ - register: result
+    name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1 because nothing has actually changed
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''{{ db_tablespace }}''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - Change tablespace in actual mode
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ tablespace: pg_default
+ - assert:
+ that:
+ - result is changed
+ - register: result
+ name: postgresql_db_tablespace - Check actual DB tablespace, rowcount must be 1
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT 1 FROM pg_database AS d JOIN pg_tablespace AS t ON d.dattablespace = t.oid WHERE d.datname = ''{{ db_name }}'' AND t.spcname = ''pg_default''
+
+ '
+ - assert:
+ that:
+ - result.rowcount == 1
+ - register: result
+ name: postgresql_db_tablespace - Drop test DB
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ db_name }}'
+ state: absent
+ - register: result
+ name: postgresql_db_tablespace - Remove tablespace
+ postgresql_tablespace:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ db_tablespace }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml
new file mode 100644
index 00000000..851c19f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_initial.yml
@@ -0,0 +1,366 @@
+#
+# Create and destroy db
+#
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was created
+ assert:
+ that:
+ - result is changed
+ - result.db == "{{ db_name }}"
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}"']
+
+- name: Check that the database was created
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Run create on an already created db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was unchanged
+ assert:
+ that:
+ - result is not changed
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that module reports the db was changed
+ assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['DROP DATABASE "{{ db_name }}"']
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that removing an already removed db makes no change
+ assert:
+ that:
+ - result is not changed
+
+
+# This corner case works to add but not to drop. This is sufficiently crazy
+# that I'm not going to attempt to fix it unless someone lets me know that they
+# need the functionality
+#
+# - postgresql_db:
+# state: 'present'
+# name: '"silly.""name"'
+# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
+# register: result
+#
+# - assert:
+# that: "result.stdout_lines[-1] == '(1 row)'"
+# - postgresql_db:
+# state: absent
+# name: '"silly.""name"'
+# - shell: echo "select datname from pg_database where datname = 'silly.""name';" | psql
+# register: result
+#
+# - assert:
+# that: "result.stdout_lines[-1] == '(0 rows)'"
+
+#
+# Test conn_limit, encoding, collate, ctype, template options
+#
+- name: Create a DB with conn_limit, encoding, collate, ctype, and template options
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '100'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING 'LATIN1' LC_COLLATE 'pt_BR{{ locale_latin_suffix }}' LC_CTYPE 'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"] or result.executed_commands == ["CREATE DATABASE \"{{ db_name }}\" TEMPLATE \"template0\" ENCODING E'LATIN1' LC_COLLATE E'pt_BR{{ locale_latin_suffix }}' LC_CTYPE E'es_ES{{ locale_latin_suffix }}' CONNECTION LIMIT 100"]
+
+- name: Check that the DB has all of our options
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datname, datconnlimit, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'LATIN1' in result.stdout_lines[-2]"
+ - "'pt_BR' in result.stdout_lines[-2]"
+ - "'es_ES' in result.stdout_lines[-2]"
+ - "'UTF8' not in result.stdout_lines[-2]"
+ - "'en_US' not in result.stdout_lines[-2]"
+ - "'100' in result.stdout_lines[-2]"
+
+- name: Check that running db creation with options a second time does nothing
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '100'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+
+- name: Check that attempting to change encoding returns an error
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ encoding: 'UTF8'
+ lc_collate: 'pt_BR{{ locale_utf8_suffix }}'
+ lc_ctype: 'es_ES{{ locale_utf8_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+
+- name: Check that changing the conn_limit actually works
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'present'
+ conn_limit: '200'
+ encoding: 'LATIN1'
+ lc_collate: 'pt_BR{{ locale_latin_suffix }}'
+ lc_ctype: 'es_ES{{ locale_latin_suffix }}'
+ template: 'template0'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" CONNECTION LIMIT 200']
+
+- name: Check that conn_limit has actually been set / updated to 200
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "SELECT datconnlimit AS conn_limit FROM pg_database WHERE datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'200' == '{{ result.stdout_lines[-2] | trim }}'"
+
+- name: Cleanup test DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: '{{ db_name }}'
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+
+- shell: echo "select datname, pg_encoding_to_char(encoding), datcollate, datctype from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+#
+# Test db ownership
+#
+- name: Create unprivileged users to own a DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ item }}"
+ encrypted: 'yes'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ login_user: "{{ pg_user }}"
+ db: postgres
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+
+- name: Create db with user ownership
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['CREATE DATABASE "{{ db_name }}" OWNER "{{ db_user1 }}"']
+
+- name: Check that the user owns the newly created DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: >
+ SELECT 1 FROM pg_catalog.pg_database
+ WHERE datname = '{{ db_name }}'
+ AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user1 }}'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Change the owner on an existing db, username with dots
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.executed_commands == ['ALTER DATABASE "{{ db_name }}" OWNER TO "{{ db_user2 }}"']
+
+- name: Check that the new owner was set in the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: >
+ SELECT 1 FROM pg_catalog.pg_database
+ WHERE datname = '{{ db_name }}'
+ AND pg_catalog.pg_get_userbyid(datdba) = '{{ db_user2 }}'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Change the owner on an existing db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ owner: "{{ pg_user }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: assert that ansible says it changed the db
+ assert:
+ that:
+ - result is changed
+
+- name: Check that pg_user now owns the DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select pg_catalog.pg_get_userbyid(datdba) from pg_catalog.pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ pg_user }}' == '{{ result.stdout_lines[-2] | trim }}'"
+
+- name: Cleanup db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Cleanup test user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Check that the test user was removed
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml
new file mode 100644
index 00000000..4cdef73f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/postgresql_db_session_role.yml
@@ -0,0 +1,80 @@
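+# Tests for the session_role option of the postgresql_db module.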
+- name: Check that becoming a non-existent user throws an error
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: must_fail
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+
+- name: Create a high privileged user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create a low privileged user using the newly created user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role2 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "LOGIN"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Check that the database was created and is owned by the correct user
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select rolname from pg_database join pg_roles on datdba = pg_roles.oid where datname = '{{ db_session_role1 }}';" | psql -AtXq postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"
+
+- name: Fail when creating a database as a low privileged user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role2 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role2 }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml
new file mode 100644
index 00000000..6c62cce6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_db/tasks/state_dump_restore.yml
@@ -0,0 +1,235 @@
+# test code for state dump and restore for postgresql_db module
+# copied from mysql_db/tasks/state_dump_import.yml
+# (c) 2014, Wayne Rosario <wrosario@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+- name: Create a test user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
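+# The test_fixture variable selects how dump/restore connects:
+# "user" connects over TCP to localhost as the low-privileged db_user1,
+# "admin" connects locally as pg_user.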
+- set_fact: db_file_name="{{tmp_dir}}/{{file}}"
+
+- set_fact:
+ admin_str: "psql -U {{ pg_user }}"
+
+- set_fact:
+ user_str: "env PGPASSWORD=password psql -h localhost -U {{ db_user1 }} {{ db_name }}"
+ when: test_fixture == "user"
+ # "-n public" is required to work around pg_restore issues with plpgsql
+
+- set_fact:
+ user_str: "psql -U {{ pg_user }} {{ db_name }}"
+ when: test_fixture == "admin"
+
+
+
+- set_fact:
+ sql_create: "create table employee(id int, name varchar(100));"
+ sql_insert: "insert into employee values (47,'Joe Smith');"
+ sql_select: "select * from employee;"
+
+- name: state dump/restore - create database
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: state dump/restore - create table employee
+ command: '{{ user_str }} -c "{{ sql_create }}"'
+
+- name: state dump/restore - insert data into table employee
+ command: '{{ user_str }} -c "{{ sql_insert }}"'
+
+- name: state dump/restore - ensure the dump file does not exist
+ file: name={{ db_file_name }} state=absent
+
+- name: test state=dump to backup the database (expect changed=true)
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: dump
+ dump_extra_args: --exclude-table=fake
+ register: result
+ become_user: "{{ pg_user }}"
+ become: yes
+
+- name: assert that backing up the database reported a change
+ assert:
+ that:
+ - result is changed
+ - result.executed_commands[0] is search("--exclude-table=fake")
+
+- name: check that the dump file was created
+ command: file {{ db_file_name }}
+ register: result
+
+- name: state dump/restore - remove database for restore
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: absent
+
+- name: state dump/restore - re-create database
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: test state=restore to restore the database (expect changed=true)
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: restore
+ register: result
+ become_user: "{{ pg_user }}"
+ become: yes
+
+- name: assert that restoring the database reported a change
+ assert:
+ that:
+ - result is changed
+
+- name: select data from table employee
+ command: '{{ user_str }} -c "{{ sql_select }}"'
+ register: result
+
+- name: assert that the data in the database came from the restored dump
+ assert:
+ that:
+ - "'47' in result.stdout"
+ - "'Joe Smith' in result.stdout"
+
+############################
+# 1. Test trust_input parameter
+# 2. Test db name containing dots
+
+- name: state dump/restore - create database, trust_input no
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: present
+ name: "{{ suspicious_db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ suspicious_db_name }}\' is potentially dangerous'
+
+- name: state dump/restore - create database, trust_input yes explicitly
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: present
+ name: "{{ suspicious_db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: test state=restore to restore the database with a dot in its name (expect changed=true)
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ name: "{{ db_name_with_dot }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: restore
+ register: result
+
+- name: assert that restoring the database reported a change
+ assert:
+ that:
+ - result is changed
+
+- name: state dump/restore - remove the database with a dot in its name
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_db:
+ state: absent
+ name: "{{ db_name_with_dot }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ trust_input: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Clean up
+- name: state dump/restore - remove the database
+ postgresql_db:
+ name: "{{ db_name }}"
+ target: "{{ db_file_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: '{{(test_fixture == "user")|ternary(db_user1, pg_user)}}'
+ target_opts: '{{(test_fixture == "user")|ternary("-n public", omit)}}'
+ login_host: '{{(test_fixture == "user")|ternary("localhost", omit)}}'
+ login_password: '{{(test_fixture == "user")|ternary("password", omit)}}'
+ state: absent
+
+- name: remove the dump file
+ file: name={{ db_file_name }} state=absent
+
+- name: Remove the test user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+ db: postgres
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/aliases
new file mode 100644
index 00000000..a892de40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/defaults/main.yml
new file mode 100644
index 00000000..05bac61d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/defaults/main.yml
@@ -0,0 +1,2 @@
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/meta/main.yml
new file mode 100644
index 00000000..0ec7d2fc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/main.yml
new file mode 100644
index 00000000..1fa365be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/main.yml
@@ -0,0 +1,26 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_ext_session_role.yml
+
+# Initial CI tests of postgresql_ext module.
+# The pg_extension system catalog is available from PostgreSQL 9.1.
+# The tests are restricted to Fedora because other distributions either
+# fail when the environment is changed during postgis installation or
+# lack the postgis package in their repositories.
+# Apart from that, these tests depend only on the Postgres version,
+# not on a specific distribution.
+- import_tasks: postgresql_ext_initial.yml
+ when:
+ - postgres_version_resp.stdout is version('9.1', '>=')
+ - ansible_distribution == 'Fedora'
+
+# CI tests of "version" option.
+# It uses a mock extension, see tests/integration/targets/setup_postgresql_db/.
+# TODO: change postgresql_ext_initial.yml to use the mock extension too.
+- import_tasks: postgresql_ext_version_opt.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - postgres_version_resp.stdout is version('9.1', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml
new file mode 100644
index 00000000..cabf1f49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_initial.yml
@@ -0,0 +1,208 @@
+---
+- name: postgresql_ext - install postgis on Linux
+ package: name=postgis state=present
+ when: ansible_os_family != "Windows"
+
+- name: postgresql_ext - create schema schema1
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_schema:
+ database: postgres
+ name: schema1
+ state: present
+
+- name: postgresql_ext - drop extension if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: DROP EXTENSION IF EXISTS postgis
+ ignore_errors: true
+
+- name: postgresql_ext - create extension postgis in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ login_db: postgres
+ login_port: 5432
+ name: postgis
+ check_mode: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ login_db: postgres
+ login_port: 5432
+ name: postgis
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE EXTENSION "postgis"']
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_ext - drop extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ state: absent
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP EXTENSION "postgis"']
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ schema: schema1
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE EXTENSION "postgis" WITH SCHEMA "schema1"']
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: "SELECT extname FROM pg_extension AS e LEFT JOIN pg_catalog.pg_namespace AS n \nON n.oid = e.extnamespace WHERE e.extname='postgis' AND n.nspname='schema1'\n"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_ext - drop extension postgis cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ state: absent
+ cascade: true
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP EXTENSION "postgis" CASCADE']
+
+- name: postgresql_ext - check that extension doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_ext - create extension postgis cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ cascade: true
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- assert:
+ that:
+ - result is changed
+    - result.queries == ['CREATE EXTENSION "postgis" CASCADE']
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- name: postgresql_ext - check that extension exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ query: SELECT extname FROM pg_extension WHERE extname='postgis'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '<=')
+
+- name: postgresql_ext - check that using a dangerous name fails
+ postgresql_ext:
+ db: postgres
+ name: postgis
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: no
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml
new file mode 100644
index 00000000..c1fed5bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_session_role.yml
@@ -0,0 +1,114 @@
+- name: Create a high privileged user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Check that pg_extension exists (PostgreSQL >= 9.1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select count(*) from pg_class where relname='pg_extension' and relkind='r'" | psql -AtXq postgres
+ register: pg_extension
+
+- name: Remove plpgsql from testdb using postgresql_ext
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Fail when trying to create an extension as a mere mortal user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role2 }}"
+ ignore_errors: yes
+ register: result
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- assert:
+ that:
+ - result is failed
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Install extension as session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ trust_input: no
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Check that the extension was created and is owned by session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select rolname from pg_extension join pg_roles on extowner=pg_roles.oid where extname='plpgsql';" | psql -AtXq "{{ db_session_role1 }}"
+ register: result
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '{{ db_session_role1 }}'"
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Remove plpgsql from testdb using postgresql_ext
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ext:
+ name: plpgsql
+ db: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ trust_input: no
+ when:
+ "pg_extension.stdout_lines[-1] == '1'"
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Drop test users
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ login_user: "{{ pg_user }}"
+ db: postgres
+ with_items:
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml
new file mode 100644
index 00000000..f90340c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ext/tasks/postgresql_ext_version_opt.yml
@@ -0,0 +1,364 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Tests for postgresql_ext version option
+
+- vars:
+ test_ext: dummy
+ test_schema: schema1
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ # Preparation:
+ - name: postgresql_ext_version - create schema schema1
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: "{{ test_schema }}"
+
+ # Do tests:
+ - name: postgresql_ext_version - create extension of specific version, check mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: postgresql_ext_version - create extension of specific version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\" WITH SCHEMA \"{{ test_schema }}\" VERSION '1.0'"]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to create extension of the same version again in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to create extension of the same version again in actual mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the next version in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '2.0'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check, the version must be 1.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '1.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the next version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '2.0'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '2.0'"]
+
+ - name: postgresql_ext_version - check, the version must be 2.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+    - name: postgresql_ext_version - check that the version is not changed when no version is passed
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - check, the version must be 2.0
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '2.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - update the extension to the latest version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: latest
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER EXTENSION \"{{ test_ext }}\" UPDATE TO '3.0'"]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - try to update the extension to the latest version again
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: latest
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - try to downgrade the extension version, must fail
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ schema: "{{ test_schema }}"
+ version: '1.0'
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed == true
+
+ - name: postgresql_ext_version - drop the extension in check_mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that extension exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: postgresql_ext_version - drop the extension in actual mode
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_ext_version - check that extension doesn't exist after the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: postgresql_ext_version - try to drop the non-existent extension again
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_ext_version - create the extension without passing version
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE EXTENSION \"{{ test_ext }}\""]
+
+ - name: postgresql_ext_version - check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT 1 FROM pg_extension WHERE extname = '{{ test_ext }}' AND extversion = '3.0'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+    - name: postgresql_ext_version - try to install a non-existent extension, must fail
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: non_existent
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result.failed == true
+ - result.msg == "Extension non_existent is not installed"
+
+ ######################################################################
+ # https://github.com/ansible-collections/community.general/issues/1095
+ - name: Install postgis
+ package:
+ name: postgis
+
+ - name: Create postgis extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: postgis
+ version: latest
+
+ - assert:
+ that:
+ - result is changed
+
+ # Cleanup:
+ - name: postgresql_ext_version - drop the extension
+ <<: *task_parameters
+ postgresql_ext:
+ <<: *pg_parameters
+ name: "{{ test_ext }}"
+ state: absent
+ trust_input: no
+
+ - name: postgresql_ext_version - drop the schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: "{{ test_schema }}"
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/main.yml
new file mode 100644
index 00000000..2f594561
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_idx module
+- import_tasks: postgresql_idx_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml
new file mode 100644
index 00000000..31d16627
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_idx/tasks/postgresql_idx_initial.yml
@@ -0,0 +1,377 @@
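+# Preparation: create a test table, a test tablespace (ssd), a schema (foo)
+# and a table in that non-default schema for the index tests below.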
+- name: postgresql_idx - create test table called test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
+ ignore_errors: true
+
+- name: postgresql_idx - drop test tablespace called ssd if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLESPACE IF EXISTS ssd;"
+ ignore_errors: true
+
+- name: postgresql_idx - drop dir for test tablespace
+ become: true
+ file:
+ path: /mnt/ssd
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_idx - create dir for test tablespace
+ become: true
+ file:
+ path: /mnt/ssd
+ state: directory
+ owner: '{{ pg_user }}'
+ mode: '0755'
+ ignore_errors: true
+
+- name: postgresql_idx - create test tablespace called ssd
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLESPACE ssd LOCATION '/mnt/ssd';"
+ ignore_errors: true
+ register: tablespace
+
+- name: postgresql_idx - create test schema
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE SCHEMA foo;"
+ ignore_errors: true
+
+- name: postgresql_idx - create table in non-default schema
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE foo.foo_table (id int, story text);"
+ ignore_errors: true
+
+- name: postgresql_idx - create btree index in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ check_mode: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == ''
+ - result.name == 'Test0_idx'
+ - result.state == 'absent'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == ''
+ - result.query == ''
+
+- name: postgresql_idx - check nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_idx - create btree index concurrently
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ trust_input: no
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'Test0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX CONCURRENTLY "Test0_idx" ON "public"."test_table" USING BTREE (id, story)'
+
+- name: postgresql_idx - check the index exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'Test0_idx'
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_idx - try to create existing index again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id, story
+ idxname: Test0_idx
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.tblname == 'test_table'
+ - result.name == 'Test0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == ''
+
+- name: postgresql_idx - create btree index - non-default schema, tablespace, storage parameter
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ table: foo_table
+ columns:
+ - id
+ - story
+ idxname: foo_test_idx
+ tablespace: ssd
+ storage_params: fillfactor=90
+ trust_input: no
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'foo_table'
+ - result.name == 'foo_test_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == 'ssd'
+ - result.storage_params == [ "fillfactor=90" ]
+ - result.schema == 'foo'
+ - result.query == 'CREATE INDEX CONCURRENTLY "foo_test_idx" ON "foo"."foo_table" USING BTREE (id,story) WITH (fillfactor=90) TABLESPACE "ssd"'
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - create brin index not concurrently
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: public
+ table: test_table
+ state: present
+ type: brin
+ columns: id
+ idxname: test_brin_idx
+ concurrent: false
+ trust_input: no
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test_brin_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX "test_brin_idx" ON "public"."test_table" USING brin (id)'
+ when: postgres_version_resp.stdout is version('9.5', '>=')
+
+- name: postgresql_idx - create index with condition
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: id
+ idxname: test1_idx
+ cond: id > 1 AND id != 10
+ trust_input: no
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test1_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE INDEX CONCURRENTLY "test1_idx" ON "public"."test_table" USING BTREE (id) WHERE id > 1 AND id != 10'
+
+- name: postgresql_idx - create unique index
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: story
+ idxname: test_unique0_idx
+ unique: true
+ trust_input: no
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.tblname == 'test_table'
+ - result.name == 'test_unique0_idx'
+ - result.state == 'present'
+ - result.valid != ''
+ - result.tblspace == ''
+ - result.storage_params == []
+ - result.schema == 'public'
+ - result.query == 'CREATE UNIQUE INDEX CONCURRENTLY "test_unique0_idx" ON "public"."test_table" USING BTREE (story)'
+
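+# PostgreSQL supports unique indexes only with the btree access method,
+# so this task is expected to fail.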
+- name: postgresql_idx - avoid unique index with a type other than btree
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ table: test_table
+ columns: story
+ idxname: test_unique0_idx
+ unique: true
+ concurrent: false
+ type: brin
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == 'Only btree currently supports unique indexes'
+
+- name: postgresql_idx - drop index from specific schema cascade in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ cascade: true
+ state: absent
+ concurrent: false
+ trust_input: yes
+ check_mode: true
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.name == 'foo_test_idx'
+ - result.state == 'present'
+ - result.schema == 'foo'
+ - result.query == ''
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - check the index exists after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' AND schemaname = 'foo'
+ register: result
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - drop index from specific schema cascade
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ cascade: true
+ state: absent
+ concurrent: false
+ register: result
+ ignore_errors: true
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result is changed
+ - result.name == 'foo_test_idx'
+ - result.state == 'absent'
+ - result.schema == 'foo'
+ - result.query == 'DROP INDEX "foo"."foo_test_idx" CASCADE'
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - check the index doesn't exist after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_indexes WHERE indexname = 'foo_test_idx' and schemaname = 'foo'
+ register: result
+ when: tablespace.rc == 0
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: tablespace.rc == 0
+
+- name: postgresql_idx - try to drop not existing index
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_idx:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ schema: foo
+ name: foo_test_idx
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == ''
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/aliases
new file mode 100644
index 00000000..950c1b9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/defaults/main.yml
new file mode 100644
index 00000000..000532ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+pg_user: postgres
+db_default: postgres
+master_port: 5433
+replica_port: 5434
+
+test_table1: acme1
+test_pub: first_publication
+test_pub2: second_publication
+replication_role: logical_replication
+replication_pass: alsdjfKJKDf1#
+test_db: acme_db
+test_subscription: test
+test_subscription2: test2
+conn_timeout: 100
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/meta/main.yml
new file mode 100644
index 00000000..d72e4d23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_replication
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/main.yml
new file mode 100644
index 00000000..04c7788a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# For testing getting publication and subscription info
+- import_tasks: setup_publication.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
+
+# Initial CI tests of postgresql_info module
+- import_tasks: postgresql_info_initial.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml
new file mode 100644
index 00000000..0a117b75
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/postgresql_info_initial.yml
@@ -0,0 +1,177 @@
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
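+# The task_parameters and pg_parameters anchors below are merged into each task
+# with '<<:' so the become/login settings do not have to be repeated.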
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ db_default }}'
+
+ block:
+
+ - name: Create test subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ login_db: '{{ test_db }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+
+    - name: Create second test subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription2 }}'
+ login_db: '{{ test_db }}'
+ state: present
+ publications: '{{ test_pub2 }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+
+ - name: postgresql_info - create role to check session_role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ login_user: "{{ pg_user }}"
+ name: session_superuser
+ role_attr_flags: SUPERUSER
+
+ - name: postgresql_info - test return values and session_role param
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ session_role: session_superuser
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.in_recovery == false
+ - result.databases.{{ db_default }}.collate
+ - result.databases.{{ db_default }}.languages
+ - result.databases.{{ db_default }}.namespaces
+ - result.databases.{{ db_default }}.extensions
+ - result.databases.{{ test_db }}.subscriptions.{{ test_subscription }}
+ - result.databases.{{ test_db }}.subscriptions.{{ test_subscription2 }}
+ - result.settings
+ - result.tablespaces
+ - result.roles
+
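+    # The filter parameter limits which sections postgresql_info collects;
+    # a pattern with a leading '!' excludes the matching section instead.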
+ - name: postgresql_info - check filter param passed by list
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ filter:
+ - ver*
+ - rol*
+ - in_recov*
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.roles
+ - result.in_recovery == false
+ - result.databases == {}
+ - result.repl_slots == {}
+ - result.replications == {}
+ - result.settings == {}
+ - result.tablespaces == {}
+
+ - name: postgresql_info - check filter param passed by string
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ filter: ver*,role*
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.roles
+ - result.databases == {}
+ - result.repl_slots == {}
+ - result.replications == {}
+ - result.settings == {}
+ - result.tablespaces == {}
+
+    - name: postgresql_info - check filter param passed by a single-value string
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ filter: ver*
+
+ - assert:
+ that:
+ - result.version
+ - result.roles == {}
+
+ - name: postgresql_info - check excluding filter param passed by list
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ filter:
+ - "!ver*"
+ - "!rol*"
+ - "!in_rec*"
+
+ - assert:
+ that:
+ - result.version == {}
+ - result.in_recovery == None
+ - result.roles == {}
+ - result.databases
+
+ - name: postgresql_info - test return publication info
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_db: '{{ test_db }}'
+ login_port: '{{ master_port }}'
+ trust_input: yes
+
+ - assert:
+ that:
+ - result.version != {}
+ - result.in_recovery == false
+ - result.databases.{{ db_default }}.collate
+ - result.databases.{{ db_default }}.languages
+ - result.databases.{{ db_default }}.namespaces
+ - result.databases.{{ db_default }}.extensions
+ - result.databases.{{ test_db }}.publications.{{ test_pub }}.ownername == '{{ pg_user }}'
+ - result.databases.{{ test_db }}.publications.{{ test_pub2 }}.puballtables == true
+ - result.settings
+ - result.tablespaces
+ - result.roles
+
+ - name: postgresql_info - test trust_input parameter
+ <<: *task_parameters
+ postgresql_info:
+ <<: *pg_parameters
+ login_db: '{{ test_db }}'
+ login_port: '{{ master_port }}'
+ trust_input: no
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/setup_publication.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/setup_publication.yml
new file mode 100644
index 00000000..0d7df0d7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_info/tasks/setup_publication.yml
@@ -0,0 +1,61 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Preparation for further tests of the postgresql_info module.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+ - name: Create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ master_port }}'
+ maintenance_db: '{{ db_default }}'
+ name: '{{ test_db }}'
+
+ - name: Create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ role_attr_flags: LOGIN,REPLICATION
+
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_table1 }}'
+ columns:
+ - id int
+
+ - name: Master - dump schema
+ <<: *task_parameters
+ shell: pg_dumpall -p '{{ master_port }}' -s > /tmp/schema.sql
+
+    - name: Replica - restore schema
+ <<: *task_parameters
+ shell: psql -p '{{ replica_port }}' -f /tmp/schema.sql
+
+ - name: Create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_pub }}'
+
+    - name: Create second publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_pub2 }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/main.yml
new file mode 100644
index 00000000..79950143
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/main.yml
@@ -0,0 +1,25 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - default.yml
+ paths:
+ - vars
+
+# Only run on CentOS 7: on CentOS 8 the module raises a stack trace because it
+# looks for the incorrect version of plpython.
+# https://gist.github.com/samdoran/8fc1b4ae834d3e66d1895d087419b8d8
+- name: Initial CI tests of postgresql_lang module
+ when:
+ - ansible_facts.distribution == 'CentOS'
+ - ansible_facts.distribution_major_version is version ('7', '==')
+ block:
+ - include_tasks: postgresql_lang_initial.yml
+ - include_tasks: postgresql_lang_add_owner_param.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml
new file mode 100644
index 00000000..5d21db56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_add_owner_param.yml
@@ -0,0 +1,199 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ test_user1: alice
+ test_user2: bob
+ test_lang: plperl
+ non_existent_role: fake_role
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+ - name: Create roles for tests
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ loop:
+ - '{{ test_user1 }}'
+ - '{{ test_user2 }}'
+
+ - name: Create lang with owner in check_mode
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user1 }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+ - name: Check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create lang with owner
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user1 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE LANGUAGE "{{ test_lang }}"', 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user1 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change lang owner in check_mode
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+ trust_input: yes
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
+
+ - name: Check that nothing was actually changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Change lang owner
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ # TODO: the first elem of the returned list below
+ # looks like a bug, not related with the option owner, needs to be checked
+ - result.queries == ["UPDATE pg_language SET lanpltrusted = false WHERE lanname = '{{ test_lang }}'", 'ALTER LANGUAGE "{{ test_lang }}" OWNER TO "{{ test_user2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to change lang owner again to the same role
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ owner: '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+ AND r.rolname = '{{ test_user2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop test lang with owner, must ignore
+ <<: *task_parameters
+ postgresql_lang:
+ <<: *pg_parameters
+ name: '{{ test_lang }}'
+ state: absent
+ owner: '{{ non_existent_role }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP LANGUAGE \"{{ test_lang }}\""]
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT r.rolname FROM pg_language l
+ JOIN pg_roles r ON l.lanowner = r.oid
+ WHERE l.lanname = '{{ test_lang }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Clean up
+ - name: Drop test roles
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user1 }}'
+ - '{{ test_user2 }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml
new file mode 100644
index 00000000..66023de8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/tasks/postgresql_lang_initial.yml
@@ -0,0 +1,231 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Preparation for tests:
+- name: Install PostgreSQL support packages
+ become: yes
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ postgresql_lang_packages }}"
+ state: present
+
+###############
+# Do main tests
+#
+
+# Create language in check_mode:
+- name: postgresql_lang - create plperl in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create language:
+- name: postgresql_lang - create plperl
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE LANGUAGE "plperl"']
+
+- name: postgresql_lang - check that lang exists after previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language in check_mode:
+- name: postgresql_lang - drop plperl in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ state: absent
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+- name: postgresql_lang - check that lang exists after previous step, rowcount must be 1
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language:
+- name: postgresql_lang - drop plperl
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plperl
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP LANGUAGE "plperl"']
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plperl'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
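+# The built-in 'c' language cannot be dropped, so these tasks use it to
+# exercise the fail_on_drop behaviour.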
+# Check fail_on_drop yes
+- name: postgresql_lang - drop c language to check fail_on_drop yes
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: c
+ state: absent
+ fail_on_drop: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed == true
+
+# Check fail_on_drop no
+- name: postgresql_lang - drop c language to check fail_on_drop no
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: c
+ state: absent
+ fail_on_drop: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.failed == false
+
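+# With force_trust the module also flips lanpltrusted directly in pg_language,
+# which is why two queries are asserted below.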
+# Create trusted language:
+- name: postgresql_lang - create plpythonu
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: plpythonu
+ trust: yes
+ force_trust: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TRUSTED LANGUAGE "plpythonu"', "UPDATE pg_language SET lanpltrusted = true WHERE lanname = 'plpythonu'"]
+
+- name: postgresql_lang - check that lang exists and it's trusted after previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu' AND lanpltrusted = 't'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop language cascade, tests of aliases:
+- name: postgresql_lang - drop plpythonu cascade
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_lang:
+ login_db: postgres
+ login_user: "{{ pg_user }}"
+ login_port: 5432
+ lang: plpythonu
+ state: absent
+ cascade: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP LANGUAGE "plpythonu" CASCADE']
+
+- name: postgresql_lang - check that lang doesn't exist after previous step, rowcount must be 0
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_language WHERE lanname = 'plpythonu'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml
new file mode 100644
index 00000000..8d4bcc7e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-7.yml
@@ -0,0 +1,3 @@
+postgresql_lang_packages:
+ - postgresql-plperl
+ - postgresql-plpython
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml
new file mode 100644
index 00000000..5da004c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/CentOS-8.yml
@@ -0,0 +1,3 @@
+postgresql_lang_packages:
+ - postgresql-plperl
+ - postgresql-plpython3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_lang/vars/default.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/defaults/main.yml
new file mode 100644
index 00000000..7b1d49e4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/defaults/main.yml
@@ -0,0 +1,6 @@
+test_group1: group1
+test_group2: group2
+test_group3: group.with.dots
+test_user1: user1
+test_user2: user.with.dots
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/main.yml
new file mode 100644
index 00000000..ea058d08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_membership module
+- import_tasks: postgresql_membership_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml
new file mode 100644
index 00000000..d8d7bb6b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_membership/tasks/postgresql_membership_initial.yml
@@ -0,0 +1,390 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+####################
+# Prepare for tests:
+
+# Create test roles:
+- name: postgresql_membership - create test roles
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: "{{ item }}"
+ ignore_errors: yes
+ with_items:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ - "{{ test_group3 }}"
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+
+################
+# Do main tests:
+
+### Test check_mode
+# Grant test_group1 to test_user1 in check_mode:
+- name: postgresql_membership - grant test_group1 to test_user1 in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Try to revoke test_group1 from test_user1 to check that
+# nothing actually changed in check_mode at the previous step:
+- name: postgresql_membership - try to revoke test_group1 from test_user1 to verify check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.revoked.{{ test_group1 }} == []
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+### End of test check_mode
+
+# Grant test_group1 to test_user1:
+- name: postgresql_membership - grant test_group1 to test_user1
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Grant test_group1 to test_user1 again to check that nothing changes:
+- name: postgresql_membership - grant test_group1 to test_user1 again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.granted.{{ test_group1 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Revoke test_group1 from test_user1:
+- name: postgresql_membership - revoke test_group1 from test_user1
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Revoke test_group1 from test_user1 again to check that nothing changes:
+- name: postgresql_membership - revoke test_group1 from test_user1 again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == []
+ - result.revoked.{{ test_group1 }} == []
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Grant test_group1 and test_group2 to test_user1 and test_user2:
+- name: postgresql_membership - grant two groups to two users
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group1 }}\" TO \"{{ test_user2 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user1 }}\"", "GRANT \"{{ test_group2 }}\" TO \"{{ test_user2 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
+ - result.granted.{{ test_group2 }} == ["{{ test_user1 }}", "{{ test_user2 }}"]
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+# Grant test_group1 and test_group2 to test_user1 and test_user2 again to check that nothing changes:
+- name: postgresql_membership - grant two groups to two users again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == []
+ - result.granted.{{ test_group1 }} == []
+ - result.granted.{{ test_group2 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+# Revoke only test_group1 from test_user1:
+- name: postgresql_membership - revoke one group from one user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group1 }}"
+ user: "{{ test_user1 }}"
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}"]
+ - result.queries == ["REVOKE \"{{ test_group1 }}\" FROM \"{{ test_user1 }}\""]
+ - result.revoked.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.state == "absent"
+ - result.target_roles == ["{{ test_user1 }}"]
+
+# Try to grant test_group1 and test_group2 to test_user1 and test_user2 again
+# to check that nothing changes with test_user2:
+- name: postgresql_membership - grant two groups to two users again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group1 }}"
+ - "{{ test_group2 }}"
+ user:
+ - "{{ test_user1 }}"
+ - "{{ test_user2 }}"
+ state: present
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.groups == ["{{ test_group1 }}", "{{ test_group2 }}"]
+ - result.queries == ["GRANT \"{{ test_group1 }}\" TO \"{{ test_user1 }}\""]
+ - result.granted.{{ test_group1 }} == ["{{ test_user1 }}"]
+ - result.granted.{{ test_group2 }} == []
+ - result.state == "present"
+ - result.target_roles == ["{{ test_user1 }}", "{{ test_user2 }}"]
+
+#####################
+# Check fail_on_role:
+
+# Try to grant a non-existent group to a non-existent role with fail_on_role=yes:
+- name: postgresql_membership - grant non-existent group to non-existent role, fail_on_role=yes
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: present
+ fail_on_role: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+
+# Try to grant a non-existent group to a non-existent role with fail_on_role=no:
+- name: postgresql_membership - grant non-existent group to non-existent role, fail_on_role=no
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: present
+ fail_on_role: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.granted == {}
+ - result.groups == []
+ - result.target_roles == []
+ - result.state == 'present'
+
+# Try to revoke a non-existent group from a non-existent role with fail_on_role=no:
+- name: postgresql_membership - revoke non-existent group from non-existent role, fail_on_role=no
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: fake_group
+ user: fake_user
+ state: absent
+ fail_on_role: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.revoked == {}
+ - result.groups == []
+ - result.target_roles == []
+ - result.state == 'absent'
+
+# Grant test_group3 with a name containing dots to test_user1.
+- name: postgresql_membership - grant test_group3 with dots to test_user1
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group: "{{ test_group3 }}"
+ user: "{{ test_user1 }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ test_group3 }}\" TO \"{{ test_user1 }}\""]
+
+#############################
+# Check trust_input parameter
+
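+# With trust_input set to no the module checks object names for potentially
+# dangerous content and is expected to fail instead of running any statements.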
+- name: postgresql_membership - try to use dangerous input, don't trust
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group3}}"
+ - "{{ dangerous_name }}"
+ user: "{{ test_user1 }}"
+ state: present
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_membership - try to use dangerous input, trust explicitly
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_membership:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ group:
+ - "{{ test_group3}}"
+ - "{{ dangerous_name }}"
+ user: "{{ test_user1 }}"
+ state: present
+ trust_input: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Role {{ dangerous_name }} does not exist'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/defaults/main.yml
new file mode 100644
index 00000000..e43723c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/defaults/main.yml
@@ -0,0 +1,3 @@
+test_tablespace_path: "/ssd"
+
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/main.yml
new file mode 100644
index 00000000..4b2f5751
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_owner module
+- import_tasks: postgresql_owner_initial.yml
+ when:
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml
new file mode 100644
index 00000000..65623675
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_owner/tasks/postgresql_owner_initial.yml
@@ -0,0 +1,1073 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+####################
+# Prepare for tests:
+
+# Create test roles:
+- name: postgresql_owner - create test roles
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: '{{ item }}'
+ ignore_errors: true
+ with_items:
+ - alice
+ - bob
+
+- name: postgresql_owner - create test database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ db: acme
+
+- name: postgresql_owner - create test table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE TABLE my_table (id int)
+
+- name: postgresql_owner - set owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+
+- name: postgresql_owner - create test sequence
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE SEQUENCE test_seq
+
+- name: postgresql_owner - create test function
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: >
+ CREATE FUNCTION increment(integer) RETURNS integer AS 'select $1 + 1;'
+ LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT;
+
+- name: postgresql_owner - create test schema
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE SCHEMA test_schema
+
+- name: postgresql_owner - create test view
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE VIEW test_view AS SELECT * FROM my_table
+
+- name: postgresql_owner - create test materialized view
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: acme
+ query: CREATE MATERIALIZED VIEW test_mat_view AS SELECT * FROM my_table
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - drop dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_owner - disable SELinux
+ become: true
+ shell: setenforce 0
+ ignore_errors: true
+
+- name: postgresql_owner - create dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+ ignore_errors: true
+
+- name: >
+ postgresql_owner - create a new tablespace called acme and
+    set alice as its owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: acme
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ location: '{{ test_tablespace_path }}'
+
+################
+# Do main tests:
+
+#
+# check reassign_owned_by param
+#
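+# REASSIGN OWNED BY transfers ownership of all objects owned by the given
+# role to the new owner within the current database.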
+# try to reassign ownership to a non-existent user:
+- name: postgresql_owner - reassign_owned_by to non-existent user
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: non_existent
+ reassign_owned_by: bob
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+
+- name: postgresql_owner - reassign_owned_by, check fail_on_role
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: non_existent
+ fail_on_role: false
+ register: result
+
+- assert:
+ that:
+ - result.failed == false
+
+- name: postgresql_owner - reassign_owned_by in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: bob
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'alice'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - reassign_owned_by
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: alice
+ reassign_owned_by: bob
+ trust_input: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['REASSIGN OWNED BY "bob" TO "alice"']
+
+- name: postgresql_owner - check that ownership has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_tables WHERE tablename = 'my_table' AND tableowner = 'alice'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+###########################
+# Test trust_input parameter
+
+- name: postgresql_owner - reassign_owned_by, trust_input no
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: '{{ dangerous_name }}'
+ reassign_owned_by: alice
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_owner - reassign_owned_by, trust_input yes by default
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: '{{ dangerous_name }}'
+ reassign_owned_by: alice
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg is search('does not exist')
+# End of testing trust_input
+
+#
+# Check obj_type for each type
+#
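+# Each object type follows the same pattern: run in check_mode, apply the
+# change, verify it in the system catalogs, then re-run to confirm idempotence.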
+
+# #############################
+# check_mode obj_type: database
+- name: postgresql_owner - set db owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set db owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER DATABASE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that db owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set db owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: database
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that db owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_database AS d JOIN pg_roles AS r
+ ON d.datdba = r.oid WHERE d.datname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set table owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set table owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "my_table" OWNER TO "bob"']
+
+- name: postgresql_owner - check that table owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set table owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: my_table
+ obj_type: table
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that table owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tables WHERE tablename = 'my_table'
+ AND tableowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set sequence owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set sequence owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SEQUENCE "test_seq" OWNER TO "bob"']
+
+- name: postgresql_owner - check that sequence owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set sequence owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_seq
+ obj_type: sequence
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that sequence owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_class AS c JOIN pg_roles AS r
+ ON c.relowner = r.oid WHERE c.relkind = 'S'
+ AND c.relname = 'test_seq' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set function owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set func owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER FUNCTION increment OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that func owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set func owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: increment
+ obj_type: function
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - check that function owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_proc AS f JOIN pg_roles AS r
+ ON f.proowner = r.oid WHERE f.proname = 'increment' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: postgresql_owner - set schema owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_schema
+ obj_type: schema
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set schema owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_schema
+ obj_type: schema
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER SCHEMA "test_schema" OWNER TO "bob"']
+
+- name: postgresql_owner - check that schema owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set schema owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_schema
+ obj_type: schema
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that schema owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM information_schema.schemata
+ WHERE schema_name = 'test_schema' AND schema_owner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set view owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set view owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER VIEW "test_view" OWNER TO "bob"']
+
+- name: postgresql_owner - check that view owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set view owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_view
+ obj_type: view
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that view owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_views WHERE viewname = 'test_view' AND viewowner = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set matview owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ check_mode: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set matview owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER MATERIALIZED VIEW "test_mat_view" OWNER TO "bob"']
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that matview owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set matview owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: test_mat_view
+ obj_type: matview
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - check that matview owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: SELECT 1 FROM pg_matviews WHERE matviewname = 'test_mat_view' AND matviewowner = 'bob'
+ ignore_errors: true
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_owner - set tablespace owner in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that nothing changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_owner - set tablespace owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLESPACE "acme" OWNER TO "bob"']
+
+- name: postgresql_owner - check that tablespace owner has been changed after the previous step
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_owner - set tablespace owner again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_owner:
+ login_user: '{{ pg_user }}'
+ db: acme
+ new_owner: bob
+ obj_name: acme
+ obj_type: tablespace
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+- name: postgresql_owner - check that tablespace owner is bob
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ db: acme
+ login_user: '{{ pg_user }}'
+ query: >
+ SELECT 1 FROM pg_tablespace AS t JOIN pg_roles AS r
+ ON t.spcowner = r.oid WHERE t.spcname = 'acme' AND r.rolname = 'bob'
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Clean up
+#
+- name: postgresql_owner - drop test database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ db: acme
+ state: absent
+
+- name: postgresql_owner - drop test tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/defaults/main.yml
new file mode 100644
index 00000000..4e1fe7dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/defaults/main.yml
@@ -0,0 +1,23 @@
+---
+pg_hba_test_ips:
+- contype: local
+ users: 'all,postgres,test'
+- source: '0000:ffff::'
+ netmask: 'ffff:fff0::'
+- source: '192.168.0.0/24'
+ netmask: ''
+ databases: 'all,replication'
+- source: '192.168.1.0/24'
+ netmask: ''
+ databases: 'all'
+ method: reject
+- source: '127.0.0.1/32'
+ netmask: ''
+- source: '::1/128'
+ netmask: ''
+- source: '0000:ff00::'
+ netmask: 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00'
+ method: scram-sha-256
+- source: '172.16.0.0'
+ netmask: '255.255.0.0'
+ method: trust
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/main.yml
new file mode 100644
index 00000000..d6d298d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_pg_hba module
+- import_tasks: postgresql_pg_hba_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml
new file mode 100644
index 00000000..478d8936
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_pg_hba/tasks/postgresql_pg_hba_initial.yml
@@ -0,0 +1,183 @@
+- name: Make sure file does not exist
+ file:
+ dest: /tmp/pg_hba.conf
+ state: absent
+
+- name: check_mode run
+ postgresql_pg_hba:
+ dest: /tmp/pg_hba.conf
+ contype: host
+ source: '0000:ffff::'
+ netmask: 'ffff:fff0::'
+ method: md5
+ backup: 'True'
+ order: sud
+ state: "{{item}}"
+ check_mode: yes
+ with_items:
+ - present
+ - absent
+
+- name: check_mode check
+ stat:
+ path: /tmp/pg_hba.conf
+ register: pg_hba_checkmode_check
+
+- name: Remove several ip addresses for idempotency check
+ postgresql_pg_hba:
+ contype: "{{item.contype|default('host')}}"
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: absent
+ users: "{{item.users|default('all')}}"
+ with_items: "{{pg_hba_test_ips}}"
+ register: pg_hba_idempotency_check1
+
+- name: Check that the idempotency run did not create the file
+ stat:
+ path: /tmp/pg_hba.conf
+ register: pg_hba_idempotency_file_check
+
+- name: Add several ip addresses
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: "{{item.contype|default('host')}}"
+ create: 'True'
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: present
+ users: "{{item.users|default('all')}}"
+ register: pg_hba_change
+ with_items: "{{pg_hba_test_ips}}"
+
+- name: Able to add options to a rule created without options
+ postgresql_pg_hba:
+ dest: "/tmp/pg_hba.conf"
+ users: "+some"
+ order: "sud"
+ state: "present"
+ contype: "local"
+ method: "cert"
+ options: "{{ item }}"
+ address: ""
+ with_items:
+ - ""
+ - "clientcert=1"
+
+- name: Retain options even if they contain spaces
+ postgresql_pg_hba:
+ dest: "/tmp/pg_hba.conf"
+ users: "+some"
+ order: "sud"
+ state: "present"
+ contype: "{{ item.contype }}"
+ method: "{{ item.method }}"
+ options: "{{ item.options }}"
+ address: "{{ item.address }}"
+ with_items:
+ - { address: "", contype: "local", method: "ldap", options: "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" }
+ - { address: "red", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" }
+ - { address: "blue", contype: "hostssl", method: "cert", options: "clientcert=1 map=mymap" }
+ register: pg_hba_options
+
+- name: read pg_hba rules
+ postgresql_pg_hba:
+ dest: /tmp/pg_hba.conf
+ register: pg_hba
+
+- name: Add several ip addresses again for idempotency check
+ postgresql_pg_hba:
+ contype: "{{item.contype|default('host')}}"
+ databases: "{{item.databases|default('all')}}"
+ dest: /tmp/pg_hba.conf
+ method: "{{item.method|default('md5')}}"
+ netmask: "{{item.netmask|default('')}}"
+ order: sud
+ source: "{{item.source|default('')}}"
+ state: present
+ users: "{{item.users|default('all')}}"
+ with_items: "{{pg_hba_test_ips}}"
+ register: pg_hba_idempotency_check2
+
+- name: pre-backup stat
+ stat:
+ path: /tmp/pg_hba.conf
+ register: prebackupstat
+
+- name: Add new ip address for backup check and netmask_sameas_prefix check
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ netmask: 255.255.255.0
+ order: sud
+ source: '172.21.0.0'
+ state: present
+ register: pg_hba_backup_check2
+
+- name: Add new ip address for netmask_sameas_prefix check
+ postgresql_pg_hba:
+ backup: 'True'
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ order: sud
+ source: '172.21.0.0/24'
+ state: present
+ register: netmask_sameas_prefix_check
+
+- name: post-backup stat
+ stat:
+ path: "{{pg_hba_backup_check2.backup_file}}"
+ register: postbackupstat
+
+- name: Don't allow netmask for src in [all, samehost, samenet]
+ postgresql_pg_hba:
+ contype: host
+ dest: /tmp/pg_hba.conf
+ method: md5
+ netmask: '255.255.255.255'
+ order: sud
+ source: all
+ state: present
+ register: pg_hba_fail_src_all_with_netmask
+ ignore_errors: yes
+
+- debug:
+ var: pg_hba.pg_hba
+- assert:
+ that:
+ - 'pg_hba.pg_hba == [
+ { "db": "all", "method": "ldap", "type": "local", "usr": "+some", "options": "ldapserver=example.com ldapport=389 ldapprefix=\"cn=\"" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "postgres" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "test" },
+ { "db": "all", "method": "md5", "type": "local", "usr": "all" },
+ { "db": "all", "method": "cert", "src": "blue", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" },
+ { "db": "all", "method": "cert", "src": "red", "type": "hostssl", "usr": "+some", "options": "clientcert=1 map=mymap" },
+ { "db": "all", "method": "md5", "src": "127.0.0.1/32", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "::1/128", "type": "host", "usr": "all" },
+ { "db": "all", "method": "scram-sha-256", "src": "0:ff00::/120", "type": "host", "usr": "all" },
+ { "db": "replication", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "192.168.0.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "reject", "src": "192.168.1.0/24", "type": "host", "usr": "all" },
+ { "db": "all", "method": "trust", "src": "172.16.0.0/16", "type": "host", "usr": "all" },
+ { "db": "all", "method": "md5", "src": "0:fff0::/28", "type": "host", "usr": "all" }
+ ]'
+ - 'pg_hba_change is changed'
+ - 'pg_hba_checkmode_check.stat.exists == false'
+ - 'not pg_hba_idempotency_check1 is changed'
+ - 'not pg_hba_idempotency_check2 is changed'
+ - 'pg_hba_idempotency_file_check.stat.exists == false'
+ - 'prebackupstat.stat.checksum == postbackupstat.stat.checksum'
+ - 'pg_hba_fail_src_all_with_netmask is failed'
+ - 'not netmask_sameas_prefix_check is changed'
+ - 'pg_hba_options is changed'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/defaults/main.yml
new file mode 100644
index 00000000..73eb55ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+db_default: postgres
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/main.yml
new file mode 100644
index 00000000..bcb18d2f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_ping module
+- import_tasks: postgresql_ping_initial.yml
+ vars:
+ db_name_nonexist: fake_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml
new file mode 100644
index 00000000..9d35d91c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_ping/tasks/postgresql_ping_initial.yml
@@ -0,0 +1,75 @@
+# Test code for the postgresql_ping module
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: postgresql_ping - test return values
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.is_available == true
+ - result.server_version != {}
+ - result.server_version.major != false
+ - result.server_version.minor != false
+ - result is not changed
+
+- name: postgresql_ping - check that pinging a non-existent database reports it as unavailable
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ping:
+ db: "{{ db_name_nonexist }}"
+ login_user: "{{ pg_user }}"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.is_available == false
+ - result.server_version == {}
+ - result is not changed
+
+- name: postgresql_ping - ping DB with SSL
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ping:
+ db: "{{ ssl_db }}"
+ login_user: "{{ ssl_user }}"
+ login_password: "{{ ssl_pass }}"
+ login_host: 127.0.0.1
+ login_port: 5432
+ ssl_mode: require
+ ca_cert: '{{ ssl_rootcert }}'
+ trust_input: yes
+ register: result
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result.is_available == true
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_ping - check trust_input
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_ping:
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/defaults/main.yml
new file mode 100644
index 00000000..e03dd494
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/defaults/main.yml
@@ -0,0 +1,12 @@
+db_name: ansible_db
+db_user1: ansible_db_user1
+db_user2: ansible_db_user2
+db_user3: ansible_db_user3
+db_user_with_dots1: role.with.dots1
+db_user_with_dots2: role.with.dots2
+db_name_with_hyphens: ansible-db
+db_user_with_hyphens: ansible-db-user
+db_schema_with_hyphens: ansible-db-schema
+db_session_role1: session_role1
+db_session_role2: session_role2
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/main.yml
new file mode 100644
index 00000000..cf7b6352
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/main.yml
@@ -0,0 +1,19 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: postgresql_privs_session_role.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# Initial CI tests of postgresql_privs module:
+- include_tasks: postgresql_privs_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- include_tasks: postgresql_privs_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# Tests default_privs with target_role:
+- include_tasks: test_target_role.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml
new file mode 100644
index 00000000..f5d502d0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/pg_authid_not_readable.yml
@@ -0,0 +1,50 @@
+- name: "Admin user is allowed to access pg_authid relation: password comparison will succeed, password won't be updated"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'yes'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ pg_user }}"
+ register: redo_as_admin
+
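+# Note: the password above uses PostgreSQL's legacy MD5 format, i.e.
+# 'md5' + md5(password || username). A hypothetical standalone equivalent:
+#   SELECT 'md5' || md5('some_password' || 'ansible_db_user1');
+# which yields the string the module compares against rolpassword in pg_authid.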
+- name: "Check that task succeeded without any change"
+ assert:
+ that:
+ - 'redo_as_admin is not failed'
+ - 'redo_as_admin is not changed'
+ - 'redo_as_admin is successful'
+
+- name: "Check that normal user isn't allowed to access pg_authid"
+ shell: 'psql -c "select * from pg_authid;" {{ db_name }} {{ db_user1 }}'
+ environment:
+ PGPASSWORD: '{{ db_password }}'
+ ignore_errors: yes
+ register: pg_authid
+
+- assert:
+ that:
+ - 'pg_authid is failed'
+ - pg_authid.stderr is search('permission denied for (relation|table) pg_authid')
+
+- name: "Normal user isn't allowed to access pg_authid relation: password comparison will fail, password will be updated"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'yes'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ db_user1 }}"
+ login_password: "{{ db_password }}"
+ register: redo_as_normal_user
+
+- name: "Check that task succeeded and that result is changed"
+ assert:
+ that:
+ - 'redo_as_normal_user is not failed'
+ - 'redo_as_normal_user is changed'
+ - 'redo_as_normal_user is successful'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml
new file mode 100644
index 00000000..530e0d1e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_general.yml
@@ -0,0 +1,1533 @@
+# Setup
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be owner of objects
+ postgresql_user:
+ name: "{{ db_user3 }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be given permissions and other tests
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+#############################
+# Test of solving bug 656 #
+#############################
+- name: Create DB with hyphen in the name
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Create a user with hyphen in the name
+ postgresql_user:
+ name: "{{ db_user_with_hyphens }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Create schema with hyphen in the name
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ name: "{{ db_schema_with_hyphens }}"
+ state: present
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Also covers https://github.com/ansible-collections/community.general/issues/884
+- name: Set table default privs on the schema with hyphen in the name (check_mode)
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: present
+ usage_on_types: yes
+ register: result
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries is search('ON TYPES')
+
+# Also covers https://github.com/ansible-collections/community.general/issues/884
+- name: Set table default privs on the schema with hyphen in the name
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: present
+ usage_on_types: no
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries is not search('ON TYPES')
+
+- name: Delete table default privs on the schema with hyphen in the name
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ password: password
+ db: "{{ db_name_with_hyphens }}"
+ schema: "{{ db_schema_with_hyphens }}"
+ role: "{{ db_user_with_hyphens }}"
+ type: default_privs
+ obj: TABLES
+ privs: all
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete schema with hyphen in the name
+ postgresql_schema:
+ login_user: "{{ pg_user }}"
+ login_password: password
+ db: "{{ db_name_with_hyphens }}"
+ name: "{{ db_schema_with_hyphens }}"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete a user with hyphen in the name
+ postgresql_user:
+ name: "{{ db_user_with_hyphens }}"
+ state: absent
+ encrypted: yes
+ password: password
+ role_attr_flags: CREATEDB,LOGIN
+ db: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: Delete DB with hyphen in the name
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name_with_hyphens }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+#############################
+# Test of solving bug 27327 #
+#############################
+
+# Create the test table and view:
+- name: Create table
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_table1
+ columns:
+ - id int
+
+- name: Create view
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "CREATE VIEW test_view AS SELECT id FROM test_table1"
+
+# Test check_mode:
+- name: Grant SELECT on test_view, check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: no
+ check_mode: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Check:
+- name: Check that nothing was changed after the prev step
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test true mode:
+- name: Grant SELECT on test_view
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: no
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+# Check:
+- name: Check that the privilege was granted after the prev step
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "SELECT grantee FROM information_schema.role_table_grants WHERE table_name='test_view' AND grantee = '{{ db_user2 }}'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test true mode:
+- name: Try to grant SELECT again
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ state: present
+ privs: SELECT
+ type: table
+ objs: test_view
+ roles: "{{ db_user2 }}"
+ trust_input: no
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# Cleanup:
+- name: Drop test view
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ query: "DROP VIEW test_view"
+
+- name: Drop test table
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_table1
+ state: absent
+
+######################################################
+# Test foreign data wrapper and foreign server privs #
+######################################################
+
+# Foreign data wrapper setup
+- name: Create foreign data wrapper extension
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE EXTENSION postgres_fdw" | psql -d "{{ db_name }}"
+
+- name: Create dummy foreign data wrapper
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+- name: Create foreign server
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "CREATE SERVER dummy_server FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+# Test
+- name: Grant foreign data wrapper privileges
+ postgresql_privs:
+ state: present
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign data wrapper privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname
+ register: fdw_result
+
+- assert:
+ that:
+ - "fdw_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' in fdw_result.stdout_lines[-2]"
+
+# Test
+- name: Grant foreign data wrapper privileges second time
+ postgresql_privs:
+ state: present
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Revoke foreign data wrapper privileges
+ postgresql_privs:
+ state: absent
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign data wrapper privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
+ WHERE fdwname = ANY (ARRAY['dummy']) ORDER BY fdwname
+ register: fdw_result
+
+- assert:
+ that:
+ - "fdw_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' not in fdw_result.stdout_lines[-2]"
+
+# Test
+- name: Revoke foreign data wrapper privileges for second time
+ postgresql_privs:
+ state: absent
+ type: foreign_data_wrapper
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Grant foreign server privileges
+ postgresql_privs:
+ state: present
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign server privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname
+ register: fs_result
+
+- assert:
+ that:
+ - "fs_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' in fs_result.stdout_lines[-2]"
+
+# Test
+- name: Grant foreign server privileges for second time
+ postgresql_privs:
+ state: present
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Test
+- name: Revoke foreign server privileges
+ postgresql_privs:
+ state: absent
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is changed
+
+- name: Get foreign server privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "{{ fdw_query }}" | psql -d "{{ db_name }}"
+ vars:
+ fdw_query: >
+ SELECT srvacl FROM pg_catalog.pg_foreign_server
+ WHERE srvname = ANY (ARRAY['dummy_server']) ORDER BY srvname
+ register: fs_result
+
+- assert:
+ that:
+ - "fs_result.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user2 }}' not in fs_result.stdout_lines[-2]"
+
+# Test
+- name: Revoke foreign server privileges for second time
+ postgresql_privs:
+ state: absent
+ type: foreign_server
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: dummy_server
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+# Foreign data wrapper cleanup
+- name: Drop foreign server
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP SERVER dummy_server" | psql -d "{{ db_name }}"
+
+- name: Drop dummy foreign data wrapper
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP FOREIGN DATA WRAPPER dummy" | psql -d "{{ db_name }}"
+
+- name: Drop foreign data wrapper extension
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "DROP EXTENSION postgres_fdw" | psql -d "{{ db_name }}"
+
+##########################################
+# Test ALL_IN_SCHEMA for 'function' type #
+##########################################
+
+# Function ALL_IN_SCHEMA Setup
+- name: Create function for test
+ postgresql_query:
+ query: CREATE FUNCTION public.a() RETURNS integer LANGUAGE SQL AS 'SELECT 2';
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+
+# Test
+- name: Grant execute to all functions
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that all functions have execute privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: psql {{ db_name }} -c "SELECT proacl FROM pg_proc WHERE proname = 'a'" -t
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X/{{ db_user3 }}' in '{{ result.stdout_lines[0] }}'"
+
+# Test
+- name: Grant execute to all functions again
+ postgresql_privs:
+ type: function
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that: result is not changed
+
+# Test
+- name: Revoke execute to all functions
+ postgresql_privs:
+ type: function
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that: result is changed
+
+# Test
+- name: Revoke execute to all functions again
+ postgresql_privs:
+ type: function
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that: result is not changed
+
+# Function ALL_IN_SCHEMA cleanup
+- name: Remove function for test
+ postgresql_query:
+ query: DROP FUNCTION public.a();
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+
+# Issue https://github.com/ansible-collections/community.general/issues/994
+- name: Create a procedure for tests
+ postgresql_query:
+ query: "CREATE PROCEDURE mock_procedure() LANGUAGE SQL AS $$ SELECT 1; $$;"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+# Issue https://github.com/ansible-collections/community.general/issues/994
+- name: Try to run module against a procedure, not function
+ postgresql_privs:
+ type: function
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+###########################
+# Test for procedure type #
+###########################
+- name: Create another procedure for tests
+ postgresql_query:
+ query: "CREATE PROCEDURE mock_procedure1(int, int) LANGUAGE SQL AS $$ SELECT 1; $$;"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant privs on procedure
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant privs on procedure again
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs again
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: EXECUTE
+ roles: "{{ db_user2 }}"
+ objs: 'mock_procedure1(int:int)'
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant procedure privs for all objects in schema
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Grant procedure privs for all objects in schema again
+ postgresql_privs:
+ type: procedure
+ state: present
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- name: Revoke procedure privs for all objects in schema
+ postgresql_privs:
+ type: procedure
+ state: absent
+ privs: ALL
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ register: result
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('11', '>=')
+
+####################################################
+# Test ALL_IN_SCHEMA for 'partitioned tables' type #
+####################################################
+
+# Table partitioning is a feature introduced in PostgreSQL 10
+# (see https://www.postgresql.org/docs/10/ddl-partitioning.html ).
+# The tests below check for this version.
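+# For reference, a range partition could be attached to the test table with a
+# statement like the following (a hypothetical example only; it is not executed
+# by these tasks, and the partition name testpt_y2020 is illustrative):
+#   CREATE TABLE public.testpt_y2020 PARTITION OF public.testpt
+#       FOR VALUES FROM ('2020-01-01') TO ('2021-01-01');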
+
+# Partitioned table ALL_IN_SCHEMA setup
+- name: Create partitioned table for test purposes
+ postgresql_query:
+ query: CREATE TABLE public.testpt (id int not null, logdate date not null) PARTITION BY RANGE (logdate);
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant select to all tables in check mode
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+ check_mode: yes
+
+# Checks
+- name: Check that all partitioned tables don't have select privileges after the check mode task
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant execute with grant option on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ grant_option: yes
+ state: present
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user has GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Grant execute without specifying grant_option to check idempotence
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ state: present
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is not changed
+
+- name: Check that user has GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X*/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Revoke grant option on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ grant_option: no
+ state: present
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user does not have GRANT privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=X/{{ pg_user }}' in result.query_result[0].proacl"
+
+# Test
+- name: Revoke execute on pg_create_restore_point function
+ postgresql_privs:
+ privs: EXECUTE
+ type: function
+ schema: pg_catalog
+ obj: pg_create_restore_point(text)
+ db: "{{ db_name }}"
+ roles: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ state: absent
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that user does not have EXECUTE privilege on the function
+ postgresql_query:
+ query: SELECT proacl FROM pg_proc WHERE proname='pg_create_restore_point'
+ db: "{{ db_name }}"
+ login_user: "{{ db_user2 }}"
+ login_password: password
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}' not in result.query_result[0].proacl"
+
+# Test
+- name: Grant select on all tables
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Check that all partitioned tables have select privileges
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant select on all tables again to check that no changes are reported
+ postgresql_privs:
+ type: table
+ state: present
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Revoke SELECT on all tables
+ postgresql_privs:
+ type: table
+ state: absent
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that: result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Check that all partitioned tables don't have select privileges
+ postgresql_query:
+ query: SELECT grantee, privilege_type FROM information_schema.role_table_grants WHERE table_name='testpt' and privilege_type='SELECT' and grantee = %(grantuser)s
+ db: "{{ db_name }}"
+ login_user: '{{ db_user2 }}'
+ login_password: password
+ named_args:
+ grantuser: '{{ db_user2 }}'
+ become: yes
+ become_user: "{{ pg_user }}"
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Revoke SELECT on all tables again to check that no changes are reported
+ postgresql_privs:
+ type: table
+ state: absent
+ privs: SELECT
+ roles: "{{ db_user2 }}"
+ objs: ALL_IN_SCHEMA
+ schema: public
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ register: result
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that: result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Table ALL_IN_SCHEMA cleanup
+- name: Remove table for test
+ postgresql_query:
+ query: DROP TABLE public.testpt;
+ db: "{{ db_name }}"
+ login_user: "{{ db_user3 }}"
+ login_password: password
+ trust_input: no
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+###########################################
+# Test for 'type' value of type parameter #
+###########################################
+
+# Test
+- name: Grant type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Grant type privileges again using check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ check_mode: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Grant type privileges again
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Revoke type privileges in check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ check_mode: yes
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Revoke type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: numeric
+ schema: pg_catalog
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: SELECT typacl FROM pg_catalog.pg_type WHERE typname = 'numeric';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' not in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# type with default schema (public):
+- name: Create custom type in schema public
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: "CREATE TYPE compfoo AS (f1 int, f2 text)"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Test
+- name: Grant type privileges with default schema
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: present
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: compfoo
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Get type privileges
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ query: >
+ SELECT t.typacl FROM pg_catalog.pg_type t JOIN pg_catalog.pg_namespace n
+ ON n.oid = t.typnamespace WHERE t.typname = 'compfoo' AND n.nspname = 'public';
+ register: typ_result
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- assert:
+ that:
+ - "'{{ db_user2 }}' in typ_result.query_result[0].typacl"
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+######################################################################
+# https://github.com/ansible-collections/community.general/issues/1058
+- name: Create user for test
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ name: "test-role"
+ role_attr_flags: "NOLOGIN,NOSUPERUSER,INHERIT,NOCREATEDB,NOCREATEROLE,NOREPLICATION"
+
+- name: Test community.general/issue/1058 GRANT with hyphen
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "test-role"
+ objs: "{{ pg_user }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ pg_user }}\" TO \"test-role\";"]
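+
+# Role names containing a hyphen are not valid unquoted SQL identifiers, so
+# the GRANT/REVOKE statements checked in these tasks must double-quote them.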
+
+- name: Test community.general/issue/1058 REVOKE
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "test-role"
+ objs: "{{ pg_user }}"
+ type: "group"
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["REVOKE \"{{ pg_user }}\" FROM \"test-role\";"]
+
+- name: Test community.general/issue/1058 GRANT without hyphen
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "{{ pg_user }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"{{ pg_user }}\" TO \"{{ db_user3 }}\";"]
+
+- name: Test community.general/issue/1058 GRANT with hyphen as an object
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "test-role,{{ db_user2 }}"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT \"test-role\",\"{{ db_user2 }}\" TO \"{{ db_user3 }}\";"]
+
+- name: Test community.general/issue/1058 GRANT with hyphen as an object again (idempotence)
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ login_user: "{{ pg_user }}"
+ login_db: "{{ db_name }}"
+ roles: "{{ db_user3 }}"
+ objs: "test-role"
+ type: "group"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# Cleanup
+- name: Remove privs
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_privs:
+ state: absent
+ type: type
+ roles: "{{ db_user2 }}"
+ privs: ALL
+ objs: compfoo
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ when: postgres_version_resp.stdout is version('10', '>=')
+
+- name: Reassign ownership
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_owner:
+ login_user: "{{ pg_user }}"
+ db: "{{ db_name }}"
+ new_owner: "{{ pg_user }}"
+ reassign_owned_by: "{{ item }}"
+ loop:
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+
+- name: Remove user given permissions
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Remove user owner of objects
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ loop:
+ - '{{ db_user3 }}'
+ - 'test-role'
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml
new file mode 100644
index 00000000..8aa6b409
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_initial.yml
@@ -0,0 +1,407 @@
+# The tests below were added initially and moved here
+# from the shared target called ``postgresql`` by @Andersson007 <aaklychkov@mail.ru>.
+# You can see modern examples of CI tests in the postgresql_publication directory, for example.
+
+#
+# Test setting privileges
+#
+- name: Create db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ pg_user }}"
+
+- name: Create some tables on the db
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "create table test_table1 (field text);" | psql {{ db_name }}
+
+- become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "create table test_table2 (field text);" | psql {{ db_name }}
+
+- vars:
+ db_password: 'secretù' # use UTF-8
+ block:
+ - name: Create a user with some permissions on the db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'yes'
+ password: "md5{{ (db_password ~ db_user1) | hash('md5')}}"
+ db: "{{ db_name }}"
+ priv: 'test_table1:INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,TRIGGER/test_table2:INSERT/CREATE,CONNECT,TEMP'
+ login_user: "{{ pg_user }}"
+
+ - include_tasks: pg_authid_not_readable.yml
+
+- name: Check that the user has the requested permissions (table1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- name: Check that the user has the requested permissions (database)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table1.stdout"
+ - "'SELECT' in result_table1.stdout"
+ - "'UPDATE' in result_table1.stdout"
+ - "'DELETE' in result_table1.stdout"
+ - "'TRUNCATE' in result_table1.stdout"
+ - "'REFERENCES' in result_table1.stdout"
+ - "'TRIGGER' in result_table1.stdout"
+ - "result_table2.stdout_lines[-1] == '(1 row)'"
+ - "'INSERT' == '{{ result_table2.stdout_lines[-2] | trim }}'"
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}=CTc/{{ pg_user }}' in result_database.stdout_lines[-2]"
+
+- name: Add another permission for the user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ encrypted: 'yes'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ priv: 'test_table2:select'
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table2.stdout_lines[-1] == '(2 rows)'"
+ - "'INSERT' in result_table2.stdout"
+ - "'SELECT' in result_table2.stdout"
+
+#
+# Test priv setting via postgresql_privs module
+# (Depends on state from previous _user privs tests)
+#
+
+- name: Revoke a privilege
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ type: "table"
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT"
+ objs: "test_table2"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (table2)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table2.stdout_lines[-1] == '(1 row)'"
+ - "'SELECT' == '{{ result_table2.stdout_lines[-2] | trim }}'"
+
+- name: Revoke many privileges on multiple tables
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT,select,UPDATE,TRUNCATE,REFERENCES,TRIGGER,delete"
+ objs: "test_table2,test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that permissions were revoked (table1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that permissions were revoked (table2)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(0 rows)'"
+ - "result_table2.stdout_lines[-1] == '(0 rows)'"
+
+- name: Revoke database privileges
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ type: "database"
+ state: "absent"
+ roles: "{{ db_user1 }}"
+ privs: "Create,connect,TEMP"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+
+- name: Check that the user has the requested permissions (database)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}' not in result_database.stdout"
+
+- name: Grant database privileges
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ register: result
+
+- name: Check that ansible reports it changed the user
+ assert:
+ that:
+ - result is changed
+
+- name: Check that the user has the requested permissions (database)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
+- assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "'{{ db_user1 }}=Cc' in result_database.stdout"
+
+- name: Grant a single privilege on a table
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: "INSERT"
+ objs: "test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(1 row)'"
+ - "'{{ result_table1.stdout_lines[-2] | trim }}' == 'INSERT'"
+
+- name: Grant many privileges on multiple tables
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user1 }}"
+ privs: 'INSERT,SELECT,UPDATE,DELETE,TRUNCATE,REFERENCES,trigger'
+ objs: "test_table2,test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table1';" | psql {{ db_name }}
+ register: result_table1
+
+- name: Check that permissions were added (table2)
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user1 }}' and table_name='test_table2';" | psql {{ db_name }}
+ register: result_table2
+
+- assert:
+ that:
+ - "result_table1.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table1.stdout"
+ - "'SELECT' in result_table1.stdout"
+ - "'UPDATE' in result_table1.stdout"
+ - "'DELETE' in result_table1.stdout"
+ - "'TRUNCATE' in result_table1.stdout"
+ - "'REFERENCES' in result_table1.stdout"
+ - "'TRIGGER' in result_table1.stdout"
+ - "result_table2.stdout_lines[-1] == '(7 rows)'"
+ - "'INSERT' in result_table2.stdout"
+ - "'SELECT' in result_table2.stdout"
+ - "'UPDATE' in result_table2.stdout"
+ - "'DELETE' in result_table2.stdout"
+ - "'TRUNCATE' in result_table2.stdout"
+ - "'REFERENCES' in result_table2.stdout"
+ - "'TRIGGER' in result_table2.stdout"
+
+# Check passing roles with dots
+# https://github.com/ansible/ansible/issues/63204
+- name: Create roles for further tests
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ loop:
+ - "{{ db_user_with_dots1 }}"
+ - "{{ db_user_with_dots2 }}"
+
+- name: Pass role with dots in its name to roles parameter
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ state: "present"
+ roles: "{{ db_user_with_dots1 }}"
+ privs: "INSERT"
+ objs: "test_table1"
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+
+- name: Check that permissions were added (table1)
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "select privilege_type from information_schema.role_table_grants where grantee='{{ db_user_with_dots1 }}' and table_name='test_table1'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# We don't need to check anything here, only that nothing failed
+- name: Pass role with dots in its name to target_roles parameter
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ state: "present"
+ roles: "{{ db_user_with_dots1 }}"
+ privs: "INSERT"
+ objs: TABLES
+ type: default_privs
+ target_roles: "{{ db_user_with_dots2 }}"
+ trust_input: no
+
+# Bugfix for https://github.com/ansible-collections/community.general/issues/857
+- name: Test passing lowercase PUBLIC role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ type: 'database'
+ privs: 'connect'
+ role: 'public'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["GRANT CONNECT ON database \"{{ db_name }}\" TO PUBLIC;"]
+
+#
+# Cleanup
+#
+- name: Cleanup db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "absent"
+ login_user: "{{ pg_user }}"
+
+- name: Check that database was destroyed
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Cleanup test user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ item }}"
+ state: 'absent'
+ login_user: "{{ pg_user }}"
+ db: postgres
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+ - "{{ db_user3 }}"
+ - "{{ db_user_with_dots1 }}"
+ - "{{ db_user_with_dots2 }}"
+
+- name: Check that they were removed
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ register: result
+
+- assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml
new file mode 100644
index 00000000..9a06c9d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/postgresql_privs_session_role.yml
@@ -0,0 +1,102 @@
+- name: Create a high-privileged user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create a low-privileged user using the newly created user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role2 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "LOGIN"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Create table to be able to grant privileges
+ become_user: "{{ pg_user }}"
+ become: yes
+ shell: echo "CREATE TABLE test(i int); CREATE TABLE test2(i int);" | psql -AtXq "{{ db_session_role1 }}"
+
+- name: Grant select on the test table to the low-privileged user with admin option
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test
+ roles: "{{ db_session_role2 }}"
+ login_user: "{{ pg_user }}"
+ privs: select
+ admin_option: yes
+
+- name: Verify admin option was successful for grants
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: select
+ session_role: "{{ db_session_role2 }}"
+
+- name: Verify no privileges can be granted on the test2 table
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test2
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: update
+ session_role: "{{ db_session_role2 }}"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result is failed
+
+########################
+# Test trust_input param
+
+- name: Verify trust_input parameter
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_session_role1 }}"
+ type: table
+ objs: test2
+ roles: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ privs: update
+ session_role: "{{ dangerous_name }}"
+ trust_input: no
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml
new file mode 100644
index 00000000..a1d2805a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_privs/tasks/test_target_role.yml
@@ -0,0 +1,120 @@
+# Setup
+- name: Create a test user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be given permissions and used in other tests
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+#######################################
+# Test default_privs with target_role #
+#######################################
+
+# Test
+- name: Grant default privileges for new table objects
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_name }}"
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: "{{ db_user2 }}"
+ target_roles: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+- name: Check that default privileges are set
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: psql {{ db_name }} -c "SELECT defaclrole, defaclobjtype, defaclacl FROM pg_default_acl a JOIN pg_roles b ON a.defaclrole=b.oid;" -t
+ register: result
+
+- assert:
+ that: "'{{ db_user2 }}=r/{{ db_user1 }}' in '{{ result.stdout_lines[0] }}'"
+
+# Test
+- name: Revoke default privileges for new table objects
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_privs:
+ db: "{{ db_name }}"
+ state: absent
+ objs: TABLES
+ privs: SELECT
+ type: default_privs
+ role: "{{ db_user2 }}"
+ target_roles: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that: result is changed
+
+# Cleanup
+- name: Remove user given permissions
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Remove user owner of objects
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user3 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Destroy DBs
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ item }}"
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_name }}"
+ - "{{ db_session_role1 }}"
+
+- name: Remove test users
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ db: postgres
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/aliases
new file mode 100644
index 00000000..a892de40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/main.yml
new file mode 100644
index 00000000..507c1e23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_publication module
+- import_tasks: postgresql_publication_initial.yml
+ when: postgres_version_resp.stdout is version('10', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml
new file mode 100644
index 00000000..0300fc07
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_publication/tasks/postgresql_publication_initial.yml
@@ -0,0 +1,436 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# The file for testing postgresql_publication module.
+
+- vars:
+ test_table1: acme1
+ test_table2: acme2
+ test_table3: acme3
+ test_pub: acme_publ
+ test_role: alice
+ dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ test_schema: acme_schema
+ test_db: acme_db
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+ #################################################
+ # Test preparation, create database test objects:
+ - name: postgresql_publication - create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ test_db }}'
+
+ - name: postgresql_publication - create test schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema }}'
+
+ - name: postgresql_publication - create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_role }}'
+ role_attr_flags: SUPERUSER
+
+ - name: postgresql_publication - create test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+
+
+ ################
+ # Do main tests:
+
+ # Test
+ - name: postgresql_publication - create publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == false
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+
+ # Check
+ - name: postgresql_publication - check that nothing has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Test
+ - name: postgresql_publication - create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ # Check
+ - name: postgresql_publication - check that the publication has been created
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner = '10' AND puballtables = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
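+
+ # OID 10 is the bootstrap superuser, which is why the query above checks
+ # pubowner = '10' to verify the publication is owned by the superuser.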
+
+ # Test
+ - name: postgresql_publication - drop publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ state: absent
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\""]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ # Check
+ - name: postgresql_publication - check that nothing has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - drop publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ state: absent
+ cascade: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == false
+ - result.queries == ["DROP PUBLICATION \"{{ test_pub }}\" CASCADE"]
+
+ # Check
+ - name: postgresql_publication - check that publication does not exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Test
+ - name: postgresql_publication - create publication with tables, owner, params
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ test_role }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'insert'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR TABLE \"public\".\"{{ test_table1 }}\", \"{{ test_schema }}\".\"{{ test_table2 }}\" WITH (publish = 'insert')", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ test_role }}\""]
+ - result.owner == '{{ test_role }}'
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+ - result.parameters.publish.insert == true
+ - result.parameters.publish.delete == false
+
+ # Check 1
+ - name: postgresql_publication - check that test publication exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubdelete = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 2
+ - name: postgresql_publication - check that test_table1 from schema public is in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = 'public'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 3
+ - name: postgresql_publication - check that test_table2 from test schema is in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}' AND schemaname = '{{ test_schema }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - test trust_input parameter
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ session_role: '{{ dangerous_name }}'
+ owner: '{{ dangerous_name }}'
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ # Test
+ - name: postgresql_publication - add table to publication, change owner, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ pg_user }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+
+ # Check
+ - name: postgresql_publication - check that nothing changes after the previous step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner != '10' AND puballtables = 'f' AND pubinsert = 't' AND pubupdate = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ # Check
+ - name: postgresql_publication - check that 2 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ # Test
+ - name: postgresql_publication - add table to publication, change owner
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ owner: '{{ pg_user }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ - '{{ test_table3 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" ADD TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" OWNER TO \"{{ pg_user }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check owner has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubowner = '10'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Check 2
+ - name: postgresql_publication - check that 3 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # Test
+ - name: postgresql_publication - remove table from publication, check_mode
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'insert'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\""]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\"", "\"public\".\"{{ test_table3 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check that 3 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 3
+
+ # Check 2
+ - name: postgresql_publication - check no parameters have been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Test
+ - name: postgresql_publication - remove table from publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ name: '{{ test_pub }}'
+ tables:
+ - '{{ test_table1 }}'
+ - '{{ test_schema }}.{{ test_table2 }}'
+ parameters:
+ publish: 'delete'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER PUBLICATION \"{{ test_pub }}\" DROP TABLE \"public\".\"{{ test_table3 }}\"", "ALTER PUBLICATION \"{{ test_pub }}\" SET (publish = 'delete')"]
+ - result.tables == ["\"public\".\"{{ test_table1 }}\"", "\"{{ test_schema }}\".\"{{ test_table2 }}\""]
+
+ # Check 1
+ - name: postgresql_publication - check that 2 tables are in publication
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication_tables WHERE pubname = '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ # Check 2
+ - name: postgresql_publication - check parameter has been changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}' AND pubinsert = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ always:
+ ###########
+ # Clean up:
+
+ - name: postgresql_publication - remove test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ maintenance_db: postgres
+ name: '{{ test_db }}'
+ state: absent
+
+ - name: postgresql_publication - remove test role
+ <<: *task_parameters
+ postgresql_user:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ test_role }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/main.yml
new file mode 100644
index 00000000..7b24dbf9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_query module
+- import_tasks: postgresql_query_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml
new file mode 100644
index 00000000..6f71480c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_query/tasks/postgresql_query_initial.yml
@@ -0,0 +1,534 @@
+- name: postgresql_query - drop test table if exists
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "DROP TABLE IF EXISTS test_table;"
+ ignore_errors: true
+
+- name: postgresql_query - create test table called test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "CREATE TABLE test_table (id int, story text);"
+ ignore_errors: true
+
+- name: postgresql_query - insert some data into test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: psql postgres -U "{{ pg_user }}" -t -c "INSERT INTO test_table (id, story) VALUES (1, 'first'), (2, 'second'), (3, 'third');"
+ ignore_errors: true
+
+- name: postgresql_query - remove SQL script if exists
+ become: true
+ file:
+ path: ~{{ pg_user}}/test.sql
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_query - create an empty file to check permission
+ become: true
+ file:
+ path: ~{{ pg_user}}/test.sql
+ state: touch
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0644'
+ register: sql_file_created
+ ignore_errors: true
+
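+# The non-ASCII string literal in the script prepared below is intentional:
+# it exercises the encoding parameter when the script is executed later.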
+- name: postgresql_query - prepare SQL script
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: echo "{{ item }}" >> ~{{ pg_user}}/test.sql
+ ignore_errors: true
+ with_items:
+ - SELECT version();
+ - SELECT story FROM test_table
+ - WHERE id = %s OR story = 'Данные';
+ when: sql_file_created
+
+- name: postgresql_query - analyze test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: ANALYZE test_table
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == 'ANALYZE test_table'
+ - result.query_list == ['ANALYZE test_table']
+ - result.rowcount == 0
+ - result.statusmessage == 'ANALYZE'
+ - result.query_result == {}
+ - result.query_all_results == [{}]
+
+- name: postgresql_query - run queries from SQL script
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ path_to_script: ~{{ pg_user }}/test.sql
+ positional_args:
+ - 1
+ encoding: UTF-8
+ register: result
+ ignore_errors: true
+ when: sql_file_created
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "\nSELECT story FROM test_table\nWHERE id = 1 OR story = 'Данные'"
+ - result.query_result[0].story == 'first'
+ - result.query_all_results[0][0].version is search('PostgreSQL')
+ - result.query_all_results[1][0].story == 'first'
+ - result.rowcount == 2
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ when: sql_file_created
+
+- name: postgresql_query - simple select query to test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: SELECT * FROM test_table
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == 'SELECT * FROM test_table'
+ - result.rowcount == 3
+ - result.statusmessage == 'SELECT 3' or result.statusmessage == 'SELECT'
+ - result.query_result[0].id == 1
+ - result.query_result[1].id == 2
+ - result.query_result[2].id == 3
+ - result.query_result[0].story == 'first'
+ - result.query_result[1].story == 'second'
+ - result.query_result[2].story == 'third'
+
+- name: postgresql_query - select query with named args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: SELECT id FROM test_table WHERE id = %(id_val)s AND story = %(story_val)s
+ named_args:
+ id_val: 1
+ story_val: first
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT id FROM test_table WHERE id = 1 AND story = 'first'" or result.query == "SELECT id FROM test_table WHERE id = 1 AND story = E'first'"
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ - result.query_result[0].id == 1
+
+- name: postgresql_query - select query with positional arguments
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: SELECT story FROM test_table WHERE id = %s AND story = %s
+ positional_args:
+ - 2
+ - second
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT story FROM test_table WHERE id = 2 AND story = 'second'" or result.query == "SELECT story FROM test_table WHERE id = 2 AND story = E'second'"
+ - result.rowcount == 1
+ - result.statusmessage == 'SELECT 1' or result.statusmessage == 'SELECT'
+ - result.query_result[0].story == 'second'
+
+- name: postgresql_query - simple update query
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: UPDATE test_table SET story = 'new' WHERE id = 3
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "UPDATE test_table SET story = 'new' WHERE id = 3"
+ - result.rowcount == 1
+ - result.statusmessage == 'UPDATE 1'
+ - result.query_result == {}
+
+- name: check the previous update
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: SELECT * FROM test_table WHERE story = 'new' AND id = 3
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_query - simple update query in check_mode
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3
+ register: result
+ check_mode: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "UPDATE test_table SET story = 'CHECK_MODE' WHERE id = 3"
+ - result.rowcount == 1
+ - result.statusmessage == 'UPDATE 1'
+ - result.query_result == {}
+
+- name: check that the previous check_mode update changed nothing
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: SELECT * FROM test_table WHERE story = 'CHECK_MODE' AND id = 3
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_query - try to update a non-existent row
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: UPDATE test_table SET story = 'new' WHERE id = 100
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "UPDATE test_table SET story = 'new' WHERE id = 100"
+ - result.rowcount == 0
+ - result.statusmessage == 'UPDATE 0'
+ - result.query_result == {}
+
+- name: postgresql_query - insert query
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: INSERT INTO test_table (id, story) VALUES (%s, %s)
+ positional_args:
+ - 4
+ - fourth
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "INSERT INTO test_table (id, story) VALUES (4, 'fourth')" or result.query == "INSERT INTO test_table (id, story) VALUES (4, E'fourth')"
+ - result.rowcount == 1
+ - result.statusmessage == 'INSERT 0 1'
+ - result.query_result == {}
+
+- name: postgresql_query - truncate test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: TRUNCATE test_table
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "TRUNCATE test_table"
+ - result.rowcount == 0
+ - result.statusmessage == 'TRUNCATE TABLE'
+ - result.query_result == {}
+
+- name: postgresql_query - alter test_table
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: ALTER TABLE test_table ADD COLUMN foo int
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "ALTER TABLE test_table ADD COLUMN foo int"
+ - result.rowcount == 0
+ - result.statusmessage == 'ALTER TABLE'
+
+- name: postgresql_query - vacuum without autocommit must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: VACUUM
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+
+- name: postgresql_query - autocommit in check_mode must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: VACUUM
+ autocommit: true
+ check_mode: true
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result.failed == true
+ - result.msg == "Using autocommit is mutually exclusive with check_mode"
+
+- name: postgresql_query - vacuum with autocommit
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ query: VACUUM
+ autocommit: true
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "VACUUM"
+ - result.rowcount == 0
+ - result.statusmessage == 'VACUUM'
+ - result.query_result == {}
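+# VACUUM cannot run inside a transaction block, so it fails unless autocommit: true
+# is set; and because an autocommitted statement cannot be rolled back, combining
+# autocommit with check_mode is rejected, as the tests above verify.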
+
+- name: postgresql_query - create test table for issue 59955
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: test_array_table
+ columns:
+ - arr_col int[]
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- set_fact:
+ my_list:
+ - 1
+ - 2
+ - 3
+ my_arr: '{1, 2, 3}'
+ when: postgres_version_resp.stdout is version('9.4', '>=')
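+# Lists passed via positional_args/named_args are converted to a PostgreSQL
+# array literal ('{1, 2, 3}'); the assertions below check exactly that.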
+
+- name: postgresql_query - insert array into test table by positional args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: INSERT INTO test_array_table (arr_col) VALUES (%s)
+ positional_args:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.query == "INSERT INTO test_array_table (arr_col) VALUES ('{1, 2, 3}')"
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_query - select array from test table by passing positional_args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: SELECT * FROM test_array_table WHERE arr_col = %s
+ positional_args:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_query - select array from test table by passing named_args
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: SELECT * FROM test_array_table WHERE arr_col = %(arr_val)s
+ named_args:
+ arr_val:
+ - '{{ my_list }}'
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_query - select array from test table by passing positional_args as a string
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: SELECT * FROM test_array_table WHERE arr_col = %s
+ positional_args:
+ - '{{ my_arr|string }}'
+ trust_input: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.query == "SELECT * FROM test_array_table WHERE arr_col = '{1, 2, 3}'"
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+- name: postgresql_query - test trust_input parameter
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ query: SELECT version()
+ trust_input: no
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
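+# With trust_input: no the module checks values such as session_role for
+# potentially dangerous characters and fails instead of sending them to the server.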
+
+- name: postgresql_query - clean up
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: test_array_table
+ state: absent
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+#############################
+# Check search_path parameter
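+# search_path makes the unqualified table names used below (test1, test2) resolve
+# against the listed schemas, like 'SET search_path TO query_test1, query_test2' in plain SQL.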
+
+- name: postgresql_query - create test schemas
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_schema:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ item }}'
+ loop:
+ - query_test1
+ - query_test2
+
+- name: postgresql_query - create test tables
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_table:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - 'query_test1.test1'
+ - 'query_test2.test2'
+
+- name: postgresql_query - insert data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'INSERT INTO {{ item }} (id) VALUES (1)'
+ search_path:
+ - query_test1
+ - query_test2
+ loop:
+ - test1
+ - test2
+
+- name: postgresql_query - get data
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT id FROM test1'
+ search_path:
+ - query_test1
+ - query_test2
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_query - get data, must fail
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_query:
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+ query: 'SELECT id FROM test1'
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/defaults/main.yml
new file mode 100644
index 00000000..ff6dd5cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+db_session_role1: 'session_role1'
+db_session_role2: 'session_role2'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/main.yml
new file mode 100644
index 00000000..d894dd04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- import_tasks: postgresql_schema_session_role.yml
+
+# Initial CI tests of postgresql_schema module
+- import_tasks: postgresql_schema_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
new file mode 100644
index 00000000..7d73ddb5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_initial.yml
@@ -0,0 +1,331 @@
+---
+
+# Setup
+- name: Create test roles
+ postgresql_user:
+ name: "{{ item }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: LOGIN
+ db: postgres
+ login_user: "{{ pg_user }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
+
+- name: Create DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+# Test: CREATE SCHEMA in checkmode
+- name: Create a new schema with name "acme" in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+
+- name: Check that the new schema "acme" does not exist
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: CREATE SCHEMA
+- name: Create a new schema with name "acme"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ trust_input: yes
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme"' ]
+
+- name: Check that the new schema "acme" exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA in checkmode
+- name: Drop schema "acme" in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is not changed
+
+- name: Check that the new schema "acme" still exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA
+- name: Drop schema "acme"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme"' ]
+
+- name: Check that no schema "acme" exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: trust_input parameter
+- name: Create a new schema with potentially dangerous owner name
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ owner: "{{ dangerous_name }}"
+ trust_input: no
+ register: result
+ ignore_errors: yes
+
+# Checks
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+# Test: CREATE SCHEMA; WITH TABLE for DROP CASCADE test
+- name: Create a new schema "acme"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ login_user: "{{ pg_user }}"
+ register: result
+
+- name: Create table in schema for DROP CASCADE check
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "CREATE TABLE acme.table1()"
+ register: result2
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme"' ]
+ - result2.changed == true
+ - result2.statusmessage == 'CREATE TABLE'
+
+- name: Check that the new schema "acme" exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: Check that the new table "table1" in schema 'acme' exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'acme' AND tablename = 'table1')"
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA ... CASCADE;
+- name: Drop schema "acme" with cascade
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ cascade_drop: yes
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme" CASCADE' ]
+
+- name: Check that no schema "acme" exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Test: CREATE SCHEMA WITH OWNER ...;
+- name: Create a new schema "acme" with a user "{{ db_user2 }}" who will own it
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ owner: "{{ db_user2 }}"
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.schema == 'acme'
+ - result.queries == [ 'CREATE SCHEMA "acme" AUTHORIZATION "{{ db_user2 }}"' ]
+
+- name: Check that the new schema "acme" exists and "{{ db_user2 }}" owns it
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name,schema_owner FROM information_schema.schemata WHERE schema_name = 'acme' AND schema_owner = '{{ db_user2 }}'"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Test: DROP SCHEMA
+- name: Drop schema "acme"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_name }}"
+ name: acme
+ state: absent
+ login_user: "{{ pg_user }}"
+ register: result
+
+# Checks
+- assert:
+ that:
+ - result is changed
+ - result.queries == [ 'DROP SCHEMA "acme"' ]
+
+- name: Check that no schema "acme" exists
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'acme'"
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+
+# Cleanup
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: absent
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: Destroy DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml
new file mode 100644
index 00000000..6aaefff5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_schema/tasks/postgresql_schema_session_role.yml
@@ -0,0 +1,78 @@
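+# session_role makes the module switch to the given role (SET ROLE) after
+# connecting, so every action below is limited to that role's privileges.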
+- name: Create a highly privileged user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_session_role1 }}"
+ state: "present"
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ db: postgres
+
+- name: Create DB as session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ session_role: "{{ db_session_role1 }}"
+ register: result
+
+- name: Create schema in own database
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ session_role: "{{ db_session_role1 }}"
+
+- name: Create schema in own database, should be owned by session_role
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ owner: "{{ db_session_role1 }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: Fail when creating schema in postgres database as a regular user
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: "{{ db_session_role1 }}"
+ session_role: "{{ db_session_role1 }}"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result is failed
+
+- name: Drop test db
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_session_role1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Drop test users
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ item }}"
+ state: absent
+ login_user: "{{ pg_user }}"
+ db: postgres
+ with_items:
+ - "{{ db_session_role1 }}"
+ - "{{ db_session_role2 }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/defaults/main.yml
new file mode 100644
index 00000000..049b5531
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+db_default: 'postgres'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/main.yml
new file mode 100644
index 00000000..b5306900
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_sequence module
+- import_tasks: postgresql_sequence_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml
new file mode 100644
index 00000000..f3672f26
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_sequence/tasks/postgresql_sequence_initial.yml
@@ -0,0 +1,730 @@
+---
+# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Preparation for tests.
+- name: postgresql_sequence - create a user to be owner of a database
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+
+- name: postgresql_sequence - create DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: present
+ name: "{{ db_name }}"
+ owner: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+- name: Create a user to be owner of a sequence
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: present
+ encrypted: yes
+ password: password
+ role_attr_flags: LOGIN
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: postgresql_sequence - create a schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_schema
+
+####################
+# Test: create sequence in checkmode
+- name: postgresql_sequence - create a new sequence with name "foobar" in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar" does not exist
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+ - result.statusmessage == 'SELECT 0'
+
+####################
+# Test: create sequence
+- name: postgresql_sequence - create a new sequence with name "foobar"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar" exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence in checkmode
+- name: postgresql_sequence - drop a sequence called foobar
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" still exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence
+- name: postgresql_sequence - drop a sequence called foobar
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar'
+ - result.queries == ["DROP SEQUENCE \"public\".\"foobar\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" does not exist
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: drop nonexistent sequence
+- name: postgresql_sequence - drop a sequence called foobar which does not exist
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar
+ state: absent
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is not changed
+ - result.sequence == 'foobar'
+ - result.queries == []
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar" does not exist
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with options
+- name: postgresql_sequence - create a descending sequence called foobar_desc, starting at 101 and cycling between 1 and 1000
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ increment: -1
+ start: 101
+ minvalue: 1
+ maxvalue: 1000
+ cycle: yes
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.increment == '-1'
+ - result.minvalue == '1'
+ - result.maxvalue == '1000'
+ - result.cycle == 'YES'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar_desc\" INCREMENT BY -1 MINVALUE 1 MAXVALUE 1000 START WITH 101 CYCLE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar_desc" exists
+  become_user: "{{ pg_user }}"
+  become: yes
+  postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: rename a sequence in checkmode
+- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ rename_to: foobar_with_options
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.newname == 'foobar_with_options'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_desc" still exists and is not renamed
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_desc'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: rename a sequence
+- name: postgresql_sequence - rename an existing sequence named foobar_desc to foobar_with_options
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_desc
+ rename_to: foobar_with_options
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_desc'
+ - result.newname == 'foobar_with_options'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_desc\" RENAME TO \"foobar_with_options\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the renamed sequence "foobar_with_options" exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change schema of a sequence in checkmode
+- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ newschema: foobar_schema
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.schema == 'public'
+ - result.newschema == 'foobar_schema'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" still exists in the old schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'public'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change schema of a sequence
+- name: postgresql_sequence - change schema of an existing sequence from public to foobar_schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ newschema: foobar_schema
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.schema == 'public'
+ - result.newschema == 'foobar_schema'
+ - result.queries == ["ALTER SEQUENCE \"public\".\"foobar_with_options\" SET SCHEMA \"foobar_schema\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" exists in new schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name,sequence_schema FROM information_schema.sequences WHERE sequence_name = 'foobar_with_options' AND sequence_schema = 'foobar_schema'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change owner of a sequence in checkmode
+- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}"
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ schema: foobar_schema
+ owner: "{{ db_user1 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.owner == "{{ pg_user }}"
+ - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" still has the old owner
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar_with_options' and
+ n.nspname = 'foobar_schema' and
+ a.rolname = '{{ pg_user }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: change owner of a sequence
+- name: postgresql_sequence - change owner of an existing sequence from "{{ pg_user }}" to "{{ db_user1 }}"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar_with_options
+ schema: foobar_schema
+ owner: "{{ db_user1 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar_with_options'
+ - result.owner == "{{ pg_user }}"
+ - result.queries == ["ALTER SEQUENCE \"foobar_schema\".\"foobar_with_options\" OWNER TO \"{{ db_user1 }}\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "foobar_with_options" has a new owner
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar_with_options' and
+ n.nspname = 'foobar_schema' and
+ a.rolname = '{{ db_user1 }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence with cascade
+
+# CREATE SEQUENCE seq1;
+# CREATE TABLE t1 (f1 INT NOT NULL DEFAULT nextval('seq1'));
+# DROP SEQUENCE seq1 CASCADE;
+- name: postgresql_sequence - create sequence for drop cascade test
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+
+- name: postgresql_sequence - create table which use sequence for drop cascade test
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: t1
+ columns:
+ - f1 INT NOT NULL DEFAULT nextval('seq1')
+
+####################
+# Test: drop sequence with cascade in checkmode
+- name: postgresql_sequence - drop with cascade a sequence called seq1
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+ state: absent
+ cascade: yes
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'seq1'
+ - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "seq1" still exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: drop sequence with cascade
+- name: postgresql_sequence - drop with cascade a sequence called seq1
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: seq1
+ state: absent
+ cascade: yes
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'seq1'
+ - result.queries == ["DROP SEQUENCE \"public\".\"seq1\" CASCADE"]
+
+# Real SQL check
+- name: postgresql_sequence - check that the sequence "seq1" does not exist
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'seq1'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with owner in checkmode
+- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: yes
+ check_mode: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar2
+ owner: "{{ db_user2 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar2'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar2" does not exist
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 0
+
+####################
+# Test: create sequence with owner
+- name: postgresql_sequence - create a new sequence with name "foobar2" with owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: foobar2
+ owner: "{{ db_user2 }}"
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is changed
+ - result.sequence == 'foobar2'
+ - result.queries == ["CREATE SEQUENCE \"public\".\"foobar2\"", "ALTER SEQUENCE \"public\".\"foobar2\" OWNER TO \"ansible_db_user2\""]
+
+# Real SQL check
+- name: postgresql_sequence - check that the new sequence "foobar2" exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT sequence_name FROM information_schema.sequences WHERE sequence_name = 'foobar2'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_sequence - check that the sequence "foobar2" has owner "{{ db_user2 }}"
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ query: "SELECT c.relname,a.rolname,n.nspname
+ FROM pg_class as c
+ JOIN pg_authid as a on (c.relowner = a.oid)
+ JOIN pg_namespace as n on (c.relnamespace = n.oid)
+ WHERE c.relkind = 'S' and
+ c.relname = 'foobar2' and
+ n.nspname = 'public' and
+ a.rolname = '{{ db_user2 }}'"
+ register: result
+
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result.rowcount == 1
+
+####################
+# Test: create sequence with trust_input
+- name: postgresql_sequence - check that trust_input works as expected
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_sequence:
+ db: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+ name: 'just_a_name"; SELECT * FROM information_schema.tables; --'
+ trust_input: no
+ owner: "{{ db_user2 }}"
+ ignore_errors: yes
+ register: result
+
+# Checks
+- name: postgresql_sequence - check with assert the output
+ assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+# Cleanup
+- name: postgresql_sequence - destroy DB
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ pg_user }}"
+
+- name: remove test roles
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ state: absent
+ login_db: "{{ db_default }}"
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ loop:
+ - "{{ db_user1 }}"
+ - "{{ db_user2 }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/main.yml
new file mode 100644
index 00000000..9750fff7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_set module
+- include_tasks: postgresql_set_initial.yml
+ when: postgres_version_resp.stdout is version('9.6', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml
new file mode 100644
index 00000000..af0502af
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_set/tasks/postgresql_set_initial.yml
@@ -0,0 +1,375 @@
+# Test code for the postgresql_set module
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# Notice: assertions differ for Ubuntu 16.04 and FreeBSD because they don't work
+# correctly for these tests. There are some oddities specific to Shippable CI.
+# However, I checked all the points manually (including Ubuntu 16.04 and FreeBSD)
+# and everything worked as expected.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
+
+ block:
+    - name: postgresql_set - preparation for the next step
+      <<: *task_parameters
+      postgresql_set:
+        <<: *pg_parameters
+        name: work_mem
+        reset: yes
+
+ #####################
+ # Testing check_mode:
+ - name: postgresql_set - get work_mem initial value
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW work_mem
+ register: before
+
+ - name: postgresql_set - set work_mem (restart is not required), check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 12MB
+ register: set_wm
+ check_mode: yes
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.prev_val_pretty == before.query_result[0].work_mem
+ - set_wm.value_pretty == '12MB'
+ - set_wm.restart_required == false
+
+ - name: postgresql_set - get work_mem value to check, must be the same as initial
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW work_mem
+ register: after
+
+ - assert:
+ that:
+ - before.query_result[0].work_mem == after.query_result[0].work_mem
+ ######
+ #
+
+ - name: postgresql_set - set work_mem (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 12mb
+ register: set_wm
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.value_pretty == '12MB'
+ - set_wm.value_pretty != set_wm.prev_val_pretty
+ - set_wm.restart_required == false
+ - set_wm.value.value == 12582912
+ - set_wm.value.unit == 'b'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != "FreeBSD"
+
+ - assert:
+ that:
+ - set_wm.name == 'work_mem'
+ - set_wm.changed == true
+ - set_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+ - name: postgresql_set - reset work_mem (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ reset: yes
+ register: reset_wm
+
+ - assert:
+ that:
+ - reset_wm.name == 'work_mem'
+ - reset_wm.changed == true
+ - reset_wm.value_pretty != reset_wm.prev_val_pretty
+ - reset_wm.restart_required == false
+ - reset_wm.value.value != '12582912'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != "FreeBSD"
+
+ - assert:
+ that:
+ - reset_wm.name == 'work_mem'
+ - reset_wm.changed == true
+ - reset_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+ - name: postgresql_set - reset work_mem again to check that nothing changed (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ reset: yes
+ register: reset_wm2
+
+ - assert:
+ that:
+ - reset_wm2.name == 'work_mem'
+ - reset_wm2.changed == false
+ - reset_wm2.value_pretty == reset_wm2.prev_val_pretty
+ - reset_wm2.restart_required == false
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+
+ - assert:
+ that:
+ - reset_wm2.name == 'work_mem'
+ - reset_wm2.changed == false
+ - reset_wm2.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+
+    - name: postgresql_set - preparation for the next step
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: 14MB
+
+ - name: postgresql_set - set work_mem to initial state (restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: work_mem
+ value: default
+ register: def_wm
+
+ - assert:
+ that:
+ - def_wm.name == 'work_mem'
+ - def_wm.changed == true
+ - def_wm.value_pretty != def_wm.prev_val_pretty
+ - def_wm.restart_required == false
+ - def_wm.value.value != '14680064'
+ when:
+ - ansible_distribution != "Ubuntu"
+ - ansible_distribution_major_version != '16'
+ - ansible_distribution != 'FreeBSD'
+
+ - assert:
+ that:
+ - def_wm.name == 'work_mem'
+ - def_wm.changed == true
+ - def_wm.restart_required == false
+ when:
+ - ansible_distribution == "Ubuntu"
+ - ansible_distribution_major_version == '16'
+ - ansible_distribution != 'FreeBSD'
+
+ - name: postgresql_set - set shared_buffers (restart is required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: shared_buffers
+ value: 111MB
+ register: set_shb
+
+ - assert:
+ that:
+ - set_shb.name == 'shared_buffers'
+ - set_shb.changed == true
+ - set_shb.restart_required == true
+
+ # We don't check value.unit because it is none
+ - name: postgresql_set - set autovacuum (enabled by default, restart is not required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == true
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+  # Test check_mode, step 1. In the previous test we set autovacuum = 'off'
+ - name: postgresql - try to change autovacuum again in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: on
+ register: set_aut
+ check_mode: yes
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == true
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ # Test check_mode, step 2
+ - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+ check_mode: yes
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == false
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ # Additional check by SQL query:
+ - name: postgresql_set - get autovacuum value to check, must be off
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: SHOW autovacuum
+ register: result
+
+ - assert:
+ that:
+ - result.query_result[0].autovacuum == 'off'
+
+  # Test check_mode, step 3. It differs from the previous test:
+  # it runs without check_mode: yes.
+  # Before the check_mode tests, autovacuum was off.
+ - name: postgresql - check that autovacuum wasn't actually changed after change in check_mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: autovacuum
+ value: off
+ register: set_aut
+
+ - assert:
+ that:
+ - set_aut.name == 'autovacuum'
+ - set_aut.changed == false
+ - set_aut.restart_required == false
+ - set_aut.value.value == 'off'
+
+ #################
+ # Bugfix of 67377
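+  # (a value containing the substring 'mb', like the path below, must be kept
+  # as a literal string rather than being parsed as a memory unit)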
+ - name: archive command with mb
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ trust_input: yes
+ name: archive_command
+ value: 'test ! -f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f'
+
+ # Check:
+ - name: check value
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: select reset_val from pg_settings where name = 'archive_command'
+ register: result
+
+ - assert:
+ that:
+ - result.query_result.0.reset_val == "test ! -f /mnt/postgres/mb/%f && cp %p /mnt/postgres/mb/%f"
+
+ #############################
+ # Check trust_input parameter
+ - name: postgresql_set - check trust_input
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: shared_buffers
+ value: 111MB
+ trust_input: no
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ###############################################################################
+ # Bugfix of https://github.com/ansible-collections/community.general/issues/775
+ - name: postgresql_set - turn on archive mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_mode
+ value: 'on'
+
+ - name: Restart PostgreSQL
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: restarted
+
+ - name: postgresql_set - set empty string as value
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: postgresql_set - set empty string as value again
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: postgresql_set - set empty string as value again in check mode
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: archive_command
+ value: ''
+ register: result
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/main.yml
new file mode 100644
index 00000000..d44aab9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_slot module
+# Physical replication slots are available since PostgreSQL 9.4
+- import_tasks: postgresql_slot_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml
new file mode 100644
index 00000000..4f009ac0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_slot/tasks/postgresql_slot_initial.yml
@@ -0,0 +1,735 @@
+---
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: postgresql_slot - set max_replication_slots
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: max_replication_slots
+ value: '10'
+
+- name: postgresql_slot - set wal_level to logical
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_set:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: wal_level
+ value: logical
+
+# To avoid CI timeouts
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: yes
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: yes
+
+- name: postgresql_slot - stop PostgreSQL
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: stopped
+ when: not (ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8')
+
+- name: postgresql_slot - pause between stop and start PostgreSQL
+ ansible.builtin.pause:
+ seconds: 5
+
+- name: postgresql_slot - start PostgreSQL
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: started
+
+#
+# state=present
+#
+
+# check_mode
+- name: postgresql_slot - create slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ check_mode: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# true mode
+- name: postgresql_slot - create physical slot
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot0', false)"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot0')"]
+ when: postgres_version_resp.stdout is version('9.6', '<')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# check mode
+- name: postgresql_slot - try create physical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ check_mode: yes
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# true mode
+- name: postgresql_slot - try create physical slot again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot0
+ slot_type: physical
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot0' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# immediately_reserve
+#
+
+- name: postgresql_slot - create physical slot with immediately_reserve
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ immediately_reserve: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_physical_replication_slot('slot1', true)"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical' and restart_lsn is not NULL"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+#
+# slot_type: logical
+#
+# Available since PostgreSQL 10.
+#
+# On the RedHat family these tests failed with:
+# ERROR: could not access file "test_decoding": No such file or directory
+# (the distribution does not ship the test_decoding output plugin),
+# so the tests are restricted to Ubuntu. This is acceptable because the
+# module's functionality depends only on the PostgreSQL server version.
+
+# check_mode
+- name: postgresql_slot - create slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ check_mode: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - create logical slot
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_logical_replication_slot('slot2', 'test_decoding')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# check mode
+- name: postgresql_slot - try create logical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ check_mode: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - try create logical slot again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ slot_type: logical
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# output_plugin: test_decoding
+#
+
+- name: postgresql_slot - create logical slot with output_plugin
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot3
+ slot_type: logical
+ output_plugin: test_decoding
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_create_logical_replication_slot('slot3', 'test_decoding')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that the slot exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot3' and slot_type = 'logical' and plugin = 'test_decoding'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# state: absent for logical slots
+#
+
+# check_mode
+- name: postgresql_slot - drop logical slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ check_mode: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - drop logical slot
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_drop_replication_slot('slot2')"]
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that the slot does not exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# check mode
+- name: postgresql_slot - try drop logical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ check_mode: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# true mode
+- name: postgresql_slot - try drop logical slot again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot2
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot2' and slot_type = 'logical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+
+#
+# state=absent for physical slots
+#
+
+# check_mode
+- name: postgresql_slot - drop physical slot in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ check_mode: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 1
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# true mode
+- name: postgresql_slot - drop physical slot
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["SELECT pg_drop_replication_slot('slot1')"]
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that the slot does not exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# check mode
+- name: postgresql_slot - try drop physical slot again in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ check_mode: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# true mode
+- name: postgresql_slot - try drop physical slot again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: slot1
+ state: absent
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check, rowcount must be 0
+- name: postgresql_slot - check that nothing changed after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_replication_slots WHERE slot_name = 'slot1' and slot_type = 'physical'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+# Check trust input
+- name: postgresql_slot - try using a bad name
+ postgresql_slot:
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ db: postgres
+ name: slot1
+ trust_input: no
+ register: result
+ ignore_errors: true
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+- name: postgresql_slot - check that using a dangerous name fails
+ assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+ when: postgres_version_resp.stdout is version('9.6', '>=')
+
+#
+# clean up
+#
+- name: postgresql_slot - clean up
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_slot:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: "{{ item }}"
+ state: absent
+ ignore_errors: yes
+ when: postgres_version_resp.stdout is version('10', '>=') and ansible_distribution == 'Ubuntu'
+ with_items:
+ - slot0
+ - slot3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/aliases
new file mode 100644
index 00000000..950c1b9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/defaults/main.yml
new file mode 100644
index 00000000..e1433f9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/defaults/main.yml
@@ -0,0 +1,15 @@
+pg_user: postgres
+db_default: postgres
+master_port: 5433
+replica_port: 5434
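+# The master (publisher) and replica (subscriber) instances are expected on
+# ports 5433 and 5434 respectively; presumably they are provided by the
+# setup_postgresql_replication role listed in meta/main.yml.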
+
+test_table1: acme1
+test_pub: first_publication
+test_pub2: second_publication
+replication_role: logical_replication
+replication_pass: alsdjfKJKDf1#
+test_db: acme_db
+test_subscription: test
+test_role1: alice
+test_role2: bob
+conn_timeout: 100
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/meta/main.yml
new file mode 100644
index 00000000..d72e4d23
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_replication
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/main.yml
new file mode 100644
index 00000000..e440e8c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial tests of postgresql_subscription module:
+
+- import_tasks: setup_publication.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
+
+- import_tasks: postgresql_subscription_initial.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version >= '18'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml
new file mode 100644
index 00000000..695edd0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/postgresql_subscription_initial.yml
@@ -0,0 +1,672 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
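+ # The task_parameters and pg_parameters anchors above are merged into each
+ # task in the block below via '<<:'; every task therefore runs as
+ # {{ pg_user }} and registers its output as 'result'.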
+ block:
+
+ - name: Create roles to test owner parameter
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ item }}'
+ role_attr_flags: SUPERUSER,LOGIN
+ loop:
+ - '{{ test_role1 }}'
+ - '{{ test_role2 }}'
+
+ ####################
+ # Test mode: present
+ ####################
+ - name: Create subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }}"]
+ - result.exists == true
+ - result.initial_state == {}
+ - result.final_state.owner == '{{ pg_user }}'
+ - result.final_state.enabled == true
+ - result.final_state.publications == ["{{ test_pub }}"]
+ - result.final_state.synccommit == true
+ - result.final_state.slotname == '{{ test_subscription }}'
+ - result.final_state.conninfo.dbname == '{{ test_db }}'
+ - result.final_state.conninfo.host == '127.0.0.1'
+ - result.final_state.conninfo.port == {{ master_port }}
+ - result.final_state.conninfo.user == '{{ replication_role }}'
+ - result.final_state.conninfo.password == '{{ replication_pass }}'
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ###################
+ # Test mode: absent
+ ###################
+
+ - name: Drop subscription in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"]
+ - result.final_state == result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }}"]
+ - result.final_state != result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ ##################
+ # Test owner param
+ ##################
+
+ - name: Create with owner
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role1 }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result.final_state.owner == '{{ test_role1 }}'
+ - result.queries[1] == 'ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role1 }}"'
+
+ - name: Try to set this owner again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role1 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+ - result.initial_state == result.final_state
+ - result.final_state.owner == '{{ test_role1 }}'
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set another owner in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role2 }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.initial_state == result.final_state
+ - result.final_state.owner == '{{ test_role1 }}'
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role1 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set another owner
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ owner: '{{ test_role2 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.initial_state != result.final_state
+ - result.final_state.owner == '{{ test_role2 }}'
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} OWNER TO "{{ test_role2 }}"']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription AS s
+ JOIN pg_catalog.pg_roles AS r ON s.subowner = r.oid
+ WHERE subname = '{{ test_subscription }}' and r.rolname = '{{ test_role2 }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ##########################
+ # Test trust_input param #
+ ##########################
+
+ - name: Test trust_input parameter
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ session_role: '{{ dangerous_name }}'
+ owner: '{{ test_role1 }}'
+ trust_input: no
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ##############
+ # Test cascade
+ ##############
+
+ - name: Drop subscription cascade in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ cascade: yes
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"]
+ - result.final_state == result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop subscription cascade
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
+ cascade: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["DROP SUBSCRIPTION {{ test_subscription }} CASCADE"]
+ - result.final_state != result.initial_state
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ ###########################
+ # Test subsparams parameter
+ ###########################
+
+ - name: Create subscription with subsparams
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications: '{{ test_pub }}'
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ subsparams:
+ enabled: no
+ synchronous_commit: no
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["CREATE SUBSCRIPTION test CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }}' PUBLICATION {{ test_pub }} WITH (enabled = false, synchronous_commit = false)"]
+ - result.exists == true
+ - result.final_state.enabled == false
+ - result.final_state.synccommit == false
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subenabled = 'f' AND subsynccommit = 'false'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Enable changed params
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ publications: '{{ test_pub }}'
+ subsparams:
+ enabled: yes
+ synchronous_commit: yes
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} ENABLE", "ALTER SUBSCRIPTION {{ test_subscription }} SET (synchronous_commit = true)"]
+ - result.exists == true
+ - result.final_state.enabled == true
+ - result.final_state.synccommit == true
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subenabled = 't' AND subsynccommit = 'true'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Enable the same params again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ publications: '{{ test_pub }}'
+ subsparams:
+ enabled: yes
+ synchronous_commit: yes
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == []
+ - result.exists == true
+ - result.final_state == result.initial_state
+ - result.final_state.enabled == true
+ - result.final_state.synccommit == true
+
+ ##########################
+ # Test change publications
+ ##########################
+
+ - name: Change publications in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications == result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}']
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change publications
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications != result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}']
+ - result.queries == ['ALTER SUBSCRIPTION {{ test_subscription }} SET PUBLICATION {{ test_pub }}, {{ test_pub2 }}']
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change publications with the same values again
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ publications:
+ - '{{ test_pub }}'
+ - '{{ test_pub2 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.final_state.publications == result.initial_state.publications
+ - result.final_state.publications == ['{{ test_pub }}', '{{ test_pub2 }}']
+ - result.queries == []
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: >
+ SELECT subname FROM pg_subscription WHERE subname = '{{ test_subscription }}'
+ AND subpublications = '{"{{ test_pub }}", "{{ test_pub2 }}"}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ ######################
+ # Test update conninfo
+ ######################
+
+ - name: Change conninfo in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: no
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"]
+ - result.initial_state.conninfo == result.final_state.conninfo
+
+ - name: Change conninfo
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} CONNECTION 'host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}'"]
+ - result.initial_state.conninfo != result.final_state.conninfo
+
+ - name: Check
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ query: "SELECT * FROM pg_subscription WHERE subname = '{{ test_subscription }}'"
+
+ - assert:
+ that:
+ - result.query_result[0].subconninfo == "host=127.0.0.1 port={{ master_port }} user={{ replication_role }} password={{ replication_pass }} dbname={{ test_db }} connect_timeout={{ conn_timeout }}"
+
+ - name: Try to change conninfo again with the same values
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: present
+ connparams:
+ host: 127.0.0.1
+ port: '{{ master_port }}'
+ user: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ dbname: '{{ test_db }}'
+ connect_timeout: '{{ conn_timeout }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == []
+ - result.initial_state.conninfo == result.final_state.conninfo
+ - result.final_state.conninfo.connect_timeout == {{ conn_timeout }}
+
+ ####################
+ # Test state refresh
+ ####################
+
+ - name: Refresh in check mode
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: refresh
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"]
+
+ - name: Refresh
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: refresh
+
+ - assert:
+ that:
+ - result is changed
+ - result.name == '{{ test_subscription }}'
+ - result.queries == ["ALTER SUBSCRIPTION {{ test_subscription }} REFRESH PUBLICATION"]
+
+ ##########
+ # Clean up
+ ##########
+ - name: Drop subscription
+ <<: *task_parameters
+ postgresql_subscription:
+ <<: *pg_parameters
+ login_port: '{{ replica_port }}'
+ name: '{{ test_subscription }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml
new file mode 100644
index 00000000..dc99f89d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_subscription/tasks/setup_publication.yml
@@ -0,0 +1,84 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Preparation for further tests of postgresql_subscription module.
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ test_db }}'
+
+ block:
+ - name: postgresql_publication - create test db
+ <<: *task_parameters
+ postgresql_db:
+ login_user: '{{ pg_user }}'
+ login_port: '{{ master_port }}'
+ maintenance_db: '{{ db_default }}'
+ name: '{{ test_db }}'
+
+ - name: postgresql_publication - create test role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ replication_role }}'
+ password: '{{ replication_pass }}'
+ role_attr_flags: LOGIN,REPLICATION
+
+ - name: postgresql_publication - create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_table1 }}'
+ columns:
+ - id int
+
+ - name: Master - dump schema
+ <<: *task_parameters
+ shell: pg_dumpall -p '{{ master_port }}' -s > /tmp/schema.sql
+
+ - name: Replica - restore schema
+ <<: *task_parameters
+ shell: psql -p '{{ replica_port }}' -f /tmp/schema.sql
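+ # Copy the schema (including {{ test_table1 }}) from the master to the
+ # replica so the subscriber side has matching tables for the publications
+ # created below.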
+
+ - name: postgresql_publication - create publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_pub }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.exists == true
+ - result.queries == ["CREATE PUBLICATION \"{{ test_pub }}\" FOR ALL TABLES"]
+ - result.owner == '{{ pg_user }}'
+ - result.alltables == true
+ - result.tables == []
+ - result.parameters.publish != {}
+
+ - name: postgresql_publication - create one more publication
+ <<: *task_parameters
+ postgresql_publication:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ name: '{{ test_pub2 }}'
+
+ - name: postgresql_publication - check the publication was created
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ login_port: '{{ master_port }}'
+ query: >
+ SELECT * FROM pg_publication WHERE pubname = '{{ test_pub }}'
+ AND pubowner = '10' AND puballtables = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/aliases
new file mode 100644
index 00000000..3e532f5d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group5
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/main.yml
new file mode 100644
index 00000000..3534c73b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_table module
+- import_tasks: postgresql_table_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml
new file mode 100644
index 00000000..c06403a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_table/tasks/postgresql_table_initial.yml
@@ -0,0 +1,899 @@
+# Test code for the postgresql_table module
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Create a role for tests:
+- name: postgresql_table - create a role for tests
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: alice
+
+- name: postgresql_table - create test schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: acme
+
+#
+# Check table creation
+#
+
+# Create a simple table in check_mode:
+- name: postgresql_table - create table in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ login_db: postgres
+ login_port: 5432
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: alice
+ columns: id int
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test1'
+ - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test1'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a simple table:
+- name: postgresql_table - create table
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ login_db: postgres
+ login_port: 5432
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: alice
+ columns: id int
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test1'
+ - result.queries == ['CREATE TABLE "test1" (id int)', 'ALTER TABLE "test1" OWNER TO "alice"']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "alice"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test1'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Check that the tableowner is alice
+- name: postgresql_table - check that table owner is alice
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'alice'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check create table like another table
+#
+
+# Create a table LIKE another table without any additional parameters in check_mode:
+- name: postgresql_table - create table like in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test2'
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1")']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a table LIKE another table without any additional parameters:
+- name: postgresql_table - create table like
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.table == 'test2'
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1")']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check drop table
+#
+
+# Drop a table in check_mode:
+- name: postgresql_table - drop table in check_mode
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test2"']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Drop a table:
+- name: postgresql_table - drop table
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test2"']
+ - result.state == 'absent'
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test2'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create a table like another table including:
+- name: postgresql_table - create table like with including indexes
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ including: indexes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TABLE "test2" (LIKE "test1" INCLUDING indexes)']
+ - result.state == 'present'
+ - result.storage_params == []
+ - result.tablespace == ""
+ - result.owner == "{{ pg_user }}"
+
+# Try to create the table again when it already exists:
+- name: postgresql_table - try to create existing table again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ like: test1
+ including: indexes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+
+# Drop the table to prepare for the next step:
+- name: postgresql_table - drop table
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: yes
+
+# Try to drop the non-existent table again:
+- name: postgresql_table - try drop dropped table again
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test2
+ state: absent
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is not changed
+
+#
+# Change ownership
+#
+
+# Create user to prepare for the next step:
+- name: postgresql_table - create the new user test_user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ login_user: "{{ pg_user }}"
+ db: postgres
+ name: test_user
+ state: present
+ ignore_errors: yes
+
+# Try to change owner to test_user in check_mode
+- name: postgresql_table - change table ownership to test_user in check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: test_user
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result.owner == 'alice'
+ - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"']
+ - result.state == 'present'
+ - result is changed
+
+# Check that the table owner was not changed to test_user
+- name: postgresql_table - check that table owner was not changed
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+# Try to change owner to test_user
+- name: postgresql_table - change table ownership to test_user
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test1
+ owner: test_user
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result.owner == 'test_user'
+ - result.queries == ['ALTER TABLE "test1" OWNER TO "test_user"']
+ - result.state == 'present'
+ - result is changed
+
+# Check that the table owner was changed to test_user
+- name: postgresql_table - check that table owner was changed
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_tables WHERE tablename = 'test1' AND tableowner = 'test_user'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Additional storage parameters
+#
+
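+# Storage parameters such as fillfactor and autovacuum_analyze_threshold are regular per-table settings;
+# the module passes them through unchanged in the WITH (...) clause of CREATE TABLE.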
+# Create a table with additional storage parameters:
+- name: postgresql_table - create table with storage_params
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ columns:
+ - id int
+ - name text
+ storage_params:
+ - fillfactor=10
+ - autovacuum_analyze_threshold=1
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.state == 'present'
+ - result.queries == ['CREATE TABLE "test3" (id int,name text) WITH (fillfactor=10,autovacuum_analyze_threshold=1)']
+ - result.storage_params == [ "fillfactor=10", "autovacuum_analyze_threshold=1" ]
+
+# Check storage parameters
+- name: postgresql_table - check storage parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT reloptions FROM pg_class WHERE relname = 'test3'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.query_result[0].reloptions == ["fillfactor=10", "autovacuum_analyze_threshold=1"]
+
+#
+# Check truncate table
+#
+
+# Insert a row into the test table:
+- name: postgresql_table - insert a row
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "INSERT INTO test3 (id, name) VALUES (1, 'first')"
+
+# Truncate a table in check_mode:
+- name: postgresql_table - truncate table in check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ truncate: yes
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['TRUNCATE TABLE "test3"']
+ - result.state == "present"
+
+# Check the row exists:
+- name: postgresql_table - check that row exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT * FROM test3 WHERE id = '1'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Truncate a table. It always returns changed == true
+# because it always creates a new table with the same schema and drops the old one:
+- name: postgresql_table - truncate table
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ truncate: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['TRUNCATE TABLE "test3"']
+ - result.state == "present"
+
+# Check the row doesn't exist:
+- name: postgresql_table - check that row doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT * FROM test3 WHERE id = '1'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+#
+# Check rename table
+#
+
+# Rename a table in check_mode.
+# In check_mode test4 won't exist after the following task,
+# so result.state == 'absent' for the table with this name
+- name: postgresql_table - rename table in check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ rename: test4
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"']
+ - result.state == "absent"
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+# Rename a table:
+- name: postgresql_table - rename table
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test3
+ rename: test4
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "test3" RENAME TO "test4"']
+ - result.state == "present"
+
+# Check that the table test3 doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table test3 doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test3'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Check that the table test4 exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table test4 exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test4'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+#
+# Check create unlogged table
+#
+
+# Create unlogged table in check_mode:
+- name: postgresql_table - create unlogged table in check_mode
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ unlogged: yes
+ register: result
+ ignore_errors: yes
+ check_mode: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE UNLOGGED TABLE "test5" ()']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+# Create unlogged table:
+- name: postgresql_table - create unlogged table
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ unlogged: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE UNLOGGED TABLE "test5" ()']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table exists after the previous step, rowcount must be 1
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Drop table CASCADE:
+- name: postgresql_table - drop table cascade
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test5
+ state: absent
+ cascade: yes
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "test5" CASCADE']
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+# Check that the table doesn't exist after the previous step, rowcount must be 0
+- name: postgresql_table - check that table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test5'"
+ ignore_errors: yes
+ register: result
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+- assert:
+ that:
+ - result.rowcount == 0
+ when: postgres_version_resp.stdout is version('9.1', '>=')
+
+#
+# Create, drop, and rename table in a specific schema:
+#
+- name: postgresql_table - create table in a specific schema
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['CREATE TABLE "acme"."test_schema_table" ()']
+
+- name: postgresql_table - check that table exists after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_table - try to create a table with the same name and schema again
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: postgresql_table - create a table in the default schema for the next test
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: test_schema_table
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: postgresql_table - drop the table from schema acme
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: postgres.acme.test_schema_table
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['DROP TABLE "postgres"."acme"."test_schema_table"']
+
+- name: postgresql_table - check that the table doesn't exist after the previous step
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'acme'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 0
+
+- name: postgresql_table - try to drop the table from schema acme again
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: acme.test_schema_table
+ state: absent
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+
+- name: postgresql_table - check that the table with the same name in schema public exists
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_query:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ query: "SELECT 1 FROM pg_stat_all_tables WHERE relname = 'test_schema_table' and schemaname = 'public'"
+ ignore_errors: yes
+ register: result
+
+- assert:
+ that:
+ - result.rowcount == 1
+
+- name: postgresql_table - rename the table whose name contains a schema
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: public.test_schema_table
+ rename: new_test_schema_table
+ trust_input: yes
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ['ALTER TABLE "public"."test_schema_table" RENAME TO "new_test_schema_table"']
+
+############################
+# Test trust_input parameter
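+# With trust_input: no the module validates user-supplied identifiers (here session_role) and fails
+# before executing any SQL if they look unsafe, which is what the dangerous value below exercises.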
+- name: postgresql_table - check trust_input
+ postgresql_table:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: postgres.acme.test_schema_table
+ state: absent
+ trust_input: no
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ register: result
+ ignore_errors: yes
+
+- assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+#
+# Clean up
+#
+- name: postgresql_table - drop test schema
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_schema:
+ database: postgres
+ login_user: "{{ pg_user }}"
+ name: acme
+ state: absent
+ cascade_drop: yes
+
+- name: postgresql_table - drop test roles
+ become_user: "{{ pg_user }}"
+ become: yes
+ postgresql_user:
+ db: postgres
+ login_user: "{{ pg_user }}"
+ name: "{{ item }}"
+ state: absent
+ loop:
+ - test_user
+ - alice
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/defaults/main.yml
new file mode 100644
index 00000000..1eb5b843
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+test_tablespace_path: "/ssd"
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/main.yml
new file mode 100644
index 00000000..21a47ee3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/main.yml
@@ -0,0 +1,7 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_tablespace module
+- import_tasks: postgresql_tablespace_initial.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml
new file mode 100644
index 00000000..f5884d99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_tablespace/tasks/postgresql_tablespace_initial.yml
@@ -0,0 +1,245 @@
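+# CREATE TABLESPACE requires an existing, empty directory owned by the PostgreSQL system user,
+# so the tasks below prepare the directory at test_tablespace_path and relax SELinux first.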
+- name: postgresql_tablespace - drop dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: absent
+ ignore_errors: true
+
+- name: postgresql_tablespace - disable selinux
+ become: true
+ shell: setenforce 0
+ ignore_errors: true
+
+- name: postgresql_tablespace - create dir for test tablespace
+ become: true
+ file:
+ path: '{{ test_tablespace_path }}'
+ state: directory
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ mode: '0700'
+ ignore_errors: true
+
+- name: postgresql_tablespace - create test role to test change ownership
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: bob
+ state: present
+ ignore_errors: true
+
+- name: postgresql_tablespace - create test role to test change ownership
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: alice
+ state: present
+ ignore_errors: true
+
+- name: postgresql_tablespace - create a new tablespace called acme and set bob as its owner
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: bob
+ location: /ssd
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'bob'
+ - result.queries == ["CREATE TABLESPACE \"acme\" LOCATION '/ssd'", "ALTER TABLESPACE \"acme\" OWNER TO \"bob\""]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+ - result.location == '/ssd'
+
+- name: postgresql_tablespace - try to create the same tablespace with different location
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ location: /another-ssd
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == "Tablespace 'acme' exists with different location '/ssd'"
+
+- name: postgresql_tablespace - change tablespace owner to alice
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'alice'
+ - result.queries == ["ALTER TABLESPACE \"acme\" OWNER TO \"alice\""]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+
+- name: postgresql_tablespace - try to change tablespace owner to alice again to be sure that nothing changes
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ owner: alice
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.owner == 'alice'
+ - result.queries == []
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options == {}
+
+- name: postgresql_tablespace - change tablespace options
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: 4
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.owner == 'alice'
+ - result.queries == ["ALTER TABLESPACE \"acme\" SET (seq_page_cost = '4')"]
+ - result.state == 'present'
+ - result.tablespace == 'acme'
+ - result.options.seq_page_cost == '4'
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - reset seq_page_cost option
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ login_db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: reset
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.queries == ["ALTER TABLESPACE \"acme\" RESET (seq_page_cost)"]
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - reset seq_page_cost option again
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ set:
+ seq_page_cost: reset
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.queries == []
+ when: postgres_version_resp.stdout is version('9.0', '>=')
+
+- name: postgresql_tablespace - rename tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: acme
+ rename_to: foo
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.newname == 'foo'
+ - result.queries == ["ALTER TABLESPACE \"acme\" RENAME TO \"foo\""]
+
+- name: postgresql_tablespace - rename tablespace to potentially dangerous name
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ rename_to: '{{ dangerous_name }}'
+ trust_input: no
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+- name: postgresql_tablespace - drop tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ state: absent
+ trust_input: yes
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is changed
+ - result.state == 'absent'
+ - result.queries == ["DROP TABLESPACE \"foo\""]
+
+- name: postgresql_tablespace - try to drop nonexistent tablespace
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_tablespace:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ name: foo
+ state: absent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is not changed
+ - result.msg == "Tries to drop nonexistent tablespace 'foo'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/aliases
new file mode 100644
index 00000000..b2033afd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group4
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/defaults/main.yml
new file mode 100644
index 00000000..dbcbea12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/defaults/main.yml
@@ -0,0 +1,4 @@
+db_name: 'ansible_db'
+db_user1: 'ansible_db_user1'
+db_user2: 'ansible_db_user2'
+dangerous_name: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/main.yml
new file mode 100644
index 00000000..183494ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial CI tests of postgresql_user module
+- import_tasks: postgresql_user_initial.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
+
+# General tests:
+- import_tasks: postgresql_user_general.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml
new file mode 100644
index 00000000..b007492d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_general.yml
@@ -0,0 +1,775 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for postgresql_user module.
+
+- vars:
+ test_user: hello.user.with.dots
+ test_user2: hello
+ test_group1: group1
+ test_group2: group2
+ test_table: test
+ test_comment1: 'comment1'
+ test_comment2: 'comment2'
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: postgres
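+  # The two anchors above hold the shared become/register and connection parameters; each task merges
+  # them back in with "<<:". Shown only as a comment, the first task below roughly expands to:
+  #   - name: Create role in check_mode
+  #     become_user: '{{ pg_user }}'
+  #     become: yes
+  #     register: result
+  #     check_mode: yes
+  #     postgresql_user:
+  #       login_user: '{{ pg_user }}'
+  #       login_db: postgres
+  #       name: '{{ test_user }}'
+  #       trust_input: no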
+
+ block:
+ #
+ # Common tests
+ #
+ - name: Create role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Add a comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment1 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment1 }}'
+
+ - name: Try to add the same comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment1 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Try to add another comment on the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ comment: '{{ test_comment2 }}'
+
+ - assert:
+ that:
+ - result is changed
+ - result.queries == ["COMMENT ON ROLE \"{{ test_user }}\" IS '{{ test_comment2 }}'"]
+
+ - name: check the comment
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') AS comment
+ FROM pg_catalog.pg_roles r WHERE r.rolname = '{{ test_user }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ - result.query_result[0].comment == '{{ test_comment2 }}'
+
+ - name: Try to create role again in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to create role again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in check_mode
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user actually exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Drop role in actual mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_user }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Try to drop role in check mode again
+ <<: *task_parameters
+ check_mode: yes
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Try to drop role in actual mode again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ #
+ # password, no_password_changes, encrypted, expires parameters
+ #
+
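+    # A PostgreSQL md5 password hash is the string 'md5' followed by md5(cleartext_password + username);
+    # the value below is such a precomputed hash passed directly instead of a cleartext password.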
+ - name: Create role with password, passed as hashed md5
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: md59543f1d82624df2b31672ec0f7050460
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exists with a proper password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Test no_password_changes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: u123
+ no_password_changes: yes
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'md59543f1d82624df2b31672ec0f7050460'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ # Storing unencrypted passwords is not available from PostgreSQL 10
+ - name: Change password, passed as unencrypted
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: no
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Check that the user exists with the unencrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword = 'myunencryptedpass'"
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - assert:
+ that:
+ - result.rowcount == 1
+ when: postgres_version_resp.stdout is version('10', '<')
+
+ - name: Change password, explicit encrypted=yes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ password: myunencryptedpass
+ encrypted: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that the user exists with an encrypted password
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}' and rolpassword != 'myunencryptedpass'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Change rolvaliduntil attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Try to set the same rolvaliduntil value again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ expires: 'Jan 31 2020'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check that nothing changed
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolvaliduntil::text like '2020-01-31%'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ #
+ # role_attr_flags
+ #
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set the same role attributes again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: CREATEROLE,CREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 't' and rolcreatedb = 't'
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Set role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ role_attr_flags: NOCREATEROLE,NOCREATEDB
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_user }}'
+
+ - name: Check the prev step
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: >
+ SELECT rolname FROM pg_authid WHERE rolname = '{{ test_user }}'
+ AND rolcreaterole = 'f' and rolcreatedb = 'f'
+
+ #
+ # priv
+ #
+ - name: Create test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ columns:
+ - id int
+
+ - name: Insert data into the test table
+ <<: *task_parameters
+ postgresql_query:
+ query: "INSERT INTO {{ test_table }} (id) VALUES ('1')"
+ <<: *pg_parameters
+
+ - name: Check that test_user is not allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - "'permission denied' in result.msg"
+
+ - name: Grant privileges
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: Check that test_user is allowed to read the data
+ <<: *task_parameters
+ postgresql_query:
+ db: postgres
+ login_user: '{{ pg_user }}'
+ session_role: '{{ test_user }}'
+ query: 'SELECT * FROM {{ test_table }}'
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant the same privileges again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ priv: '{{ test_table }}:SELECT'
+
+ - assert:
+ that:
+ - result is not changed
+
+ - name: Remove test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ #
+ # fail_on_user
+ #
+ - name: Create role for test
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+
+ - name: Create test table, set owner to test_user2
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ owner: '{{ test_user2 }}'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user2 }}'
+ state: absent
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Unable to remove user'
+
+ - name: Test fail_on_user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ fail_on_user: no
+
+ - assert:
+ that:
+ - result is not changed
+
+ #
+ # Test groups parameter
+ #
+ - name: Create test group
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+
+ - name: Create role test_group1 and grant test_group2 to test_group1 in check_mode
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ check_mode: yes
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user doesn't exist
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
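+    # pg_group.grolist is an array of member role OIDs, so comparing it with '{}' filters out groups
+    # with no members; since the grant above ran in check_mode, no membership should exist yet.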
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 0
+
+ - name: Create role test_group1 and grant test_group2 to test_group1
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+ role_attr_flags: NOLOGIN
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_group1 }}'
+ - result.queries == ['CREATE USER "{{ test_group1 }}" NOLOGIN', 'GRANT "{{ test_group2 }}" TO "{{ test_group1 }}"']
+
+ - name: check that the user exists
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT rolname FROM pg_roles WHERE rolname = '{{ test_group1 }}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant test_group2 to test_group1 again
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_group1 }}'
+ groups: '{{ test_group2 }}'
+
+ - assert:
+ that:
+ - result is not changed
+ - result.user == '{{ test_group1 }}'
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT grolist FROM pg_group WHERE groname = '{{ test_group2 }}' AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: Grant groups to existent role
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ test_user }}'
+ groups:
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+ trust_input: no
+
+ - assert:
+ that:
+ - result is changed
+ - result.user == '{{ test_user }}'
+ - result.queries == ['GRANT "{{ test_group1 }}" TO "{{ test_user }}"', 'GRANT "{{ test_group2 }}" TO "{{ test_user }}"']
+
+ - name: check membership
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: "SELECT * FROM pg_group WHERE groname in ('{{ test_group1 }}', '{{ test_group2 }}') AND grolist != '{}'"
+
+ - assert:
+ that:
+ - result.rowcount == 2
+
+ ########################
+ # Test trust_input param
+
+ - name: Create role with potentially dangerous name, don't trust
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ dangerous_name }}'
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == 'Passed input \'{{ dangerous_name }}\' is potentially dangerous'
+
+ - name: Create role with potentially dangerous name, trust
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ dangerous_name }}'
+
+ - assert:
+ that:
+ - result is changed
+
+ always:
+ #
+ # Clean up
+ #
+ - name: Drop test table
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_table }}'
+ state: absent
+
+ - name: Drop test users and groups
+ <<: *task_parameters
+ postgresql_user:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_user }}'
+ - '{{ test_user2 }}'
+ - '{{ test_group1 }}'
+ - '{{ test_group2 }}'
+ - '{{ dangerous_name }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml
new file mode 100644
index 00000000..79be2237
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/postgresql_user_initial.yml
@@ -0,0 +1,156 @@
+#
+# Create and destroy user, test 'password' and 'encrypted' parameters
+#
+# Unencrypted values are not supported on newer versions,
+# so do not run the 'encrypted: no' tests on PostgreSQL 10+
+- ansible.builtin.set_fact:
+ encryption_values:
+ - 'yes'
+
+- ansible.builtin.set_fact:
+ encryption_values: '{{ encryption_values + ["no"]}}'
+ when: postgres_version_resp.stdout is version('10', '<')
+
+- include_tasks: test_password.yml
+ vars:
+ encrypted: '{{ loop_item }}'
+ db_password1: 'secretù' # use UTF-8
+ loop: '{{ encryption_values }}'
+ loop_control:
+ loop_var: loop_item
+
+# BYPASSRLS role attribute was introduced in PostgreSQL 9.5, so
+# we want to test attribute management differently depending
+# on the version.
+- ansible.builtin.set_fact:
+ bypassrls_supported: "{{ postgres_version_resp.stdout is version('9.5.0', '>=') }}"
+
+# test 'no_password_change' and 'role_attr_flags' parameters
+- include_tasks: test_no_password_change.yml
+ vars:
+ no_password_changes: '{{ loop_item }}'
+ loop:
+ - 'yes'
+ - 'no'
+ loop_control:
+ loop_var: loop_item
+
+### TODO: fail_on_user
+
+#
+# Test login_user functionality
+#
+- name: Create a user to test login module parameters
+ become: yes
+ become_user: "{{ pg_user }}"
+ postgresql_user:
+ name: "{{ db_user1 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "password"
+ role_attr_flags: "CREATEDB,LOGIN,CREATEROLE"
+ login_user: "{{ pg_user }}"
+ trust_input: no
+ db: postgres
+
+- name: Create db
+ postgresql_db:
+ name: "{{ db_name }}"
+ state: "present"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that the database was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Create a user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: "present"
+ encrypted: 'yes'
+ password: "md55c8ccfd9d6711fc69a7eae647fc54f51"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+ trust_input: no
+
+- name: Check that it was created
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+- name: Grant database privileges
+ postgresql_privs:
+ type: "database"
+ state: "present"
+ roles: "{{ db_user2 }}"
+ privs: "CREATE,connect"
+ objs: "{{ db_name }}"
+ db: "{{ db_name }}"
+ login: "{{ db_user1 }}"
+ password: "password"
+ host: "localhost"
+
+- name: Check that the user has the requested permissions (database)
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datacl from pg_database where datname='{{ db_name }}';" | psql {{ db_name }}
+ register: result_database
+
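+# In the aclitem output from datacl, 'C' stands for CREATE and 'c' for CONNECT, so "<user>=Cc"
+# confirms both privileges granted above.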
+- ansible.builtin.assert:
+ that:
+ - "result_database.stdout_lines[-1] == '(1 row)'"
+ - "db_user2 ~ '=Cc' in result_database.stdout"
+
+- name: Remove user
+ postgresql_user:
+ name: "{{ db_user2 }}"
+ state: 'absent'
+ priv: "ALL"
+ db: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+ trust_input: no
+
+- name: Check that they were removed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select * from pg_user where usename='{{ db_user2 }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+- name: Destroy DB
+ postgresql_db:
+ state: absent
+ name: "{{ db_name }}"
+ login_user: "{{ db_user1 }}"
+ login_password: "password"
+ login_host: "localhost"
+
+- name: Check that database was destroyed
+ become: yes
+ become_user: "{{ pg_user }}"
+ shell: echo "select datname from pg_database where datname = '{{ db_name }}';" | psql -d postgres
+ register: result
+
+- ansible.builtin.assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml
new file mode 100644
index 00000000..c296c0ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_no_password_change.yml
@@ -0,0 +1,167 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_parameters: &parameters
+ db: postgres
+ name: "{{ db_user1 }}"
+ login_user: "{{ pg_user }}"
+
+ block:
+
+ - name: Create a user with all role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "SUPERUSER,CREATEROLE,CREATEDB,INHERIT,LOGIN{{ bypassrls_supported | ternary(',BYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}' # no_password_changes is ignored when user doesn't already exist
+
+ - name: Check that the user has the requested role attributes
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin {{ bypassrls_supported | ternary(\", 'bypassrls:'||rolbypassrls\", '') }} from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:t' in result.stdout_lines[-2]"
+ - "'createrole:t' in result.stdout_lines[-2]"
+ - "'create:t' in result.stdout_lines[-2]"
+ - "'inherit:t' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:t' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Modify a user to have no role attributes
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }}"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: "Check that the user doesn't have any attribute"
+ <<: *task_parameters
+ shell: "echo \"select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:f' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check that the user has the requested role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: "echo \"select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';\" | psql -d postgres"
+
+ - assert:
+ that:
+ - "not bypassrls_supported or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Try to add an invalid attribute
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "NOSUPERUSER,NOCREATEROLE,NOCREATEDB,noinherit,NOLOGIN{{ bypassrls_supported | ternary(',NOBYPASSRLS', '') }},INVALID"
+ no_password_changes: '{{ no_password_changes }}'
+ ignore_errors: yes
+
+ - name: Check that ansible reports failure
+ assert:
+ that:
+ - result is not changed
+ - result is failed
+ - "result.msg == 'Invalid role_attr_flags specified: INVALID'"
+
+ - name: Modify a single role attribute on a user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+
+ - name: Check that ansible reports it modified the role
+ assert:
+ that:
+ - result is changed
+
+ - name: Check the role attributes
+ <<: *task_parameters
+ shell: echo "select 'super:'||rolsuper, 'createrole:'||rolcreaterole, 'create:'||rolcreatedb, 'inherit:'||rolinherit, 'login:'||rolcanlogin from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+ - "'super:f' in result.stdout_lines[-2]"
+ - "'createrole:f' in result.stdout_lines[-2]"
+ - "'create:f' in result.stdout_lines[-2]"
+ - "'inherit:f' in result.stdout_lines[-2]"
+ - "'login:t' in result.stdout_lines[-2]"
+
+ - block:
+ - name: Check the role attribute BYPASSRLS
+ <<: *task_parameters
+ shell: echo "select 'bypassrls:'||rolbypassrls from pg_roles where rolname='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "( postgres_version_resp.stdout is version('9.5.0', '<')) or 'bypassrls:f' in result.stdout_lines[-2]"
+ when: bypassrls_supported
+
+ - name: Check that using same attribute a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: "present"
+ role_attr_flags: "LOGIN"
+ no_password_changes: '{{ no_password_changes }}'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: Check there isn't any update reported
+ assert:
+ that:
+ - result is not changed
+
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
+ no_password_changes: '{{ no_password_changes }}' # user deletion: no_password_changes is ignored
+
+ - name: Check that user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ always:
+ - name: Cleanup the user
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ state: 'absent'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_password.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_password.yml
new file mode 100644
index 00000000..0f1edcff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user/tasks/test_password.yml
@@ -0,0 +1,429 @@
+- vars:
+ task_parameters: &task_parameters
+ become_user: "{{ pg_user }}"
+ become: yes
+ register: result
+ postgresql_query_parameters: &query_parameters
+ db: postgres
+ login_user: "{{ pg_user }}"
+ postgresql_parameters: &parameters
+ <<: *query_parameters
+ name: "{{ db_user1 }}"
+
+ block:
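+    # Setting PGOPTIONS='-c default_transaction_read_only=on' makes any write statement (such as
+    # ALTER USER) fail, so a failure in the next two tasks proves the module really attempted a change.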
+ - name: 'Check that PGOPTIONS environment variable is effective (1/2)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ ignore_errors: true
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - name: 'Check that PGOPTIONS environment variable is effective (2/2)'
+ assert:
+ that:
+ - "{{ result is failed }}"
+
+ - name: 'Create a user (password encrypted: {{ encrypted }})'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - block: &changed # the block is only used here to be able to define a YAML anchor
+ - name: Check that ansible reports it was created
+ assert:
+ that:
+ - "{{ result is changed }}"
+
+ - name: Check that it was created
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(1 row)'"
+
+ - name: Check that creating user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: '{{ encrypted }}'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - block: &not_changed # the block is only used here to be able to define a YAML anchor
+ - name: Check that ansible reports no change
+ assert:
+ that:
+ - "{{ result is not changed }}"
+
+ - name: 'Define an expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Redefine the same expiration time'
+ <<: *task_parameters
+ postgresql_user:
+ expires: '2025-01-01'
+ <<: *parameters
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - block:
+
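+ # PostgreSQL stores md5 passwords as 'md5' || md5(password || username), so the
+ # pre-hashed values below are built with the same recipe, e.g.
+ #   "md5{{ (db_password1 ~ db_user1) | hash('md5') }}"
+ # and therefore match what ends up in pg_authid.rolpassword for this user.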
+ - name: 'Using MD5-hashed password: check that password is not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: "Using MD5-hashed password: check that password not changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ encrypted: 'yes'
+ password: "md5{{ (db_password1 ~ db_user1) | hash('md5')}}"
+ expires: '2025-01-01'
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password is changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: 'prefix{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using another md5 hash with 'ENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix1' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'yes'
+
+ - <<: *changed
+
+ - name: "Using MD5-hashed password: check that password changed when using md5 hash with 'UNENCRYPTED'"
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "md5{{ ('prefix2' ~ db_password1 ~ db_user1) | hash('md5')}}"
+ encrypted: 'no'
+ register: change_pass_unencrypted
+ failed_when:
+ - change_pass_unencrypted is failed
+ # newer versions of psycopg2 no longer support unencrypted passwords, so we ignore the error
+ - '"UNENCRYPTED PASSWORD is no longer supported" not in change_pass_unencrypted.msg'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password is changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using MD5-hashed password: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password is changed when using a cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: '{{ db_password1 }}'
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'yes'
+
+ - block:
+
+ - name: 'Using cleartext password: check that password is not changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Redefine the same expiration time and password (not encrypted)'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ expires: '2025-01-01'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password is changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password is changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ - name: 'Using cleartext password: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using MD5-hashed password: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: 'yes'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password: check that password is changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: 'no'
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - <<: *changed
+
+ when: encrypted == 'no'
+
+ # start of block scram-sha-256
+ # scram-sha-256 password encryption type is supported since PostgreSQL 10
+ - when: postgres_version_resp.stdout is version('10', '>=')
+ block:
+
+ - name: 'Using cleartext password with scram-sha-256: resetting password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ""
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when using cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ # the ansible postgresql_user module interface does not (yet) support forcing the
+ # password_encryption setting, so we have to pass it through an environment variable
+ # to force the correct encryption
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
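+ # Passing password_encryption through PGOPTIONS is roughly equivalent to running
+ #   SET password_encryption = 'scram-sha-256';
+ # before the ALTER USER, so the server hashes the cleartext password into a
+ # SCRAM-SHA-256 verifier instead of an md5 one.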
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: ensure password is properly encrypted'
+ <<: *task_parameters
+ postgresql_query:
+ <<: *query_parameters
+ query: select * from pg_authid where rolname=%s and rolpassword like %s
+ positional_args:
+ - '{{ db_user1 }}'
+ - 'SCRAM-SHA-256$%'
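+ # Stored SCRAM verifiers have the form
+ #   SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>
+ # so matching rolpassword against 'SCRAM-SHA-256$%' is enough to confirm the
+ # password was hashed with scram-sha-256 rather than md5.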
+
+ - assert:
+ that:
+ - result.rowcount == 1
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is not changed when using the same password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *not_changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when using another cleartext password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: "changed{{ db_password1 }}"
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is changed when clearing the password'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *changed
+
+ - name: 'Using cleartext password with scram-sha-256: check that password is not changed when clearing the password again'
+ <<: *task_parameters
+ postgresql_user:
+ <<: *parameters
+ password: ''
+ encrypted: "{{ encrypted }}"
+ environment:
+ PGCLIENTENCODING: 'UTF8'
+ PGOPTIONS: "-c password_encryption=scram-sha-256"
+
+ - <<: *not_changed
+
+ # end of block scram-sha-256
+
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+
+ - <<: *changed
+
+ - name: Check that the user was removed
+ <<: *task_parameters
+ shell: echo "select * from pg_user where usename='{{ db_user1 }}';" | psql -d postgres
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - assert:
+ that:
+ - "result.stdout_lines[-1] == '(0 rows)'"
+
+ - name: Check that removing user a second time does nothing
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
+ environment:
+ PGOPTIONS: '-c default_transaction_read_only=on' # ensure 'alter user' query isn't executed
+
+ - <<: *not_changed
+
+ always:
+ - name: Remove user
+ <<: *task_parameters
+ postgresql_user:
+ state: 'absent'
+ <<: *parameters
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/aliases
new file mode 100644
index 00000000..e0807d58
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml
new file mode 100644
index 00000000..f697cefd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/defaults/main.yml
@@ -0,0 +1,12 @@
+pg_user: postgres
+db_default: postgres
+
+test_table1: acme1
+test_table2: acme2
+test_table3: acme3
+test_idx1: idx1
+test_idx2: idx2
+test_func1: func1
+test_func2: func2
+test_func3: func3
+test_schema1: schema1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml
new file mode 100644
index 00000000..4ce5a583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_postgresql_db
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml
new file mode 100644
index 00000000..fa47fdc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/main.yml
@@ -0,0 +1,8 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Initial tests of postgresql_user_obj_stat_info module:
+- import_tasks: postgresql_user_obj_stat_info.yml
+ when: postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml
new file mode 100644
index 00000000..6e6ff212
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/postgresql_user_obj_stat_info/tasks/postgresql_user_obj_stat_info.yml
@@ -0,0 +1,222 @@
+---
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- vars:
+ task_parameters: &task_parameters
+ become_user: '{{ pg_user }}'
+ become: yes
+ register: result
+ pg_parameters: &pg_parameters
+ login_user: '{{ pg_user }}'
+ login_db: '{{ db_default }}'
+
+ block:
+ # Preparation:
+ # 0. create test schema
+ # 1. create test tables
+ # 2. create test indexes
+ # 3. create test functions
+ # 4. enable track_functions and restart
+
+ - name: Create schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}'
+
+ - name: Create test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ columns:
+ - id int
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_table2 }}'
+
+ - name: Create test table in another schema
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}.{{ test_table3 }}'
+
+ - name: Create test indexes
+ <<: *task_parameters
+ postgresql_idx:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ table: '{{ test_table1 }}'
+ columns:
+ - id
+ loop:
+ - '{{ test_idx1 }}'
+ - '{{ test_idx2 }}'
+
+ - name: Set track_functions (restart is required)
+ <<: *task_parameters
+ postgresql_set:
+ <<: *pg_parameters
+ name: track_functions
+ value: all
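+ # track_functions=all makes PostgreSQL record call counts for user-defined
+ # functions (exposed through pg_stat_user_functions), which is what the
+ # 'calls == 1' assertions further down rely on.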
+
+ # To avoid CI timeouts
+ - name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: yes
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: yes
+
+ - name: Stop PostgreSQL
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: stopped
+ when: (ansible_facts.distribution_major_version != '8' and ansible_facts.distribution == 'CentOS') or ansible_facts.distribution != 'CentOS'
+
+ - name: Pause between stopping and starting PostgreSQL
+ pause:
+ seconds: 5
+
+ - name: Start PostgreSQL
+ become: yes
+ service:
+ name: "{{ postgresql_service }}"
+ state: started
+
+ - name: Create test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'CREATE FUNCTION {{ item }}() RETURNS boolean AS $$ BEGIN RETURN 1; END; $$ LANGUAGE PLPGSQL'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+
+ - name: Touch test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'SELECT {{ item }}()'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+
+ #######
+ # Tests
+ #######
+ # 0. Without filter
+ - name: Collect all stats
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+ - result.tables.public.{{ test_table2 }}.size == 0
+ - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0
+ - result.functions.public.{{ test_func1 }}.calls == 1
+ - result.functions.public.{{ test_func2 }}.calls == 1
+ - result.functions.{{ test_schema1 }}.{{ test_func3 }}.calls == 1
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+
+ # 1. With filter
+ - name: Collect stats with filter
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ filter: tables, indexes
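+ # Restricting the filter to tables and indexes should leave the other categories
+ # empty; the 'result.functions == {}' assertion below checks that.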
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+ - result.tables.public.{{ test_table2 }}.size == 0
+ - result.tables.{{ test_schema1 }}.{{ test_table3 }}.size == 0
+ - result.functions == {}
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+
+ # 2. With schema
+ - name: Collect stats for objects in certain schema
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ schema: public
+
+ - assert:
+ that:
+ - result is not changed
+ - result.tables.public.{{ test_table1 }}.size == 0
+ - result.tables.public.{{ test_table2 }}.size == 0
+ - result.indexes.public.{{ test_idx1 }}.idx_scan == 0
+ - result.indexes.public.{{ test_idx2 }}.idx_scan == 0
+ - result.functions.public.{{ test_func1 }}.calls == 1
+ - result.functions.public.{{ test_func2 }}.calls == 1
+ - result.tables.{{ test_schema1 }} is not defined
+
+
+ # 3. With wrong schema
+ - name: Try to collect data in a nonexistent schema
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ schema: nonexistent
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg == "Schema 'nonexistent' does not exist"
+
+ # 4. Test Trust Input
+ - name: Try running with SQL injection
+ <<: *task_parameters
+ postgresql_user_obj_stat_info:
+ <<: *pg_parameters
+ session_role: 'curious.anonymous"; SELECT * FROM information_schema.tables; --'
+ trust_input: no
+ ignore_errors: yes
+
+ - assert:
+ that:
+ - result is failed
+ - result.msg is search('is potentially dangerous')
+
+ ##########
+ # Clean up
+ ##########
+ - name: Drop schema
+ <<: *task_parameters
+ postgresql_schema:
+ <<: *pg_parameters
+ name: '{{ test_schema1 }}'
+ state: absent
+ cascade_drop: yes
+
+ - name: Drop test tables
+ <<: *task_parameters
+ postgresql_table:
+ <<: *pg_parameters
+ name: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ test_table1 }}'
+ - '{{ test_table2 }}'
+
+ - name: Drop test functions
+ <<: *task_parameters
+ postgresql_query:
+ <<: *pg_parameters
+ query: 'DROP FUNCTION {{ item }}()'
+ loop:
+ - '{{ test_func1 }}'
+ - '{{ test_func2 }}'
+ - '{{ test_schema1 }}.{{ test_func3 }}'
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nios_tests/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nios_tests/tasks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nios_tests/tasks/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nuage_tests/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nuage_tests/tasks/main.yml
new file mode 100644
index 00000000..2a902dc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_nuage_tests/tasks/main.yml
@@ -0,0 +1,24 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Install Nuage VSD API Simulator
+ pip:
+ name: nuage-vsd-sim
+
+ - name: Start Nuage VSD API Simulator
+ shell: "(cd /; nuage-vsd-sim >/dev/null 2>&1)"
+ async: 1800
+ poll: 0
+
+ - name: Wait for API to be ready
+ uri:
+ url: http://localhost:5000
+ register: api
+ delay: 3
+ retries: 10
+ until: api.status == 200
+
+ when: "ansible_python_version is version('2.7', '>=')"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_tests/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_tests/tasks/main.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/prepare_tests/tasks/main.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/aliases
new file mode 100644
index 00000000..d5a5dcd1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/aliases
@@ -0,0 +1,4 @@
+unsupported
+proxmox_domain_info
+proxmox_group_info
+proxmox_user_info
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml
new file mode 100644
index 00000000..c615faf5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml
@@ -0,0 +1,111 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright: (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: List domains
+ proxmox_domain_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
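+# api_password and the api_token_id/api_token_secret pair are alternative ways to
+# authenticate; default(omit) drops whichever variables are undefined for this run,
+# so the same tasks work with either password- or token-based credentials.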
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_domains is defined
+
+- name: Retrieve info about pve
+ proxmox_domain_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ domain: pve
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_domains is defined
+ - results.proxmox_domains|length == 1
+ - results.proxmox_domains[0].type == 'pve'
+
+- name: List groups
+ proxmox_group_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_groups is defined
+
+- name: List users
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_users is defined
+
+- name: Retrieve info about api_user using name and domain
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ user: "{{ user }}"
+ domain: "{{ domain }}"
+ register: results_user_domain
+
+- assert:
+ that:
+ - results_user_domain is not changed
+ - results_user_domain.proxmox_users is defined
+ - results_user_domain.proxmox_users|length == 1
+ - results_user_domain.proxmox_users[0].domain == "{{ domain }}"
+ - results_user_domain.proxmox_users[0].user == "{{ user }}"
+ - results_user_domain.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
+
+- name: Retrieve info about api_user using userid
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ userid: "{{ user }}@{{ domain }}"
+ register: results_userid
+
+- assert:
+ that:
+ - results_userid is not changed
+ - results_userid.proxmox_users is defined
+ - results_userid.proxmox_users|length == 1
+ - results_userid.proxmox_users[0].domain == "{{ domain }}"
+ - results_userid.proxmox_users[0].user == "{{ user }}"
+ - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml
new file mode 100644
index 00000000..9dc2ae44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml
@@ -0,0 +1,27 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: run python_requirements_info module
+ python_requirements_info:
+ register: basic_info
+
+- name: ensure python_requirements_info returns desired info
+ assert:
+ that:
+ - "'python' in basic_info"
+ - "'python_version' in basic_info"
+
+- name: run python_requirements_info module with dependencies
+ python_requirements_info:
+ dependencies:
+ - notreal<1
+ - pip>1
+ register: dep_info
+
+- name: ensure python_requirements_info returns desired info
+ assert:
+ that:
+ - "'installed' in dep_info.valid.pip"
+ - "'notreal' in dep_info.not_found"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/aliases
new file mode 100644
index 00000000..765b70da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/aliases
@@ -0,0 +1 @@
+shippable/posix/group2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml
new file mode 100644
index 00000000..e1379b1c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml
@@ -0,0 +1,145 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Create basic CSV file
+- name: Create unique CSV file
+ copy:
+ content: |
+ name,uid,gid,gecos
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,Jeroen Hoekx
+ dest: users_unique.csv
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ read_csv:
+ path: users_unique.csv
+ key: name
+ register: users_unique
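+# read_csv returns every field as a string, which is why the uid/gid values are
+# compared against quoted numbers like '500' below.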
+
+- assert:
+ that:
+ - users_unique.dict.dag.name == 'dag'
+ - users_unique.dict.dag.gecos == 'Dag Wieërs'
+ - users_unique.dict.dag.uid == '500'
+ - users_unique.dict.dag.gid == '500'
+ - users_unique.dict.jeroen.name == 'jeroen'
+ - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_unique.dict.jeroen.uid == '501'
+ - users_unique.dict.jeroen.gid == '500'
+
+ # Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ read_csv:
+ path: users_unique.csv
+ register: users_unique
+
+- assert:
+ that:
+ - users_unique.list.0.name == 'dag'
+ - users_unique.list.0.gecos == 'Dag Wieërs'
+ - users_unique.list.0.uid == '500'
+ - users_unique.list.0.gid == '500'
+ - users_unique.list.1.name == 'jeroen'
+ - users_unique.list.1.gecos == 'Jeroen Hoekx'
+ - users_unique.list.1.uid == '501'
+ - users_unique.list.1.gid == '500'
+
+
+# Create basic CSV file using semi-colon
+- name: Create non-unique CSV file using semi-colon
+ copy:
+ content: |
+ name;uid;gid;gecos
+ dag;500;500;Dag Wieërs
+ jeroen;501;500;Jeroen Hoekx
+ dag;502;500;Dag Wieers
+ dest: users_nonunique.csv
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ read_csv:
+ path: users_nonunique.csv
+ key: name
+ unique: no
+ delimiter: ';'
+ register: users_nonunique
+
+- assert:
+ that:
+ - users_nonunique.dict.dag.name == 'dag'
+ - users_nonunique.dict.dag.gecos == 'Dag Wieers'
+ - users_nonunique.dict.dag.uid == '502'
+ - users_nonunique.dict.dag.gid == '500'
+ - users_nonunique.dict.jeroen.name == 'jeroen'
+ - users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_nonunique.dict.jeroen.uid == '501'
+ - users_nonunique.dict.jeroen.gid == '500'
+
+
+# Read a CSV file using a non-existent dialect
+- name: Read users from CSV file and return a dictionary
+ read_csv:
+ path: users_nonunique.csv
+ dialect: placebo
+ register: users_placebo
+ ignore_errors: yes
+
+- assert:
+ that:
+ - users_placebo is failed
+ - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python."
+
+
+# Create basic CSV file without header
+- name: Create unique CSV file without header
+ copy:
+ content: |
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,Jeroen Hoekx
+ dest: users_noheader.csv
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ read_csv:
+ path: users_noheader.csv
+ key: name
+ fieldnames: name,uid,gid,gecos
+ register: users_noheader
+
+- assert:
+ that:
+ - users_noheader.dict.dag.name == 'dag'
+ - users_noheader.dict.dag.gecos == 'Dag Wieërs'
+ - users_noheader.dict.dag.uid == '500'
+ - users_noheader.dict.dag.gid == '500'
+ - users_noheader.dict.jeroen.name == 'jeroen'
+ - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_noheader.dict.jeroen.uid == '501'
+ - users_noheader.dict.jeroen.gid == '500'
+
+
+# Create broken file
+- name: Create broken CSV file
+ copy:
+ content: |
+ name,uid,gid,gecos
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,"Jeroen"Hoekx"
+ dest: users_broken.csv
+
+# Read a broken CSV file using strict
+- name: Read users from a broken CSV file
+ read_csv:
+ path: users_broken.csv
+ key: name
+ strict: yes
+ register: users_broken
+ ignore_errors: yes
+
+- assert:
+ that:
+ - users_broken is failed
+ - "'Unable to process file' in users_broken.msg"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/aliases
new file mode 100644
index 00000000..c8c8757f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/aliases
@@ -0,0 +1,6 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml
new file mode 100644
index 00000000..1352c55c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+redis_password: PASS
+master_port: 6379
+slave_port: 6380
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml
new file mode 100644
index 00000000..5b078371
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_redis_replication
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml
new file mode 100644
index 00000000..d0277520
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml
@@ -0,0 +1,47 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- name: redis_info - connect to master with default host/port
+ community.general.redis_info:
+ login_password: "{{ redis_password }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
+
+- name: redis_info - connect to master (check)
+ community.general.redis_info:
+ login_host: 127.0.0.1
+ login_port: "{{ master_port }}"
+ login_password: "{{ redis_password }}"
+ check_mode: yes
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
+
+- name: redis_info - connect to slave
+ community.general.redis_info:
+ login_port: "{{ slave_port }}"
+ login_password: "{{ redis_password }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == slave_port
+ - result.info.role == 'slave'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml
new file mode 100644
index 00000000..4b2b2799
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml
@@ -0,0 +1,9 @@
+# The information below has been taken from https://developer.scaleway.com/#servers
+---
+scaleway_image_id: 6a601340-19c1-4ca7-9c1c-0704bcc9f5fe
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_commerial_type: START1-S
+scaleway_name: scaleway_compute_test
+first_server_name: scaleway_compute_test_first
+second_server_name: scaleway_compute_test_second
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml
new file mode 100644
index 00000000..445e955d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml
@@ -0,0 +1,201 @@
+- name: Create a server with no IP (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: server_creation_absent_check_task
+
+- debug: var=server_creation_absent_check_task
+
+- assert:
+ that:
+ - server_creation_absent_check_task is success
+ - server_creation_absent_check_task is changed
+
+- name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_absent_task
+
+- debug: var=server_creation_absent_task
+
+- assert:
+ that:
+ - server_creation_absent_task is success
+ - server_creation_absent_task is changed
+
+- name: Create a server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_absent_confirmation_task
+
+- debug: var=server_creation_absent_confirmation_task
+
+- assert:
+ that:
+ - server_creation_absent_confirmation_task is success
+ - server_creation_absent_confirmation_task is not changed
+
+# Add a dynamic IP to the instance
+
+- name: Add a dynamic IP to the server (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: ip_patching_check_task
+
+- debug: var=ip_patching_check_task
+
+- assert:
+ that:
+ - ip_patching_check_task is success
+ - ip_patching_check_task is changed
+
+- name: Add a dynamic IP to the server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ register: ip_patching_task
+
+- debug: var=ip_patching_task
+
+- assert:
+ that:
+ - ip_patching_task is success
+ - ip_patching_task is changed
+
+- name: Add a dynamic IP to the server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: ip_patching_confirmation_task
+
+- debug: var=ip_patching_confirmation_task
+
+- assert:
+ that:
+ - ip_patching_confirmation_task is success
+ - ip_patching_confirmation_task is not changed
+
+# Remove dynamic IP
+
+- name: Remove the dynamic IP (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: remove_ip_check_task
+
+- debug: var=remove_ip_check_task
+
+- assert:
+ that:
+ - remove_ip_check_task is success
+ - remove_ip_check_task is changed
+
+- name: Remove the dynamic IP
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: remove_ip_task
+
+- debug: var=remove_ip_task
+
+- assert:
+ that:
+ - remove_ip_task is success
+ - remove_ip_task is changed
+
+- name: Remove the dynamic IP (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: remove_ip_confirmation_task
+
+- debug: var=remove_ip_confirmation_task
+
+- assert:
+ that:
+ - remove_ip_confirmation_task is success
+ - remove_ip_confirmation_task is not changed
+
+- name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml
new file mode 100644
index 00000000..8945a41c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml
@@ -0,0 +1,9 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: state.yml
+- include_tasks: ip.yml
+- include_tasks: security_group.yml
+- include_tasks: pagination.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml
new file mode 100644
index 00000000..7b7dd49b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml
@@ -0,0 +1,71 @@
+- name: Create a first server
+ scaleway_compute:
+ name: '{{ first_server_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Create a second server
+ scaleway_compute:
+ name: '{{ second_server_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Get server information for the first page
+ scaleway_server_info:
+ region: par1
+ query_parameters:
+ per_page: 1
+ page: 1
+ register: first_page
+
+- debug: var=first_page
+
+- assert:
+ that:
+ - first_page is success
+
+- name: Get server information for the second page
+ scaleway_server_info:
+ region: par1
+ query_parameters:
+ per_page: 1
+ page: 2
+ register: second_page
+
+- debug: var=second_page
+
+- assert:
+ that:
+ - second_page is success
+
+- assert:
+ that:
+ - first_page.scaleway_server_info[0].id != second_page.scaleway_server_info[0].id
+
+- name: Delete first server
+ scaleway_compute:
+ name: '{{ first_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Delete second server
+ scaleway_compute:
+ name: '{{ second_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml
new file mode 100644
index 00000000..a0e273c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml
@@ -0,0 +1,147 @@
+- name: Create a scaleway security_group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group
+
+- debug: var=security_group
+
+- block:
+ - name: Create a server with security_group (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+
+ register: server_creation_check_task
+
+ - debug: var=server_creation_check_task
+
+ - assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+ - name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_task
+
+ - debug: var=server_creation_task
+
+ - assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+ - name: Create a server with security_group (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ - name: Keep current security_group (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ - name: Keep current security_group
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ always:
+ - name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_destroy_task
+
+ - debug: var=server_destroy_task
+
+ - assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
+
+ - name: Delete the scaleway security_group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml
new file mode 100644
index 00000000..effd27e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml
@@ -0,0 +1,387 @@
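+# Most state transitions below are exercised in check mode, then for real, and
+# then once more to confirm the module reports no change (idempotency).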
+- name: Create a server (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: server_creation_check_task
+
+- debug: var=server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+- name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+- name: Create a server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+- debug: var=server_creation_confirmation_task
+
+- assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+- name: Patch server tags (Check)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_patching_check_task
+
+- debug: var=server_patching_check_task
+
+- assert:
+ that:
+ - server_patching_check_task is success
+ - server_patching_check_task is changed
+
+- name: Patch server tags
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_patching_task
+
+- debug: var=server_patching_task
+
+- assert:
+ that:
+ - server_patching_task is success
+ - server_patching_task is changed
+
+- name: Patch server tags (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_patching_confirmation_task
+
+- debug: var=server_patching_confirmation_task
+
+- assert:
+ that:
+ - server_patching_confirmation_task is success
+ - server_patching_confirmation_task is not changed
+
+- name: Run it (Check mode)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_run_check_task
+
+- debug: var=server_run_check_task
+
+- assert:
+ that:
+ - server_run_check_task is success
+ - server_run_check_task is changed
+
+- name: Run it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_run_task
+
+- debug: var=server_run_task
+
+- assert:
+ that:
+ - server_run_task is success
+ - server_run_task is changed
+
+- name: Run it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_run_confirmation_task
+
+- debug: var=server_run_confirmation_task
+
+- assert:
+ that:
+ - server_run_confirmation_task is success
+ - server_run_confirmation_task is not changed
+
+- name: Reboot it (Check mode)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: restarted
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_reboot_check_task
+
+- debug: var=server_reboot_check_task
+
+- assert:
+ that:
+ - server_reboot_check_task is success
+ - server_reboot_check_task is changed
+
+- name: Reboot it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: restarted
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_reboot_task
+
+- debug: var=server_reboot_task
+
+- assert:
+ that:
+ - server_reboot_task is success
+ - server_reboot_task is changed
+
+- name: Stop it (Check mode)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_stop_check_task
+
+- debug: var=server_stop_check_task
+
+- assert:
+ that:
+ - server_stop_check_task is success
+ - server_stop_check_task is changed
+
+- name: Stop it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_stop_task
+
+- debug: var=server_stop_task
+
+- assert:
+ that:
+ - server_stop_task is success
+ - server_stop_task is changed
+
+- name: Stop it (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_stop_confirmation_task
+
+- debug: var=server_stop_confirmation_task
+
+- assert:
+ that:
+ - server_stop_confirmation_task is success
+ - server_stop_confirmation_task is not changed
+
+- name: Destroy it (Check mode)
+ check_mode: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_destroy_check_task
+
+- debug: var=server_destroy_check_task
+
+- assert:
+ that:
+ - server_destroy_check_task is success
+ - server_destroy_check_task is changed
+
+- name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
+
+- name: Destroy it (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_destroy_confirmation_task
+
+- debug: var=server_destroy_confirmation_task
+
+- assert:
+ that:
+ - server_destroy_confirmation_task is success
+ - server_destroy_confirmation_task is not changed
+
+- name: Testing for unauthorized organization
+ ignore_errors: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: this-organization-does-not-exists
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ register: unauthorized_organization_task
+
+- debug: var=unauthorized_organization_task
+
+- assert:
+ that:
+ - unauthorized_organization_task is not success
+ - unauthorized_organization_task is not changed
+
+- name: Testing for a nonexistent image
+ ignore_errors: yes
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: this-image-does-not-exists
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ register: unexisting_image_check
+
+- debug: var=unexisting_image_check
+
+- assert:
+ that:
+ - unexisting_image_check is not success
+ - unexisting_image_check is not changed
+ - unexisting_image_check.msg == "Error in getting image this-image-does-not-exists on https://cp-{{scaleway_region}}.scaleway.com"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases
new file mode 100644
index 00000000..03de00bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml
new file mode 100644
index 00000000..0e2cdcb9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+scaleway_name: scaleway_database_backup_test
+scaleway_region: fr-par
+scaleway_database_name: scaleway_database_test
+scaleway_instance_id:
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml
new file mode 100644
index 00000000..11d06f8f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml
@@ -0,0 +1,233 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create a backup (Check)
+ check_mode: yes
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+
+ register: backup_creation_check_task
+
+- debug: var=backup_creation_check_task
+
+- assert:
+ that:
+ - backup_creation_check_task is success
+ - backup_creation_check_task is changed
+
+- name: Create a backup
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ wait: true
+
+ register: backup_creation_task
+
+- debug: var=backup_creation_task
+
+- assert:
+ that:
+ - backup_creation_task is success
+ - backup_creation_task is changed
+
+- name: Create a backup (Confirmation)
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+
+ register: backup_creation_confirmation_task
+
+- debug: var=backup_creation_confirmation_task
+
+- assert:
+ that:
+ - backup_creation_confirmation_task is success
+ - backup_creation_confirmation_task is not changed
+
+- name: Patch backup name (Check)
+ check_mode: yes
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_check_task
+
+- debug: var=backup_patching_check_task
+
+- assert:
+ that:
+ - backup_patching_check_task is success
+ - backup_patching_check_task is changed
+
+- name: Patch backup name
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_task
+
+- debug: var=backup_patching_task
+
+- assert:
+ that:
+ - backup_patching_task is success
+ - backup_patching_task is changed
+
+- name: Patch backup name (Confirmation)
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_confirmation_task
+
+- debug: var=backup_patching_confirmation_task
+
+- assert:
+ that:
+ - backup_patching_confirmation_task is success
+ - backup_patching_confirmation_task is not changed
+
+- name: Export backup (Check)
+ check_mode: yes
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ register: backup_export_check_task
+
+- debug: var=backup_export_check_task
+
+- assert:
+ that:
+ - backup_export_check_task is success
+ - backup_export_check_task is changed
+
+- name: Export backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ wait: true
+ register: backup_export_task
+
+- debug: var=backup_export_task
+
+- assert:
+ that:
+ - backup_export_task is success
+ - backup_export_task is changed
+ - backup_export_task.metadata.download_url != ""
+
+- name: Export backup (Confirmation)
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ register: backup_export_confirmation_task
+
+- debug: var=backup_export_confirmation_task
+
+- assert:
+ that:
+ - backup_export_confirmation_task is success
+ - backup_export_confirmation_task is not changed
+ - backup_export_confirmation_task.metadata.download_url != ""
+
+- name: Restore backup (Check)
+ check_mode: yes
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: restored
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ register: backup_restore_check_task
+
+- debug: var=backup_restore_check_task
+
+- assert:
+ that:
+ - backup_restore_check_task is success
+ - backup_restore_check_task is changed
+
+- name: Restore backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: restored
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ wait: true
+ register: backup_restore_task
+
+- debug: var=backup_restore_task
+
+- assert:
+ that:
+ - backup_restore_task is success
+ - backup_restore_task is changed
+
+- name: Delete backup (Check)
+ check_mode: yes
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_check_task
+
+- debug: var=backup_delete_check_task
+
+- assert:
+ that:
+ - backup_delete_check_task is success
+ - backup_delete_check_task is changed
+
+- name: Delete backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_task
+
+- debug: var=backup_delete_task
+
+- assert:
+ that:
+ - backup_delete_task is success
+ - backup_delete_task is changed
+
+- name: Delete backup (Confirmation)
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_confirmation_task
+
+- debug: var=backup_delete_confirmation_task
+
+- assert:
+ that:
+ - backup_delete_confirmation_task is success
+ - backup_delete_confirmation_task is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml
new file mode 100644
index 00000000..20513771
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get image information and register it in a variable
+ scaleway_image_info:
+ region: par1
+ register: images
+
+- name: Display images variable
+ debug:
+ var: images
+
+- name: Ensure retrieval of images info is success
+ assert:
+ that:
+ - images is success
+
+- name: Get image information from ams1 and register it in a variable
+ scaleway_image_info:
+ region: ams1
+ register: images_ams1
+
+- name: Display images variable from ams1
+ debug:
+ var: images_ams1
+
+- name: Ensure retrieval of images info from ams1 is success
+ assert:
+ that:
+ - images_ams1 is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml
new file mode 100644
index 00000000..fe7aa93b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_image_id: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+scaleway_commerial_type: START1-S
+scaleway_server_name: scaleway_ip_test_server
+scaleway_reverse_name: scaleway.com
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml
new file mode 100644
index 00000000..dcb4fae7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml
@@ -0,0 +1,444 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create IP (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_check_task
+
+- debug: var=ip_creation_check_task
+
+- name: ip_creation_check_task is success
+ assert:
+ that:
+ - ip_creation_check_task is success
+
+- name: ip_creation_check_task is changed
+ assert:
+ that:
+ - ip_creation_check_task is changed
+
+- name: Create IP
+ scaleway_ip:
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_task
+
+- debug: var=ip_creation_task
+
+- name: ip_creation_task is success
+ assert:
+ that:
+ - ip_creation_task is success
+
+- name: ip_creation_task is changed
+ assert:
+ that:
+ - ip_creation_task is changed
+
+- name: ip_creation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_creation_task.scaleway_ip.server is none
+
+- name: Create IP (Confirmation)
+ scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_confirmation_task
+
+- debug: var=ip_creation_confirmation_task
+
+- name: ip_creation_confirmation_task is success
+ assert:
+ that:
+ - ip_creation_confirmation_task is success
+
+- name: ip_creation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_creation_confirmation_task is not changed
+
+- name: ip_creation_confirmation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_creation_confirmation_task.scaleway_ip.server is none
+
+- name: Assign reverse to an IP (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_check_task
+
+- debug: var=ip_reverse_assignation_check_task
+
+- name: ip_reverse_assignation_check_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_check_task is success
+
+- name: ip_reverse_assignation_check_task is changed
+ assert:
+ that:
+ - ip_reverse_assignation_check_task is changed
+
+- name: Assign reverse to an IP
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_task
+
+- debug: var=ip_reverse_assignation_task
+
+- name: ip_reverse_assignation_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_task is success
+
+- name: ip_reverse_assignation_task is changed
+ assert:
+ that:
+ - ip_reverse_assignation_task is changed
+
+- name: Assign reverse to an IP (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_confirmation_task
+
+- debug: var=ip_reverse_assignation_confirmation_task
+
+- name: ip_reverse_assignation_confirmation_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_confirmation_task is success
+
+- name: ip_reverse_assignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_reverse_assignation_confirmation_task is not changed
+
+- name: Create a server
+ scaleway_compute:
+ state: present
+ name: '{{ scaleway_server_name }}'
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ dynamic_ip_required: false
+ wait: true
+
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- name: server_creation_task is success
+ assert:
+ that:
+ - server_creation_task is success
+
+- name: Assign IP to server (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_check_task
+
+- debug: var=ip_assignation_check_task
+
+- name: ip_assignation_check_task is success
+ assert:
+ that:
+ - ip_assignation_check_task is success
+
+- name: ip_assignation_check_task is changed
+ assert:
+ that:
+ - ip_assignation_check_task is changed
+
+- name: Assign IP to server
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_task
+
+- debug: var=ip_assignation_task
+
+- name: ip_assignation_task is success
+ assert:
+ that:
+ - ip_assignation_task is success
+
+- name: ip_assignation_task is changed
+ assert:
+ that:
+ - ip_assignation_task is changed
+
+- name: Assign IP to server (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_confirmation_task
+
+- debug: var=ip_assignation_confirmation_task
+
+- name: ip_assignation_confirmation_task is success
+ assert:
+ that:
+ - ip_assignation_confirmation_task is success
+
+- name: ip_assignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_assignation_confirmation_task is not changed
+
+- name: Unassign IP from server (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_check_task
+
+- debug: var=ip_unassignation_check_task
+
+- name: ip_unassignation_check_task is success
+ assert:
+ that:
+ - ip_unassignation_check_task is success
+
+- name: ip_unassignation_check_task is changed
+ assert:
+ that:
+ - ip_unassignation_check_task is changed
+
+- name: Unassign IP from server
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_task
+
+- debug: var=ip_unassignation_task
+
+- name: ip_unassignation_task is success
+ assert:
+ that:
+ - ip_unassignation_task is success
+
+- name: ip_unassignation_task is changed
+ assert:
+ that:
+ - ip_unassignation_task is changed
+
+- name: ip_unassignation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_unassignation_task.scaleway_ip.server is none
+
+- name: Unassign IP from server (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_confirmation_task
+
+- debug: var=ip_unassignation_confirmation_task
+
+- name: ip_unassignation_confirmation_task is success
+ assert:
+ that:
+ - ip_unassignation_confirmation_task is success
+
+- name: ip_unassignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_unassignation_confirmation_task is not changed
+
+- name: ip_unassignation_confirmation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_unassignation_confirmation_task.scaleway_ip.server is none
+
+- name: Unassign reverse from IP (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_check_task
+
+- debug: var=ip_reverse_unassignation_check_task
+
+- name: ip_reverse_unassignation_check_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_check_task is success
+
+- name: ip_reverse_unassignation_check_task is changed
+ assert:
+ that:
+ - ip_reverse_unassignation_check_task is changed
+
+- name: Unassign reverse from an IP
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_task
+
+- debug: var=ip_reverse_unassignation_task
+
+- name: ip_reverse_unassignation_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_task is success
+
+- name: ip_reverse_unassignation_task is changed
+ assert:
+ that:
+ - ip_reverse_unassignation_task is changed
+
+- name: ip_reverse_unassignation_task.scaleway_ip.reverse is none
+ assert:
+ that:
+ - ip_reverse_unassignation_task.scaleway_ip.reverse is none
+
+- name: Unassign reverse from an IP (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_confirmation_task
+
+- debug: var=ip_reverse_unassignation_confirmation_task
+
+- name: ip_reverse_unassignation_confirmation_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task is success
+
+- name: ip_reverse_unassignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task is not changed
+
+- name: ip_reverse_unassignation_confirmation_task.scaleway_ip.reverse is none
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task.scaleway_ip.reverse is none
+
+- name: Destroy a server
+ scaleway_compute:
+ name: '{{ scaleway_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- name: server_destroy_task is success
+ assert:
+ that:
+ - server_destroy_task is success
+
+- name: server_destroy_task is changed
+ assert:
+ that:
+ - server_destroy_task is changed
+
+- name: Delete IP (Check)
+ check_mode: yes
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_check_task
+
+- name: ip_deletion_check_task is success
+ assert:
+ that:
+ - ip_deletion_check_task is success
+
+- name: ip_deletion_check_task is changed
+ assert:
+ that:
+ - ip_deletion_check_task is changed
+
+- name: Delete IP
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_task
+
+- name: ip_deletion_task is success
+ assert:
+ that:
+ - ip_deletion_task is success
+
+- name: ip_deletion_task is changed
+ assert:
+ that:
+ - ip_deletion_task is changed
+
+- name: Delete IP (Confirmation)
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_confirmation_task
+
+- name: ip_deletion_confirmation_task is success
+ assert:
+ that:
+ - ip_deletion_confirmation_task is success
+
+- name: ip_deletion_confirmation_task is not changed
+ assert:
+ that:
+ - ip_deletion_confirmation_task is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml
new file mode 100644
index 00000000..51918979
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get IP information and register it in a variable
+ scaleway_ip_info:
+ region: par1
+ register: ips
+
+- name: Display ips variable
+ debug:
+ var: ips
+
+- name: Ensure retrieval of ips info is success
+ assert:
+ that:
+ - ips is success
+
+- name: Get IP information and register it in a variable (AMS1)
+ scaleway_ip_info:
+ region: ams1
+ register: ips_ams1
+
+- name: Display ips variable (AMS1)
+ debug:
+ var: ips_ams1
+
+- name: Ensure retrieval of ips info is success (AMS1)
+ assert:
+ that:
+ - ips_ams1 is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml
new file mode 100644
index 00000000..48d56e10
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+scaleway_region: fr-par
+name: lb_ansible_test
+description: Load-balancer used for testing scaleway_lb ansible module
+updated_description: Load-balancer used for testing scaleway_lb ansible module (Updated description)
+tags:
+ - first_tag
+ - second_tag
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml
new file mode 100644
index 00000000..7da0bcbb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml
@@ -0,0 +1,219 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create a load-balancer (Check)
+ check_mode: yes
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ description }}'
+ tags: '{{ tags }}'
+ register: lb_creation_check_task
+
+- debug: var=lb_creation_check_task
+
+- name: lb_creation_check_task is success
+ assert:
+ that:
+ - lb_creation_check_task is success
+
+- name: lb_creation_check_task is changed
+ assert:
+ that:
+ - lb_creation_check_task is changed
+
+- name: Create load-balancer
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ description }}'
+ tags: '{{ tags }}'
+ wait: true
+ register: lb_creation_task
+
+- debug: var=lb_creation_task
+
+- name: lb_creation_task is success
+ assert:
+ that:
+ - lb_creation_task is success
+
+- name: lb_creation_task is changed
+ assert:
+ that:
+ - lb_creation_task is changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_creation_task.scaleway_lb.status == "ready"
+
+- name: Create load-balancer (Confirmation)
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ tags: '{{ tags }}'
+ description: '{{ description }}'
+ register: lb_creation_confirmation_task
+
+- debug: var=lb_creation_confirmation_task
+
+- name: lb_creation_confirmation_task is success
+ assert:
+ that:
+ - lb_creation_confirmation_task is success
+
+- name: lb_creation_confirmation_task is not changed
+ assert:
+ that:
+ - lb_creation_confirmation_task is not changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_creation_confirmation_task.scaleway_lb.status == "ready"
+
+- name: Update load-balancer (Check)
+ check_mode: yes
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ tags: '{{ tags }}'
+ description: '{{ updated_description }}'
+ register: lb_update_check_task
+
+- debug: var=lb_update_check_task
+
+- name: lb_update_check_task is success
+ assert:
+ that:
+ - lb_update_check_task is success
+
+- name: lb_update_check_task is changed
+ assert:
+ that:
+ - lb_update_check_task is changed
+
+- name: Update load-balancer
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ tags: '{{ tags }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ updated_description }}'
+ wait: true
+ register: lb_update_task
+
+- debug: var=lb_update_task
+
+- name: lb_update_task is success
+ assert:
+ that:
+ - lb_update_task is success
+
+- name: lb_update_task is changed
+ assert:
+ that:
+ - lb_update_task is changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_update_task.scaleway_lb.status == "ready"
+
+- name: Update load-balancer (Confirmation)
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ tags: '{{ tags }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ updated_description }}'
+ register: lb_update_confirmation_task
+
+- debug: var=lb_update_confirmation_task
+
+- name: lb_update_confirmation_task is success
+ assert:
+ that:
+ - lb_update_confirmation_task is success
+
+- name: lb_update_confirmation_task is not changed
+ assert:
+ that:
+ - lb_update_confirmation_task is not changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_update_confirmation_task.scaleway_lb.status == "ready"
+
+- name: Delete load-balancer (Check)
+ check_mode: yes
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ register: lb_deletion_check_task
+
+- name: lb_deletion_check_task is success
+ assert:
+ that:
+ - lb_deletion_check_task is success
+
+- name: lb_deletion_check_task is changed
+ assert:
+ that:
+ - lb_deletion_check_task is changed
+
+- name: Delete load-balancer
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ wait: true
+ register: lb_deletion_task
+
+- name: lb_deletion_task is success
+ assert:
+ that:
+ - lb_deletion_task is success
+
+- name: lb_deletion_task is changed
+ assert:
+ that:
+ - lb_deletion_task is changed
+
+- name: Delete load-balancer (Confirmation)
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ register: lb_deletion_confirmation_task
+
+- name: lb_deletion_confirmation_task is success
+ assert:
+ that:
+ - lb_deletion_confirmation_task is success
+
+- name: lb_deletion_confirmation_task is not changed
+ assert:
+ that:
+ - lb_deletion_confirmation_task is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml
new file mode 100644
index 00000000..39983609
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml
@@ -0,0 +1,17 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get organization information and register it in a variable
+ scaleway_organization_info:
+ register: organizations
+
+- name: Display organizations variable
+ debug:
+ var: organizations
+
+- name: Ensure retrieval of organizations info is success
+ assert:
+ that:
+ - organizations is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml
new file mode 100644
index 00000000..13bbef06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml
new file mode 100644
index 00000000..1c4e409b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml
@@ -0,0 +1,134 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create security group check
+ check_mode: yes
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+- debug: var=security_group_creation
+
+- name: Ensure security group creation check is success
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is changed
+
+- block:
+ - name: Create security group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+ - debug: var=security_group_creation
+
+ - name: Ensure security group creation is success
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is changed
+
+ - name: Create security group duplicate
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+ - debug: var=security_group_creation
+
+ - name: Ensure security group duplicate creation is success
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is not changed
+
+ - name: Delete security group check
+ check_mode: yes
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+ - debug: var=security_group_deletion
+
+ - name: Ensure security group deletion check is success
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is changed
+
+ always:
+ - name: Delete security group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+ - debug: var=security_group_deletion
+
+ - name: Ensure security group deletion is success
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is changed
+
+- name: Delete security group duplicate
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+- debug: var=security_group_deletion
+
+- name: Ensure security group duplicate deletion is success
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is not changed
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml
new file mode 100644
index 00000000..1c0c0afc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get security group information and register it in a variable
+ scaleway_security_group_info:
+ region: par1
+ register: security_groups
+
+- name: Display security_groups variable
+ debug:
+ var: security_groups
+
+- name: Ensure retrieval of security groups info is success
+ assert:
+ that:
+ - security_groups is success
+
+- name: Get security group information and register it in a variable (AMS1)
+ scaleway_security_group_info:
+ region: ams1
+ register: ams1_security_groups
+
+- name: Display security_groups variable (AMS1)
+ debug:
+ var: ams1_security_groups
+
+- name: Ensure retrieval of security groups info is success (AMS1)
+ assert:
+ that:
+ - ams1_security_groups is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml
new file mode 100644
index 00000000..9c56c4db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: par1
+protocol: "TCP"
+port: 80
+ip_range: "0.0.0.0/0"
+direction: "inbound"
+action: "accept"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
new file mode 100644
index 00000000..c6f6396b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
@@ -0,0 +1,247 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create a scaleway security_group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group
+
+- debug: var=security_group
+
+- name: Create security_group_rule check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+- debug: var=security_group_rule_creation_task
+
+- assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+- block:
+ - name: Create security_group_rule
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+ - name: Create security_group_rule duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
+
+ - name: Delete security_group_rule check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+ always:
+ - name: Delete security_group_rule
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+- name: Delete security_group_rule duplicate
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+- debug: var=security_group_rule_deletion_task
+
+- assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is not changed
+
+- block:
+ - name: Create security_group_rule with null
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+ - name: Create security_group_rule with null duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
+
+ - name: Delete security_group_rule with null check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+ always:
+ - name: Delete security_group_rule with null
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+- name: Delete security_group_rule with null duplicate
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+- debug: var=security_group_rule_deletion_task
+
+- assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is not changed
+
+- name: Delete scaleway security_group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml
new file mode 100644
index 00000000..a85a221f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get server information and register it in a variable
+ scaleway_server_info:
+ region: par1
+ register: servers
+
+- name: Display servers variable
+ debug:
+ var: servers
+
+- name: Ensure retrieval of servers info is success
+ assert:
+ that:
+ - servers is success
+
+- name: Get server information and register it in a variable (AMS1)
+ scaleway_server_info:
+ region: ams1
+ register: ams1_servers
+
+- name: Display servers variable (AMS1)
+ debug:
+ var: ams1_servers
+
+- name: Ensure retrieval of servers info is success (AMS1)
+ assert:
+ that:
+ - ams1_servers is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml
new file mode 100644
index 00000000..39807698
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get snapshot information and register it in a variable
+ scaleway_snapshot_info:
+ region: par1
+ register: snapshots
+
+- name: Display snapshots variable
+ debug:
+ var: snapshots
+
+- name: Ensure retrieval of snapshots info is success
+ assert:
+ that:
+ - snapshots is success
+
+- name: Get snapshot information and register it in a variable (AMS1)
+ scaleway_snapshot_info:
+ region: ams1
+ register: ams1_snapshots
+
+- name: Display snapshots variable (AMS1)
+ debug:
+ var: ams1_snapshots
+
+- name: Ensure retrieval of snapshots info is success (AMS1)
+ assert:
+ that:
+ - ams1_snapshots is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml
new file mode 100644
index 00000000..b951fbad
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml
@@ -0,0 +1,44 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ check_mode: yes
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ register: result1
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ register: result2
+
+- assert:
+ that:
+ - result1 is success and result1 is changed
+ - result2 is success and result2 is not changed
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ check_mode: yes
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ register: result1
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ register: result2
+
+- assert:
+ that:
+ - result1 is success and result1 is changed
+ - result2 is success and result2 is not changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml
new file mode 100644
index 00000000..69396fcc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+
+cloud_init_script: '''
+#cloud-config
+
+# final_message
+# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
+# this message is written by cloud-final when the system has finished
+# its first boot
+final_message: "The system is finally up, after $UPTIME seconds"
+'''
+scaleway_image_id: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_commercial_type: START1-S
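Note on the defaults above: the cloud_init_script value relies on YAML single-quote escaping ('' yields a literal quote), so the stored string begins and ends with a quote character. A block scalar is the more common way to carry a multi-line cloud-init payload; a minimal sketch, assuming scaleway_user_data only needs the payload text itself and not the surrounding quote characters:

cloud_init_script: |
  #cloud-config
  final_message: "The system is finally up, after $UPTIME seconds"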
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml
new file mode 100644
index 00000000..df744709
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml
@@ -0,0 +1,82 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create a server
+ scaleway_compute:
+ name: foobar
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ wait: true
+
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- set_fact:
+ server_id: "{{ server_creation_task.msg.id }}"
+
+- debug: var=server_id
+
+- name: Patch user_data cloud-init configuration (Check)
+ check_mode: yes
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_check_task
+
+- debug: var=user_data_check_task
+
+- assert:
+ that:
+ - user_data_check_task is success
+ - user_data_check_task is changed
+
+- name: Patch user_data cloud-init configuration
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_task
+
+- debug: var=user_data_task
+
+- assert:
+ that:
+ - user_data_task is success
+ - user_data_task is changed
+
+- name: Patch user_data cloud-init configuration (Confirmation)
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_confirmation_task
+
+- debug: var=user_data_confirmation_task
+
+- assert:
+ that:
+ - user_data_confirmation_task is success
+ - user_data_confirmation_task is not changed
+
+- name: Destroy it
+ scaleway_compute:
+ name: foobar
+ state: absent
+ region: '{{ scaleway_region }}'
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ wait: true
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml
new file mode 100644
index 00000000..13bbef06
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml
new file mode 100644
index 00000000..3f983db9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml
@@ -0,0 +1,46 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Make sure volume is not there before tests
+ scaleway_volume:
+ name: ansible-test-volume
+ state: absent
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+
+- name: Create volume
+ scaleway_volume:
+ name: ansible-test-volume
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- debug: var=server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+- name: Make sure volume is deleted
+ scaleway_volume:
+ name: ansible-test-volume
+ state: absent
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases
new file mode 100644
index 00000000..f24a42a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases
@@ -0,0 +1,2 @@
+cloud/scaleway
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml
new file mode 100644
index 00000000..ce3f13cb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml
@@ -0,0 +1,32 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get volume information and register it in a variable
+ scaleway_volume_info:
+ region: par1
+ register: volumes
+
+- name: Display volumes variable
+ debug:
+ var: volumes
+
+- name: Ensure retrieval of volumes info is success
+ assert:
+ that:
+ - volumes is success
+
+- name: Get volume information and register it in a variable (AMS1)
+ scaleway_volume_info:
+ region: ams1
+ register: ams1_volumes
+
+- name: Display volumes variable (AMS1)
+ debug:
+ var: ams1_volumes
+
+- name: Ensure retrieval of volumes info is success (AMS1)
+ assert:
+ that:
+ - ams1_volumes is success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/aliases
new file mode 100644
index 00000000..a965d6e8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/aliases
@@ -0,0 +1,3 @@
+shippable/cloud/group1
+cloud/foreman
+needs/file/scripts/inventory/foreman.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/foreman.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/foreman.sh
new file mode 100755
index 00000000..1b3e70fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/foreman.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+# Wrapper to use the correct Python interpreter and support code coverage.
+
+REL_SCRIPT="../../../../scripts/inventory/foreman.py"
+ABS_SCRIPT="$("${ANSIBLE_TEST_PYTHON_INTERPRETER}" -c "import os; print(os.path.abspath('${REL_SCRIPT}'))")"
+
+# Make sure output written to current directory ends up in the temp dir.
+cd "${OUTPUT_DIR}"
+
+python.py "${ABS_SCRIPT}" "$@"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/runme.sh
new file mode 100755
index 00000000..a9c94fbe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/runme.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+export FOREMAN_HOST="${FOREMAN_HOST:-localhost}"
+export FOREMAN_PORT="${FOREMAN_PORT:-8080}"
+export FOREMAN_INI_PATH="${OUTPUT_DIR}/foreman.ini"
+
+
+############################################
+# SMOKETEST WITH SIMPLE INI
+############################################
+
+cat > "$FOREMAN_INI_PATH" <<FOREMAN_INI
+[foreman]
+url = http://${FOREMAN_HOST}:${FOREMAN_PORT}
+user = ansible-tester
+password = secure
+ssl_verify = False
+use_reports_api = False
+FOREMAN_INI
+
+# use ansible to validate the return data
+ansible-playbook -i foreman.sh test_foreman_inventory.yml --connection=local
+RC=$?
+if [[ $RC != 0 ]]; then
+ echo "foreman inventory script smoketest failed"
+ exit $RC
+fi
+
+############################################
+# SMOKETEST WITH NON-ASCII INI
+############################################
+
+cat > "$FOREMAN_INI_PATH" <<FOREMAN_INI
+[foreman]
+url = http://${FOREMAN_HOST}:${FOREMAN_PORT}
+user = ansible-tester
+password = secure
+ssl_verify = False
+group_prefix = Ľuboš_
+FOREMAN_INI
+
+# use ansible to validate the return data
+ansible-playbook -i foreman.sh test_foreman_inventory.yml --connection=local
+RC=$?
+if [[ $RC != 0 ]]; then
+ echo "foreman inventory script non-ascii failed"
+ exit $RC
+fi
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml
new file mode 100644
index 00000000..3b563c76
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/script_inventory_foreman/test_foreman_inventory.yml
@@ -0,0 +1,7 @@
+- name: check the foreman inventory script result size and attributes
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - assert:
+ that:
+ - "{{ groups['all']|length > 900 }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases
new file mode 100644
index 00000000..58a2a31e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases
@@ -0,0 +1,3 @@
+needs/root
+shippable/posix/group2
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml
new file mode 100644
index 00000000..e450dee9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml
@@ -0,0 +1,33 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# (c) 2016, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# FIXME: Unfortunately ansible_selinux could be a boolean or a dictionary !
+- debug:
+ msg: SELinux is disabled
+ when: ansible_selinux is defined and ansible_selinux == False
+
+- debug:
+ msg: SELinux is {{ ansible_selinux.status }}
+ when: ansible_selinux is defined and ansible_selinux != False
+
+- include: sefcontext.yml
+ when: ansible_selinux is defined and ansible_selinux != False and ansible_selinux.status == 'enabled'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml
new file mode 100644
index 00000000..b452ea28
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml
@@ -0,0 +1,114 @@
+# (c) 2016, Dag Wieers <dag@wieers.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: install requirements for RHEL 7 and earlier
+ package:
+ name: policycoreutils-python
+ when:
+ - ansible_distribution == 'RedHat'
+ - ansible_distribution_major_version|int < 8
+
+- name: install requirements for RHEL 8 and later
+ package:
+ name: python3-policycoreutils
+ when:
+ - ansible_distribution == 'RedHat'
+ - ansible_distribution_major_version|int >= 8
+
+- name: Ensure we start with a clean state
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: absent
+
+- name: Set SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: present
+ reload: no
+ register: first
+
+- assert:
+ that:
+ - first is changed
+ - first.setype == 'httpd_sys_content_t'
+
+- name: Set SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: present
+ reload: no
+ register: second
+
+- assert:
+ that:
+ - second is not changed
+ - second.setype == 'httpd_sys_content_t'
+
+- name: Change SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: present
+ reload: no
+ register: third
+
+- assert:
+ that:
+ - third is changed
+ - third.setype == 'unlabeled_t'
+
+- name: Change SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: present
+ reload: no
+ register: fourth
+
+- assert:
+ that:
+ - fourth is not changed
+ - fourth.setype == 'unlabeled_t'
+
+- name: Delete SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: absent
+ reload: no
+ register: fifth
+
+- assert:
+ that:
+ - fifth is changed
+ - fifth.setype == 'httpd_sys_content_t'
+
+- name: Delete SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: absent
+ reload: no
+ register: sixth
+
+- assert:
+ that:
+ - sixth is not changed
+ - sixth.setype == 'unlabeled_t'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases
new file mode 100644
index 00000000..5dcaaac6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml
new file mode 100644
index 00000000..0203ee0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml
@@ -0,0 +1,174 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Creating a client if the directory doesn't exist should work
+ sensu_client:
+ subscriptions:
+ - default
+
+- name: Set variable for client file
+ set_fact:
+ client_file: "/etc/sensu/conf.d/client.json"
+
+- name: Insert invalid JSON in the client file
+ lineinfile:
+ state: "present"
+ create: "yes"
+ path: "{{ client_file }}"
+ line: "{'foo' = bar}"
+
+- name: Configure Sensu client with an existing invalid file
+ sensu_client:
+ name: "client"
+ state: "present"
+ subscriptions:
+ - default
+ register: client
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ client_file }}"
+ register: client_config
+
+- name: Assert that client data was set successfully and properly
+ assert:
+ that:
+ - "client is successful"
+ - "client is changed"
+ - "client['config']['name'] == 'client'"
+ - "'default' in client['config']['subscriptions']"
+ - "client['file'] == client_file"
+
+- name: Assert that the client configuration file is actually configured properly
+ vars:
+ config: "{{ client_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "config['client']['keepalives'] == true"
+ - "config['client']['name'] == 'client'"
+ - "config['client']['safe_mode'] == false"
+ - "'default' in config['client']['subscriptions']"
+
+- name: Delete Sensu client configuration
+ sensu_client:
+ state: "absent"
+ register: client_delete
+
+- name: Delete Sensu client configuration (again)
+ sensu_client:
+ state: "absent"
+ register: client_delete_twice
+
+- name: Retrieve configuration file stat
+ stat:
+ path: "{{ client_file }}"
+ register: client_stat
+
+- name: Assert that client deletion was successful
+ assert:
+ that:
+ - "client_delete is successful"
+ - "client_delete is changed"
+ - "client_delete_twice is successful"
+ - "client_delete_twice is not changed"
+ - "client_stat.stat.exists == false"
+
+- name: Configuring a client without subscriptions should fail
+ sensu_client:
+ name: "failure"
+ register: failure
+ ignore_errors: true
+
+- name: Assert failure to create client
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: subscriptions' in failure['msg']"
+
+- name: Configure a new client from scratch with custom parameters
+ sensu_client:
+ name: "custom"
+ address: "host.fqdn"
+ subscriptions:
+ - "default"
+ - "webserver"
+ redact:
+ - "password"
+ socket:
+ bind: "127.0.0.1"
+ port: "3030"
+ keepalive:
+ thresholds:
+ warning: "180"
+ critical: "300"
+ handlers:
+ - "email"
+ custom:
+ - broadcast: "irc"
+ occurrences: "3"
+ register: client
+
+- name: Configure a new client from scratch with custom parameters (twice)
+ sensu_client:
+ name: "custom"
+ address: "host.fqdn"
+ subscriptions:
+ - "default"
+ - "webserver"
+ redact:
+ - "password"
+ socket:
+ bind: "127.0.0.1"
+ port: "3030"
+ keepalive:
+ thresholds:
+ warning: "180"
+ critical: "300"
+ handlers:
+ - "email"
+ custom:
+ - broadcast: "irc"
+ occurrences: "3"
+ register: client_twice
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ client_file }}"
+ register: client_config
+
+- name: Assert that client data was set successfully and properly
+ assert:
+ that:
+ - "client is successful"
+ - "client is changed"
+ - "client_twice is successful"
+ - "client_twice is not changed"
+ - "client['config']['name'] == 'custom'"
+ - "client['config']['address'] == 'host.fqdn'"
+ - "'default' in client['config']['subscriptions']"
+ - "'webserver' in client['config']['subscriptions']"
+ - "'password' in client['config']['redact']"
+ - "client['config']['keepalive']['thresholds']['warning'] == '180'"
+ - "client['config']['keepalive']['thresholds']['critical'] == '300'"
+ - "'email' in client['config']['keepalive']['handlers']"
+ - "client['config']['keepalive']['occurrences'] == '3'"
+ - "client['file'] == client_file"
+
+- name: Assert that the client configuration file is actually configured properly
+ vars:
+ config: "{{ client_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "config['client']['name'] == 'custom'"
+ - "config['client']['address'] == 'host.fqdn'"
+ - "config['client']['keepalives'] == true"
+ - "config['client']['safe_mode'] == false"
+ - "'default' in config['client']['subscriptions']"
+ - "'webserver' in config['client']['subscriptions']"
+ - "'password' in config['client']['redact']"
+ - "config['client']['keepalive']['thresholds']['warning'] == '180'"
+ - "config['client']['keepalive']['thresholds']['critical'] == '300'"
+ - "'email' in config['client']['keepalive']['handlers']"
+ - "config['client']['keepalive']['occurrences'] == '3'"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases
new file mode 100644
index 00000000..5dcaaac6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group1
+needs/root
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml
new file mode 100644
index 00000000..795cdfcb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml
@@ -0,0 +1,124 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Creating a handler if the directory doesn't exist should work
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+
+- name: Insert junk JSON in a handlers file
+ lineinfile:
+ state: "present"
+ create: "yes"
+ path: "/etc/sensu/conf.d/handlers/handler.json"
+ line: "{'foo' = bar}"
+
+- name: Configure a handler with an existing invalid file
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+ register: handler
+
+- name: Configure a handler (again)
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+ register: handler_twice
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Assert that handler data was set successfully and properly
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler_twice is successful"
+ - "handler_twice is not changed"
+ - "handler['name'] == 'handler'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/handler.json'"
+ - "handler['config']['type'] == 'pipe'"
+ - "handler['config']['command'] == '/bin/bash'"
+ - "handler['config']['timeout'] == 10"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'handler' in config['handlers']"
+ - "config['handlers']['handler']['type'] == 'pipe'"
+ - "config['handlers']['handler']['command'] == '/bin/bash'"
+ - "config['handlers']['handler']['timeout'] == 10"
+ - "config['handlers']['handler']['handle_flapping'] == false"
+ - "config['handlers']['handler']['handle_silenced'] == false"
+
+- name: Delete Sensu handler configuration
+ sensu_handler:
+ name: "handler"
+ state: "absent"
+ register: handler_delete
+
+- name: Delete Sensu handler configuration (again)
+ sensu_handler:
+ name: "handler"
+ state: "absent"
+ register: handler_delete_twice
+
+- name: Retrieve configuration file stat
+ stat:
+ path: "{{ handler['file'] }}"
+ register: handler_stat
+
+- name: Assert that handler deletion was successful
+ assert:
+ that:
+ - "handler_delete is successful"
+ - "handler_delete is changed"
+ - "handler_delete_twice is successful"
+ - "handler_delete_twice is not changed"
+ - "handler_stat.stat.exists == false"
+
+- name: Configuring a handler without a name should fail
+ sensu_handler:
+ type: "pipe"
+ command: "/bin/bash"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler without a name fails
+ assert:
+ that:
+ - failure is failed
+ - "'required arguments: name' in failure['msg']"
+
+- name: Configuring a handler without a type should fail
+ sensu_handler:
+ name: "pipe"
+ command: "/bin/bash"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler without a type fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: type' in failure['msg']"
+
+- include: pipe.yml
+- include: tcp.yml
+- include: udp.yml
+- include: set.yml
+- include: transport.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml
new file mode 100644
index 00000000..02f59a4c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml
@@ -0,0 +1,20 @@
+# Note: Pipe handlers are also tested and used as part of basic main.yml coverage
+- name: Configuring a handler with missing pipe parameters should fail
+ sensu_handler:
+ name: "pipe"
+ type: "pipe"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing pipe parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: command' in failure['msg']"
+
+- name: Configure a handler with pipe parameters
+ sensu_handler:
+ name: "pipe"
+ type: "pipe"
+ command: "/bin/bash"
+ register: handler
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml
new file mode 100644
index 00000000..393d711d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml
@@ -0,0 +1,48 @@
+- name: Configuring a handler with missing set parameters should fail
+ sensu_handler:
+ name: "set"
+ type: "set"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing set parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: handlers' in failure['msg']"
+
+- name: Configure a set handler
+ sensu_handler:
+ name: "set"
+ type: "set"
+ handlers:
+ - anotherhandler
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate set handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'set'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/set.json'"
+ - "handler['config']['type'] == 'set'"
+ - "'anotherhandler' in handler['config']['handlers']"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'set' in config['handlers']"
+ - "config['handlers']['set']['type'] == 'set'"
+ - "'anotherhandler' in config['handlers']['set']['handlers']"
+ - "config['handlers']['set']['handle_flapping'] == false"
+ - "config['handlers']['set']['handle_silenced'] == false"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml
new file mode 100644
index 00000000..076ea14e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml
@@ -0,0 +1,51 @@
+- name: Configuring a handler with missing tcp parameters should fail
+ sensu_handler:
+ name: "tcp"
+ type: "tcp"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing tcp parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: socket' in failure['msg']"
+
+- name: Configure a tcp handler
+ sensu_handler:
+ name: "tcp"
+ type: "tcp"
+ socket:
+ host: 127.0.0.1
+ port: 8000
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate tcp handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'tcp'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/tcp.json'"
+ - "handler['config']['type'] == 'tcp'"
+ - "handler['config']['socket']['host'] == '127.0.0.1'"
+ - "handler['config']['socket']['port'] == 8000"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'tcp' in config['handlers']"
+ - "config['handlers']['tcp']['type'] == 'tcp'"
+ - "config['handlers']['tcp']['socket']['host'] == '127.0.0.1'"
+ - "config['handlers']['tcp']['socket']['port'] == 8000"
+ - "config['handlers']['tcp']['handle_flapping'] == false"
+ - "config['handlers']['tcp']['handle_silenced'] == false"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml
new file mode 100644
index 00000000..34816194
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml
@@ -0,0 +1,51 @@
+- name: Configuring a handler with missing transport parameters should fail
+ sensu_handler:
+ name: "transport"
+ type: "transport"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing transport parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: pipe' in failure['msg']"
+
+- name: Configure a transport handler
+ sensu_handler:
+ name: "transport"
+ type: "transport"
+ pipe:
+ type: "topic"
+ name: "transport_handler"
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate transport handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'transport'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/transport.json'"
+ - "handler['config']['type'] == 'transport'"
+ - "handler['config']['pipe']['type'] == 'topic'"
+ - "handler['config']['pipe']['name'] == 'transport_handler'"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'transport' in config['handlers']"
+ - "config['handlers']['transport']['type'] == 'transport'"
+ - "config['handlers']['transport']['pipe']['type'] == 'topic'"
+ - "config['handlers']['transport']['pipe']['name'] == 'transport_handler'"
+ - "config['handlers']['transport']['handle_flapping'] == false"
+ - "config['handlers']['transport']['handle_silenced'] == false"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml
new file mode 100644
index 00000000..8f13a2a2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml
@@ -0,0 +1,51 @@
+- name: Configuring a handler with missing udp parameters should fail
+ sensu_handler:
+ name: "udp"
+ type: "udp"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing udp parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: socket' in failure['msg']"
+
+- name: Configure a udp handler
+ sensu_handler:
+ name: "udp"
+ type: "udp"
+ socket:
+ host: 127.0.0.1
+ port: 8000
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate udp handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'udp'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/udp.json'"
+ - "handler['config']['type'] == 'udp'"
+ - "handler['config']['socket']['host'] == '127.0.0.1'"
+ - "handler['config']['socket']['port'] == 8000"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'udp' in config['handlers']"
+ - "config['handlers']['udp']['type'] == 'udp'"
+ - "config['handlers']['udp']['socket']['host'] == '127.0.0.1'"
+ - "config['handlers']['udp']['socket']['port'] == 8000"
+ - "config['handlers']['udp']['handle_flapping'] == false"
+ - "config['handlers']['udp']['handle_silenced'] == false"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml
new file mode 100644
index 00000000..e4b0123d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml
@@ -0,0 +1 @@
+remote_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml
new file mode 100644
index 00000000..bacdfaf3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml
@@ -0,0 +1,69 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include distribution specific variables
+ include_vars: '{{ lookup(''first_found'', search) }}'
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution | lower }}.yml'
+ - '{{ ansible_os_family | lower }}.yml'
+ - '{{ ansible_system | lower }}.yml'
+ - default.yml
+ paths:
+ - vars
+- name: install cron package
+ package:
+ name: '{{ cron_pkg }}'
+ when: cron_pkg | default(false, true)
+ register: cron_package_installed
+ until: cron_package_installed is success
+- when: faketime_pkg | default(false, true)
+ block:
+ - name: install cron and faketime packages
+ package:
+ name: '{{ faketime_pkg }}'
+ register: faketime_package_installed
+ until: faketime_package_installed is success
+ - name: Find libfaketime path
+ shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1'
+ args:
+ warn: false
+ register: libfaketime_path
+ - when: ansible_service_mgr == 'systemd'
+ block:
+ - name: create directory for cron drop-in file
+ file:
+ path: /etc/systemd/system/{{ cron_service }}.service.d
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+ - name: Use faketime with cron service
+ copy:
+ content: '[Service]
+
+ Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }}
+
+ Environment="FAKETIME=+0y x10"
+
+ Environment=RANDOM_DELAY=0'
+ dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf
+ owner: root
+ group: root
+ mode: '0644'
+ - when: ansible_system == 'FreeBSD'
+ name: Use faketime with cron service
+ copy:
+ content: cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"'
+ dest: /etc/rc.conf.d/cron
+ owner: root
+ group: wheel
+ mode: '0644'
+- name: enable cron service
+ service:
+ daemon-reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}'
+ name: '{{ cron_service }}'
+ state: restarted
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml
new file mode 100644
index 00000000..cd04871c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml
@@ -0,0 +1,3 @@
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: dpkg -L
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml
new file mode 100644
index 00000000..b80a51b5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml
@@ -0,0 +1,3 @@
+cron_pkg: cronie
+cron_service: crond
+list_pkg_files: rpm -ql
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml
new file mode 100644
index 00000000..41ed4493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml
@@ -0,0 +1,3 @@
+cron_pkg:
+cron_service: cron
+list_pkg_files: pkg info --list-files
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml
new file mode 100644
index 00000000..2dff13de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml
@@ -0,0 +1,4 @@
+cron_pkg: cronie
+cron_service: crond
+faketime_pkg:
+list_pkg_files: rpm -ql
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml
new file mode 100644
index 00000000..cd3677a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml
@@ -0,0 +1,3 @@
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: rpm -ql
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases
new file mode 100644
index 00000000..65e83152
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases
@@ -0,0 +1 @@
+needs/target/setup_epel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml
new file mode 100644
index 00000000..10cc5903
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml
@@ -0,0 +1,16 @@
+docker_cli_version: '0.0'
+docker_api_version: '0.0'
+docker_py_version: '0.0'
+docker_skip_cleanup: no
+docker_prereq_packages: []
+docker_packages:
+ - docker-ce
+
+docker_pip_extra_packages: []
+docker_pip_package: docker
+docker_pip_package_limit: ''
+
+docker_cleanup_packages:
+ - docker
+ - docker-ce
+ - docker-ce-cli
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml
new file mode 100644
index 00000000..a389f91d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml
@@ -0,0 +1,14 @@
+- name: Remove pip packages
+ pip:
+ state: present
+ name: "{{ [docker_pip_package] | union(docker_pip_extra_packages) }}"
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
+
+- name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ docker_cleanup_packages }}"
+ state: absent
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/library/current_container_facts.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/library/current_container_facts.py
new file mode 100644
index 00000000..b1fea1b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/library/current_container_facts.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+#
+# (c) 2020 Matt Clay <mclay@redhat.com>
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# This module properly lives in community.docker; it has been vendored here
+# to support the 1.x.y docker integration tests.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: current_container_facts
+short_description: Return facts about whether the module runs in a Docker container
+description:
+ - Return facts about whether the module runs in a Docker container.
+author:
+ - Felix Fontein (@felixfontein)
+'''
+
+EXAMPLES = '''
+- name: Get facts on current container
+ community.docker.current_container_facts:
+- name: Print information on current container when running in a container
+ ansible.builtin.debug:
+ msg: "Container ID is {{ ansible_module_container_id }}"
+ when: ansible_module_running_in_container
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Ansible facts returned by the module
+ type: dict
+ returned: always
+ contains:
+ ansible_module_running_in_container:
+ description:
+ - Whether the module was able to detect that it runs in a container or not.
+ returned: always
+ type: bool
+ ansible_module_container_id:
+ description:
+ - The detected container ID.
+ - Contains an empty string if no container was detected.
+ returned: always
+ type: str
+ ansible_module_container_type:
+ description:
+ - The detected container environment.
+ - Contains an empty string if no container was detected.
+ - Otherwise, will be one of C(docker) or C(azure_pipelines).
+ returned: always
+ type: str
+ # choices:
+ # - docker
+ # - azure_pipelines
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(dict(), supports_check_mode=True)
+
+ path = '/proc/self/cpuset'
+ container_id = ''
+ container_type = ''
+
+ if os.path.exists(path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ with open(path, 'rb') as f:
+ contents = f.read().decode('utf-8')
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
+
+ if cgroup_path == '/docker':
+ container_id = cgroup_name
+ container_type = 'docker'
+
+ if cgroup_path == '/azpl_job':
+ container_id = cgroup_name
+ container_type = 'azure_pipelines'
+
+ module.exit_json(ansible_facts=dict(
+ ansible_module_running_in_container=container_id != '',
+ ansible_module_container_id=container_id,
+ ansible_module_container_type=container_type,
+ ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml
new file mode 100644
index 00000000..2be15776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Debian.yml
new file mode 100644
index 00000000..7ca964dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Debian.yml
@@ -0,0 +1,45 @@
+---
+- name: Get OS version
+ shell: uname -r
+ register: os_version
+
+- name: Install pre-reqs
+ apt:
+ name: '{{ docker_prereq_packages }}'
+ state: present
+ update_cache: true
+ notify: cleanup docker
+
+- name: Add gpg key
+ shell: curl -fsSL https://download.docker.com/linux/ubuntu/gpg >key && apt-key add key
+
+- name: Add Docker repo
+ shell: add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
+- block:
+ - name: Prevent service restart
+ copy:
+ content: exit 101
+ dest: /usr/sbin/policy-rc.d
+ backup: true
+ mode: '0755'
+ register: policy_rc_d
+
+ - name: Install Docker CE
+ apt:
+ name: '{{ docker_packages }}'
+ state: present
+ update_cache: true
+
+ always:
+ - name: Restore /usr/sbin/policy-rc.d (if needed)
+ command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
+ when:
+ - '''backup_file'' in policy_rc_d'
+
+ - name: Remove /usr/sbin/policy-rc.d (if needed)
+ file:
+ path: /usr/sbin/policy-rc.d
+ state: absent
+ when:
+ - '''backup_file'' not in policy_rc_d'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Fedora.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Fedora.yml
new file mode 100644
index 00000000..28848373
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Fedora.yml
@@ -0,0 +1,26 @@
+---
+- name: Import GPG key
+ rpm_key:
+ key: https://download.docker.com/linux/fedora/gpg
+ state: present
+
+- name: Add repository
+ yum_repository:
+ file: docker-ce
+ name: docker-ce-stable
+ description: Docker CE Stable - $basearch
+ baseurl: https://download.docker.com/linux/fedora/{{ 31 if ansible_facts.distribution_major_version|int > 31 else '$releasever' }}/$basearch/stable
+ enabled: yes
+ gpgcheck: yes
+
+- name: Update cache
+ command: dnf makecache
+ args:
+ warn: no
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+ enablerepo: docker-ce-test
+ notify: cleanup docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-7.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
new file mode 100644
index 00000000..6e798ce2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
@@ -0,0 +1,45 @@
+---
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ yum:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Install the EPEL repo, which is missing on RHEL 7 and provides pigz (needed for docker-ce 18)
+ include_role:
+ name: setup_epel
+
+- name: Enable extras repository for RHEL on AWS
+  # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ uses rhel-7-server-rhui-extras-rpms
+ command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms
+ args:
+ warn: no
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Update cache
+ command: yum -y makecache fast
+ args:
+ warn: no
+
+- name: Install docker
+ yum:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-8.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
new file mode 100644
index 00000000..c207f4ab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
@@ -0,0 +1,34 @@
+---
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ dnf:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+ register: result
+ until: result is success
+ retries: 10
+ delay: 2
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Suse.yml
new file mode 100644
index 00000000..f107506b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/Suse.yml
@@ -0,0 +1,8 @@
+---
+- name: Install docker 17
+ community.general.zypper:
+ name: "{{ docker_packages }}"
+ force: yes
+ disable_gpg_check: yes
+ update_cache: yes
+ notify: cleanup docker
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml
new file mode 100644
index 00000000..e1b25dc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml
@@ -0,0 +1,150 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Setup Docker
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ block:
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ # Detect docker API version
+ - name: Check Docker API version
+ command: "docker version -f {% raw %}'{{(index .Server.Components 0).Details.ApiVersion}}'{% endraw %}"
+ register: docker_api_version_stdout
+ ignore_errors: yes
+
+ - name: Limit docker pypi package version to < 4.3.0
+ set_fact:
+ docker_pip_package_limit: '<4.3.0'
+ when: (docker_api_version_stdout.stdout | default('0.0')) is version('1.39', '<')
+
+ - name: Install Python requirements
+ pip:
+ state: present
+ name: "{{ [docker_pip_package ~ docker_pip_package_limit] + docker_pip_extra_packages }}"
+ extra_args: "-c {{ remote_constraints }}"
+ notify: cleanup docker
+
+ # Detect docker CLI, API and docker-py versions
+ - name: Check Docker CLI version
+ command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
+ register: docker_cli_version_stdout
+ ignore_errors: yes
+
+ - name: Check Docker API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
+ register: docker_api_version_stdout
+ ignore_errors: yes
+
+ - name: Check docker-py API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
+ register: docker_py_version_stdout
+ ignore_errors: yes
+
+ - set_fact:
+ docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
+ docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
+ docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
+
+ - debug:
+ msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
+
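+  # Clean up leftover ansible-test-* resources and list what remains, but only when a
+  # working Docker CLI was detected (see the 'when' at the end of this block).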
+ - block:
+ # Cleanup docker daemon
+ - name: "Remove all ansible-test-* docker containers"
+ shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker rm -f'
+ register: docker_containers
+ retries: 3
+ delay: 3
+ until: docker_containers is success
+ ignore_errors: yes
+
+ - name: "Remove all ansible-test-* docker volumes"
+ shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker volume rm -f'
+ register: docker_volumes
+ ignore_errors: yes
+
+ - name: "Remove all ansible-test-* docker networks"
+ shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-test-" | xargs -r docker network rm'
+ register: docker_networks
+ ignore_errors: yes
+
+ - name: Cleaned docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines | default([]) }}"
+ volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+ networks: "{{ docker_networks.stdout_lines | default([]) }}"
+
+ # List all existing docker resources
+ - name: List all docker containers
+ command: docker ps --no-trunc -a
+ register: docker_containers
+ ignore_errors: yes
+
+ - name: List all docker volumes
+ command: docker volume ls
+ register: docker_volumes
+ ignore_errors: yes
+
+ - name: List all docker networks
+ command: docker network ls --no-trunc
+ register: docker_networks
+ ignore_errors: yes
+
+ - name: List all docker images
+ command: docker images --no-trunc -a
+ register: docker_images
+ ignore_errors: yes
+
+ - name: Still existing docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines | default([]) }}"
+ volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+ networks: "{{ docker_networks.stdout_lines | default([]) }}"
+ images: "{{ docker_images.stdout_lines | default([]) }}"
+
+ when: docker_cli_version is version('0.0', '>')
+
+ - name: Detect whether we are running inside a container
+ current_container_facts:
+
+ - name: Inspect current container
+ docker_container_info:
+ name: "{{ ansible_module_container_id }}"
+ register: current_container_info
+ when: ansible_module_running_in_container
+
+ - name: Determine network name
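+    # Pick the name of the first network the current container is attached to (if any);
+    # it is later passed as network_mode/networks for the registry and frontend containers.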
+ set_fact:
+ current_container_network_ip: "{{ (current_container_info.container.NetworkSettings.Networks | dictsort)[0].0 | default('') if ansible_module_running_in_container else '' }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Debian.yml
new file mode 100644
index 00000000..e9ffb941
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Debian.yml
@@ -0,0 +1,5 @@
+docker_prereq_packages:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - software-properties-common
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Fedora.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Fedora.yml
new file mode 100644
index 00000000..43126881
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Fedora.yml
@@ -0,0 +1,4 @@
+docker_prereq_packages: []
+
+docker_packages:
+ - docker-ce
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-7.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-7.yml
new file mode 100644
index 00000000..7166b1f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,8 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_pip_extra_packages:
+ - requests==2.6.0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-8.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-8.yml
new file mode 100644
index 00000000..ff6dcf7b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,9 @@
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_packages:
+ - docker-ce-19.03.13
+ - docker-ce-cli-19.03.13
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Suse.yml
new file mode 100644
index 00000000..ad0d515e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Suse.yml
@@ -0,0 +1,2 @@
+docker_packages:
+ - docker>=17
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..36ab54b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,5 @@
+docker_pip_extra_packages:
+ # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
+  # to ensure our get_url tests work out fine. This is only an issue if pyOpenSSL is also installed.
+ # Not sure why RHEL7 needs this specific version
+ - requests==2.6.0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/default.yml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/default.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/main.yml
new file mode 100644
index 00000000..8d4b74c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/main.yml
@@ -0,0 +1,11 @@
+---
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/aliases
new file mode 100644
index 00000000..688c8884
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/aliases
@@ -0,0 +1,2 @@
+needs/target/setup_docker
+needs/target/setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.conf b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.conf
new file mode 100644
index 00000000..bfba5204
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.conf
@@ -0,0 +1,46 @@
+events {
+ worker_connections 16;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ error_log /dev/stdout info;
+ access_log /dev/stdout;
+
+ server {
+ listen *:5000 ssl;
+ server_name test-registry.ansible.com;
+ server_name_in_redirect on;
+
+ ssl_protocols TLSv1.2;
+ ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256';
+ ssl_ecdh_curve X25519:secp521r1:secp384r1;
+ ssl_prefer_server_ciphers on;
+ ssl_certificate /etc/nginx/cert.pem;
+ ssl_certificate_key /etc/nginx/cert.key;
+
+ location / {
+ return 401;
+ }
+
+ location /v2/ {
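+            # "real-registry" is the alias/link name given to the registry container in setup.yml / setup-frontend.yml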
+ proxy_pass http://real-registry:5000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Request-Start $msec;
+
+ client_max_body_size 0;
+ chunked_transfer_encoding on;
+
+ auth_basic "Ansible Test Docker Registry";
+ auth_basic_user_file /etc/nginx/nginx.htpasswd;
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
new file mode 100644
index 00000000..1291ae77
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
@@ -0,0 +1 @@
+testuser:{PLAIN}hunter2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
new file mode 100644
index 00000000..f9d2c9e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
@@ -0,0 +1,55 @@
+---
+- name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ docker_registry_setup_inames }}"
+
+- name: "Get registry logs"
+ command: "docker logs {{ docker_registry_container_name_registry }}"
+ register: registry_logs
+ no_log: yes
+ ignore_errors: yes
+
+- name: "Printing registry logs"
+ debug:
+ var: registry_logs.stdout_lines
+ when: registry_logs is not failed
+
+- name: "Get nginx logs for first instance"
+ command: "docker logs {{ docker_registry_container_name_nginx }}"
+ register: nginx_logs
+ no_log: yes
+ ignore_errors: yes
+
+- name: "Get nginx logs for second instance"
+ command: "docker logs {{ docker_registry_container_name_nginx2 }}"
+ register: nginx2_logs
+ no_log: yes
+ ignore_errors: yes
+
+- name: "Printing nginx logs for first instance"
+ debug:
+ var: nginx_logs.stdout_lines
+ when: nginx_logs is not failed
+
+- name: "Printing nginx logs for second instance"
+ debug:
+ var: nginx2_logs.stdout_lines
+ when: nginx_logs is not failed
+
+- name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: yes
+ with_items: "{{ docker_registry_setup_cnames }}"
+ register: result
+ retries: 3
+ delay: 3
+ until: result is success
+
+- name: "Make sure all volumes are removed"
+ command: "docker rm -f {{ item }}"
+ with_items: "{{ docker_registry_setup_vnames }}"
+ ignore_errors: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/main.yml
new file mode 100644
index 00000000..23030182
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Remove test registry
+ include_tasks: ../handlers/cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/meta/main.yml
new file mode 100644
index 00000000..b252b0e7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ #- setup_docker -- done in setup.yml, to work around cleanup problems!
+ - setup_openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/main.yml
new file mode 100644
index 00000000..320df246
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: setup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
new file mode 100644
index 00000000..f1055ea3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
@@ -0,0 +1,97 @@
+---
+# Set up first nginx frontend for registry
+- name: Start nginx frontend for registry
+ docker_volume:
+ name: '{{ docker_registry_container_name_frontend }}'
+ state: present
+
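+# The frontend container is created stopped so that nginx.conf, the htpasswd file and the
+# TLS files can be copied into its volume before nginx starts.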
+- name: Create container for nginx frontend for registry
+ docker_container:
+ state: stopped
+ name: '{{ docker_registry_container_name_frontend }}'
+ image: "{{ docker_test_image_registry_nginx }}"
+ ports: 5000
+ # `links` does not work when using a network. That's why the docker_container task
+ # in setup.yml specifies `aliases` so we get the same effect.
+ links:
+ - '{{ docker_registry_container_name_registry }}:real-registry'
+ volumes:
+ - '{{ docker_registry_container_name_frontend }}:/etc/nginx/'
+ network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+ networks: >-
+ {{
+ [dict([['name', current_container_network_ip]])]
+ if current_container_network_ip not in ['', 'bridge'] else omit
+ }}
+ register: nginx_container
+
+- name: Copy static files into volume
+ command: docker cp {{ role_path }}/files/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
+ loop:
+ - nginx.conf
+ - nginx.htpasswd
+ register: can_copy_files
+ ignore_errors: yes
+
+- when: can_copy_files is not failed
+ block:
+
+ - name: Create private key for frontend certificate
+ community.crypto.openssl_privatekey:
+ path: '{{ output_dir }}/cert.key'
+ type: ECC
+ curve: secp256r1
+ force: yes
+
+ - name: Create CSR for frontend certificate
+ community.crypto.openssl_csr:
+ path: '{{ output_dir }}/cert.csr'
+ privatekey_path: '{{ output_dir }}/cert.key'
+ subject_alt_name:
+ - DNS:test-registry.ansible.com
+
+ - name: Create frontend certificate
+ community.crypto.openssl_certificate:
+ path: '{{ output_dir }}/cert.pem'
+ csr_path: '{{ output_dir }}/cert.csr'
+ privatekey_path: '{{ output_dir }}/cert.key'
+ provider: selfsigned
+
+ - name: Copy dynamic files into volume
+ command: docker cp {{ output_dir }}/{{ item }} {{ docker_registry_container_name_frontend }}:/etc/nginx/{{ item }}
+ loop:
+ - cert.pem
+ - cert.key
+
+ - name: Start nginx frontend for registry
+ docker_container:
+ name: '{{ docker_registry_container_name_frontend }}'
+ state: started
+ register: nginx_container
+
+ - name: Output nginx container network settings
+ debug:
+ var: nginx_container.container.NetworkSettings
+
+ - name: Get registry URL
+ set_fact:
+      # Note that this host/port combination is used by the Docker daemon, which is why `localhost` is appropriate!
+ # This host/port combination cannot be used if the tests are running inside a docker container.
+ docker_registry_frontend_address: localhost:{{ nginx_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+ # The following host/port combination can be used from inside the docker container.
+ docker_registry_frontend_address_internal: "{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"
+
+ - name: Wait for registry frontend
+ uri:
+ url: https://{{ docker_registry_frontend_address_internal }}/v2/
+ url_username: testuser
+ url_password: hunter2
+ validate_certs: false
+ register: result
+ until: result is success
+ retries: 5
+ delay: 1
+
+- set_fact:
+ docker_registry_frontend_address: 'n/a'
+ when: can_copy_files is failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup.yml
new file mode 100644
index 00000000..6782da9d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/tasks/setup.yml
@@ -0,0 +1,80 @@
+---
+- name: Register registry cleanup
+ # This must be done **before** docker is set up (see next task), to ensure that the
+ # registry is removed **before** docker itself is removed. This is necessary as the
+ # registry and its frontends run as docker containers.
+ command: 'true'
+ notify: Remove test registry
+
+- name: Setup Docker
+ # Please note that we do setup_docker here and not via meta/main.yml to avoid the problem that
+ # our cleanup is called **after** setup_docker's cleanup has been called!
+ include_role:
+ name: setup_docker
+
+- name: Create random name prefix and test registry name
+ set_fact:
+ docker_registry_container_name_registry: '{{ ''ansible-test-registry-%0x'' % ((2**32) | random) }}'
+ docker_registry_container_name_nginx: '{{ ''ansible-test-registry-frontend-%0x'' % ((2**32) | random) }}'
+ docker_registry_container_name_nginx2: '{{ ''ansible-test-registry-frontend2-%0x'' % ((2**32) | random) }}'
+
+- name: Create image and container list
+ set_fact:
+ docker_registry_setup_inames: []
+ docker_registry_setup_cnames:
+ - '{{ docker_registry_container_name_registry }}'
+ - '{{ docker_registry_container_name_nginx }}'
+ - '{{ docker_registry_container_name_nginx2 }}'
+ docker_registry_setup_vnames:
+ - '{{ docker_registry_container_name_nginx }}'
+ - '{{ docker_registry_container_name_nginx2 }}'
+
+- debug:
+ msg: Using test registry name {{ docker_registry_container_name_registry }} and nginx frontend names {{ docker_registry_container_name_nginx }} and {{ docker_registry_container_name_nginx2 }}
+
+- fail: msg="docker / docker-py version too old to set up the docker registry!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
+
+- when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.20', '>=')
+ block:
+
+ # Set up registry container
+ - name: Start test registry
+ docker_container:
+ name: '{{ docker_registry_container_name_registry }}'
+ image: "{{ docker_test_image_registry }}"
+ ports: 5000
+ network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+ # We need to define the alias `real-registry` here because the global `links`
+ # option for the NGINX containers (see setup-frontend.yml) does not work when
+ # using networks.
+ networks: >-
+ {{
+ [dict([['name', current_container_network_ip], ['aliases', ['real-registry']]])]
+ if current_container_network_ip not in ['', 'bridge'] else omit
+ }}
+ register: registry_container
+
+ - name: Get registry URL
+ set_fact:
+ registry_address: localhost:{{ registry_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+
+ # Set up first nginx frontend for registry
+ - include_tasks: setup-frontend.yml
+ vars:
+ docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx }}'
+
+ - set_fact:
+ registry_frontend_address: '{{ docker_registry_frontend_address }}'
+
+ # Set up second nginx frontend for registry
+ - include_tasks: setup-frontend.yml
+ vars:
+ docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx2 }}'
+
+ - set_fact:
+ registry_frontend2_address: '{{ docker_registry_frontend_address }}'
+
+ # Print addresses for registry and frontends
+ - debug:
+ msg: "Registry available under {{ registry_address }}, NGINX frontends available under {{ registry_frontend_address }} and {{ registry_frontend2_address }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/vars/main.yml
new file mode 100644
index 00000000..8d4b74c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_docker_registry/vars/main.yml
@@ -0,0 +1,11 @@
+---
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml
new file mode 100644
index 00000000..21627cfa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml
@@ -0,0 +1,10 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install EPEL
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ disable_gpg_check: true
+ when: ansible_facts.distribution in ['RedHat', 'CentOS']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml
new file mode 100644
index 00000000..95e4b837
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# setup etcd3 for integration tests on module/lookup
+# (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Copyright: (c) 2018, Ansible Project
+#
+etcd3_ver: "v3.2.14"
+etcd3_download_server: "https://storage.googleapis.com/etcd"
+#etcd3_download_server: "https://github.com/coreos/etcd/releases/download"
+etcd3_download_url: "{{ etcd3_download_server }}/{{ etcd3_ver }}/etcd-{{ etcd3_ver }}-linux-amd64.tar.gz"
+etcd3_download_location: /tmp/etcd-download-test
+etcd3_path: "{{ etcd3_download_location }}/etcd-{{ etcd3_ver }}-linux-amd64"
+
+etcd3_pip_module: etcd3>=0.12
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml
new file mode 100644
index 00000000..de7697e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml
@@ -0,0 +1,117 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# setup etcd3 for integration tests on module/lookup
+# (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+# setup etcd3 for supported distros
+- block:
+
+ - name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
+
+ - name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ - 'default.yml'
+ paths: '../vars'
+
+ - name: Upgrade setuptools python2 module
+ pip:
+ name: setuptools<45
+ extra_args: --upgrade
+ state: present
+ when: python_suffix == ''
+
+ - name: Install etcd3 python modules
+ pip:
+ name: "{{ etcd3_pip_module }}"
+ extra_args: --only-binary grpcio
+ state: present
+
+ # Check if re-installing etcd3 is required
+  - name: Check if etcdctl exists for re-use.
+ shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo"
+ args:
+ executable: /bin/bash
+ changed_when: false
+ failed_when: false
+ register: _testetcd3ctl
+
+ - block:
+ # Installing etcd3
+    - name: If etcd3 can't be re-used, prepare the download folder
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ register: _etcddownloadexists
+ when:
+ - _testetcd3ctl.rc != 0
+
+ - name: Delete download folder if already exists (to start clean)
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: absent
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Recreate download folder if purged
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Download etcd3
+ unarchive:
+ src: "{{ etcd3_download_url }}"
+ dest: "{{ etcd3_download_location }}"
+ remote_src: yes
+
+    # Run etcd3 (and kill it afterwards if it wasn't already running).
+ - name: Run etcd3
+ shell: "{{ etcd3_path }}/etcd &"
+ register: _etcd3run
+ changed_when: true
+
+# - name: kill etcd3
+# command: "pkill etcd"
+
+ when:
+ - _testetcd3ctl.rc != 0
+
+ when:
+ - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml
new file mode 100644
index 00000000..2e0c0824
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml
@@ -0,0 +1 @@
+etcd3_pip_module: etcd3<0.12 \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml
new file mode 100644
index 00000000..bacd4a37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml
@@ -0,0 +1,3 @@
+# SuSE's Python 3.6.10 ships six 1.11.0 installed via distutils;
+# we restrict etcd3 to < 0.11 so that pip does not try to upgrade six.
+etcd3_pip_module: 'etcd3<0.11'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml
new file mode 100644
index 00000000..bacd4a37
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml
@@ -0,0 +1,3 @@
+# SuSE's Python 3.6.10 ships six 1.11.0 installed via distutils;
+# we restrict etcd3 to < 0.11 so that pip does not try to upgrade six.
+etcd3_pip_module: 'etcd3<0.11'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml
new file mode 100644
index 00000000..4a01b0ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml
@@ -0,0 +1,2 @@
+---
+# default: do not touch anything \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md
new file mode 100644
index 00000000..d7916c14
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md
@@ -0,0 +1,138 @@
+# Create a dummy flatpak repository remote
+
+This document describes how to create a local flatpak dummy repo, just like the one contained in the `files/repo.tar.xz` archive.
+
+
+## Create a hello world app
+
+Prerequisites:
+
+ - flathub
+
+Prepare the environment:
+
+```
+flatpak install --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+```
+
+Create a hello world executable:
+
+```
+echo $'#!/bin/sh\necho hello world' > hello.sh
+```
+
+To create dummy flatpaks, run this (defining a unique NUM for every flatpak to add):
+
+```
+export NUM=1
+flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+flatpak build appdir$NUM mkdir /app/bin;
+flatpak build appdir$NUM install --mode=750 hello.sh /app/bin;
+flatpak build-finish --command=hello.sh appdir$NUM
+```
+
+## Create a repo and/or add the app to it
+
+Create a repo and add the file to it in one command:
+
+```
+flatpak build-export repo appdir$NUM stable
+```
+
+## Create flatpak*-files
+
+Put a flatpakref file under the repo folder (`repo/com.dummy.App1.flatpakref`):
+
+```
+[Flatpak Ref]
+Title=Dummy App$NUM
+Name=com.dummy.App$NUM
+Branch=stable
+Url=file:///tmp/flatpak/repo
+GPGKey={{ base64-encoded public KEY }}
+IsRuntime=false
+RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+```
+
+Add a `.flatpakrepo` file to the `repo` folder (`repo/dummy-repo.flatpakrepo`):
+
+```
+[Flatpak Repo]
+Title=Dummy Repo
+Url=file:///tmp/flatpak/repo
+Comment=Dummy repo for ansible module integration testing
+Description=Dummy repo for ansible module integration testing
+GPGKey={{ base64-encoded public KEY }}
+```
+
+## Sign the repo
+
+Create a new key in a new gpg home folder (on RedHat systems, the executable needs to be addressed as gpg2):
+
+```
+mkdir gpg
+gpg --homedir gpg --quick-gen-key test@dummy.com
+```
+
+Sign the repo and summary file; you need to redo this whenever you update the repository:
+
+```
+flatpak build-sign repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+```
+
+Export the public key as a file:
+
+```
+gpg --homedir=gpg --export KEY_ID > dummy-repo.gpg
+```
+
+Create a base64-encoded string from the gpg file for the `GPGKey=` property in the flatpak* files:
+
+```
+base64 dummy-repo.gpg | tr -d '\n'
+```
+
+## How to use the repo
+
+Now you can add the `repo` folder as a local repo:
+
+```
+flatpak --system remote-add --gpg-import=/tmp/flatpak/repo/dummy-repo.gpg dummy-repo /tmp/flatpak/repo
+```
+
+Or, via `.flatpakrepo` file:
+
+```
+flatpak --system remote-add dummy-repo /tmp/flatpak/repo/dummy-repo.flatpakrepo
+```
+
+And install the hello world flatpaks like this:
+
+```
+flatpak --system install dummy-repo com.dummy.App$NUM
+```
+
+Or from flatpakref:
+
+```
+flatpak --system install --from /tmp/flatpak/repo/com.dummy.App$NUM.flatpakref
+```
+
+Run the app:
+
+```
+flatpak run com.dummy.App$NUM
+```
+
+To install an app without any runtime dependencies (the app will be broken, but it is enough to test flatpak installation):
+
+```
+flatpak --system install --no-deps dummy-repo com.dummy.App$NUM
+```
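+
+## Pack the repo for the test role
+
+The integration test role ships the result as `files/repo.tar.xz` and unpacks it to `/tmp/flatpak` at test time. Since the commands above reference `/tmp/flatpak/repo/dummy-repo.gpg`, copy the exported key into the `repo` folder first. A minimal packing sketch (this assumes the archive contains just the `repo` folder, which was not verified against the original file):
+
+```
+cp dummy-repo.gpg repo/
+tar -cJf repo.tar.xz repo
+```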
+
+## Sources:
+
+* https://blogs.gnome.org/alexl/2017/02/10/maintaining-a-flatpak-repository/
+
+* http://docs.flatpak.org/en/latest/first-build.html
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz
new file mode 100644
index 00000000..41a89c46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz
Binary files differ
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml
new file mode 100644
index 00000000..9380dee9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml
@@ -0,0 +1,4 @@
+- name: remove temporary flatpak link
+ file:
+ state: absent
+ path: /tmp/flatpak
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
new file mode 100644
index 00000000..75ee4583
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
new file mode 100644
index 00000000..d80d3646
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
@@ -0,0 +1,24 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Set up dummy flatpak repository remote
+ block:
+ - name: Copy repo into place
+ unarchive:
+ src: repo.tar.xz
+ dest: '{{ remote_tmp_dir }}'
+ owner: root
+ group: root
+ mode: '0644'
+ - name: Create deterministic link to temp directory
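+    # The repo's flatpakref/flatpakrepo files reference file:///tmp/flatpak/repo, so the unpacked repo needs this fixed path.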
+ file:
+ state: link
+ src: '{{ remote_tmp_dir }}/'
+ path: /tmp/flatpak
+ owner: root
+ group: root
+ mode: '0644'
+ notify: remove temporary flatpak link
+ become: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml
new file mode 100644
index 00000000..b4f66cd3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+- name: uninstall gnu-tar
+ community.general.homebrew:
+ name: gnu-tar
+ state: absent
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml
new file mode 100644
index 00000000..7b973d47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+
+ - name: MACOS | Install gnu-tar
+ community.general.homebrew:
+ name: gnu-tar
+ state: present
+ become: yes
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ notify:
+ - uninstall gnu-tar
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml
new file mode 100644
index 00000000..6b2a59c5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+- include: setup.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml
new file mode 100644
index 00000000..0069be15
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml
@@ -0,0 +1,26 @@
+---
+
+- name: Install https transport for apt and ca-certificates
+ apt: name={{ item }} state=latest force=yes
+ with_items:
+ - apt-transport-https
+ - ca-certificates
+
+- name: Install apt_key dependencies
+ pip: name={{ item }}
+ with_items:
+ - pyOpenSSL
+ - ndg-httpsclient
+ - pyasn1
+
+- name: Add InfluxDB public GPG key
+ apt_key: url=https://repos.influxdata.com/influxdb.key state=present
+
+- name: Add InfluxDB repository
+ apt_repository: repo='deb https://repos.influxdata.com/ubuntu trusty stable' filename='influxdb' state=present update_cache=yes
+
+- name: Install InfluxDB
+ apt: name=influxdb state=latest
+
+- name: Start InfluxDB service
+ service: name=influxdb state=started
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml
new file mode 100644
index 00000000..2be15776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml
new file mode 100644
index 00000000..be844952
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+- set_fact:
+ has_java_keytool: >-
+ {{
+ ansible_os_family not in ['Darwin', 'FreeBSD']
+ and not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<"))
+ }}
+
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: has_java_keytool
+
+- name: Install keytool
+ package:
+ name: '{{ keytool_package_name }}'
+ become: true
+ when: has_java_keytool
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml
new file mode 100644
index 00000000..faace450
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml
@@ -0,0 +1,2 @@
+---
+keytool_package_name: ca-certificates-java
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml
new file mode 100644
index 00000000..d301dff4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml
@@ -0,0 +1,2 @@
+---
+keytool_package_name: java-11-openjdk-headless
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml
new file mode 100644
index 00000000..d301dff4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml
@@ -0,0 +1,2 @@
+---
+keytool_package_name: java-11-openjdk-headless
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf
new file mode 100644
index 00000000..84a80b71
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf
@@ -0,0 +1,35 @@
+# Plain MQTT protocol
+listener 1883
+
+# MQTT over TLS 1.1
+listener 8883
+tls_version tlsv1.1
+cafile /tls/ca_certificate.pem
+certfile /tls/server_certificate.pem
+keyfile /tls/server_key.pem
+
+# MQTT over TLS 1.2
+listener 8884
+tls_version tlsv1.2
+cafile /tls/ca_certificate.pem
+certfile /tls/server_certificate.pem
+keyfile /tls/server_key.pem
+
+# TODO: This does not appear to be supported on Ubuntu 18.04. Re-try on 20.04 or the next LTS release.
+# MQTT over TLS 1.3
+#
+# listener 8885
+# tls_version tlsv1.3
+# cafile /tls/ca_certificate.pem
+# certfile /tls/server_certificate.pem
+# keyfile /tls/server_key.pem
+
+log_dest syslog
+
+log_type error
+log_type warning
+log_type notice
+log_type information
+log_type debug
+
+connection_messages true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml
new file mode 100644
index 00000000..af05db79
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - setup_tls
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml
new file mode 100644
index 00000000..6573ca46
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include: ubuntu.yml
+ when: ansible_distribution == 'Ubuntu'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml
new file mode 100644
index 00000000..5675cb89
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml
@@ -0,0 +1,24 @@
+- name: Install https transport for apt
+ apt:
+ name: apt-transport-https
+ state: latest
+ force: yes
+
+- name: Install Mosquitto Server
+ apt:
+ name: mosquitto
+ state: latest
+ register: result
+ until: result is success
+ delay: 3
+ retries: 10
+
+- name: Ensure TLS config
+ copy:
+ src: mosquitto.conf
+ dest: /etc/mosquitto/mosquitto.conf
+
+- name: Start Mosquitto service
+ service:
+ name: mosquitto
+ state: restarted
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif
new file mode 100644
index 00000000..13397758
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif
@@ -0,0 +1,22 @@
+dn: ou=users,dc=example,dc=com
+objectClass: organizationalUnit
+objectClass: top
+ou: users
+
+dn: uid=ldaptest,ou=users,dc=example,dc=com
+uid: ldaptest
+uidNumber: 1111
+gidNumber: 100
+objectClass: top
+objectClass: posixAccount
+objectClass: shadowAccount
+objectClass: person
+objectClass: organizationalPerson
+objectClass: inetOrgPerson
+loginShell: /bin/sh
+homeDirectory: /home/ldaptest
+cn: LDAP Test
+gecos: LDAP Test
+displayName: LDAP Test
+mail: ldap.test@example.com
+sn: Test
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif
new file mode 100644
index 00000000..1fc061dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif
@@ -0,0 +1,4 @@
+dn: olcDatabase={0}config,cn=config
+changetype: modify
+replace: olcRootPW
+olcRootPW: "Test1234!"
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml
new file mode 100644
index 00000000..ed97d539
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml
@@ -0,0 +1 @@
+---
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml
new file mode 100644
index 00000000..4fd27058
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml
@@ -0,0 +1,70 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Setup OpenLDAP on Debian or Ubuntu
+ block:
+ - name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+
+ - name: Install OpenLDAP server and tools
+ become: True
+ package:
+ name: '{{ item }}'
+ loop: '{{ openldap_packages_name }}'
+
+ - name: Install python-ldap (Python 3)
+ become: True
+ package:
+ name: '{{ python_ldap_package_name_python3 }}'
+ when: ansible_python_version is version('3.0', '>=')
+
+ - name: Install python-ldap (Python 2)
+ become: True
+ package:
+ name: '{{ python_ldap_package_name }}'
+ when: ansible_python_version is version('3.0', '<')
+
+ - name: Make sure OpenLDAP service is stopped
+ become: True
+ shell: 'cat /var/run/slapd/slapd.pid | xargs -r kill -9'
+
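+ # Pre-seed the slapd debconf answers so the package can be reconfigured
+ # non-interactively in the next task.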
+ - name: Debconf
+ shell: 'echo "slapd {{ item.question }} {{ item.vtype }} {{ item.value }}" >> /root/debconf-slapd.conf'
+ loop: "{{ openldap_debconfs }}"
+
+ - name: Dpkg reconfigure
+ shell:
+ cmd: "export DEBIAN_FRONTEND=noninteractive; cat /root/debconf-slapd.conf | debconf-set-selections; dpkg-reconfigure -f noninteractive slapd"
+ creates: "/root/slapd_configured"
+
+ - name: Start OpenLDAP service
+ become: True
+ service:
+ name: '{{ openldap_service_name }}'
+ enabled: True
+ state: started
+
+ - name: Copy initial config ldif file
+ become: True
+ copy:
+ src: 'files/{{ item }}'
+ dest: '/tmp/{{ item }}'
+ owner: root
+ group: root
+ mode: '0644'
+ loop:
+ - rootpw_cnconfig.ldif
+ - initial_config.ldif
+
+ - name: Configure admin password for cn=config
+ shell: "ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/rootpw_cnconfig.ldif"
+
+ - name: Add initial config
+ become: True
+ shell: 'ldapadd -H ldapi:/// -x -D "cn=admin,dc=example,dc=com" -w Test1234! -f /tmp/initial_config.ldif'
+ when: ansible_os_family in ['Ubuntu', 'Debian']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml
new file mode 100644
index 00000000..bcc4feb9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml
@@ -0,0 +1,55 @@
+python_ldap_package_name: python-ldap
+python_ldap_package_name_python3: python3-ldap
+openldap_packages_name:
+ - slapd
+ - ldap-utils
+openldap_service_name: slapd
+openldap_debconfs:
+ - question: "shared/organization"
+ value: "Example Organization"
+ vtype: "string"
+ - question: "slapd/allow_ldap_v2"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/backend"
+ value: "MDB"
+ vtype: "select"
+ - question: "slapd/domain"
+ value: "example.com"
+ vtype: "string"
+ - question: "slapd/dump_database"
+ value: "when needed"
+ vtype: "select"
+ - question: "slapd/dump_database_destdir"
+ value: "/var/backups/slapd-VERSION"
+ vtype: "string"
+ - question: "slapd/internal/adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/internal/generated_adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/invalid_config"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/move_old_database"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/no_configuration"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/password1"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password2"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password_mismatch"
+ value: ""
+ vtype: "note"
+ - question: "slapd/purge_database"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/upgrade_slapcat_failure"
+ value: ""
+ vtype: "error"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml
new file mode 100644
index 00000000..bcc4feb9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml
@@ -0,0 +1,55 @@
+python_ldap_package_name: python-ldap
+python_ldap_package_name_python3: python3-ldap
+openldap_packages_name:
+ - slapd
+ - ldap-utils
+openldap_service_name: slapd
+openldap_debconfs:
+ - question: "shared/organization"
+ value: "Example Organization"
+ vtype: "string"
+ - question: "slapd/allow_ldap_v2"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/backend"
+ value: "MDB"
+ vtype: "select"
+ - question: "slapd/domain"
+ value: "example.com"
+ vtype: "string"
+ - question: "slapd/dump_database"
+ value: "when needed"
+ vtype: "select"
+ - question: "slapd/dump_database_destdir"
+ value: "/var/backups/slapd-VERSION"
+ vtype: "string"
+ - question: "slapd/internal/adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/internal/generated_adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/invalid_config"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/move_old_database"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/no_configuration"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/password1"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password2"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password_mismatch"
+ value: ""
+ vtype: "note"
+ - question: "slapd/purge_database"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/upgrade_slapcat_failure"
+ value: ""
+ vtype: "error"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml
new file mode 100644
index 00000000..05cf47e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml
@@ -0,0 +1,6 @@
+---
+
+opennebula_test:
+ hosts:
+ - hv1
+ - hv2
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml
new file mode 100644
index 00000000..2be15776
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml
new file mode 100644
index 00000000..62df7dd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: not ansible_os_family == "Darwin"
+
+- name: Install OpenSSL
+ become: True
+ package:
+ name: '{{ openssl_package_name }}'
+ when: not ansible_os_family == 'Darwin'
+
+- name: Install pyOpenSSL (Python 3)
+ become: True
+ package:
+ name: '{{ pyopenssl_package_name_python3 }}'
+ when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '>=')
+
+- name: Install pyOpenSSL (Python 2)
+ become: True
+ package:
+ name: '{{ pyopenssl_package_name }}'
+ when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '<')
+
+- name: Install pyOpenSSL (Darwin)
+ become: True
+ pip:
+ name: pyOpenSSL
+ extra_args: "-c {{ remote_constraints }}"
+ when: ansible_os_family == 'Darwin'
+
+- name: register pyOpenSSL version
+ command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
+ register: pyopenssl_version
+
+- name: register openssl version
+ shell: "openssl version | cut -d' ' -f2"
+ register: openssl_version
+
+- name: register cryptography version
+ command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
+ register: cryptography_version
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml
new file mode 100644
index 00000000..755c7a08
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: python-openssl
+pyopenssl_package_name_python3: python3-openssl
+openssl_package_name: openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
new file mode 100644
index 00000000..60868915
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: py27-openssl
+pyopenssl_package_name_python3: py36-openssl
+openssl_package_name: openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml
new file mode 100644
index 00000000..2959932c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml
new file mode 100644
index 00000000..2d5200f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml
@@ -0,0 +1,3 @@
+pyopenssl_package_name: python-pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
new file mode 100644
index 00000000..24d02228
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
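+# On FreeBSD and SUSE, point the generic package action at the collection's
+# own backends so later roles resolve the correct package module.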
+- set_fact:
+ pkg_mgr: community.general.pkgng
+ ansible_pkg_mgr: community.general.pkgng
+ cacheable: yes
+ when: ansible_os_family == "FreeBSD"
+
+- set_fact:
+ pkg_mgr: community.general.zypper
+ ansible_pkg_mgr: community.general.zypper
+ cacheable: yes
+ when: ansible_os_family == "Suse"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml
new file mode 100644
index 00000000..aea02442
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml
@@ -0,0 +1,17 @@
+postgresql_service: postgresql
+
+postgresql_packages:
+ - postgresql-server
+ - python-psycopg2
+
+pg_user: postgres
+pg_group: root
+
+locale_latin_suffix:
+locale_utf8_suffix:
+
+# defaults for test SSL
+ssl_db: 'ssl_db'
+ssl_user: 'ssl_user'
+ssl_pass: 'ssl_pass'
+ssl_rootcert: '~{{ pg_user }}/root.crt'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
new file mode 100644
index 00000000..53c79666
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text';
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
new file mode 100644
index 00000000..227ba1b4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text';
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
new file mode 100644
index 00000000..7d6a60e5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
@@ -0,0 +1,2 @@
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text';
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control
new file mode 100644
index 00000000..4f8553c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control
@@ -0,0 +1,3 @@
+comment = 'dummy extension used to test postgresql_ext Ansible module'
+default_version = '3.0'
+relocatable = true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
new file mode 100644
index 00000000..58de3607
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
@@ -0,0 +1,10 @@
+# !!! This file managed by Ansible. Any local changes may be overwritten. !!!
+
+# Database administrative login by UNIX sockets
+# note: you may wish to restrict this further later
+local all {{ pg_user }} trust
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+local all all md5
+host all all 127.0.0.1/32 md5
+host all all ::1/128 md5
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml
new file mode 100644
index 00000000..3723c5ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml
@@ -0,0 +1,249 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Exit on SUSE because it causes CI problems
+- meta: end_play
+ when: ansible_os_family == 'Suse'
+
+# To avoid hangs during postgres service start/stop in CI runs:
+- meta: end_play
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+
+- name: python 2
+ set_fact:
+ python_suffix: ''
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: -py3
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution and Python version specific variables
+ include_vars: '{{ lookup(''first_found'', params) }}'
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - default{{ python_suffix }}.yml
+ paths:
+ - '{{ role_path }}/vars'
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: yes
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: yes
+
+- name: stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ ignore_errors: true
+
+- name: remove old db (RedHat or Suse)
+ file:
+ path: '{{ pg_dir }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Suse"
+
+- name: remove old db (FreeBSD)
+ file:
+ path: '{{ pg_dir }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "FreeBSD"
+
+- name: remove old db config and files (debian)
+ file:
+ path: '{{ loop_item }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "Debian"
+ loop:
+ - /etc/postgresql
+ - /var/lib/postgresql
+ loop_control:
+ loop_var: loop_item
+
+- name: install dependencies for postgresql test
+ package:
+ name: '{{ postgresql_package_item }}'
+ state: present
+ with_items: '{{ postgresql_packages }}'
+ loop_control:
+ loop_var: postgresql_package_item
+
+- name: initialize postgres (FreeBSD)
+ command: /usr/local/etc/rc.d/postgresql oneinitdb
+ when: ansible_os_family == "FreeBSD"
+
+- name: Initialize postgres (RedHat systemd)
+ command: postgresql-setup initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd"
+
+- name: Initialize postgres (RedHat sysv)
+ command: /sbin/service postgresql initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd"
+
+- name: Initialize postgres (Debian)
+ shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main
+ args:
+ creates: /etc/postgresql/{{ pg_ver }}/
+ when: ansible_os_family == 'Debian'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=stopped
+ when: ansible_os_family == 'Suse'
+
+- name: Pause between stop and start postgresql
+ pause:
+ seconds: 5
+ when: ansible_os_family == 'Suse'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=started
+ when: ansible_os_family == 'Suse'
+
+- name: Copy pg_hba into place
+ template:
+ src: files/pg_hba.conf
+ dest: '{{ pg_hba_location }}'
+ owner: '{{ pg_user }}'
+ group: '{{ pg_group }}'
+ mode: '0644'
+
+- name: Generate locales (Debian)
+ locale_gen:
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - pt_BR
+ - es_ES
+ when: ansible_os_family == 'Debian'
+
+- block:
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
+
+ - name: Check if locales need to be generated (RedHat)
+ shell: localedef --list-archive | grep -a -q '^{{ locale }}$'
+ register: locale_present
+ ignore_errors: true
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
+
+ - name: Reinstall internationalization files
+ shell: yum -y reinstall glibc-common || yum -y install glibc-common
+ args:
+ warn: false
+ when: locale_present is failed
+
+ - name: Generate locale (RedHat)
+ command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
+ when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+
+- name: Install glibc langpacks (Fedora >= 24)
+ package:
+ name: '{{ item }}'
+ state: latest
+ with_items:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
+
+- name: enable postgresql service (FreeBSD)
+ lineinfile:
+ path: /etc/rc.conf
+ line: postgresql_enable="YES"
+ when: ansible_os_family == "FreeBSD"
+
+- name: start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: Pause between start and stop
+ pause:
+ seconds: 5
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: yes
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: yes
+ register: terminate
+
+- name: Stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ when: terminate is not succeeded
+
+- name: Pause between stop and start
+ pause:
+ seconds: 5
+
+- name: Start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: copy control file for dummy ext
+ copy:
+ src: dummy.control
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/dummy.control
+ mode: '0444'
+ when: ansible_os_family == 'Debian'
+
+- name: copy version files for dummy ext
+ copy:
+ src: '{{ item }}'
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ with_items:
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
+ when: ansible_os_family == 'Debian'
+
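+# Touch empty upgrade scripts so the dummy extension exposes an update path
+# between its versions; the script file names alone define that path.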
+- name: add update paths
+ file:
+ path: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ state: touch
+ with_items:
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: Get PostgreSQL version
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres
+ register: postgres_version_resp
+
+- name: Print PostgreSQL server version
+ debug:
+ msg: '{{ postgres_version_resp.stdout }}'
+
+- import_tasks: ssl.yml
+ when:
+ - ansible_os_family == 'Debian'
+ - postgres_version_resp.stdout is version('9.4', '>=')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml
new file mode 100644
index 00000000..1bc4411d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/ssl.yml
@@ -0,0 +1,68 @@
+- name: postgresql SSL - create database
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_db:
+ name: '{{ ssl_db }}'
+
+- name: postgresql SSL - create role
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_user:
+ name: '{{ ssl_user }}'
+ role_attr_flags: SUPERUSER
+ password: '{{ ssl_pass }}'
+
+- name: postgresql SSL - install openssl
+ become: true
+ package: name=openssl state=present
+
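+# The next four tasks build a throwaway PKI in the postgres user's home
+# directory: a self-signed root CA plus a server certificate signed by it.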
+- name: postgresql SSL - create certs 1
+ become_user: root
+ become: true
+ shell: openssl req -new -nodes -text -out ~{{ pg_user }}/root.csr -keyout ~{{ pg_user }}/root.key -subj "/CN=localhost.local"
+
+- name: postgresql SSL - create certs 2
+ become_user: root
+ become: true
+ shell: openssl x509 -req -in ~{{ pg_user }}/root.csr -text -days 3650 -extensions v3_ca -signkey ~{{ pg_user }}/root.key -out ~{{ pg_user }}/root.crt
+
+- name: postgresql SSL - create certs 3
+ become_user: root
+ become: true
+ shell: openssl req -new -nodes -text -out ~{{ pg_user }}/server.csr -keyout ~{{ pg_user }}/server.key -subj "/CN=localhost.local"
+
+- name: postgresql SSL - create certs 4
+ become_user: root
+ become: true
+ shell: openssl x509 -req -in ~{{ pg_user }}/server.csr -text -days 365 -CA ~{{ pg_user }}/root.crt -CAkey ~{{ pg_user }}/root.key -CAcreateserial -out server.crt
+
+- name: postgresql SSL - set right permissions to files
+ become_user: root
+ become: true
+ file:
+ path: '{{ item }}'
+ mode: '0600'
+ owner: '{{ pg_user }}'
+ group: '{{ pg_user }}'
+ with_items:
+ - ~{{ pg_user }}/root.key
+ - ~{{ pg_user }}/server.key
+ - ~{{ pg_user }}/root.crt
+ - ~{{ pg_user }}/server.csr
+
+- name: postgresql SSL - enable SSL
+ become_user: '{{ pg_user }}'
+ become: true
+ postgresql_set:
+ login_user: '{{ pg_user }}'
+ db: postgres
+ name: ssl
+ value: true
+
+- name: postgresql SSL - reload PostgreSQL to apply the SSL setting
+ become: true
+ service:
+ name: '{{ postgresql_service }}'
+ state: reloaded
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
new file mode 100644
index 00000000..c5c6795e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.4/main"
+pg_ver: 9.4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml
new file mode 100644
index 00000000..2f6b0d98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml
new file mode 100644
index 00000000..efb0603b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
new file mode 100644
index 00000000..2f6b0d98
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql95-server
+ - py36-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml
new file mode 100644
index 00000000..1fe66782
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql96-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.6
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
new file mode 100644
index 00000000..cd7c83a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py36-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml
new file mode 100644
index 00000000..0b1ab5b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml
@@ -0,0 +1,12 @@
+postgresql_packages:
+ - postgresql11-server
+ - py27-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
new file mode 100644
index 00000000..ee083722
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+ - "bzip2"
+ - "xz"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
new file mode 100644
index 00000000..20c4b1f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
@@ -0,0 +1,7 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+ - "bzip2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml
new file mode 100644
index 00000000..4b6e744b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.1/main"
+pg_ver: 9.1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..ffcc8dd4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.3/main"
+pg_ver: 9.3
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml
new file mode 100644
index 00000000..b088c310
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml
new file mode 100644
index 00000000..897efd2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml
new file mode 100644
index 00000000..10453bdf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml
@@ -0,0 +1,8 @@
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/10/main"
+pg_ver: 10
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
new file mode 100644
index 00000000..19152a64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml
new file mode 100644
index 00000000..ab36dd9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml
@@ -0,0 +1,6 @@
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/defaults/main.yml
new file mode 100644
index 00000000..1b1d8b41
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/defaults/main.yml
@@ -0,0 +1,30 @@
+# General:
+pg_user: postgres
+db_default: postgres
+
+pg_package_list:
+- apt-utils
+- postgresql
+- postgresql-contrib
+- python3-psycopg2
+
+packages_to_remove:
+- postgresql
+- postgresql-contrib
+- postgresql-server
+- postgresql-libs
+- python3-psycopg2
+
+# Master specific defaults:
+master_root_dir: '/var/lib/pgsql/master'
+master_data_dir: '{{ master_root_dir }}/data'
+master_postgresql_conf: '{{ master_data_dir }}/postgresql.conf'
+master_pg_hba_conf: '{{ master_data_dir }}/pg_hba.conf'
+master_port: 5433
+
+# Replica specific defaults:
+replica_root_dir: '/var/lib/pgsql/replica'
+replica_data_dir: '{{ replica_root_dir }}/data'
+replica_postgresql_conf: '{{ replica_data_dir }}/postgresql.conf'
+replica_pg_hba_conf: '{{ replica_data_dir }}/pg_hba.conf'
+replica_port: 5434
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/handlers/main.yml
new file mode 100644
index 00000000..7f4dc5cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/handlers/main.yml
@@ -0,0 +1,23 @@
+- name: Stop services
+ become: yes
+ become_user: '{{ pg_user }}'
+ shell: '{{ pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" -m immediate stop'
+ loop:
+ - { datadir: '{{ master_data_dir }}', port: '{{ master_port }}' }
+ - { datadir: '{{ replica_data_dir }}', port: '{{ replica_port }}' }
+ listen: stop postgresql
+
+- name: Remove packages
+ apt:
+ name: '{{ packages_to_remove }}'
+ state: absent
+ listen: cleanup postgresql
+
+- name: Remove FS objects
+ file:
+ state: absent
+ path: "{{ item }}"
+ loop:
+ - "{{ master_root_dir }}"
+ - "{{ replica_root_dir }}"
+ listen: cleanup postgresql
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/main.yml
new file mode 100644
index 00000000..21f6ffa9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/main.yml
@@ -0,0 +1,13 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Set up PostgreSQL master-standby replication inside a single container:
+- import_tasks: setup_postgresql_cluster.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_major_version >= '18'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml
new file mode 100644
index 00000000..9ef657c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/tasks/setup_postgresql_cluster.yml
@@ -0,0 +1,86 @@
+- name: Install packages
+ apt:
+ name: '{{ pg_package_list }}'
+ notify: cleanup postgresql
+- name: Create root dirs
+ file:
+ state: directory
+ path: '{{ item }}'
+ owner: postgres
+ group: postgres
+ mode: '0700'
+ loop:
+ - '{{ master_root_dir }}'
+ - '{{ master_data_dir }}'
+ - '{{ replica_root_dir }}'
+ - '{{ replica_data_dir }}'
+ notify: cleanup postgresql
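+# The Debian/Ubuntu packages install initdb and pg_ctl under a version-specific
+# directory in /usr/lib that is not on PATH, so locate the binaries with find.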
+- name: Find initdb
+ shell: find /usr/lib -type f -name "initdb"
+ register: result
+- name: Set path to initdb
+ set_fact:
+ initdb: '{{ result.stdout }}'
+- name: Initialize databases
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ initdb }} --pgdata {{ item }}'
+ loop:
+ - '{{ master_data_dir }}'
+ - '{{ replica_data_dir }}'
+- name: Copy config templates
+ template:
+ src: '{{ item.conf_templ }}'
+ dest: '{{ item.conf_dest }}'
+ owner: postgres
+ group: postgres
+ force: true
+ loop:
+ - conf_templ: master_postgresql.conf.j2
+ conf_dest: '{{ master_postgresql_conf }}'
+ - conf_templ: replica_postgresql.conf.j2
+ conf_dest: '{{ replica_postgresql_conf }}'
+ - conf_templ: pg_hba.conf.j2
+ conf_dest: '{{ master_pg_hba_conf }}'
+ - conf_templ: pg_hba.conf.j2
+ conf_dest: '{{ replica_pg_hba_conf }}'
+- name: Find pg_ctl
+ shell: find /usr/lib -type f -name "pg_ctl"
+ register: result
+- name: Set path to pg_ctl
+ set_fact:
+ pg_ctl: '{{ result.stdout }}'
+- name: Start servers
+ become: true
+ become_user: '{{ pg_user }}'
+ shell: '{{ pg_ctl }} -D {{ item.datadir }} -o "-p {{ item.port }}" start'
+ loop:
+ - datadir: '{{ master_data_dir }}'
+ port: '{{ master_port }}'
+ - datadir: '{{ replica_data_dir }}'
+ port: '{{ replica_port }}'
+ notify: stop postgresql
+- name: Check connectivity to the master and get PostgreSQL version
+ become: true
+ become_user: '{{ pg_user }}'
+ postgresql_ping:
+ db: '{{ db_default }}'
+ login_user: '{{ pg_user }}'
+ login_port: '{{ master_port }}'
+ register: result
+- name: Check connectivity to the replica and get PostgreSQL version
+ become: true
+ become_user: '{{ pg_user }}'
+ postgresql_ping:
+ db: '{{ db_default }}'
+ login_user: '{{ pg_user }}'
+ login_port: '{{ replica_port }}'
+- name: Define server version
+ set_fact:
+ pg_major_version: '{{ result.server_version.major }}'
+ pg_minor_version: '{{ result.server_version.minor }}'
+- name: Print PostgreSQL version
+ debug:
+ msg: PostgreSQL version is {{ pg_major_version }}.{{ pg_minor_version }}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2
new file mode 100644
index 00000000..744243ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/master_postgresql.conf.j2
@@ -0,0 +1,28 @@
+# Important parameters:
+listen_addresses='*'
+port = {{ master_port }}
+wal_level = logical
+max_wal_senders = 8
+track_commit_timestamp = on
+max_replication_slots = 10
+
+# Unimportant parameters:
+max_connections=10
+shared_buffers=8MB
+dynamic_shared_memory_type=posix
+log_destination='stderr'
+logging_collector=on
+log_directory='log'
+log_filename='postgresql-%a.log'
+log_truncate_on_rotation=on
+log_rotation_age=1d
+log_rotation_size=0
+log_line_prefix='%m[%p]'
+log_timezone='W-SU'
+datestyle='iso,mdy'
+timezone='W-SU'
+lc_messages='en_US.UTF-8'
+lc_monetary='en_US.UTF-8'
+lc_numeric='en_US.UTF-8'
+lc_time='en_US.UTF-8'
+default_text_search_config='pg_catalog.english'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2
new file mode 100644
index 00000000..62e05ffc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/pg_hba.conf.j2
@@ -0,0 +1,7 @@
+local all all trust
+local replication logical_replication trust
+host replication logical_replication 127.0.0.1/32 trust
+host replication logical_replication 0.0.0.0/0 trust
+local all logical_replication trust
+host all logical_replication 127.0.0.1/32 trust
+host all logical_replication 0.0.0.0/0 trust
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2
new file mode 100644
index 00000000..206ab2eb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_postgresql_replication/templates/replica_postgresql.conf.j2
@@ -0,0 +1,28 @@
+# Important parameters:
+listen_addresses='*'
+port = {{ replica_port }}
+wal_level = logical
+max_wal_senders = 8
+track_commit_timestamp = on
+max_replication_slots = 10
+
+# Unimportant parameters:
+max_connections=10
+shared_buffers=8MB
+dynamic_shared_memory_type=posix
+log_destination='stderr'
+logging_collector=on
+log_directory='log'
+log_filename='postgresql-%a.log'
+log_truncate_on_rotation=on
+log_rotation_age=1d
+log_rotation_size=0
+log_line_prefix='%m[%p]'
+log_timezone='W-SU'
+datestyle='iso,mdy'
+timezone='W-SU'
+lc_messages='en_US.UTF-8'
+lc_monetary='en_US.UTF-8'
+lc_numeric='en_US.UTF-8'
+lc_time='en_US.UTF-8'
+default_text_search_config='pg_catalog.english'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml
new file mode 100644
index 00000000..bdbbbb2c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml
@@ -0,0 +1,35 @@
+# General
+redis_packages:
+ Ubuntu:
+ - redis-server
+ openSUSE Leap:
+ - redis
+ Fedora:
+ - redis
+ CentOS:
+ - redis
+ FreeBSD:
+ - redis
+
+redis_bin:
+ Ubuntu: /usr/bin/redis-server
+ openSUSE Leap: /usr/sbin/redis-server
+ Fedora: /usr/bin/redis-server
+ CentOS: /usr/bin/redis-server
+ FreeBSD: /usr/local/bin/redis-server
+
+redis_module: "{{ (ansible_python_version is version('2.7', '>=')) | ternary('redis', 'redis==2.10.6') }}"
+
+redis_password: PASS
+
+# Master
+master_port: 6379
+master_conf: /etc/redis-master.conf
+master_datadir: /var/lib/redis-master
+master_logdir: /var/log/redis-master
+
+# Slave
+slave_port: 6380
+slave_conf: /etc/redis-slave.conf
+slave_datadir: /var/lib/redis-slave
+slave_logdir: /var/log/redis-slave
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml
new file mode 100644
index 00000000..d4d535cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml
@@ -0,0 +1,34 @@
+- name: stop redis services
+ shell: |
+ kill -TERM $(cat /var/run/redis_{{ master_port }}.pid)
+ kill -TERM $(cat /var/run/redis_{{ slave_port }}.pid)
+ listen: cleanup redis
+
+- name: remove redis packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ redis_packages[ansible_distribution] }}"
+ listen: cleanup redis
+
+- name: remove pip packages
+ pip:
+ name: redis
+ state: absent
+ listen: cleanup redis
+
+- name: remove redis data
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ master_conf }}"
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - /var/run/redis_{{ master_port }}.pid
+ - "{{ slave_conf }}"
+ - "{{ slave_datadir }}"
+ - "{{ slave_logdir }}"
+ - /var/run/redis_{{ slave_port }}.pid
+ listen: cleanup redis
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml
new file mode 100644
index 00000000..6f094434
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+- setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml
new file mode 100644
index 00000000..49e81c61
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml
@@ -0,0 +1,11 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+- import_tasks: setup_redis_cluster.yml
+ when:
+ - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
new file mode 100644
index 00000000..2445ba24
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
@@ -0,0 +1,107 @@
+# We run two servers listening on different ports so that replication can be
+# checked (one server acts as master, the other as slave).
+
+- name: Install redis server apt dependencies
+ apt:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ policy_rc_d: 101
+ when:
+ - ansible_facts.pkg_mgr == 'apt'
+ notify: cleanup redis
+
+- name: Install redis server rpm dependencies
+ yum:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ when:
+ - ansible_facts.pkg_mgr == 'yum'
+ notify: cleanup redis
+
+- name: Install redis rpm dependencies
+ dnf:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ when: ansible_facts.pkg_mgr == 'dnf'
+ notify: cleanup redis
+
+- name: Install redis server zypper dependencies
+ zypper:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ when:
+ - ansible_facts.pkg_mgr == 'community.general.zypper'
+ notify: cleanup redis
+
+- name: Install redis FreeBSD dependencies
+ community.general.pkgng:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ when:
+ - ansible_facts.pkg_mgr == 'community.general.pkgng'
+ notify: cleanup redis
+
+- name: Install redis module
+ pip:
+ name: "{{ redis_module }}"
+ state: present
+ notify: cleanup redis
+
+- name: Create redis directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: redis
+ group: redis
+ loop:
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - "{{ slave_datadir }}"
+ - "{{ slave_logdir }}"
+
+- name: Create redis configs
+ copy:
+ dest: "{{ item.file }}"
+ content: |
+ daemonize yes
+ port {{ item.port }}
+ pidfile /var/run/redis_{{ item.port }}.pid
+ logfile {{ item.logdir }}/redis.log
+ dir {{ item.datadir }}
+ requirepass {{ redis_password }}
+ masterauth {{ redis_password }}
+ loop:
+ - file: "{{ master_conf }}"
+ port: "{{ master_port }}"
+ logdir: "{{ master_logdir }}"
+ datadir: "{{ master_datadir }}"
+ - file: "{{ slave_conf }}"
+ port: "{{ slave_port }}"
+ logdir: "{{ slave_logdir }}"
+ datadir: "{{ slave_datadir }}"
+
+- name: Start redis master
+ shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}"
+
+- name: Start redis slave
+ shell: "{{ redis_bin[ansible_distribution] }} {{ slave_conf }} --slaveof 127.0.0.1 {{ master_port }}"
+
+- name: Wait for redis master to be started
+ ansible.builtin.wait_for:
+ host: 127.0.0.1
+ port: "{{ master_port }}"
+ state: started
+ delay: 1
+ connect_timeout: 5
+ timeout: 30
+
+- name: Wait for redis slave to be started
+ ansible.builtin.wait_for:
+ host: 127.0.0.1
+ port: "{{ slave_port }}"
+ state: started
+ delay: 1
+ connect_timeout: 5
+ timeout: 30
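Once both instances are up, the replication link itself can be verified by querying the slave; a minimal sketch (illustrative only, reusing the slave_port, master_port and redis_password variables this role assumes) with redis-cli:

- name: Check that the slave reports its master (illustrative sketch)
  command: "redis-cli -p {{ slave_port }} -a {{ redis_password }} info replication"
  register: replication_info

- name: Assert the replication role and master port
  assert:
    that:
      - "'role:slave' in replication_info.stdout"
      - "('master_port:' ~ master_port) in replication_info.stdout"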
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases
new file mode 100644
index 00000000..1ad133ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases
@@ -0,0 +1 @@
+needs/file/tests/utils/constraints.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml
new file mode 100644
index 00000000..d4f8148c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml
@@ -0,0 +1,13 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: record constraints.txt path on remote host
+ set_fact:
+ remote_constraints: "{{ remote_tmp_dir }}/constraints.txt"
+
+- name: copy constraints.txt to remote host
+ copy:
+ src: "{{ role_path }}/../../../utils/constraints.txt"
+ dest: "{{ remote_constraints }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..229037c8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..39872d74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,5 @@
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..1e0f51b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,11 @@
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
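The task above first registers the tempfile result under the name remote_tmp_dir and then flattens it with set_fact, so later roles can use {{ remote_tmp_dir }} as a plain path while the notified handler still removes the directory at the end of the play. A consuming test target only needs the dependency and the variable; a minimal sketch (hypothetical target):

# meta/main.yml of the consuming target
dependencies:
  - setup_remote_tmp_dir

# tasks/main.yml of the consuming target
- name: write scratch data into the shared temporary directory
  copy:
    content: "scratch data"
    dest: "{{ remote_tmp_dir }}/scratch.txt"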
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..93d786f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,15 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files) }}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem
new file mode 100644
index 00000000..a438d926
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAeqgAwIBAgIJANguFROhaWocMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV
+BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5
+MDExMTA4MzMxNVoXDTI5MDEwODA4MzMxNVowMTEgMB4GA1UEAwwXVExTR2VuU2Vs
+ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDqVt84czSxWnWW4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp
+7PrBbYF05FOgSdJLvL6grlRSQK2VPsXdLfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4l
+JVpSDsBV2orR4pOIf1s1+iSwvcRQkX46SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy
+0K2MbRs7oG2rdKks8zisfT0ymKnrFTdVeUjIrg0sStaMnf9VVkcEeYkfNY0vWqdn
+CV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET+q+gOvjsEqzn7DvlPkmk86hIIWXKi3aM
+A9swknL3rnagJL6GioWRpYUwKdRKmZxdyr4I2JTTAgMBAAGjHTAbMAwGA1UdEwQF
+MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQACTpPBf5WSwZ7r
+hrbPUN3qVh70HI0ZNK2jlK6b5fpSdw3JI/GQl0Kw3eGICLzwTByWvhD62U7IigL5
+0UWxWuEod310Y/qo/7OxRVPp5PH/0oNGoKHhEzas2ii0heQYGsHQUKGzYNNyVfjy
+nqBFz5AcKf067LcXivYqod6JDQHqFq/5/hWlIsHHrZIeijqqtthPq39GlGAYO+AB
+U66nzlH7YQgmfYfy6l7O4LsjXf/bz9rWvueO3NqCsmXV+FacDkOkwWA5Kf6rcgNL
+3G+2HAVTRIXDnO4ShnK6aYMW+UklpYRlVYBBUOdwoNIp5gI+BlSc1IuF6PdLVt3q
+VdjN1MjY
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem
new file mode 100644
index 00000000..0a950eda
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqVt84czSxWnWW
+4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp7PrBbYF05FOgSdJLvL6grlRSQK2VPsXd
+LfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4lJVpSDsBV2orR4pOIf1s1+iSwvcRQkX46
+SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy0K2MbRs7oG2rdKks8zisfT0ymKnrFTdV
+eUjIrg0sStaMnf9VVkcEeYkfNY0vWqdnCV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET
++q+gOvjsEqzn7DvlPkmk86hIIWXKi3aMA9swknL3rnagJL6GioWRpYUwKdRKmZxd
+yr4I2JTTAgMBAAECggEBALpg9ZDUMCiOpc+mbNO/ZkP90M7u38Q0M+7HY8XHOPkt
+l+XUkWueSMRLhSeLDzMlnwf1HyN8RZLaJkzP6XAL1VXEwuXAiIskaZ4Cg07Arp/W
+8cHhf4CcMuUVuCtOZcC+ajD4Do5zn9vkm9yH0ap0o0LdoWa/a8WfU+luy0EHBsSW
+6qqI+nqNFmISluVbfWt7t3zp273+8sir6YeHQu9G91/jzggv8rHmu4EHhi3cnU0K
+vY6OPCGBL7nrg9Rv1LSFpH95TvlIM6/Cm0AjgW7m6XwWUTaI9p+GvKzrYUSLd9L/
+QxlmAwiu/sBTXLrsWyr8XEtj+lVGxQ6eFbf6E+lUm8ECgYEA+8Wgmhf3VsC3gvJz
+w2jApEoOioD5iGOWGClGVURkfaBhFELr4XCTVMdBuCtxT7LYTMHTAlBqIbdWDjB4
+m/E417hLGogSDy7j0R0Mx75OOGEitxYUhe0VGDNoytgCNd2UnTMt42lp+9vAHZag
+INhVDOnxRNdtNTf1yYkWUMEbh1sCgYEA7kZNJXPVYJtR78+km/Gcv64Umci7KUV+
+hYc7chR5xv3cXvXg5eojKa4G7CyMQTX7VnRa6CiQKdN73AbIAhS4Oy5UlCOKtmb8
+xnBiOAYwSpOfIeZhjq0RvEeZX0t6u7XsErBZ03rEPKXF2nNDo1x8byrlKPtlUzwJ
+gb5yjmK/mekCgYEA1TWQAs5m4+2Bun+tbv7nnHkmhT4hktGays0xRYYMf6Jwc6MU
+dC5MZg/zZI5Nf8uZhq7hDWWh6vmCA7QifxSxKWVlHIu8l2UDAhRSvVg4j2Aa8Obe
+7GdQZNUsWhLBFHKXpuQvaRTc7q8yqxvicM4igDQg4EZ6sgW4vDm+TxapRF8CgYAz
+n6mhPqpxRtWGxo8cdkmGwfmWpAXg2DykQ3teqQ8FTQUM0erLBWJe6mR3kONGUaLF
+xWnYuMkbNsW0EwgMY17S+6O5gMXR5RhJChpNlxGpZrhoiNiEJ/0atMyG9/x8ZNrj
+5a9ggU248hWe0bBK2YPgNgP2UBlQ4kYRBSkerkhi2QKBgF+tlpyqcU+0iY82qRS2
+wMf7oI2pWR8nX9LPAY/nnvwWvqwcAFJPMlSMTu8Ext6h7l9yu+7JGL6JWwsO57Lb
+Gm/RxbuZ/kG/13+lSNmZiyHrhj6hZhkAMeFM34fpT4+DBXqSxZuvdrmwBc5B2jYg
+F9Bv8gcmZlGhqONL23evr9Gu
+-----END PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem
new file mode 100644
index 00000000..501d8389
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MThaFw0yOTAxMDgwODMzMThaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCoM+OQ3HCnCUAAz9KGGTwWB9hQbUfAZXm/stlb2/uOAp3rNwxAlCs/giymBHE6
+Iu6mrK006Vn+Z9ibqIrD2LuCOxcu25y8goqG62TgdP5sa9wR+597s0XssnwnaY8y
+bJ3p2zWAJvMgqQ0iNW/ZynpWbO85K5SryUykF7FAeNU9ogGGlIwCPjHhPvnwjkqd
+yDqaA1VaJKDUWIF9joI7sV4VLgGhQvzXRrHULsTeIF2m0+ebL0PTNEWHQ0dtgLYX
+kW7YO4Y6+n3cjHNH4qTof8V30EK8pk8kTdJ/x6ubwf+klFCAyroOxNOaxUy299Oo
+yD6qIPJPnGkPhrKtWnWIhNzJAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAK214+VVXnGnsUlvd9Q6A2Ea6UGrr6b7xkmlnIaNd+6xoUsDsHob
+srHYm7UC0uLi1KwSunI7AU5ZELVEUfAmJzh3O4d6C5sQyqKYPqd5harWOQ3BOD0I
+plHpp7qMtsPDuJBtmE/bmvF85eto0H7pPz+cTTXRlOaVVeiHjMggFcXdy1MzGo9C
+X/4wLQmsFeypTfe+ZGqvDh99VV+ffNMIsMh+opWEloaKiHmDKB6S9aC/MsVVM4RR
+nHm/UKTOukaGE9QIPkSSaygv3sBkVnQ2SHMvvtnjPHVHlizNoq6+YTnuOvKpo4o5
+V7Bij+W7rkBQLsEfwv2IC+gzmRz2yxr2tXk=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem
new file mode 100644
index 00000000..850260a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqDPjkNxwpwlAAM/Shhk8FgfYUG1HwGV5v7LZW9v7jgKd6zcM
+QJQrP4IspgRxOiLupqytNOlZ/mfYm6iKw9i7gjsXLtucvIKKhutk4HT+bGvcEfuf
+e7NF7LJ8J2mPMmyd6ds1gCbzIKkNIjVv2cp6VmzvOSuUq8lMpBexQHjVPaIBhpSM
+Aj4x4T758I5Kncg6mgNVWiSg1FiBfY6CO7FeFS4BoUL810ax1C7E3iBdptPnmy9D
+0zRFh0NHbYC2F5Fu2DuGOvp93IxzR+Kk6H/Fd9BCvKZPJE3Sf8erm8H/pJRQgMq6
+DsTTmsVMtvfTqMg+qiDyT5xpD4ayrVp1iITcyQIDAQABAoIBAHPszzpXs4xr46Cr
+mvyxB6hnX76OkpUXWwGz0fptcsI9K3mhRuB7PhNXNE53YVIgITreZ8G/0jZ0e+VM
+E9dG2HS5JRE2ap/BmJfERJIuD+vJqrL6KMCondi0arz/E6I9GdjDK+xW69nmqRaa
+nawM0KQgD//m+WAsLJYrfg5hORZwI2SHaahawnCp0QaMmz3bdDWKRacM3q0UFX46
+Ze6CaZkUn+e1rHsTMcZBvxQWIVzysFNXh150idIB/PxL5YfCQqTSAj1c/nxaxz6a
+BvHFlpaYR3tvXXlexxfjglCwsGyckbvTyP1cBZqpv5oES+VKt2PrOve9Zyax+CYT
+0uQf6cECgYEA09+46QHXLfWh6jiJYu9skC9UrLU5czfCNB6PrUtFcjPFMYjZDcw9
+inJmcuTPXmfplxc47YDfpwotU+szTJDF+R8kknnfw9zVr/sIwZ5wsFfUQl/56Svn
+AIOVvHHvcvMX95XKGiuTsoCIJZNjJN3l3ztu/bRciuiVLyizglwIVrMCgYEAyzvK
+PFlWilbp3GPJlnW7x1bUxe1ziLE/Um+ujZx96+fy34hJLFdNdNzpNUjoOf3IDTGq
+6xl+vXcf12gimWMFcD3qNIGKHBDM9cIB2RDbb6YcqI8lOqopsmOyGmVLPkRpCoUK
+72kacQwvw6M9xjmpiG3dN8lE881jDmZi+hyCnJMCgYEAoIQnQAhP8Jbeo2dP1q+T
+bS0elnX532uH6xqYOW8EXwAPznZiEw0ANspzCWqGHHzXQMusKmtvhcq1CpXvWHt6
+MUHB4GMK/wVosxmZya5yq3bu7ZZu7JOBQCdwosMi6NB5AO7vnaIUFLFB9E3UWBLw
+243YicdCMU8B7yeD0ChPfPcCgYA1dYHKBBn+g8Q6Y8lIGaoOUmnfsok8gJtOfPAm
+ce6xmi7J29iboE9QmTeC+62Sa44u4ky6UNeE0QwAJnVLcb+hebfcneKNZWH0l1bT
+GVsPcFuDfzvkxZP4R782sERtmaMj0EFDHpuE9xatWIhMVyigKX4SSZAorXML+6S3
+c75rnwKBgBR+WU934wS+DbwTLlUB2mJWqJMEbOH/CUwPC7+VN4h1h3/i455iAeiU
+BizLS0SlD+MoSbC7URcZuquqGkmMlnJXoxF+NdxoWZK78tYNftryWoR87TloiVc/
+LhkxZxje4tgW/mTLqH3zKDoyyzDzG6Q6tAUN2ZTjJFEws7qF30Qe
+-----END RSA PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem
new file mode 100644
index 00000000..4a0ebc6e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MTZaFw0yOTAxMDgwODMzMTZaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQDIwErHwAesRBfd9HiZkmB3VYh28c1QkE9I8nYyHJKX2ZBUhAzK+h80BkcTJJ94
+265qWyACH/wl54Xe/ofFUFrGa4vz0qz4UkL/KI0OGw28Y4qnKdorb9DumbiIPB+9
+I9TJT9vhtXTxBNlBTpv3ONHL8EzdV6ZmuvELU11H27oQ4xoUYhfXPXLMLK0sOnXZ
+lt0BOMMd5fVpJVa8fvXiw3626a0aXCr4e/MWUsBFRnzrXfgoW+AjYoTjKKS2hLYo
+8//MM05h7ROIXrNe990sf9C1G+fOThmOMszK9sjMhu2xHranRcz5aA0UTfyOjTs8
+9WexUYhC5VorYyRWtVZu2mDjAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAFoPBeB6tQhFS1198sia5NDHDDrghDOIlE0QbaoA+MSKzsaIy8Mu
+mNcM2ewYpT600XXTBxcqF6/vuKL9OEbvivtRYQu1YfkifN1jzREoWTieUkR5ytzt
+8ATfFkgTWJmiRiOIb/fNgewvhd+aKxep0OGwDiSKKl1ab6F17Cp4iK8sDBWmnUb6
+0Wf7pfver1Gl0Gp8vRXGUuc8a7udA9a8mV70HJlLkMdMvR9U8Bqih0+iRaqNWXRZ
+7Lc6v5LbzrW/ntilmgU6F0lwxPydg49MY4UrSXcjYLZs9T4iYHwTfLxFjFMIgGwn
+peYMKRj18akP9i2mjj5O2mRu4K+ecuUSOGI=
+-----END CERTIFICATE-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem
new file mode 100644
index 00000000..c79ab648
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAyMBKx8AHrEQX3fR4mZJgd1WIdvHNUJBPSPJ2MhySl9mQVIQM
+yvofNAZHEySfeNuualsgAh/8JeeF3v6HxVBaxmuL89Ks+FJC/yiNDhsNvGOKpyna
+K2/Q7pm4iDwfvSPUyU/b4bV08QTZQU6b9zjRy/BM3VemZrrxC1NdR9u6EOMaFGIX
+1z1yzCytLDp12ZbdATjDHeX1aSVWvH714sN+tumtGlwq+HvzFlLARUZ86134KFvg
+I2KE4yiktoS2KPP/zDNOYe0TiF6zXvfdLH/QtRvnzk4ZjjLMyvbIzIbtsR62p0XM
++WgNFE38jo07PPVnsVGIQuVaK2MkVrVWbtpg4wIDAQABAoIBAHw3wA3pnNXTLJGC
+fD1KfbZZjp9K76gyI10X6lsHow2i6dPiAah3LGecms4VkzfNdxcIW7303Kj3obZh
++ND277RnR6oPakgdXqdUCDP6OX2gemMFWqIWBkodhDmIOntmeHw4le4LwdiBD42B
+frBy0B5JCsbLPYPDmPNRGh8krvVS+Eir4hb4tK95TPMSL0vEjvHYFbCxv7//Ri1p
+3CROGp2CGX0WZ+Zs0crRNoIhRRM6kLAhROcqejtnEy6o7l5CWpCAL2vxlE9y8/kL
+iRawSZRFZnz/zGnqpx0vswgvijkuPfcNGMSzdwaiDgQz8D0GkJ7s9VgzZJazNy+1
+ET/4YIECgYEA612rwP9Ar9qdYbmmMPaJzITnaIrNGfO2JvaQqZt+DG8sVgdxL7V5
+D6emcw406drKRZvFAxnW6ZW2bVpmit02osl0re2A/nOTXLNuo338Qkap/hG8YZrF
+bw7w75pFa/rwlDtedjBnGHO2KbRXeU5Hn5wLoKjYgJoF6Ht+PPdL0IsCgYEA2lnC
+pQEhM51iRMDqNdmVJyvsTNU1ikoO8HaXHq+LwOQETaKMnDwp4Bn14E815CTulAc/
+tsDTKSDk6umZ+IufG1a2v7CqgKVwkB4HkgxKFQs2gQdTFfoMi5eeHR+njuNtklp1
+9fWfKHsP/ddrg+iTVTRZBLWexgKK89IMHYalpAkCgYEAy0Q3a9NF81mTJ+3kOE8C
+zO1OyLtuzGXsvxOb9c6C+owctyNwPeq05a89EgqH6hr5K0qOx9HOCCcyyJgVDQJl
+CAuByB/gkmAQOTQBbhMFA9vxPanljknTDsnRjKwoHkw2712ig+Hjd3ufK79C+FGB
+i7eBVzva1p2uUowshsxv3mcCgYAOFiRciMofjlO8o8V4W+Undcn02vxtQ4HbOYte
+S2z0sMEmUQpJOghpkMMwCWwsn8VUf3M40w/MY3bhQNjSFA/br6hyjW8yhXnRkl5i
+qbBN0z9c66AMlukgSFPHBTfGHB4Bhxx9Fa+C6Q2LDs6839BBevMTPrRTie509GQb
+s4gUIQKBgAvE8wLcmozno0GLDnBdKRZP/C7tmVnAINuraITPUBTASwI+Qo8ILigQ
+LRLaDqF84BEpjb8vdzkYFQqRQSZ8BI8NydfuKEFSBfL27sBvSGMYQJVm6bryUmPq
+T3ayaeZ4Wb3FFDijgtM9dRKyf7p4hQPOqM44QrntAtb43b2Q5L7M
+-----END RSA PRIVATE KEY-----
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml
new file mode 100644
index 00000000..bbdc30cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Certificates generated with: https://github.com/michaelklishin/tls-gen
+# ~/tls-gen/basic# make PASSWORD=bunnies CN=ansible.tls.tests
+# verify with: make info
+
+- name: ensure target directory is present
+ file:
+ path: /tls
+ state: directory
+
+- name: ensure TLS files are present
+ copy:
+ src: "{{ item }}"
+ dest: "/tls/{{ item }}"
+ loop:
+ - ca_certificate.pem
+ - ca_key.pem
+ - client_certificate.pem
+ - client_key.pem
+ - server_certificate.pem
+ - server_key.pem
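Roles that depend on setup_tls can then point TLS-aware modules at the copied files under /tls; a minimal sketch (hypothetical task, assuming some TLS service is already listening on port 8443) using ansible.builtin.uri with the client certificate pair:

- name: Example TLS client request (illustrative sketch, hypothetical endpoint)
  ansible.builtin.uri:
    url: https://localhost:8443/
    client_cert: /tls/client_certificate.pem
    client_key: /tls/client_key.pem
    validate_certs: false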
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml
new file mode 100644
index 00000000..64019a04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml
@@ -0,0 +1,8 @@
+wf_tmp_dir: '{{ remote_tmp_dir }}/wildfly_tmp'
+wf_homedir: '{{ wf_tmp_dir }}/wildfly'
+wf_service_file_path: /etc/systemd/system/wildfly.service
+wf_version: 16.0.0.Final
+wf_user: wildfly
+jboss_root: '{{ wf_homedir }}'
+deploy_dir: '{{ jboss_root }}/standalone/deployments'
+default_deploy_root: /var/lib/jbossas/standalone/deployments
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf
new file mode 100644
index 00000000..4ff3293b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf
@@ -0,0 +1,8 @@
+# The configuration you want to run
+WILDFLY_CONFIG=standalone.xml
+
+# The mode you want to run
+WILDFLY_MODE=standalone
+
+# The address to bind to
+WILDFLY_BIND=0.0.0.0
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml
new file mode 100644
index 00000000..98db569d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml
@@ -0,0 +1,13 @@
+- name: Stop wildfly (jboss)
+ systemd:
+ name: wildfly
+ state: stopped
+ ignore_errors: yes
+
+- name: Remove files
+ file:
+ path: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ wf_service_file_path }}'
+ - '{{ default_deploy_root }}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml
new file mode 100644
index 00000000..6dabc5c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+- setup_pkg_mgr
+- setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml
new file mode 100644
index 00000000..beb58f8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml
@@ -0,0 +1,102 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Skip unsupported platforms
+ meta: end_play
+ when: (ansible_distribution != 'CentOS') or
+ (ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>='))
+
+- name: Install java
+ package:
+ name: java-1.8.0-openjdk-devel
+
+- name: Create wf_tmp_dir
+ file:
+ path: '{{ wf_tmp_dir }}'
+ state: directory
+
+- name: Download wildfly
+ get_url:
+ url: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_wildfly_server/wildfly-{{ wf_version }}.tar.gz'
+ dest: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+
+- name: Unarchive tar
+ unarchive:
+ src: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+ dest: '{{ wf_tmp_dir }}'
+ remote_src: yes
+
+- name: Remove tar
+ file:
+ path: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+ state: absent
+
+- name: Create symlink
+ file:
+ src: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}'
+ dest: '{{ wf_tmp_dir }}/wildfly'
+ state: link
+
+- name: Create group for wildfly
+ group:
+ name: '{{ wf_user }}'
+ system: yes
+
+- name: Create user for wildfly
+ user:
+ name: '{{ wf_user }}'
+ system: yes
+ group: '{{ wf_user }}'
+ home: '{{ wf_homedir }}'
+
+- name: Set permissions
+ file:
+ path: '{{ remote_tmp_dir }}'
+ state: directory
+ owner: '{{ wf_user }}'
+ group: '{{ wf_user }}'
+ recurse: yes
+
+- name: Create config file
+ copy:
+ src: wildfly.conf
+ dest: '{{ wf_homedir }}/wildfly.conf'
+ mode: "0644"
+
+- name: Create launcher
+ template:
+ src: launch.sh.j2
+ dest: '{{ wf_homedir }}/bin/launch.sh'
+ mode: "0755"
+
+- name: Make scripts executable
+ shell: 'chmod +rx {{ wf_homedir }}/bin/*.sh'
+
+- name: Create service file
+ template:
+ src: wildfly.service.j2
+ dest: '{{ wf_service_file_path }}'
+ mode: "0644"
+
+- name: Create directories for testing the default deploy_path
+ become: yes
+ file:
+ path: '{{ default_deploy_root }}'
+ state: directory
+ recurse: yes
+ owner: '{{ wf_user }}'
+ group: '{{ wf_user }}'
+
+- name: Create symlink for testing the default deploy_path
+ file:
+ state: link
+ src: '{{ deploy_dir }}'
+ dest: '{{ default_deploy_root }}/deployments'
+
+- name: Reload systemd and start wildfly
+ systemd:
+ daemon_reload: yes
+ name: wildfly
+ state: started
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2
new file mode 100644
index 00000000..a01bcc51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+if [ "x$WILDFLY_HOME" = "x" ]; then
+ WILDFLY_HOME="{{ wf_homedir }}"
+fi
+
+if [[ "$1" == "domain" ]]; then
+ $WILDFLY_HOME/bin/domain.sh -c "$2" -b "$3"
+else
+ $WILDFLY_HOME/bin/standalone.sh -c "$2" -b "$3"
+fi
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2
new file mode 100644
index 00000000..686c3019
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2
@@ -0,0 +1,16 @@
+[Unit]
+Description=The WildFly Application Server
+After=syslog.target network.target
+Before=httpd.service
+
+[Service]
+Environment=LAUNCH_JBOSS_IN_BACKGROUND=1
+EnvironmentFile=-{{ wf_homedir }}/wildfly.conf
+User=wildfly
+LimitNOFILE=102642
+PIDFile=/var/run/wildfly/wildfly.pid
+ExecStart={{ wf_homedir }}/bin/launch.sh $WILDFLY_MODE $WILDFLY_CONFIG $WILDFLY_BIND
+StandardOutput=null
+
+[Install]
+WantedBy=multi-user.target
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/aliases
new file mode 100644
index 00000000..a6dafcf8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/aliases
@@ -0,0 +1 @@
+shippable/posix/group1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml
new file mode 100644
index 00000000..3e435731
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml
@@ -0,0 +1,89 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install systemd-sysv on Ubuntu 18 and later
+ apt:
+ name: systemd-sysv
+ state: present
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version is version('18', '>=')
+ register: systemd_sysv_install
+
+- name: Execute shutdown with custom message and delay
+ community.general.shutdown:
+ delay: 100
+ msg: "Custom Message"
+ register: shutdown_result
+ check_mode: yes
+
+- name: Execute shutdown with negative delay
+ community.general.shutdown:
+ delay: -100
+ register: shutdown_result_minus
+ check_mode: yes
+
+- name: Verify custom message except on Alpine, AIX
+ assert:
+ that:
+ - '"Custom Message" in shutdown_result["shutdown_command"]'
+ - '"Shut down initiated by Ansible" in shutdown_result_minus["shutdown_command"]'
+ - '"Custom Message" not in shutdown_result_minus["shutdown_command"]'
+ when: ansible_os_family not in ['Alpine', 'AIX']
+
+- name: Verify shutdown command is present except on Alpine, VMKernel
+ assert:
+ that: '"shutdown" in shutdown_result["shutdown_command"]'
+ when: ansible_os_family != 'Alpine' and ansible_system != 'VMKernel'
+
+- name: Verify shutdown command is present in Alpine
+ assert:
+ that: '"poweroff" in shutdown_result["shutdown_command"]'
+ when: ansible_os_family == 'Alpine'
+
+- name: Verify shutdown command is present in VMKernel
+ assert:
+ that: '"halt" in shutdown_result["shutdown_command"]'
+ when: ansible_system == 'VMKernel'
+
+- name: Verify shutdown delay is present in minutes in Linux
+ assert:
+ that:
+ - '"-h 1" in shutdown_result["shutdown_command"]'
+ - '"-h 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'Linux' and ansible_os_family != 'Alpine'
+
+- name: Verify shutdown delay is present in minutes in Void, MacOSX, OpenBSD
+ assert:
+ that:
+ - '"-h +1" in shutdown_result["shutdown_command"]'
+ - '"-h +0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system in ['Void', 'Darwin', 'OpenBSD']
+
+- name: Verify shutdown delay is present in seconds in FreeBSD
+ assert:
+ that:
+ - '"-h +100s" in shutdown_result["shutdown_command"]'
+ - '"-h +0s" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'FreeBSD'
+
+- name: Verify shutdown delay is present in seconds in Solaris, SunOS
+ assert:
+ that:
+ - '"-g 100" in shutdown_result["shutdown_command"]'
+ - '"-g 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system in ['Solaris', 'SunOS']
+
+- name: Verify shutdown delay is present in seconds in VMKernel
+ assert:
+ that:
+ - '"-d 100" in shutdown_result["shutdown_command"]'
+ - '"-d 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'VMKernel'
+
+- name: Remove systemd-sysv on Ubuntu 18 and later in case it was installed by this test
+ apt:
+ name: systemd-sysv
+ state: absent
+ when: systemd_sysv_install is changed
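All of the shutdown invocations above run with check_mode: yes, so the module only computes the platform-specific command and returns it in shutdown_command without actually rebooting the test host. The same technique works for previewing the command ad hoc; a minimal sketch using only options already exercised above:

- name: Preview the shutdown command without shutting anything down
  community.general.shutdown:
    delay: 60
    msg: "Maintenance window"
  check_mode: yes
  register: shutdown_preview

- name: Show the command that would have been executed
  debug:
    var: shutdown_preview.shutdown_command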
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases
new file mode 100644
index 00000000..1dbdd801
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases
@@ -0,0 +1,4 @@
+destructive
+shippable/posix/group2
+skip/python3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py
new file mode 100644
index 00000000..90b318cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+proc = sys.argv[1]
+value = sys.argv[2]
+username = sys.argv[3]
+password = sys.argv[4]
+
+if sys.version_info[0] == 2:
+ from xmlrpclib import ServerProxy
+ from urllib import quote
+else:
+ from xmlrpc.client import ServerProxy
+ from urllib.parse import quote
+
+if username:
+ url = 'http://%s:%s@127.0.0.1:9001/RPC2' % (quote(username, safe=''), quote(password, safe=''))
+else:
+ url = 'http://127.0.0.1:9001/RPC2'
+
+server = ServerProxy(url, verbose=True)
+server.supervisor.sendProcessStdin(proc, 'import sys; print(%s); sys.stdout.flush();\n' % value)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml
new file mode 100644
index 00000000..e3582160
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml
@@ -0,0 +1,4 @@
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml
new file mode 100644
index 00000000..e3582160
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml
@@ -0,0 +1,4 @@
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml
new file mode 100644
index 00000000..af1790cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml
@@ -0,0 +1,10 @@
+- name: install supervisor
+ package:
+ name: supervisor
+ state: present
+
+- name: disable supervisord system service
+ service:
+ name: '{{ supervisor_service_name }}'
+ state: stopped
+ enabled: no
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml
new file mode 100644
index 00000000..e3582160
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml
@@ -0,0 +1,4 @@
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml
new file mode 100644
index 00000000..e3582160
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml
@@ -0,0 +1,4 @@
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml
new file mode 100644
index 00000000..e3582160
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml
@@ -0,0 +1,4 @@
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml
new file mode 100644
index 00000000..a6ad10bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml
@@ -0,0 +1,52 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - tempfile:
+ state: directory
+ suffix: supervisorctl-tests
+ register: supervisord_sock_path
+
+ - command: 'echo {{ output_dir }}'
+ register: echo
+ - set_fact:
+ remote_dir: '{{ echo.stdout }}'
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
+
+ - include_tasks: '{{ item }}'
+ with_first_found:
+ - files:
+ - 'install_{{ ansible_distribution }}.yml' # CentOS
+ - 'install_{{ ansible_os_family }}.yml' # RedHat
+ - 'install_{{ ansible_system }}.yml' # Linux
+
+ - include_tasks: test.yml
+ with_items:
+ - { username: '', password: '' }
+ - { username: 'testétest', password: 'passéword' } # non-ASCII credentials
+ loop_control:
+ loop_var: credentials
+
+ # setuptools is too old on RHEL/CentOS 6 (https://github.com/Supervisor/meld3/issues/23)
+ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6
+
+ always:
+ - include_tasks: '{{ item }}'
+ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6
+ with_first_found:
+ - files:
+ - 'uninstall_{{ ansible_distribution }}.yml' # CentOS
+ - 'uninstall_{{ ansible_os_family }}.yml' # RedHat
+ - 'uninstall_{{ ansible_system }}.yml' # Linux
+
+ - file:
+ path: '{{ supervisord_sock_path.path }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml
new file mode 100644
index 00000000..9067a273
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml
@@ -0,0 +1,9 @@
+- name: start supervisord
+ command: 'supervisord -c {{ remote_dir }}/supervisord.conf'
+
+- name: wait_for supervisord
+ ansible.builtin.wait_for:
+ port: 9001
+ host: 127.0.0.1
+ timeout: 15
+ state: started
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml
new file mode 100644
index 00000000..1bf48f21
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml
@@ -0,0 +1,2 @@
+- name: stop supervisord
+ command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml
new file mode 100644
index 00000000..bfd2a06e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml
@@ -0,0 +1,12 @@
+- name: generate supervisor configuration
+ template:
+ src: supervisord.conf
+ dest: '{{ remote_dir }}/supervisord.conf'
+
+- block:
+ - import_tasks: start_supervisord.yml
+
+ - import_tasks: test_start.yml
+ - import_tasks: test_stop.yml
+ always:
+ - import_tasks: stop_supervisord.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml
new file mode 100644
index 00000000..cc56ac5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml
@@ -0,0 +1,135 @@
+- name: start py1 service (without auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: start py1 service (with auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status"
+
+- name: check that service is started
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+- name: check that service is running (part 1) # py1.log content is checked below
+ script: "files/sendProcessStdin.py 'pys:py1' 2 \
+ '{{ credentials.username }}' '{{ credentials.password }}'"
+
+- name: try again to start py1 service (without auth)
+ supervisorctl:
+ name: pys:py1
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: try again to start py1 service (with auth)
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is already running
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is not changed and result_with_auth is skip) or (result is skip and result_with_auth is not changed)
+
+- import_tasks: stop_supervisord.yml
+
+# supervisord has been stopped, check logfile
+- name: check that the service did what was expected (part 2)
+ shell: 'test "$(tail -2 {{ remote_dir }}/py1.log | head -1)" = ">>> 2"'
+
+# restart supervisord and py1 service for next tasks
+- import_tasks: start_supervisord.yml
+
+- name: start py1 service (without auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: start py1 service (with auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is started
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+#############################################################
+
+- name: Check an error occurs when wrong credentials are used
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}wrong_creds'
+ password: '{{ credentials.password }}same_here'
+ register: result
+ failed_when: result is not skip and (result is success or result is not failed)
+ when: credentials.username != ''
+
+- name: Check an error occurs when wrong URL is used
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9002
+ register: result
+ failed_when: result is success or result is not failed
+
+- name: Check an error occurs when wrong config path is used
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord_not_here.conf'
+ register: result
+ failed_when: result is success or result is not failed
+
+- name: Check an error occurs when a wrong name is used (without auth)
+ supervisorctl:
+ name: 'invalid'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ failed_when: result is skip or (result is success or result is not failed)
+ when: credentials.username == ''
+
+- name: Check an error occurs when a wrong name is used (with auth)
+ supervisorctl:
+ name: 'invalid'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ username: '{{ credentials.username }}wrong_creds'
+ password: '{{ credentials.password }}same_here'
+ register: result
+ failed_when: result is skip or (result is success or result is not failed)
+ when: credentials.username != ''
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml
new file mode 100644
index 00000000..5c76a681
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml
@@ -0,0 +1,59 @@
+- name: stop py1 service
+ supervisorctl:
+ name: 'pys:py1'
+ state: stopped
+ # test with 'server_url' parameter
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ register: result
+ when: credentials.username == ''
+
+- name: stop py1 service
+ supervisorctl:
+ name: 'pys:py1'
+ state: stopped
+ # test with unix socket
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status"
+
+- name: check that service is stopped
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+- name: "check that service isn't running"
+ script: "files/sendProcessStdin.py 'pys:py1' 1 \
+ '{{ credentials.username }}' '{{ credentials.password }}'"
+ register: is_py1_alive
+ failed_when: is_py1_alive is success
+
+- name: try again to stop py1 service (without auth)
+ supervisorctl:
+ name: pys:py1
+ state: stopped
+ # test with 'server_url' parameter
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ register: result
+ when: credentials.username == ''
+
+- name: try again to stop py1 service (with auth)
+ supervisorctl:
+ name: pys:py1
+ state: stopped
+ # test with unix socket
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is already stopped
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is not changed and result_with_auth is skip) or (result is skip and result_with_auth is not changed)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml
new file mode 100644
index 00000000..5bb5ee13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml
new file mode 100644
index 00000000..5bb5ee13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml
new file mode 100644
index 00000000..30023973
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisor
+ package:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml
new file mode 100644
index 00000000..5bb5ee13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml
new file mode 100644
index 00000000..5bb5ee13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml
new file mode 100644
index 00000000..5bb5ee13
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml
@@ -0,0 +1,4 @@
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf
new file mode 100644
index 00000000..2f80e02b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf
@@ -0,0 +1,42 @@
+[supervisord]
+pidfile={{ remote_dir }}/supervisord.pid
+logfile={{ remote_dir }}/supervisord.log
+
+[program:py1]
+command={{ ansible_python.executable }} -i -u -
+user={{ ansible_user_id }}
+autostart=false
+autorestart=false
+stdout_logfile={{ remote_dir }}/py1.log
+redirect_stderr=yes
+
+[program:py2]
+command={{ ansible_python.executable }} -i -u -
+user={{ ansible_user_id }}
+autostart=false
+autorestart=false
+stdout_logfile={{ remote_dir }}/py2.log
+redirect_stderr=yes
+
+[group:pys]
+programs=py1,py2
+
+[unix_http_server]
+file={{ supervisord_sock_path.path }}/supervisord.sock
+{% if credentials.username is defined and credentials.username|default(false, boolean=true) %}
+username = {{ credentials.username }}
+password = {{ credentials.password }}
+{% endif %}
+
+[inet_http_server]
+port=127.0.0.1:9001
+{% if credentials.username is defined and credentials.username|default(false, boolean=true) %}
+username = {{ credentials.username }}
+password = {{ credentials.password }}
+{% endif %}
+
+[supervisorctl]
+serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock
+
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml
new file mode 100644
index 00000000..d4b1bdcc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml
@@ -0,0 +1 @@
+supervisor_service_name: supervisor
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml
new file mode 100644
index 00000000..fc3aa0a8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml
@@ -0,0 +1 @@
+supervisor_service_name: supervisord
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/aliases
new file mode 100644
index 00000000..30b10b7c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/aliases
@@ -0,0 +1,3 @@
+non_local
+needs/root
+unsupported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/inventory b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/inventory
new file mode 100644
index 00000000..2eeaf313
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/inventory
@@ -0,0 +1 @@
+buildah-container ansible_host=buildah-container ansible_connection=buildah
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt
new file mode 100644
index 00000000..33257a92
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/files/normal_file.txt
@@ -0,0 +1 @@
+abnormal content
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml
new file mode 100644
index 00000000..92fd0830
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/roles/test_buildah_synchronize/tasks/main.yml
@@ -0,0 +1,71 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the synchronize module
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: cleanup old files
+ file:
+ path: '{{ output_dir }}'
+ state: absent
+
+- name: ensure the target directory exists
+ file:
+ path: '{{ output_dir }}'
+ state: directory
+
+- name: synchronize file to new filename
+ synchronize:
+ src: normal_file.txt
+ dest: '{{ output_dir }}/remote_file.txt'
+ register: sync_result
+
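+# rsync's itemized-changes output begins with '<f+' for a newly transferred file,
+# which is what the 'msg' checks below rely on.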
+- assert:
+ that:
+ - "'changed' in sync_result"
+ - "sync_result.changed == true"
+ - "'cmd' in sync_result"
+ - "'rsync' in sync_result.cmd"
+ - "'msg' in sync_result"
+ - "sync_result.msg.startswith('<f+')"
+ - "sync_result.msg.endswith('+ normal_file.txt\n')"
+
+- name: test that the file was really copied over
+ stat:
+ path: "{{ output_dir }}/remote_file.txt"
+ register: stat_result
+
+- assert:
+ that:
+ - "stat_result.stat.exists == True"
+ - "stat_result.stat.checksum == '4f11fb5cd9fe0171ea6fab02ae33f65138f3e44e'"
+
+- name: test that the file is not copied a second time
+  synchronize:
+    src: normal_file.txt
+    dest: '{{ output_dir }}/remote_file.txt'
+ register: sync_result
+
+- assert:
+ that:
+ - "sync_result.changed == False"
+
+- name: cleanup old files
+ file:
+ path: '{{ output_dir }}'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/runme.sh b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/runme.sh
new file mode 100644
index 00000000..e9e4811f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -ux
+
+CONTAINER_NAME=buildah-container
+
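+# Remove any container left over from a previous run; this may fail when none exists,
+# which is why errexit (set -e) is only enabled afterwards.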
+buildah rm $CONTAINER_NAME >/dev/null 2>/dev/null
+
+set -e
+
+buildah from --name $CONTAINER_NAME docker.io/library/centos:7
+trap '{ buildah rm $CONTAINER_NAME; }' EXIT
+buildah run $CONTAINER_NAME -- yum install -y rsync
+
+ansible-playbook test_synchronize_buildah.yml -c buildah -i inventory -vv
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml
new file mode 100644
index 00000000..e1cc9665
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/synchronize-buildah/test_synchronize_buildah.yml
@@ -0,0 +1,8 @@
+---
+- hosts: buildah-container
+ connection: buildah
+ gather_facts: no
+ vars:
+ output_dir: /tmp/ansible_test_synchronize_buildah
+ roles:
+ - test_buildah_synchronize
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/aliases
new file mode 100644
index 00000000..1ef4c361
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/aliases
@@ -0,0 +1,5 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml
new file mode 100644
index 00000000..3662d3e9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml
@@ -0,0 +1,77 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Because hwclock usually isn't available inside Docker containers in Shippable,
+# these tasks detect whether hwclock works and only run the hwclock tests if it
+# is supported. That is why it is recommended to run these tests locally with
+# `--docker-privileged` on centos6, centos7 and ubuntu1404 images. Example
+# command to run on centos6:
+#
+# ansible-test integration --docker centos6 --docker-privileged -v timezone
+
+##
+## set path to timezone config files
+##
+
+- name: set config file path on Debian
+ set_fact:
+ timezone_config_file: '/etc/timezone'
+ when: ansible_os_family == 'Debian'
+
+- name: set config file path on RedHat
+ set_fact:
+ timezone_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+##
+## set path to hwclock config files
+##
+
+- name: set config file path on Debian
+ set_fact:
+ hwclock_config_file: '/etc/default/rcS'
+ when: ansible_os_family == 'Debian'
+
+- name: set config file path on RedHat
+ set_fact:
+ hwclock_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+####
+#### timezone tests
+####
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when:
+ - ansible_service_mgr == 'systemd'
+ - ansible_distribution == 'Fedora'
+ - ansible_facts.distribution_major_version is version('31', '<')
+
+
+- name: Run tests
+  # Skip tests on Fedora 31 and 32 because dbus fails to start unless the container is run in privileged mode.
+ # Even then, it starts unreliably. This may be due to the move to cgroup v2 in Fedora 31 and 32.
+ # https://www.redhat.com/sysadmin/fedora-31-control-group-v2
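+  # The '~' operator concatenates the two facts, producing for example 'Fedora31'.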
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['Fedora31', 'Fedora32']
+ block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: original_timezone
+
+ - name: Value of original_timezone
+ debug:
+ msg: "{{ original_timezone }}"
+
+ - block:
+ - include_tasks: test.yml
+ always:
+ - name: Restore original system timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml
new file mode 100644
index 00000000..ec0d854d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml
@@ -0,0 +1,607 @@
+##
+## test setting timezone, idempotency and checkmode
+##
+
+- name: set timezone to Australia/Brisbane (checkmode)
+ timezone:
+ name: Australia/Brisbane
+ check_mode: yes
+ register: timezone_set_checkmode
+
+- name: ensure timezone reported as changed in checkmode
+ assert:
+ that:
+ - timezone_set_checkmode.changed
+ - timezone_set_checkmode.diff.after.name == 'Australia/Brisbane'
+ - timezone_set_checkmode.diff.before.name == 'Etc/UTC'
+
+- name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_set
+
+- name: ensure timezone changed
+ assert:
+ that:
+ - timezone_set.changed
+ - timezone_set.diff.after.name == 'Australia/Brisbane'
+ - timezone_set.diff.before.name == 'Etc/UTC'
+
+- name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ changed_when: no
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Australia/Brisbane"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^Australia/Brisbane' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane again
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_again
+
+- name: ensure timezone idempotency
+ assert:
+ that:
+ - not timezone_again.changed
+
+- name: set timezone to Australia/Brisbane again in checkmode
+ timezone:
+    name: Australia/Brisbane
+  check_mode: yes
+  register: timezone_again_checkmode
+
+- name: set timezone idempotency (checkmode)
+ assert:
+ that:
+ - not timezone_again_checkmode.changed
+
+##
+## tests for same timezones with different names
+##
+
+- name: check dpkg-reconfigure
+ shell: type dpkg-reconfigure
+ register: check_dpkg_reconfigure
+ ignore_errors: yes
+ changed_when: no
+
+- name: check timedatectl
+ shell: type timedatectl && timedatectl
+ register: check_timedatectl
+ ignore_errors: yes
+ changed_when: no
+
+- block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+
+ - name: change timezone from Etc/UTC to UTC
+ timezone:
+ name: UTC
+ register: timezone_etcutc_to_utc
+
+ - name: check timezone changed from Etc/UTC to UTC
+ assert:
+ that:
+ - timezone_etcutc_to_utc.changed
+ - timezone_etcutc_to_utc.diff.before.name == 'Etc/UTC'
+ - timezone_etcutc_to_utc.diff.after.name == 'UTC'
+
+ - name: change timezone from UTC to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: timezone_utc_to_etcutc
+
+ - name: check timezone changed from UTC to Etc/UTC
+ assert:
+ that:
+ - timezone_utc_to_etcutc.changed
+ - timezone_utc_to_etcutc.diff.before.name == 'UTC'
+ - timezone_utc_to_etcutc.diff.after.name == 'Etc/UTC'
+
+ when:
+    # FIXME: Due to a bug in dpkg-reconfigure, these tests fail on non-systemd Debian systems
+ - check_dpkg_reconfigure.rc != 0 or check_timedatectl.rc == 0
+
+##
+## no systemd tests for timezone
+##
+
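+# This block only runs on non-systemd systems where timezone_config_file is defined
+# (see the 'when' condition at the end of the block).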
+- block:
+ ##
+ ## test with empty config file
+ ##
+
+ - name: empty config file
+ command: cp /dev/null {{ timezone_config_file }}
+
+ - name: set timezone to Europe/Belgrade (empty config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_empty_conf
+
+ - name: check if timezone set (empty config file)
+ assert:
+ that:
+ - timezone_empty_conf.changed
+ - timezone_empty_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_empty_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (empty config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with deleted config file
+ ##
+
+ - name: remove config file
+ file:
+ path: '{{ timezone_config_file }}'
+ state: absent
+
+ - name: set timezone to Europe/Belgrade (no config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_missing_conf
+
+ - name: check if timezone set (no config file)
+ assert:
+ that:
+ - timezone_missing_conf.changed
+ - timezone_missing_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_missing_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (no config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as symbolic link to a zoneinfo file
+ ##
+
+  - name: create symlink /etc/localtime -> /usr/share/zoneinfo/Etc/UTC
+ file:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symllink
+
+ - name: check if timezone set (over symlink)
+ assert:
+ that:
+ - timezone_symllink.changed
+ - timezone_symllink.diff.after.name == 'Europe/Belgrade'
+ - timezone_symllink.diff.before.name == 'Etc/UTC'
+
+ - name: check if the timezone is actually set (over symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime as broken symbolic link
+ ##
+
+  - name: create broken symlink /etc/localtime -> /tmp/foo
+ file:
+ src: /tmp/foo
+ dest: /etc/localtime
+ state: link
+ force: yes
+
+ - name: set timezone to Europe/Belgrade (over broken symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symllink_broken
+
+ - name: check if timezone set (over broken symlink)
+ assert:
+ that:
+ - timezone_symllink_broken.changed
+ - timezone_symllink_broken.diff.after.name == 'Europe/Belgrade'
+ - timezone_symllink_broken.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over broken symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+
+ ##
+ ## test with /etc/localtime set manually using copy
+ ##
+
+  - name: set timezone manually by copying zone info file to /etc/localtime
+ copy:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ remote_src: yes
+
+ - name: set timezone to Europe/Belgrade (over copied file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_copied
+
+ - name: check if timezone set (over copied file)
+ assert:
+ that:
+ - timezone_copied.changed
+ - timezone_copied.diff.after.name == 'Europe/Belgrade'
+ - timezone_copied.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over copied file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+ when:
+ - ansible_service_mgr != 'systemd'
+ - timezone_config_file is defined
+
+
+####
+#### hwclock tests
+####
+
+- name: check if hwclock is supported in the environment
+ command: hwclock --test
+ register: hwclock_test
+ ignore_errors: yes
+
+- name: check if timedatectl works in the environment
+ command: timedatectl
+ register: timedatectl_test
+ ignore_errors: yes
+
+- name: determine whether hwclock is supported in this environment
+ set_fact:
+ hwclock_supported: '{{ hwclock_test is successful or timedatectl_test is successful }}'
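+
+# hwclock_supported gates the block of hwclock tests below via the 'when' condition at the end of this file.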
+##
+## test set hwclock, idempotency and checkmode
+##
+
+- block:
+ - name: set hwclock to local
+ timezone:
+ hwclock: local
+
+ - name: set hwclock to UTC (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_set_checkmode
+
+ - name: ensure hwclock reported as changed (checkmode)
+ assert:
+ that:
+ - hwclock_set_checkmode.changed
+ - hwclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - hwclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+      - name: ensure that checkmode didn't update hwclock in the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC
+ timezone:
+ hwclock: UTC
+ register: hwclock_set
+
+ - name: ensure hwclock changed
+ assert:
+ that:
+ - hwclock_set.changed
+ - hwclock_set.diff.after.hwclock == 'UTC'
+ - hwclock_set.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+  - name: set hwclock to UTC again
+ timezone:
+ hwclock: UTC
+ register: hwclock_again
+
+ - name: set hwclock idempotency
+ assert:
+ that:
+ - not hwclock_again.changed
+
+  - name: set hwclock to UTC again (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: yes
+ register: hwclock_again_checkmode
+
+ - name: set hwclock idempotency (checkmode)
+ assert:
+ that:
+ - not hwclock_again_checkmode.changed
+
+
+ ##
+ ## no systemd tests for hwclock
+ ##
+
+ - block:
+ ##
+ ## test set hwclock with both /etc/adjtime and conf file deleted
+ ##
+
+ - name: remove /etc/adjtime and conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted /etc/adjtime and conf file
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_and_conf
+
+ - name: ensure hwclock changed with deleted /etc/adjtime and conf
+ assert:
+ that:
+ - hwclock_set_utc_deleted_adjtime_and_conf.changed
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime deleted
+ ##
+
+ - name: remove /etc/adjtime
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+
+ - name: set hwclock to UTC with deleted /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_utc
+
+    - name: ensure hwclock not changed when set to UTC with deleted /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_deleted_adjtime_utc.changed
+ - hwclock_set_utc_deleted_adjtime_utc.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_utc.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with deleted /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_deleted_adjtime_local
+
+ - name: ensure hwclock changed to LOCAL with deleted /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_deleted_adjtime_local.changed
+ - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local'
+ - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC'
+
+
+ ##
+ ## test set hwclock with conf file deleted
+ ##
+
+ - name: remove conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted conf
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_conf
+
+    - name: ensure hwclock changed with deleted conf file
+ assert:
+ that:
+ - hwclock_set_utc_deleted_conf.changed
+ - hwclock_set_utc_deleted_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime missing UTC/LOCAL strings
+ ##
+
+ - name: create /etc/adjtime without UTC/LOCAL
+ copy:
+ content: '0.0 0 0\n0'
+ dest: /etc/adjtime
+
+ - name: set hwclock to UTC with broken /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_broken_adjtime
+
+ - name: ensure hwclock doesn't report changed with broken /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_broken_adjtime.changed
+ - hwclock_set_utc_broken_adjtime.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_broken_adjtime.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with broken /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_broken_adjtime
+
+ - name: ensure hwclock changed to LOCAL with broken /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_broken_adjtime.changed
+ - hwclock_set_local_broken_adjtime.diff.after.hwclock == 'local'
+ - hwclock_set_local_broken_adjtime.diff.before.hwclock == 'UTC'
+ when:
+ - ansible_service_mgr != 'systemd'
+ - hwclock_config_file is defined
+
+ ####
+ #### timezone + hwclock tests
+ ####
+
+ ##
+ ## test set timezone and hwclock, idempotency and checkmode
+ ##
+
+ - name: set timezone to Etc/UTC and hwclock to local
+ timezone:
+ name: Etc/UTC
+ hwclock: local
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: yes
+ register: tzclock_set_checkmode
+
+ - name: ensure timezone and hwclock reported as changed in checkmode
+ assert:
+ that:
+ - tzclock_set_checkmode.changed
+ - tzclock_set_checkmode.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set_checkmode.diff.before.name == 'Etc/UTC'
+ - tzclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - tzclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - name: ensure checkmode didn't change the timezone
+    command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ register: result
+ failed_when: result is not failed
+ changed_when: no
+
+ - block:
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+    - name: ensure that checkmode didn't update hwclock in the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set
+
+ - name: ensure timezone and hwclock changed
+ assert:
+ that:
+ - tzclock_set.changed
+ - tzclock_set.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set.diff.before.name == 'Etc/UTC'
+ - tzclock_set.diff.after.hwclock == 'UTC'
+ - tzclock_set.diff.before.hwclock == 'local'
+
+ - name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: no
+
+ - block:
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Europe/Belgrade"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that the timezone is updated in the config file
+ command: egrep 'Europe/Belgrade' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set_again
+
+ - name: set timezone and hwclock idempotency
+ assert:
+ that:
+ - not tzclock_set_again.changed
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again (checkmode)
+ timezone:
+ name: Europe/Belgrade
+      hwclock: UTC
+    check_mode: yes
+    register: tzclock_set_again_checkmode
+
+ - name: set timezone and hwclock idempotency in checkmode
+ assert:
+ that:
+ - not tzclock_set_again_checkmode.changed
+
+ when:
+ - ansible_system == 'Linux'
+ - hwclock_supported
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/aliases
new file mode 100644
index 00000000..7ab34d8e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/aliases
@@ -0,0 +1,12 @@
+shippable/posix/group2
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel8.0
+skip/rhel8.0b
+skip/rhel8.1b
+skip/docker
+needs/root
+destructive
+needs/target/setup_epel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml
new file mode 100644
index 00000000..3feb480c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Make sure ufw is installed
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when: ansible_distribution == 'RedHat'
+- name: Install iptables (SuSE only)
+ package:
+ name: iptables
+ become: yes
+ when: ansible_os_family == 'Suse'
+- name: Install ufw
+ become: yes
+ package:
+ name: ufw
+
+# Run the tests
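+# Every file matched below is executed through run-test.yml, which resets ufw to
+# factory defaults before and after including it.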
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+ become: yes
+
+ # Cleanup
+ always:
+ - pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+ - name: Reset ufw to factory defaults and disable
+ ufw:
+ state: reset
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml
new file mode 100644
index 00000000..e9c5d292
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml
@@ -0,0 +1,21 @@
+---
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset ufw to factory defaults
+ ufw:
+ state: reset
+- name: Disable ufw
+ ufw:
+    # Some versions of ufw have a bug where reset does not disable the firewall.
+    # That's why we explicitly disable it here. See
+ # https://bugs.launchpad.net/ufw/+bug/1810082
+ state: disabled
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
+- name: Reset to factory defaults
+ ufw:
+ state: reset
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml
new file mode 100644
index 00000000..3c625112
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml
@@ -0,0 +1,402 @@
+---
+# ############################################
+- name: Make sure it is off
+ ufw:
+ state: disabled
+- name: Enable (check mode)
+ ufw:
+ state: enabled
+ check_mode: yes
+ register: enable_check
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+- name: Enable (idempotency)
+ ufw:
+ state: enabled
+ register: enable_idem
+- name: Enable (idempotency, check mode)
+ ufw:
+ state: enabled
+ check_mode: yes
+ register: enable_idem_check
+- assert:
+ that:
+ - enable_check is changed
+ - enable is changed
+ - enable_idem is not changed
+ - enable_idem_check is not changed
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: yes
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: yes
+ check_mode: yes
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: yes
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: yes
+ check_mode: yes
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: Reload ufw
+ ufw:
+ state: reloaded
+ register: reload
+- name: Reload ufw (check mode)
+ ufw:
+ state: reloaded
+ check_mode: yes
+ register: reload_check
+- assert:
+ that:
+ - reload is changed
+ - reload_check is changed
+
+# ############################################
+- name: Disable (check mode)
+ ufw:
+ state: disabled
+ check_mode: yes
+ register: disable_check
+- name: Disable
+ ufw:
+ state: disabled
+ register: disable
+- name: Disable (idempotency)
+ ufw:
+ state: disabled
+ register: disable_idem
+- name: Disable (idempotency, check mode)
+ ufw:
+ state: disabled
+ check_mode: yes
+ register: disable_idem_check
+- assert:
+ that:
+ - disable_check is changed
+ - disable is changed
+ - disable_idem is not changed
+ - disable_idem_check is not changed
+
+# ############################################
+- name: Re-enable
+ ufw:
+ state: enabled
+- name: Reset (check mode)
+ ufw:
+ state: reset
+ check_mode: yes
+ register: reset_check
+- pause:
+ # Should not be needed, but since ufw is ignoring --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset
+ ufw:
+ state: reset
+ register: reset
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset (idempotency)
+ ufw:
+ state: reset
+ register: reset_idem
+- pause:
+ # Should not be needed, but since ufw is ignoring --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset (idempotency, check mode)
+ ufw:
+ state: reset
+ check_mode: yes
+ register: reset_idem_check
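+# Unlike the other operations, reset is never idempotent: every call, including in
+# check mode, reports a change.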
+- assert:
+ that:
+ - reset_check is changed
+ - reset is changed
+ - reset_idem is changed
+ - reset_idem_check is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml
new file mode 100644
index 00000000..69b2cde9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml
@@ -0,0 +1,150 @@
+---
+- name: Enable ufw
+ ufw:
+ state: enabled
+
+# ############################################
+- name: Make sure logging is off
+ ufw:
+ logging: no
+- name: Logging (check mode)
+ ufw:
+ logging: yes
+ check_mode: yes
+ register: logging_check
+- name: Logging
+ ufw:
+ logging: yes
+ register: logging
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging
+ environment:
+ LC_ALL: C
+- name: Logging (idempotency)
+ ufw:
+ logging: yes
+ register: logging_idem
+- name: Logging (idempotency, check mode)
+ ufw:
+ logging: yes
+ check_mode: yes
+ register: logging_idem_check
+- name: Logging (change, check mode)
+ ufw:
+ logging: full
+ check_mode: yes
+ register: logging_change_check
+- name: Logging (change)
+ ufw:
+ logging: full
+ register: logging_change
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging_change
+ environment:
+ LC_ALL: C
+- assert:
+ that:
+ - logging_check is changed
+ - logging is changed
+ - "ufw_logging.stdout == 'Logging: on (low)'"
+ - logging_idem is not changed
+ - logging_idem_check is not changed
+ - "ufw_logging_change.stdout == 'Logging: on (full)'"
+ - logging_change is changed
+ - logging_change_check is changed
+
+# ############################################
+- name: Default (check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: yes
+ register: default_check
+- name: Default
+ ufw:
+ default: reject
+ direction: incoming
+ register: default
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults
+ environment:
+ LC_ALL: C
+- name: Default (idempotency)
+ ufw:
+ default: reject
+ direction: incoming
+ register: default_idem
+- name: Default (idempotency, check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: yes
+ register: default_idem_check
+- name: Default (change, check mode)
+ ufw:
+ default: allow
+ direction: incoming
+ check_mode: yes
+ register: default_change_check
+- name: Default (change)
+ ufw:
+ default: allow
+ direction: incoming
+ register: default_change
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change
+ environment:
+ LC_ALL: C
+- name: Default (change again)
+ ufw:
+ default: deny
+ direction: incoming
+ register: default_change_2
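+# When 'direction' is omitted, the default policy applies to incoming traffic, so the
+# implicit changes below are measured against the 'deny' policy set just above.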
+- name: Default (change incoming implicitly, check mode)
+ ufw:
+ default: allow
+ check_mode: yes
+ register: default_change_implicit_check
+- name: Default (change incoming implicitly)
+ ufw:
+ default: allow
+ register: default_change_implicit
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change_implicit
+ environment:
+ LC_ALL: C
+- name: Default (change incoming implicitly, idempotent, check mode)
+ ufw:
+ default: allow
+ check_mode: yes
+ register: default_change_implicit_idem_check
+- name: Default (change incoming implicitly, idempotent)
+ ufw:
+ default: allow
+ register: default_change_implicit_idem
+- assert:
+ that:
+ - default_check is changed
+ - default is changed
+ - "'reject (incoming)' in ufw_defaults.stdout"
+ - default_idem is not changed
+ - default_idem_check is not changed
+ - default_change_check is changed
+ - default_change is changed
+ - "'allow (incoming)' in ufw_defaults_change.stdout"
+ - default_change_2 is changed
+ - default_change_implicit_check is changed
+ - default_change_implicit is changed
+ - default_change_implicit_idem_check is not changed
+ - default_change_implicit_idem is not changed
+ - "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
new file mode 100644
index 00000000..3bb44a0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
@@ -0,0 +1,80 @@
+---
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+
+# ## CREATE RULES ############################
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 22
+ to_ip: 0.0.0.0
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 23
+ to_ip: 0.0.0.0
+
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 122
+ to_ip: "::"
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 123
+ to_ip: "::"
+
+- name: first-ipv4
+ ufw:
+ rule: deny
+ port: 10
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: first-ipv4
+- name: last-ipv4
+ ufw:
+ rule: deny
+ port: 11
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: last-ipv4
+
+- name: first-ipv6
+ ufw:
+ rule: deny
+ port: 110
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+- name: last-ipv6
+ ufw:
+ rule: deny
+ port: 111
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: last-ipv6
+
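+# With insert: 0, insert_relative_to places each rule at the position of the named
+# anchor rule: port 10 lands before the first IPv4 rule (port 22) and port 11 before
+# the last IPv4 rule (port 23); the IPv6 rules (110/111) behave the same way.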
+# ## CHECK RESULT ############################
+- name: Get rules
+ shell: |
+ ufw status | grep DENY | cut -f 1-2 -d ' ' | grep -E "^(0\.0\.0\.0|::) [123]+"
+ # Note that there was also a rule "ff02::fb mDNS" on at least one CI run;
+ # to ignore these, the extra filtering (grepping for DENY and the regex) makes
+ # sure to remove all rules not added here.
+ register: ufw_status
+- assert:
+ that:
+ - ufw_status.stdout_lines == expected_stdout
+ vars:
+ expected_stdout:
+ - "0.0.0.0 10"
+ - "0.0.0.0 22"
+ - "0.0.0.0 11"
+ - "0.0.0.0 23"
+ - ":: 110"
+ - ":: 122"
+ - ":: 111"
+ - ":: 123"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml
new file mode 100644
index 00000000..776a72f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml
@@ -0,0 +1,81 @@
+- name: Enable
+ ufw:
+ state: enabled
+
+- name: Route with interface in and out
+ ufw:
+ rule: allow
+ route: yes
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ to_ip: 8.8.8.8
+ from_port: 1111
+ to_port: 2222
+
+- name: Route with interface in
+ ufw:
+ rule: allow
+ route: yes
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Route with interface out
+ ufw:
+ rule: allow
+ route: yes
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Non-route with interface in
+ ufw:
+ rule: allow
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 3333
+
+- name: Non-route with interface out
+ ufw:
+ rule: allow
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 4444
+
+- name: Check result
+ shell: ufw status |grep -E '(ALLOW|DENY|REJECT|LIMIT)' |sed -E 's/[ \t]+/ /g'
+ register: ufw_status
+
+- assert:
+ that:
+ - '"8.8.8.8 2222/tcp on bar ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere on bar ALLOW FWD 1.1.1.1 1111/tcp " in stdout'
+ - '"Anywhere on foo ALLOW 1.1.1.1 3333/tcp " in stdout'
+ - '"Anywhere ALLOW OUT 1.1.1.1 4444/tcp on bar " in stdout'
+ vars:
+ stdout: '{{ ufw_status.stdout_lines }}'
+
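+# Only route rules may combine interface_in and interface_out, so this ordinary rule must fail.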
+- name: Non-route with interface_in and interface_out
+ ufw:
+ rule: allow
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+ to_ip: 8.8.8.8
+ to_port: 2222
+ ignore_errors: yes
+ register: ufw_non_route_iface
+
+- assert:
+ that:
+ - ufw_non_route_iface is failed
+ - '"Only route rules" in ufw_non_route_iface.msg'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases
new file mode 100644
index 00000000..ed821c27
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases
@@ -0,0 +1,2 @@
+shippable/posix/group2
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml
new file mode 100644
index 00000000..166bef99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml
@@ -0,0 +1,53 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
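+# The MAC addresses used here come from the IANA documentation range (00-00-5E-00-53-xx)
+# and 192.0.2.255 is a TEST-NET-1 broadcast address, so no real machine is woken up.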
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ wakeonlan:
+ mac: 00:00:5E:00:53:66
+ broadcast: 192.0.2.255
+
+- name: Send a magic Wake-on-LAN packet on port 9 to 00-00-5E-00-53-66
+ wakeonlan:
+ mac: 00-00-5E-00-53-66
+ port: 9
+
+- name: Provide an incorrect MAC length
+ wakeonlan:
+ mac: 00-00-5E-00-53-66-AB
+ port: 9
+ ignore_errors: yes
+ register: incorrect_mac_length
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_mac_length is failed
+ - incorrect_mac_length.msg is search('Incorrect MAC address length')
+
+- name: Provide an incorrect MAC format
+ wakeonlan:
+ mac: ZW-YX-WV-UT-SR-QP
+ port: 9
+ ignore_errors: yes
+ register: incorrect_mac_format
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_mac_format is failed
+ - incorrect_mac_format.msg is search('Incorrect MAC address format')
+
+- name: Cause a socket error
+ wakeonlan:
+ mac: 00-00-5E-00-53-66
+ broadcast: 345.567.678.890
+ ignore_errors: yes
+ register: incorrect_broadcast_address
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_broadcast_address is failed
+ - incorrect_broadcast_address.msg is search('not known|Name does not resolve')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/aliases
new file mode 100644
index 00000000..48e54c3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/aliases
@@ -0,0 +1,7 @@
+shippable/posix/group2
+skip/aix
+skip/docker
+skip/freebsd
+skip/osx
+skip/macos
+destructive
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml
new file mode 100644
index 00000000..af18fb84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml
@@ -0,0 +1 @@
+test_file: "{{ output_dir }}/foo.txt"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml
new file mode 100644
index 00000000..989d19ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml
@@ -0,0 +1,16 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Setup
+  include_tasks: setup.yml
+
+- name: Check availability of xattr support
+ command: setfattr -n user.foo {{ test_file }}
+ ignore_errors: yes
+ register: xattr
+
+- name: Test
+  include_tasks: test.yml
+ when: xattr is not failed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml
new file mode 100644
index 00000000..674e250a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml
@@ -0,0 +1,9 @@
+- name: Install
+ package:
+ name: attr
+ state: present
+
+- name: Create file
+ file:
+ path: "{{ test_file }}"
+    state: touch
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml
new file mode 100644
index 00000000..61643268
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml
@@ -0,0 +1,67 @@
+- name: Set attributes
+ xattr:
+ path: "{{ test_file }}"
+ key: user.foo
+ value: bar
+ register: xattr_set_result
+
+- name: Get attributes
+ xattr:
+ path: "{{ test_file }}"
+ register: xattr_get_all_result
+
+- name: Get specific attribute
+ xattr:
+ path: "{{ test_file }}"
+ key: foo
+ register: xattr_get_specific_result
+
+- assert:
+ that:
+ - "xattr_set_result.changed"
+ - "xattr_get_all_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_all_result.changed"
+ - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_specific_result.changed"
+
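+# 'key: user.foo' and 'namespace: user' with 'key: foo' address the same attribute, so this should be a no-op.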
+- name: Set attribute again
+ xattr:
+ path: "{{ test_file }}"
+ namespace: user
+ key: foo
+ value: bar
+ register: xattr_set_again_result
+
+- assert:
+ that:
+ - "not xattr_set_again_result.changed"
+
+- name: Unset attribute
+ xattr:
+ path: "{{ test_file }}"
+ key: foo
+ state: absent
+ register: xattr_unset_result
+
+- name: Get attributes
+ xattr:
+ path: "{{ test_file }}"
+ register: xattr_get_after_unset_result
+
+- assert:
+ that:
+ - "xattr_unset_result.changed"
+ - "xattr_get_after_unset_result['xattr'] == {}"
+ - "not xattr_get_after_unset_result.changed"
+
+- name: Unset attribute again
+ xattr:
+ path: "{{ test_file }}"
+ namespace: user
+ key: foo
+ state: absent
+ register: xattr_unset_result
+
+- assert:
+ that:
+ - "not xattr_set_again_result.changed"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases
new file mode 100644
index 00000000..8781d1ef
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases
@@ -0,0 +1,7 @@
+needs/privileged
+needs/root
+shippable/posix/group1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml
new file mode 100644
index 00000000..ff030ce7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml
@@ -0,0 +1,42 @@
+---
+uquota_default_bsoft: 1m
+uquota_default_bhard: 2m
+uquota_default_isoft: 100
+uquota_default_ihard: 200
+uquota_default_rtbsoft: 1m
+uquota_default_rtbhard: 2m
+
+uquota_user_bsoft: 2m
+uquota_user_bhard: 3m
+uquota_user_isoft: 300
+uquota_user_ihard: 400
+uquota_user_rtbsoft: 3m
+uquota_user_rtbhard: 4m
+
+gquota_default_bsoft: 1m
+gquota_default_bhard: 2m
+gquota_default_isoft: 100
+gquota_default_ihard: 200
+gquota_default_rtbsoft: 1m
+gquota_default_rtbhard: 2m
+
+gquota_group_bsoft: 2m
+gquota_group_bhard: 3m
+gquota_group_isoft: 300
+gquota_group_ihard: 400
+gquota_group_rtbsoft: 3m
+gquota_group_rtbhard: 4m
+
+pquota_default_bsoft: 1m
+pquota_default_bhard: 2m
+pquota_default_isoft: 100
+pquota_default_ihard: 200
+pquota_default_rtbsoft: 1m
+pquota_default_rtbhard: 2m
+
+pquota_project_bsoft: 2m
+pquota_project_bhard: 3m
+pquota_project_isoft: 300
+pquota_project_ihard: 400
+pquota_project_rtbsoft: 3m
+pquota_project_rtbhard: 4m
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml
new file mode 100644
index 00000000..1810d4be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml
new file mode 100644
index 00000000..daa50d95
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml
@@ -0,0 +1,142 @@
+- name: Create disk image
+  command: dd if=/dev/zero of={{ remote_tmp_dir }}/img-gquota bs=1M count=20
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-gquota'
+ fstype: xfs
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-gquota'
+ path: '{{ remote_tmp_dir }}/gquota'
+ fstype: xfs
+ opts: gquota
+ state: mounted
+ - name: Apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_before
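+  # xfs_quota reports block limits in bytes, so the human-readable values (e.g. '1m')
+  # are converted with human_to_bytes before comparing.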
+ - name: Assert default group limits results
+ assert:
+ that:
+ - test_gquota_default_before.changed
+ - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes
+ - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes
+ - test_gquota_default_before.isoft == gquota_default_isoft
+ - test_gquota_default_before.ihard == gquota_default_ihard
+ - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes
+ - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes
+ - name: Apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_before
+ - name: Assert group limits results for xfsquotauser
+ assert:
+ that:
+ - test_gquota_group_before.changed
+ - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes
+ - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes
+ - test_gquota_group_before.isoft == gquota_group_isoft
+ - test_gquota_group_before.ihard == gquota_group_ihard
+ - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes
+ - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes
+ - name: Re-apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_after
+ - name: Assert default group limits results after re-apply
+ assert:
+ that:
+ - not test_gquota_default_after.changed
+ - name: Re-apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_after
+ - name: Assert group limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_gquota_group_after.changed
+ - name: Reset default group limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_default
+ - name: Assert reset of default group limits results
+ assert:
+ that:
+ - test_reset_gquota_default.changed
+ - test_reset_gquota_default.bsoft == 0
+ - test_reset_gquota_default.bhard == 0
+ - test_reset_gquota_default.isoft == 0
+ - test_reset_gquota_default.ihard == 0
+ - test_reset_gquota_default.rtbsoft == 0
+ - test_reset_gquota_default.rtbhard == 0
+ - name: Reset group limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_group
+ - name: Assert reset of group limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_gquota_group.changed
+ - test_reset_gquota_group.bsoft == 0
+ - test_reset_gquota_group.bhard == 0
+ - test_reset_gquota_group.isoft == 0
+ - test_reset_gquota_group.ihard == 0
+ - test_reset_gquota_group.rtbsoft == 0
+ - test_reset_gquota_group.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/gquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-gquota'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml
new file mode 100644
index 00000000..0256d5cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create test user
+ user:
+ name: xfsquotauser
+ state: present
+ become: yes
+
+ - include_tasks: uquota.yml
+ - include_tasks: gquota.yml
+ - include_tasks: pquota.yml
+
+ always:
+ - name: Cleanup test user
+ user:
+ name: xfsquotauser
+ state: absent
+ become: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml
new file mode 100644
index 00000000..5d89ba44
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml
@@ -0,0 +1,179 @@
+- name: Create disk image
+ command: 'dd if=/dev/zero of={{ remote_tmp_dir }}/img-pquota bs=1M count=20'
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-pquota'
+ fstype: xfs
+- name: Create xfs related files
+ file:
+ path: /etc/{{ item }}
+ state: touch
+ become: true
+ loop:
+ - projid
+ - projects
+- name: Add test xfs quota project id
+ lineinfile:
+ path: /etc/projid
+ line: xft_quotaval:99999
+ state: present
+ become: true
+- name: Add test xfs quota project path
+ lineinfile:
+ path: /etc/projects
+ line: 99999:{{ remote_tmp_dir }}/pquota/test
+ state: present
+ become: true
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-pquota'
+ path: '{{ remote_tmp_dir }}/pquota'
+ fstype: xfs
+ opts: pquota
+ state: mounted
+ - name: Create test directory
+ file:
+ path: '{{ remote_tmp_dir }}/pquota/test'
+ state: directory
+ become: true
+ - name: Apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_before
+ - name: Assert default project limits results
+ assert:
+ that:
+ - test_pquota_default_before.changed
+ - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes
+ - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes
+ - test_pquota_default_before.isoft == pquota_default_isoft
+ - test_pquota_default_before.ihard == pquota_default_ihard
+ - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes
+ - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes
+ - name: Apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_before
+ - name: Assert project limits results for xft_quotaval
+ assert:
+ that:
+ - test_pquota_project_before.changed
+ - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes
+ - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes
+ - test_pquota_project_before.isoft == pquota_project_isoft
+ - test_pquota_project_before.ihard == pquota_project_ihard
+ - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes
+ - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes
+ - name: Re-apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_after
+ - name: Assert default project limits results after re-apply
+ assert:
+ that:
+ - not test_pquota_default_after.changed
+ - name: Re-apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_after
+ - name: Assert project limits results for xft_quotaval after re-apply
+ assert:
+ that:
+ - not test_pquota_project_after.changed
+ - name: Reset default project limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_default
+ - name: Assert reset of default project limits results
+ assert:
+ that:
+ - test_reset_pquota_default.changed
+ - test_reset_pquota_default.bsoft == 0
+ - test_reset_pquota_default.bhard == 0
+ - test_reset_pquota_default.isoft == 0
+ - test_reset_pquota_default.ihard == 0
+ - test_reset_pquota_default.rtbsoft == 0
+ - test_reset_pquota_default.rtbhard == 0
+ - name: Reset project limits for xft_quotaval
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_project
+ - name: Assert reset of project limits results for xft_quotaval
+ assert:
+ that:
+ - test_reset_pquota_project.changed
+ - test_reset_pquota_project.bsoft == 0
+ - test_reset_pquota_project.bhard == 0
+ - test_reset_pquota_project.isoft == 0
+ - test_reset_pquota_project.ihard == 0
+ - test_reset_pquota_project.rtbsoft == 0
+ - test_reset_pquota_project.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/pquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-pquota'
+ state: absent
+ - name: Remove xfs quota project id
+ lineinfile:
+ path: /etc/projid
+ regexp: ^xft_quotaval:99999$
+ state: absent
+ become: true
+ - name: Remove xfs quota project path
+ lineinfile:
+ path: /etc/projects
+ regexp: ^99999:.*$
+ state: absent
+ become: true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml
new file mode 100644
index 00000000..d2e5bfe2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml
@@ -0,0 +1,142 @@
+- name: Create disk image
+ command: 'dd if=/dev/zero of={{ remote_tmp_dir }}/img-uquota bs=1M count=20'
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-uquota'
+ fstype: xfs
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-uquota'
+ path: '{{ remote_tmp_dir }}/uquota'
+ fstype: xfs
+ opts: uquota
+ state: mounted
+ - name: Apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_before
+ - name: Assert default user limits results
+ assert:
+ that:
+ - test_uquota_default_before.changed
+ - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes
+ - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes
+ - test_uquota_default_before.isoft == uquota_default_isoft
+ - test_uquota_default_before.ihard == uquota_default_ihard
+ - test_uquota_default_before.rtbsoft == uquota_default_rtbsoft|human_to_bytes
+ - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes
+ - name: Apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_before
+ - name: Assert user limits results for xfsquotauser
+ assert:
+ that:
+ - test_uquota_user_before.changed
+ - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes
+ - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes
+ - test_uquota_user_before.isoft == uquota_user_isoft
+ - test_uquota_user_before.ihard == uquota_user_ihard
+ - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes
+ - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes
+ - name: Re-apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_after
+ - name: Assert default user limits results after re-apply
+ assert:
+ that:
+ - not test_uquota_default_after.changed
+ - name: Re-apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_after
+ - name: Assert user limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_uquota_user_after.changed
+ - name: Reset default user limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_default
+ - name: Assert reset of default user limits results
+ assert:
+ that:
+ - test_reset_uquota_default.changed
+ - test_reset_uquota_default.bsoft == 0
+ - test_reset_uquota_default.bhard == 0
+ - test_reset_uquota_default.isoft == 0
+ - test_reset_uquota_default.ihard == 0
+ - test_reset_uquota_default.rtbsoft == 0
+ - test_reset_uquota_default.rtbhard == 0
+ - name: Reset user limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_user
+ - name: Assert reset of user limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_uquota_user.changed
+ - test_reset_uquota_user.bsoft == 0
+ - test_reset_uquota_user.bhard == 0
+ - test_reset_uquota_user.isoft == 0
+ - test_reset_uquota_user.ihard == 0
+ - test_reset_uquota_user.rtbsoft == 0
+ - test_reset_uquota_user.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/uquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-uquota'
+ state: absent
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/aliases
new file mode 100644
index 00000000..0725da56
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/aliases
@@ -0,0 +1,3 @@
+destructive
+shippable/posix/group3
+skip/aix
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml
new file mode 100644
index 00000000..d0e3e39a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Толстый бар</name>
+ <beers>
+ <beer>Окское</beer>
+ <beer>Невское</beer>
+ </beers>
+ <rating subjective="да">десять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tolstyybar.com</address>
+ </website>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml
new file mode 100644
index 00000000..5afc7974
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml
new file mode 100644
index 00000000..61747d4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml
new file mode 100644
index 00000000..5438ced5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - setup_pkg_mgr
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml
new file mode 100644
index 00000000..525330c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Окское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml
new file mode 100644
index 00000000..f9ff2517
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml
new file mode 100644
index 00000000..565ba402
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Natty Lite</beer><beer>Miller Lite</beer><beer>Coors Lite</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml
new file mode 100644
index 00000000..8da96336
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml
new file mode 100644
index 00000000..c409e54b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml
new file mode 100644
index 00000000..37465224
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Окское" type="экстра"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml
new file mode 100644
index 00000000..5a3907f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Ansible Brew" type="light"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.yml
new file mode 100644
index 00000000..fa1ddfca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.yml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer color="red">George Killian's Irish Red</beer>
+ <beer origin="CZ" color="blonde">Pilsner Urquell</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ <validxhtml validateon=""/>
+ </website>
+ <phonenumber>555-555-1234</phonenumber>
+ <owner dob="1976-04-12">
+ <name>
+ <last>Smith</last>
+ <first>John</first>
+ <middle>Q</middle>
+ </name>
+ </owner>
+ <website_bis>
+ <validxhtml validateon=""/>
+ </website_bis>
+ <testnormalelement>xml tag with no special characters</testnormalelement>
+ <test-with-dash>xml tag with dashes</test-with-dash>
+ <test-with-dash.and.dot>xml tag with dashes and dots</test-with-dash.and.dot>
+ <test-with.dash_and.dot_and-underscores>xml tag with dashes, dots and underscores</test-with.dash_and.dot_and-underscores>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml
new file mode 100644
index 00000000..3d27e8aa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml
new file mode 100644
index 00000000..f47909ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml
new file mode 100644
index 00000000..b5c38262
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml
new file mode 100644
index 00000000..8a621cf1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating>10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml
new file mode 100644
index 00000000..454d905c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml
new file mode 100644
index 00000000..732a0ed2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml
new file mode 100644
index 00000000..16df98e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml
new file mode 100644
index 00000000..de3bc3f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="нет">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml
new file mode 100644
index 00000000..143fe7bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="false">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml
new file mode 100644
index 00000000..0ef2b7e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer alcohol="0.5" name="90 Minute IPA"><Water liter="0.2" quantity="200g"/><Starch quantity="10g"/><Hops quantity="50g"/><Yeast quantity="20g"/></beer><beer alcohol="0.3" name="Harvest Pumpkin Ale"><Water liter="0.2" quantity="200g"/><Hops quantity="25g"/><Yeast quantity="20g"/></beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml
new file mode 100644
index 00000000..f19d5356
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Окское</beer><beer>Невское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml
new file mode 100644
index 00000000..be313a5a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>90 Minute IPA</beer><beer>Harvest Pumpkin Ale</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml
new file mode 100644
index 00000000..785beb64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address></address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml
new file mode 100644
index 00000000..734fe6db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">пять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>пять</rating></business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml
new file mode 100644
index 00000000..fc97ec3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">5</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>5</rating></business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml
new file mode 100644
index 00000000..44abda43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="false">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml
new file mode 100644
index 00000000..0cc8a79e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">11</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business> \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml
new file mode 100644
index 00000000..a5c6e87a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml
@@ -0,0 +1,73 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install lxml (FreeBSD)
+ package:
+ name: '{{ "py27-lxml" if ansible_python.version.major == 2 else "py36-lxml" }}'
+ state: present
+ when: ansible_os_family == "FreeBSD"
+
+# Needed for MacOSX !
+- name: Install lxml
+ pip:
+ name: lxml
+ state: present
+# when: ansible_os_family == "Darwin"
+
+- name: Get lxml version
+ command: "{{ ansible_python_interpreter }} -c 'from lxml import etree; print(\".\".join(str(v) for v in etree.LXML_VERSION))'"
+ register: lxml_version
+
+- name: Set lxml capabilities as variables
+ set_fact:
+ # NOTE: Some tests require predictable element attribute order,
+ # which is only guaranteed starting from lxml v3.0alpha1
+ lxml_predictable_attribute_order: '{{ lxml_version.stdout is version("3", ">=") }}'
+
+ # NOTE: The xml module requires at least lxml v2.3.0
+ lxml_xpath_attribute_result_attrname: '{{ lxml_version.stdout is version("2.3.0", ">=") }}'
+
+- name: Only run the tests when lxml v2.3.0+
+ when: lxml_xpath_attribute_result_attrname
+ block:
+
+ - include_tasks: test-add-children-elements.yml
+ - include_tasks: test-add-children-from-groupvars.yml
+ - include_tasks: test-add-children-insertafter.yml
+ - include_tasks: test-add-children-insertbefore.yml
+ - include_tasks: test-add-children-with-attributes.yml
+ - include_tasks: test-add-element-implicitly.yml
+ - include_tasks: test-count.yml
+ - include_tasks: test-mutually-exclusive-attributes.yml
+ - include_tasks: test-remove-attribute.yml
+ - include_tasks: test-remove-attribute-nochange.yml
+ - include_tasks: test-remove-element.yml
+ - include_tasks: test-remove-element-nochange.yml
+ - include_tasks: test-set-attribute-value.yml
+ - include_tasks: test-set-children-elements.yml
+ - include_tasks: test-set-children-elements-level.yml
+ - include_tasks: test-set-element-value.yml
+ - include_tasks: test-set-element-value-empty.yml
+ - include_tasks: test-pretty-print.yml
+ - include_tasks: test-pretty-print-only.yml
+ - include_tasks: test-add-namespaced-children-elements.yml
+ - include_tasks: test-remove-namespaced-attribute.yml
+ - include_tasks: test-remove-namespaced-attribute-nochange.yml
+ - include_tasks: test-set-namespaced-attribute-value.yml
+ - include_tasks: test-set-namespaced-element-value.yml
+ - include_tasks: test-set-namespaced-children-elements.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-xmlstring.yml
+ - include_tasks: test-children-elements-xml.yml
+
+ # Unicode tests
+ - include_tasks: test-add-children-elements-unicode.yml
+ - include_tasks: test-add-children-with-attributes-unicode.yml
+ - include_tasks: test-set-attribute-value-unicode.yml
+ - include_tasks: test-count-unicode.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-set-children-elements-unicode.yml
+ - include_tasks: test-set-element-value-unicode.yml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
new file mode 100644
index 00000000..8ad91501
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Окское
+ register: add_children_elements_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml
new file mode 100644
index 00000000..8d9b0686
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
new file mode 100644
index 00000000..e062de8d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children: '{{ bad_beers }}'
+ register: add_children_from_groupvars
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-from-groupvars.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_from_groupvars.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
new file mode 100644
index 00000000..2d42e2d5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertafter: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertafter
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertafter.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertafter.changed == true
+ - comparison.changed == false # identical
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
new file mode 100644
index 00000000..8550f12c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertbefore: yes
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: yes
+ register: add_children_insertbefore
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertbefore.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertbefore.changed == true
+ - comparison.changed == false # identical
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
new file mode 100644
index 00000000..d4a2329f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
@@ -0,0 +1,31 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Окское
+ type: экстра
+ register: add_children_with_attributes_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
new file mode 100644
index 00000000..91e92637
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
@@ -0,0 +1,35 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Ansible Brew
+ type: light
+ register: add_children_with_attributes
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # NOTE: This test may fail if lxml does not support predictable element attribute order
+ # So we filter the failure out for these platforms (e.g. CentOS 6)
+ # The module still works fine; we simply are not comparing as smartly as we should.
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes.changed == true
+ - comparison.changed == false # identical
+ when: lxml_predictable_attribute_order
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
new file mode 100644
index 00000000..db674ba4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
@@ -0,0 +1,237 @@
+---
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+
+
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/last
+ value: Smith
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/first
+ value: John
+
+- name: Add a validxhtml element to the website element. Note that ensure defaults to present; value defaults to null for elements because, if one is not specified, the module would not know what to set.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml/@validateon
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website_bis/validxhtml/@validateon
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/@dob='1976-04-12'
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer/text()="George Killian's Irish Red" # note the apostrophe inside the XPath string
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="George Killian's Irish Red"]/@color='red'
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="Pilsner Urquell" and @origin='CZ']/@color='blonde'
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name[first/text()='John']/middle
+ value: Q
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
+
+- name: Compare to expected result
+ copy:
+ src: results/test-add-element-implicitly.yml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-element-implicitly.yml /tmp/ansible-xml-beers-implicit.xml
+
+
+# Now we repeat the same, just to ensure proper use of namespaces
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:phonenumber
+ value: 555-555-1234
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:last
+ value: Smith
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:first
+ value: John
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add a validxhtml element to the website element. Note that 'ensure' defaults to 'present', and 'value' defaults to null for elements, so omitting it simply creates an empty element.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website_bis/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/@a:dob='1976-04-12'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer/text()="George Killian's Irish Red" # note the apostrophe inside the XPath string
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="George Killian's Irish Red"]/@a:color='red'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="Pilsner Urquell" and @a:origin='CZ']/@a:color='blonde'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name[a:first/text()='John']/a:middle
+ value: Q
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: yes
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: yes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
new file mode 100644
index 00000000..25eca47f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Add namespaced child element
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_namespaced_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-namespaced-children-elements.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_namespaced_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
new file mode 100644
index 00000000..e63100c4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element with xml format
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ input_type: xml
+ add_children:
+ - '<beer>Old Rasputin</beer>'
+ register: children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml
new file mode 100644
index 00000000..47a806bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml
new file mode 100644
index 00000000..cbc97e32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml
@@ -0,0 +1,19 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers/beer
+ count: yes
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers.changed == false
+ - beers.count == 3
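+
+ # Illustrative shape of the registered result, inferred from the assertions above:
+ # {"changed": false, "count": 3}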
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
new file mode 100644
index 00000000..73ae9667
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
@@ -0,0 +1,32 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == 'десять'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml
new file mode 100644
index 00000000..29a42300
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml
@@ -0,0 +1,52 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute.changed == false
+ - get_element_attribute.matches[0]['rating'] is defined
+ - get_element_attribute.matches[0]['rating']['subjective'] == 'true'
+
+ # TODO: Remove this in 2.0.0, when this deprecated (incorrect) use of 'attribute' is dropped
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ attribute: subjective
+ register: get_element_attribute_wrong
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute_wrong.changed == false
+ - get_element_attribute_wrong.matches[0]['rating'] is defined
+ - get_element_attribute_wrong.matches[0]['rating']['subjective'] == 'true'
+ - get_element_attribute_wrong.deprecations is defined
+ - get_element_attribute_wrong.deprecations[0].msg == "Parameter 'attribute=subjective' is ignored when using 'content=attribute' only 'xpath' is used. Please remove entry."
+ - get_element_attribute_wrong.deprecations[0].version == '2.0.0'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text.changed == false
+ - get_element_text.matches[0]['rating'] == '10'
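+
+ # Illustrative shape of the registered results, inferred from the assertions above:
+ # content=attribute -> matches: [{"rating": {"subjective": "true"}}]
+ # content=text      -> matches: [{"rating": "10"}]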
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
new file mode 100644
index 00000000..3f24b0ac
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
@@ -0,0 +1,22 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Specify both children to add and a value
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ add_children:
+ - child01
+ - child02
+ value: conflict!
+ register: module_output
+ ignore_errors: yes
+
+ - name: Test expected result
+ assert:
+ that:
+ - module_output.changed == false
+ - module_output.failed == true
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
new file mode 100644
index 00000000..7c0f7d5f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml.orig
+
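+ # Strip the leading indentation first, so that pretty_print below actually has
+ # re-indentation work to do rather than finding the file already formatted.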
+ - name: Remove spaces from test fixture
+ shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
+
+ - name: Pretty print without modification
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ pretty_print: yes
+ register: pretty_print_only
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print-only.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print_only.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml
new file mode 100644
index 00000000..88b618b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml
@@ -0,0 +1,30 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Pretty print
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: pretty_print
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
new file mode 100644
index 00000000..d09dee40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove non-existing '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml
new file mode 100644
index 00000000..9aa395e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
new file mode 100644
index 00000000..2debc80d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove non-existing '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml
new file mode 100644
index 00000000..f2e20ea2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
new file mode 100644
index 00000000..291536d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_attribute.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
new file mode 100644
index 00000000..a7ccdac4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_attribute.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
new file mode 100644
index 00000000..b1938e45
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove non-existing namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
new file mode 100644
index 00000000..be78af68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
@@ -0,0 +1,33 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
new file mode 100644
index 00000000..dabf72a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'нет'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: нет
+ register: set_attribute_value_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
new file mode 100644
index 00000000..2aa39fe2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
@@ -0,0 +1,29 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: 'false'
+ register: set_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
new file mode 100644
index 00000000..3e2c0adb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
@@ -0,0 +1,74 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Starch:
+ quantity: 10g
+ - Hops:
+ quantity: 50g
+ - Yeast:
+ quantity: 20g
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Hops:
+ quantity: 25g
+ - Yeast:
+ quantity: 20g
+ register: set_children_elements_level
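+
+ # In the nested form above, plain keys of a child mapping (alcohol, name) become
+ # attributes of the <beer> element, while the special '_' key holds its own child
+ # elements -- one reading of the module's set_children/add_children syntax.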
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_level.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+
+
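+ # Re-applying the same children via the YAML alias (*children) exercises idempotency:
+ # the second run is expected to report no change.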
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
new file mode 100644
index 00000000..240b894a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
@@ -0,0 +1,46 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: Окское
+ - beer: Невское
+ register: set_children_elements_unicode
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_unicode.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml
new file mode 100644
index 00000000..7b0f3247
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml
@@ -0,0 +1,53 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
new file mode 100644
index 00000000..5814803c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
@@ -0,0 +1,28 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/website/address' to empty string.
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/website/address
+ value: ''
+ register: set_element_value_empty
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-empty.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_value_empty.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
new file mode 100644
index 00000000..c3a40b7d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: пять
+
+ - name: Set '/business/rating' to 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to 'пять'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml
new file mode 100644
index 00000000..dbd070f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml
@@ -0,0 +1,43 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: '5'
+
+ - name: Set '/business/rating' to '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to '5'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
new file mode 100644
index 00000000..e0086efe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
@@ -0,0 +1,34 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ attribute: attr:subjective
+ value: 'false'
+ register: set_namespaced_attribute_value
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-attribute-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_namespaced_attribute_value.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
new file mode 100644
index 00000000..8e66e70e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
@@ -0,0 +1,57 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-xml.xml
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_first
+
+ - name: Copy state after first set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-xml.xml
+ dest: /tmp/ansible-xml-namespaced-beers-1.xml
+ remote_src: yes
+
+ - name: Set child elements again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_again
+
+ - name: Copy state after second set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-xml.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: yes
+
+ - name: Compare to expected result
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-1.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: yes
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again.changed == false # idempotency
+ - set_children_first.changed == true # first run modified the file
+ - comparison.changed == false # identical
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
new file mode 100644
index 00000000..f77d7537
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
@@ -0,0 +1,46 @@
+---
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_first_run
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11' again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_second_run
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-element-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: yes
+ diff: yes
+ register: comparison
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run.changed == true
+ - set_element_second_run.changed == false
+ - comparison.changed == false # identical
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml
new file mode 100644
index 00000000..4620d984
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml
@@ -0,0 +1,81 @@
+---
+ - name: Copy expected results to remote
+ copy:
+ src: "results/{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - test-pretty-print.xml
+ - test-pretty-print-only.xml
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (not using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: .
+ register: xmlresponse
+
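+ # A trailing newline is re-appended to the compared content below because the
+ # file lookup above stripped it (see the NOTE about Jinja2 templating).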
+ - name: Compare to expected result
+ copy:
+ content: "{{ xmlresponse.xmlstring }}\n"
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == false
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ pretty_print: yes
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse.xmlstring }}'
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: /business/beers
+ pretty_print: yes
+ add_children:
+ - beer: Old Rasputin
+ register: xmlresponse_modification
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse_modification.xmlstring }}'
+ dest: '/tmp/test-pretty-print.xml'
+ check_mode: yes
+ diff: yes
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse_modification.changed == true
+ - comparison.changed == false # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml
new file mode 100644
index 00000000..7c5675bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml
@@ -0,0 +1,6 @@
+# -*- mode: yaml -*-
+---
+bad_beers:
+- beer: "Natty Lite"
+- beer: "Miller Lite"
+- beer: "Coors Lite"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/aliases
new file mode 100644
index 00000000..6cd621d4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/aliases
@@ -0,0 +1,4 @@
+shippable/posix/group1
+destructive
+skip/aix
+skip/freebsd
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml
new file mode 100644
index 00000000..392c3590
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml
@@ -0,0 +1,3 @@
+dependencies:
+ - setup_pkg_mgr
+ - setup_gnutar
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml
new file mode 100644
index 00000000..830539f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml
@@ -0,0 +1,33 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Yarn package manager integration tests
+# (c) 2018 David Gunter, <david.gunter@tivix.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# ============================================================
+
+- include: run.yml
+ vars:
+ nodejs_version: '{{ item.node_version }}'
+ nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+ yarn_version: '{{ item.yarn_version }}'
+ with_items:
+ - {node_version: 4.8.0, yarn_version: 1.6.0} # Lowest compatible nodejs version
+ - {node_version: 8.0.0, yarn_version: 1.6.0} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml
new file mode 100644
index 00000000..bd17d7ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml
@@ -0,0 +1,132 @@
+- name: 'Create directory for Node'
+ file:
+ path: /usr/local/lib/nodejs
+ state: directory
+
+- name: 'Download Nodejs'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/{{ nodejs_path }}.tar.gz'
+ dest: '{{ output_dir }}'
+ remote_src: yes
+ creates: '{{ output_dir }}/{{ nodejs_path }}.tar.gz'
+
+- name: 'Download Yarn'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/yarn-v{{yarn_version}}.tar.gz'
+ dest: '{{ output_dir }}'
+ remote_src: yes
+ creates: '{{ output_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz'
+
+- name: 'Copy node to directory created earlier'
+ command: "mv {{ output_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}"
+
+# Clean up before running tests
+- name: Remove any previous Nodejs modules
+ file:
+ path: '{{output_dir}}/node_modules'
+ state: absent
+
+# Set vars for our test harness
+- vars:
+ #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin"
+ node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin"
+ yarn_bin_path: "{{ output_dir }}/yarn-v{{ yarn_version }}/bin"
+ package: 'iconv-lite'
+ environment:
+ PATH: "{{ node_bin_path }}:{{ansible_env.PATH}}"
+ block:
+
+ # Get the version of Yarn and register to a variable
+ - shell: '{{ yarn_bin_path }}/yarn --version'
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_version
+
+ - name: 'Create dummy package.json'
+ copy:
+ src: templates/package.j2
+ dest: '{{ output_dir }}/package.json'
+
+ - name: 'Install all packages.'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+
+ - name: 'Install the same package from package.json again.'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: '{{ package }}'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_install
+
+ - assert:
+ that:
+ - not (yarn_install is changed)
+
+ - name: 'Install all packages in check mode.'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ check_mode: true
+ register: yarn_install_check
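+
+ # In check mode the module presumably does not invoke yarn, so the registered
+ # 'out' and 'err' keys are expected to be present but empty (None), as asserted below.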
+
+ - name: Verify yarn installation in check mode
+ assert:
+ that:
+ - yarn_install_check.err is defined
+ - yarn_install_check.out is defined
+ - yarn_install_check.err is none
+ - yarn_install_check.out is none
+
+ - name: 'Install package with explicit version (older version of package)'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ version: 1.1.0
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_install_old_package
+
+ - assert:
+ that:
+ - yarn_install_old_package is changed
+
+ - name: 'Upgrade old package'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ state: latest
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_update_old_package
+
+ - assert:
+ that:
+ - yarn_update_old_package is changed
+
+ - name: 'Remove a package'
+ yarn:
+ path: '{{ output_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: '{{ package }}'
+ state: absent
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_uninstall_package
+
+ - name: 'Assert package removed'
+ assert:
+ that:
+ - yarn_uninstall_package is changed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2
new file mode 100644
index 00000000..8ca73aa8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2
@@ -0,0 +1,9 @@
+{
+ "name": "ansible-yarn-testing",
+ "version": "1.0.0",
+ "license": "MIT",
+ "dependencies": {
+ "iconv-lite": "^0.4.21",
+ "@types/node": "^12.0.0"
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/aliases
new file mode 100644
index 00000000..817614d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec
new file mode 100644
index 00000000..044ea3a5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec
@@ -0,0 +1,12 @@
+Summary: Empty RPM
+Name: empty
+Version: 1
+Release: 0
+License: GPLv3
+Group: Applications/System
+BuildArch: noarch
+
+%description
+Empty RPM
+
+%files
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml
new file mode 100644
index 00000000..b0181e69
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml
@@ -0,0 +1,30 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the zypper module
+#
+# (c) 2015, Guido Günther <agx@sigxcpu.org>
+#
+# heavily based on the yum tests which are
+#
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include: 'zypper.yml'
+ when: ansible_os_family == 'Suse'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml
new file mode 100644
index 00000000..eed27ca3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml
@@ -0,0 +1,525 @@
+- name: get hello package version
+ shell: zypper --xmlout se -svx hello | grep 'name="hello"' | grep 'repository="Main Repository"' | sed 's/.*edition="\([^ ]*\)".*/\1/'
+ register: hello_version
+
+- name: set URL of test package
+ set_fact:
+ hello_package_url: https://download.opensuse.org/distribution/leap/{{ ansible_distribution_version }}/repo/oss/x86_64/hello-{{ hello_version.stdout }}.x86_64.rpm
+
+- debug: var=hello_package_url
+
+# UNINSTALL
+- name: uninstall hello
+ zypper:
+ name: hello
+ state: removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify uninstallation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "rpm_result.rc == 1"
+
+# UNINSTALL AGAIN
+- name: uninstall hello again
+ zypper:
+ name: hello
+ state: removed
+ register: zypper_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# INSTALL
+- name: install hello
+ zypper:
+ name: hello
+ state: present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+# INSTALL AGAIN
+- name: install hello again
+ zypper:
+ name: hello
+ state: present
+ register: zypper_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# Multiple packages
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: False
+ register: rpm_metamail_result
+
+- name: verify packages uninstalled
+ assert:
+ that:
+ - "rpm_hello_result.rc != 0"
+ - "rpm_metamail_result.rc != 0"
+
+- name: install hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: False
+ register: rpm_metamail_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_hello_result.rc == 0"
+ - "rpm_metamail_result.rc == 0"
+
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+
+# INSTALL nonexistent package
+- name: install nonexistent package
+ zypper:
+ name: doesnotexist
+ state: present
+ register: zypper_result
+ ignore_errors: yes
+
+- name: verify package installation failed
+ assert:
+ that:
+ - "zypper_result.rc == 104"
+ - "zypper_result.msg.startswith('No provider of')"
+
+# INSTALL broken local package
+- name: create directory
+ file:
+ path: "{{output_dir | expanduser}}/zypper1"
+ state: directory
+
+- name: fake rpm package
+ file:
+ path: "{{output_dir | expanduser}}/zypper1/broken.rpm"
+ state: touch
+
+- name: install broken rpm
+ zypper:
+ name: "{{output_dir | expanduser}}/zypper1/broken.rpm"
+ state: present
+ register: zypper_result
+ ignore_errors: yes
+
+- debug: var=zypper_result
+
+- name: verify we failed installation of broken rpm
+ assert:
+ that:
+ - "zypper_result.rc == 3"
+ - "'Problem reading the RPM header' in zypper_result.stdout"
+
+# Build and install an empty rpm
+- name: uninstall empty
+ zypper:
+ name: empty
+ state: removed
+
+- name: install rpmbuild
+ zypper:
+ name: rpmbuild
+ state: present
+
+- name: clean zypper RPM cache
+ file:
+ name: /var/cache/zypper/RPMS
+ state: absent
+
+- name: create directory
+ file:
+ path: "{{output_dir | expanduser}}/zypper2"
+ state: directory
+
+- name: copy spec file
+ copy:
+ src: empty.spec
+ dest: "{{ output_dir | expanduser }}/zypper2/empty.spec"
+
+- name: build rpm
+ command: |
+ rpmbuild -bb \
+ --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build"
+ --define "_builddir %{_topdir}" \
+ --define "_rpmdir %{_topdir}" \
+ --define "_srcrpmdir %{_topdir}" \
+ --define "_specdir {{output_dir | expanduser}}/zypper2" \
+ --define "_sourcedir %{_topdir}" \
+ {{ output_dir }}/zypper2/empty.spec
+ register: rpm_build_result
+
+- name: install empty rpm
+ zypper:
+ name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm"
+ disable_gpg_check: yes
+ register: zypper_result
+
+- name: check empty with rpm
+ shell: rpm -q empty
+ failed_when: False
+ register: rpm_result
+
+- name: verify installation of empty
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: uninstall empty
+ zypper:
+ name: empty
+ state: removed
+
+- name: extract from rpm
+ zypper:
+ name: "{{ output_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm"
+ state: installed
+ disable_gpg_check: yes
+ extra_args_precommand: --root {{ output_dir | expanduser }}/testdir/
+
+- name: check that the var directory exists
+ stat: path={{ output_dir | expanduser }}/testdir/var
+ register: stat_result
+
+- name: check that the rpm package was extracted into the testdir folder and the var directory exists
+ assert:
+ that:
+ - "stat_result.stat.exists == true"
+
+
+# test simultaneous remove and install using +- prefixes
+
+- name: install hello to prep next task
+ zypper:
+ name: hello
+ state: present
+
+- name: remove metamail to prep next task
+ zypper:
+ name: metamail
+ state: absent
+
+- name: install and remove in the same run, with +- prefix
+ zypper:
+ name:
+ - -hello
+ - +metamail
+ state: present
+ register: zypper_res1
+
+- name: install and remove again, leave out plus
+ zypper:
+ name:
+ - metamail
+ - -hello
+ state: present
+ register: zypper_res1a
+
+- name: install and remove swapped
+ zypper:
+ name:
+ - -metamail
+ - hello
+ state: present
+ register: zypper_res1b
+
+- name: remove metamail
+ zypper:
+ name: metamail
+ state: absent
+ register: zypper_res2
+
+- name: install hello
+ zypper:
+ name: hello
+ state: present
+ register: zypper_res3
+
+- name: verify simultaneous install/remove worked
+ assert:
+ that:
+ - zypper_res1 is successful
+ - zypper_res1 is changed
+ - zypper_res1a is not changed
+ - zypper_res1b is changed
+ - zypper_res2 is not changed
+ - zypper_res3 is not changed
+
+
+- name: install and remove with state=absent
+ zypper:
+ name:
+ - metamail
+ - +hello
+ state: absent
+ register: zypper_res
+ ignore_errors: yes
+
+- name: verify simultaneous install/remove failed with absent
+ assert:
+ that:
+ - zypper_res is failed
+ - zypper_res.msg == "Can not combine '+' prefix with state=remove/absent."
+
+- name: try rm patch
+ zypper:
+ name: openSUSE-2016-128
+ type: patch
+ state: absent
+ ignore_errors: yes
+ register: zypper_patch
+- assert:
+ that:
+ - zypper_patch is failed
+ - zypper_patch.msg.startswith('Can not remove patches.')
+
+- name: try rm URL
+ zypper:
+ name: "{{ hello_package_url }}"
+ state: absent
+ ignore_errors: yes
+ register: zypper_rm
+- assert:
+ that:
+ - zypper_rm is failed
+ - zypper_rm.msg.startswith('Can not remove via URL.')
+
+- name: remove pattern update_test
+ zypper:
+ name: update_test
+ type: pattern
+ state: absent
+
+- name: install pattern update_test
+ zypper:
+ name: update_test
+ type: pattern
+ state: present
+ register: zypper_install_pattern1
+
+- name: install pattern update_test again
+ zypper:
+ name: update_test
+ type: pattern
+ state: present
+ register: zypper_install_pattern2
+
+- assert:
+ that:
+ - zypper_install_pattern1 is changed
+ - zypper_install_pattern2 is not changed
+
+- name: remove hello
+ zypper:
+ name: hello
+ state: absent
+
+- name: install via URL
+ zypper:
+ state: present
+ name: "{{ hello_package_url }}"
+ register: zypperin1
+
+- name: test install
+ zypper:
+ name: hello
+ state: present
+ register: zypperin2
+
+- assert:
+ that:
+ - zypperin1 is succeeded
+ - zypperin1 is changed
+ - zypperin2 is not changed
+
+# check for https://github.com/ansible/ansible/issues/20139
+- name: run updatecache
+ zypper:
+ name: hello
+ state: present
+ update_cache: True
+ register: zypper_result_update_cache
+
+- name: run updatecache in check mode
+ zypper:
+ name: hello
+ state: present
+ update_cache: True
+ check_mode: True
+ register: zypper_result_update_cache_check
+
+
+- assert:
+ that:
+ - zypper_result_update_cache is successful
+ - zypper_result_update_cache_check is successful
+ - zypper_result_update_cache_check is not changed
+
+- name: ensure no previous netcat package still exists
+ zypper:
+ name:
+ - netcat-openbsd
+ - gnu-netcat
+ state: absent
+
+- name: install netcat-openbsd which conflicts with gnu-netcat
+ zypper:
+ name: netcat-openbsd
+ state: present
+
+- name: try installation of gnu-netcat which should fail due to the conflict
+ zypper:
+ name: gnu-netcat
+ state: present
+ ignore_errors: yes
+ register: zypper_pkg_conflict
+
+- assert:
+ that:
+ - zypper_pkg_conflict is failed
+ - "'conflicts with netcat-openbsd provided' in zypper_pkg_conflict.stdout"
+
+- name: retry installation of gnu-netcat with force_resolution set to choose a resolution
+ zypper:
+ name: gnu-netcat
+ state: present
+ force_resolution: True
+
+- name: duplicate rpms block
+ vars:
+ looplist:
+ - 1
+ - 2
+ block:
+ - name: Deploy spec files to build 2 packages with duplicate files.
+ template:
+ src: duplicate.spec.j2
+ dest: "{{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec"
+ loop: "{{ looplist }}"
+
+ - name: build rpms with duplicate files
+ command: |
+ rpmbuild -bb \
+ --define "_topdir {{output_dir | expanduser }}/zypper2/rpm-build"
+ --define "_builddir %{_topdir}" \
+ --define "_rpmdir %{_topdir}" \
+ --define "_srcrpmdir %{_topdir}" \
+ --define "_specdir {{output_dir | expanduser}}/zypper2" \
+ --define "_sourcedir %{_topdir}" \
+ {{ output_dir | expanduser }}/zypper2/duplicate{{ item }}.spec
+ loop: "{{ looplist }}"
+
+ - name: install duplicate rpms
+ zypper:
+ name: >-
+ {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm
+ disable_gpg_check: true
+ ignore_errors: true
+ register: zypper_duplicate_result
+ loop: "{{ looplist }}"
+
+ - name: Read in duplicate file contents
+ slurp:
+ src: /usr/lib/duplicate/duplicate.txt
+ register: duplicate_out
+
+ - name: Check failure when installing rpms with duplicate files without replacefiles option
+ assert:
+ that:
+ - zypper_duplicate_result.results[0] is successful
+ - zypper_duplicate_result.results[1] is failed
+ - '"fileconflict" in zypper_duplicate_result.results[1].stdout'
+ - '"/usr/lib/duplicate/duplicate.txt" in zypper_duplicate_result.results[1].stdout'
+ - '"duplicate1" in duplicate_out.content | b64decode'
+
+ - name: install duplicate rpms
+ zypper:
+ name: >-
+ {{ output_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm
+ disable_gpg_check: true
+ replacefiles: true
+ ignore_errors: true
+ register: zypper_duplicate_result
+ loop: "{{ looplist }}"
+
+ - name: Read in duplicate file contents
+ slurp:
+ src: /usr/lib/duplicate/duplicate.txt
+ register: duplicate_out
+
+ - name: Check success installing rpms with duplicate files using replacefiles option
+ assert:
+ that:
+ - zypper_duplicate_result is successful
+ - zypper_duplicate_result is changed
+ - '"duplicate2" in duplicate_out.content | b64decode'
+
+ - name: Remove installed duplicate rpms
+ zypper:
+ name: "duplicate{{ item }}-1-0"
+ state: absent
+ loop: "{{ looplist }}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2 b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2
new file mode 100644
index 00000000..9d1dd56a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2
@@ -0,0 +1,18 @@
+Summary: Duplicate{{ item }} RPM. Installs one file that is a duplicate of other Duplicate# RPMs
+Name: duplicate{{ item }}
+Version: 1
+Release: 0
+License: GPLv3
+Group: Applications/System
+BuildArch: noarch
+
+%description
+Duplicate {{ item }} RPM. Packages one file that is a duplicate of other Duplicate RPM contents.
+This is only for testing of the replacefiles zypper option.
+
+%install
+mkdir -p "%{buildroot}/usr/lib/duplicate"
+echo "%{name}" > "%{buildroot}/usr/lib/duplicate/duplicate.txt"
+
+%files
+/usr/lib/duplicate/duplicate.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases
new file mode 100644
index 00000000..817614d6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases
@@ -0,0 +1,7 @@
+destructive
+shippable/posix/group1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml
new file mode 100644
index 00000000..07faa217
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml
new file mode 100644
index 00000000..bbba3bcc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml
@@ -0,0 +1,26 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the zypper repository module
+#
+# (c) 2016, Guido Günther <agx@sigxcpu.org>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include: 'test.yml'
+ when: ansible_os_family == 'Suse'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml
new file mode 100644
index 00000000..e2b2f847
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml
@@ -0,0 +1,33 @@
+- name: collect repo configuration before test
+ shell: "grep . /etc/zypp/repos.d/*"
+ register: before
+
+- name: ensure zypper ref works
+ command: zypper -n ref
+
+- block:
+ - include: 'zypper_repository.yml'
+ always:
+ - name: remove repositories added during test
+ community.general.zypper_repository:
+ name: "{{item}}"
+ state: absent
+ with_items:
+ - chrome1
+ - chrome2
+ - test
+ - testrefresh
+ - testprio
+ - Apache_PHP_Modules
+
+ - name: collect repo configuration after test
+ shell: "grep . /etc/zypp/repos.d/*"
+ register: after
+
+ - name: verify repo configuration has been restored
+ assert:
+ that:
+ - before.stdout == after.stdout
+
+ - name: ensure zypper ref still works
+ command: zypper -n ref
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
new file mode 100644
index 00000000..0290fa4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
@@ -0,0 +1,127 @@
+- name: Delete test repo
+ community.general.zypper_repository:
+ name: test
+ state: absent
+ register: zypper_result
+
+- name: Add test repo
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ register: zypper_result
+
+- name: verify repo addition
+ assert:
+ that:
+ - "zypper_result.changed"
+
+- name: Add same repo again
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ register: zypper_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+- name: Change repo URL
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+ register: zypper_result
+
+- name: verify change when only the URL changes
+ assert:
+ that:
+ - "zypper_result.changed"
+
+- name: use refresh option
+ community.general.zypper_repository:
+ name: testrefresh
+ refresh: no
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+
+- name: check refresh option
+ command: zypper -x lr testrefresh
+ register: zypper_result
+
+- assert:
+ that:
+ - '"autorefresh=\"0\"" in zypper_result.stdout'
+
+- name: set repo priority
+ community.general.zypper_repository:
+ name: testprio
+ priority: 55
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+
+- name: check priority option
+ command: zypper -x lr testprio
+ register: zypper_result
+
+- assert:
+ that:
+ - '"priority=\"55\"" in zypper_result.stdout'
+
+- name: add two repos with same url
+ community.general.zypper_repository:
+ name: "{{item}}"
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ with_items:
+ - chrome1
+ - chrome2
+
+- name: check old repo with the same url was removed
+ command: zypper lr chrome1
+ register: zypper_result1
+ ignore_errors: yes
+
+- name: check repo is updated by url
+ command: zypper lr chrome2
+ register: zypper_result2
+
+- assert:
+ that:
+ - "zypper_result1.rc != 0"
+ - "'not found' in zypper_result1.stderr"
+ - "zypper_result2.rc == 0"
+ - "'http://dl.google.com/linux/chrome/rpm/stable/x86_64' in zypper_result2.stdout"
+
+- name: add two repos with same name
+ community.general.zypper_repository:
+ name: samename
+ state: present
+ repo: "{{ item }}"
+ with_items:
+ - http://download.opensuse.org/repositories/science/openSUSE_Leap_{{ ansible_distribution_version }}/
+ - http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/
+
+- name: check repo is updated by name
+ command: zypper lr samename
+ register: zypper_result
+
+- assert:
+ that:
+ - "'/science/' not in zypper_result.stdout"
+ - "'/devel:/languages:/ruby/' in zypper_result.stdout"
+
+- name: remove last added repo (by URL, to test removal by URL)
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/
+ state: absent
+
+- name: "Test adding a repo with custom GPG key"
+ community.general.zypper_repository:
+ name: "Apache_PHP_Modules"
+ repo: "http://download.opensuse.org/repositories/server:/php:/applications/openSUSE_Tumbleweed/"
+ priority: 100
+ auto_import_keys: true
+ state: "present"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/requirements.yml b/collections-debian-merged/ansible_collections/community/general/tests/requirements.yml
new file mode 100644
index 00000000..71491e70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/requirements.yml
@@ -0,0 +1,10 @@
+integration_tests_dependencies:
+- ansible.netcommon
+- ansible.posix
+- community.crypto
+- community.kubernetes
+unit_tests_dependencies:
+- ansible.netcommon
+- community.internal_test_tools
+- community.kubernetes
+- google.cloud
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json b/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json
new file mode 100644
index 00000000..c789a7fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "plugins/"
+ ],
+ "output": "path-message"
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py b/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py
new file mode 100755
index 00000000..49806f2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = (
+ '.cs',
+ '.ps1',
+ '.psm1',
+ '.py',
+ )
+
+ skip_paths = set([
+ ])
+
+ skip_directories = (
+ )
+
+ for path in paths:
+ if path in skip_paths:
+ continue
+
+ if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.10.txt b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..4c775d54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,562 @@
+plugins/callback/hipchat.py pylint:blacklisted-name
+plugins/connection/lxc.py pylint:blacklisted-name
+plugins/module_utils/compat/ipaddress.py no-assert
+plugins/module_utils/compat/ipaddress.py no-unicode-literals
+plugins/module_utils/_mount.py future-import-boilerplate
+plugins/module_utils/_mount.py metaclass-boilerplate
+plugins/modules/cloud/centurylink/clc_alert_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_blueprint_package.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_firewall_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_loadbalancer.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_modify_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_publicip.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_server_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/docker/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+plugins/modules/cloud/google/gcdns_record.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce.py yamllint:unparsable-with-libyaml
+plugins/modules/cloud/google/gce_eip.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_eip.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_img.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/cloud/google/gce_instance_template.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_labels.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_lb.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_mig.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_mig.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_net.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_net.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_pd.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_pd.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_tag.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_tag.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_backend_service.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_backend_service.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_forwarding_rule.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_healthcheck.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_healthcheck.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_target_proxy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_url_map.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcpubsub.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcpubsub_info.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:doc-missing-type
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:return-syntax-error
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_template.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_template.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter
+plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name
+plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path
+plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/cloud/lxd/lxd_container.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_private_network.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oracle/oci_vcn.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter
+plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function
+plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter
+plugins/modules/cloud/pubnub/pubnub_blocks.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed
+plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type
+plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter
+plugins/modules/cloud/rackspace/rax_cdb_user.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path
+plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed
+plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_compute.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type
+plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter
+plugins/modules/cloud/softlayer/sl_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter
+plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter
+plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul_acl.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/znode.py validate-modules:doc-missing-type
+plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/aerospike/aerospike_migrations.py yamllint:unparsable-with-libyaml
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/kibana_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/kibana_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/database/misc/riak.py validate-modules:doc-missing-type
+plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_db.py use-argspec-type-path
+plugins/modules/database/postgresql/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/database/postgresql/postgresql_privs.py validate-modules:parameter-documented-multiple-times
+plugins/modules/database/postgresql/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
+plugins/modules/database/postgresql/postgresql_user.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter
+plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/files/xml.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/keycloak/keycloak_client.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/onepassword_info.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc
+plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name
+plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name
+plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter
+plugins/modules/monitoring/librato_annotation.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/monitoring/pagerduty.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name
+plugins/modules/monitoring/statusio_maintenance.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/dnsimple.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_member.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_member.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nios/nios_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_member.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-required-mismatch
+plugins/modules/net_tools/nios/nios_network.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-required-mismatch
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:missing-suboption-docs
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nsupdate.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/omapi_host.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name
+plugins/modules/notification/grove.py validate-modules:invalid-argument-name
+plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented
+plugins/modules/notification/mail.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/nexmo.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/office_365_connector_card.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter
+plugins/modules/notification/rocketchat.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/sendgrid.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/slack.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/twilio.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid
+plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc
+plugins/modules/packaging/language/maven_artifact.py validate-modules:undocumented-parameter
+plugins/modules/packaging/language/pip_package_info.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:mutually_exclusive-unknown
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
+plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/dellemc/ome_device_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/foreman/foreman.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/foreman/katello.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/foreman/katello.py yamllint:unparsable-with-libyaml
+plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type
+plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/redfish/idrac_redfish_command.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/idrac_redfish_config.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/idrac_redfish_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_command.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_config.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter
+plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/git_config.py validate-modules:doc-missing-type
+plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_hooks.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/gitlab/gitlab_runner.py validate-modules:parameter-list-no-elements
+plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:incompatible-default-type
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:no-default-for-required-parameter
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:undocumented-parameter
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_check_connections.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:parameter-invalid
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:undocumented-parameter
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/alternatives.py pylint:blacklisted-name
+plugins/modules/system/beadm.py pylint:blacklisted-name
+plugins/modules/system/cronvar.py pylint:blacklisted-name
+plugins/modules/system/dconf.py pylint:blacklisted-name
+plugins/modules/system/filesystem.py pylint:blacklisted-name
+plugins/modules/system/gconftool2.py pylint:blacklisted-name
+plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/interfaces_file.py pylint:blacklisted-name
+plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter
+plugins/modules/system/java_cert.py pylint:blacklisted-name
+plugins/modules/system/launchd.py use-argspec-type-path # False positive
+plugins/modules/system/lvg.py pylint:blacklisted-name
+plugins/modules/system/lvol.py pylint:blacklisted-name
+plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/parted.py pylint:blacklisted-name
+plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/puppet.py use-argspec-type-path
+plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented
+plugins/modules/system/puppet.py validate-modules:parameter-invalid
+plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/puppet.py validate-modules:undocumented-parameter
+plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/runit.py validate-modules:undocumented-parameter
+plugins/modules/system/timezone.py pylint:blacklisted-name
+plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/xfconf.py validate-modules:return-syntax-error
+plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path
+plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name
+scripts/inventory/gce.py pylint:blacklisted-name
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py future-import-boilerplate
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py metaclass-boilerplate
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.11.txt b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.11.txt
new file mode 100644
index 00000000..4c775d54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,562 @@
+plugins/callback/hipchat.py pylint:blacklisted-name
+plugins/connection/lxc.py pylint:blacklisted-name
+plugins/module_utils/compat/ipaddress.py no-assert
+plugins/module_utils/compat/ipaddress.py no-unicode-literals
+plugins/module_utils/_mount.py future-import-boilerplate
+plugins/module_utils/_mount.py metaclass-boilerplate
+plugins/modules/cloud/centurylink/clc_alert_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_blueprint_package.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_firewall_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_loadbalancer.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_modify_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_publicip.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/centurylink/clc_server_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/docker/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+plugins/modules/cloud/google/gcdns_record.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce.py yamllint:unparsable-with-libyaml
+plugins/modules/cloud/google/gce_eip.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_eip.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_img.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/cloud/google/gce_instance_template.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_labels.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_lb.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_mig.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_mig.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_net.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_net.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_pd.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_pd.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_snapshot.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gce_tag.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_tag.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_backend_service.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_backend_service.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_forwarding_rule.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_healthcheck.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_healthcheck.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_target_proxy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcp_url_map.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcpubsub.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/google/gcpubsub_info.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/heroku/heroku_collaborator.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:doc-missing-type
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:return-syntax-error
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_template.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_template.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:mutually_exclusive-unknown
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter
+plugins/modules/cloud/linode/linode_v4.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name
+plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path
+plugins/modules/cloud/lxc/lxc_container.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/cloud/lxd/lxd_container.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/cloud/lxd/lxd_container.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/misc/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/oneandone/oneandone_firewall_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_load_balancer.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_private_network.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oneandone/oneandone_server.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/opennebula/one_host.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/opennebula/one_image_info.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/opennebula/one_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/oracle/oci_vcn.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter
+plugins/modules/cloud/packet/packet_volume_attachment.py pylint:ansible-bad-function
+plugins/modules/cloud/profitbricks/profitbricks.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter
+plugins/modules/cloud/pubnub/pubnub_blocks.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax.py use-argspec-type-path # fix needed
+plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type
+plugins/modules/cloud/rackspace/rax.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter
+plugins/modules/cloud/rackspace/rax_cdb_user.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path
+plugins/modules/cloud/rackspace/rax_mon_notification_plan.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path # fix needed
+plugins/modules/cloud/rackspace/rax_scaling_group.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_compute.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_lb.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type
+plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter
+plugins/modules/cloud/softlayer/sl_vm.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter
+plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter
+plugins/modules/cloud/univention/udm_share.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/univention/udm_user.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-list-no-elements
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-required-mismatch
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul_acl.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-list-no-elements
+plugins/modules/clustering/consul/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/znode.py validate-modules:doc-missing-type
+plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/aerospike/aerospike_migrations.py yamllint:unparsable-with-libyaml
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/kibana_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/kibana_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/database/misc/riak.py validate-modules:doc-missing-type
+plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_db.py use-argspec-type-path
+plugins/modules/database/postgresql/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/database/postgresql/postgresql_privs.py validate-modules:parameter-documented-multiple-times
+plugins/modules/database/postgresql/postgresql_tablespace.py validate-modules:mutually_exclusive-unknown
+plugins/modules/database/postgresql/postgresql_user.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter
+plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/files/xml.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/keycloak/keycloak_client.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/onepassword_info.py validate-modules:parameter-list-no-elements
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc
+plugins/modules/monitoring/bigpanda.py validate-modules:invalid-argument-name
+plugins/modules/monitoring/datadog/datadog_event.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:invalid-argument-name
+plugins/modules/monitoring/datadog/datadog_monitor.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter
+plugins/modules/monitoring/librato_annotation.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/logstash_plugin.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/monitoring/pagerduty.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_check.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_client.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/sensu/sensu_handler.py validate-modules:parameter-list-no-elements
+plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name
+plugins/modules/monitoring/statusio_maintenance.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/dnsimple.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc # This triggers when a parameter is undocumented
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_member.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_member.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nios/nios_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_member.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-required-mismatch
+plugins/modules/net_tools/nios/nios_network.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-required-mismatch
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:missing-suboption-docs
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nsupdate.py validate-modules:parameter-list-no-elements
+plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/omapi_host.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/cisco_webex.py validate-modules:invalid-argument-name
+plugins/modules/notification/grove.py validate-modules:invalid-argument-name
+plugins/modules/notification/grove.py validate-modules:nonexistent-parameter-documented
+plugins/modules/notification/mail.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/nexmo.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/office_365_connector_card.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter
+plugins/modules/notification/rocketchat.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/sendgrid.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/slack.py validate-modules:parameter-list-no-elements
+plugins/modules/notification/twilio.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/language/bundler.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid
+plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc
+plugins/modules/packaging/language/maven_artifact.py validate-modules:undocumented-parameter
+plugins/modules/packaging/language/pip_package_info.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:mutually_exclusive-unknown
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:parameter-list-no-elements
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
+plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/dellemc/ome_device_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/foreman/foreman.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/foreman/katello.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/foreman/katello.py yamllint:unparsable-with-libyaml
+plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type
+plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_policies.py validate-modules:parameter-state-invalid-choice
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:invalid-ansiblemodule-schema
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/manageiq/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/redfish/idrac_redfish_command.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/idrac_redfish_config.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/idrac_redfish_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_command.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_config.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/redfish/redfish_info.py validate-modules:parameter-list-no-elements
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter
+plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/git_config.py validate-modules:doc-missing-type
+plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_hooks.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/gitlab/gitlab_runner.py validate-modules:parameter-list-no-elements
+plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_peer.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:incompatible-default-type
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:no-default-for-required-parameter
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:undocumented-parameter
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-state-invalid-choice
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_check_connections.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:parameter-invalid
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:undocumented-parameter
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:doc-required-mismatch
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-list-no-elements
+plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/alternatives.py pylint:blacklisted-name
+plugins/modules/system/beadm.py pylint:blacklisted-name
+plugins/modules/system/cronvar.py pylint:blacklisted-name
+plugins/modules/system/dconf.py pylint:blacklisted-name
+plugins/modules/system/filesystem.py pylint:blacklisted-name
+plugins/modules/system/gconftool2.py pylint:blacklisted-name
+plugins/modules/system/gconftool2.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/interfaces_file.py pylint:blacklisted-name
+plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter
+plugins/modules/system/java_cert.py pylint:blacklisted-name
+plugins/modules/system/launchd.py use-argspec-type-path # False positive
+plugins/modules/system/lvg.py pylint:blacklisted-name
+plugins/modules/system/lvol.py pylint:blacklisted-name
+plugins/modules/system/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/parted.py pylint:blacklisted-name
+plugins/modules/system/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/puppet.py use-argspec-type-path
+plugins/modules/system/puppet.py validate-modules:doc-default-does-not-match-spec # show_diff is not documented
+plugins/modules/system/puppet.py validate-modules:parameter-invalid
+plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/puppet.py validate-modules:undocumented-parameter
+plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/runit.py validate-modules:undocumented-parameter
+plugins/modules/system/timezone.py pylint:blacklisted-name
+plugins/modules/system/xfconf.py validate-modules:parameter-state-invalid-choice
+plugins/modules/system/xfconf.py validate-modules:return-syntax-error
+plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path
+plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name
+scripts/inventory/gce.py pylint:blacklisted-name
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py future-import-boilerplate
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py metaclass-boilerplate
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.9.txt b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..908f8e0d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,492 @@
+plugins/callback/hipchat.py pylint:blacklisted-name
+plugins/connection/lxc.py pylint:blacklisted-name
+plugins/module_utils/compat/ipaddress.py no-assert
+plugins/module_utils/compat/ipaddress.py no-unicode-literals
+plugins/module_utils/_mount.py future-import-boilerplate
+plugins/module_utils/_mount.py metaclass-boilerplate
+plugins/modules/cloud/docker/docker_container.py use-argspec-type-path # uses colon-separated paths, can't use type=path
+plugins/modules/cloud/google/gcdns_record.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcdns_record.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcdns_record.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcdns_zone.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcdns_zone.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcdns_zone.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gce.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gce.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gce.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gce_eip.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_img.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_instance_template.py validate-modules:doc-missing-type
+plugins/modules/cloud/google/gce_lb.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_mig.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_net.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_pd.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_snapshot.py pylint:blacklisted-name
+plugins/modules/cloud/google/gce_tag.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_backend_service.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_backend_service.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcp_backend_service.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcp_backend_service.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcp_forwarding_rule.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcp_forwarding_rule.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcp_forwarding_rule.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcp_healthcheck.py pylint:blacklisted-name
+plugins/modules/cloud/google/gcp_healthcheck.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcp_healthcheck.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcp_healthcheck.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcp_target_proxy.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcp_target_proxy.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcp_target_proxy.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcp_url_map.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcp_url_map.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcp_url_map.py validate-modules:missing-main-call
+plugins/modules/cloud/google/gcspanner.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/google/gcspanner.py validate-modules:invalid-documentation
+plugins/modules/cloud/google/gcspanner.py validate-modules:missing-main-call
+plugins/modules/cloud/kubevirt/kubevirt_cdi_upload.py validate-modules:doc-missing-type
+plugins/modules/cloud/kubevirt/kubevirt_preset.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_pvc.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_rs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/kubevirt/kubevirt_vm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/linode/linode.py validate-modules:undocumented-parameter
+plugins/modules/cloud/lxc/lxc_container.py pylint:blacklisted-name
+plugins/modules/cloud/lxc/lxc_container.py use-argspec-type-path
+plugins/modules/cloud/lxc/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/cloud/misc/helm.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/misc/helm.py validate-modules:invalid-documentation
+plugins/modules/cloud/misc/ovirt.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/misc/ovirt.py validate-modules:invalid-documentation
+plugins/modules/cloud/online/online_server_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/online/online_server_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/online/online_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/online/online_user_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/online/online_user_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/online/online_user_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_affinity_label_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_api_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_cluster_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_datacenter_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_disk_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_event_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_external_provider_facts.py validate-modules:undocumented-parameter
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_group_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_host_storage_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_network_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_nic_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_permission_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_quota_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_scheduling_policy_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_snapshot_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_domain_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_storage_template_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_storage_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_tag_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_template_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_user_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/ovirt/ovirt_vm_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:doc-missing-type
+plugins/modules/cloud/ovirt/ovirt_vmpool_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/packet/packet_device.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_device.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:doc-missing-type
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/packet/packet_sshkey.py validate-modules:undocumented-parameter
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:doc-missing-type
+plugins/modules/cloud/profitbricks/profitbricks_volume.py validate-modules:undocumented-parameter
+plugins/modules/cloud/rackspace/rax.py use-argspec-type-path
+plugins/modules/cloud/rackspace/rax.py validate-modules:doc-missing-type
+plugins/modules/cloud/rackspace/rax.py validate-modules:undocumented-parameter
+plugins/modules/cloud/rackspace/rax_files_objects.py use-argspec-type-path
+plugins/modules/cloud/rackspace/rax_scaling_group.py use-argspec-type-path
+plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_image_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_image_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_ip_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_ip_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_organization_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_organization_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_security_group_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_security_group_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_server_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_server_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_snapshot_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_snapshot_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:deprecation-mismatch
+plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:invalid-documentation
+plugins/modules/cloud/scaleway/scaleway_volume_facts.py validate-modules:return-syntax-error
+plugins/modules/cloud/scaleway/scaleway_volume_info.py validate-modules:return-syntax-error
+plugins/modules/cloud/smartos/smartos_image_info.py validate-modules:doc-missing-type
+plugins/modules/cloud/smartos/vmadm.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/smartos/vmadm.py validate-modules:undocumented-parameter
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py validate-modules:undocumented-parameter
+plugins/modules/cloud/univention/udm_dns_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/univention/udm_dns_zone.py validate-modules:undocumented-parameter
+plugins/modules/cloud/webfaction/webfaction_app.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_db.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_domain.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/webfaction/webfaction_mailbox.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:doc-missing-type
+plugins/modules/cloud/webfaction/webfaction_site.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:missing-suboption-docs
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/cloud/xenserver/xenserver_guest.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul.py validate-modules:undocumented-parameter
+plugins/modules/clustering/consul/consul_acl.py validate-modules:doc-missing-type
+plugins/modules/clustering/consul/consul_kv.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/etcd3.py validate-modules:parameter-type-not-in-doc
+plugins/modules/clustering/znode.py validate-modules:doc-missing-type
+plugins/modules/clustering/znode.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/elasticsearch_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/kibana_plugin.py validate-modules:doc-missing-type
+plugins/modules/database/misc/kibana_plugin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/misc/riak.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/database/misc/riak.py validate-modules:doc-missing-type
+plugins/modules/database/misc/riak.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_db.py use-argspec-type-path
+plugins/modules/database/postgresql/postgresql_db.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_db.py validate-modules:use-run-command-not-popen
+plugins/modules/database/postgresql/postgresql_ext.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_schema.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/postgresql/postgresql_user.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/database/postgresql/postgresql_user.py validate-modules:parameter-type-not-in-doc
+plugins/modules/database/vertica/vertica_configuration.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_info.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_role.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_schema.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_schema.py validate-modules:undocumented-parameter
+plugins/modules/database/vertica/vertica_user.py validate-modules:doc-missing-type
+plugins/modules/database/vertica/vertica_user.py validate-modules:undocumented-parameter
+plugins/modules/files/iso_extract.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:doc-missing-type
+plugins/modules/identity/opendj/opendj_backendprop.py validate-modules:parameter-type-not-in-doc
+plugins/modules/monitoring/icinga2_host.py validate-modules:undocumented-parameter
+plugins/modules/monitoring/statusio_maintenance.py pylint:blacklisted-name
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:deprecation-mismatch
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:invalid-documentation
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/ldap/ldap_attr.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:doc-missing-type
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/ldap/ldap_entry.py validate-modules:undocumented-parameter # Parameter removed but reason for removal is shown by custom code
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_a_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_aaaa_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_cname_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_dns_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_fixed_address.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_host_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_member.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_member.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_mx_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_naptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_network_view.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:missing-suboption-docs
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_nsgroup.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_ptr_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_srv_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_txt_record.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:doc-missing-type
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:parameter-type-not-in-doc
+plugins/modules/net_tools/nios/nios_zone.py validate-modules:undocumented-parameter
+plugins/modules/net_tools/nsupdate.py validate-modules:parameter-type-not-in-doc
+plugins/modules/notification/pushbullet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/notification/pushbullet.py validate-modules:undocumented-parameter
+plugins/modules/packaging/language/composer.py validate-modules:parameter-invalid
+plugins/modules/packaging/language/maven_artifact.py validate-modules:parameter-type-not-in-doc
+plugins/modules/packaging/os/apt_rpm.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/homebrew_cask.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/opkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/pacman.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/redhat_subscription.py validate-modules:return-syntax-error
+plugins/modules/packaging/os/slackpkg.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/urpmi.py validate-modules:parameter-invalid
+plugins/modules/packaging/os/xbps.py validate-modules:parameter-invalid
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:doc-missing-type
+plugins/modules/remote_management/dellemc/idrac_server_config_profile.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/foreman/foreman.py validate-modules:deprecation-mismatch
+plugins/modules/remote_management/foreman/foreman.py validate-modules:invalid-documentation
+plugins/modules/remote_management/foreman/foreman.py validate-modules:missing-main-call
+plugins/modules/remote_management/foreman/katello.py validate-modules:deprecation-mismatch
+plugins/modules/remote_management/foreman/katello.py validate-modules:invalid-documentation
+plugins/modules/remote_management/foreman/katello.py validate-modules:missing-main-call
+plugins/modules/remote_management/hpilo/hpilo_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hpilo_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/hpilo/hponcfg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/imc/imc_rest.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_boot.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:doc-missing-type
+plugins/modules/remote_management/ipmi/ipmi_power.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/lxca/lxca_cmms.py validate-modules:doc-missing-type
+plugins/modules/remote_management/lxca/lxca_nodes.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:doc-missing-type
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/manageiq/manageiq_provider.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_datacenter_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_enclosure_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fc_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:doc-missing-type
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_network_set_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/oneview/oneview_san_manager_info.py validate-modules:undocumented-parameter
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:no-default-for-required-parameter
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:parameter-type-not-in-doc
+plugins/modules/remote_management/stacki/stacki_host.py validate-modules:undocumented-parameter
+plugins/modules/source_control/bzr.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/git_config.py validate-modules:doc-missing-type
+plugins/modules/source_control/git_config.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-invalid
+plugins/modules/source_control/github/github_deploy_key.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_hooks.py validate-modules:deprecation-mismatch
+plugins/modules/source_control/github/github_hooks.py validate-modules:invalid-documentation
+plugins/modules/source_control/github/github_hooks.py validate-modules:missing-main-call
+plugins/modules/source_control/github/github_issue.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_issue.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_key.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:doc-missing-type
+plugins/modules/source_control/github/github_release.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/github/github_webhook_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/source_control/hg.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:doc-missing-type
+plugins/modules/storage/emc/emc_vnx_sg_member.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_heal_info.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/glusterfs/gluster_volume.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/ibm/ibm_sa_domain.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_host_ports.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_pool.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol.py validate-modules:doc-missing-type
+plugins/modules/storage/ibm/ibm_sa_vol_map.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_aggregate.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_license.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_lun.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_qtree.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_svm.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_user.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_user_role.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_cdot_volume.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:doc-missing-type
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/na_ontap_gather_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/sf_account_manager.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/sf_check_connections.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/sf_check_connections.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/sf_check_connections.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/sf_snapshot_schedule_manager.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/sf_volume_access_group_manager.py validate-modules:missing-main-call
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:deprecation-mismatch
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:invalid-documentation
+plugins/modules/storage/netapp/sf_volume_manager.py validate-modules:missing-main-call
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:deprecation-mismatch
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:invalid-documentation
+plugins/modules/storage/purestorage/purefa_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:deprecation-mismatch
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:invalid-documentation
+plugins/modules/storage/purestorage/purefb_facts.py validate-modules:return-syntax-error
+plugins/modules/storage/zfs/zfs.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_delegate_admin.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zfs_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/storage/zfs/zpool_facts.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/alternatives.py pylint:blacklisted-name
+plugins/modules/system/beadm.py pylint:blacklisted-name
+plugins/modules/system/cronvar.py pylint:blacklisted-name
+plugins/modules/system/dconf.py pylint:blacklisted-name
+plugins/modules/system/filesystem.py pylint:blacklisted-name
+plugins/modules/system/gconftool2.py pylint:blacklisted-name
+plugins/modules/system/interfaces_file.py pylint:blacklisted-name
+plugins/modules/system/iptables_state.py validate-modules:undocumented-parameter
+plugins/modules/system/java_cert.py pylint:blacklisted-name
+plugins/modules/system/launchd.py use-argspec-type-path # False positive
+plugins/modules/system/lvg.py pylint:blacklisted-name
+plugins/modules/system/lvol.py pylint:blacklisted-name
+plugins/modules/system/parted.py pylint:blacklisted-name
+plugins/modules/system/puppet.py use-argspec-type-path
+plugins/modules/system/puppet.py validate-modules:parameter-invalid
+plugins/modules/system/puppet.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/puppet.py validate-modules:undocumented-parameter
+plugins/modules/system/runit.py validate-modules:doc-default-does-not-match-spec
+plugins/modules/system/runit.py validate-modules:parameter-type-not-in-doc
+plugins/modules/system/runit.py validate-modules:undocumented-parameter
+plugins/modules/system/timezone.py pylint:blacklisted-name
+plugins/modules/system/xfconf.py validate-modules:return-syntax-error
+plugins/modules/web_infrastructure/jenkins_plugin.py use-argspec-type-path
+plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:deprecation-mismatch
+plugins/modules/web_infrastructure/nginx_status_facts.py validate-modules:invalid-documentation
+plugins/modules/web_infrastructure/rundeck_acl_policy.py pylint:blacklisted-name
+scripts/inventory/gce.py pylint:blacklisted-name
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py future-import-boilerplate
+tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py metaclass-boilerplate
+tests/utils/shippable/check_matrix.py replace-urlopen
+tests/utils/shippable/timing.py shebang
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/builtins.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..f60ee678
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/builtins.py
@@ -0,0 +1,33 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
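+
+
+# Illustrative usage (a sketch; assumes the compat ``mock`` shim in this package is
+# importable). BUILTINS lets a test build a version-independent patch target:
+#
+#     from ansible_collections.community.general.tests.unit.compat.mock import patch, mock_open
+#
+#     with patch('%s.open' % BUILTINS, mock_open(read_data='data')):
+#         ...  # code under test that calls the built-in open()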
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/mock.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/mock.py
new file mode 100644
index 00000000..0972cd2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/mock.py
@@ -0,0 +1,122 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import *
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import *
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+ data_as_list = [l + sep for l in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+            # If there wasn't an extra newline by itself, then the file being
+            # emulated doesn't end with a newline; remove the trailing newline
+            # that the list comprehension above added.
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline` and `readlines` methods of the
+ file handle to return. This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
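+
+
+# Illustrative sketch (comments only, nothing executed at import time) of how the
+# fallback mock_open above interleaves the read methods on a single handle:
+#
+#     m = mock_open(read_data='line1\nline2\n')
+#     with m('somefile.txt') as handle:
+#         handle.readline()   # -> 'line1\n'
+#         handle.read()       # -> 'line2\n' (only the data not yet consumed)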
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/unittest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..98f08ad6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/compat/unittest.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittest's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/loader.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/loader.py
new file mode 100644
index 00000000..907ec9b9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/loader.py
@@ -0,0 +1,102 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils._text import to_bytes, to_text
+
+
+class DictDataLoader(DataLoader):
+
+ def __init__(self, file_mapping=None):
+ file_mapping = {} if file_mapping is None else file_mapping
+ assert type(file_mapping) == dict
+
+ super(DictDataLoader, self).__init__()
+
+ self._file_mapping = file_mapping
+ self._build_known_directories()
+ self._vault_secrets = None
+
+ def load_from_file(self, path, cache=True, unsafe=False):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return self.load(self._file_mapping[path], path)
+ return None
+
+ # TODO: the real _get_file_contents returns a bytestring, so we actually convert the
+ # unicode/text it's created with to utf-8
+ def _get_file_contents(self, path):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return (to_bytes(self._file_mapping[path]), False)
+ else:
+ raise AnsibleParserError("file not found: %s" % path)
+
+ def path_exists(self, path):
+ path = to_text(path)
+ return path in self._file_mapping or path in self._known_directories
+
+ def is_file(self, path):
+ path = to_text(path)
+ return path in self._file_mapping
+
+ def is_directory(self, path):
+ path = to_text(path)
+ return path in self._known_directories
+
+ def list_directory(self, path):
+ ret = []
+ path = to_text(path)
+ for x in (list(self._file_mapping.keys()) + self._known_directories):
+ if x.startswith(path):
+ if os.path.dirname(x) == path:
+ ret.append(os.path.basename(x))
+ return ret
+
+ def is_executable(self, path):
+ # FIXME: figure out a way to make paths return true for this
+ return False
+
+ def _add_known_directory(self, directory):
+ if directory not in self._known_directories:
+ self._known_directories.append(directory)
+
+ def _build_known_directories(self):
+ self._known_directories = []
+ for path in self._file_mapping:
+ dirname = os.path.dirname(path)
+ while dirname not in ('/', ''):
+ self._add_known_directory(dirname)
+ dirname = os.path.dirname(dirname)
+
+ def push(self, path, content):
+ rebuild_dirs = False
+ if path not in self._file_mapping:
+ rebuild_dirs = True
+
+ self._file_mapping[path] = content
+
+ if rebuild_dirs:
+ self._build_known_directories()
+
+ def pop(self, path):
+ if path in self._file_mapping:
+ del self._file_mapping[path]
+ self._build_known_directories()
+
+ def clear(self):
+ self._file_mapping = dict()
+ self._known_directories = []
+
+ def get_basedir(self):
+ return os.getcwd()
+
+ def set_vault_secrets(self, vault_secrets):
+ self._vault_secrets = vault_secrets
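+
+
+# Illustrative usage (a sketch): seed the loader with in-memory "files" instead of
+# touching the filesystem, e.g.
+#
+#     fake_loader = DictDataLoader({'/etc/ansible/roles/x/tasks/main.yml': '- debug: msg=hi'})
+#     fake_loader.path_exists('/etc/ansible/roles/x/tasks/main.yml')   # -> True
+#     fake_loader.list_directory('/etc/ansible/roles/x/tasks')         # -> ['main.yml']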
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/path.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/path.py
new file mode 100644
index 00000000..c1c075bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/path.py
@@ -0,0 +1,8 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock
+from ansible.utils.path import unfrackpath
+
+
+mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x)
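+
+# Illustrative usage (a sketch): patch ``unfrackpath`` where the code under test looks
+# it up so that paths are passed through unchanged, e.g.
+#
+#     @patch('ansible.utils.path.unfrackpath', mock_unfrackpath_noop)
+#     def test_something():
+#         ...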
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/procenv.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/procenv.py
new file mode 100644
index 00000000..616a75bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/procenv.py
@@ -0,0 +1,76 @@
+# (c) 2016, Matt Davis <mdavis@ansible.com>
+# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import json
+
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils.six import PY3
+from ansible.module_utils._text import to_bytes
+
+
+@contextmanager
+def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
+ """
+ context manager that temporarily masks the test runner's values for stdin and argv
+ """
+ real_stdin = sys.stdin
+ real_argv = sys.argv
+
+ if PY3:
+ fake_stream = StringIO(stdin_data)
+ fake_stream.buffer = BytesIO(to_bytes(stdin_data))
+ else:
+ fake_stream = BytesIO(to_bytes(stdin_data))
+
+ try:
+ sys.stdin = fake_stream
+ sys.argv = argv_data
+
+ yield
+ finally:
+ sys.stdin = real_stdin
+ sys.argv = real_argv
+
+
+@contextmanager
+def swap_stdout():
+ """
+ context manager that temporarily replaces stdout for tests that need to verify output
+ """
+ old_stdout = sys.stdout
+
+ if PY3:
+ fake_stream = StringIO()
+ else:
+ fake_stream = BytesIO()
+
+ try:
+ sys.stdout = fake_stream
+
+ yield fake_stream
+ finally:
+ sys.stdout = old_stdout
+
+
+class ModuleTestCase(unittest.TestCase):
+ def setUp(self, module_args=None):
+ if module_args is None:
+ module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
+
+ args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
+
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
+ self.stdin_swap.__enter__()
+
+ def tearDown(self):
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap.__exit__(None, None, None)
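+
+
+# Illustrative usage (a sketch): module tests can subclass ModuleTestCase so that an
+# AnsibleModule constructed during the test reads its arguments from the faked stdin:
+#
+#     class TestMyModule(ModuleTestCase):
+#         def test_module_args(self):
+#             ...  # instantiate the module / call its main() here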
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/vault_helper.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/vault_helper.py
new file mode 100644
index 00000000..b54629da
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/vault_helper.py
@@ -0,0 +1,27 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils._text import to_bytes
+
+from ansible.parsing.vault import VaultSecret
+
+
+class TextVaultSecret(VaultSecret):
+    '''A secret piece of text, i.e. a password. Tracks text encoding.
+
+    The text encoding of the text may not be the default text encoding, so
+    we keep track of the encoding in order to encode it to the same bytes.'''
+
+ def __init__(self, text, encoding=None, errors=None, _bytes=None):
+ super(TextVaultSecret, self).__init__()
+ self.text = text
+ self.encoding = encoding or 'utf-8'
+ self._bytes = _bytes
+ self.errors = errors or 'strict'
+
+ @property
+ def bytes(self):
+ '''The text encoded with encoding, unless we specifically set _bytes.'''
+ return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
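+
+
+# Illustrative usage (a sketch): wrap a known password so vault-related tests can hand
+# it to a loader as a (vault_id, secret) pair, e.g.
+#
+#     secret = TextVaultSecret(u'test-vault-password')
+#     fake_loader.set_vault_secrets([('default', secret)])
+#     secret.bytes   # -> b'test-vault-password'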
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/yaml_helper.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/yaml_helper.py
new file mode 100644
index 00000000..a646c024
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/mock/yaml_helper.py
@@ -0,0 +1,126 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+
+class YamlTestUtils(object):
+ """Mixin class to combine with a unittest.TestCase subclass."""
+ def _loader(self, stream):
+ """Vault related tests will want to override this.
+
+        Vault cases should set up an AnsibleLoader that has the vault password."""
+ return AnsibleLoader(stream)
+
+ def _dump_stream(self, obj, stream, dumper=None):
+ """Dump to a py2-unicode or py3-string stream."""
+ if PY3:
+ return yaml.dump(obj, stream, Dumper=dumper)
+ else:
+ return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
+
+ def _dump_string(self, obj, dumper=None):
+ """Dump to a py2-unicode or py3-string"""
+ if PY3:
+ return yaml.dump(obj, Dumper=dumper)
+ else:
+ return yaml.dump(obj, Dumper=dumper, encoding=None)
+
+ def _dump_load_cycle(self, obj):
+        # Each pass through a dump or load revs the 'generation'
+ # obj to yaml string
+ string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
+
+ # wrap a stream/file like StringIO around that yaml
+ stream_from_object_dump = io.StringIO(string_from_object_dump)
+ loader = self._loader(stream_from_object_dump)
+ # load the yaml stream to create a new instance of the object (gen 2)
+ obj_2 = loader.get_data()
+
+        # dump the gen 2 object directly to a string
+ string_from_object_dump_2 = self._dump_string(obj_2,
+ dumper=AnsibleDumper)
+
+        # The gen 1 and gen 2 yaml strings should match
+ self.assertEqual(string_from_object_dump, string_from_object_dump_2)
+        # the gen 1 (orig) and gen 2 py objects should be equal
+ self.assertEqual(obj, obj_2)
+
+ # again! gen 3... load strings into py objects
+ stream_3 = io.StringIO(string_from_object_dump_2)
+ loader_3 = self._loader(stream_3)
+ obj_3 = loader_3.get_data()
+
+ string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
+
+ self.assertEqual(obj, obj_3)
+ # should be transitive, but...
+ self.assertEqual(obj_2, obj_3)
+ self.assertEqual(string_from_object_dump, string_from_object_dump_3)
+
+ def _old_dump_load_cycle(self, obj):
+ '''Dump the passed in object to yaml, load it back up, dump again, compare.'''
+ stream = io.StringIO()
+
+ yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
+ self._dump_stream(obj, stream, dumper=AnsibleDumper)
+
+ yaml_string_from_stream = stream.getvalue()
+
+ # reset stream
+ stream.seek(0)
+
+ loader = self._loader(stream)
+ # loader = AnsibleLoader(stream, vault_password=self.vault_password)
+ obj_from_stream = loader.get_data()
+
+ stream_from_string = io.StringIO(yaml_string)
+ loader2 = self._loader(stream_from_string)
+ # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
+ obj_from_string = loader2.get_data()
+
+ stream_obj_from_stream = io.StringIO()
+ stream_obj_from_string = io.StringIO()
+
+ if PY3:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
+ yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
+
+ stream_obj_from_stream.seek(0)
+ stream_obj_from_string.seek(0)
+
+ if PY3:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ assert yaml_string == yaml_string_obj_from_stream
+ assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
+ yaml_string_stream_obj_from_string)
+ assert obj == obj_from_stream
+ assert obj == obj_from_string
+ assert obj == yaml_string_obj_from_stream
+ assert obj == yaml_string_obj_from_string
+ assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ return {'obj': obj,
+ 'yaml_string': yaml_string,
+ 'yaml_string_from_stream': yaml_string_from_stream,
+ 'obj_from_stream': obj_from_stream,
+ 'obj_from_string': obj_from_string,
+ 'yaml_string_obj_from_string': yaml_string_obj_from_string}
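+
+
+# Illustrative usage (a sketch): the mixin expects to be combined with a TestCase so the
+# round-trip helpers can use assertEqual, e.g.
+#
+#     class TestRoundTrip(YamlTestUtils, unittest.TestCase):
+#         def test_plain_dict(self):
+#             self._dump_load_cycle({'a': 1})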
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/conftest.py
new file mode 100644
index 00000000..a04a5e2d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/conftest.py
@@ -0,0 +1,37 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.utils import context_objects as co
+
+
+@pytest.fixture
+def parser():
+ parser = opt_help.create_base_parser('testparser')
+
+ opt_help.add_runas_options(parser)
+ opt_help.add_meta_options(parser)
+ opt_help.add_runtask_options(parser)
+ opt_help.add_vault_options(parser)
+ opt_help.add_async_options(parser)
+ opt_help.add_connect_options(parser)
+ opt_help.add_subset_options(parser)
+ opt_help.add_check_options(parser)
+ opt_help.add_inventory_options(parser)
+
+ return parser
+
+
+@pytest.fixture
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/helper.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/helper.py
new file mode 100644
index 00000000..69e3ea6c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/helper.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.loader import become_loader, get_shell_plugin
+
+
+def call_become_plugin(task, var_options, cmd, executable=None):
+    """Helper function to call a become plugin similarly to how Ansible itself handles this."""
+ plugin = become_loader.get(task['become_method'])
+ plugin.set_options(task_keys=task, var_options=var_options)
+ shell = get_shell_plugin(executable=executable)
+ return plugin.build_become_command(cmd, shell)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py
new file mode 100644
index 00000000..0474b3b6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py
@@ -0,0 +1,84 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_doas_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.doas',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, default_exe, success,
+ default_cmd), cmd) is not None)
+
+
+def test_doas(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.doas',
+ 'become_flags': doas_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, task['become_user'], default_exe, success,
+ default_cmd), cmd) is not None)
+
+
+def test_doas_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.doas',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': doas_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, var_options['ansible_become_user'], default_exe, success,
+ default_cmd), cmd) is not None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py
new file mode 100644
index 00000000..eb5932cd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py
@@ -0,0 +1,94 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_dzdo_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = '-H -S -n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.dzdo',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, default_exe,
+ success, default_cmd), cmd) is not None
+
+
+def test_dzdo(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.dzdo',
+ 'become_flags': dzdo_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, task['become_user'], default_exe,
+ success, default_cmd), cmd) is not None
+ task['become_pass'] = 'testpass'
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"',
+ task['become_user'], default_exe, success, default_cmd), cmd) is not None
+
+
+def test_dzdo_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.dzdo',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': dzdo_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, var_options['ansible_become_user'], default_exe,
+ success, default_cmd), cmd) is not None
+ var_options['ansible_become_pass'] = 'testpass'
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"',
+ var_options['ansible_become_user'], default_exe, success, default_cmd), cmd) is not None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
new file mode 100644
index 00000000..87c33706
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
@@ -0,0 +1,85 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_ksu_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, task['become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
+
+
+def test_ksu(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ 'become_flags': ksu_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, task['become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
+
+
+def test_ksu_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': ksu_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, var_options['ansible_become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py
new file mode 100644
index 00000000..b8e369d2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py
@@ -0,0 +1,84 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_pbrun_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.pbrun',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags,
+ success, default_cmd), cmd) is not None
+
+
+def test_pbrun(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pbrun',
+ 'become_flags': pbrun_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, task['become_user'],
+ success, default_cmd), cmd) is not None
+
+
+def test_pbrun_var_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pbrun',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': pbrun_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, var_options['ansible_become_user'],
+ success, default_cmd), cmd) is not None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py
new file mode 100644
index 00000000..3adc2ed5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py
@@ -0,0 +1,81 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_pfexec_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = '-H -S -n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.pfexec',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match('''%s %s "'echo %s; %s'"''' % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
+
+
+def test_pfexec(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pfexec',
+ 'become_flags': pfexec_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match('''%s %s "'echo %s; %s'"''' % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
+
+
+def test_pfexec_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pfexec',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': pfexec_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match('''%s %s "'echo %s; %s'"''' % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py
new file mode 100644
index 00000000..f460b0a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py
@@ -0,0 +1,35 @@
+# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+pytest.importorskip('memcache')
+
+from ansible.plugins.loader import cache_loader
+from ansible_collections.community.general.plugins.cache.memcached import CacheModule as MemcachedCache
+
+
+def test_memcached_cachemodule():
+ assert isinstance(MemcachedCache(), MemcachedCache)
+
+
+def test_memcached_cachemodule_with_loader():
+ assert isinstance(cache_loader.get('community.general.memcached'), MemcachedCache)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py
new file mode 100644
index 00000000..2effec34
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py
@@ -0,0 +1,36 @@
+# (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+pytest.importorskip('redis')
+
+from ansible.plugins.loader import cache_loader
+from ansible_collections.community.general.plugins.cache.redis import CacheModule as RedisCache
+
+
+def test_redis_cachemodule():
+ assert isinstance(RedisCache(), RedisCache)
+
+
+def test_redis_cachemodule_with_loader():
+ # The _uri option is required for the redis plugin
+ assert isinstance(cache_loader.get('community.general.redis', **{'_uri': '127.0.0.1:6379:1'}), RedisCache)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_docker.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_docker.py
new file mode 100644
index 00000000..b1c55d4b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_docker.py
@@ -0,0 +1,67 @@
+# (c) 2020 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.errors import AnsibleError
+from ansible.playbook.play_context import PlayContext
+from ansible_collections.community.general.plugins.connection.docker import Connection as DockerConnection
+
+
+class TestDockerConnectionClass(unittest.TestCase):
+
+ def setUp(self):
+ self.play_context = PlayContext()
+ self.play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ self.in_stream = StringIO()
+
+ def tearDown(self):
+ pass
+
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('docker version', '1.2.3', '', 0))
+    def test_docker_connection_module_too_old(self, mock_new_docker_version, mock_old_docker_version):
+ self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$',
+ DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
+
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('docker version', '1.3.4', '', 0))
+    def test_docker_connection_module(self, mock_new_docker_version, mock_old_docker_version):
+ self.assertIsInstance(DockerConnection(self.play_context, self.in_stream, docker_command='/fake/docker'),
+ DockerConnection)
+
+ # old version and new version fail
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.general.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
+ self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ',
+ DockerConnection, self.play_context, self.in_stream, docker_command='/fake/docker')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py
new file mode 100644
index 00000000..28bfc331
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py
@@ -0,0 +1,40 @@
+#
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.connection import lxc
+from ansible.playbook.play_context import PlayContext
+
+
+class TestLXCConnectionClass(unittest.TestCase):
+
+ def test_lxc_connection_module(self):
+ play_context = PlayContext()
+ play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ in_stream = StringIO()
+
+ self.assertIsInstance(lxc.Connection(play_context, in_stream), lxc.Connection)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py
new file mode 100644
index 00000000..477a3039
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Orion Poplawski <orion@nwra.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible_collections.community.general.plugins.inventory.cobbler import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ return InventoryModule()
+
+
+def test_init_cache(inventory):
+ inventory._init_cache()
+ assert inventory._cache[inventory.cache_key] == {}
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.cobber.yml') is False
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py
new file mode 100644
index 00000000..bbab0b9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2018 Luke Murphy <lukewm@riseup.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+linode_apiv4 = pytest.importorskip('linode_api4')
+mandatory_py_version = pytest.mark.skipif(
+ sys.version_info < (2, 7),
+ reason='The linode_api4 dependency requires python2.7 or higher'
+)
+
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible_collections.community.general.plugins.inventory.linode import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ return InventoryModule()
+
+
+def test_access_token_lookup(inventory):
+ inventory._options = {'access_token': None}
+ with pytest.raises(AnsibleError) as error_message:
+ inventory._build_client()
+ assert 'Could not retrieve Linode access token' in error_message
+
+
+def test_validate_option(inventory):
+ assert ['eu-west'] == inventory._validate_option('regions', list, 'eu-west')
+ assert ['eu-west'] == inventory._validate_option('regions', list, ['eu-west'])
+
+
+def test_validation_option_bad_option(inventory):
+ with pytest.raises(AnsibleParserError) as error_message:
+ inventory._validate_option('regions', dict, [])
+ assert "The option filters ([]) must be a <class 'dict'>" == error_message
+
+
+def test_empty_config_query_options(inventory):
+ regions, types = inventory._get_query_options({})
+ assert regions == types == []
+
+
+def test_config_query_options(inventory):
+ regions, types = inventory._get_query_options({
+ 'regions': ['eu-west', 'us-east'],
+ 'types': ['g5-standard-2', 'g6-standard-2'],
+ })
+
+ assert regions == ['eu-west', 'us-east']
+ assert types == ['g5-standard-2', 'g6-standard-2']
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.linde.yml') is False
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py
new file mode 100644
index 00000000..14332e75
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Jeffrey van Pelt <jeff@vanpelt.one>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+# The API responses used in these tests were recorded from PVE version 6.2.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
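+    # Attach a real InventoryData object so test_populate can inspect the
+    # hosts and groups that _populate() creates.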
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.proxmox.yml') is False
+
+
+def get_auth():
+ return True
+
+
+# NOTE: when updating/adding replies to this function,
+# be sure to add only the _contents_ of the 'data' dict from the API reply
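+# Each branch below fakes one PVE API endpoint the inventory module queries:
+# the node list, pools, per-node lxc and qemu listings, pool membership and
+# the node network configuration.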
+def get_json(url):
+ if url == "https://localhost:8006/api2/json/nodes":
+ # _get_nodes
+ return [{"type": "node",
+ "cpu": 0.01,
+ "maxdisk": 500,
+ "mem": 500,
+ "node": "testnode",
+ "id": "node/testnode",
+ "maxcpu": 1,
+ "status": "online",
+ "ssl_fingerprint": "xx",
+ "disk": 1000,
+ "maxmem": 1000,
+ "uptime": 10000,
+ "level": ""}]
+ elif url == "https://localhost:8006/api2/json/pools":
+ # _get_pools
+ return [{"poolid": "test"}]
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc":
+ # _get_lxc_per_node
+ return [{"cpus": 1,
+ "name": "test-lxc",
+ "cpu": 0.01,
+ "diskwrite": 0,
+ "lock": "",
+ "maxmem": 1000,
+ "template": "",
+ "diskread": 0,
+ "mem": 1000,
+ "swap": 0,
+ "type": "lxc",
+ "maxswap": 0,
+ "maxdisk": "1000",
+ "netout": 1000,
+ "pid": "1000",
+ "netin": 1000,
+ "status": "running",
+ "vmid": "100",
+ "disk": "1000",
+ "uptime": 1000}]
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu":
+ # _get_qemu_per_node
+ return [{"name": "test-qemu",
+ "cpus": 1,
+ "mem": 1000,
+ "template": "",
+ "diskread": 0,
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 1000,
+ "pid": "1001",
+ "netin": 1000,
+ "maxdisk": 1000,
+ "vmid": "101",
+ "uptime": 1000,
+ "disk": 0,
+ "status": "running"},
+ {"name": "test-qemu-template",
+ "cpus": 1,
+ "mem": 0,
+ "template": 1,
+ "diskread": 0,
+ "cpu": 0,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 0,
+ "pid": "1001",
+ "netin": 0,
+ "maxdisk": 1000,
+ "vmid": "9001",
+ "uptime": 0,
+ "disk": 0,
+ "status": "stopped"}]
+ elif url == "https://localhost:8006/api2/json/pools/test":
+ # _get_members_per_pool
+ return {"members": [{"uptime": 1000,
+ "template": 0,
+ "id": "qemu/101",
+ "mem": 1000,
+ "status": "running",
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 1000,
+ "name": "test-qemu",
+ "netout": 1000,
+ "netin": 1000,
+ "vmid": 101,
+ "node": "testnode",
+ "maxcpu": 1,
+ "type": "qemu",
+ "maxdisk": 1000,
+ "disk": 0,
+ "diskread": 1000}]}
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/network":
+ # _get_node_ip
+ return [{"families": ["inet"],
+ "priority": 3,
+ "active": 1,
+ "cidr": "10.1.1.2/24",
+ "iface": "eth0",
+ "method": "static",
+ "exists": 1,
+ "type": "eth",
+ "netmask": "24",
+ "gateway": "10.1.1.1",
+ "address": "10.1.1.2",
+ "method6": "manual",
+ "autostart": 1},
+ {"method6": "manual",
+ "autostart": 1,
+ "type": "OVSPort",
+ "exists": 1,
+ "method": "manual",
+ "iface": "eth1",
+ "ovs_bridge": "vmbr0",
+ "active": 1,
+ "families": ["inet"],
+ "priority": 5,
+ "ovs_type": "OVSPort"},
+ {"type": "OVSBridge",
+ "method": "manual",
+ "iface": "vmbr0",
+ "families": ["inet"],
+ "priority": 4,
+ "ovs_ports": "eth1",
+ "ovs_type": "OVSBridge",
+ "method6": "manual",
+ "autostart": 1,
+ "active": 1}]
+
+
+def get_vm_status(node, vmtype, vmid, name):
+ return True
+
+
+def get_option(option):
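+    # Fake of the plugin's get_option(): only group_prefix matters to
+    # _populate() in this test; every other option reads as falsy/disabled.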
+ if option == 'group_prefix':
+ return 'proxmox_'
+ else:
+ return False
+
+
+def test_populate(inventory, mocker):
+ # module settings
+ inventory.proxmox_user = 'root@pam'
+ inventory.proxmox_password = 'password'
+ inventory.proxmox_url = 'https://localhost:8006'
+
+ # bypass authentication and API fetch calls
+ inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
+ inventory._get_json = mocker.MagicMock(side_effect=get_json)
+ inventory._get_vm_status = mocker.MagicMock(side_effect=get_vm_status)
+ inventory.get_option = mocker.MagicMock(side_effect=get_option)
+ inventory._populate()
+
+ # get different hosts
+ host_qemu = inventory.inventory.get_host('test-qemu')
+ host_qemu_template = inventory.inventory.get_host('test-qemu-template')
+ host_lxc = inventory.inventory.get_host('test-lxc')
+ host_node = inventory.inventory.get_host('testnode')
+
+    # check that test-qemu is in the proxmox_pool_test group
+ assert 'proxmox_pool_test' in inventory.inventory.groups
+ group_qemu = inventory.inventory.groups['proxmox_pool_test']
+ assert group_qemu.hosts == [host_qemu]
+
+    # check that test-lxc has been discovered correctly
+ group_lxc = inventory.inventory.groups['proxmox_all_lxc']
+ assert group_lxc.hosts == [host_lxc]
+
+ # check if qemu template is not present
+ assert host_qemu_template is None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py
new file mode 100644
index 00000000..9359cd68
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleError
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.stackpath_compute import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_get_stack_slugs(inventory):
+ stacks = [
+ {
+ 'status': 'ACTIVE',
+ 'name': 'test1',
+ 'id': 'XXXX',
+ 'updatedAt': '2020-07-08T01:00:00.000000Z',
+ 'slug': 'test1',
+ 'createdAt': '2020-07-08T00:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'ACTIVE',
+ 'name': 'test2',
+ 'id': 'XXXX',
+ 'updatedAt': '2019-10-22T18:00:00.000000Z',
+ 'slug': 'test2',
+ 'createdAt': '2019-10-22T18:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'DISABLED',
+ 'name': 'test3',
+ 'id': 'XXXX',
+ 'updatedAt': '2020-01-16T20:00:00.000000Z',
+ 'slug': 'test3',
+ 'createdAt': '2019-10-15T13:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'ACTIVE',
+ 'name': 'test4',
+ 'id': 'XXXX',
+ 'updatedAt': '2019-11-20T22:00:00.000000Z',
+ 'slug': 'test4',
+ 'createdAt': '2019-11-20T22:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }
+ ]
+ inventory._get_stack_slugs(stacks)
+ assert len(inventory.stack_slugs) == 4
+ assert inventory.stack_slugs == [
+ "test1",
+ "test2",
+ "test3",
+ "test4"
+ ]
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.stackpath_compute.yml') is False
+
+
+def test_validate_config(inventory):
+ config = {
+ "client_secret": "short_client_secret",
+ "use_internal_ip": False,
+ "stack_slugs": ["test1"],
+ "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "plugin": "community.general.stackpath_compute",
+ }
+ with pytest.raises(AnsibleError) as error_message:
+ inventory._validate_config(config)
+ assert "client_secret must be 64 characters long" in error_message
+
+ config = {
+ "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "use_internal_ip": True,
+ "stack_slugs": ["test1"],
+ "client_id": "short_client_id",
+ "plugin": "community.general.stackpath_compute",
+ }
+ with pytest.raises(AnsibleError) as error_message:
+ inventory._validate_config(config)
+ assert "client_id must be 32 characters long" in error_message
+
+ config = {
+ "use_internal_ip": True,
+ "stack_slugs": ["test1"],
+ "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "plugin": "community.general.stackpath_compute",
+ }
+ with pytest.raises(AnsibleError) as error_message:
+ inventory._validate_config(config)
+ assert "config missing client_secret, a required paramter" in error_message
+
+ config = {
+ "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "use_internal_ip": False,
+ "plugin": "community.general.stackpath_compute",
+ }
+ with pytest.raises(AnsibleError) as error_message:
+ inventory._validate_config(config)
+ assert "config missing client_id, a required paramter" in error_message
+
+
+def test_populate(inventory):
+ instances = [
+ {
+ "name": "instance1",
+ "countryCode": "SE",
+ "workloadSlug": "wokrload1",
+ "continent": "Europe",
+ "workloadId": "id1",
+ "cityCode": "ARN",
+ "externalIpAddress": "20.0.0.1",
+ "target": "target1",
+ "stackSlug": "stack1",
+ "ipAddress": "10.0.0.1",
+ },
+ {
+ "name": "instance2",
+ "countryCode": "US",
+ "workloadSlug": "wokrload2",
+ "continent": "America",
+ "workloadId": "id2",
+ "cityCode": "JFK",
+ "externalIpAddress": "20.0.0.2",
+ "target": "target2",
+ "stackSlug": "stack1",
+ "ipAddress": "10.0.0.2",
+ },
+ {
+ "name": "instance3",
+ "countryCode": "SE",
+ "workloadSlug": "workload3",
+ "continent": "Europe",
+ "workloadId": "id3",
+ "cityCode": "ARN",
+ "externalIpAddress": "20.0.0.3",
+ "target": "target1",
+ "stackSlug": "stack2",
+ "ipAddress": "10.0.0.3",
+ },
+ {
+ "name": "instance4",
+ "countryCode": "US",
+ "workloadSlug": "workload3",
+ "continent": "America",
+ "workloadId": "id4",
+ "cityCode": "JFK",
+ "externalIpAddress": "20.0.0.4",
+ "target": "target2",
+ "stackSlug": "stack2",
+ "ipAddress": "10.0.0.4",
+ },
+ ]
+ inventory.hostname_key = "externalIpAddress"
+ inventory._populate(instances)
+ # get different hosts
+ host1 = inventory.inventory.get_host('20.0.0.1')
+ host2 = inventory.inventory.get_host('20.0.0.2')
+ host3 = inventory.inventory.get_host('20.0.0.3')
+ host4 = inventory.inventory.get_host('20.0.0.4')
+
+ # get different groups
+ assert 'citycode_arn' in inventory.inventory.groups
+ group_citycode_arn = inventory.inventory.groups['citycode_arn']
+ assert 'countrycode_se' in inventory.inventory.groups
+ group_countrycode_se = inventory.inventory.groups['countrycode_se']
+ assert 'continent_america' in inventory.inventory.groups
+ group_continent_america = inventory.inventory.groups['continent_america']
+ assert 'name_instance1' in inventory.inventory.groups
+ group_name_instance1 = inventory.inventory.groups['name_instance1']
+ assert 'stackslug_stack1' in inventory.inventory.groups
+ group_stackslug_stack1 = inventory.inventory.groups['stackslug_stack1']
+ assert 'target_target1' in inventory.inventory.groups
+ group_target_target1 = inventory.inventory.groups['target_target1']
+ assert 'workloadslug_workload3' in inventory.inventory.groups
+ group_workloadslug_workload3 = inventory.inventory.groups['workloadslug_workload3']
+ assert 'workloadid_id1' in inventory.inventory.groups
+ group_workloadid_id1 = inventory.inventory.groups['workloadid_id1']
+
+ assert group_citycode_arn.hosts == [host1, host3]
+ assert group_countrycode_se.hosts == [host1, host3]
+ assert group_continent_america.hosts == [host2, host4]
+ assert group_name_instance1.hosts == [host1]
+ assert group_stackslug_stack1.hosts == [host1, host2]
+ assert group_target_target1.hosts == [host1, host3]
+ assert group_workloadslug_workload3.hosts == [host3, host4]
+ assert group_workloadid_id1.hosts == [host1]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py
new file mode 100644
index 00000000..376bd725
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.general.tests.unit.compat.mock import (
+ patch,
+ MagicMock,
+)
+from ansible_collections.community.general.plugins.lookup import dsv
+from ansible.plugins.loader import lookup_loader
+
+
+class MockSecretsVault(MagicMock):
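+    # Stand-in for the SDK client used by the dsv lookup: get_secret_json()
+    # returns a fixed JSON string so the lookup's passthrough can be asserted.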
+ RESPONSE = '{"foo": "bar"}'
+
+ def get_secret_json(self, path):
+ return self.RESPONSE
+
+
+class TestLookupModule(TestCase):
+ def setUp(self):
+ dsv.sdk_is_missing = False
+ self.lookup = lookup_loader.get("community.general.dsv")
+
+ @patch(
+ "ansible_collections.community.general.plugins.lookup.dsv.LookupModule.Client",
+ MockSecretsVault(),
+ )
+ def test_get_secret_json(self):
+ self.assertListEqual(
+ [MockSecretsVault.RESPONSE],
+ self.lookup.run(
+ ["/dummy"],
+ [],
+ **{"tenant": "dummy", "client_id": "dummy", "client_secret": "dummy", }
+ ),
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py
new file mode 100644
index 00000000..b0663dff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible.errors import AnsibleError
+from ansible_collections.community.general.plugins.lookup import etcd3
+from ansible.plugins.loader import lookup_loader
+
+
+class FakeKVMetadata:
+
+ def __init__(self, keyvalue, header):
+ self.key = keyvalue
+ self.create_revision = ''
+ self.mod_revision = ''
+ self.version = ''
+ self.lease_id = ''
+ self.response_header = header
+
+
+class FakeEtcd3Client(MagicMock):
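+    # Minimal fake of the etcd3 client: get() returns a (value, metadata)
+    # tuple and get_prefix() yields the get() result for three suffixed keys,
+    # matching the shapes the etcd3 lookup plugin consumes.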
+
+ def get_prefix(self, key):
+ for i in range(1, 4):
+ yield self.get('{0}_{1}'.format(key, i))
+
+ def get(self, key):
+ return ("{0} value".format(key), FakeKVMetadata(key, None))
+
+
+class TestLookupModule(unittest.TestCase):
+
+ def setUp(self):
+ etcd3.HAS_ETCD = True
+ self.lookup = lookup_loader.get('community.general.etcd3')
+
+ @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client())
+ def test_key(self):
+ expected_result = [{'key': 'a_key', 'value': 'a_key value'}]
+ self.assertListEqual(expected_result, self.lookup.run(['a_key'], []))
+
+ @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client())
+ def test_key_prefix(self):
+ expected_result = [
+ {'key': 'a_key_1', 'value': 'a_key_1 value'},
+ {'key': 'a_key_2', 'value': 'a_key_2 value'},
+ {'key': 'a_key_3', 'value': 'a_key_3 value'},
+ ]
+ self.assertListEqual(expected_result, self.lookup.run(['a_key'], [], **{'prefix': True}))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py
new file mode 100644
index 00000000..cce693d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py
@@ -0,0 +1,187 @@
+# (c)2016 Andrew Zenk <azenk@umn.edu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from argparse import ArgumentParser
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+from ansible.errors import AnsibleError
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.lookup.lastpass import LookupModule, LPass, LPassException
+
+
+MOCK_ENTRIES = [{'username': 'user',
+ 'name': 'Mock Entry',
+ 'password': 't0pS3cret passphrase entry!',
+ 'url': 'https://localhost/login',
+ 'notes': 'Test\nnote with multiple lines.\n',
+ 'id': '0123456789'}]
+
+
+class MockLPass(LPass):
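+    # Simulates the lpass CLI: _run() parses the same arguments the real
+    # executable accepts and answers from MOCK_ENTRIES instead of shelling out.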
+
+ _mock_logged_out = False
+ _mock_disconnected = False
+
+ def _lookup_mock_entry(self, key):
+ for entry in MOCK_ENTRIES:
+ if key == entry['id'] or key == entry['name']:
+ return entry
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ # Mock behavior of lpass executable
+ base_options = ArgumentParser(add_help=False)
+ base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
+
+ p = ArgumentParser()
+ sp = p.add_subparsers(help='command', dest='subparser_name')
+
+ logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
+ show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
+
+ field_group = show_p.add_mutually_exclusive_group(required=True)
+ for field in MOCK_ENTRIES[0].keys():
+ field_group.add_argument("--{0}".format(field), default=False, action='store_true')
+ field_group.add_argument('--field', default=None)
+ show_p.add_argument('selector', help='Unique Name or ID')
+
+ args = p.parse_args(args)
+
+ def mock_exit(output='', error='', rc=0):
+ if rc != expected_rc:
+ raise LPassException(error)
+ return output, error
+
+ if args.color != 'never':
+ return mock_exit(error='Error: Mock only supports --color=never', rc=1)
+
+ if args.subparser_name == 'logout':
+ if self._mock_logged_out:
+ return mock_exit(error='Error: Not currently logged in', rc=1)
+
+ logged_in_error = 'Are you sure you would like to log out? [Y/n]'
+ if stdin and stdin.lower() == 'n\n':
+ return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
+ elif stdin and stdin.lower() == 'y\n':
+ return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
+ else:
+ return mock_exit(error='Error: aborted response', rc=1)
+
+ if args.subparser_name == 'show':
+ if self._mock_logged_out:
+ return mock_exit(error='Error: Could not find decryption key.' +
+ ' Perhaps you need to login with `lpass login`.', rc=1)
+
+ if self._mock_disconnected:
+ return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
+
+ mock_entry = self._lookup_mock_entry(args.selector)
+
+ if args.field:
+ return mock_exit(output=mock_entry.get(args.field, ''))
+ elif args.password:
+ return mock_exit(output=mock_entry.get('password', ''))
+ elif args.username:
+ return mock_exit(output=mock_entry.get('username', ''))
+ elif args.url:
+ return mock_exit(output=mock_entry.get('url', ''))
+ elif args.name:
+ return mock_exit(output=mock_entry.get('name', ''))
+ elif args.id:
+ return mock_exit(output=mock_entry.get('id', ''))
+ elif args.notes:
+ return mock_exit(output=mock_entry.get('notes', ''))
+
+ raise LPassException('We should never get here')
+
+
+class DisconnectedMockLPass(MockLPass):
+
+ _mock_disconnected = True
+
+
+class LoggedOutMockLPass(MockLPass):
+
+ _mock_logged_out = True
+
+
+class TestLPass(unittest.TestCase):
+
+ def test_lastpass_cli_path(self):
+ lp = MockLPass(path='/dev/null')
+ self.assertEqual('/dev/null', lp.cli_path)
+
+ def test_lastpass_build_args_logout(self):
+ lp = MockLPass()
+ self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
+
+ def test_lastpass_logged_in_true(self):
+ lp = MockLPass()
+ self.assertTrue(lp.logged_in)
+
+ def test_lastpass_logged_in_false(self):
+ lp = LoggedOutMockLPass()
+ self.assertFalse(lp.logged_in)
+
+ def test_lastpass_show_disconnected(self):
+ lp = DisconnectedMockLPass()
+
+ with self.assertRaises(LPassException):
+ lp.get_field('0123456789', 'username')
+
+ def test_lastpass_show(self):
+ lp = MockLPass()
+ for entry in MOCK_ENTRIES:
+ entry_id = entry.get('id')
+ for k, v in six.iteritems(entry):
+ self.assertEqual(v.strip(), lp.get_field(entry_id, k))
+
+
+class TestLastpassPlugin(unittest.TestCase):
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', new=MockLPass)
+ def test_lastpass_plugin_normal(self):
+ lookup_plugin = LookupModule()
+
+ for entry in MOCK_ENTRIES:
+ entry_id = entry.get('id')
+ for k, v in six.iteritems(entry):
+ self.assertEqual(v.strip(),
+ lookup_plugin.run([entry_id], field=k)[0])
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
+ def test_lastpass_plugin_logged_out(self):
+ lookup_plugin = LookupModule()
+
+ entry = MOCK_ENTRIES[0]
+ entry_id = entry.get('id')
+ with self.assertRaises(AnsibleError):
+ lookup_plugin.run([entry_id], field='password')
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
+ def test_lastpass_plugin_disconnected(self):
+ lookup_plugin = LookupModule()
+
+ entry = MOCK_ENTRIES[0]
+ entry_id = entry.get('id')
+ with self.assertRaises(AnsibleError):
+ lookup_plugin.run([entry_id], field='password')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py
new file mode 100644
index 00000000..0eb49c8c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py
@@ -0,0 +1,536 @@
+# (c) 2018, Arigato Machine Inc.
+# (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, call
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.lookup.manifold import ManifoldApiClient, LookupModule, ApiError
+import json
+
+
+API_FIXTURES = {
+ 'https://api.marketplace.manifold.co/v1/resources':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ },
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?label=resource-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?label=resource-2':
+ [
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1':
+ [
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ },
+ {
+ "body": {
+ "label": "resource-3",
+ "name": "Resource 3"
+ },
+ "id": "rid-3"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/projects':
+ [
+ {
+ "body": {
+ "label": "project-1",
+ "name": "Project 1",
+ },
+ "id": "pid-1",
+ },
+ {
+ "body": {
+ "label": "project-2",
+ "name": "Project 2",
+ },
+ "id": "pid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/projects?label=project-2':
+ [
+ {
+ "body": {
+ "label": "project-2",
+ "name": "Project 2",
+ },
+ "id": "pid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1':
+ [
+ {
+ "body": {
+ "resource_id": "rid-1",
+ "values": {
+ "RESOURCE_TOKEN_1": "token-1",
+ "RESOURCE_TOKEN_2": "token-2"
+ }
+ },
+ "id": "cid-1",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2':
+ [
+ {
+ "body": {
+ "resource_id": "rid-2",
+ "values": {
+ "RESOURCE_TOKEN_3": "token-3",
+ "RESOURCE_TOKEN_4": "token-4"
+ }
+ },
+ "id": "cid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3':
+ [
+ {
+ "body": {
+ "resource_id": "rid-3",
+ "values": {
+ "RESOURCE_TOKEN_1": "token-5",
+ "RESOURCE_TOKEN_2": "token-6"
+ }
+ },
+ "id": "cid-3",
+ }
+ ],
+ 'https://api.identity.manifold.co/v1/teams':
+ [
+ {
+ "id": "tid-1",
+ "body": {
+ "name": "Team 1",
+ "label": "team-1"
+ }
+ },
+ {
+ "id": "tid-2",
+ "body": {
+ "name": "Team 2",
+ "label": "team-2"
+ }
+ }
+ ]
+}
+
+
+def mock_fixture(open_url_mock, fixture=None, data=None, headers=None):
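+    # Configure the mocked open_url to return either a canned JSON fixture
+    # (serialised from API_FIXTURES) or raw data, with the given headers.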
+ if not headers:
+ headers = {}
+ if fixture:
+ data = json.dumps(API_FIXTURES[fixture])
+ if 'content-type' not in headers:
+ headers['content-type'] = 'application/json'
+
+ open_url_mock.return_value.read.return_value = data
+ open_url_mock.return_value.headers = headers
+
+
+class TestManifoldApiClient(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_sends_default_headers(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint')
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_decodes_json(self, open_url_mock):
+ mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources')
+ client = ManifoldApiClient('token-123')
+ self.assertIsInstance(client.request('marketplace', 'resources'), list)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_streams_text(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"})
+ client = ManifoldApiClient('token-123')
+ self.assertEqual('hello', client.request('test', 'endpoint'))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_processes_parameterized_headers(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'})
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123',
+ 'X-HEADER': 'MANIFOLD'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_passes_arbitrary_parameters(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint', use_proxy=False, timeout=5)
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0',
+ use_proxy=False, timeout=5)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_incorrect_json(self, open_url_mock):
+ mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"})
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n'
+ 'noJson',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_status_500(self, open_url_mock):
+ open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint',
+ 500, 'Server error', {}, six.StringIO('ERROR'))
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Server returned: HTTP Error 500: Server error while requesting '
+ 'https://api.test.manifold.co/v1/endpoint:\nERROR',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_bad_url(self, open_url_mock):
+ open_url_mock.side_effect = URLError('URL is invalid')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : <url'
+ 'open error URL is invalid>',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_ssl_error(self, open_url_mock):
+ open_url_mock.side_effect = SSLValidationError('SSL Error')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: '
+ 'SSL Error',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_connection_error(self, open_url_mock):
+ open_url_mock.side_effect = ConnectionError('Unknown connection error')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_get_all(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_filter_label(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_filter_team_and_project(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1'))
+ args, kwargs = open_url_mock.call_args
+ url_called = args[0]
+ # Dict order is not guaranteed, so an url may have querystring parameters order randomized
+ self.assertIn('team_id=tid-1', url_called)
+ self.assertIn('project_id=pid-1', url_called)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_teams_get_all(self, open_url_mock):
+ url = 'https://api.identity.manifold.co/v1/teams'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_teams())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_teams_filter_label(self, open_url_mock):
+ url = 'https://api.identity.manifold.co/v1/teams'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_projects_get_all(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/projects'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_projects())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_projects_filter_label(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_credentials(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+
+class TestLookupModule(unittest.TestCase):
+ def setUp(self):
+ self.lookup = LookupModule()
+ self.lookup._load_name = "manifold"
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_all(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2',
+ 'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_one_resource(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123'))
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_two_resources(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2',
+ 'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.display')
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-5',
+ 'RESOURCE_TOKEN_2': 'token-6'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2'))
+ client_mock.assert_called_with('token-123')
+ display_mock.warning.assert_has_calls([
+ call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"),
+ call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")],
+ any_order=True
+ )
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_team(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1']
+ client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_project(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1']
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_team_and_project(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1']
+ client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+        self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1', project='project-1'))
+ client_mock.assert_called_with('token-123')
+        client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id='pid-1')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_team_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_teams.return_value = []
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123', team='no-team')
+ self.assertEqual("Team 'no-team' does not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_project_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_projects.return_value = []
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123', project='no-project')
+ self.assertEqual("Project 'no-project' does not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_resource_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123')
+ self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_catch_api_error(self, client_mock):
+ client_mock.side_effect = ApiError('Generic error')
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123')
+ self.assertEqual("API Error: Generic error",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_catch_unhandled_exception(self, client_mock):
+ client_mock.side_effect = Exception('Unknown error')
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123')
+ self.assertTrue('Exception: Unknown error' in str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.os.getenv')
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_falls_back_to_env_var(self, client_mock, getenv_mock):
+ getenv_mock.return_value = 'token-321'
+ client_mock.return_value.get_resources.return_value = []
+ client_mock.return_value.get_credentials.return_value = []
+ self.lookup.run([])
+ getenv_mock.assert_called_with('MANIFOLD_API_TOKEN')
+ client_mock.assert_called_with('token-321')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.os.getenv')
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_falls_raises_on_no_token(self, client_mock, getenv_mock):
+ getenv_mock.return_value = None
+ client_mock.return_value.get_resources.return_value = []
+ client_mock.return_value.get_credentials.return_value = []
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([])
+ self.assertEqual('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var',
+ str(context.exception))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py
new file mode 100644
index 00000000..0312a0e0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py
@@ -0,0 +1,321 @@
+# (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# (c) 2016, Andrew Zenk <azenk@umn.edu> (test_lastpass.py used as starting point)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import datetime
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+from argparse import ArgumentParser
+
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.errors import AnsibleError
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, LookupModule
+from ansible_collections.community.general.plugins.lookup.onepassword_raw import LookupModule as OnePasswordRawLookup
+
+
+# Intentionally excludes metadata leaf nodes that would exist in real output but are not relevant here.
+MOCK_ENTRIES = [
+ {
+ 'vault_name': 'Acme "Quot\'d" Servers',
+ 'queries': [
+ '0123456789',
+ 'Mock "Quot\'d" Server'
+ ],
+ 'output': {
+ 'uuid': '0123456789',
+ 'vaultUuid': '2468',
+ 'overview': {
+ 'title': 'Mock "Quot\'d" Server'
+ },
+ 'details': {
+ 'sections': [{
+ 'title': '',
+ 'fields': [
+ {'t': 'username', 'v': 'jamesbond'},
+ {'t': 'password', 'v': 't0pS3cret'},
+ {'t': 'notes', 'v': 'Test note with\nmultiple lines and trailing space.\n\n'},
+ {'t': 'tricksy "quot\'d" field\\', 'v': '"quot\'d" value'}
+ ]
+ }]
+ }
+ }
+ },
+ {
+ 'vault_name': 'Acme Logins',
+ 'queries': [
+ '9876543210',
+ 'Mock Website',
+ 'acme.com'
+ ],
+ 'output': {
+ 'uuid': '9876543210',
+ 'vaultUuid': '1357',
+ 'overview': {
+ 'title': 'Mock Website',
+ 'URLs': [
+ {'l': 'website', 'u': 'https://acme.com/login'}
+ ]
+ },
+ 'details': {
+ 'sections': [{
+ 'title': '',
+ 'fields': [
+ {'t': 'password', 'v': 't0pS3cret'}
+ ]
+ }]
+ }
+ }
+ },
+ {
+ 'vault_name': 'Acme Logins',
+ 'queries': [
+ '864201357'
+ ],
+ 'output': {
+ 'uuid': '864201357',
+ 'vaultUuid': '1357',
+ 'overview': {
+ 'title': 'Mock Something'
+ },
+ 'details': {
+ 'fields': [
+ {
+ 'value': 'jbond@mi6.gov.uk',
+ 'name': 'emailAddress'
+ },
+ {
+ 'name': 'password',
+ 'value': 'vauxhall'
+ },
+ {},
+ ]
+ }
+ }
+ },
+]
+
+
+def get_mock_query_generator(require_field=None):
+ def _process_field(field, section_title=None):
+ field_name = field.get('name', field.get('t', ''))
+ field_value = field.get('value', field.get('v', ''))
+
+ if require_field is None or field_name == require_field:
+ return entry, query, section_title, field_name, field_value
+
+ for entry in MOCK_ENTRIES:
+ for query in entry['queries']:
+ for field in entry['output']['details'].get('fields', []):
+ fixture = _process_field(field)
+ if fixture:
+ yield fixture
+ for section in entry['output']['details'].get('sections', []):
+ for field in section['fields']:
+ fixture = _process_field(field, section['title'])
+ if fixture:
+ yield fixture
+
+
+def get_one_mock_query(require_field=None):
+ generator = get_mock_query_generator(require_field)
+ return next(generator)
+
+
+class MockOnePass(OnePass):
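+    # Fakes the 1Password `op` CLI: _run() parses the same `op get ...`
+    # arguments and resolves queries against MOCK_ENTRIES by uuid, title
+    # or URL host instead of invoking the real binary.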
+
+ _mock_logged_out = False
+ _mock_timed_out = False
+
+ def _lookup_mock_entry(self, key, vault=None):
+ for entry in MOCK_ENTRIES:
+ if vault is not None and vault.lower() != entry['vault_name'].lower() and vault.lower() != entry['output']['vaultUuid'].lower():
+ continue
+
+ match_fields = [
+ entry['output']['uuid'],
+ entry['output']['overview']['title']
+ ]
+
+ # Note that exactly how 1Password matches on domains in non-trivial cases is neither documented
+ # nor obvious, so this may not precisely match the real behavior.
+ urls = entry['output']['overview'].get('URLs')
+ if urls is not None:
+ match_fields += [urlparse(url['u']).netloc for url in urls]
+
+ if key in match_fields:
+ return entry['output']
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ parser = ArgumentParser()
+
+ command_parser = parser.add_subparsers(dest='command')
+
+ get_parser = command_parser.add_parser('get')
+ get_options = ArgumentParser(add_help=False)
+ get_options.add_argument('--vault')
+ get_type_parser = get_parser.add_subparsers(dest='object_type')
+ get_type_parser.add_parser('account', parents=[get_options])
+ get_item_parser = get_type_parser.add_parser('item', parents=[get_options])
+ get_item_parser.add_argument('item_id')
+
+ args = parser.parse_args(args)
+
+ def mock_exit(output='', error='', rc=0):
+ if rc != expected_rc:
+ raise AnsibleError(error)
+ if error != '':
+ now = datetime.date.today()
+                error = '[LOG] {0} (ERROR) {1}'.format(now.strftime('%Y/%m/%d %H:%M:%S'), error)
+ return rc, output, error
+
+ if args.command == 'get':
+ if self._mock_logged_out:
+ return mock_exit(error='You are not currently signed in. Please run `op signin --help` for instructions', rc=1)
+
+ if self._mock_timed_out:
+ return mock_exit(error='401: Authentication required.', rc=1)
+
+ if args.object_type == 'item':
+ mock_entry = self._lookup_mock_entry(args.item_id, args.vault)
+
+ if mock_entry is None:
+ return mock_exit(error='Item {0} not found'.format(args.item_id))
+
+ return mock_exit(output=json.dumps(mock_entry))
+
+ if args.object_type == 'account':
+ # Since we don't actually ever use this output, don't bother mocking output.
+ return mock_exit()
+
+ raise AnsibleError('Unsupported command string passed to OnePass mock: {0}'.format(args))
+
+
+class LoggedOutMockOnePass(MockOnePass):
+
+ _mock_logged_out = True
+
+
+class TimedOutMockOnePass(MockOnePass):
+
+ _mock_timed_out = True
+
+
+class TestOnePass(unittest.TestCase):
+
+ def test_onepassword_cli_path(self):
+ op = MockOnePass(path='/dev/null')
+ self.assertEqual('/dev/null', op.cli_path)
+
+ def test_onepassword_logged_in(self):
+ op = MockOnePass()
+ try:
+ op.assert_logged_in()
+ except Exception:
+ self.fail()
+
+ def test_onepassword_logged_out(self):
+ op = LoggedOutMockOnePass()
+ with self.assertRaises(AnsibleError):
+ op.assert_logged_in()
+
+ def test_onepassword_timed_out(self):
+ op = TimedOutMockOnePass()
+ with self.assertRaises(AnsibleError):
+ op.assert_logged_in()
+
+ def test_onepassword_get(self):
+ op = MockOnePass()
+ op.logged_in = True
+ query_generator = get_mock_query_generator()
+ for dummy, query, dummy, field_name, field_value in query_generator:
+ self.assertEqual(field_value, op.get_field(query, field_name))
+
+ def test_onepassword_get_raw(self):
+ op = MockOnePass()
+ op.logged_in = True
+ for entry in MOCK_ENTRIES:
+ for query in entry['queries']:
+ self.assertEqual(json.dumps(entry['output']), op.get_raw(query))
+
+ def test_onepassword_get_not_found(self):
+ op = MockOnePass()
+ op.logged_in = True
+ self.assertEqual('', op.get_field('a fake query', 'a fake field'))
+
+ def test_onepassword_get_with_section(self):
+ op = MockOnePass()
+ op.logged_in = True
+ dummy, query, section_title, field_name, field_value = get_one_mock_query()
+ self.assertEqual(field_value, op.get_field(query, field_name, section=section_title))
+
+ def test_onepassword_get_with_vault(self):
+ op = MockOnePass()
+ op.logged_in = True
+ entry, query, dummy, field_name, field_value = get_one_mock_query()
+ for vault_query in [entry['vault_name'], entry['output']['vaultUuid']]:
+ self.assertEqual(field_value, op.get_field(query, field_name, vault=vault_query))
+
+ def test_onepassword_get_with_wrong_vault(self):
+ op = MockOnePass()
+ op.logged_in = True
+ dummy, query, dummy, field_name, dummy = get_one_mock_query()
+ self.assertEqual('', op.get_field(query, field_name, vault='a fake vault'))
+
+ def test_onepassword_get_diff_case(self):
+ op = MockOnePass()
+ op.logged_in = True
+ entry, query, section_title, field_name, field_value = get_one_mock_query()
+ self.assertEqual(
+ field_value,
+ op.get_field(
+ query,
+ field_name.upper(),
+ vault=entry['vault_name'].upper(),
+ section=section_title.upper()
+ )
+ )
+
+
+@patch('ansible_collections.community.general.plugins.lookup.onepassword.OnePass', MockOnePass)
+class TestLookupModule(unittest.TestCase):
+
+ def test_onepassword_plugin_multiple(self):
+ lookup_plugin = LookupModule()
+
+ entry = MOCK_ENTRIES[0]
+ field = entry['output']['details']['sections'][0]['fields'][0]
+
+ self.assertEqual(
+ [field['v']] * len(entry['queries']),
+ lookup_plugin.run(entry['queries'], field=field['t'])
+ )
+
+ def test_onepassword_plugin_default_field(self):
+ lookup_plugin = LookupModule()
+
+ dummy, query, dummy, dummy, field_value = get_one_mock_query('password')
+ self.assertEqual([field_value], lookup_plugin.run([query]))
+
+
+@patch('ansible_collections.community.general.plugins.lookup.onepassword_raw.OnePass', MockOnePass)
+class TestOnePasswordRawLookup(unittest.TestCase):
+
+ def test_onepassword_raw_plugin_multiple(self):
+ raw_lookup_plugin = OnePasswordRawLookup()
+
+ entry = MOCK_ENTRIES[0]
+ raw_value = entry['output']
+
+ self.assertEqual(
+ [raw_value] * len(entry['queries']),
+ raw_lookup_plugin.run(entry['queries'])
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py
new file mode 100644
index 00000000..cca2f6ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.general.tests.unit.compat.mock import (
+ patch,
+ MagicMock,
+)
+from ansible_collections.community.general.plugins.lookup import tss
+from ansible.plugins.loader import lookup_loader
+
+
+class MockSecretServer(MagicMock):
+ RESPONSE = '{"foo": "bar"}'
+
+ def get_secret_json(self, path):
+ return self.RESPONSE
+
+
+class TestLookupModule(TestCase):
+ def setUp(self):
+ tss.sdk_is_missing = False
+ self.lookup = lookup_loader.get("community.general.tss")
+
+ @patch(
+ "ansible_collections.community.general.plugins.lookup.tss.LookupModule.Client",
+ MockSecretServer(),
+ )
+ def test_get_secret_json(self):
+ self.assertListEqual(
+ [MockSecretServer.RESPONSE],
+ self.lookup.run(
+ [1],
+ [],
+ **{"base_url": "dummy", "username": "dummy", "password": "dummy", }
+ ),
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py
new file mode 100644
index 00000000..5d0438b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py
@@ -0,0 +1,52 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import random
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, \
+ _full_jitter_backoff
+
+
+class ExponentialBackoffStrategyTestCase(unittest.TestCase):
+ def test_no_retries(self):
+ strategy = _exponential_backoff(retries=0)
+ result = list(strategy())
+ self.assertEqual(result, [], 'list should be empty')
+
+ def test_exponential_backoff(self):
+ strategy = _exponential_backoff(retries=5, delay=1, backoff=2)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16])
+
+ def test_max_delay(self):
+ strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=60)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16, 32, 60])
+
+ def test_max_delay_none(self):
+ strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=None)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16, 32, 64])
+
+
+class FullJitterBackoffStrategyTestCase(unittest.TestCase):
+ def test_no_retries(self):
+ strategy = _full_jitter_backoff(retries=0)
+ result = list(strategy())
+ self.assertEqual(result, [], 'list should be empty')
+
+ def test_full_jitter(self):
+ retries = 5
+ seed = 1
+
+ r = random.Random(seed)
+ expected = [r.randint(0, 2**i) for i in range(0, retries)]
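+ # The expected delays are reproducible because this random.Random(seed) mirrors
+ # the one handed to _full_jitter_backoff below via its private _random argument,
+ # so both draw the identical sequence of randint() values.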
+
+ strategy = _full_jitter_backoff(
+ retries=retries, delay=1, _random=random.Random(seed))
+ result = list(strategy())
+
+ self.assertEqual(result, expected)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py
new file mode 100644
index 00000000..8bc13c4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+from io import BytesIO
+
+import pytest
+
+import ansible.module_utils.basic
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def stdin(mocker, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the stdin pytest fixture')
+
+ fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict'))
+ if PY3:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock())
+ mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin)
+ else:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin)
+
+ yield fake_stdin
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
+
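+# Illustrative usage (not part of this file): tests typically feed module arguments
+# through this fixture with pytest's indirect parametrization, for example:
+#
+#   @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+#   def test_module_constructs(am):
+#       assert am._name == 'ansible_unittest'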
+
+@pytest.fixture
+def am(stdin, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ argspec = {}
+ if hasattr(request, 'param'):
+ if isinstance(request.param, dict):
+ argspec = request.param
+
+ am = ansible.module_utils.basic.AnsibleModule(
+ argument_spec=argspec,
+ )
+ am._name = 'ansible_unittest'
+
+ yield am
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/test_common.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/test_common.py
new file mode 100644
index 00000000..35d55729
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/docker/test_common.py
@@ -0,0 +1,518 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.docker.common import (
+ compare_dict_allow_more_present,
+ compare_generic,
+ convert_duration_to_nanosecond,
+ parse_healthcheck
+)
+
+DICT_ALLOW_MORE_PRESENT = (
+ {
+ 'av': {},
+ 'bv': {'a': 1},
+ 'result': True
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'a': 1, 'b': 2},
+ 'result': True
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'b': 2},
+ 'result': False
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'a': None, 'b': 1},
+ 'result': False
+ },
+ {
+ 'av': {'a': None},
+ 'bv': {'b': 1},
+ 'result': False
+ },
+)
+
+COMPARE_GENERIC = [
+ ########################################################################################
+ # value
+ {
+ 'a': 1,
+ 'b': 2,
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': False
+ },
+ {
+ 'a': 'hello',
+ 'b': 'hello',
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': None,
+ 'b': 'hello',
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': False
+ },
+ {
+ 'a': None,
+ 'b': None,
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': 1,
+ 'b': 2,
+ 'method': 'ignore',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': None,
+ 'b': 2,
+ 'method': 'ignore',
+ 'type': 'value',
+ 'result': True
+ },
+ ########################################################################################
+ # list
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'z',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ 'x',
+ 'z',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'ignore',
+ 'type': 'list',
+ 'result': True
+ },
+ ########################################################################################
+ # set
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'z',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ 'x',
+ 'z',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'a',
+ ],
+ 'b': [
+ 'y',
+ 'z',
+ ],
+ 'method': 'ignore',
+ 'type': 'set',
+ 'result': True
+ },
+ ########################################################################################
+ # set(dict)
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'y': 1},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': False
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1, 'y': 2},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1, 'z': 2},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1, 'y': 2},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': False
+ },
+ {
+ 'a': [
+ {'x': 1, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 1, 'y': 3, 'z': 4},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ ],
+ 'method': 'ignore',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ ########################################################################################
+ # dict
+ {
+ 'a': {'x': 1},
+ 'b': {'y': 1},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1},
+ 'b': {'x': 1},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': True
+ },
+ {
+ 'a': {'x': 1, 'z': 2},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1, 'z': 2},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'ignore',
+ 'type': 'dict',
+ 'result': True
+ },
+] + [{
+ 'a': entry['av'],
+ 'b': entry['bv'],
+ 'method': 'allow_more_present',
+ 'type': 'dict',
+ 'result': entry['result']
+} for entry in DICT_ALLOW_MORE_PRESENT]
+
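+# As exercised by the fixtures above: 'strict' requires exact equality (including
+# order for lists), 'allow_more_present' accepts b containing extra items or keys
+# beyond those in a, and 'ignore' always reports a match.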
+
+@pytest.mark.parametrize("entry", DICT_ALLOW_MORE_PRESENT)
+def test_dict_allow_more_present(entry):
+ assert compare_dict_allow_more_present(entry['av'], entry['bv']) == entry['result']
+
+
+@pytest.mark.parametrize("entry", COMPARE_GENERIC)
+def test_compare_generic(entry):
+ assert compare_generic(entry['a'], entry['b'], entry['method'], entry['type']) == entry['result']
+
+
+def test_convert_duration_to_nanosecond():
+ nanoseconds = convert_duration_to_nanosecond('5s')
+ assert nanoseconds == 5000000000
+ nanoseconds = convert_duration_to_nanosecond('1m5s')
+ assert nanoseconds == 65000000000
+ with pytest.raises(ValueError):
+ convert_duration_to_nanosecond([1, 2, 3])
+ with pytest.raises(ValueError):
+ convert_duration_to_nanosecond('10x')
+
+
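+# parse_healthcheck, as exercised below, normalises a string test command to
+# ['CMD-SHELL', <command>], converts duration strings to nanoseconds, and reports
+# disabled=True when the test is ['NONE'].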
+def test_parse_healthcheck():
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1s',
+ })
+ assert disabled is False
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 1000000000
+ }
+
+ result, disabled = parse_healthcheck({
+ 'test': ['NONE'],
+ })
+ assert result is None
+ assert disabled
+
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1s423ms'
+ })
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 1423000000
+ }
+ assert disabled is False
+
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1h1m2s3ms4us'
+ })
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 3662003004000
+ }
+ assert disabled is False
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_auth.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_auth.py
new file mode 100644
index 00000000..6ca8c86f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_auth.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Tom Melendez (@supertom) <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat import mock, unittest
+from ansible_collections.community.general.plugins.module_utils.gcp import (_get_gcp_ansible_credentials, _get_gcp_credentials, _get_gcp_environ_var,
+ _get_gcp_environment_credentials,
+ _validate_credentials_file)
+
+# Fake data/function used for testing
+fake_env_data = {'GCE_EMAIL': 'gce-email'}
+
+
+def fake_get_gcp_environ_var(var_name, default_value):
+ if var_name not in fake_env_data:
+ return default_value
+ else:
+ return fake_env_data[var_name]
+
+# Fake AnsibleModule for use in tests
+
+
+class FakeModule(object):
+ class Params():
+ data = {}
+
+ def get(self, key, alt=None):
+ if key in self.data:
+ return self.data[key]
+ else:
+ return alt
+
+ def __init__(self, data=None):
+ data = {} if data is None else data
+
+ self.params = FakeModule.Params()
+ self.params.data = data
+
+ def fail_json(self, **kwargs):
+ raise ValueError("fail_json")
+
+ def deprecate(self, **kwargs):
+ return None
+
+
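+# FakeModule stands in for AnsibleModule in these tests: params.get() reads the
+# supplied dict and fail_json() raises ValueError, so failures can be asserted
+# with assertRaises/pytest.raises instead of inspecting a real module exit.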
+class GCPAuthTestCase(unittest.TestCase):
+ """Tests to verify different Auth mechanisms."""
+
+ def setup_method(self, method):
+ global fake_env_data
+ fake_env_data = {'GCE_EMAIL': 'gce-email'}
+
+ def test_get_gcp_ansible_credentials(self):
+ input_data = {'service_account_email': 'mysa',
+ 'credentials_file': 'path-to-file.json',
+ 'project_id': 'my-cool-project'}
+
+ module = FakeModule(input_data)
+ actual = _get_gcp_ansible_credentials(module)
+ expected = tuple(input_data.values())
+ self.assertEqual(sorted(expected), sorted(actual))
+
+ def test_get_gcp_environ_var(self):
+ # We chose not to mock this so we can really verify that it
+ # works as expected.
+ existing_var_name = 'gcp_ansible_auth_test_54321'
+ non_existing_var_name = 'doesnt_exist_gcp_ansible_auth_test_12345'
+ os.environ[existing_var_name] = 'foobar'
+ self.assertEqual('foobar', _get_gcp_environ_var(
+ existing_var_name, None))
+ del os.environ[existing_var_name]
+ self.assertEqual('default_value', _get_gcp_environ_var(
+ non_existing_var_name, 'default_value'))
+
+ def test_validate_credentials_file(self):
+ # TODO(supertom): Only dealing with p12 here, check the other states
+ # of this function
+ module = FakeModule()
+ with mock.patch('ansible_collections.community.general.plugins.module_utils.gcp.open',
+ mock.mock_open(read_data='foobar'), create=True):
+ # pem condition, warning is suppressed with the return_value
+ credentials_file = '/foopath/pem.pem'
+ with self.assertRaises(ValueError):
+ _validate_credentials_file(module,
+ credentials_file=credentials_file,
+ require_valid_json=False,
+ check_libcloud=False)
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.gcp._get_gcp_environ_var',
+ side_effect=fake_get_gcp_environ_var)
+ def test_get_gcp_environment_credentials(self, mockobj):
+ global fake_env_data
+
+ actual = _get_gcp_environment_credentials(None, None, None)
+ expected = tuple(['gce-email', None, None])
+ self.assertEqual(expected, actual)
+
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem'}
+ expected = tuple([None, '/path/to/pem.pem', None])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # pem and creds are set, expect creds
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
+ 'GCE_CREDENTIALS_FILE_PATH': '/path/to/creds.json'}
+ expected = tuple([None, '/path/to/creds.json', None])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # expect GOOGLE_APPLICATION_CREDENTIALS over PEM
+ fake_env_data = {'GCE_PEM_FILE_PATH': '/path/to/pem.pem',
+ 'GOOGLE_APPLICATION_CREDENTIALS': '/path/to/appcreds.json'}
+ expected = tuple([None, '/path/to/appcreds.json', None])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # project tests
+ fake_env_data = {'GCE_PROJECT': 'my-project'}
+ expected = tuple([None, None, 'my-project'])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-cloud-project'}
+ expected = tuple([None, None, 'my-cloud-project'])
+ actual = _get_gcp_environment_credentials(None, None, None)
+ self.assertEqual(expected, actual)
+
+ # credentials passed in directly; only the project id is picked up from the environment
+ fake_env_data = {'GOOGLE_CLOUD_PROJECT': 'my-project'}
+ expected = tuple(['my-sa-email', '/path/to/creds.json', 'my-project'])
+ actual = _get_gcp_environment_credentials(
+ 'my-sa-email', '/path/to/creds.json', None)
+ self.assertEqual(expected, actual)
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.gcp._get_gcp_environ_var',
+ side_effect=fake_get_gcp_environ_var)
+ def test_get_gcp_credentials(self, mockobj):
+ global fake_env_data
+
+ fake_env_data = {}
+ module = FakeModule()
+ module.params.data = {}
+ # Nothing is set, calls fail_json
+ with pytest.raises(ValueError):
+ _get_gcp_credentials(module)
+
+ # project_id (only) is set from Ansible params.
+ module.params.data['project_id'] = 'my-project'
+ actual = _get_gcp_credentials(
+ module, require_valid_json=True, check_libcloud=False)
+ expected = {'service_account_email': '',
+ 'project_id': 'my-project',
+ 'credentials_file': ''}
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_utils.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_utils.py
new file mode 100644
index 00000000..c05f6768
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/gcp/test_utils.py
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 -*-
+# (c) 2016, Tom Melendez <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import mock, unittest
+from ansible_collections.community.general.plugins.module_utils.gcp import check_min_pkg_version, GCPUtils, GCPInvalidURLError
+
+
+def build_distribution(version):
+ obj = mock.MagicMock()
+ obj.version = '0.5.0'
+ return obj
+
+
+class GCPUtilsTestCase(unittest.TestCase):
+ params_dict = {
+ 'url_map_name': 'foo_url_map_name',
+ 'description': 'foo_url_map description',
+ 'host_rules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'path_matcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'path_matchers': [
+ {
+ 'name': 'path_matcher_one',
+ 'description': 'path matcher one',
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'pathRules': [
+ {
+ 'service': 'my-one-bes',
+ 'paths': [
+ '/',
+ '/aboutus'
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'path_matcher_two',
+ 'description': 'path matcher two',
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'pathRules': [
+ {
+ 'service': 'my-two-bes',
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ @mock.patch("pkg_resources.get_distribution", side_effect=build_distribution)
+ def test_check_minimum_pkg_version(self, mockobj):
+ self.assertTrue(check_min_pkg_version('foobar', '0.4.0'))
+ self.assertTrue(check_min_pkg_version('foobar', '0.5.0'))
+ self.assertFalse(check_min_pkg_version('foobar', '0.6.0'))
+
+ def test_parse_gcp_url(self):
+ # region, resource, entity, method
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/regions/us-east1/instanceGroupManagers/my-mig/recreateInstances'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertEqual('us-east1', actual['region'])
+ self.assertEqual('instanceGroupManagers', actual['resource_name'])
+ self.assertEqual('my-mig', actual['entity_name'])
+ self.assertEqual('recreateInstances', actual['method_name'])
+
+ # zone, resource, entity, method
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/zones/us-east1-c/instanceGroupManagers/my-mig/recreateInstances'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertEqual('us-east1-c', actual['zone'])
+ self.assertEqual('instanceGroupManagers', actual['resource_name'])
+ self.assertEqual('my-mig', actual['entity_name'])
+ self.assertEqual('recreateInstances', actual['method_name'])
+
+ # global, resource
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('urlMaps', actual['resource_name'])
+
+ # global, resource, entity
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/my-url-map'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('compute', actual['service'])
+
+ # global URL, resource, entity, method_name
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/mybackendservice/getHealth'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertTrue('global' in actual)
+ self.assertTrue(actual['global'])
+ self.assertEqual('backendServices', actual['resource_name'])
+ self.assertEqual('mybackendservice', actual['entity_name'])
+ self.assertEqual('getHealth', actual['method_name'])
+
+ # no location in URL
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy/setUrlMap'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
+ self.assertEqual('mytargetproxy', actual['entity_name'])
+ self.assertEqual('setUrlMap', actual['method_name'])
+
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
+ self.assertEqual('mytargetproxy', actual['entity_name'])
+
+ input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies'
+ actual = GCPUtils.parse_gcp_url(input_url)
+ self.assertEqual('compute', actual['service'])
+ self.assertEqual('v1', actual['api_version'])
+ self.assertEqual('myproject', actual['project'])
+ self.assertFalse('global' in actual)
+ self.assertEqual('targetHttpProxies', actual['resource_name'])
+
+ # test exceptions
+ no_projects_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global/backendServices/mybackendservice/getHealth'
+ no_resource_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global'
+
+ no_resource_no_loc_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject'
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_projects_input_url)
+ self.assertTrue(cm.exception, GCPInvalidURLError)
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_resource_input_url)
+ self.assertTrue(cm.exception, GCPInvalidURLError)
+
+ with self.assertRaises(GCPInvalidURLError) as cm:
+ GCPUtils.parse_gcp_url(no_resource_no_loc_input_url)
+ self.assertTrue(cm.exception, GCPInvalidURLError)
+
+ def test_params_to_gcp_dict(self):
+
+ expected = {
+ 'description': 'foo_url_map description',
+ 'hostRules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'pathMatcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'name': 'foo_url_map_name',
+ 'pathMatchers': [
+ {
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'description': 'path matcher one',
+ 'name': 'path_matcher_one',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/',
+ '/aboutus'
+ ],
+ 'service': 'my-one-bes'
+ }
+ ]
+ },
+ {
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'description': 'path matcher two',
+ 'name': 'path_matcher_two',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ],
+ 'service': 'my-two-bes'
+ }
+ ]
+ }
+ ]
+ }
+
+ actual = GCPUtils.params_to_gcp_dict(self.params_dict, 'url_map_name')
+ self.assertEqual(expected, actual)
+
+ def test_get_gcp_resource_from_methodId(self):
+ input_data = 'compute.urlMaps.list'
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertEqual('urlMaps', actual)
+ input_data = None
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertFalse(actual)
+ input_data = 666
+ actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
+ self.assertFalse(actual)
+
+ def test_get_entity_name_from_resource_name(self):
+ input_data = 'urlMaps'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('urlMap', actual)
+ input_data = 'targetHttpProxies'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('targetHttpProxy', actual)
+ input_data = 'globalForwardingRules'
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual('forwardingRule', actual)
+ input_data = ''
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual(None, actual)
+ input_data = 666
+ actual = GCPUtils.get_entity_name_from_resource_name(input_data)
+ self.assertEqual(None, actual)
+
+ def test_are_params_equal(self):
+ params1 = {'one': 1}
+ params2 = {'one': 1}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ params1 = {'one': 1}
+ params2 = {'two': 2}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertFalse(actual)
+
+ params1 = {'three': 3, 'two': 2, 'one': 1}
+ params2 = {'one': 1, 'two': 2, 'three': 3}
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ params1 = {
+ "creationTimestamp": "2017-04-21T11:19:20.718-07:00",
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
+ "description": "",
+ "fingerprint": "ickr_pwlZPU=",
+ "hostRules": [
+ {
+ "description": "",
+ "hosts": [
+ "*."
+ ],
+ "pathMatcher": "path-matcher-one"
+ }
+ ],
+ "id": "8566395781175047111",
+ "kind": "compute#urlMap",
+ "name": "newtesturlmap-foo",
+ "pathMatchers": [
+ {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
+ "description": "path matcher one",
+ "name": "path-matcher-one",
+ "pathRules": [
+ {
+ "paths": [
+ "/data",
+ "/aboutus"
+ ],
+ "service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
+ }
+ ]
+ }
+ ],
+ "selfLink": "https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/newtesturlmap-foo"
+ }
+ params2 = {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
+ "hostRules": [
+ {
+ "description": "",
+ "hosts": [
+ "*."
+ ],
+ "pathMatcher": "path-matcher-one"
+ }
+ ],
+ "name": "newtesturlmap-foo",
+ "pathMatchers": [
+ {
+ "defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
+ "description": "path matcher one",
+ "name": "path-matcher-one",
+ "pathRules": [
+ {
+ "paths": [
+ "/data",
+ "/aboutus"
+ ],
+ "service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
+ }
+ ]
+ }
+ ],
+ }
+
+ # params1 has exclude fields, params2 doesn't. Should be equal
+ actual = GCPUtils.are_params_equal(params1, params2)
+ self.assertTrue(actual)
+
+ def test_filter_gcp_fields(self):
+ input_data = {
+ u'kind': u'compute#httpsHealthCheck',
+ u'description': u'',
+ u'timeoutSec': 5,
+ u'checkIntervalSec': 5,
+ u'port': 443,
+ u'healthyThreshold': 2,
+ u'host': u'',
+ u'requestPath': u'/',
+ u'unhealthyThreshold': 2,
+ u'creationTimestamp': u'2017-05-16T15:09:36.546-07:00',
+ u'id': u'8727093129334146639',
+ u'selfLink': u'https://www.googleapis.com/compute/v1/projects/myproject/global/httpsHealthChecks/myhealthcheck',
+ u'name': u'myhealthcheck'}
+
+ expected = {
+ 'name': 'myhealthcheck',
+ 'checkIntervalSec': 5,
+ 'port': 443,
+ 'unhealthyThreshold': 2,
+ 'healthyThreshold': 2,
+ 'host': '',
+ 'timeoutSec': 5,
+ 'requestPath': '/'}
+
+ actual = GCPUtils.filter_gcp_fields(input_data)
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py
new file mode 100644
index 00000000..bcdfc4e2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# 2018.07.26 --- use DictComparison instead of GcpRequest
+#
+# (c) 2016, Tom Melendez <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import are_different_dicts
+
+
+class HwcDictComparisonTestCase(unittest.TestCase):
+ def test_simple_no_difference(self):
+ value1 = {
+ 'foo': 'bar',
+ 'test': 'original'
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_simple_different(self):
+ value1 = {
+ 'foo': 'bar',
+ 'test': 'original'
+ }
+ value2 = {
+ 'foo': 'bar',
+ 'test': 'different'
+ }
+ value3 = {
+ 'test': 'original'
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_nested_dictionaries_no_difference(self):
+ value1 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ },
+ 'test': 'original'
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_nested_dictionaries_with_difference(self):
+ value1 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ },
+ 'test': 'original'
+ }
+ value2 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'baz'
+ },
+ 'bar': 'hello'
+ },
+ 'test': 'original'
+ }
+ value3 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ }
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_arrays_strings_no_difference(self):
+ value1 = {
+ 'foo': [
+ 'baz',
+ 'bar'
+ ]
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_arrays_strings_with_difference(self):
+ value1 = {
+ 'foo': [
+ 'baz',
+ 'bar',
+ ]
+ }
+
+ value2 = {
+ 'foo': [
+ 'baz',
+ 'hello'
+ ]
+ }
+ value3 = {
+ 'foo': [
+ 'bar',
+ ]
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_arrays_dicts_with_no_difference(self):
+ value1 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ },
+ {
+ 'different': 'dict'
+ }
+ ]
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_arrays_dicts_with_difference(self):
+ value1 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ },
+ {
+ 'different': 'dict'
+ }
+ ]
+ }
+ value2 = {
+ 'foo': [
+ {
+ 'test': 'value2',
+ 'foo': 'bar2'
+ },
+ ]
+ }
+ value3 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ }
+ ]
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py
new file mode 100644
index 00000000..18918759
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (HwcModuleException, navigate_value)
+
+
+class HwcUtilsTestCase(unittest.TestCase):
+ def test_navigate_value(self):
+ value = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test',
+ "trees": [0, 1]
+ },
+ }
+ }
+
+ self.assertEqual(navigate_value(value, ["foo", "quiet", "tree"]),
+ "test")
+
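+ # The optional mapping argument selects list indexes by dotted path, so
+ # {"foo.quiet.trees": 1} below reads value['foo']['quiet']['trees'][1].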
+ self.assertEqual(
+ navigate_value(value, ["foo", "quiet", "trees"],
+ {"foo.quiet.trees": 1}),
+ 1)
+
+ self.assertRaisesRegexp(HwcModuleException,
+ r".* key\(q\) is not exist in dict",
+ navigate_value, value, ["foo", "q", "tree"])
+
+ self.assertRaisesRegexp(HwcModuleException,
+ r".* the index is out of list",
+ navigate_value, value,
+ ["foo", "quiet", "trees"],
+ {"foo.quiet.trees": 2})
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py
new file mode 100644
index 00000000..a929382a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py
@@ -0,0 +1,169 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from itertools import count
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ get_token,
+ KeycloakError,
+)
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ try:
+ call_number = get_id_call_count.__next__()
+ except AttributeError:
+ # Handle Python 2, where the counter exposes .next() instead of __next__().
+ call_number = get_id_call_count.next()
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
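+# Summary of the dispatch above: a callable response is invoked, a dict is keyed
+# by HTTP method, and a list is consumed in call order via the shared
+# itertools.count() iterator passed in by each fixture.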
+
+def create_wrapper(text_as_string):
+ """Allow a call to one address to be mocked several times.
+ Without this wrapper, the StringIO would already be consumed and empty on the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+@pytest.fixture()
+def mock_good_connection(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_connect_to_keycloak(mock_good_connection):
+ keycloak_header = get_token(
+ base_url='http://keycloak.url/auth',
+ validate_certs=True,
+ auth_realm='master',
+ client_id='admin-cli',
+ auth_username='admin',
+ auth_password='admin',
+ client_secret=None
+ )
+ assert keycloak_header == {
+ 'Authorization': 'Bearer alongtoken',
+ 'Content-Type': 'application/json'
+ }
+
+
+@pytest.fixture()
+def mock_bad_json_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token":'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_bad_json_returned(mock_bad_json_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(
+ base_url='http://keycloak.url/auth',
+ validate_certs=True,
+ auth_realm='master',
+ client_id='admin-cli',
+ auth_username='admin',
+ auth_password='admin',
+ client_secret=None
+ )
+ # Cannot check the whole message: the ValueError text differs between
+ # Python 2.6, 2.7 and 3.x.
+ assert (
+ 'API returned invalid JSON when trying to obtain access token from '
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token: '
+ ) in str(raised_error.value)
+
+
+def raise_401(url):
+ def _raise_401():
+ raise HTTPError(url=url, code=401, msg='Unauthorized', hdrs='', fp=StringIO(''))
+ return _raise_401
+
+
+@pytest.fixture()
+def mock_401_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': raise_401(
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token'),
+ }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_error_returned(mock_401_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(
+ base_url='http://keycloak.url/auth',
+ validate_certs=True,
+ auth_realm='master',
+ client_id='admin-cli',
+ auth_username='notadminuser',
+ auth_password='notadminpassword',
+ client_secret=None
+ )
+ assert str(raised_error.value) == (
+ 'Could not obtain access token from http://keycloak.url'
+ '/auth/realms/master/protocol/openid-connect/token: '
+ 'HTTP Error 401: Unauthorized'
+ )
+
+
+@pytest.fixture()
+def mock_json_without_token_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"not_token": "It is not a token"}'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_json_without_token_returned(mock_json_without_token_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(
+ base_url='http://keycloak.url/auth',
+ validate_certs=True,
+ auth_realm='master',
+ client_id='admin-cli',
+ auth_username='admin',
+ auth_password='admin',
+ client_secret=None
+ )
+ assert str(raised_error.value) == (
+ 'Could not obtain access token from http://keycloak.url'
+ '/auth/realms/master/protocol/openid-connect/token'
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/test_api.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/test_api.py
new file mode 100644
index 00000000..89fccb0a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/nios/test_api.py
@@ -0,0 +1,251 @@
+# (c) 2018 Red Hat, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+
+
+class TestNiosApi(unittest.TestCase):
+
+ def setUp(self):
+ super(TestNiosApi, self).setUp()
+
+ self.module = MagicMock(name='AnsibleModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+
+ self.mock_connector = patch('ansible_collections.community.general.plugins.module_utils.net_tools.nios.api.get_connector')
+ self.mock_connector.start()
+
+ def tearDown(self):
+ super(TestNiosApi, self).tearDown()
+
+ self.mock_connector.stop()
+
+ def test_get_provider_spec(self):
+ provider_options = ['host', 'username', 'password', 'validate_certs', 'silent_ssl_warnings',
+ 'http_request_timeout', 'http_pool_connections',
+ 'http_pool_maxsize', 'max_retries', 'wapi_version', 'max_results']
+ res = api.WapiBase.provider_spec
+ self.assertIsNotNone(res)
+ self.assertIn('provider', res)
+ self.assertIn('options', res['provider'])
+ returned_options = res['provider']['options']
+ self.assertEqual(sorted(provider_options), sorted(returned_options.keys()))
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
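+ # Each test below drives wapi.run() against a canned "existing object" and then
+ # asserts which of the mocked create/update/delete calls, if any, was made.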
+ def test_wapi_no_change(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'test comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": self.module._check_type_dict().__getitem__(),
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertFalse(res['changed'])
+
+ def test_wapi_change(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.called_once_with(test_object)
+
+ def test_wapi_change_false(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'updated comment', 'extattrs': None, 'fqdn': 'foo'}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "fqdn": {"ib_req": True, 'update': False},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.called_once_with(test_object)
+
+ def test_wapi_extattrs_change(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'test comment', 'extattrs': {'Site': 'update'}}
+
+ ref = "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "default",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ kwargs = copy.deepcopy(test_object[0])
+ kwargs['extattrs']['Site']['value'] = 'update'
+ kwargs['name'] = self.module._check_type_dict().__getitem__()
+ del kwargs['_ref']
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once_with(ref, kwargs)
+
+ def test_wapi_extattrs_nochange(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'test comment', 'extattrs': {'Site': 'test'}}
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": self.module._check_type_dict().__getitem__(),
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertFalse(res['changed'])
+
+ def test_wapi_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
+
+ def test_wapi_delete(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ ref = "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "ansible",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_wapi_strip_network_view(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible',
+ 'comment': 'updated comment', 'extattrs': None,
+ 'network_view': 'default'}
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": "view/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/true",
+ "name": "ansible",
+ "extattrs": {},
+ "network_view": "default"
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "network_view": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ kwargs = test_object[0].copy()
+ ref = kwargs.pop('_ref')
+ kwargs['comment'] = 'updated comment'
+ kwargs['name'] = self.module._check_type_dict().__getitem__()
+ del kwargs['network_view']
+ del kwargs['extattrs']
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once_with(ref, kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/test_postgres.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/test_postgres.py
new file mode 100644
index 00000000..b5bb693b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/postgresql/test_postgres.py
@@ -0,0 +1,325 @@
+# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+import ansible_collections.community.general.plugins.module_utils.postgres as pg
+
+
+INPUT_DICT = dict(
+ session_role=dict(default=''),
+ login_user=dict(default='postgres'),
+ login_password=dict(default='test', no_log=True),
+ login_host=dict(default='test'),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+)
+
+EXPECTED_DICT = dict(
+ user=dict(default='postgres'),
+ password=dict(default='test', no_log=True),
+ host=dict(default='test'),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ sslmode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ sslrootcert=dict(aliases=['ssl_rootcert']),
+)
+
+
+class TestPostgresCommonArgSpec():
+
+ """
+ Namespace for testing the postgres_common_argument_spec() function.
+ """
+
+ def test_postgres_common_argument_spec(self):
+ """
+ Test for the postgres_common_argument_spec() function.
+
+ The tested function simply returns a dictionary of the default
+ parameters and their values for PostgreSQL modules.
+ The returned dictionary is compared against the expected one.
+ """
+ expected_dict = dict(
+ login_user=dict(default='postgres'),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(default=''),
+ login_unix_socket=dict(default=''),
+ port=dict(type='int', default=5432, aliases=['login_port']),
+ ssl_mode=dict(
+ default='prefer',
+ choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']
+ ),
+ ca_cert=dict(aliases=['ssl_rootcert']),
+ )
+ assert pg.postgres_common_argument_spec() == expected_dict
+
+
+@pytest.fixture
+def m_psycopg2():
+ """Return a mock object that emulates psycopg2."""
+ global Cursor
+ Cursor = None
+
+ class Cursor():
+ def __init__(self):
+ self.passed_query = None
+
+ def execute(self, query):
+ self.passed_query = query
+
+ def close(self):
+ pass
+
+ global DbConnection
+ DbConnection = None
+
+ class DbConnection():
+ def __init__(self):
+ pass
+
+ def cursor(self, cursor_factory=None):
+ return Cursor()
+
+ def set_session(self, autocommit=None):
+ pass
+
+ def set_isolation_level(self, isolevel):
+ pass
+
+ class Extras():
+ def __init__(self):
+ self.DictCursor = True
+
+ class Extensions():
+ def __init__(self):
+ self.ISOLATION_LEVEL_AUTOCOMMIT = True
+
+ class DummyPsycopg2():
+ def __init__(self):
+ self.__version__ = '2.4.3'
+ self.extras = Extras()
+ self.extensions = Extensions()
+
+ def connect(self, host=None, port=None, user=None,
+ password=None, sslmode=None, sslrootcert=None):
+ if user == 'Exception':
+ raise Exception()
+
+ return DbConnection()
+
+ return DummyPsycopg2()
+
+
+class TestEnsureReqLibs():
+
+ """
+ Namespace for testing ensure_required_libs() function.
+
+    If there is something wrong with the libs, the function invokes the
+    fail_json() method of the AnsibleModule object passed as the 'module' argument.
+    Therefore we must check:
+    1. the value of the err_msg attribute of the m_ansible_module mock object.
+ """
+
+ @pytest.fixture(scope='class')
+ def m_ansible_module(self):
+ """Return an object of dummy AnsibleModule class."""
+ class Dummym_ansible_module():
+ def __init__(self):
+ self.params = {'ca_cert': False}
+ self.err_msg = ''
+
+ def fail_json(self, msg):
+ self.err_msg = msg
+
+ return Dummym_ansible_module()
+
+ def test_ensure_req_libs_has_not_psycopg2(self, m_ansible_module):
+ """Test ensure_required_libs() with psycopg2 is None."""
+ # HAS_PSYCOPG2 is False by default
+ pg.ensure_required_libs(m_ansible_module)
+ assert 'Failed to import the required Python library (psycopg2)' in m_ansible_module.err_msg
+
+ def test_ensure_req_libs_has_psycopg2(self, m_ansible_module, monkeypatch):
+ """Test ensure_required_libs() with psycopg2 is not None."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert m_ansible_module.err_msg == ''
+
+ def test_ensure_req_libs_ca_cert(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """
+ Test with module.params['ca_cert'], psycopg2 version is suitable.
+ """
+ m_ansible_module.params['ca_cert'] = True
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert m_ansible_module.err_msg == ''
+
+ def test_ensure_req_libs_ca_cert_low_psycopg2_ver(self, m_ansible_module, m_psycopg2, monkeypatch):
+ """
+ Test with module.params['ca_cert'], psycopg2 version is wrong.
+ """
+ m_ansible_module.params['ca_cert'] = True
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ # Set wrong psycopg2 version number:
+ psycopg2 = m_psycopg2
+ psycopg2.__version__ = '2.4.2'
+ monkeypatch.setattr(pg, 'psycopg2', psycopg2)
+
+ pg.ensure_required_libs(m_ansible_module)
+ assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg
+
+
+@pytest.fixture(scope='class')
+def m_ansible_module():
+ """Return an object of dummy AnsibleModule class."""
+ class DummyAnsibleModule():
+ def __init__(self):
+ self.params = pg.postgres_common_argument_spec()
+ self.err_msg = ''
+ self.warn_msg = ''
+
+ def fail_json(self, msg):
+ self.err_msg = msg
+
+ def warn(self, msg):
+ self.warn_msg = msg
+
+ return DummyAnsibleModule()
+
+
+class TestConnectToDb():
+
+ """
+ Namespace for testing connect_to_db() function.
+
+    When a connection error occurs, connect_to_db() catches it and invokes
+    the fail_json() or warn() method of the AnsibleModule object, depending
+    on the passed parameters.
+    connect_to_db() may return a db_connection object, or None if an error occurred.
+    Therefore we must check:
+    1. The values of the err_msg and warn_msg attributes of the m_ansible_module mock object.
+    2. The types of the returned objects (db_connection and cursor).
+ """
+
+ def test_connect_to_db(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """Test connect_to_db(), common test."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params)
+ cursor = db_connection.cursor()
+ # if errors, db_connection returned as None:
+ assert isinstance(db_connection, DbConnection)
+ assert isinstance(cursor, Cursor)
+ assert m_ansible_module.err_msg == ''
+ # The default behaviour, normal in this case:
+ assert 'Database name has not been passed' in m_ansible_module.warn_msg
+
+ def test_session_role(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """Test connect_to_db(), switch on session_role."""
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['session_role'] = 'test_role'
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params)
+ cursor = db_connection.cursor()
+ # if errors, db_connection returned as None:
+ assert isinstance(db_connection, DbConnection)
+ assert isinstance(cursor, Cursor)
+ assert m_ansible_module.err_msg == ''
+ # The default behaviour, normal in this case:
+ assert 'Database name has not been passed' in m_ansible_module.warn_msg
+
+ def test_fail_on_conn_true(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), fail_on_conn arg passed as True (the default behavior).
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['login_user'] = 'Exception' # causes Exception
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=True)
+
+ assert 'unable to connect to database' in m_ansible_module.err_msg
+ assert db_connection is None
+
+ def test_fail_on_conn_false(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), fail_on_conn arg passed as False.
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ m_ansible_module.params['login_user'] = 'Exception' # causes Exception
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params, fail_on_conn=False)
+
+ assert m_ansible_module.err_msg == ''
+ assert 'PostgreSQL server is unavailable' in m_ansible_module.warn_msg
+ assert db_connection is None
+
+ def test_autocommit_true(self, m_ansible_module, monkeypatch, m_psycopg2):
+ """
+ Test connect_to_db(), autocommit arg passed as True (the default is False).
+ """
+ monkeypatch.setattr(pg, 'HAS_PSYCOPG2', True)
+
+        # case 1: psycopg2.__version__ >= 2.4.2 (the default in m_psycopg2)
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params, autocommit=True)
+ cursor = db_connection.cursor()
+
+ # if errors, db_connection returned as None:
+ assert isinstance(db_connection, DbConnection)
+ assert isinstance(cursor, Cursor)
+ assert m_ansible_module.err_msg == ''
+
+        # case 2: psycopg2.__version__ < 2.4.2
+ m_psycopg2.__version__ = '2.4.1'
+ monkeypatch.setattr(pg, 'psycopg2', m_psycopg2)
+
+ conn_params = pg.get_conn_params(m_ansible_module, m_ansible_module.params)
+ db_connection = pg.connect_to_db(m_ansible_module, conn_params, autocommit=True)
+ cursor = db_connection.cursor()
+
+ # if errors, db_connection returned as None:
+ assert isinstance(db_connection, DbConnection)
+ assert isinstance(cursor, Cursor)
+ assert 'psycopg2 must be at least 2.4.3' in m_ansible_module.err_msg
+
+
+class TestGetConnParams():
+
+ """Namespace for testing get_conn_params() function."""
+
+ def test_get_conn_params_def(self, m_ansible_module):
+ """Test get_conn_params(), warn_db_default kwarg is default."""
+ assert pg.get_conn_params(m_ansible_module, INPUT_DICT) == EXPECTED_DICT
+ assert m_ansible_module.warn_msg == 'Database name has not been passed, used default database to connect to.'
+
+ def test_get_conn_params_warn_db_def_false(self, m_ansible_module):
+ """Test get_conn_params(), warn_db_default kwarg is False."""
+ assert pg.get_conn_params(m_ansible_module, INPUT_DICT, warn_db_default=False) == EXPECTED_DICT
+ assert m_ansible_module.warn_msg == ''
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py
new file mode 100644
index 00000000..cc698d0b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/remote_management/dellemc/test_ome.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome import RestOME
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock
+import json
+
+
+class TestRestOME(object):
+ @pytest.fixture
+ def mock_response(self):
+ mock_response = MagicMock()
+ mock_response.getcode.return_value = 200
+ mock_response.headers = mock_response.getheaders.return_value = {'X-Auth-Token': 'token_id'}
+ mock_response.read.return_value = json.dumps({"value": "data"})
+ return mock_response
+
+ def test_invoke_request_with_session(self, mock_response, mocker):
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url',
+ return_value=mock_response)
+ module_params = {'hostname': '192.168.0.1', 'username': 'username',
+ 'password': 'password', "port": 443}
+ req_session = True
+ with RestOME(module_params, req_session) as obj:
+ response = obj.invoke_request("/testpath", "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_without_session(self, mock_response, mocker):
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url',
+ return_value=mock_response)
+ module_params = {'hostname': '192.168.0.1', 'username': 'username',
+ 'password': 'password', "port": 443}
+ req_session = False
+ with RestOME(module_params, req_session) as obj:
+ response = obj.invoke_request("/testpath", "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ @pytest.mark.parametrize("exc", [URLError, SSLValidationError, ConnectionError])
+ def test_invoke_request_error_case_handling(self, exc, mock_response, mocker):
+ open_url_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url',
+ return_value=mock_response)
+ open_url_mock.side_effect = exc("test")
+ module_params = {'hostname': '192.168.0.1', 'username': 'username',
+ 'password': 'password', "port": 443}
+ req_session = False
+ with pytest.raises(exc) as e:
+ with RestOME(module_params, req_session) as obj:
+ obj.invoke_request("/testpath", "GET")
+
+ def test_invoke_request_http_error_handling(self, mock_response, mocker):
+ open_url_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.open_url',
+ return_value=mock_response)
+ open_url_mock.side_effect = HTTPError('http://testhost.com/', 400,
+ 'Bad Request Error', {}, None)
+ module_params = {'hostname': '192.168.0.1', 'username': 'username',
+ 'password': 'password', "port": 443}
+ req_session = False
+ with pytest.raises(HTTPError) as e:
+ with RestOME(module_params, req_session) as obj:
+ obj.invoke_request("/testpath", "GET")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py
new file mode 100644
index 00000000..f5784a59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py
@@ -0,0 +1,141 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ is_input_dangerous,
+ pg_quote_identifier,
+ SQLParseError,
+)
+
+# These are all valid strings
+# The results are based on interpreting the identifier as a table name
+VALID = {
+ # User quoted
+ '"public.table"': '"public.table"',
+ '"public"."table"': '"public"."table"',
+ '"schema test"."table test"': '"schema test"."table test"',
+
+ # We quote part
+ 'public.table': '"public"."table"',
+ '"public".table': '"public"."table"',
+ 'public."table"': '"public"."table"',
+ 'schema test.table test': '"schema test"."table test"',
+ '"schema test".table test': '"schema test"."table test"',
+ 'schema test."table test"': '"schema test"."table test"',
+
+ # Embedded double quotes
+ 'table "test"': '"table ""test"""',
+ 'public."table ""test"""': '"public"."table ""test"""',
+ 'public.table "test"': '"public"."table ""test"""',
+ 'schema "test".table': '"schema ""test"""."table"',
+ '"schema ""test""".table': '"schema ""test"""."table"',
+ '"""wat"""."""test"""': '"""wat"""."""test"""',
+ # Sigh, handle these as well:
+ '"no end quote': '"""no end quote"',
+ 'schema."table': '"schema"."""table"',
+ '"schema.table': '"""schema"."table"',
+ 'schema."table.something': '"schema"."""table"."something"',
+
+ # Embedded dots
+ '"schema.test"."table.test"': '"schema.test"."table.test"',
+ '"schema.".table': '"schema."."table"',
+ '"schema."."table"': '"schema."."table"',
+ 'schema.".table"': '"schema".".table"',
+ '"schema".".table"': '"schema".".table"',
+ '"schema.".".table"': '"schema.".".table"',
+ # These are valid but maybe not what the user intended
+ '."table"': '".""table"""',
+ 'table.': '"table."',
+}
+
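+# Maps (identifier, id_type) to the error message expected from pg_quote_identifier().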
+INVALID = {
+ ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
+ ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
+ ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
+ ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
+ ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
+ ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
+}
+
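+# Each entry is (identifier, id_type, expected quoted form, error expected once
+# one dot too many is appended to that identifier).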
+HOW_MANY_DOTS = (
+ ('role', 'role', '"role"',
+ 'PostgreSQL does not support role with more than 1 dots'),
+ ('db', 'database', '"db"',
+ 'PostgreSQL does not support database with more than 1 dots'),
+ ('db.schema', 'schema', '"db"."schema"',
+ 'PostgreSQL does not support schema with more than 2 dots'),
+ ('db.schema.table', 'table', '"db"."schema"."table"',
+ 'PostgreSQL does not support table with more than 3 dots'),
+ ('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
+ 'PostgreSQL does not support column with more than 4 dots'),
+)
+
+VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
+INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
+
+IS_STRINGS_DANGEROUS = (
+ (u'', False),
+ (u' ', False),
+ (u'alternative database', False),
+ (u'backup of TRUNCATED table', False),
+ (u'bob.dropper', False),
+ (u'd\'artagnan', False),
+ (u'user_with_select_update_truncate_right', False),
+ (u';DROP DATABASE fluffy_pets_photos', True),
+ (u';drop DATABASE fluffy_pets_photos', True),
+ (u'; TRUNCATE TABLE his_valuable_table', True),
+ (u'; truncate TABLE his_valuable_table', True),
+ (u'\'--', True),
+ (u'"--', True),
+ (u'\' union select username, password from admin_credentials', True),
+ (u'\' UNION SELECT username, password from admin_credentials', True),
+ (u'\' intersect select', True),
+ (u'\' INTERSECT select', True),
+ (u'\' except select', True),
+ (u'\' EXCEPT select', True),
+ (u';ALTER TABLE prices', True),
+ (u';alter table prices', True),
+ (u"; UPDATE products SET price = '0'", True),
+ (u";update products SET price = '0'", True),
+ (u"; DELETE FROM products", True),
+ (u"; delete FROM products", True),
+ (u"; SELECT * FROM products", True),
+ (u" ; select * from products", True),
+)
+
+
+@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
+def test_valid_quotes(identifier, quoted_identifier):
+ assert pg_quote_identifier(identifier, 'table') == quoted_identifier
+
+
+@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
+def test_invalid_quotes(identifier, id_type, msg):
+ with pytest.raises(SQLParseError) as ex:
+ pg_quote_identifier(identifier, id_type)
+
+ ex.match(msg)
+
+
+@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
+def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
+ assert pg_quote_identifier(identifier, id_type) == quoted_identifier
+
+ with pytest.raises(SQLParseError) as ex:
+ pg_quote_identifier('%s.more' % identifier, id_type)
+
+ ex.match(msg)
+
+
+@pytest.mark.parametrize("string, result", IS_STRINGS_DANGEROUS)
+def test_is_input_dangerous(string, result):
+ assert is_input_dangerous(string) == result
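+
+
+# A rough sketch (not part of the tested module_util) of how a caller could use
+# is_input_dangerous() to reject suspicious user-supplied text before it ever
+# reaches an SQL statement; 'comment' is just a hypothetical parameter name:
+#
+#     if is_input_dangerous(module.params['comment']):
+#         module.fail_json(msg="Parameter 'comment' is potentially dangerous")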
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_hetzner.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_hetzner.py
new file mode 100644
index 00000000..065801a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_hetzner.py
@@ -0,0 +1,268 @@
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+import json
+import pytest
+
+from mock import MagicMock
+from ansible_collections.community.general.plugins.module_utils import hetzner
+
+
+class ModuleFailException(Exception):
+ def __init__(self, msg, **kwargs):
+ super(ModuleFailException, self).__init__(msg)
+ self.fail_msg = msg
+ self.fail_kwargs = kwargs
+
+
+def get_module_mock():
+ def f(msg, **kwargs):
+ raise ModuleFailException(msg, **kwargs)
+
+ module = MagicMock()
+ module.fail_json = f
+ module.from_json = json.loads
+ return module
+
+
+# ########################################################################################
+
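+# Each FETCH_URL_JSON_SUCCESS entry is (mocked fetch_url() return value, accept_errors,
+# expected (result, error) tuple from fetch_url_json()); in FETCH_URL_JSON_FAIL the
+# last element is the expected fail_json() message instead.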
+FETCH_URL_JSON_SUCCESS = [
+ (
+ (None, dict(
+ body=json.dumps(dict(
+ a='b'
+ )).encode('utf-8'),
+ )),
+ None,
+ (dict(
+ a='b'
+ ), None)
+ ),
+ (
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ a='b'
+ )).encode('utf-8'),
+ )),
+ ['foo'],
+ (dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ a='b'
+ ), 'foo')
+ ),
+]
+
+
+FETCH_URL_JSON_FAIL = [
+ (
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ )).encode('utf-8'),
+ )),
+ None,
+ 'Request failed: 400 foo (bar)'
+ ),
+ (
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ )).encode('utf-8'),
+ )),
+ ['bar'],
+ 'Request failed: 400 foo (bar)'
+ ),
+]
+
+
+@pytest.mark.parametrize("return_value, accept_errors, result", FETCH_URL_JSON_SUCCESS)
+def test_fetch_url_json(monkeypatch, return_value, accept_errors, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=return_value)
+
+ assert hetzner.fetch_url_json(module, 'https://foo/bar', accept_errors=accept_errors) == result
+
+
+@pytest.mark.parametrize("return_value, accept_errors, result", FETCH_URL_JSON_FAIL)
+def test_fetch_url_json_fail(monkeypatch, return_value, accept_errors, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=return_value)
+
+ with pytest.raises(ModuleFailException) as exc:
+ hetzner.fetch_url_json(module, 'https://foo/bar', accept_errors=accept_errors)
+
+ assert exc.value.fail_msg == result
+ assert exc.value.fail_kwargs == dict()
+
+
+# ########################################################################################
+
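+# Each entry is (failover IP, mocked fetch_url() return value, expected result of
+# get_failover(), expected record returned by get_failover_record()).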
+GET_FAILOVER_SUCCESS = [
+ (
+ '1.2.3.4',
+ (None, dict(
+ body=json.dumps(dict(
+ failover=dict(
+ active_server_ip='1.1.1.1',
+ ip='1.2.3.4',
+ netmask='255.255.255.255',
+ )
+ )).encode('utf-8'),
+ )),
+ '1.1.1.1',
+ dict(
+ active_server_ip='1.1.1.1',
+ ip='1.2.3.4',
+ netmask='255.255.255.255',
+ )
+ ),
+]
+
+
+GET_FAILOVER_FAIL = [
+ (
+ '1.2.3.4',
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ )).encode('utf-8'),
+ )),
+ 'Request failed: 400 foo (bar)'
+ ),
+]
+
+
+@pytest.mark.parametrize("ip, return_value, result, record", GET_FAILOVER_SUCCESS)
+def test_get_failover_record(monkeypatch, ip, return_value, result, record):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ assert hetzner.get_failover_record(module, ip) == record
+
+
+@pytest.mark.parametrize("ip, return_value, result", GET_FAILOVER_FAIL)
+def test_get_failover_record_fail(monkeypatch, ip, return_value, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ with pytest.raises(ModuleFailException) as exc:
+ hetzner.get_failover_record(module, ip)
+
+ assert exc.value.fail_msg == result
+ assert exc.value.fail_kwargs == dict()
+
+
+@pytest.mark.parametrize("ip, return_value, result, record", GET_FAILOVER_SUCCESS)
+def test_get_failover(monkeypatch, ip, return_value, result, record):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ assert hetzner.get_failover(module, ip) == result
+
+
+@pytest.mark.parametrize("ip, return_value, result", GET_FAILOVER_FAIL)
+def test_get_failover_fail(monkeypatch, ip, return_value, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ with pytest.raises(ModuleFailException) as exc:
+ hetzner.get_failover(module, ip)
+
+ assert exc.value.fail_msg == result
+ assert exc.value.fail_kwargs == dict()
+
+
+# ########################################################################################
+
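+# Each entry is (failover IP, requested active server IP, mocked fetch_url() return
+# value, expected (active server IP, changed) tuple returned by set_failover()).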
+SET_FAILOVER_SUCCESS = [
+ (
+ '1.2.3.4',
+ '1.1.1.1',
+ (None, dict(
+ body=json.dumps(dict(
+ failover=dict(
+ active_server_ip='1.1.1.2',
+ )
+ )).encode('utf-8'),
+ )),
+ ('1.1.1.2', True)
+ ),
+ (
+ '1.2.3.4',
+ '1.1.1.1',
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="FAILOVER_ALREADY_ROUTED",
+ status=400,
+ message="Failover already routed",
+ ),
+ )).encode('utf-8'),
+ )),
+ ('1.1.1.1', False)
+ ),
+]
+
+
+SET_FAILOVER_FAIL = [
+ (
+ '1.2.3.4',
+ '1.1.1.1',
+ (None, dict(
+ body=json.dumps(dict(
+ error=dict(
+ code="foo",
+ status=400,
+ message="bar",
+ ),
+ )).encode('utf-8'),
+ )),
+ 'Request failed: 400 foo (bar)'
+ ),
+]
+
+
+@pytest.mark.parametrize("ip, value, return_value, result", SET_FAILOVER_SUCCESS)
+def test_set_failover(monkeypatch, ip, value, return_value, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ assert hetzner.set_failover(module, ip, value) == result
+
+
+@pytest.mark.parametrize("ip, value, return_value, result", SET_FAILOVER_FAIL)
+def test_set_failover_fail(monkeypatch, ip, value, return_value, result):
+ module = get_module_mock()
+ hetzner.fetch_url = MagicMock(return_value=copy.deepcopy(return_value))
+
+ with pytest.raises(ModuleFailException) as exc:
+ hetzner.set_failover(module, ip, value)
+
+ assert exc.value.fail_msg == result
+ assert exc.value.fail_kwargs == dict()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py
new file mode 100644
index 00000000..e7c57eb3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Michael Scherer <mscherer@redhat.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils import known_hosts
+
+
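+# Maps a repository URL to the expected results of is_ssh_url() and
+# get_fqdn_and_port(), plus the ssh-keyscan argument suffix that
+# add_host_key() is expected to pass to run_command().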
+URLS = {
+ 'ssh://one.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'one.example.org',
+ 'add_host_key_cmd': " -t rsa one.example.org",
+ 'port': None,
+ },
+ 'ssh+git://two.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'two.example.org',
+ 'add_host_key_cmd': " -t rsa two.example.org",
+ 'port': None,
+ },
+ 'rsync://three.example.org/user/example.git': {
+ 'is_ssh_url': False,
+ 'get_fqdn': 'three.example.org',
+ 'add_host_key_cmd': None, # not called for non-ssh urls
+ 'port': None,
+ },
+ 'git@four.example.org:user/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'four.example.org',
+ 'add_host_key_cmd': " -t rsa four.example.org",
+ 'port': None,
+ },
+ 'git+ssh://five.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'five.example.org',
+ 'add_host_key_cmd': " -t rsa five.example.org",
+ 'port': None,
+ },
+ 'ssh://six.example.org:21/example.org': {
+ # ssh on FTP Port?
+ 'is_ssh_url': True,
+ 'get_fqdn': 'six.example.org',
+ 'add_host_key_cmd': " -t rsa -p 21 six.example.org",
+ 'port': '21',
+ },
+ 'ssh://[2001:DB8::abcd:abcd]/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'ssh://[2001:DB8::abcd:abcd]:22/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa -p 22 [2001:DB8::abcd:abcd]",
+ 'port': '22',
+ },
+ 'username@[2001:DB8::abcd:abcd]/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'username@[2001:DB8::abcd:abcd]:path/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'ssh://internal.git.server:7999/repos/repo.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'internal.git.server',
+ 'add_host_key_cmd': " -t rsa -p 7999 internal.git.server",
+ 'port': '7999',
+ },
+}
+
+
+@pytest.mark.parametrize('url, is_ssh_url', ((k, URLS[k]['is_ssh_url']) for k in sorted(URLS)))
+def test_is_ssh_url(url, is_ssh_url):
+ assert known_hosts.is_ssh_url(url) == is_ssh_url
+
+
+@pytest.mark.parametrize('url, fqdn, port', ((k, URLS[k]['get_fqdn'], URLS[k]['port']) for k in sorted(URLS)))
+def test_get_fqdn_and_port(url, fqdn, port):
+ assert known_hosts.get_fqdn_and_port(url) == (fqdn, port)
+
+
+@pytest.mark.parametrize('fqdn, port, add_host_key_cmd, stdin',
+ ((URLS[k]['get_fqdn'], URLS[k]['port'], URLS[k]['add_host_key_cmd'], {})
+ for k in sorted(URLS) if URLS[k]['is_ssh_url']),
+ indirect=['stdin'])
+def test_add_host_key(am, mocker, fqdn, port, add_host_key_cmd):
+ get_bin_path = mocker.MagicMock()
+ get_bin_path.return_value = keyscan_cmd = "/custom/path/ssh-keyscan"
+ am.get_bin_path = get_bin_path
+
+ run_command = mocker.MagicMock()
+    run_command.return_value = (0, "Needs output, otherwise thinks ssh-keyscan timed out", "")
+ am.run_command = run_command
+
+ append_to_file = mocker.MagicMock()
+ append_to_file.return_value = (None,)
+ am.append_to_file = append_to_file
+
+ mocker.patch('os.path.isdir', return_value=True)
+ mocker.patch('os.path.exists', return_value=True)
+
+ known_hosts.add_host_key(am, fqdn, port=port)
+ run_command.assert_called_with(keyscan_cmd + add_host_key_cmd)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_kubevirt.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_kubevirt.py
new file mode 100644
index 00000000..6ae361d9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_kubevirt.py
@@ -0,0 +1,56 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils import kubevirt as mymodule
+
+
+def test_simple_merge_dicts():
+ dict1 = {'labels': {'label1': 'value'}}
+ dict2 = {'labels': {'label2': 'value'}}
+ dict3 = json.dumps({'labels': {'label1': 'value', 'label2': 'value'}}, sort_keys=True)
+ assert dict3 == json.dumps(dict(mymodule.KubeVirtRawModule.merge_dicts(dict1, dict2)), sort_keys=True)
+
+
+def test_simple_multi_merge_dicts():
+ dict1 = {'labels': {'label1': 'value', 'label3': 'value'}}
+ dict2 = {'labels': {'label2': 'value'}}
+ dict3 = json.dumps({'labels': {'label1': 'value', 'label2': 'value', 'label3': 'value'}}, sort_keys=True)
+ assert dict3 == json.dumps(dict(mymodule.KubeVirtRawModule.merge_dicts(dict1, dict2)), sort_keys=True)
+
+
+def test_double_nested_merge_dicts():
+ dict1 = {'metadata': {'labels': {'label1': 'value', 'label3': 'value'}}}
+ dict2 = {'metadata': {'labels': {'label2': 'value'}}}
+ dict3 = json.dumps({'metadata': {'labels': {'label1': 'value', 'label2': 'value', 'label3': 'value'}}}, sort_keys=True)
+ assert dict3 == json.dumps(dict(mymodule.KubeVirtRawModule.merge_dicts(dict1, dict2)), sort_keys=True)
+
+
+@pytest.mark.parametrize("lval, operations, rval, result", [
+ ('v1', ['<', '<='], 'v2', True),
+ ('v1', ['>', '>=', '=='], 'v2', False),
+ ('v1', ['>'], 'v1alpha1', True),
+ ('v1', ['==', '<', '<='], 'v1alpha1', False),
+ ('v1beta5', ['==', '<=', '>='], 'v1beta5', True),
+ ('v1beta5', ['<', '>', '!='], 'v1beta5', False),
+
+])
+def test_kubeapiversion_comparisons(lval, operations, rval, result):
+ KubeAPIVersion = mymodule.KubeAPIVersion
+ for op in operations:
+ test = '(KubeAPIVersion("{0}") {1} KubeAPIVersion("{2}")) == {3}'.format(lval, op, rval, result)
+ assert eval(test)
+
+
+@pytest.mark.parametrize("ver", ('nope', 'v1delta7', '1.5', 'v1beta', 'v'))
+def test_kubeapiversion_unsupported_versions(ver):
+ threw = False
+ try:
+ mymodule.KubeAPIVersion(ver)
+ except ValueError:
+ threw = True
+ assert threw
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py
new file mode 100644
index 00000000..f20594bb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ArgFormat, DependencyCtxMgr, ModuleHelper
+)
+
+
+def single_lambda_2star(x, y, z):
+ return ["piggies=[{0},{1},{2}]".format(x, y, z)]
+
+
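+# Each entry maps a test id to (fmt, style, stars, value, expected arg list); as
+# exercised below, 'stars' controls how a callable fmt receives 'value'
+# (0: as a single argument, 1: unpacked with *, 2: unpacked with **).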
+ARG_FORMATS = dict(
+ simple_boolean_true=("--superflag", ArgFormat.BOOLEAN, 0, True, ["--superflag"]),
+ simple_boolean_false=("--superflag", ArgFormat.BOOLEAN, 0, False, []),
+ single_printf=("--param=%s", ArgFormat.PRINTF, 0, "potatoes", ["--param=potatoes"]),
+ single_printf_no_substitution=("--param", ArgFormat.PRINTF, 0, "potatoes", ["--param"]),
+ multiple_printf=(["--param", "free-%s"], ArgFormat.PRINTF, 0, "potatoes", ["--param", "free-potatoes"]),
+ single_format=("--param={0}", ArgFormat.FORMAT, 0, "potatoes", ["--param=potatoes"]),
+ single_format_no_substitution=("--param", ArgFormat.FORMAT, 0, "potatoes", ["--param"]),
+ multiple_format=(["--param", "free-{0}"], ArgFormat.FORMAT, 0, "potatoes", ["--param", "free-potatoes"]),
+ single_lambda_0star=((lambda v: ["piggies=[{0},{1},{2}]".format(v[0], v[1], v[2])]),
+ None, 0, ['a', 'b', 'c'], ["piggies=[a,b,c]"]),
+ single_lambda_1star=((lambda a, b, c: ["piggies=[{0},{1},{2}]".format(a, b, c)]),
+ None, 1, ['a', 'b', 'c'], ["piggies=[a,b,c]"]),
+ single_lambda_2star=(single_lambda_2star, None, 2, dict(z='c', x='a', y='b'), ["piggies=[a,b,c]"])
+)
+ARG_FORMATS_IDS = sorted(ARG_FORMATS.keys())
+
+
+@pytest.mark.parametrize('fmt, style, stars, value, expected',
+ (ARG_FORMATS[tc] for tc in ARG_FORMATS_IDS),
+ ids=ARG_FORMATS_IDS)
+def test_arg_format(fmt, style, stars, value, expected):
+ af = ArgFormat('name', fmt, style, stars)
+ actual = af.to_text(value)
+ print("formatted string = {0}".format(actual))
+ assert actual == expected
+
+
+ARG_FORMATS_FAIL = dict(
+ int_fmt=(3, None, 0, "", [""]),
+ bool_fmt=(True, None, 0, "", [""]),
+)
+ARG_FORMATS_FAIL_IDS = sorted(ARG_FORMATS_FAIL.keys())
+
+
+@pytest.mark.parametrize('fmt, style, stars, value, expected',
+ (ARG_FORMATS_FAIL[tc] for tc in ARG_FORMATS_FAIL_IDS),
+ ids=ARG_FORMATS_FAIL_IDS)
+def test_arg_format_fail(fmt, style, stars, value, expected):
+ with pytest.raises(TypeError):
+ af = ArgFormat('name', fmt, style, stars)
+ actual = af.to_text(value)
+ print("formatted string = {0}".format(actual))
+
+
+def test_dependency_ctxmgr():
+ ctx = DependencyCtxMgr("POTATOES", "Potatoes must be installed")
+ with ctx:
+ import potatoes_that_will_never_be_there
+ print("POTATOES: ctx.text={0}".format(ctx.text))
+ assert ctx.text == "Potatoes must be installed"
+ assert not ctx.has_it
+
+ ctx = DependencyCtxMgr("POTATOES2")
+ with ctx:
+ import potatoes_that_will_never_be_there_again
+ assert not ctx.has_it
+ print("POTATOES2: ctx.text={0}".format(ctx.text))
+ assert ctx.text.startswith("No module named")
+ assert "potatoes_that_will_never_be_there_again" in ctx.text
+
+ ctx = DependencyCtxMgr("TYPING")
+ with ctx:
+ import sys
+ assert ctx.has_it
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py
new file mode 100644
index 00000000..4829f55b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com>
+# Copyright: (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.saslprep import saslprep
+
+
+VALID = [
+ (u'', u''),
+ (u'\u00A0', u' '),
+ (u'a', u'a'),
+ (u'й', u'й'),
+ (u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9', u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9'),
+ (u'The\u00ADM\u00AAtr\u2168', u'TheMatrIX'),
+ (u'I\u00ADX', u'IX'),
+ (u'user', u'user'),
+ (u'USER', u'USER'),
+ (u'\u00AA', u'a'),
+ (u'\u2168', u'IX'),
+ (u'\u05BE\u00A0\u05BE', u'\u05BE\u0020\u05BE'),
+]
+
+INVALID = [
+ (None, TypeError),
+ (b'', TypeError),
+ (u'\u0221', ValueError),
+ (u'\u0007', ValueError),
+ (u'\u0627\u0031', ValueError),
+ (u'\uE0001', ValueError),
+ (u'\uE0020', ValueError),
+ (u'\uFFF9', ValueError),
+ (u'\uFDD0', ValueError),
+ (u'\u0000', ValueError),
+ (u'\u06DD', ValueError),
+ (u'\uFFFFD', ValueError),
+ (u'\uD800', ValueError),
+ (u'\u200E', ValueError),
+ (u'\u05BE\u00AA\u05BE', ValueError),
+]
+
+
+@pytest.mark.parametrize('source,target', VALID)
+def test_saslprep_conversions(source, target):
+ assert saslprep(source) == target
+
+
+@pytest.mark.parametrize('source,exception', INVALID)
+def test_saslprep_exceptions(source, exception):
+ with pytest.raises(exception) as ex:
+ saslprep(source)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py
new file mode 100644
index 00000000..f28d5dc8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py
@@ -0,0 +1,47 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright: (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+#
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM
+
+
+class FakeModule:
+ def __init__(self, params):
+ self.params = params
+
+
+def test_combine_headers_returns_only_default():
+ expected = {"Accept": "application/json", "Content-type": "application/json"}
+ module = FakeModule(
+ params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token',
+ 'name': 'FakeName', 'headers': {}})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
+
+
+def test_combine_headers_returns_only_default2():
+ expected = {"Accept": "application/json", "Content-type": "application/json"}
+ module = FakeModule(
+ params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token',
+ 'name': 'FakeName'})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
+
+
+def test_combine_headers_returns_combined():
+ expected = {"Accept": "application/json", "Content-type": "application/json",
+ "extraHeader": "extraHeaderValue"}
+ module = FakeModule(params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234,
+ 'utm_token': 'utm_token', 'name': 'FakeName',
+ "headers": {"extraHeader": "extraHeaderValue"}})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py
new file mode 100644
index 00000000..c443dfdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class AnsibleModuleException(Exception):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class ExitJsonException(AnsibleModuleException):
+ pass
+
+
+class FailJsonException(AnsibleModuleException):
+ pass
+
+
+class FakeAnsibleModule:
+ def __init__(self, params=None, check_mode=False):
+ self.params = params
+ self.check_mode = check_mode
+
+ def exit_json(self, *args, **kwargs):
+ raise ExitJsonException(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ raise FailJsonException(*args, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py
new file mode 100644
index 00000000..24933175
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+FAKE_API_VERSION = "1.1"
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+class Session(object):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,
+ allow_none=1, ignore_ssl=False):
+
+ self.transport = transport
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def _get_api_version(self):
+ return FAKE_API_VERSION
+
+ def _login(self, method, params):
+ self._session = "OpaqueRef:fake-xenapi-session-ref"
+ self.last_login_method = method
+ self.last_login_params = params
+ self.API_version = self._get_api_version()
+
+ def _logout(self):
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ # Should be patched with mocker.patch().
+ return None
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ # Should be patched with mocker.patch().
+ return None
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name == 'logout':
+ return self._logout
+
+
+def xapi_local():
+ return Session("http://_var_lib_xcp_xapi/")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py
new file mode 100644
index 00000000..52a01c99
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def fake_xenapi_ref(xenapi_class):
+ return "OpaqueRef:fake-xenapi-%s-ref" % xenapi_class
+
+
+testcase_bad_xenapi_refs = {
+ "params": [
+ None,
+ '',
+ 'OpaqueRef:NULL',
+ ],
+ "ids": [
+ 'none',
+ 'empty',
+ 'ref-null',
+ ],
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py
new file mode 100644
index 00000000..52f654bc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import sys
+import importlib
+import os
+import json
+import pytest
+
+from .FakeAnsibleModule import FakeAnsibleModule
+from ansible.module_utils import six
+from mock import MagicMock
+
+
+@pytest.fixture
+def fake_ansible_module(request):
+ """Returns fake AnsibleModule with fake module params."""
+ if hasattr(request, 'param'):
+ return FakeAnsibleModule(request.param)
+ else:
+ params = {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ }
+
+ return FakeAnsibleModule(params)
+
+
+@pytest.fixture(autouse=True)
+def XenAPI():
+ """Imports and returns fake XenAPI module."""
+
+    # Import of the fake XenAPI module is wrapped by a fixture so that it does not
+    # affect other unit tests which could potentially also use the XenAPI module.
+
+ # First we use importlib.import_module() to import the module and assign
+ # it to a local symbol.
+ fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.module_utils.xenserver.FakeXenAPI')
+
+ # Now we populate Python module cache with imported fake module using the
+ # original module name (XenAPI). That way, any 'import XenAPI' statement
+ # will just load already imported fake module from the cache.
+ sys.modules['XenAPI'] = fake_xenapi
+
+ return fake_xenapi
+
+
+@pytest.fixture(autouse=True)
+def xenserver(XenAPI):
+ """Imports and returns xenserver module util."""
+
+ # Since we are wrapping fake XenAPI module inside a fixture, all modules
+ # that depend on it have to be imported inside a test function. To make
+ # this easier to handle and remove some code repetition, we wrap the import
+ # of xenserver module util with a fixture.
+ from ansible_collections.community.general.plugins.module_utils import xenserver
+
+ return xenserver
+
+
+@pytest.fixture
+def mock_xenapi_failure(XenAPI, mocker):
+ """
+ Returns mock object that raises XenAPI.Failure on any XenAPI
+ method call.
+ """
+ fake_error_msg = "Fake XAPI method call error!"
+
+ # We need to use our MagicMock based class that passes side_effect to its
+ # children because calls to xenapi methods can generate an arbitrary
+ # hierarchy of mock objects. Any such object when called should use the
+ # same side_effect as its parent mock object.
+ class MagicMockSideEffect(MagicMock):
+ def _get_child_mock(self, **kw):
+ child_mock = super(MagicMockSideEffect, self)._get_child_mock(**kw)
+ child_mock.side_effect = self.side_effect
+ return child_mock
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', new=MagicMockSideEffect(), create=True)
+ mocked_xenapi.side_effect = XenAPI.Failure(fake_error_msg)
+
+ return mocked_xenapi, fake_error_msg
+
+
+@pytest.fixture
+def fixture_data_from_file(request):
+ """Loads fixture data from files."""
+ if not hasattr(request, 'param'):
+ return {}
+
+ fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
+ fixture_data = {}
+
+ if isinstance(request.param, six.string_types):
+ request.param = [request.param]
+
+ for fixture_name in request.param:
+ path = os.path.join(fixture_path, fixture_name)
+
+ with open(path) as f:
+ data = f.read()
+
+ try:
+ data = json.loads(data)
+ except Exception:
+ pass
+
+ fixture_data[fixture_name] = data
+
+ return fixture_data
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json
new file mode 100644
index 00000000..add2dcf4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json
@@ -0,0 +1,73 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "ansible-test-vm-1-C",
+ "name_desc": "C:\\",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ }
+ ],
+ "domid": "143",
+ "folder": "/Ansible/Test",
+ "hardware": {
+ "memory_mb": 2048,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 2
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "ansible-test-vm-1",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "10.0.0.1",
+ "gateway6": "",
+ "ip": "10.0.0.2",
+ "ip6": [
+ "fe80:0000:0000:0000:11e1:12c9:ef3b:75a0"
+ ],
+ "mac": "7a:a6:48:1e:31:46",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "255.255.255.0",
+ "prefix": "24",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "366fe8e0-878b-4320-8731-90d1ed3c0b93"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-28800",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "81c373d7-a407-322f-911b-31386eb5215d",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json
new file mode 100644
index 00000000..70976966
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json
@@ -0,0 +1,707 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:1c0a7c6d-09e5-9b2c-bbe3-9a73aadcff9f": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "insert",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdd",
+ "empty": true,
+ "metrics": "OpaqueRef:1a36eae4-87c8-0945-cee9-c85a71fd843f",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "e6aacd53-a2c8-649f-b405-93fcb811411a"
+ },
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3": {
+ "VDI": "OpaqueRef:fd20510d-e9ca-b966-3b98-4ae547dacf9a",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "unplug",
+ "unplug_force",
+ "pause"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:ddbd70d4-7dde-b51e-6208-eb434b300009",
+ "mode": "RW",
+ "other_config": {
+ "owner": "true"
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": true,
+ "userdevice": "0",
+ "uuid": "ffd6de9c-c416-1d52-3e9d-3bcbf567245e"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:fd20510d-e9ca-b966-3b98-4ae547dacf9a": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "b807f67b-3f37-4a6e-ad6c-033f812ab093",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "C:\\",
+ "name_label": "ansible-test-vm-1-C",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "43041947648",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770": "RW",
+ "read-caching-enabled-on-92ac8132-276b-4d0f-9d3a-54db51e4a438": "false",
+ "read-caching-reason-92ac8132-276b-4d0f-9d3a-54db51e4a438": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "b807f67b-3f37-4a6e-ad6c-033f812ab093",
+ "virtual_size": "42949672960",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:38da2120-6086-5043-8383-ab0a53ede42a": {
+ "MAC": "7a:a6:48:1e:31:46",
+ "MAC_autogenerated": false,
+ "MTU": "1500",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unplug"
+ ],
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "0",
+ "ipv4_addresses": [
+ "10.0.0.2/24"
+ ],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "Static",
+ "ipv4_gateway": "10.0.0.1",
+ "ipv6_addresses": [
+ ""
+ ],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:15502939-df0f-0095-1ce3-e51367199d27",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "bd108d25-488a-f9b5-4c7b-02d40f1e38a8"
+ }
+ },
+ "VM": {
+ "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c": {
+ "HVM_boot_params": {
+ "order": "dc"
+ },
+ "HVM_boot_policy": "BIOS order",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "",
+ "PV_bootloader": "",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:1c0a7c6d-09e5-9b2c-bbe3-9a73aadcff9f",
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3"
+ ],
+ "VCPUs_at_startup": "2",
+ "VCPUs_max": "2",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:38da2120-6086-5043-8383-ab0a53ede42a"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:NULL",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "migrate_send",
+ "pool_migrate",
+ "changing_VCPUs_live",
+ "suspend",
+ "hard_reboot",
+ "hard_shutdown",
+ "clean_reboot",
+ "clean_shutdown",
+ "pause",
+ "checkpoint",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [
+ "OpaqueRef:4fa7d34e-1fb6-9e88-1b21-41a3c6550d8b"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "143",
+ "generation_id": "3274224479562869847:6952848762503845513",
+ "guest_metrics": "OpaqueRef:453f21be-954d-2ca8-e38e-09741e91350c",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "2147483648",
+ "memory_dynamic_min": "2147483648",
+ "memory_overhead": "20971520",
+ "memory_static_max": "2147483648",
+ "memory_static_min": "1073741824",
+ "memory_target": "2147483648",
+ "metrics": "OpaqueRef:6eede779-4e55-7cfb-8b8a-e4b9becf770b",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-1",
+ "order": "0",
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "366fe8e0-878b-4320-8731-90d1ed3c0b93"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-28800",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "power_state": "Running",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"1649267441664\"/><restriction field=\"vcpus-max\" max=\"32\"/><restriction field=\"has-vendor-device\" value=\"true\"/><restriction max=\"255\" property=\"number-of-vbds\"/><restriction max=\"7\" property=\"number-of-vifs\"/></restrictions>",
+ "reference_label": "windows-server-2016-64bit",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "81c373d7-a407-322f-911b-31386eb5215d",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:453f21be-954d-2ca8-e38e-09741e91350c": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "1020",
+ "major": "7",
+ "micro": "0",
+ "minor": "1"
+ },
+ "can_use_hotplug_vbd": "yes",
+ "can_use_hotplug_vif": "yes",
+ "disks": {},
+ "last_updated": "20190113T19:40:34Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "10.0.0.2",
+ "0/ipv6/0": "fe80:0000:0000:0000:11e1:12c9:ef3b:75a0"
+ },
+ "os_version": {
+ "distro": "windows",
+ "major": "6",
+ "minor": "2",
+ "name": "Microsoft Windows Server 2016 Standard|C:\\Windows|\\Device\\Harddisk0\\Partition2",
+ "spmajor": "0",
+ "spminor": "0"
+ },
+ "other": {
+ "data-ts": "1",
+ "error": "WTSQueryUserToken : 1008 failed.",
+ "feature-balloon": "1",
+ "feature-poweroff": "1",
+ "feature-reboot": "1",
+ "feature-s3": "1",
+ "feature-s4": "1",
+ "feature-setcomputername": "1",
+ "feature-static-ip-setting": "1",
+ "feature-suspend": "1",
+ "feature-ts": "1",
+ "feature-ts2": "1",
+ "feature-xs-batcmd": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "9ea6803f-12ca-3d6a-47b7-c90a33b67b98"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:6eede779-4e55-7cfb-8b8a-e4b9becf770b": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "2",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {},
+ "hvm": true,
+ "install_time": "20190113T19:31:47Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "2147475456",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "20190113T19:38:59Z",
+ "state": [],
+ "uuid": "c67fadf7-8143-0c92-c772-cd3901c18e70"
+ }
+ },
+ "host": {
+ "OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770": {
+ "API_version_major": "2",
+ "API_version_minor": "7",
+ "API_version_vendor": "XenSource",
+ "API_version_vendor_implementation": {},
+ "PBDs": [],
+ "PCIs": [],
+ "PGPUs": [],
+ "PIFs": [],
+ "address": "10.0.0.1",
+ "allowed_operations": [
+ "vm_migrate",
+ "provision",
+ "vm_resume",
+ "evacuate",
+ "vm_start"
+ ],
+ "bios_strings": {},
+ "blobs": {},
+ "capabilities": [
+ "xen-3.0-x86_64",
+ "xen-3.0-x86_32p",
+ "hvm-3.0-x86_32",
+ "hvm-3.0-x86_32p",
+ "hvm-3.0-x86_64",
+ ""
+ ],
+ "chipset_info": {
+ "iommu": "true"
+ },
+ "control_domain": "OpaqueRef:a2a31555-f232-822b-8f36-10d75d44b79c",
+ "cpu_configuration": {},
+ "cpu_info": {
+ "cpu_count": "40",
+ "family": "6",
+ "features": "7ffefbff-bfebfbff-00000021-2c100800",
+ "features_hvm": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "features_pv": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "flags": "fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ht syscall nx lm constant_tsc arch_perfmon rep_good nopl nonstop_tsc eagerfpu pni pclmulqdq monitor est ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm ida arat epb pln pts dtherm fsgsbase bmi1 avx2 bmi2 erms xsaveopt cqm_llc cqm_occup_llc",
+ "model": "63",
+ "modelname": "Intel(R) Xeon(R) CPU E5-2660 v3 @ 2.60GHz",
+ "socket_count": "2",
+ "speed": "2597.064",
+ "stepping": "2",
+ "vendor": "GenuineIntel"
+ },
+ "crash_dump_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "crashdumps": [],
+ "current_operations": {},
+ "display": "enabled",
+ "edition": "free",
+ "enabled": true,
+ "external_auth_configuration": {},
+ "external_auth_service_name": "",
+ "external_auth_type": "",
+ "features": [],
+ "guest_VCPUs_params": {},
+ "ha_network_peers": [],
+ "ha_statefiles": [],
+ "host_CPUs": [
+ "OpaqueRef:f7e744f6-a6f9-c460-999a-c27e1395e2e0",
+ "OpaqueRef:f6e5dcf0-0453-8f3f-88c1-7ad6e2ef3dd1",
+ "OpaqueRef:f27a52fb-5feb-173d-1a07-d1735a83c2cc",
+ "OpaqueRef:ed65327a-508a-ccfc-dba6-2a0175cb2432",
+ "OpaqueRef:e41d2f2a-fe9e-72cb-8104-b22d6d314b13",
+ "OpaqueRef:e1988469-b814-5d10-17a6-bfd7c62d2b5f",
+ "OpaqueRef:d73967dc-b8d8-b47b-39f4-d599fdcabf55",
+ "OpaqueRef:cba9ebd9-40dc-0611-d1bb-aa661bd0bf70",
+ "OpaqueRef:c53d3110-4085-60af-8300-d879818789f7",
+ "OpaqueRef:bee0cf87-7df6-79a6-94e8-36f98e69ad20",
+ "OpaqueRef:bde28e83-213f-0e65-b6ad-0ae1ecebb98d",
+ "OpaqueRef:bbfefe67-f65f-98cb-c3fc-cb8ea0588006",
+ "OpaqueRef:b38ac595-afea-0ca0-49a0-9f5ef2368e3b",
+ "OpaqueRef:b14ef333-78b1-193d-02da-dc9bfed36912",
+ "OpaqueRef:afd478bf-57b9-0c79-f257-50aeb81504f1",
+ "OpaqueRef:a307cd3a-2132-2e42-4ebc-cc1c7780736d",
+ "OpaqueRef:a1a9df7d-88ba-64fd-a55c-0f6472e1753f",
+ "OpaqueRef:a0e39c9c-3e0b-fa03-e5d0-93a09aa77393",
+ "OpaqueRef:9fd5719b-36ab-8e25-7756-20a496ccb331",
+ "OpaqueRef:9ac4195d-ac07-cfe2-bc19-27ee54cf91fb",
+ "OpaqueRef:98c5c00c-1e2d-e22b-842e-79e85ce07873",
+ "OpaqueRef:961129bf-e695-f206-7297-64f9007a64f3",
+ "OpaqueRef:64368b4c-3488-2808-f0b3-42f2a656df2b",
+ "OpaqueRef:620dabc0-d7c5-0dc8-52df-3be25194c2fb",
+ "OpaqueRef:5cee2759-dd8e-7e1a-0727-21e196584030",
+ "OpaqueRef:58f70163-863d-5787-ffbb-2416cb16ca1e",
+ "OpaqueRef:4462f848-f396-653d-67f9-2bed13be2c58",
+ "OpaqueRef:40e800c2-19db-7cd8-c045-5ae93f908cae",
+ "OpaqueRef:3f84278b-dec6-ded0-1a33-4daa0ce75a2f",
+ "OpaqueRef:3ef14992-62f6-e1f0-5715-0ee02a834a9c",
+ "OpaqueRef:3e274c24-c55b-06f5-2c8f-415421043ab2",
+ "OpaqueRef:35ff27da-f286-7b70-adc1-a200880bb79f",
+ "OpaqueRef:2511aa53-8660-e442-3cd2-305982d1f751",
+ "OpaqueRef:21d234e3-138c-81ca-9ed8-febc81b874e9",
+ "OpaqueRef:1f9b4ee3-dcc7-114e-b401-dc3e94c07efa",
+ "OpaqueRef:1b94a981-d340-dd07-41c2-b3ff3c545fed",
+ "OpaqueRef:197ad104-64a8-5af3-8c7a-95f3d301aadd",
+ "OpaqueRef:1672e747-dc4b-737b-ddcf-0a373f966012",
+ "OpaqueRef:12ced494-a225-7584-456b-739331bb5114",
+ "OpaqueRef:0139ff72-62ac-1a6a-8f6f-cb01d8a4ee92"
+ ],
+ "hostname": "ansible-test-host-1",
+ "license_params": {
+ "address1": "",
+ "address2": "",
+ "city": "",
+ "company": "",
+ "country": "",
+ "enable_xha": "true",
+ "expiry": "20291231T23:00:00Z",
+ "grace": "no",
+ "license_type": "",
+ "name": "",
+ "platform_filter": "false",
+ "postalcode": "",
+ "productcode": "",
+ "regular_nag_dialog": "false",
+ "restrict_ad": "false",
+ "restrict_batch_hotfix_apply": "true",
+ "restrict_checkpoint": "false",
+ "restrict_cifs": "true",
+ "restrict_connection": "false",
+ "restrict_cpu_masking": "false",
+ "restrict_dmc": "false",
+ "restrict_dr": "false",
+ "restrict_email_alerting": "false",
+ "restrict_equalogic": "false",
+ "restrict_export_resource_data": "true",
+ "restrict_gpu": "false",
+ "restrict_guest_agent_auto_update": "true",
+ "restrict_guest_ip_setting": "false",
+ "restrict_health_check": "false",
+ "restrict_historical_performance": "false",
+ "restrict_hotfix_apply": "false",
+ "restrict_integrated_gpu_passthrough": "false",
+ "restrict_intellicache": "false",
+ "restrict_lab": "false",
+ "restrict_live_patching": "true",
+ "restrict_marathon": "false",
+ "restrict_nested_virt": "true",
+ "restrict_netapp": "false",
+ "restrict_pci_device_for_auto_update": "true",
+ "restrict_pool_attached_storage": "false",
+ "restrict_pooling": "false",
+ "restrict_pvs_proxy": "true",
+ "restrict_qos": "false",
+ "restrict_rbac": "false",
+ "restrict_read_caching": "true",
+ "restrict_set_vcpus_number_live": "true",
+ "restrict_ssl_legacy_switch": "false",
+ "restrict_stage": "false",
+ "restrict_storage_xen_motion": "false",
+ "restrict_storagelink": "false",
+ "restrict_storagelink_site_recovery": "false",
+ "restrict_vgpu": "true",
+ "restrict_vif_locking": "false",
+ "restrict_vlan": "false",
+ "restrict_vm_memory_introspection": "true",
+ "restrict_vmpr": "false",
+ "restrict_vmss": "false",
+ "restrict_vss": "false",
+ "restrict_vswitch_controller": "false",
+ "restrict_web_selfservice": "true",
+ "restrict_web_selfservice_manager": "true",
+ "restrict_wlb": "true",
+ "restrict_xcm": "true",
+ "restrict_xen_motion": "false",
+ "serialnumber": "",
+ "sku_marketing_name": "Citrix XenServer",
+ "sku_type": "free",
+ "sockets": "2",
+ "state": "",
+ "version": ""
+ },
+ "license_server": {
+ "address": "localhost",
+ "port": "27000"
+ },
+ "local_cache_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "logging": {},
+ "memory_overhead": "4606619648",
+ "metrics": "OpaqueRef:82b6937a-60c2-96d8-4e78-9f9a1143033f",
+ "name_description": "",
+ "name_label": "ansible-test-host-1",
+ "other_config": {
+ "agent_start_time": "1532019557.",
+ "boot_time": "1530023264.",
+ "iscsi_iqn": "iqn.2018-06.com.example:c8bac750",
+ "last_blob_sync_time": "1547394076.36",
+ "multipathhandle": "dmp",
+ "multipathing": "true"
+ },
+ "patches": [
+ "OpaqueRef:f74ca18d-cfb7-e4fe-e5c4-819843de11e2",
+ "OpaqueRef:f53ff05e-8dd8-3a15-d3b0-8dcf6004fbe2",
+ "OpaqueRef:ed7f38da-1a50-a48b-60bf-933cabe8d7bc",
+ "OpaqueRef:e7bb1462-51a5-1aaf-3b56-11b8ebd83a94",
+ "OpaqueRef:d87b343b-6ba3-db8b-b80e-e02319ba5924",
+ "OpaqueRef:ccb00450-ed04-4eaa-e6d7-130ef3722374",
+ "OpaqueRef:b79b8864-11d9-1d5f-09e5-a66d7b64b9e2",
+ "OpaqueRef:9bebcc7d-61ae-126b-3be0-9156026e586f",
+ "OpaqueRef:740a1156-b991-00b8-ef50-fdbb22a4d911",
+ "OpaqueRef:71def430-754b-2bfb-6c93-ec3b67b754e4",
+ "OpaqueRef:6c73b00d-df66-1740-9578-2b14e46297ba",
+ "OpaqueRef:6a53d2ae-3d6b-32ed-705f-fd53f1304470",
+ "OpaqueRef:35a67684-b094-1c77-beff-8237d87c7a27",
+ "OpaqueRef:33da42c2-c421-9859-79b7-ce9b6c394a1b",
+ "OpaqueRef:2baa6b4b-9bbe-c1b2-23ce-c8c831ac581d",
+ "OpaqueRef:2ac3beea-dee2-44e7-9f67-5fd216e593a0",
+ "OpaqueRef:1bd8f24b-3190-6e7a-b36e-e2998197d062",
+ "OpaqueRef:1694ea26-4930-6ca1-036e-273438375de9",
+ "OpaqueRef:09813f03-0c6f-a6af-768f-ef4cdde2c641"
+ ],
+ "power_on_config": {},
+ "power_on_mode": "",
+ "resident_VMs": [],
+ "sched_policy": "credit",
+ "software_version": {
+ "build_number": "release/falcon/master/8",
+ "date": "2017-05-11",
+ "db_schema": "5.120",
+ "dbv": "2017.0517",
+ "hostname": "f7d02093adae",
+ "linux": "4.4.0+10",
+ "network_backend": "openvswitch",
+ "platform_name": "XCP",
+ "platform_version": "2.3.0",
+ "product_brand": "XenServer",
+ "product_version": "7.2.0",
+ "product_version_text": "7.2",
+ "product_version_text_short": "7.2",
+ "xapi": "1.9",
+ "xen": "4.7.5-2.12",
+ "xencenter_max": "2.7",
+ "xencenter_min": "2.7"
+ },
+ "ssl_legacy": true,
+ "supported_bootloaders": [
+ "pygrub",
+ "eliloader"
+ ],
+ "suspend_image_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "tags": [],
+ "updates": [
+ "OpaqueRef:b71938bf-4c4f-eb17-7e78-588e71297a74",
+ "OpaqueRef:91cfa47b-52f9-a4e3-4e78-52e3eb3e5141",
+ "OpaqueRef:e2209ae9-5362-3a20-f691-9294144e49f2",
+ "OpaqueRef:6ac77a0f-f079-8067-85cc-c9ae2f8dcca9",
+ "OpaqueRef:a17e721d-faf4-6ad1-c617-dd4899279534",
+ "OpaqueRef:6c9b814c-e1c2-b8be-198f-de358686b10a",
+ "OpaqueRef:fbaabbfe-88d5-d89b-5b3f-d6374601ca71",
+ "OpaqueRef:9eccc765-9726-d220-96b1-2e85adf77ecc",
+ "OpaqueRef:204558d7-dce0-2304-bdc5-80ec5fd7e3c3",
+ "OpaqueRef:65b14ae7-f440-0c4d-4af9-c7946b90fd2f",
+ "OpaqueRef:0760c608-b02e-743a-18a1-fa8f205374d6",
+ "OpaqueRef:1ced32ca-fec4-8b44-0e8f-753c97f2d93f",
+ "OpaqueRef:3fffd7c7-f4d1-6b03-a5b8-d75211bb7b8f",
+ "OpaqueRef:01befb95-412e-e9dd-5b5d-edd50df61cb1",
+ "OpaqueRef:a3f9481e-fe3d-1f00-235f-44d404f51128",
+ "OpaqueRef:507ee5fc-59d3-e635-21d5-98a5cace4bf2",
+ "OpaqueRef:7b4b5da1-54af-d0c4-3fea-394b4257bffe",
+ "OpaqueRef:f61edc83-91d9-a161-113f-00c110196238",
+ "OpaqueRef:7efce157-9b93-d116-f3f8-7eb0c6fb1a79"
+ ],
+ "updates_requiring_reboot": [],
+ "uuid": "92ac8132-276b-4d0f-9d3a-54db51e4a438",
+ "virtual_hardware_platform_versions": [
+ "0",
+ "1",
+ "2"
+ ]
+ }
+ },
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json
new file mode 100644
index 00000000..607212c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json
@@ -0,0 +1,87 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "custom",
+ "disks": [
+ {
+ "name": "ansible-test-vm-2-root",
+ "name_desc": "/",
+ "os_device": "xvda",
+ "size": 10737418240,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "ansible-test-vm-2-mysql",
+ "name_desc": "/var/lib/mysql",
+ "os_device": "xvdb",
+ "size": 1073741824,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "140",
+ "folder": "/Ansible/Test",
+ "hardware": {
+ "memory_mb": 1024,
+ "num_cpu_cores_per_socket": 1,
+ "num_cpus": 1
+ },
+ "home_server": "ansible-test-host-2",
+ "is_template": false,
+ "name": "ansible-test-vm-2",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "10.0.0.1",
+ "gateway6": "",
+ "ip": "169.254.0.2",
+ "ip6": [],
+ "mac": "16:87:31:70:d6:31",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "255.255.255.0",
+ "prefix": "24",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "CentOS 7",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:cf1402d3-b6c1-d908-fe62-06502e3b311a",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "instant": "true",
+ "linux_template": "true",
+ "mac_seed": "0ab46664-f519-5383-166e-e4ea485ede7d"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "device_id": "0001",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "0",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "false"
+ },
+ "state": "poweredon",
+ "uuid": "0a05d5ad-3e4b-f0dc-6101-8c56623958bc",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/gateway": "10.0.0.1",
+ "vm-data/networks/0/ip": "10.0.0.3",
+ "vm-data/networks/0/mac": "16:87:31:70:d6:31",
+ "vm-data/networks/0/name": "Host internal management network",
+ "vm-data/networks/0/netmask": "255.255.255.0",
+ "vm-data/networks/0/prefix": "24",
+ "vm-data/networks/0/type": "static"
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json
new file mode 100644
index 00000000..10615f40
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json
@@ -0,0 +1,771 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:510e214e-f0ba-3bc9-7834-a4f4d3fa33ef": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "insert",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdd",
+ "empty": true,
+ "metrics": "OpaqueRef:1075bebe-ba71-66ef-ba30-8afbc83bc6b5",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "79ee1d8e-944b-3bfd-ba4c-a0c165d84f3d"
+ },
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760": {
+ "VDI": "OpaqueRef:102bef39-b134-d23a-9a50-490e1dbca8f7",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "pause"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:1c71ccde-d7e9-10fb-569c-993b880fa790",
+ "mode": "RW",
+ "other_config": {
+ "owner": ""
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": false,
+ "userdevice": "0",
+ "uuid": "932fdf6d-7ac5-45e8-a48e-694af75726f1"
+ },
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151": {
+ "VDI": "OpaqueRef:87b45ac6-af36-f4fd-6ebd-a08bed9001e4",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "unplug",
+ "unplug_force",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdb",
+ "empty": false,
+ "metrics": "OpaqueRef:b8424146-d3ea-4850-db9a-47f0059c10ac",
+ "mode": "RW",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": true,
+ "userdevice": "1",
+ "uuid": "c0c1e648-3690-e1fb-9f47-24b4df0cb458"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:102bef39-b134-d23a-9a50-490e1dbca8f7": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "fa1202b8-326f-4235-802e-fafbed66b26b",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/",
+ "name_label": "ansible-test-vm-2-root",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "10766778368",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": "RW",
+ "read-caching-enabled-on-dff6702e-bcb6-4704-8dd4-952e8c883365": "false",
+ "read-caching-reason-dff6702e-bcb6-4704-8dd4-952e8c883365": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "fa1202b8-326f-4235-802e-fafbed66b26b",
+ "virtual_size": "10737418240",
+ "xenstore_data": {}
+ },
+ "OpaqueRef:87b45ac6-af36-f4fd-6ebd-a08bed9001e4": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "ab3a4d72-f498-4687-86ce-ca937046db76",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/var/lib/mysql",
+ "name_label": "ansible-test-vm-2-mysql",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "1082130432",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": "RW",
+ "read-caching-enabled-on-dff6702e-bcb6-4704-8dd4-952e8c883365": "false",
+ "read-caching-reason-dff6702e-bcb6-4704-8dd4-952e8c883365": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "user",
+ "uuid": "ab3a4d72-f498-4687-86ce-ca937046db76",
+ "virtual_size": "1073741824",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": {
+ "MAC": "16:87:31:70:d6:31",
+ "MAC_autogenerated": false,
+ "MTU": "1500",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unplug"
+ ],
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "0",
+ "ipv4_addresses": [],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "None",
+ "ipv4_gateway": "",
+ "ipv6_addresses": [],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:d74d5f20-f0ab-ee36-9a74-496ffb994232",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "07b70134-9396-94fc-5105-179b430ce4f8"
+ }
+ },
+ "VM": {
+ "OpaqueRef:08632af0-473e-5106-f400-7910229e49be": {
+ "HVM_boot_params": {
+ "order": "cdn"
+ },
+ "HVM_boot_policy": "BIOS order",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "",
+ "PV_bootloader": "",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:510e214e-f0ba-3bc9-7834-a4f4d3fa33ef",
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151",
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760"
+ ],
+ "VCPUs_at_startup": "1",
+ "VCPUs_max": "1",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "migrate_send",
+ "pool_migrate",
+ "changing_VCPUs_live",
+ "suspend",
+ "hard_reboot",
+ "hard_shutdown",
+ "clean_reboot",
+ "clean_shutdown",
+ "pause",
+ "checkpoint",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [
+ "OpaqueRef:2a24e023-a856-de30-aea3-2024bacdc71f"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "140",
+ "generation_id": "",
+ "guest_metrics": "OpaqueRef:150d2dfa-b634-7965-92ab-31fc26382683",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "1073741824",
+ "memory_dynamic_min": "1073741824",
+ "memory_overhead": "11534336",
+ "memory_static_max": "1073741824",
+ "memory_static_min": "1073741824",
+ "memory_target": "1073741824",
+ "metrics": "OpaqueRef:b56b460b-6476-304d-b143-ce543ffab828",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-2",
+ "order": "0",
+ "other_config": {
+ "base_template_name": "CentOS 7",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:cf1402d3-b6c1-d908-fe62-06502e3b311a",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "instant": "true",
+ "linux_template": "true",
+ "mac_seed": "0ab46664-f519-5383-166e-e4ea485ede7d"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "device_id": "0001",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "0",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "false"
+ },
+ "power_state": "Running",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"549755813888\" /><restriction field=\"vcpus-max\" max=\"16\" /><restriction property=\"number-of-vbds\" max=\"16\" /><restriction property=\"number-of-vifs\" max=\"7\" /><restriction field=\"allow-gpu-passthrough\" value=\"0\" /></restrictions>",
+ "reference_label": "",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "0a05d5ad-3e4b-f0dc-6101-8c56623958bc",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/gateway": "10.0.0.1",
+ "vm-data/networks/0/ip": "10.0.0.3",
+ "vm-data/networks/0/mac": "16:87:31:70:d6:31",
+ "vm-data/networks/0/name": "Host internal management network",
+ "vm-data/networks/0/netmask": "255.255.255.0",
+ "vm-data/networks/0/prefix": "24",
+ "vm-data/networks/0/type": "static"
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:150d2dfa-b634-7965-92ab-31fc26382683": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "90977",
+ "major": "6",
+ "micro": "0",
+ "minor": "5"
+ },
+ "can_use_hotplug_vbd": "unspecified",
+ "can_use_hotplug_vif": "unspecified",
+ "disks": {},
+ "last_updated": "20190113T19:36:26Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "169.254.0.2"
+ },
+ "os_version": {
+ "distro": "centos",
+ "major": "7",
+ "minor": "2",
+ "name": "CentOS Linux release 7.2.1511 (Core)",
+ "uname": "3.10.0-327.22.2.el7.x86_64"
+ },
+ "other": {
+ "feature-balloon": "1",
+ "feature-shutdown": "1",
+ "feature-suspend": "1",
+ "feature-vcpu-hotplug": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "5c9d1be5-7eee-88f2-46c3-df1d44f9cdb5"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:b56b460b-6476-304d-b143-ce543ffab828": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "1",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {},
+ "hvm": true,
+ "install_time": "20190113T19:32:46Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "1073729536",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "20190113T19:35:15Z",
+ "state": [],
+ "uuid": "876dd44c-aad1-97bf-9ee5-4cd58eac7163"
+ }
+ },
+ "host": {
+ "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": {
+ "API_version_major": "2",
+ "API_version_minor": "7",
+ "API_version_vendor": "XenSource",
+ "API_version_vendor_implementation": {},
+ "PBDs": [],
+ "PCIs": [],
+ "PGPUs": [],
+ "PIFs": [],
+ "address": "10.0.0.1",
+ "allowed_operations": [
+ "vm_migrate",
+ "provision",
+ "vm_resume",
+ "evacuate",
+ "vm_start"
+ ],
+ "bios_strings": {},
+ "blobs": {},
+ "capabilities": [
+ "xen-3.0-x86_64",
+ "xen-3.0-x86_32p",
+ "hvm-3.0-x86_32",
+ "hvm-3.0-x86_32p",
+ "hvm-3.0-x86_64",
+ ""
+ ],
+ "chipset_info": {
+ "iommu": "true"
+ },
+ "control_domain": "OpaqueRef:ffcc92a1-8fde-df6f-a501-44b37811286b",
+ "cpu_configuration": {},
+ "cpu_info": {
+ "cpu_count": "40",
+ "family": "6",
+ "features": "7ffefbff-bfebfbff-00000021-2c100800",
+ "features_hvm": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "features_pv": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "flags": "fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ht syscall nx lm constant_tsc arch_perfmon rep_good nopl nonstop_tsc eagerfpu pni pclmulqdq monitor est ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm ida arat epb pln pts dtherm fsgsbase bmi1 avx2 bmi2 erms xsaveopt cqm_llc cqm_occup_llc",
+ "model": "63",
+ "modelname": "Intel(R) Xeon(R) CPU E5-2660 v3 @ 2.60GHz",
+ "socket_count": "2",
+ "speed": "2597.070",
+ "stepping": "2",
+ "vendor": "GenuineIntel"
+ },
+ "crash_dump_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "crashdumps": [],
+ "current_operations": {},
+ "display": "enabled",
+ "edition": "free",
+ "enabled": true,
+ "external_auth_configuration": {},
+ "external_auth_service_name": "",
+ "external_auth_type": "",
+ "features": [],
+ "guest_VCPUs_params": {},
+ "ha_network_peers": [],
+ "ha_statefiles": [],
+ "host_CPUs": [
+ "OpaqueRef:ec3ba9c4-9b57-236b-3eaa-b157affc1621",
+ "OpaqueRef:e6de7ab3-f4ad-f271-e51b-e3d8c041d3fb",
+ "OpaqueRef:e519ef88-bf41-86ac-16b3-c178cb4b78b1",
+ "OpaqueRef:e48f1bc1-98ba-89e5-ab69-821c625f7f82",
+ "OpaqueRef:e2659936-3de6-dbca-cc44-4af50960b2b7",
+ "OpaqueRef:d0da1e31-20ac-4aff-8897-e80df8200648",
+ "OpaqueRef:cec473ba-41a8-439d-b397-be0c60467b5d",
+ "OpaqueRef:ce88014d-b06c-c959-0624-04d79b791885",
+ "OpaqueRef:c656ca58-41fe-3689-d322-174aa5798beb",
+ "OpaqueRef:c0a21f14-8f46-19de-1cf4-530a34c4aa17",
+ "OpaqueRef:bf70c061-7b45-0497-7ef6-65a236e898e8",
+ "OpaqueRef:b7a2ba0f-f11b-3633-ad47-4f5f76a600a8",
+ "OpaqueRef:b4fef1fa-3aae-9790-f47e-6a17f645339c",
+ "OpaqueRef:b4594721-f8f4-4475-61c5-4efeec1733f1",
+ "OpaqueRef:9dcba36f-c29f-478f-f578-d1ea347410a6",
+ "OpaqueRef:987897e8-1184-917e-6a5f-e205d0c739e5",
+ "OpaqueRef:90f06d64-be18-7fdf-36ba-bbd696a26cf3",
+ "OpaqueRef:90150bc1-e604-4cd4-35ad-9cfa8e985de3",
+ "OpaqueRef:838f4ad4-8ad2-0d6c-a74e-26baa461de3d",
+ "OpaqueRef:736fb523-d347-e8c0-089b-c9811d3c1195",
+ "OpaqueRef:7137b479-87d4-9097-a684-e54cc4de5d09",
+ "OpaqueRef:6e08fa1d-7d7b-d9be-1574-ffe95bd515fd",
+ "OpaqueRef:6b9e6ecd-54e5-4248-5aea-ee5b99248818",
+ "OpaqueRef:65d56b24-3445-b444-5125-c91e6966fd29",
+ "OpaqueRef:60908eca-1e5c-c938-5b76-e8ff9d8899ab",
+ "OpaqueRef:46e96878-c076-2164-2373-6cdd108c2436",
+ "OpaqueRef:40ccdaf4-6008-2b83-92cb-ca197f73433f",
+ "OpaqueRef:3bc8133a-ccb2-6790-152f-b3f577517751",
+ "OpaqueRef:38c8edd8-0621-76de-53f6-86bef2a9e05c",
+ "OpaqueRef:342c1bab-a211-a0eb-79a5-780bd5ad1f23",
+ "OpaqueRef:1e20e6d0-5502-0dff-4f17-5d35eb833af1",
+ "OpaqueRef:176baafa-0e63-7000-f754-25e2a6b74959",
+ "OpaqueRef:16cab1a2-0111-b2af-6dfe-3724b79e6b6b",
+ "OpaqueRef:0f213647-8362-9c5e-e99b-0ebaefc609ce",
+ "OpaqueRef:0e019819-b41f-0bfb-d4ee-dd5484fea9b6",
+ "OpaqueRef:0d39212f-82ba-190c-b304-19b3fa491fff",
+ "OpaqueRef:087ce3ad-3b66-ae1e-3130-3ae640dcc638",
+ "OpaqueRef:0730f24c-87ed-8296-8f14-3036e5ad2357",
+ "OpaqueRef:04c27426-4895-39a7-9ade-ef33d3721c26",
+ "OpaqueRef:017b27bf-0270-19e7-049a-5a9b3bb54898"
+ ],
+ "hostname": "ansible-test-host-2",
+ "license_params": {
+ "address1": "",
+ "address2": "",
+ "city": "",
+ "company": "",
+ "country": "",
+ "enable_xha": "true",
+ "expiry": "20291231T23:00:00Z",
+ "grace": "no",
+ "license_type": "",
+ "name": "",
+ "platform_filter": "false",
+ "postalcode": "",
+ "productcode": "",
+ "regular_nag_dialog": "false",
+ "restrict_ad": "false",
+ "restrict_batch_hotfix_apply": "true",
+ "restrict_checkpoint": "false",
+ "restrict_cifs": "true",
+ "restrict_connection": "false",
+ "restrict_cpu_masking": "false",
+ "restrict_dmc": "false",
+ "restrict_dr": "false",
+ "restrict_email_alerting": "false",
+ "restrict_equalogic": "false",
+ "restrict_export_resource_data": "true",
+ "restrict_gpu": "false",
+ "restrict_guest_agent_auto_update": "true",
+ "restrict_guest_ip_setting": "false",
+ "restrict_health_check": "false",
+ "restrict_historical_performance": "false",
+ "restrict_hotfix_apply": "false",
+ "restrict_integrated_gpu_passthrough": "false",
+ "restrict_intellicache": "false",
+ "restrict_lab": "false",
+ "restrict_live_patching": "true",
+ "restrict_marathon": "false",
+ "restrict_nested_virt": "true",
+ "restrict_netapp": "false",
+ "restrict_pci_device_for_auto_update": "true",
+ "restrict_pool_attached_storage": "false",
+ "restrict_pooling": "false",
+ "restrict_pvs_proxy": "true",
+ "restrict_qos": "false",
+ "restrict_rbac": "false",
+ "restrict_read_caching": "true",
+ "restrict_set_vcpus_number_live": "true",
+ "restrict_ssl_legacy_switch": "false",
+ "restrict_stage": "false",
+ "restrict_storage_xen_motion": "false",
+ "restrict_storagelink": "false",
+ "restrict_storagelink_site_recovery": "false",
+ "restrict_vgpu": "true",
+ "restrict_vif_locking": "false",
+ "restrict_vlan": "false",
+ "restrict_vm_memory_introspection": "true",
+ "restrict_vmpr": "false",
+ "restrict_vmss": "false",
+ "restrict_vss": "false",
+ "restrict_vswitch_controller": "false",
+ "restrict_web_selfservice": "true",
+ "restrict_web_selfservice_manager": "true",
+ "restrict_wlb": "true",
+ "restrict_xcm": "true",
+ "restrict_xen_motion": "false",
+ "serialnumber": "",
+ "sku_marketing_name": "Citrix XenServer",
+ "sku_type": "free",
+ "sockets": "2",
+ "state": "",
+ "version": ""
+ },
+ "license_server": {
+ "address": "localhost",
+ "port": "27000"
+ },
+ "local_cache_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "logging": {},
+ "memory_overhead": "4865126400",
+ "metrics": "OpaqueRef:f55653cb-92eb-8257-f2ee-7a2d1c2d6aef",
+ "name_description": "",
+ "name_label": "ansible-test-host-2",
+ "other_config": {
+ "agent_start_time": "1532019582.",
+ "boot_time": "1528986759.",
+ "iscsi_iqn": "iqn.2018-06.com.example:87b7637d",
+ "last_blob_sync_time": "1547394065.41",
+ "multipathhandle": "dmp",
+ "multipathing": "true"
+ },
+ "patches": [
+ "OpaqueRef:f5bd18b6-1423-893a-5d7f-7095338e6a2d",
+ "OpaqueRef:eecb0b95-87fb-a53e-651c-9741efd18bb6",
+ "OpaqueRef:e92c9ef3-2e51-1a36-d400-9e237982b782",
+ "OpaqueRef:cc98226c-2c08-799e-5f15-7761a398e4a0",
+ "OpaqueRef:c4f35e66-d064-55a7-6946-7f4b145275a6",
+ "OpaqueRef:c3794494-f894-6141-b811-f37a8fe60094",
+ "OpaqueRef:bcf61af7-63a9-e430-5b7c-a740ba470596",
+ "OpaqueRef:b58ac71e-797e-6f66-71ad-fe298c94fd10",
+ "OpaqueRef:a2ea18fd-5343-f8db-718d-f059c2a8cce0",
+ "OpaqueRef:929db459-6861-c588-158f-70f763331d6d",
+ "OpaqueRef:92962d94-2205-f6e1-12f9-b55a99fd824d",
+ "OpaqueRef:65dfb07a-f90d-dad9-9ab8-1cc2b1e79afb",
+ "OpaqueRef:537a87c4-3bf4-969f-f06a-2dd8d3a018a2",
+ "OpaqueRef:32dd1de3-c9c8-bcbb-27a0-83d4a930876d",
+ "OpaqueRef:30a8ccc8-74a9-b31f-0403-66b117e281b6",
+ "OpaqueRef:24545c44-ffd1-8a28-18c6-3d008bf4d63e",
+ "OpaqueRef:1fcef81b-7c44-a4db-f59a-c4a147da9c49",
+ "OpaqueRef:1e98a240-514b-1863-5518-c771d0ebf579",
+ "OpaqueRef:1632cab2-b268-6ce8-4f7b-ce7fd4bfa1eb"
+ ],
+ "power_on_config": {},
+ "power_on_mode": "",
+ "resident_VMs": [],
+ "sched_policy": "credit",
+ "software_version": {
+ "build_number": "release/falcon/master/8",
+ "date": "2017-05-11",
+ "db_schema": "5.120",
+ "dbv": "2017.0517",
+ "hostname": "f7d02093adae",
+ "linux": "4.4.0+10",
+ "network_backend": "openvswitch",
+ "platform_name": "XCP",
+ "platform_version": "2.3.0",
+ "product_brand": "XenServer",
+ "product_version": "7.2.0",
+ "product_version_text": "7.2",
+ "product_version_text_short": "7.2",
+ "xapi": "1.9",
+ "xen": "4.7.5-2.12",
+ "xencenter_max": "2.7",
+ "xencenter_min": "2.7"
+ },
+ "ssl_legacy": true,
+ "supported_bootloaders": [
+ "pygrub",
+ "eliloader"
+ ],
+ "suspend_image_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "tags": [],
+ "updates": [
+ "OpaqueRef:7b4b5da1-54af-d0c4-3fea-394b4257bffe",
+ "OpaqueRef:fbaabbfe-88d5-d89b-5b3f-d6374601ca71",
+ "OpaqueRef:507ee5fc-59d3-e635-21d5-98a5cace4bf2",
+ "OpaqueRef:6c9b814c-e1c2-b8be-198f-de358686b10a",
+ "OpaqueRef:a17e721d-faf4-6ad1-c617-dd4899279534",
+ "OpaqueRef:6ac77a0f-f079-8067-85cc-c9ae2f8dcca9",
+ "OpaqueRef:f61edc83-91d9-a161-113f-00c110196238",
+ "OpaqueRef:b71938bf-4c4f-eb17-7e78-588e71297a74",
+ "OpaqueRef:01befb95-412e-e9dd-5b5d-edd50df61cb1",
+ "OpaqueRef:a3f9481e-fe3d-1f00-235f-44d404f51128",
+ "OpaqueRef:0760c608-b02e-743a-18a1-fa8f205374d6",
+ "OpaqueRef:204558d7-dce0-2304-bdc5-80ec5fd7e3c3",
+ "OpaqueRef:9eccc765-9726-d220-96b1-2e85adf77ecc",
+ "OpaqueRef:91cfa47b-52f9-a4e3-4e78-52e3eb3e5141",
+ "OpaqueRef:3fffd7c7-f4d1-6b03-a5b8-d75211bb7b8f",
+ "OpaqueRef:7efce157-9b93-d116-f3f8-7eb0c6fb1a79",
+ "OpaqueRef:e2209ae9-5362-3a20-f691-9294144e49f2",
+ "OpaqueRef:1ced32ca-fec4-8b44-0e8f-753c97f2d93f",
+ "OpaqueRef:65b14ae7-f440-0c4d-4af9-c7946b90fd2f"
+ ],
+ "updates_requiring_reboot": [],
+ "uuid": "dff6702e-bcb6-4704-8dd4-952e8c883365",
+ "virtual_hardware_platform_versions": [
+ "0",
+ "1",
+ "2"
+ ]
+ }
+ },
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json
new file mode 100644
index 00000000..5ed7df7f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json
@@ -0,0 +1,75 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "custom",
+ "disks": [
+ {
+ "name": "ansible-test-vm-3-root",
+ "name_desc": "/",
+ "os_device": "xvda",
+ "size": 8589934592,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ }
+ ],
+ "domid": "-1",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 1024,
+ "num_cpu_cores_per_socket": 1,
+ "num_cpus": 1
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "ansible-test-vm-3",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "",
+ "gateway6": "",
+ "ip": "169.254.0.3",
+ "ip6": [],
+ "mac": "72:fb:c7:ac:b9:97",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "",
+ "prefix": "",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "auto_poweron": "true",
+ "base_template_name": "zatemplate",
+ "import_task": "OpaqueRef:9948fd82-6d79-8882-2f01-4edc8795e361",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "install-repository": "http://mirror.centos.org/centos-6/6.2/os/x86_64/",
+ "instant": "true",
+ "last_shutdown_action": "Destroy",
+ "last_shutdown_initiator": "external",
+ "last_shutdown_reason": "halted",
+ "last_shutdown_time": "20140314T21:16:41Z",
+ "linux_template": "true",
+ "mac_seed": "06e27068-70c2-4c69-614b-7c54b5a4a781",
+ "rhel6": "true"
+ },
+ "platform": {
+ "acpi": "true",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "nx": "false",
+ "pae": "true",
+ "viridian": "true"
+ },
+ "state": "poweredoff",
+ "uuid": "8f5bc97c-42fa-d619-aba4-d25eced735e0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/mac": "72:fb:c7:ac:b9:97",
+ "vm-data/networks/0/name": "Host internal management network"
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json
new file mode 100644
index 00000000..02e224bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json
@@ -0,0 +1,420 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:024b722e-8d0f-65e6-359e-f301a009b683": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach",
+ "insert"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "",
+ "empty": true,
+ "metrics": "OpaqueRef:81509584-b22f-bc71-3c4e-e6c3bdca71f0",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "38d850d0-c402-490e-6b97-1d23558c4e0e"
+ },
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c": {
+ "VDI": "OpaqueRef:4d3e9fc7-ae61-b312-e0a8-b53bee06282e",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:529f6071-5627-28c5-1f41-ee8c0733f1da",
+ "mode": "RW",
+ "other_config": {
+ "owner": ""
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": false,
+ "userdevice": "0",
+ "uuid": "3fd7d35c-cb9d-f0c4-726b-e188ef0dc446"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:4d3e9fc7-ae61-b312-e0a8-b53bee06282e": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "forget",
+ "generate_config",
+ "update",
+ "resize",
+ "destroy",
+ "clone",
+ "copy",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "bdd0baeb-5447-4963-9e71-a5ff6e85fa59",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/",
+ "name_label": "ansible-test-vm-3-root",
+ "on_boot": "persist",
+ "other_config": {
+ "content_id": "cd8e8b2b-f158-c519-02f0-81d130fe83c5"
+ },
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "8615100416",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "bdd0baeb-5447-4963-9e71-a5ff6e85fa59",
+ "virtual_size": "8589934592",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": {
+ "MAC": "72:fb:c7:ac:b9:97",
+ "MAC_autogenerated": true,
+ "MTU": "1500",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach"
+ ],
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "0",
+ "ipv4_addresses": [],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "None",
+ "ipv4_gateway": "",
+ "ipv6_addresses": [],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:e5b53fb1-3e99-4bf5-6b00-95fdba1f2610",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "94bd4913-4940-437c-a1c3-50f7eb354c55"
+ }
+ },
+ "VM": {
+ "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2": {
+ "HVM_boot_params": {
+ "order": ""
+ },
+ "HVM_boot_policy": "",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "graphical utf8",
+ "PV_bootloader": "pygrub",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c",
+ "OpaqueRef:024b722e-8d0f-65e6-359e-f301a009b683"
+ ],
+ "VCPUs_at_startup": "1",
+ "VCPUs_max": "1",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:NULL",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "changing_shadow_memory",
+ "changing_static_range",
+ "make_into_template",
+ "migrate_send",
+ "destroy",
+ "export",
+ "start_on",
+ "start",
+ "clone",
+ "copy",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "-1",
+ "generation_id": "",
+ "guest_metrics": "OpaqueRef:6a8acd85-4cab-4e52-27d5-5f4a51c1bf69",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "1073741824",
+ "memory_dynamic_min": "1073741824",
+ "memory_overhead": "10485760",
+ "memory_static_max": "1073741824",
+ "memory_static_min": "536870912",
+ "memory_target": "0",
+ "metrics": "OpaqueRef:87fc5829-478b-1dcd-989f-50e8ba58a87d",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-3",
+ "order": "0",
+ "other_config": {
+ "auto_poweron": "true",
+ "base_template_name": "zatemplate",
+ "import_task": "OpaqueRef:9948fd82-6d79-8882-2f01-4edc8795e361",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "install-repository": "http://mirror.centos.org/centos-6/6.2/os/x86_64/",
+ "instant": "true",
+ "last_shutdown_action": "Destroy",
+ "last_shutdown_initiator": "external",
+ "last_shutdown_reason": "halted",
+ "last_shutdown_time": "20140314T21:16:41Z",
+ "linux_template": "true",
+ "mac_seed": "06e27068-70c2-4c69-614b-7c54b5a4a781",
+ "rhel6": "true"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "true",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "nx": "false",
+ "pae": "true",
+ "viridian": "true"
+ },
+ "power_state": "Halted",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"17179869184\" /><restriction field=\"vcpus-max\" max=\"8\" /><restriction property=\"number-of-vbds\" max=\"7\" /><restriction property=\"number-of-vifs\" max=\"7\" /></restrictions>",
+ "reference_label": "",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:NULL",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [
+ "web-frontend"
+ ],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "8f5bc97c-42fa-d619-aba4-d25eced735e0",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/mac": "72:fb:c7:ac:b9:97",
+ "vm-data/networks/0/name": "Host internal management network"
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:6a8acd85-4cab-4e52-27d5-5f4a51c1bf69": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "46676",
+ "major": "5",
+ "micro": "100",
+ "minor": "6"
+ },
+ "can_use_hotplug_vbd": "unspecified",
+ "can_use_hotplug_vif": "unspecified",
+ "disks": {},
+ "last_updated": "20190113T19:36:07Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "169.254.0.3"
+ },
+ "os_version": {
+ "distro": "centos",
+ "major": "6",
+ "minor": "10",
+ "name": "CentOS release 6.10 (Final)",
+ "uname": "2.6.32-754.6.3.el6.x86_64"
+ },
+ "other": {
+ "feature-balloon": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "3928a6a4-1acd-c134-ed35-eb0ccfaed65c"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:87fc5829-478b-1dcd-989f-50e8ba58a87d": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "0",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {
+ "0": 0.0
+ },
+ "hvm": false,
+ "install_time": "20190113T19:35:05Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "1073741824",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "19700101T00:00:00Z",
+ "state": [],
+ "uuid": "6cb05fe9-b83e-34c8-29e0-3b793e1da661"
+ }
+ },
+ "host": {},
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py
new file mode 100644
index 00000000..b1020bee
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .common import testcase_bad_xenapi_refs
+
+
+testcase_gather_vm_params_and_facts = {
+ "params": [
+ ["ansible-test-vm-1-params.json", "ansible-test-vm-1-facts.json"],
+ ["ansible-test-vm-2-params.json", "ansible-test-vm-2-facts.json"],
+ ["ansible-test-vm-3-params.json", "ansible-test-vm-3-facts.json"],
+ ],
+ "ids": [
+ "ansible-test-vm-1",
+ "ansible-test-vm-2",
+ "ansible-test-vm-3",
+ ],
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_gather_vm_params_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests return of empty dict on bad vm_ref."""
+ assert xenserver.gather_vm_params(fake_ansible_module, vm_ref) == {}
+
+
+def test_gather_vm_facts_no_vm_params(fake_ansible_module, xenserver):
+ """Tests return of empty facts dict when vm_params is not available"""
+ assert xenserver.gather_vm_facts(fake_ansible_module, None) == {}
+ assert xenserver.gather_vm_facts(fake_ansible_module, {}) == {}
+
+
+@pytest.mark.parametrize('fixture_data_from_file',
+ testcase_gather_vm_params_and_facts['params'],
+ ids=testcase_gather_vm_params_and_facts['ids'],
+ indirect=True)
+def test_gather_vm_params_and_facts(mocker, fake_ansible_module, XenAPI, xenserver, fixture_data_from_file):
+ """Tests proper parsing of VM parameters and facts."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ if "params" in list(fixture_data_from_file.keys())[0]:
+ params_file = list(fixture_data_from_file.keys())[0]
+ facts_file = list(fixture_data_from_file.keys())[1]
+ else:
+ params_file = list(fixture_data_from_file.keys())[1]
+ facts_file = list(fixture_data_from_file.keys())[0]
+
+ mocked_returns = {
+ "VM.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM'][obj_ref],
+ "VM_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_metrics'][obj_ref],
+ "VM_guest_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_guest_metrics'][obj_ref],
+ "VBD.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VBD'][obj_ref],
+ "VDI.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VDI'][obj_ref],
+ "SR.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['SR'][obj_ref],
+ "VIF.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VIF'][obj_ref],
+ "network.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['network'][obj_ref],
+ "host.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['host'][obj_ref],
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ vm_ref = list(fixture_data_from_file[params_file]['VM'].keys())[0]
+
+ assert xenserver.gather_vm_facts(fake_ansible_module, xenserver.gather_vm_params(fake_ansible_module, vm_ref)) == fixture_data_from_file[facts_file]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py
new file mode 100644
index 00000000..0fe7a7c1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref
+
+
+def test_get_object_ref_xenapi_failure(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests catching of XenAPI failures."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI method call error!'))
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name")
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: Fake XAPI method call error!"
+
+
+def test_get_object_ref_bad_uuid_and_name(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests failure on bad object uuid and/or name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request')
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, None, msg_prefix="Test: ")
+
+ mocked_xenapi.xenapi_request.assert_not_called()
+ assert exc_info.value.kwargs['msg'] == "Test: no valid name or UUID supplied for VM!"
+
+
+def test_get_object_ref_uuid_not_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when object is not found by uuid."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI not found error!'))
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == "Test: VM with UUID 'fake-uuid' not found!"
+ assert xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", fail=False, msg_prefix="Test: ") is None
+
+
+def test_get_object_ref_name_not_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when object is not found by name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[])
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == "Test: VM with name 'name' not found!"
+ assert xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ") is None
+
+
+def test_get_object_ref_name_multiple_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when multiple objects are found by name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[fake_xenapi_ref('VM'), fake_xenapi_ref('VM')])
+
+ error_msg = "Test: multiple VMs with name 'name' found! Please use UUID."
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == error_msg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py
new file mode 100644
index 00000000..3fad0ee5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def test_xapi_to_module_vm_power_state_bad_power_state(xenserver):
+ """Tests that None is returned on bad power state."""
+ assert xenserver.xapi_to_module_vm_power_state("bad") is None
+
+
+def test_module_to_xapi_vm_power_state_bad_power_state(xenserver):
+ """Tests that None is returned on bad power state."""
+ assert xenserver.module_to_xapi_vm_power_state("bad") is None
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py
new file mode 100644
index 00000000..d4b80f47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from ansible.module_utils.common.network import is_mac
+
+testcase_is_valid_mac_addr = [
+ ('A4-23-8D-F8-C9-E5', True),
+ ('35:71:F4:11:0B:D8', True),
+ ('b3-bd-20-59-0c-cf', True),
+ ('32:61:ca:65:f1:f4', True),
+ ('asdf', False),
+ ('A4-23-8D-G8-C9-E5', False),
+ ('A4-3-8D-F8-C9-E5', False),
+ ('A4-23-88D-F8-C9-E5', False),
+ ('A4-23-8D-F8-C9_E5', False),
+ ('A4-23--8D-F8-C9-E5', False),
+]
+
+testcase_is_valid_ip_addr = [
+ ('0.0.0.0', True),
+ ('10.0.0.1', True),
+ ('192.168.0.1', True),
+ ('255.255.255.255', True),
+ ('asdf', False),
+ ('a.b.c.d', False),
+ ('345.345.345.345', False),
+ ('-10.0.0.1', False),
+]
+
+testcase_is_valid_ip_netmask = [
+ ('240.0.0.0', True),
+ ('255.224.0.0', True),
+ ('255.255.248.0', True),
+ ('255.255.255.255', True),
+ ('asdf', False),
+ ('a.b.c.d', False),
+ ('192.168.0.1', False),
+ ('255.0.248.0', False),
+]
+
+testcase_is_valid_ip_prefix = [
+ ('0', True),
+ ('16', True),
+ ('24', True),
+ ('32', True),
+ ('asdf', False),
+ ('-10', False),
+ ('60', False),
+ ('60s', False),
+]
+
+testcase_ip_prefix_to_netmask = {
+ "params": [
+ ('0', '0.0.0.0'),
+ ('8', '255.0.0.0'),
+ ('11', '255.224.0.0'),
+ ('16', '255.255.0.0'),
+ ('21', '255.255.248.0'),
+ ('24', '255.255.255.0'),
+ ('26', '255.255.255.192'),
+ ('32', '255.255.255.255'),
+ ('a', ''),
+ ('60', ''),
+ ],
+ "ids": [
+ '0',
+ '8',
+ '11',
+ '16',
+ '21',
+ '24',
+ '26',
+ '32',
+ 'a',
+ '60',
+ ],
+}
+
+testcase_ip_netmask_to_prefix = {
+ "params": [
+ ('0.0.0.0', '0'),
+ ('255.0.0.0', '8'),
+ ('255.224.0.0', '11'),
+ ('255.255.0.0', '16'),
+ ('255.255.248.0', '21'),
+ ('255.255.255.0', '24'),
+ ('255.255.255.192', '26'),
+ ('255.255.255.255', '32'),
+ ('a', ''),
+ ('60', ''),
+ ],
+ "ids": [
+ '0.0.0.0',
+ '255.0.0.0',
+ '255.224.0.0',
+ '255.255.0.0',
+ '255.255.248.0',
+ '255.255.255.0',
+ '255.255.255.192',
+ '255.255.255.255',
+ 'a',
+ '60',
+ ],
+}
+
+testcase_is_valid_ip6_addr = [
+ ('::1', True),
+ ('2001:DB8:0:0:8:800:200C:417A', True),
+ ('2001:DB8::8:800:200C:417A', True),
+ ('FF01::101', True),
+ ('asdf', False),
+ ('2001:DB8:0:0:8:800:200C:417A:221', False),
+ ('FF01::101::2', False),
+ ('2001:db8:85a3::8a2e:370k:7334', False),
+]
+
+testcase_is_valid_ip6_prefix = [
+ ('0', True),
+ ('56', True),
+ ('78', True),
+ ('128', True),
+ ('asdf', False),
+ ('-10', False),
+ ('345', False),
+ ('60s', False),
+]
+
+
+@pytest.mark.parametrize('mac_addr, result', testcase_is_valid_mac_addr)
+def test_is_valid_mac_addr(xenserver, mac_addr, result):
+ """Tests against examples of valid and invalid mac addresses."""
+ assert is_mac(mac_addr) is result
+
+
+@pytest.mark.parametrize('ip_addr, result', testcase_is_valid_ip_addr)
+def test_is_valid_ip_addr(xenserver, ip_addr, result):
+ """Tests against examples of valid and invalid ip addresses."""
+ assert xenserver.is_valid_ip_addr(ip_addr) is result
+
+
+@pytest.mark.parametrize('ip_netmask, result', testcase_is_valid_ip_netmask)
+def test_is_valid_ip_netmask(xenserver, ip_netmask, result):
+ """Tests against examples of valid and invalid ip netmasks."""
+ assert xenserver.is_valid_ip_netmask(ip_netmask) is result
+
+
+@pytest.mark.parametrize('ip_prefix, result', testcase_is_valid_ip_prefix)
+def test_is_valid_ip_prefix(xenserver, ip_prefix, result):
+ """Tests against examples of valid and invalid ip prefixes."""
+ assert xenserver.is_valid_ip_prefix(ip_prefix) is result
+
+
+@pytest.mark.parametrize('ip_prefix, ip_netmask', testcase_ip_prefix_to_netmask['params'], ids=testcase_ip_prefix_to_netmask['ids'])
+def test_ip_prefix_to_netmask(xenserver, ip_prefix, ip_netmask):
+ """Tests ip prefix to netmask conversion."""
+ assert xenserver.ip_prefix_to_netmask(ip_prefix) == ip_netmask
+
+
+@pytest.mark.parametrize('ip_netmask, ip_prefix', testcase_ip_netmask_to_prefix['params'], ids=testcase_ip_netmask_to_prefix['ids'])
+def test_ip_netmask_to_prefix(xenserver, ip_netmask, ip_prefix):
+ """Tests ip netmask to prefix conversion."""
+ assert xenserver.ip_netmask_to_prefix(ip_netmask) == ip_prefix
+
+
+@pytest.mark.parametrize('ip6_addr, result', testcase_is_valid_ip6_addr)
+def test_is_valid_ip6_addr(xenserver, ip6_addr, result):
+ """Tests against examples of valid and invalid ip6 addresses."""
+ assert xenserver.is_valid_ip6_addr(ip6_addr) is result
+
+
+@pytest.mark.parametrize('ip6_prefix, result', testcase_is_valid_ip6_prefix)
+def test_is_valid_ip6_prefix(xenserver, ip6_prefix, result):
+ """Tests against examples of valid and invalid ip6 prefixes."""
+ assert xenserver.is_valid_ip6_prefix(ip6_prefix) is result
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py
new file mode 100644
index 00000000..a3048f43
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref, testcase_bad_xenapi_refs
+
+
+testcase_set_vm_power_state_bad_transitions = {
+ "params": [
+ ('restarted', 'Halted', "Cannot restart VM in state 'poweredoff'!"),
+ ('restarted', 'Suspended', "Cannot restart VM in state 'suspended'!"),
+ ('suspended', 'Halted', "Cannot suspend VM in state 'poweredoff'!"),
+ ('suspended', 'Paused', "Cannot suspend VM in state 'paused'!"),
+ ('shutdownguest', 'Halted', "Cannot shutdown guest when VM is in state 'poweredoff'!"),
+ ('shutdownguest', 'Suspended', "Cannot shutdown guest when VM is in state 'suspended'!"),
+ ('shutdownguest', 'Paused', "Cannot shutdown guest when VM is in state 'paused'!"),
+ ('rebootguest', 'Halted', "Cannot reboot guest when VM is in state 'poweredoff'!"),
+ ('rebootguest', 'Suspended', "Cannot reboot guest when VM is in state 'suspended'!"),
+ ('rebootguest', 'Paused', "Cannot reboot guest when VM is in state 'paused'!"),
+ ],
+ "ids": [
+ "poweredoff->restarted",
+ "suspended->restarted",
+ "poweredoff->suspended",
+ "paused->suspended",
+ "poweredoff->shutdownguest",
+ "suspended->shutdownguest",
+ "paused->shutdownguest",
+ "poweredoff->rebootguest",
+ "suspended->rebootguest",
+ "paused->rebootguest",
+ ],
+}
+
+testcase_set_vm_power_state_task_timeout = {
+ "params": [
+ ('shutdownguest', "Guest shutdown task failed: 'timeout'!"),
+ ('rebootguest', "Guest reboot task failed: 'timeout'!"),
+ ],
+ "ids": [
+ "shutdownguest-timeout",
+ "rebootguest-timeout",
+ ],
+}
+
+testcase_set_vm_power_state_no_transitions = {
+ "params": [
+ ('poweredon', "Running"),
+ ('Poweredon', "Running"),
+ ('powered-on', "Running"),
+ ('Powered_on', "Running"),
+ ('poweredoff', "Halted"),
+ ('Poweredoff', "Halted"),
+ ('powered-off', "Halted"),
+ ('powered_off', "Halted"),
+ ('suspended', "Suspended"),
+ ('Suspended', "Suspended"),
+ ],
+ "ids": [
+ "poweredon",
+ "poweredon-cap",
+ "poweredon-dash",
+ "poweredon-under",
+ "poweredoff",
+ "poweredoff-cap",
+ "poweredoff-dash",
+ "poweredoff-under",
+ "suspended",
+ "suspended-cap",
+ ],
+}
+
+testcase_set_vm_power_state_transitions = {
+ "params": [
+ ('poweredon', 'Halted', 'running', 'VM.start'),
+ ('Poweredon', 'Halted', 'running', 'VM.start'),
+ ('powered-on', 'Halted', 'running', 'VM.start'),
+ ('Powered_on', 'Halted', 'running', 'VM.start'),
+ ('poweredon', 'Suspended', 'running', 'VM.resume'),
+ ('Poweredon', 'Suspended', 'running', 'VM.resume'),
+ ('powered-on', 'Suspended', 'running', 'VM.resume'),
+ ('Powered_on', 'Suspended', 'running', 'VM.resume'),
+ ('poweredon', 'Paused', 'running', 'VM.unpause'),
+ ('Poweredon', 'Paused', 'running', 'VM.unpause'),
+ ('powered-on', 'Paused', 'running', 'VM.unpause'),
+ ('Powered_on', 'Paused', 'running', 'VM.unpause'),
+ ('poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('restarted', 'Running', 'running', 'VM.hard_reboot'),
+ ('Restarted', 'Running', 'running', 'VM.hard_reboot'),
+ ('restarted', 'Paused', 'running', 'VM.hard_reboot'),
+ ('Restarted', 'Paused', 'running', 'VM.hard_reboot'),
+ ('suspended', 'Running', 'suspended', 'VM.suspend'),
+ ('Suspended', 'Running', 'suspended', 'VM.suspend'),
+ ('shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('Shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('shutdown-guest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('shutdown_guest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('rebootguest', 'Running', 'running', 'VM.clean_reboot'),
+ ('Rebootguest', 'Running', 'running', 'VM.clean_reboot'),
+ ('reboot-guest', 'Running', 'running', 'VM.clean_reboot'),
+ ('reboot_guest', 'Running', 'running', 'VM.clean_reboot'),
+ ],
+ "ids": [
+ "poweredoff->poweredon",
+ "poweredoff->poweredon-cap",
+ "poweredoff->poweredon-dash",
+ "poweredoff->poweredon-under",
+ "suspended->poweredon",
+ "suspended->poweredon-cap",
+ "suspended->poweredon-dash",
+ "suspended->poweredon-under",
+ "paused->poweredon",
+ "paused->poweredon-cap",
+ "paused->poweredon-dash",
+ "paused->poweredon-under",
+ "poweredon->poweredoff",
+ "poweredon->poweredoff-cap",
+ "poweredon->poweredoff-dash",
+ "poweredon->poweredoff-under",
+ "suspended->poweredoff",
+ "suspended->poweredoff-cap",
+ "suspended->poweredoff-dash",
+ "suspended->poweredoff-under",
+ "paused->poweredoff",
+ "paused->poweredoff-cap",
+ "paused->poweredoff-dash",
+ "paused->poweredoff-under",
+ "poweredon->restarted",
+ "poweredon->restarted-cap",
+ "paused->restarted",
+ "paused->restarted-cap",
+ "poweredon->suspended",
+ "poweredon->suspended-cap",
+ "poweredon->shutdownguest",
+ "poweredon->shutdownguest-cap",
+ "poweredon->shutdownguest-dash",
+ "poweredon->shutdownguest-under",
+ "poweredon->rebootguest",
+ "poweredon->rebootguest-cap",
+ "poweredon->rebootguest-dash",
+ "poweredon->rebootguest-under",
+ ],
+}
+
+testcase_set_vm_power_state_transitions_async = {
+ "params": [
+ ('shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('Shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('shutdown-guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('shutdown_guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ('Rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ('reboot-guest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ('reboot_guest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ],
+ "ids": [
+ "poweredon->shutdownguest",
+ "poweredon->shutdownguest-cap",
+ "poweredon->shutdownguest-dash",
+ "poweredon->shutdownguest-under",
+ "poweredon->rebootguest",
+ "poweredon->rebootguest-cap",
+ "poweredon->rebootguest-dash",
+ "poweredon->rebootguest-under",
+ ],
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_set_vm_power_state_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests failure on bad vm_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, vm_ref, None)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot set VM power state. Invalid VM reference supplied!"
+
+
+def test_set_vm_power_state_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "poweredon")
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_set_vm_power_state_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests failure on unsupported power state."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "bad")
+
+ # Besides VM.get_power_state(), no other method should have
+ # been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert exc_info.value.kwargs['msg'] == "Requested VM power state 'bad' is unsupported!"
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, error_msg',
+ testcase_set_vm_power_state_bad_transitions['params'],
+ ids=testcase_set_vm_power_state_bad_transitions['ids'])
+def test_set_vm_power_state_bad_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current, error_msg):
+ """Tests failure on bad power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
+
+ # Besides VM.get_power_state(), no other method should have
+ # been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+
+@pytest.mark.parametrize('power_state, error_msg',
+ testcase_set_vm_power_state_task_timeout['params'],
+ ids=testcase_set_vm_power_state_task_timeout['ids'])
+def test_set_vm_power_state_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver, power_state, error_msg):
+ """Tests failure on async task timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "Async.VM.clean_shutdown.return_value": fake_xenapi_ref('task'),
+ "Async.VM.clean_reboot.return_value": fake_xenapi_ref('task'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="timeout")
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state, timeout=1)
+
+ # Besides VM.get_power_state(), only one of Async.VM.clean_shutdown
+ # or Async.VM.clean_reboot should have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current',
+ testcase_set_vm_power_state_no_transitions['params'],
+ ids=testcase_set_vm_power_state_no_transitions['ids'])
+def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current):
+ """Tests regular invocation without power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
+
+ # Besides VM.get_power_state(), no other method should have
+ # been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert result[0] is False
+ assert result[1] == power_state_current.lower()
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions['params'],
+ ids=testcase_set_vm_power_state_transitions['ids'])
+def test_set_vm_power_state_transition(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """Tests regular invocation with power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_called_once()
+
+ # Besides VM.get_power_state(), only activated_xenapi_method should
+ # have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions_async['params'],
+ ids=testcase_set_vm_power_state_transitions_async['ids'])
+def test_set_vm_power_state_transition_async(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """
+ Tests regular invocation with async power state transition
+ (shutdownguest and rebootguest only).
+ """
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ "%s.return_value" % activated_xenapi_method: fake_xenapi_ref('task'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="")
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=1)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_called_once()
+
+ # Besides VM.get_power_state(), only activated_xenapi_method should
+ # have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions['params'],
+ ids=testcase_set_vm_power_state_transitions['ids'])
+def test_set_vm_power_state_transition_check_mode(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """Tests regular invocation with power state transition in check mode."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ fake_ansible_module.check_mode = True
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_not_called()
+
+ # Besides VM.get_power_state(), no other method should have
+ # been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py
new file mode 100644
index 00000000..c06ad6de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref, testcase_bad_xenapi_refs
+
+
+testcase_wait_for_vm_ip_address_bad_power_states = {
+ "params": [
+ 'Halted',
+ 'Paused',
+ 'Suspended',
+ 'Other',
+ ],
+ "ids": [
+ 'state-halted',
+ 'state-paused',
+ 'state-suspended',
+ 'state-other',
+ ]
+}
+
+testcase_wait_for_vm_ip_address_bad_guest_metrics = {
+ "params": [
+ ('OpaqueRef:NULL', {"networks": {}}),
+ (fake_xenapi_ref('VM_guest_metrics'), {"networks": {}}),
+ ],
+ "ids": [
+ 'vm_guest_metrics_ref-null, no-ip',
+ 'vm_guest_metrics_ref-ok, no-ip',
+ ],
+}
+
+testcase_wait_for_task_all_statuses = {
+ "params": [
+ ('Success', ''),
+ ('Failure', 'failure'),
+ ('Cancelling', 'cancelling'),
+ ('Cancelled', 'cancelled'),
+ ('Other', 'other'),
+ ],
+ "ids": [
+ 'task-success',
+ 'task-failure',
+ 'task-cancelling',
+ 'task-cancelled',
+ 'task-other',
+ ]
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_wait_for_vm_ip_address_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests failure on bad vm_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, vm_ref)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot wait for VM IP address. Invalid VM reference supplied!"
+
+
+def test_wait_for_vm_ip_address_xenapi_failure(mock_xenapi_failure, xenserver, fake_ansible_module):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+@pytest.mark.parametrize('bad_power_state',
+ testcase_wait_for_vm_ip_address_bad_power_states['params'],
+ ids=testcase_wait_for_vm_ip_address_bad_power_states['ids'])
+def test_wait_for_vm_ip_address_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver, bad_power_state):
+ """Tests failure on bad power state."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": bad_power_state,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert exc_info.value.kwargs['msg'] == ("Cannot wait for VM IP address when VM is in state '%s'!" %
+ xenserver.xapi_to_module_vm_power_state(bad_power_state.lower()))
+
+
+@pytest.mark.parametrize('bad_guest_metrics_ref, bad_guest_metrics',
+ testcase_wait_for_vm_ip_address_bad_guest_metrics['params'],
+ ids=testcase_wait_for_vm_ip_address_bad_guest_metrics['ids'])
+def test_wait_for_vm_ip_address_timeout(mocker, fake_ansible_module, XenAPI, xenserver, bad_guest_metrics_ref, bad_guest_metrics):
+ """Tests timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "VM.get_guest_metrics.return_value": bad_guest_metrics_ref,
+ "VM_guest_metrics.get_record.return_value": bad_guest_metrics,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'), timeout=1)
+
+ assert exc_info.value.kwargs['msg'] == "Timed out waiting for VM IP address!"
+
+
+def test_wait_for_vm_ip_address(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests regular invocation."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ # This mock simulates the regular VM IP acquisition lifecycle:
+ #
+ # 1) First, no guest metrics are available because the VM is not yet
+ # fully booted and the guest agent has not started.
+ # 2) Next, the guest agent is running and guest metrics are available,
+ # but no IP address has been acquired yet.
+ # 3) Lastly, the VM acquires an IP address on its primary VIF.
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "VM.get_guest_metrics.side_effect": [
+ 'OpaqueRef:NULL',
+ fake_xenapi_ref('VM_guest_metrics'),
+ fake_xenapi_ref('VM_guest_metrics'),
+ ],
+ "VM_guest_metrics.get_record.side_effect": [
+ {
+ "networks": {},
+ },
+ {
+ "networks": {
+ "0/ip": "192.168.0.1",
+ "1/ip": "10.0.0.1",
+ },
+ },
+ ],
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_guest_metrics = xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert fake_guest_metrics == mocked_returns['VM_guest_metrics.get_record.side_effect'][1]
+
+
+@pytest.mark.parametrize('task_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_wait_for_task_bad_task_ref(fake_ansible_module, xenserver, task_ref):
+ """Tests failure on bad task_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_task(fake_ansible_module, task_ref)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot wait for task. Invalid task reference supplied!"
+
+
+def test_wait_for_task_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'))
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_wait_for_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "task.get_status.return_value": "Pending",
+ "task.destroy.return_value": None,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'), timeout=1)
+
+ mocked_xenapi.task.destroy.assert_called_once()
+ assert fake_result == "timeout"
+
+
+@pytest.mark.parametrize('task_status, result',
+ testcase_wait_for_task_all_statuses['params'],
+ ids=testcase_wait_for_task_all_statuses['ids'])
+def test_wait_for_task(mocker, fake_ansible_module, XenAPI, xenserver, task_status, result):
+ """Tests regular invocation."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ # The mock first returns a Pending status and, on the second invocation,
+ # one of the possible final statuses.
+ mocked_returns = {
+ "task.get_status.side_effect": [
+ 'Pending',
+ task_status,
+ ],
+ "task.destroy.return_value": None,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'))
+
+ mocked_xenapi.task.destroy.assert_called_once()
+ assert fake_result == result
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py
new file mode 100644
index 00000000..3b553d12
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+import atexit
+
+from .FakeAnsibleModule import FailJsonException
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+
+testcase_module_local_conn = {
+ "params": [
+ {
+ "hostname": "localhost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "local-conn",
+ ],
+}
+
+testcase_module_remote_conn = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "remote-conn",
+ ],
+}
+
+testcase_module_remote_conn_scheme = {
+ "params": [
+ {
+ "hostname": "http://somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ {
+ "hostname": "https://somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "remote-conn-http",
+ "remote-conn-https",
+ ],
+}
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True)
+def test_xapi_connect_local_session(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that connection to localhost uses XenAPI.xapi_local() function."""
+ mocker.patch('XenAPI.xapi_local')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.xapi_local.assert_called_once()
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True)
+def test_xapi_connect_local_login(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that connection to localhost uses empty username and password."""
+ mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.Session.login_with_password.assert_called_once_with('', '', ANSIBLE_VERSION, 'Ansible')
+
+
+def test_xapi_connect_login(mocker, fake_ansible_module, XenAPI, xenserver):
+ """
+ Tests that username and password are properly propagated to
+ the XenAPI.Session.login_with_password() function.
+ """
+ mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ username = fake_ansible_module.params['username']
+ password = fake_ansible_module.params['password']
+
+ XenAPI.Session.login_with_password.assert_called_once_with(username, password, ANSIBLE_VERSION, 'Ansible')
+
+
+def test_xapi_connect_login_failure(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that login failure is properly handled."""
+ fake_error_msg = "Fake XAPI login error!"
+
+ mocked_login = mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+ mocked_login.side_effect = XenAPI.Failure(fake_error_msg)
+
+ hostname = fake_ansible_module.params['hostname']
+ username = fake_ansible_module.params['username']
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ assert exc_info.value.kwargs['msg'] == "Unable to log on to XenServer at http://%s as %s: %s" % (hostname, username, fake_error_msg)
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_remote_conn_scheme['params'], ids=testcase_module_remote_conn_scheme['ids'], indirect=True)
+def test_xapi_connect_remote_scheme(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that explicit scheme in hostname param is preserved."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_once_with(hostname, ignore_ssl=ignore_ssl)
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_remote_conn['params'], ids=testcase_module_remote_conn['ids'], indirect=True)
+def test_xapi_connect_remote_no_scheme(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that proper scheme is prepended to hostname without scheme."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_once_with("http://%s" % hostname, ignore_ssl=ignore_ssl)
+
+
+def test_xapi_connect_support_ignore_ssl(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests proper handling of ignore_ssl support."""
+ mocked_session = mocker.patch('XenAPI.Session')
+ mocked_session.side_effect = TypeError()
+
+ with pytest.raises(TypeError) as exc_info:
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_with("http://%s" % hostname)
+
+
+def test_xapi_connect_no_disconnect_atexit(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests skipping registration of atexit disconnect handler."""
+ mocker.patch('atexit.register')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module, disconnect_atexit=False)
+
+ atexit.register.assert_not_called()
+
+
+def test_xapi_connect_singleton(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests if XAPI.connect() returns singleton."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session1 = xenserver.XAPI.connect(fake_ansible_module)
+ xapi_session2 = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.Session.assert_called_once()
+ assert xapi_session1 == xapi_session2
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py
new file mode 100644
index 00000000..60570e03
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref
+
+
+def test_xenserverobject_xenapi_lib_detection(mocker, fake_ansible_module, xenserver):
+ """Tests XenAPI lib detection code."""
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.HAS_XENAPI', new=False)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.XenServerObject(fake_ansible_module)
+
+ assert 'Failed to import the required Python library (XenAPI) on' in exc_info.value.kwargs['msg']
+
+
+def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.XenServerObject(fake_ansible_module)
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests successful creation of XenServerObject."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ "session.get_this_host.return_value": fake_xenapi_ref('host'),
+ "host.get_software_version.return_value": {"product_version": "7.2.0"},
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ xso = xenserver.XenServerObject(fake_ansible_module)
+
+ assert xso.pool_ref == fake_xenapi_ref('pool')
+ assert xso.xenserver_version == [7, 2, 0]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_container.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_container.py
new file mode 100644
index 00000000..3a6a1703
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_container.py
@@ -0,0 +1,22 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.community.general.plugins.modules.cloud.docker.docker_container import TaskParameters
+
+
+class TestTaskParameters(unittest.TestCase):
+ """Unit tests for TaskParameters."""
+
+ def test_parse_exposed_ports_tcp_udp(self):
+ """
+ Ensure _parse_exposed_ports does not cancel ports with the same
+ number but different protocol.
+ """
+ task_params = TaskParameters.__new__(TaskParameters)
+ task_params.exposed_ports = None
+ result = task_params._parse_exposed_ports([80, '443', '443/udp'])
+ self.assertTrue((80, 'tcp') in result)
+ self.assertTrue((443, 'tcp') in result)
+ self.assertTrue((443, 'udp') in result)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_network.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_network.py
new file mode 100644
index 00000000..aa9f1154
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_network.py
@@ -0,0 +1,31 @@
+"""Unit tests for docker_network."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.cloud.docker.docker_network import validate_cidr
+
+
+@pytest.mark.parametrize("cidr,expected", [
+ ('192.168.0.1/16', 'ipv4'),
+ ('192.168.0.1/24', 'ipv4'),
+ ('192.168.0.1/32', 'ipv4'),
+ ('fdd1:ac8c:0557:7ce2::/64', 'ipv6'),
+ ('fdd1:ac8c:0557:7ce2::/128', 'ipv6'),
+])
+def test_validate_cidr_positives(cidr, expected):
+ assert validate_cidr(cidr) == expected
+
+
+@pytest.mark.parametrize("cidr", [
+ '192.168.0.1',
+ '192.168.0.1/34',
+ '192.168.0.1/asd',
+ 'fdd1:ac8c:0557:7ce2::',
+])
+def test_validate_cidr_negatives(cidr):
+ with pytest.raises(ValueError) as e:
+ validate_cidr(cidr)
+ assert '"{0}" is not a valid CIDR'.format(cidr) == str(e.value)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_swarm_service.py
new file mode 100644
index 00000000..da5914b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_swarm_service.py
@@ -0,0 +1,510 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+
+class APIErrorMock(Exception):
+ def __init__(self, message, response=None, explanation=None):
+ self.message = message
+ self.response = response
+ self.explanation = explanation
+
+
+@pytest.fixture(autouse=True)
+def docker_module_mock(mocker):
+ docker_module_mock = mocker.MagicMock()
+ docker_utils_module_mock = mocker.MagicMock()
+ docker_errors_module_mock = mocker.MagicMock()
+ docker_errors_module_mock.APIError = APIErrorMock
+ mock_modules = {
+ 'docker': docker_module_mock,
+ 'docker.utils': docker_utils_module_mock,
+ 'docker.errors': docker_errors_module_mock,
+ }
+ return mocker.patch.dict('sys.modules', **mock_modules)
+
+
+@pytest.fixture(autouse=True)
+def docker_swarm_service():
+ from ansible_collections.community.general.plugins.modules.cloud.docker import docker_swarm_service
+
+ return docker_swarm_service
+
+
+def test_retry_on_out_of_sequence_error(mocker, docker_swarm_service):
+ run_mock = mocker.MagicMock(
+ side_effect=APIErrorMock(
+ message='',
+ response=None,
+ explanation='rpc error: code = Unknown desc = update out of sequence',
+ )
+ )
+ manager = docker_swarm_service.DockerServiceManager(client=None)
+ manager.run = run_mock
+ with pytest.raises(APIErrorMock):
+ manager.run_safe()
+ assert run_mock.call_count == 3
+
+
+def test_no_retry_on_general_api_error(mocker, docker_swarm_service):
+ run_mock = mocker.MagicMock(
+ side_effect=APIErrorMock(message='', response=None, explanation='some error')
+ )
+ manager = docker_swarm_service.DockerServiceManager(client=None)
+ manager.run = run_mock
+ with pytest.raises(APIErrorMock):
+ manager.run_safe()
+ assert run_mock.call_count == 1
+
+
+def test_get_docker_environment(mocker, docker_swarm_service):
+ env_file_result = {'TEST1': 'A', 'TEST2': 'B', 'TEST3': 'C'}
+ env_dict = {'TEST3': 'CC', 'TEST4': 'D'}
+ env_string = "TEST3=CC,TEST4=D"
+
+ env_list = ['TEST3=CC', 'TEST4=D']
+ expected_result = sorted(['TEST1=A', 'TEST2=B', 'TEST3=CC', 'TEST4=D'])
+ mocker.patch.object(
+ docker_swarm_service, 'parse_env_file', return_value=env_file_result
+ )
+ mocker.patch.object(
+ docker_swarm_service,
+ 'format_environment',
+ side_effect=lambda d: ['{0}={1}'.format(key, value) for key, value in d.items()],
+ )
+ # Test with env dict and file
+ result = docker_swarm_service.get_docker_environment(
+ env_dict, env_files=['dummypath']
+ )
+ assert result == expected_result
+ # Test with env list and file
+ result = docker_swarm_service.get_docker_environment(
+ env_list,
+ env_files=['dummypath']
+ )
+ assert result == expected_result
+ # Test with env string and file
+ result = docker_swarm_service.get_docker_environment(
+ env_string, env_files=['dummypath']
+ )
+ assert result == expected_result
+
+ assert result == expected_result
+ # Test with empty env
+ result = docker_swarm_service.get_docker_environment(
+ [], env_files=None
+ )
+ assert result == []
+ # Test with empty env_files
+ result = docker_swarm_service.get_docker_environment(
+ None, env_files=[]
+ )
+ assert result == []
+
+
+def test_get_nanoseconds_from_raw_option(docker_swarm_service):
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', None)
+ assert value is None
+
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', '1m30s535ms')
+ assert value == 90535000000
+
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', 10000000000)
+ assert value == 10000000000
+
+ with pytest.raises(ValueError):
+ docker_swarm_service.get_nanoseconds_from_raw_option('test', [])
+
+
+def test_has_dict_changed(docker_swarm_service):
+ assert not docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 1},
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 1, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1, "b": 1},
+ {"a": 1}
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {},
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {},
+ {}
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {},
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ {}
+ )
+
+
+def test_has_list_changed(docker_swarm_service):
+
+ # List comparisons without dictionaries
+ # I could improve the indenting, but pycodestyle wants this instead
+ assert not docker_swarm_service.has_list_changed(None, None)
+ assert not docker_swarm_service.has_list_changed(None, [])
+ assert not docker_swarm_service.has_list_changed(None, [1, 2])
+
+ assert not docker_swarm_service.has_list_changed([], None)
+ assert not docker_swarm_service.has_list_changed([], [])
+ assert docker_swarm_service.has_list_changed([], [1, 2])
+
+ assert docker_swarm_service.has_list_changed([1, 2], None)
+ assert docker_swarm_service.has_list_changed([1, 2], [])
+
+ assert docker_swarm_service.has_list_changed([1, 2, 3], [1, 2])
+ assert docker_swarm_service.has_list_changed([1, 2], [1, 2, 3])
+
+ # Check list sorting
+ assert not docker_swarm_service.has_list_changed([1, 2], [2, 1])
+ assert docker_swarm_service.has_list_changed(
+ [1, 2],
+ [2, 1],
+ sort_lists=False
+ )
+
+ # Check type matching
+ assert docker_swarm_service.has_list_changed([None, 1], [2, 1])
+ assert docker_swarm_service.has_list_changed([2, 1], [None, 1])
+ assert docker_swarm_service.has_list_changed(
+ "command --with args",
+ ['command', '--with', 'args']
+ )
+ assert docker_swarm_service.has_list_changed(
+ ['sleep', '3400'],
+ [u'sleep', u'3600'],
+ sort_lists=False
+ )
+
+ # List comparisons with dictionaries
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}],
+ [{'a': 1}],
+ sort_key='a'
+ )
+
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}],
+ sort_key='a'
+ )
+
+ with pytest.raises(Exception):
+ docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}]
+ )
+
+ # List sort checking with sort key
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 2}, {'a': 1}],
+ sort_key='a'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 2}, {'a': 1}],
+ sort_lists=False
+ )
+
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}, {'a': 3}],
+ [{'a': 2}, {'a': 1}],
+ sort_key='a'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}, {'a': 3}],
+ sort_lists=False
+ )
+
+ # Additional dictionary elements
+ assert not docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ sort_key='dst'
+ )
+ assert not docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2},
+ {"src": 3, "dst": 4},
+ ],
+ [
+ {"src": 1, "dst": 3, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 3, "dst": 4, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp", "extra": {"test": "foo"}},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert not docker_swarm_service.has_list_changed(
+ [{'id': '123', 'aliases': []}],
+ [{'id': '123'}],
+ sort_key='id'
+ )
+
+
+def test_have_networks_changed(docker_swarm_service):
+ assert not docker_swarm_service.have_networks_changed(
+ None,
+ None
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [],
+ None
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}],
+ [{'id': 1}]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [{'id': 1}],
+ [{'id': 1}, {'id': 2}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}, {'id': 2}],
+ [{'id': 1}, {'id': 2}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}, {'id': 2}],
+ [{'id': 2}, {'id': 1}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': []}
+ ],
+ [
+ {'id': 1},
+ {'id': 2}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1, 'options': {'option1': 'value2'}},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+
+def test_get_docker_networks(docker_swarm_service):
+ network_names = [
+ 'network_1',
+ 'network_2',
+ 'network_3',
+ 'network_4',
+ ]
+ networks = [
+ network_names[0],
+ {'name': network_names[1]},
+ {'name': network_names[2], 'aliases': ['networkalias1']},
+ {'name': network_names[3], 'aliases': ['networkalias2'], 'options': {'foo': 'bar'}},
+ ]
+ network_ids = {
+ network_names[0]: '1',
+ network_names[1]: '2',
+ network_names[2]: '3',
+ network_names[3]: '4',
+ }
+ parsed_networks = docker_swarm_service.get_docker_networks(
+ networks,
+ network_ids
+ )
+ assert len(parsed_networks) == 4
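+    # Network names should have been resolved to ids, with the 'name' key
+    # dropped and per-network extras (aliases, options) preserved.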
+ for i, network in enumerate(parsed_networks):
+ assert 'name' not in network
+ assert 'id' in network
+ expected_name = network_names[i]
+ assert network['id'] == network_ids[expected_name]
+ if i == 2:
+ assert network['aliases'] == ['networkalias1']
+ if i == 3:
+ assert network['aliases'] == ['networkalias2']
+ if i == 3:
+ assert 'foo' in network['options']
+ # Test missing name
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks([{'invalid': 'err'}], {'err': 1})
+    # Test for invalid aliases type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'aliases': 1}],
+ {'test': 1}
+ )
+ # Test invalid aliases elements
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'aliases': [1]}],
+ {'test': 1}
+ )
+ # Test for invalid options type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'options': 1}],
+ {'test': 1}
+ )
+ # Test for invalid networks type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ 1,
+ {'test': 1}
+ )
+ # Test for non existing networks
+ with pytest.raises(ValueError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'idontexist'}],
+ {'test': 1}
+ )
+ # Test empty values
+ assert docker_swarm_service.get_docker_networks([], {}) == []
+ assert docker_swarm_service.get_docker_networks(None, {}) is None
+ # Test invalid options
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'nonexisting_option': 'foo'}],
+ {'test': '1'}
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_volume.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_volume.py
new file mode 100644
index 00000000..9544a765
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/docker/test_docker_volume.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.cloud.docker import docker_volume
+from ansible_collections.community.general.plugins.module_utils.docker import common
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+TESTCASE_DOCKER_VOLUME = [
+ {
+ 'name': 'daemon_config',
+ 'state': 'present'
+ }
+]
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DOCKER_VOLUME, indirect=['patch_ansible_module'])
+def test_create_volume_on_invalid_docker_version(mocker, capfd):
+ mocker.patch.object(common, 'HAS_DOCKER_PY', True)
+ mocker.patch.object(common, 'docker_version', '1.8.0')
+
+ with pytest.raises(SystemExit):
+ docker_volume.main()
+
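+    # fail_json() writes its result as JSON to stdout before raising SystemExit,
+    # so the captured output can be parsed and inspected here.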
+ out, dummy = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'Error: Docker SDK for Python version is 1.8.0 ' in results['msg']
+ assert 'Minimum version required is 1.10.0.' in results['msg']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gce_tag.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gce_tag.py
new file mode 100644
index 00000000..708bb454
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gce_tag.py
@@ -0,0 +1,66 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.community.general.plugins.modules.cloud.google.gce_tag import _get_changed_items, _intersect_items, _union_items
+
+
+class TestGCETag(unittest.TestCase):
+ """Unit tests for gce_tag module."""
+
+ def test_union_items(self):
+ """
+ Combine items in both lists
+ removing duplicates.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ got = _union_items(listA, listB)
+ self.assertEqual(want, got)
+
+ def test_intersect_items(self):
+ """
+        Items present in both lists.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [1, 2, 3, 4, 5]
+ got = _intersect_items(listA, listB)
+ self.assertEqual(want, got)
+
+ # tags removed
+ new_tags = ['one', 'two']
+ existing_tags = ['two']
+ want = ['two'] # only remove the tag that was present
+ got = _intersect_items(existing_tags, new_tags)
+ self.assertEqual(want, got)
+
+ def test_get_changed_items(self):
+ """
+ All the items from left list that don't match
+ any item from the right list.
+ """
+ listA = [1, 2, 3, 4, 5, 8, 9]
+ listB = [1, 2, 3, 4, 5, 6, 7]
+ want = [8, 9]
+ got = _get_changed_items(listA, listB)
+ self.assertEqual(want, got)
+
+ # simulate new tags added
+ tags_to_add = ['one', 'two']
+ existing_tags = ['two']
+ want = ['one']
+ got = _get_changed_items(tags_to_add, existing_tags)
+ self.assertEqual(want, got)
+
+ # simulate removing tags
+ # specifying one tag on right that doesn't exist
+ tags_to_remove = ['one', 'two']
+ existing_tags = ['two', 'three']
+ want = ['three']
+ got = _get_changed_items(existing_tags, tags_to_remove)
+ self.assertEqual(want, got)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py
new file mode 100644
index 00000000..ebaf6bb5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_forwarding_rule.py
@@ -0,0 +1,30 @@
+import unittest
+
+from ansible_collections.community.general.plugins.modules.cloud.google.gcp_forwarding_rule import _build_global_forwarding_rule_dict
+
+
+class TestGCPForwardingRule(unittest.TestCase):
+    """Unit tests for gcp_forwarding_rule module."""
+ params_dict = {
+ 'forwarding_rule_name': 'foo_fowarding_rule_name',
+ 'address': 'foo_external_address',
+ 'target': 'foo_targetproxy',
+ 'region': 'global',
+ 'port_range': 80,
+ 'protocol': 'TCP',
+ 'state': 'present',
+ }
+
+ def test__build_global_forwarding_rule_dict(self):
+
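+        # Bare address/target names are expected to be expanded into full
+        # compute API self-links scoped to the given project ('my-project').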
+ expected = {
+ 'name': 'foo_fowarding_rule_name',
+ 'IPAddress': 'https://www.googleapis.com/compute/v1/projects/my-project/global/addresses/foo_external_address',
+ 'target': 'https://www.googleapis.com/compute/v1/projects/my-project/global/targetHttpProxies/foo_targetproxy',
+ 'region': 'global',
+ 'portRange': 80,
+ 'IPProtocol': 'TCP',
+ }
+ actual = _build_global_forwarding_rule_dict(
+ self.params_dict, 'my-project')
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_url_map.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_url_map.py
new file mode 100644
index 00000000..84b46357
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/google/test_gcp_url_map.py
@@ -0,0 +1,169 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.community.general.plugins.modules.cloud.google.gcp_url_map import _build_path_matchers, _build_url_map_dict
+
+
+class TestGCPUrlMap(unittest.TestCase):
+ """Unit tests for gcp_url_map module."""
+ params_dict = {
+ 'url_map_name': 'foo_url_map_name',
+ 'description': 'foo_url_map description',
+ 'host_rules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'path_matcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'path_matchers': [
+ {
+ 'name': 'path_matcher_one',
+ 'description': 'path matcher one',
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'pathRules': [
+ {
+ 'service': 'my-one-bes',
+ 'paths': [
+ '/',
+ '/aboutus'
+ ]
+ }
+ ]
+ },
+ {
+ 'name': 'path_matcher_two',
+ 'description': 'path matcher two',
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'pathRules': [
+ {
+ 'service': 'my-two-bes',
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ def test__build_path_matchers(self):
+ input_list = [
+ {
+ 'defaultService': 'bes-pathmatcher-one-default',
+ 'description': 'path matcher one',
+ 'name': 'path_matcher_one',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/',
+ '/aboutus'
+ ],
+ 'service': 'my-one-bes'
+ }
+ ]
+ },
+ {
+ 'defaultService': 'bes-pathmatcher-two-default',
+ 'description': 'path matcher two',
+ 'name': 'path_matcher_two',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ],
+ 'service': 'my-two-bes'
+ }
+ ]
+ }
+ ]
+ expected = [
+ {
+ 'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-one-default',
+ 'description': 'path matcher one',
+ 'name': 'path_matcher_one',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/',
+ '/aboutus'
+ ],
+ 'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-one-bes'
+ }
+ ]
+ },
+ {
+ 'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-two-default',
+ 'description': 'path matcher two',
+ 'name': 'path_matcher_two',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ],
+ 'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-two-bes'
+ }
+ ]
+ }
+ ]
+ actual = _build_path_matchers(input_list, 'my-project')
+ self.assertEqual(expected, actual)
+
+ def test__build_url_map_dict(self):
+
+ expected = {
+ 'description': 'foo_url_map description',
+ 'hostRules': [
+ {
+ 'description': 'host rules description',
+ 'hosts': [
+ 'www.example.com',
+ 'www2.example.com'
+ ],
+ 'pathMatcher': 'host_rules_path_matcher'
+ }
+ ],
+ 'name': 'foo_url_map_name',
+ 'pathMatchers': [
+ {
+ 'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-one-default',
+ 'description': 'path matcher one',
+ 'name': 'path_matcher_one',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/',
+ '/aboutus'
+ ],
+ 'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-one-bes'
+ }
+ ]
+ },
+ {
+ 'defaultService': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/bes-pathmatcher-two-default',
+ 'description': 'path matcher two',
+ 'name': 'path_matcher_two',
+ 'pathRules': [
+ {
+ 'paths': [
+ '/webapp',
+ '/graphs'
+ ],
+ 'service': 'https://www.googleapis.com/compute/v1/projects/my-project/global/backendServices/my-two-bes'
+ }
+ ]
+ }
+ ]
+ }
+ actual = _build_url_map_dict(self.params_dict, 'my-project')
+ self.assertEqual(expected, actual)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/kubevirt_fixtures.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/kubevirt_fixtures.py
new file mode 100644
index 00000000..71fb1dd8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/kubevirt_fixtures.py
@@ -0,0 +1,74 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock
+
+from ansible_collections.community.kubernetes.plugins.module_utils.common import K8sAnsibleMixin
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.module_utils.kubevirt import KubeVirtRawModule
+
+import openshift.dynamic
+
+RESOURCE_DEFAULT_ARGS = {'api_version': 'v1alpha3', 'group': 'kubevirt.io',
+ 'prefix': 'apis', 'namespaced': True}
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught
+ by the test case"""
+ def __init__(self, **kwargs):
+ for k in kwargs:
+ setattr(self, k, kwargs[k])
+
+ def __getitem__(self, attr):
+ return getattr(self, attr)
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught
+ by the test case"""
+ def __init__(self, **kwargs):
+ for k in kwargs:
+ setattr(self, k, kwargs[k])
+
+ def __getitem__(self, attr):
+ return getattr(self, attr)
+
+
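+# Stand-ins for AnsibleModule.exit_json/fail_json: instead of printing JSON and
+# exiting, they raise the exceptions above, carrying the result plus a 'success'
+# flag that tests can assert on.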
+def exit_json(*args, **kwargs):
+ kwargs['success'] = True
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(**kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['success'] = False
+ raise AnsibleFailJson(**kwargs)
+
+
+@pytest.fixture()
+def base_fixture(monkeypatch):
+ monkeypatch.setattr(
+ AnsibleModule, "exit_json", exit_json)
+ monkeypatch.setattr(
+ AnsibleModule, "fail_json", fail_json)
+ # Create mock methods in Resource directly, otherwise dyn client
+ # tries binding those to corresponding methods in DynamicClient
+ # (with partial()), which is more problematic to intercept
+ openshift.dynamic.Resource.get = MagicMock()
+ openshift.dynamic.Resource.create = MagicMock()
+ openshift.dynamic.Resource.delete = MagicMock()
+ openshift.dynamic.Resource.patch = MagicMock()
+ openshift.dynamic.Resource.search = MagicMock()
+ openshift.dynamic.Resource.watch = MagicMock()
+ # Globally mock some methods, since all tests will use this
+ KubernetesRawModule.patch_resource = MagicMock()
+ KubernetesRawModule.patch_resource.return_value = ({}, None)
+ K8sAnsibleMixin.get_api_client = MagicMock()
+ K8sAnsibleMixin.get_api_client.return_value = None
+ K8sAnsibleMixin.find_resource = MagicMock()
+ KubeVirtRawModule.find_supported_resource = MagicMock()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_rs.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_rs.py
new file mode 100644
index 00000000..07492146
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_rs.py
@@ -0,0 +1,80 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+openshiftdynamic = pytest.importorskip("openshift.dynamic")
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from .kubevirt_fixtures import base_fixture, RESOURCE_DEFAULT_ARGS, AnsibleExitJson
+
+from ansible_collections.community.kubernetes.plugins.module_utils.raw import KubernetesRawModule
+from ansible_collections.community.general.plugins.modules.cloud.kubevirt import kubevirt_rs as mymodule
+
+KIND = 'VirtualMachineInstanceReplicaSet'
+
+
+@pytest.mark.usefixtures("base_fixture")
+@pytest.mark.parametrize("_replicas, _changed", ((1, True),
+ (3, True),
+ (2, False),
+ (5, True),))
+def test_scale_rs_nowait(_replicas, _changed):
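+    # The mocked pre-change object reports spec.replicas == 2, so only the case
+    # requesting 2 replicas is expected to be a no-op (changed == False).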
+ _name = 'test-rs'
+ # Desired state:
+ args = dict(name=_name, namespace='vms', replicas=_replicas, wait=False)
+ set_module_args(args)
+
+ # Mock pre-change state:
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ mymodule.KubeVirtVMIRS.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ res_inst = openshiftdynamic.ResourceInstance('', dict(kind=KIND, metadata={'name': _name}, spec={'replicas': 2}))
+ openshiftdynamic.Resource.get.return_value = res_inst
+ openshiftdynamic.Resource.search.return_value = [res_inst]
+
+ # Final state, after patching the object
+ KubernetesRawModule.patch_resource.return_value = dict(kind=KIND, metadata={'name': _name},
+ spec={'replicas': _replicas}), None
+
+ # Run code:
+ with pytest.raises(AnsibleExitJson) as result:
+ mymodule.KubeVirtVMIRS().execute_module()
+
+ # Verify result:
+ assert result.value['changed'] == _changed
+
+
+@pytest.mark.usefixtures("base_fixture")
+@pytest.mark.parametrize("_replicas, _success", ((1, False),
+ (2, False),
+ (5, True),))
+def test_scale_rs_wait(_replicas, _success):
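+    # The module requests 5 replicas and waits; resource.watch() below reports
+    # readyReplicas == _replicas, so only the _replicas == 5 case succeeds.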
+ _name = 'test-rs'
+ # Desired state:
+ args = dict(name=_name, namespace='vms', replicas=5, wait=True)
+ set_module_args(args)
+
+ # Mock pre-change state:
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ mymodule.KubeVirtVMIRS.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ res_inst = openshiftdynamic.ResourceInstance('', dict(kind=KIND, metadata={'name': _name}, spec={'replicas': 2}))
+ openshiftdynamic.Resource.get.return_value = res_inst
+ openshiftdynamic.Resource.search.return_value = [res_inst]
+
+ # ~Final state, after patching the object (`replicas` match desired state)
+ KubernetesRawModule.patch_resource.return_value = dict(kind=KIND, name=_name, metadata={'name': _name},
+ spec={'replicas': 5}), None
+
+ # Final final state, as returned by resource.watch()
+ final_obj = dict(metadata=dict(name=_name), status=dict(readyReplicas=_replicas), **resource_args)
+ event = openshiftdynamic.ResourceInstance(None, final_obj)
+ openshiftdynamic.Resource.watch.return_value = [dict(object=event)]
+
+ # Run code:
+ with pytest.raises(Exception) as result:
+ mymodule.KubeVirtVMIRS().execute_module()
+
+ # Verify result:
+ assert result.value['success'] == _success
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_vm.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_vm.py
new file mode 100644
index 00000000..9b3f5fbc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/kubevirt/test_kubevirt_vm.py
@@ -0,0 +1,115 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+openshiftdynamic = pytest.importorskip("openshift.dynamic")
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from .kubevirt_fixtures import base_fixture, RESOURCE_DEFAULT_ARGS, AnsibleExitJson
+
+from ansible_collections.community.general.plugins.module_utils.kubevirt import KubeVirtRawModule
+from ansible_collections.community.general.plugins.modules.cloud.kubevirt import kubevirt_vm as mymodule
+
+KIND = 'VirtualMachine'
+
+
+@pytest.mark.usefixtures("base_fixture")
+def test_create_vm_with_multus_nowait():
+ # Desired state:
+ args = dict(
+ state='present', name='testvm',
+ namespace='vms',
+ interfaces=[
+ {'bridge': {}, 'name': 'default', 'network': {'pod': {}}},
+ {'bridge': {}, 'name': 'mynet', 'network': {'multus': {'networkName': 'mynet'}}},
+ ],
+ wait=False,
+ )
+ set_module_args(args)
+
+ # State as "returned" by the "k8s cluster":
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ KubeVirtRawModule.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ openshiftdynamic.Resource.get.return_value = None # Object doesn't exist in the cluster
+
+ # Run code:
+ with pytest.raises(AnsibleExitJson) as result:
+ mymodule.KubeVirtVM().execute_module()
+
+ # Verify result:
+ assert result.value['changed']
+ assert result.value['method'] == 'create'
+
+
+@pytest.mark.usefixtures("base_fixture")
+@pytest.mark.parametrize("_wait", (False, True))
+def test_vm_is_absent(_wait):
+ # Desired state:
+ args = dict(
+ state='absent', name='testvmi',
+ namespace='vms',
+ wait=_wait,
+ )
+ set_module_args(args)
+
+ # State as "returned" by the "k8s cluster":
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ KubeVirtRawModule.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ openshiftdynamic.Resource.get.return_value = None # Object doesn't exist in the cluster
+
+ # Run code:
+ with pytest.raises(AnsibleExitJson) as result:
+ mymodule.KubeVirtVM().execute_module()
+
+ # Verify result:
+ assert not result.value['kubevirt_vm']
+ assert result.value['method'] == 'delete'
+    # Note: nothing actually gets deleted, as we mock that there's no object present in the cluster,
+ # so if the method changes to something other than 'delete' at some point, that's fine
+
+
+@pytest.mark.usefixtures("base_fixture")
+def test_vmpreset_create():
+    KIND = 'VirtualMachineInstancePreset'
+ # Desired state:
+ args = dict(state='present', name='testvmipreset', namespace='vms', memory='1024Mi', wait=False)
+ set_module_args(args)
+
+ # State as "returned" by the "k8s cluster":
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ KubeVirtRawModule.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ openshiftdynamic.Resource.get.return_value = None # Object doesn't exist in the cluster
+
+ # Run code:
+ with pytest.raises(AnsibleExitJson) as result:
+ mymodule.KubeVirtVM().execute_module()
+
+ # Verify result:
+ assert result.value['changed']
+ assert result.value['method'] == 'create'
+
+
+@pytest.mark.usefixtures("base_fixture")
+def test_vmpreset_is_absent():
+    KIND = 'VirtualMachineInstancePreset'
+ # Desired state:
+ args = dict(state='absent', name='testvmipreset', namespace='vms')
+ set_module_args(args)
+
+ # State as "returned" by the "k8s cluster":
+ resource_args = dict(kind=KIND, **RESOURCE_DEFAULT_ARGS)
+ KubeVirtRawModule.find_supported_resource.return_value = openshiftdynamic.Resource(**resource_args)
+ openshiftdynamic.Resource.get.return_value = None # Object doesn't exist in the cluster
+
+ # Run code:
+ with pytest.raises(AnsibleExitJson) as result:
+ mymodule.KubeVirtVM().execute_module()
+
+ # Verify result:
+ assert not result.value['kubevirt_vm']
+ assert result.value['method'] == 'delete'
+    # Note: nothing actually gets deleted, as we mock that there's no object present in the cluster,
+ # so if the method changes to something other than 'delete' at some point, that's fine
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/conftest.py
new file mode 100644
index 00000000..6ce13a72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/conftest.py
@@ -0,0 +1,85 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+
+@pytest.fixture
+def api_key(monkeypatch):
+ monkeypatch.setenv('LINODE_API_KEY', 'foobar')
+
+
+@pytest.fixture
+def auth(monkeypatch):
+ def patched_test_echo(dummy):
+ return []
+ monkeypatch.setattr('linode.api.Api.test_echo', patched_test_echo)
+
+
+@pytest.fixture
+def access_token(monkeypatch):
+ monkeypatch.setenv('LINODE_ACCESS_TOKEN', 'barfoo')
+
+
+@pytest.fixture
+def no_access_token_in_env(monkeypatch):
+ try:
+ monkeypatch.delenv('LINODE_ACCESS_TOKEN')
+ except KeyError:
+ pass
+
+
+@pytest.fixture
+def default_args():
+ return {'state': 'present', 'label': 'foo'}
+
+
+@pytest.fixture
+def mock_linode():
+ class Linode():
+ def delete(self, *args, **kwargs):
+ pass
+
+ @property
+ def _raw_json(self):
+ return {
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": False,
+ "schedule": {
+ "day": None,
+ "window": None,
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": True
+ }
+ return Linode()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode.py
new file mode 100644
index 00000000..51e9b805
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode.py
@@ -0,0 +1,16 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.cloud.linode import linode
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+if not linode.HAS_LINODE:
+ pytestmark = pytest.mark.skip('test_linode.py requires the `linode-python` module')
+
+
+def test_name_is_a_required_parameter(api_key, auth):
+ with pytest.raises(SystemExit):
+ set_module_args({})
+ linode.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py
new file mode 100644
index 00000000..fece3414
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/linode/test_linode_v4.py
@@ -0,0 +1,324 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+import pytest
+
+linode_apiv4 = pytest.importorskip('linode_api4')
+mandatory_py_version = pytest.mark.skipif(
+ sys.version_info < (2, 7),
+ reason='The linode_api4 dependency requires python2.7 or higher'
+)
+
+from linode_api4.errors import ApiError as LinodeApiError
+from linode_api4 import LinodeClient
+
+from ansible_collections.community.general.plugins.modules.cloud.linode import linode_v4
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from ansible_collections.community.general.tests.unit.compat import mock
+
+
+def test_mandatory_state_is_validated(capfd):
+ with pytest.raises(SystemExit):
+ set_module_args({'label': 'foo'})
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert all(txt in results['msg'] for txt in ('state', 'required'))
+ assert results['failed'] is True
+
+
+def test_mandatory_label_is_validated(capfd):
+ with pytest.raises(SystemExit):
+ set_module_args({'state': 'present'})
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert all(txt in results['msg'] for txt in ('label', 'required'))
+ assert results['failed'] is True
+
+
+def test_mandatory_access_token_is_validated(default_args,
+ no_access_token_in_env,
+ capfd):
+ with pytest.raises(SystemExit):
+ set_module_args(default_args)
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'missing',
+ 'required',
+ 'access_token',
+ ))
+
+
+def test_mandatory_access_token_passed_in_env(default_args,
+ access_token):
+ set_module_args(default_args)
+
+ try:
+ module = linode_v4.initialise_module()
+ except SystemExit:
+ pytest.fail("'access_token' is passed in environment")
+
+ now_set_token = module.params['access_token']
+ assert now_set_token == os.environ['LINODE_ACCESS_TOKEN']
+
+
+def test_mandatory_access_token_passed_in_as_parameter(default_args,
+ no_access_token_in_env):
+ default_args.update({'access_token': 'foo'})
+ set_module_args(default_args)
+
+ try:
+ module = linode_v4.initialise_module()
+ except SystemExit:
+ pytest.fail("'access_token' is passed in as parameter")
+
+ assert module.params['access_token'] == 'foo'
+
+
+def test_instance_by_label_cannot_authenticate(capfd, access_token,
+ default_args):
+ set_module_args(default_args)
+ module = linode_v4.initialise_module()
+ client = LinodeClient(module.params['access_token'])
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, side_effect=LinodeApiError('foo')):
+ with pytest.raises(SystemExit):
+ linode_v4.maybe_instance_from_label(module, client)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert 'Unable to query the Linode API' in results['msg']
+
+
+def test_no_instances_found_with_label_gives_none(default_args,
+ access_token):
+ set_module_args(default_args)
+ module = linode_v4.initialise_module()
+ client = LinodeClient(module.params['access_token'])
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ result = linode_v4.maybe_instance_from_label(module, client)
+
+ assert result is None
+
+
+def test_optional_region_is_validated(default_args, capfd, access_token):
+ default_args.update({'type': 'foo', 'image': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'region'
+ ))
+
+
+def test_optional_type_is_validated(default_args, capfd, access_token):
+ default_args.update({'region': 'foo', 'image': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'type'
+ ))
+
+
+def test_optional_image_is_validated(default_args, capfd, access_token):
+ default_args.update({'type': 'foo', 'region': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'image'
+ ))
+
+
+def test_instance_already_created(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz'
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[mock_linode]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is False
+ assert 'root_password' not in results['instance']
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+
+
+def test_instance_to_be_created_without_root_pass(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz'
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
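+            # With no root_pass supplied, instance_create is mocked to return an
+            # (instance, generated_password) tuple; the password then surfaces as
+            # results['instance']['root_pass'] below.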
+ target = 'linode_api4.linode_client.LinodeGroup.instance_create'
+ with mock.patch(target, return_value=(mock_linode, 'passw0rd')):
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+ assert results['instance']['root_pass'] == 'passw0rd'
+
+
+def test_instance_to_be_created_with_root_pass(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz',
+ 'root_pass': 'passw0rd',
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ target = 'linode_api4.linode_client.LinodeGroup.instance_create'
+ with mock.patch(target, return_value=mock_linode):
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+ assert 'root_pass' not in results['instance']
+
+
+def test_instance_to_be_deleted(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({'state': 'absent'})
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[mock_linode]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+
+
+def test_instance_already_deleted_no_change(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({'state': 'absent'})
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is False
+ assert results['instance'] == {}
+
+
+def test_user_agent_created_properly():
+ try:
+ from ansible.module_utils.ansible_release import (
+ __version__ as ansible_version
+ )
+ except ImportError:
+ ansible_version = 'unknown'
+
+ expected_user_agent = 'Ansible-linode_v4_module/%s' % ansible_version
+ assert expected_user_agent == get_user_agent('linode_v4_module')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/test_terraform.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/test_terraform.py
new file mode 100644
index 00000000..898f99f2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/misc/test_terraform.py
@@ -0,0 +1,22 @@
+# Copyright: (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.cloud.misc import terraform
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+
+def test_terraform_without_argument(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ terraform.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+ assert 'project_path' in json.loads(out)['msg']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py
new file mode 100644
index 00000000..c443dfdb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class AnsibleModuleException(Exception):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class ExitJsonException(AnsibleModuleException):
+ pass
+
+
+class FailJsonException(AnsibleModuleException):
+ pass
+
+
+class FakeAnsibleModule:
+ def __init__(self, params=None, check_mode=False):
+ self.params = params
+ self.check_mode = check_mode
+
+ def exit_json(self, *args, **kwargs):
+ raise ExitJsonException(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ raise FailJsonException(*args, **kwargs)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py
new file mode 100644
index 00000000..24933175
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+FAKE_API_VERSION = "1.1"
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+class Session(object):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,
+ allow_none=1, ignore_ssl=False):
+
+ self.transport = transport
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def _get_api_version(self):
+ return FAKE_API_VERSION
+
+ def _login(self, method, params):
+ self._session = "OpaqueRef:fake-xenapi-session-ref"
+ self.last_login_method = method
+ self.last_login_params = params
+ self.API_version = self._get_api_version()
+
+ def _logout(self):
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ # Should be patched with mocker.patch().
+ return None
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ # Should be patched with mocker.patch().
+ return None
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name == 'logout':
+ return self._logout
+
+
+def xapi_local():
+ return Session("http://_var_lib_xcp_xapi/")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/common.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/common.py
new file mode 100644
index 00000000..9d6ff0ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/common.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def fake_xenapi_ref(xenapi_class):
+ return "OpaqueRef:fake-xenapi-%s-ref" % xenapi_class
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/conftest.py
new file mode 100644
index 00000000..d2bfcd0e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/conftest.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import sys
+import importlib
+import pytest
+
+from .FakeAnsibleModule import FakeAnsibleModule
+
+
+@pytest.fixture
+def fake_ansible_module(request):
+ """Returns fake AnsibleModule with fake module params."""
+ if hasattr(request, 'param'):
+ return FakeAnsibleModule(request.param)
+ else:
+ params = {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ }
+
+ return FakeAnsibleModule(params)
+
+
+@pytest.fixture(autouse=True)
+def XenAPI():
+ """Imports and returns fake XenAPI module."""
+
+ # Import of fake XenAPI module is wrapped by fixture so that it does not
+    # affect other unit tests which could potentially also use the XenAPI module.
+
+ # First we use importlib.import_module() to import the module and assign
+ # it to a local symbol.
+ fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.modules.cloud.xenserver.FakeXenAPI')
+
+ # Now we populate Python module cache with imported fake module using the
+ # original module name (XenAPI). That way, any 'import XenAPI' statement
+ # will just load already imported fake module from the cache.
+ sys.modules['XenAPI'] = fake_xenapi
+
+ return fake_xenapi
+
+
+@pytest.fixture
+def xenserver_guest_info(XenAPI):
+ """Imports and returns xenserver_guest_info module."""
+
+ # Since we are wrapping fake XenAPI module inside a fixture, all modules
+ # that depend on it have to be imported inside a test function. To make
+ # this easier to handle and remove some code repetition, we wrap the import
+ # of xenserver_guest_info module with a fixture.
+ from ansible_collections.community.general.plugins.modules.cloud.xenserver import xenserver_guest_info
+
+ return xenserver_guest_info
+
+
+@pytest.fixture
+def xenserver_guest_powerstate(XenAPI):
+ """Imports and returns xenserver_guest_powerstate module."""
+
+ # Since we are wrapping fake XenAPI module inside a fixture, all modules
+ # that depend on it have to be imported inside a test function. To make
+ # this easier to handle and remove some code repetition, we wrap the import
+ # of xenserver_guest_powerstate module with a fixture.
+ from ansible_collections.community.general.plugins.modules.cloud.xenserver import xenserver_guest_powerstate
+
+ return xenserver_guest_powerstate
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py
new file mode 100644
index 00000000..16f209c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+import pytest
+
+from .common import fake_xenapi_ref
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+
+testcase_module_params = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "uuid": "somevmuuid",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "uuid": "somevmuuid",
+ },
+ ],
+ "ids": [
+ "name",
+ "uuid",
+ "name+uuid",
+ ],
+}
+
+
+@pytest.mark.parametrize('patch_ansible_module', testcase_module_params['params'], ids=testcase_module_params['ids'], indirect=True)
+def test_xenserver_guest_info(mocker, capfd, XenAPI, xenserver_guest_info):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_info.get_object_ref', return_value=None)
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_info.gather_vm_params', return_value=None)
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_info.gather_vm_facts', return_value=fake_vm_facts)
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_info.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ assert result['instance'] == fake_vm_facts
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py
new file mode 100644
index 00000000..ae8735c3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from .common import fake_xenapi_ref
+
+
+testcase_set_powerstate = {
+ "params": [
+ (False, "someoldstate"),
+ (True, "somenewstate"),
+ ],
+ "ids": [
+ "state-same",
+ "state-changed",
+ ],
+}
+
+testcase_module_params_state_present = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "present",
+ },
+ ],
+ "ids": [
+ "present-implicit",
+ "present-explicit",
+ ],
+}
+
+testcase_module_params_state_other = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-on",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-off",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "restarted",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "shutdown-guest",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "reboot-guest",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "suspended",
+ },
+ ],
+ "ids": [
+ "powered-on",
+ "powered-off",
+ "restarted",
+ "shutdown-guest",
+ "reboot-guest",
+ "suspended",
+ ],
+}
+
+testcase_module_params_wait = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "present",
+ "wait_for_ip_address": "yes",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-on",
+ "wait_for_ip_address": "yes",
+ },
+ ],
+ "ids": [
+ "wait-present",
+ "wait-other",
+ ],
+}
+
+
+@pytest.mark.parametrize('power_state', testcase_set_powerstate['params'], ids=testcase_set_powerstate['ids'])
+def test_xenserver_guest_powerstate_set_power_state(mocker, fake_ansible_module, XenAPI, xenserver_guest_powerstate, power_state):
+ """Tests power state change handling."""
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params',
+ return_value={"power_state": "Someoldstate"})
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=power_state)
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ fake_ansible_module.params.update({
+ "name": "somename",
+ "uuid": "someuuid",
+ "state_change_timeout": 1,
+ })
+
+ vm = xenserver_guest_powerstate.XenServerVM(fake_ansible_module)
+ state_changed = vm.set_power_state(None)
+
+ mocked_set_vm_power_state.assert_called_once_with(fake_ansible_module, fake_xenapi_ref('VM'), None, 1)
+ assert state_changed == power_state[0]
+ assert vm.vm_params['power_state'] == power_state[1].capitalize()
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_state_present['params'],
+ ids=testcase_module_params_state_present['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output when state is set to present.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts',
+ return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_set_vm_power_state.assert_not_called()
+ mocked_wait_for_vm_ip_address.assert_not_called()
+ assert result['changed'] is False
+ assert result['instance'] == fake_vm_facts
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_state_other['params'],
+ ids=testcase_module_params_state_other['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+    module params and module output when state is set to a value other than
+    present.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_set_vm_power_state.assert_called_once()
+ mocked_wait_for_vm_ip_address.assert_not_called()
+ assert result['changed'] is True
+ assert result['instance'] == fake_vm_facts
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_wait['params'],
+ ids=testcase_module_params_wait['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+    module params and module output when the wait_for_ip_address option is used.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.cloud.xenserver.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_wait_for_vm_ip_address.assert_called_once()
+ assert result['instance'] == fake_vm_facts
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py
new file mode 100644
index 00000000..a7d1e047
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
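+    # Patching _ANSIBLE_ARGS makes AnsibleModule read its arguments from the JSON
+    # string built above instead of stdin, so tests can call module.main() directly.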
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/test_redis_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/test_redis_info.py
new file mode 100644
index 00000000..4ff1efc5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/misc/test_redis_info.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.general.plugins.modules.database.misc import redis_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class FakeRedisClient(MagicMock):
+
+ def ping(self):
+ pass
+
+ def info(self):
+ return {'redis_version': '999.999.999'}
+
+
+class FakeRedisClientFail(MagicMock):
+
+ def ping(self):
+ raise Exception('Test Error')
+
+ def info(self):
+ pass
+
+
+class TestRedisInfoModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestRedisInfoModule, self).setUp()
+ redis_info.HAS_REDIS_PACKAGE = True
+ self.module = redis_info
+
+ def tearDown(self):
+ super(TestRedisInfoModule, self).tearDown()
+
+ def patch_redis_client(self, **kwds):
+ return patch('ansible_collections.community.general.plugins.modules.database.misc.redis_info.redis_client', autospec=True, **kwds)
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({})
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(redis_client.call_args, ({'host': 'localhost', 'port': 6379, 'password': None},))
+ self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999')
+
+ def test_with_parameters(self):
+ """Test with all parameters"""
+ with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'login_host': 'test',
+ 'login_port': 1234,
+ 'login_password': 'PASS'
+ })
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(redis_client.call_args, ({'host': 'test', 'port': 1234, 'password': 'PASS'},))
+ self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999')
+
+ def test_with_fail_client(self):
+ """Test failure message"""
+ with self.patch_redis_client(side_effect=FakeRedisClientFail) as redis_client:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args({})
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(result.exception.args[0]['msg'], 'unable to connect to database: Test Error')
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/postgresql/test_postgresql_ext.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/postgresql/test_postgresql_ext.py
new file mode 100644
index 00000000..89417eaf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/database/postgresql/test_postgresql_ext.py
@@ -0,0 +1,35 @@
+# Copyright 2020, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.database.postgresql.postgresql_ext import (
+ parse_ext_versions,
+)
+
+
+@pytest.mark.parametrize(
+ 'current,test_input,expected',
+ [
+ (
+ '2.0.0',
+ [{'version': '3.1.0dev'}, {'version': '3.1.0devnext'}, {'version': 'unpackaged'}],
+ ['3.1.0dev', '3.1.0devnext'],
+ ),
+ (
+ '2.0.0',
+ [{'version': 'unpackaged'}, {'version': '3.1.0dev'}, {'version': '3.1.0devnext'}],
+ ['3.1.0dev', '3.1.0devnext'],
+ ),
+ (
+ '2.0.1',
+ [{'version': 'unpackaged'}, {'version': '2.0.0'}, {'version': '2.1.0dev'}],
+ ['2.1.0dev'],
+ ),
+ ]
+)
+def test_parse_ext_versions(current, test_input, expected):
+ assert parse_ext_versions(current, test_input) == expected
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/messaging/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/messaging/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/messaging/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py
new file mode 100644
index 00000000..b64a20f8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_circonus_annotation.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import json
+import re
+import uuid
+from urllib3.response import HTTPResponse
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils._text import to_bytes
+from ansible_collections.community.general.plugins.modules.monitoring import circonus_annotation
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestCirconusAnnotation(ModuleTestCase):
+
+ def setUp(self):
+ super(TestCirconusAnnotation, self).setUp()
+ self.module = circonus_annotation
+
+ def tearDown(self):
+ super(TestCirconusAnnotation, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_add_annotation(self):
+ """Check that result is changed"""
+ set_module_args({
+ 'category': 'test category',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': cid,
+ '_created': 1502146995,
+ '_last_modified': 1502146995,
+ '_last_modified_by': '/user/1000',
+ 'category': 'test category',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502145480,
+ 'stop': None,
+ 'title': 'test title',
+ }
+ raw = to_bytes(json.dumps(data))
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 200
+ resp.reason = 'OK'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
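+        # Replace HTTPAdapter.send with the stub above so no real HTTP request is made;
+        # the module receives a canned Circonus API response instead.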
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
+ self.assertEqual(send.call_count, 1)
+
+ def test_add_annotation_unicode(self):
+ """Check that result is changed.
+        Note: there seems to be a bug that prevents creating an annotation
+        with a non-ASCII category if that category already exists; in such
+        a case an Internal Server Error (500) occurs."""
+ set_module_args({
+ 'category': 'new catégorÿ',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': '/annotation/100000',
+ '_created': 1502236928,
+ '_last_modified': 1502236928,
+ '_last_modified_by': '/user/1000',
+ # use res['annotation']['category'].encode('latin1').decode('utf8')
+ 'category': u'new cat\xc3\xa9gor\xc3\xbf',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502236927,
+ 'stop': 1502236927,
+ 'title': 'test title',
+ }
+
+ raw = to_bytes(json.dumps(data), encoding='latin1')
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 200
+ resp.reason = 'OK'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
+ self.assertEqual(send.call_count, 1)
+
+ def test_auth_failure(self):
+ """Check that an error is raised when authentication failed"""
+ set_module_args({
+ 'category': 'test category',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': cid,
+ '_created': 1502146995,
+ '_last_modified': 1502146995,
+ '_last_modified_by': '/user/1000',
+ 'category': 'test category',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502145480,
+ 'stop': None,
+ 'title': 'test title',
+ }
+ raw = to_bytes(json.dumps(data))
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 403
+ resp.reason = 'Forbidden'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['failed'])
+ self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason']))
+ self.assertEqual(send.call_count, 1)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_icinga2_feature.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_icinga2_feature.py
new file mode 100644
index 00000000..52398a84
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_icinga2_feature.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules.monitoring import icinga2_feature
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+
+
+def get_bin_path(*args, **kwargs):
+ """Function to return path of icinga2 binary."""
+ return "/bin/icinga2"
+
+
+class TestIcinga2Feature(ModuleTestCase):
+ """Main class for testing icinga2_feature module."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestIcinga2Feature, self).setUp()
+ self.module = icinga2_feature
+ self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+ self.mock_get_bin_path.start()
+ self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestIcinga2Feature, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing."""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_enable_feature(self):
+ """Check that result is changed."""
+ set_module_args({
+ 'name': 'api',
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args[0][0][-1], 'api')
+
+ def test_enable_feature_with_check_mode(self):
+ """Check that result is changed in check mode."""
+ set_module_args({
+ 'name': 'api',
+ '_ansible_check_mode': True,
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+
+ def test_disable_feature(self):
+ """Check that result is changed."""
+ set_module_args({
+ 'name': 'api',
+ 'state': 'absent'
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args[0][0][-1], 'api')
+
+ def test_disable_feature_with_check_mode(self):
+ """Check that result is changed in check mode."""
+ set_module_args({
+ 'name': 'api',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_monit.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_monit.py
new file mode 100644
index 00000000..1d30812e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_monit.py
@@ -0,0 +1,157 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mock
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.monitoring import monit
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+
+
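+# Template resembling 'monit status' output; the placeholders are filled in with
+# the service type, the service name and the status string under test.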
+TEST_OUTPUT = """
+%s '%s'
+ status %s
+ monitoring status Not monitored
+ monitoring mode active
+"""
+
+
+class MonitTest(unittest.TestCase):
+ def setUp(self):
+ self.module = mock.MagicMock()
+ self.module.exit_json.side_effect = AnsibleExitJson
+ self.module.fail_json.side_effect = AnsibleFailJson
+ self.monit = monit.Monit(self.module, 'monit', 'processX', 1)
+ self.monit._status_change_retry_count = 1
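+        # Patch time.sleep so the retry/wait loops in Monit do not slow the tests down.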
+ mock_sleep = mock.patch('time.sleep')
+ mock_sleep.start()
+ self.addCleanup(mock_sleep.stop)
+
+ def patch_status(self, side_effect):
+ if not isinstance(side_effect, list):
+ side_effect = [side_effect]
+ return mock.patch.object(self.monit, 'get_status', side_effect=side_effect)
+
+ def test_change_state_success(self):
+ with self.patch_status([monit.Status.OK, monit.Status.NOT_MONITORED]):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.stop()
+ self.module.fail_json.assert_not_called()
+ self.module.run_command.assert_called_with('monit stop processX', check_rc=True)
+
+ def test_change_state_fail(self):
+ with self.patch_status([monit.Status.OK] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.stop()
+
+ def test_reload_fail(self):
+ self.module.run_command.return_value = (1, 'stdout', 'stderr')
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.reload()
+
+ def test_reload(self):
+ self.module.run_command.return_value = (0, '', '')
+ with self.patch_status(monit.Status.OK):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.reload()
+
+ def test_wait_for_status_to_stop_pending(self):
+ status = [
+ monit.Status.MISSING,
+ monit.Status.DOES_NOT_EXIST,
+ monit.Status.INITIALIZING,
+ monit.Status.OK.pending(),
+ monit.Status.OK
+ ]
+ with self.patch_status(status) as get_status:
+ self.monit.wait_for_monit_to_stop_pending()
+ self.assertEqual(get_status.call_count, len(status))
+
+ def test_wait_for_status_change(self):
+ with self.patch_status([monit.Status.NOT_MONITORED, monit.Status.OK]) as get_status:
+ self.monit.wait_for_status_change(monit.Status.NOT_MONITORED)
+ self.assertEqual(get_status.call_count, 2)
+
+ def test_wait_for_status_change_fail(self):
+ with self.patch_status([monit.Status.OK] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.wait_for_status_change(monit.Status.OK)
+
+ def test_monitor(self):
+ with self.patch_status([monit.Status.NOT_MONITORED, monit.Status.OK.pending(), monit.Status.OK]):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.monitor()
+
+ def test_monitor_fail(self):
+ with self.patch_status([monit.Status.NOT_MONITORED] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.monitor()
+
+ def test_timeout(self):
+ self.monit.timeout = 0
+ with self.patch_status(monit.Status.NOT_MONITORED.pending()):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.wait_for_monit_to_stop_pending()
+
+
+@pytest.mark.parametrize('status_name', [name for name in monit.StatusValue.ALL_STATUS])
+def test_status_value(status_name):
+ value = getattr(monit.StatusValue, status_name.upper())
+ status = monit.StatusValue(value)
+ assert getattr(status, 'is_%s' % status_name)
+ assert not all(getattr(status, 'is_%s' % name) for name in monit.StatusValue.ALL_STATUS if name != status_name)
+
+
+BASIC_OUTPUT_CASES = [
+ (TEST_OUTPUT % ('Process', 'processX', name), getattr(monit.Status, name.upper()))
+ for name in monit.StatusValue.ALL_STATUS
+]
+
+
+@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
+ ('', monit.Status.MISSING),
+ (TEST_OUTPUT % ('Process', 'processY', 'OK'), monit.Status.MISSING),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - start pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Monitored - stop pending'), monit.Status.NOT_MONITORED),
+ (TEST_OUTPUT % ('Process', 'processX', 'Monitored - restart pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - monitor pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Does not exist'), monit.Status.DOES_NOT_EXIST),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not monitored'), monit.Status.NOT_MONITORED),
+ (TEST_OUTPUT % ('Process', 'processX', 'Running'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Execution failed | Does not exist'), monit.Status.EXECUTION_FAILED),
+])
+def test_parse_status(output, expected):
+ status = monit.Monit(None, '', 'processX', 0)._parse_status(output, '')
+ assert status == expected
+
+
+@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
+ (TEST_OUTPUT % ('Process', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('File', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Fifo', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Filesystem', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Directory', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Remote host', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('System', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Program', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Network', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Unsupported', 'processX', 'OK'), monit.Status.MISSING),
+])
+def test_parse_status_supports_all_services(output, expected):
+ status = monit.Monit(None, '', 'processX', 0)._parse_status(output, '')
+ assert status == expected
+
+
+@pytest.mark.parametrize('output, expected', [
+ ('This is monit version 5.18.1', '5.18.1'),
+ ('This is monit version 12.18', '12.18'),
+ ('This is monit version 5.1.12', '5.1.12'),
+])
+def test_parse_version(output, expected):
+ module = mock.MagicMock()
+ module.run_command.return_value = (0, output, '')
+ raw_version, version_tuple = monit.Monit(module, '', 'processX', 0)._get_monit_version()
+ assert raw_version == expected
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty.py
new file mode 100644
index 00000000..3c9c28a4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty.py
@@ -0,0 +1,128 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.monitoring import pagerduty
+
+import json
+
+
+class PagerDutyTest(unittest.TestCase):
+ def setUp(self):
+ self.pd = pagerduty.PagerDutyRequest(module=pagerduty, name='name', user='user', token='token')
+
+ def _assert_ongoing_maintenance_windows(self, module, url, headers):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows?filter=ongoing', url)
+ return object(), {'status': 200}
+
+ def _assert_ongoing_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 200}
+
+ def _assert_create_a_maintenance_window_url(self, module, url, headers, data=None, method=None):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows', url)
+ return object(), {'status': 201}
+
+ def _assert_create_a_maintenance_window_http_method(self, module, url, headers, data=None, method=None):
+ self.assertEqual('POST', method)
+ return object(), {'status': 201}
+
+ def _assert_create_a_maintenance_window_from_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'From': 'requester_id'},
+ headers,
+ 'From:requester_id HTTP header not found'
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_payload(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+        self.assertTrue('start_time' in window_data, '"start_time" is a required attribute')
+        self.assertTrue('end_time' in window_data, '"end_time" is a required attribute')
+        self.assertTrue('services' in window_data, '"services" is a required attribute')
+ return object(), {'status': 201}
+
+ def _assert_create_window_single_service(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+ services = window_data['services']
+ self.assertEqual(
+ [{'id': 'service_id', 'type': 'service_reference'}],
+ services
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_multiple_service(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+ services = window_data['services']
+ print(services)
+ self.assertEqual(
+ [
+ {'id': 'service_id_1', 'type': 'service_reference'},
+ {'id': 'service_id_2', 'type': 'service_reference'},
+ {'id': 'service_id_3', 'type': 'service_reference'},
+ ],
+ services
+ )
+ return object(), {'status': 201}
+
+ def _assert_absent_maintenance_window_url(self, module, url, headers, method=None):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows/window_id', url)
+ return object(), {'status': 204}
+
+ def _assert_absent_window_with_v1_compatible_header(self, module, url, headers, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 204}
+
+    def test_ongoing_maintenance_windows_url(self):
+ self.pd.ongoing(http_call=self._assert_ongoing_maintenance_windows)
+
+    def test_ongoing_maintenance_windows_compatibility_header(self):
+ self.pd.ongoing(http_call=self._assert_ongoing_window_with_v1_compatible_header)
+
+ def test_create_maintenance_window_url(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_url)
+
+ def test_create_maintenance_window_http_method(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_http_method)
+
+ def test_create_maintenance_from_header(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_from_header)
+
+ def test_create_maintenance_compatibility_header(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_with_v1_compatible_header)
+
+ def test_create_maintenance_request_payload(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_payload)
+
+ def test_create_maintenance_for_single_service(self):
+ self.pd.create('requester_id', 'service_id', 1, 0, 'desc', http_call=self._assert_create_window_single_service)
+
+ def test_create_maintenance_for_multiple_services(self):
+ self.pd.create('requester_id', ['service_id_1', 'service_id_2', 'service_id_3'], 1, 0, 'desc', http_call=self._assert_create_window_multiple_service)
+
+ def test_absent_maintenance_window_url(self):
+ self.pd.absent('window_id', http_call=self._assert_absent_maintenance_window_url)
+
+ def test_absent_maintenance_compatibility_header(self):
+ self.pd.absent('window_id', http_call=self._assert_absent_window_with_v1_compatible_header)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py
new file mode 100644
index 00000000..e0951dcf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py
@@ -0,0 +1,44 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.monitoring import pagerduty_alert
+
+
+class PagerDutyAlertsTest(unittest.TestCase):
+ def _assert_incident_api(self, module, url, method, headers):
+ self.assertTrue('https://api.pagerduty.com/incidents' in url, 'url must contain REST API v2 network path')
+ self.assertTrue('service_ids%5B%5D=service_id' in url, 'url must contain service id to filter incidents')
+ self.assertTrue('sort_by=incident_number%3Adesc' in url, 'url should contain sorting parameter')
+ self.assertTrue('time_zone=UTC' in url, 'url should contain time zone parameter')
+ return Response(), {'status': 200}
+
+ def _assert_compatibility_header(self, module, url, method, headers):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return Response(), {'status': 200}
+
+ def _assert_incident_key(self, module, url, method, headers):
+ self.assertTrue('incident_key=incident_key_value' in url, 'url must contain incident key')
+ return Response(), {'status': 200}
+
+ def test_incident_url(self):
+ pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_incident_api)
+
+ def test_compatibility_header(self):
+ pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_compatibility_header)
+
+ def test_incident_key_in_url_when_it_is_given(self):
+ pagerduty_alert.check(
+ None, 'name', 'state', 'service_id', 'integration_key', 'api_key', incident_key='incident_key_value', http_call=self._assert_incident_key
+ )
+
+
+class Response(object):
+ def read(self):
+ return '{"incidents":[{"id": "incident_id", "status": "triggered"}]}'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_change.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_change.py
new file mode 100644
index 00000000..57b62a51
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/monitoring/test_pagerduty_change.py
@@ -0,0 +1,82 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules.monitoring import pagerduty_change
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestPagerDutyChangeModule(ModuleTestCase):
+ def setUp(self):
+ super(TestPagerDutyChangeModule, self).setUp()
+ self.module = pagerduty_change
+
+ def tearDown(self):
+ super(TestPagerDutyChangeModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+ return mocker.patch('ansible.module_utils.monitoring.pagerduty_change.fetch_url')
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_ensure_change_event_created_with_minimal_data(self):
+ set_module_args({
+ 'integration_key': 'test',
+ 'summary': 'Testing'
+ })
+
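+        # Mock fetch_url so the change event POST is captured for inspection instead
+        # of being sent to the PagerDuty Events API.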
+ with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 202})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
+ url = fetch_url_mock.call_args[0][1]
+ json_data = fetch_url_mock.call_args[1]['data']
+ data = json.loads(json_data)
+
+ assert url == 'https://events.pagerduty.com/v2/change/enqueue'
+ assert data['routing_key'] == 'test'
+ assert data['payload']['summary'] == 'Testing'
+ assert data['payload']['source'] == 'Ansible'
+
+ def test_ensure_change_event_created_with_full_data(self):
+ set_module_args({
+ 'integration_key': 'test',
+ 'summary': 'Testing',
+ 'source': 'My Ansible Script',
+ 'user': 'ansible',
+ 'repo': 'github.com/ansible/ansible',
+ 'revision': '8c67432',
+ 'environment': 'production',
+ 'link_url': 'https://pagerduty.com',
+ 'link_text': 'PagerDuty'
+ })
+
+ with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 202})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
+ url = fetch_url_mock.call_args[0][1]
+ json_data = fetch_url_mock.call_args[1]['data']
+ data = json.loads(json_data)
+
+ assert url == 'https://events.pagerduty.com/v2/change/enqueue'
+ assert data['routing_key'] == 'test'
+ assert data['payload']['summary'] == 'Testing'
+ assert data['payload']['source'] == 'My Ansible Script'
+ assert data['payload']['custom_details']['user'] == 'ansible'
+ assert data['payload']['custom_details']['repo'] == 'github.com/ansible/ansible'
+ assert data['payload']['custom_details']['revision'] == '8c67432'
+ assert data['payload']['custom_details']['environment'] == 'production'
+ assert data['links'][0]['href'] == 'https://pagerduty.com'
+ assert data['links'][0]['text'] == 'PagerDuty'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/fixtures/nios_result.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/fixtures/nios_result.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/fixtures/nios_result.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py
new file mode 100644
index 00000000..4e51b894
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_a_record.py
@@ -0,0 +1,159 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_a_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosARecordModule(TestNiosModule):
+
+ module = nios_a_record
+
+ def setUp(self):
+ super(TestNiosARecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_a_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_a_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_a_record.WapiModule.run')
+ self.mock_wapi_run.start()
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosARecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_a_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com',
+ 'ipv4': '192.168.10.1', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv4": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi.__dict__)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
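+        # self.module is a MagicMock, so the expected 'name' value is simply whatever
+        # the mocked _check_type_dict().__getitem__() call returns.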
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'ipv4': '192.168.10.1'})
+
+ def test_nios_a_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "a.ansible.com",
+ "ipv4": "192.168.10.1",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv4": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_a_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'a.ansible.com', 'ipv4': '192.168.10.1',
+ 'comment': None, 'extattrs': None}
+
+ ref = "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "a.ansible.com",
+ "ipv4": "192.168.10.1",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv4": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_a_record_update_record_name(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': {'new_name': 'a_new.ansible.com', 'old_name': 'a.ansible.com'},
+ 'comment': 'comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "arecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "a_new.ansible.com",
+ "old_name": "a.ansible.com",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.called_once_with(test_object)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py
new file mode 100644
index 00000000..83f19845
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_aaaa_record.py
@@ -0,0 +1,159 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_aaaa_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosAAAARecordModule(TestNiosModule):
+
+ module = nios_aaaa_record
+
+ def setUp(self):
+ super(TestNiosAAAARecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_aaaa_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_aaaa_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_aaaa_record.WapiModule.run')
+ self.mock_wapi_run.start()
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosAAAARecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_aaaa_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'aaaa.ansible.com',
+ 'ipv6': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv6": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi.__dict__)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'ipv6': '2001:0db8:85a3:0000:0000:8a2e:0370:7334'})
+
+ def test_nios_aaaa_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'aaaa.ansible.com',
+ 'ipv6': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "aaaarecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "aaaa.ansible.com",
+ "ipv6": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv6": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_aaaa_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'aaaa.ansible.com',
+ 'ipv6': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'comment': None, 'extattrs': None}
+
+ ref = "aaaarecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "aaaa.ansible.com",
+ "ipv6": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "ipv6": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_aaaa_record_update_record_name(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': {'new_name': 'aaaa_new.ansible.com', 'old_name': 'aaaa.ansible.com'},
+ 'comment': 'comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "aaaarecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "aaaa_new.ansible.com",
+ "old_name": "aaaa.ansible.com",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py
new file mode 100644
index 00000000..12f97243
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_cname_record.py
@@ -0,0 +1,133 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_cname_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosCNameRecordModule(TestNiosModule):
+
+ module = nios_cname_record
+
+ def setUp(self):
+ super(TestNiosCNameRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_cname_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_cname_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_cname_record.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosCNameRecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_cname_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'cname.ansible.com',
+ 'canonical': 'realhost.ansible.com', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "canonical": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi.__dict__)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'canonical': 'realhost.ansible.com'})
+
+ def test_nios_cname_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'cname.ansible.com',
+ 'canonical': 'realhost.ansible.com', 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "cnamerecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "cname.ansible.com",
+ "canonical": "realhost.ansible.com",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "canonical": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_cname_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'cname.ansible.com',
+ 'canonical': 'realhost.ansible.com', 'comment': None, 'extattrs': None}
+
+ ref = "cnamerecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "cname.ansible.com",
+ "canonical": "realhost.ansible.com",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "canonical": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py
new file mode 100644
index 00000000..5d6fe90f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_dns_view.py
@@ -0,0 +1,127 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_dns_view
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosDnsViewModule(TestNiosModule):
+
+ module = nios_dns_view
+
+ def setUp(self):
+ super(TestNiosDnsViewModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_dns_view.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_dns_view.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_dns_view.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosDnsViewModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_dns_view_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible-dns',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
+
+ def test_nios_dns_view_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible-dns',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "dnsview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "ansible-dns",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_dns_view_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible-dns',
+ 'comment': None, 'extattrs': None}
+
+ ref = "dnsview/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "ansible-dns",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_fixed_address.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_fixed_address.py
new file mode 100644
index 00000000..2be401f7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_fixed_address.py
@@ -0,0 +1,201 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_fixed_address
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosFixedAddressModule(TestNiosModule):
+
+ module = nios_fixed_address
+
+ def setUp(self):
+ super(TestNiosFixedAddressModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_fixed_address.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_fixed_address.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_fixed_address.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosFixedAddressModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def test_nios_fixed_address_ipv4_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'test_fa', 'ipaddr': '192.168.10.1', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': '192.168.10.0/24', 'network_view': 'default', 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "name": {},
+ "ipaddr": {"ib_req": True},
+ "mac": {"ib_req": True},
+ "network": {"ib_req": True},
+ "network_view": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': 'test_fa', 'ipaddr': '192.168.10.1', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': '192.168.10.0/24', 'network_view': 'default'})
+
+ def test_nios_fixed_address_ipv4_dhcp_update(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'test_fa', 'ipaddr': '192.168.10.1', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': '192.168.10.0/24', 'network_view': 'default', 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "name": "test_fa",
+ "_ref": "network/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "ipaddr": "192.168.10.1",
+ "mac": "08:6d:41:e8:fd:e8",
+ "network": "192.168.10.0/24",
+ "network_view": "default",
+ "extattrs": {'options': {'name': 'test', 'value': 'ansible.com'}}
+ }
+ ]
+
+ test_spec = {
+ "name": {},
+ "ipaddr": {"ib_req": True},
+ "mac": {"ib_req": True},
+ "network": {"ib_req": True},
+ "network_view": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_fixed_address_ipv4_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'test_fa', 'ipaddr': '192.168.10.1', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': '192.168.10.0/24', 'network_view': 'default', 'comment': None, 'extattrs': None}
+
+ ref = "fixedaddress/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "name": "test_fa",
+ "_ref": ref,
+ "ipaddr": "192.168.10.1",
+ "mac": "08:6d:41:e8:fd:e8",
+ "network": "192.168.10.0/24",
+ "network_view": "default",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {},
+ "ipaddr": {"ib_req": True},
+ "mac": {"ib_req": True},
+ "network": {"ib_req": True},
+ "network_view": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_fixed_address_ipv6_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'test_fa', 'ipaddr': 'fe80::1/10', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': 'fe80::/64', 'network_view': 'default', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {},
+ "ipaddr": {"ib_req": True},
+ "mac": {"ib_req": True},
+ "network": {"ib_req": True},
+ "network_view": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': 'test_fa', 'ipaddr': 'fe80::1/10', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': 'fe80::/64', 'network_view': 'default'})
+
+ def test_nios_fixed_address_ipv6_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'test_fa', 'ipaddr': 'fe80::1/10', 'mac': '08:6d:41:e8:fd:e8',
+ 'network': 'fe80::/64', 'network_view': 'default', 'comment': None, 'extattrs': None}
+
+ ref = "ipv6fixedaddress/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "name": "test_fa",
+ "_ref": ref,
+ "ipaddr": "fe80::1/10",
+ "mac": "08:6d:41:e8:fd:e8",
+ "network": "fe80::/64",
+ "network_view": "default",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {},
+ "ipaddr": {"ib_req": True},
+ "mac": {"ib_req": True},
+ "network": {"ib_req": True},
+ "network_view": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py
new file mode 100644
index 00000000..0f7dc58a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_host_record.py
@@ -0,0 +1,152 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_host_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosHostRecordModule(TestNiosModule):
+
+ module = nios_host_record
+
+ def setUp(self):
+
+ super(TestNiosHostRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_host_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_host_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_host_record.WapiModule.run')
+
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosHostRecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_host_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
+
+ def test_nios_host_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ ref = "record:host/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "ansible",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_host_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "record:host/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once()
+
+ def test_nios_host_record_update_record_name(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': {'new_name': 'default', 'old_name': 'old_default'},
+ 'comment': 'comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "record:host/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "old_name": "old_default",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_member.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_member.py
new file mode 100644
index 00000000..e181b70d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_member.py
@@ -0,0 +1,162 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_member
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosMemberModule(TestNiosModule):
+
+ module = nios_member
+
+ def setUp(self):
+ super(TestNiosMemberModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_member.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_member.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_member.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosMemberModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def test_nios_member_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'host_name': 'test_member',
+ 'vip_setting': {'address': '192.168.1.110', 'subnet_mask': '255.255.255.0', 'gateway': '192.168.1.1'},
+ 'config_addr_type': 'IPV4', 'platform': 'VNIOS', 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "host_name": {"ib_req": True},
+ "vip_setting": {},
+ "config_addr_type": {},
+ "platform": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'host_name': 'test_member',
+ 'vip_setting': {'address': '192.168.1.110', 'subnet_mask': '255.255.255.0',
+ 'gateway': '192.168.1.1'},
+ 'config_addr_type': 'IPV4', 'platform': 'VNIOS'})
+
+ def test_nios_member_update(self):
+ self.module.params = {'provider': None, 'state': 'present', 'host_name': 'test_member',
+ 'vip_setting': {'address': '192.168.1.110', 'subnet_mask': '255.255.255.0', 'gateway': '192.168.1.1'},
+ 'config_addr_type': 'IPV4', 'platform': 'VNIOS', 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "Created with Ansible",
+ "_ref": "member/b25lLnZpcnR1YWxfbm9kZSQ3:member01.ansible-dev.com",
+ "config_addr_type": "IPV4",
+ "host_name": "member01.ansible-dev.com",
+ "platform": "VNIOS",
+ "service_type_configuration": "ALL_V4",
+ "vip_setting":
+ {
+ "address": "192.168.1.100",
+ "dscp": 0,
+ "gateway": "192.168.1.1",
+ "primary": True,
+ "subnet_mask": "255.255.255.0",
+ "use_dscp": False
+ }
+ }
+ ]
+
+ test_spec = {
+ "host_name": {"ib_req": True},
+ "vip_setting": {},
+ "config_addr_type": {},
+ "platform": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_member_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'host_name': 'test_member',
+ 'vip_setting': {'address': '192.168.1.110', 'subnet_mask': '255.255.255.0', 'gateway': '192.168.1.1'},
+ 'config_addr_type': 'IPV4', 'platform': 'VNIOS', 'comment': 'updated comment', 'extattrs': None}
+
+ ref = "member/b25lLnZpcnR1YWxfbm9kZSQ3:member01.ansible-dev.com"
+
+ test_object = [
+ {
+ "comment": "Created with Ansible",
+ "_ref": "member/b25lLnZpcnR1YWxfbm9kZSQ3:member01.ansible-dev.com",
+ "config_addr_type": "IPV4",
+ "host_name": "member01.ansible-dev.com",
+ "platform": "VNIOS",
+ "service_type_configuration": "ALL_V4",
+ "vip_setting":
+ {
+ "address": "192.168.1.100",
+ "dscp": 0,
+ "gateway": "192.168.1.1",
+ "primary": True,
+ "subnet_mask": "255.255.255.0",
+ "use_dscp": False
+ }
+ }
+ ]
+
+ test_spec = {
+ "host_name": {"ib_req": True},
+ "vip_setting": {},
+ "config_addr_type": {},
+ "platform": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_module.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_module.py
new file mode 100644
index 00000000..545d2437
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_module.py
@@ -0,0 +1,88 @@
+# (c) 2018 Red Hat Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
+
+
+fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
+fixture_data = {}
+
+
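+# Fixture contents are cached per path; JSON fixtures are parsed, everything
+# else is returned as raw text.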
+def load_fixture(name):
+ path = os.path.join(fixture_path, name)
+
+ if path in fixture_data:
+ return fixture_data[path]
+
+ with open(path) as f:
+ data = f.read()
+
+ try:
+ data = json.loads(data)
+ except Exception:
+ pass
+
+ fixture_data[path] = data
+ return data
+
+
+class TestNiosModule(ModuleTestCase):
+
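+ # Drives the module's main() and captures the reported result via the
+ # AnsibleExitJson / AnsibleFailJson exceptions raised by the patched
+ # exit_json / fail_json helpers.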
+ def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
+
+ self.load_fixtures(commands)
+
+ if failed:
+ result = self.failed()
+ self.assertTrue(result['failed'], result)
+ else:
+ result = self.changed(changed)
+ self.assertEqual(result['changed'], changed, result)
+
+ if commands is not None:
+ if sort:
+ self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
+ else:
+ self.assertEqual(commands, result['commands'], result['commands'])
+
+ return result
+
+ def failed(self):
+ with self.assertRaises(AnsibleFailJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertTrue(result['failed'], result)
+ return result
+
+ def changed(self, changed=False):
+ with self.assertRaises(AnsibleExitJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertEqual(result['changed'], changed, result)
+ return result
+
+ def load_fixtures(self, commands=None):
+ pass
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py
new file mode 100644
index 00000000..219e86bf
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_mx_record.py
@@ -0,0 +1,137 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_mx_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosMXRecordModule(TestNiosModule):
+
+ module = nios_mx_record
+
+ def setUp(self):
+ super(TestNiosMXRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_mx_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_mx_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_mx_record.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosMXRecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_mx_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com',
+ 'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "mx": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'mx': 'mailhost.ansible.com', 'preference': 0})
+
+ def test_nios_mx_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
+ 'preference': 0, 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "ansible.com",
+ "mx": "mailhost.ansible.com",
+ "preference": 0,
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "mx": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_mx_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com',
+ 'preference': 0, 'comment': None, 'extattrs': None}
+
+ ref = "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "ansible.com",
+ "mx": "mailhost.ansible.com",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "mx": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py
new file mode 100644
index 00000000..510df69b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_naptr_record.py
@@ -0,0 +1,147 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_naptr_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosNAPTRRecordModule(TestNiosModule):
+
+ module = nios_naptr_record
+
+ def setUp(self):
+ super(TestNiosNAPTRRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_naptr_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_naptr_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_naptr_record.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosNAPTRRecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_naptr_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': '*.subscriber-100.ansiblezone.com',
+ 'order': '1000', 'preference': '10', 'replacement': 'replacement1.network.ansiblezone.com',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "order": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "replacement": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi.__dict__)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'order': '1000', 'preference': '10',
+ 'replacement': 'replacement1.network.ansiblezone.com'})
+
+ def test_nios_naptr_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': '*.subscriber-100.ansiblezone.com',
+ 'order': '1000', 'preference': '10', 'replacement': 'replacement1.network.ansiblezone.com',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "naptrrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "*.subscriber-100.ansiblezone.com",
+ "order": "1000",
+ "preference": "10",
+ "replacement": "replacement1.network.ansiblezone.com",
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "order": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "replacement": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_naptr_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': '*.subscriber-100.ansiblezone.com',
+ 'order': '1000', 'preference': '10', 'replacement': 'replacement1.network.ansiblezone.com',
+ 'comment': None, 'extattrs': None}
+
+ ref = "naptrrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "*.subscriber-100.ansiblezone.com",
+ "order": "1000",
+ "preference": "10",
+ "replacement": "replacement1.network.ansiblezone.com",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "order": {"ib_req": True},
+ "preference": {"ib_req": True},
+ "replacement": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network.py
new file mode 100644
index 00000000..e14d39f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network.py
@@ -0,0 +1,248 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_network
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosNetworkModule(TestNiosModule):
+
+ module = nios_network
+
+ def setUp(self):
+ super(TestNiosNetworkModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosNetworkModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
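+ # The tests below vary only the ib_spec key (network, ipv6network,
+ # networkcontainer, ipv6networkcontainer); run() drives the same
+ # create/update/remove flow for each NIOS object type.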
+ def test_nios_network_ipv4_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'network': '192.168.10.0/24',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'network': '192.168.10.0/24'})
+
+ def test_nios_network_ipv4_dhcp_update(self):
+ self.module.params = {'provider': None, 'state': 'present', 'network': '192.168.10.0/24',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "network/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "network": "192.168.10.0/24",
+ "extattrs": {'options': {'name': 'test', 'value': 'ansible.com'}}
+ }
+ ]
+
+ test_spec = {
+ "network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_network_ipv6_dhcp_update(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ipv6network': 'fe80::/64',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "ipv6network/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "ipv6network": "fe80::/64",
+ "extattrs": {'options': {'name': 'test', 'value': 'ansible.com'}}
+ }
+ ]
+
+ test_spec = {
+ "ipv6network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+ self.assertTrue(res['changed'])
+
+ def test_nios_network_ipv4_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'network': '192.168.10.0/24',
+ 'comment': None, 'extattrs': None}
+
+ ref = "network/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "network": "192.168.10.0/24",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_network_ipv6_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ipv6network': 'fe80::/64',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "ipv6network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'ipv6network': 'fe80::/64'})
+
+ def test_nios_network_ipv6_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'ipv6network': 'fe80::/64',
+ 'comment': None, 'extattrs': None}
+
+ ref = "ipv6network/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "ipv6network": "fe80::/64",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "ipv6network": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_networkcontainer_ipv4_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'networkcontainer': '192.168.10.0/24',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "networkcontainer": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'networkcontainer': '192.168.10.0/24'})
+
+ def test_nios_networkcontainer_ipv4_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'networkcontainer': '192.168.10.0/24',
+ 'comment': None, 'extattrs': None}
+
+ ref = "networkcontainer/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "networkcontainer": "192.168.10.0/24"
+ }]
+
+ test_spec = {
+ "networkcontainer": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_networkcontainer_ipv6_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ipv6networkcontainer': 'fe80::/64',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "ipv6networkcontainer": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'ipv6networkcontainer': 'fe80::/64'})
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py
new file mode 100644
index 00000000..9c38951b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_network_view.py
@@ -0,0 +1,156 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_network_view
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosNetworkViewModule(TestNiosModule):
+
+ module = nios_network_view
+
+ def setUp(self):
+ super(TestNiosNetworkViewModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network_view.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network_view.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_network_view.WapiModule.run')
+ self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosNetworkViewModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_network_view_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
+
+ def test_nios_network_view_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'updated comment', 'extattrs': None, 'network_view': 'default'}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "extattrs": {},
+ "network_view": "default"
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once()
+
+ def test_nios_network_view_update_name(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default', 'old_name': 'old_default',
+ 'comment': 'updated comment', 'extattrs': None, 'network_view': 'default'}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "old_name": "old_default",
+ "extattrs": {},
+ "network_view": "default"
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.update_object.assert_called_once()
+
+ def test_nios_network_view_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible',
+ 'comment': None, 'extattrs': None}
+
+ ref = "networkview/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "ansible",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py
new file mode 100644
index 00000000..63f59bff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_nsgroup.py
@@ -0,0 +1,125 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_nsgroup
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosNSGroupModule(TestNiosModule):
+
+ module = nios_nsgroup
+
+ def setUp(self):
+
+ super(TestNiosNSGroupModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_nsgroup.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_nsgroup.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_nsgroup.WapiModule.run')
+        self.load_config = self.mock_wapi_run.start()
+
+    def tearDown(self):
+        super(TestNiosNSGroupModule, self).tearDown()
+        self.mock_wapi.stop()
+        self.mock_wapi_run.stop()
+
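+    # Build a real WapiModule around the mocked AnsibleModule and replace its CRUD helpers
+    # with Mocks so that run() can be exercised without a live WAPI endpoint.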
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_nsgroup_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'my-simple-group',
+ 'comment': None, 'grid_primary': None}
+
+ test_object = None
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "grid_primary": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
+
+ def test_nios_nsgroup_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': 'my-simple-group',
+ 'comment': None, 'grid_primary': None}
+
+ ref = "nsgroup/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "my-simple-group",
+ "grid_primary": {'name': 'infoblox-test.example.com'}
+ }]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "grid_primary": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_nsgroup_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
+ 'comment': 'updated comment', 'grid_primary': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "nsgroup/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "default",
+ "grid_primary": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "comment": {},
+ "grid_primary": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+        self.assertTrue(wapi.update_object.called)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_ptr_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_ptr_record.py
new file mode 100644
index 00000000..7d51057d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_ptr_record.py
@@ -0,0 +1,184 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_ptr_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosPTRRecordModule(TestNiosModule):
+
+ module = nios_ptr_record
+
+ def setUp(self):
+
+ super(TestNiosPTRRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_ptr_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_ptr_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_ptr_record.WapiModule.run')
+        self.load_config = self.mock_wapi_run.start()
+
+    def tearDown(self):
+        super(TestNiosPTRRecordModule, self).tearDown()
+        self.mock_wapi.stop()
+        self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_ptr_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
+ 'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None, 'view': 'default'}
+
+ test_object = None
+ test_spec = {
+ "ipv4addr": {"ib_req": True},
+ "ptrdname": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+ "view": {"ib_req": True}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'ipv4addr': '10.36.241.14', 'ptrdname': 'ansible.test.com', 'view': 'default'})
+
+ def test_nios_ptr_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'ptrdname': 'ansible.test.com',
+ 'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None, 'view': 'default'}
+
+ ref = "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "ptrdname": "ansible.test.com",
+ "ipv4addr": "10.36.241.14",
+ "view": "default",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "ipv4addr": {"ib_req": True},
+ "ptrdname": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+ "view": {"ib_req": True}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_ptr_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
+ 'ipv4addr': '10.36.241.14', 'comment': 'updated comment', 'extattrs': None, 'view': 'default'}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
+ "ptrdname": "ansible.test.com",
+ "ipv4addr": "10.36.241.14",
+ "extattrs": {},
+ "view": "default"
+ }
+ ]
+
+ test_spec = {
+ "ipv4addr": {"ib_req": True},
+ "ptrdname": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+ "view": {"ib_req": True}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+        self.assertTrue(wapi.update_object.called)
+
+ def test_nios_ptr_record_update_record_ptrdname(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.org',
+ 'ipv4addr': '10.36.241.14', 'comment': 'comment', 'extattrs': None, 'view': 'default'}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
+ "ptrdname": "ansible.test.com",
+ "ipv4addr": "10.36.241.14",
+ "extattrs": {},
+ "view": "default"
+ }
+ ]
+
+ test_spec = {
+ "ipv4addr": {"ib_req": True},
+ "ptrdname": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+ "view": {"ib_req": True}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+        self.assertTrue(wapi.update_object.called)
+
+ def test_nios_ptr6_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible6.test.com',
+ 'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16', 'comment': None, 'extattrs': None, 'view': 'default'}
+
+ test_object = None
+ test_spec = {"ipv6addr": {"ib_req": True},
+ "ptrdname": {"ib_req": True},
+ "comment": {},
+ "extattrs": {},
+ "view": {"ib_req": True}}
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16',
+ 'ptrdname': 'ansible6.test.com', 'view': 'default'})
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py
new file mode 100644
index 00000000..39024657
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_srv_record.py
@@ -0,0 +1,153 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_srv_record
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosSRVRecordModule(TestNiosModule):
+
+ module = nios_srv_record
+
+ def setUp(self):
+ super(TestNiosSRVRecordModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_srv_record.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_srv_record.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_srv_record.WapiModule.run')
+        self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosSRVRecordModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_srv_record_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': '_sip._tcp.service.ansible.com',
+ 'port': 5080, 'target': 'service1.ansible.com', 'priority': 10, 'weight': 10,
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "port": {"ib_req": True},
+ "target": {"ib_req": True},
+ "priority": {"ib_req": True},
+ "weight": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
+ 'port': 5080, 'target': 'service1.ansible.com', 'priority': 10, 'weight': 10})
+
+ def test_nios_srv_record_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'name': '_sip._tcp.service.ansible.com',
+ 'port': 5080, 'target': 'service1.ansible.com', 'priority': 10, 'weight': 10,
+ 'comment': None, 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "srvrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "name": "_sip._tcp.service.ansible.com",
+ 'port': 5080,
+ "target": "mailhost.ansible.com",
+ "priority": 10,
+ 'weight': 10,
+ "extattrs": {}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "port": {"ib_req": True},
+ "target": {"ib_req": True},
+ "priority": {"ib_req": True},
+ "weight": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_srv_record_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'name': '_sip._tcp.service.ansible.com',
+ 'port': 5080, 'target': 'service1.ansible.com', 'priority': 10, 'weight': 10,
+ 'comment': None, 'extattrs': None}
+
+ ref = "srvrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": ref,
+ "name": "_sip._tcp.service.ansible.com",
+ "port": 5080,
+ "target": "mailhost.ansible.com",
+ "priority": 10,
+ "weight": 10,
+ "extattrs": {'Site': {'value': 'test'}}
+ }
+ ]
+
+ test_spec = {
+ "name": {"ib_req": True},
+ "port": {"ib_req": True},
+ "target": {"ib_req": True},
+ "priority": {"ib_req": True},
+ "weight": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_zone.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_zone.py
new file mode 100644
index 00000000..a660b50e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/nios/test_nios_zone.py
@@ -0,0 +1,287 @@
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules.net_tools.nios import nios_zone
+from ansible_collections.community.general.plugins.module_utils.net_tools.nios import api
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from .test_nios_module import TestNiosModule, load_fixture
+
+
+class TestNiosZoneModule(TestNiosModule):
+
+ module = nios_zone
+
+ def setUp(self):
+ super(TestNiosZoneModule, self).setUp()
+ self.module = MagicMock(name='ansible_collections.community.general.plugins.modules.net_tools.nios.nios_zone.WapiModule')
+ self.module.check_mode = False
+ self.module.params = {'provider': None}
+ self.mock_wapi = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_zone.WapiModule')
+ self.exec_command = self.mock_wapi.start()
+ self.mock_wapi_run = patch('ansible_collections.community.general.plugins.modules.net_tools.nios.nios_zone.WapiModule.run')
+        self.load_config = self.mock_wapi_run.start()
+
+ def tearDown(self):
+ super(TestNiosZoneModule, self).tearDown()
+ self.mock_wapi.stop()
+ self.mock_wapi_run.stop()
+
+ def _get_wapi(self, test_object):
+ wapi = api.WapiModule(self.module)
+ wapi.get_object = Mock(name='get_object', return_value=test_object)
+ wapi.create_object = Mock(name='create_object')
+ wapi.update_object = Mock(name='update_object')
+ wapi.delete_object = Mock(name='delete_object')
+ return wapi
+
+ def load_fixtures(self, commands=None):
+ self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
+ self.load_config.return_value = dict(diff=None, session='session')
+
+ def test_nios_zone_create(self):
+ self.module.params = {'provider': None, 'state': 'present', 'fqdn': 'ansible.com',
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'fqdn': 'ansible.com'})
+
+ def test_nios_zone_remove(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'fqdn': 'ansible.com',
+ 'comment': None, 'extattrs': None}
+
+ ref = "zone/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "fqdn": "ansible.com",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_zone_update_comment(self):
+ self.module.params = {'provider': None, 'state': 'present', 'fqdn': 'ansible.com',
+ 'comment': 'updated comment', 'extattrs': None}
+
+ test_object = [
+ {
+ "comment": "test comment",
+ "_ref": "zone/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
+ "fqdn": "ansible.com",
+ "extattrs": {'Site': {'value': 'test'}}
+ }
+ ]
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+
+ def test_nios_zone_create_using_grid_primary_secondaries(self):
+ self.module.params = {'provider': None, 'state': 'present', 'fqdn': 'ansible.com',
+ 'grid_primary': [{"name": "gridprimary.grid.com"}],
+ 'grid_secondaries': [{"name": "gridsecondary1.grid.com"},
+ {"name": "gridsecondary2.grid.com"}],
+ 'restart_if_needed': True,
+ 'comment': None, 'extattrs': None}
+
+ test_object = None
+ grid_spec = dict(
+ name=dict(required=True),
+ )
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "grid_primary": {},
+ "grid_secondaries": {},
+ "restart_if_needed": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'fqdn': 'ansible.com',
+ "grid_primary": [{"name": "gridprimary.grid.com"}],
+ "grid_secondaries": [{"name": "gridsecondary1.grid.com"},
+ {"name": "gridsecondary2.grid.com"}],
+ "restart_if_needed": True
+ })
+
+ def test_nios_zone_remove_using_grid_primary_secondaries(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'fqdn': 'ansible.com',
+ 'grid_primary': [{"name": "gridprimary.grid.com"}],
+ 'grid_secondaries': [{"name": "gridsecondary1.grid.com"},
+ {"name": "gridsecondary2.grid.com"}],
+ 'restart_if_needed': True,
+ 'comment': None, 'extattrs': None}
+
+ ref = "zone/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "fqdn": "ansible.com",
+ "grid_primary": [{"name": "gridprimary.grid.com"}],
+ "grid_secondaries": [{"name": "gridsecondary1.grid.com"}, {"name": "gridsecondary2.grid.com"}],
+ "restart_if_needed": True,
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "grid_primary": {},
+ "grid_secondaries": {},
+ "restart_if_needed": {},
+ "comment": {},
+ "extattrs": {}
+ }
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_zone_create_using_name_server_group(self):
+ self.module.params = {'provider': None, 'state': 'present', 'fqdn': 'ansible.com',
+ 'ns_group': 'examplensg', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "ns_group": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'fqdn': 'ansible.com',
+ 'ns_group': 'examplensg'})
+
+ def test_nios_zone_remove_using_name_server_group(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'fqdn': 'ansible.com',
+ 'ns_group': 'examplensg', 'comment': None, 'extattrs': None}
+
+ ref = "zone/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "fqdn": "ansible.com",
+ "ns_group": "examplensg",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "ns_group": {},
+ "comment": {},
+ "extattrs": {}
+ }
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
+
+ def test_nios_zone_create_using_zone_format(self):
+ self.module.params = {'provider': None, 'state': 'present', 'fqdn': '10.10.10.in-addr.arpa',
+ 'zone_format': 'IPV4', 'comment': None, 'extattrs': None}
+
+ test_object = None
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "zone_format": {},
+ "comment": {},
+ "extattrs": {}
+ }
+
+ wapi = self._get_wapi(test_object)
+ print("WAPI: ", wapi)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.create_object.assert_called_once_with('testobject', {'fqdn': '10.10.10.in-addr.arpa',
+ 'zone_format': 'IPV4'})
+
+    def test_nios_zone_remove_using_zone_format(self):
+ self.module.params = {'provider': None, 'state': 'absent', 'fqdn': 'ansible.com',
+ 'zone_format': 'IPV4', 'comment': None, 'extattrs': None}
+
+ ref = "zone/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
+
+ test_object = [{
+ "comment": "test comment",
+ "_ref": ref,
+ "fqdn": "ansible.com",
+ "zone_format": "IPV4",
+ "extattrs": {'Site': {'value': 'test'}}
+ }]
+
+ test_spec = {
+ "fqdn": {"ib_req": True},
+ "zone_format": {},
+ "comment": {},
+ "extattrs": {}
+ }
+ wapi = self._get_wapi(test_object)
+ res = wapi.run('testobject', test_spec)
+
+ self.assertTrue(res['changed'])
+ wapi.delete_object.assert_called_once_with(ref)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip.py
new file mode 100644
index 00000000..47c07605
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip.py
@@ -0,0 +1,219 @@
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
+ FetchUrlCall,
+ BaseTestModule,
+)
+
+from ansible_collections.community.general.plugins.module_utils.hetzner import BASE_URL
+from ansible_collections.community.general.plugins.modules.net_tools import hetzner_failover_ip
+
+
+class TestHetznerFailoverIP(BaseTestModule):
+ MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.general.plugins.modules.net_tools.hetzner_failover_ip.AnsibleModule'
+ MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.general.plugins.module_utils.hetzner.fetch_url'
+
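+    # run_module_success() runs the module with the given arguments while replaying the listed
+    # FetchUrlCall objects as canned fetch_url responses and verifying the requested URLs and
+    # form values (test framework provided by community.internal_test_tools).
+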
+ # Tests for state idempotence (routed and unrouted)
+
+ def test_unrouted(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'unrouted',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': None,
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['value'] is None
+ assert result['state'] == 'unrouted'
+
+ def test_routed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'routed',
+ 'value': '4.3.2.1',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['value'] == '4.3.2.1'
+ assert result['state'] == 'routed'
+
+ # Tests for changing state (unrouted to routed, vice versa)
+
+ def test_unrouted_to_routed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'routed',
+ 'value': '4.3.2.1',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': None,
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_form_value('active_server_ip', '4.3.2.1')
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['value'] == '4.3.2.1'
+ assert result['state'] == 'routed'
+
+ def test_routed_to_unrouted(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'unrouted',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('DELETE', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': None,
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['value'] is None
+ assert result['state'] == 'unrouted'
+
+ # Tests for re-routing
+
+ def test_rerouting(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'routed',
+ 'value': '4.3.2.1',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '5.4.3.2',
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_form_value('active_server_ip', '4.3.2.1')
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['value'] == '4.3.2.1'
+ assert result['state'] == 'routed'
+
+ def test_rerouting_already_routed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ 'state': 'routed',
+ 'value': '4.3.2.1',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '5.4.3.2',
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 409)
+ .result_json({
+ 'error': {
+ 'status': 409,
+ 'code': 'FAILOVER_ALREADY_ROUTED',
+ 'message': 'Failover already routed',
+ },
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_form_value('active_server_ip', '4.3.2.1')
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['value'] == '4.3.2.1'
+ assert result['state'] == 'routed'
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip_info.py
new file mode 100644
index 00000000..cbb11e8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_failover_ip_info.py
@@ -0,0 +1,71 @@
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
+ FetchUrlCall,
+ BaseTestModule,
+)
+
+from ansible_collections.community.general.plugins.module_utils.hetzner import BASE_URL
+from ansible_collections.community.general.plugins.modules.net_tools import hetzner_failover_ip_info
+
+
+class TestHetznerFailoverIPInfo(BaseTestModule):
+ MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.general.plugins.modules.net_tools.hetzner_failover_ip_info.AnsibleModule'
+ MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.general.plugins.module_utils.hetzner.fetch_url'
+
+ # Tests for state (routed and unrouted)
+
+ def test_unrouted(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': None,
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['value'] is None
+ assert result['state'] == 'unrouted'
+ assert result['failover_ip'] == '1.2.3.4'
+ assert result['server_ip'] == '2.3.4.5'
+ assert result['server_number'] == 2345
+
+ def test_routed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_failover_ip_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'failover_ip': '1.2.3.4',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'failover': {
+ 'ip': '1.2.3.4',
+ 'netmask': '255.255.255.255',
+ 'server_ip': '2.3.4.5',
+ 'server_number': 2345,
+ 'active_server_ip': '4.3.2.1',
+ },
+ })
+ .expect_url('{0}/failover/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['value'] == '4.3.2.1'
+ assert result['state'] == 'routed'
+ assert result['failover_ip'] == '1.2.3.4'
+ assert result['server_ip'] == '2.3.4.5'
+ assert result['server_number'] == 2345
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall.py
new file mode 100644
index 00000000..bc87a51c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall.py
@@ -0,0 +1,1193 @@
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
+ FetchUrlCall,
+ BaseTestModule,
+)
+
+from ansible_collections.community.general.plugins.module_utils.hetzner import BASE_URL
+from ansible_collections.community.general.plugins.modules.net_tools import hetzner_firewall
+
+
+def create_params(parameter, *values):
+ assert len(values) > 1
+ result = []
+ for i in range(1, len(values)):
+ result.append((parameter, values[i - 1], values[i]))
+ return result
+
+
+def flatten(list_of_lists):
+ result = []
+ for l in list_of_lists:
+ result.extend(l)
+ return result
+
+
+class TestHetznerFirewall(BaseTestModule):
+ MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.general.plugins.modules.net_tools.hetzner_firewall.AnsibleModule'
+ MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.general.plugins.module_utils.hetzner.fetch_url'
+
+ # Tests for state (absent and present)
+
+ def test_absent_idempotency(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'absent',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'disabled'
+ assert result['firewall']['status'] == 'disabled'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_absent_changed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'absent',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'disabled'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'disabled'
+ assert result['firewall']['status'] == 'disabled'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_present_idempotency(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_present_changed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ # Tests for state (absent and present) with check mode
+
+ def test_absent_idempotency_check(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'disabled'
+ assert result['firewall']['status'] == 'disabled'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_absent_changed_check(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'disabled'
+ assert result['firewall']['status'] == 'disabled'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_present_idempotency_check(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_present_changed_check(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ # Tests for port
+
+ def test_port_idempotency(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'port': 'main',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['port'] == 'main'
+ assert result['diff']['after']['port'] == 'main'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert result['firewall']['port'] == 'main'
+
+ def test_port_changed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'port': 'main',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': True,
+ 'port': 'kvm',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('port', 'main'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['port'] == 'kvm'
+ assert result['diff']['after']['port'] == 'main'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert result['firewall']['port'] == 'main'
+
+ # Tests for whitelist_hos
+
+ def test_whitelist_hos_idempotency(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'whitelist_hos': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['whitelist_hos'] is True
+ assert result['diff']['after']['whitelist_hos'] is True
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert result['firewall']['whitelist_hos'] is True
+
+ def test_whitelist_hos_changed(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'whitelist_hos': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('whitelist_hos', 'true'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['whitelist_hos'] is False
+ assert result['diff']['after']['whitelist_hos'] is True
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert result['firewall']['whitelist_hos'] is True
+
+ # Tests for wait_for_configured in getting status
+
+ def test_wait_get(self, mocker):
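+        # Stub out time.sleep so the wait_for_configured polling loop does not slow the test down.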
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'wait_for_configured': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_wait_get_timeout(self, mocker):
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_failed(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'wait_for_configured': True,
+ 'timeout': 0,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['msg'] == 'Timeout while waiting for firewall to be configured.'
+
+ def test_nowait_get(self, mocker):
+ result = self.run_module_failed(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'wait_for_configured': False,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['msg'] == 'Firewall configuration cannot be read as it is not configured.'
+
+ # Tests for wait_for_configured in setting status
+
+ def test_wait_update(self, mocker):
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'wait_for_configured': True,
+ 'state': 'present',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_wait_update_timeout(self, mocker):
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'wait_for_configured': True,
+ 'timeout': 0,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'in process'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert 'Timeout while waiting for firewall to be configured.' in result['warnings']
+
+ def test_nowait_update(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'wait_for_configured': False,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'disabled'
+ assert result['diff']['after']['status'] == 'active'
+ assert result['firewall']['status'] == 'in process'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+    # Idempotency checks: different number of input rules
+
+ def test_input_rule_len_change_0_1(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'rules': {
+ 'input': [
+ {
+ 'ip_version': 'ipv4',
+ 'action': 'discard',
+ },
+ ],
+ },
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ },
+ ],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active')
+ .expect_form_value_absent('rules[input][0][name]')
+ .expect_form_value('rules[input][0][ip_version]', 'ipv4')
+ .expect_form_value_absent('rules[input][0][dst_ip]')
+ .expect_form_value_absent('rules[input][0][dst_port]')
+ .expect_form_value_absent('rules[input][0][src_ip]')
+ .expect_form_value_absent('rules[input][0][src_port]')
+ .expect_form_value_absent('rules[input][0][protocol]')
+ .expect_form_value_absent('rules[input][0][tcp_flags]')
+ .expect_form_value('rules[input][0][action]', 'discard')
+ .expect_form_value_absent('rules[input][1][action]'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert len(result['diff']['before']['rules']['input']) == 0
+ assert len(result['diff']['after']['rules']['input']) == 1
+ assert result['firewall']['status'] == 'active'
+ assert len(result['firewall']['rules']['input']) == 1
+
+ def test_input_rule_len_change_1_0(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'rules': {
+ },
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ },
+ ],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active')
+ .expect_form_value_absent('rules[input][0][action]'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert len(result['diff']['before']['rules']['input']) == 1
+ assert len(result['diff']['after']['rules']['input']) == 0
+ assert result['firewall']['status'] == 'active'
+ assert len(result['firewall']['rules']['input']) == 0
+
+ def test_input_rule_len_change_1_2(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'rules': {
+ 'input': [
+ {
+ 'ip_version': 'ipv4',
+ 'dst_port': 80,
+ 'protocol': 'tcp',
+ 'action': 'accept',
+ },
+ {
+ 'ip_version': 'ipv4',
+ 'action': 'discard',
+ },
+ ],
+ },
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ },
+ ],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': '80',
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': 'tcp',
+ 'tcp_flags': None,
+ 'action': 'accept',
+ },
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ },
+ ],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active')
+ .expect_form_value('rules[input][0][action]', 'accept')
+ .expect_form_value('rules[input][1][action]', 'discard')
+ .expect_form_value_absent('rules[input][2][action]'),
+ ])
+ assert result['changed'] is True
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert len(result['diff']['before']['rules']['input']) == 1
+ assert len(result['diff']['after']['rules']['input']) == 2
+ assert result['firewall']['status'] == 'active'
+ assert len(result['firewall']['rules']['input']) == 2
+
+ # Idempotency checks: change one value
+
+ @pytest.mark.parametrize("parameter, before, after", flatten([
+ create_params('name', None, '', 'Test', 'Test', 'foo', '', None),
+ create_params('ip_version', 'ipv4', 'ipv4', 'ipv6', 'ipv6'),
+ create_params('dst_ip', None, '1.2.3.4/24', '1.2.3.4/32', '1.2.3.4/32', None),
+ create_params('dst_port', None, '80', '80-443', '80-443', None),
+ create_params('src_ip', None, '1.2.3.4/24', '1.2.3.4/32', '1.2.3.4/32', None),
+ create_params('src_port', None, '80', '80-443', '80-443', None),
+ create_params('protocol', None, 'tcp', 'tcp', 'udp', 'udp', None),
+ create_params('tcp_flags', None, 'syn', 'syn|fin', 'syn|fin', 'syn&fin', '', None),
+ create_params('action', 'accept', 'accept', 'discard', 'discard'),
+ ]))
+ def test_input_rule_value_change(self, mocker, parameter, before, after):
+ input_call = {
+ 'ip_version': 'ipv4',
+ 'action': 'discard',
+ }
+ input_before = {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ }
+ input_after = {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ }
+ if after is not None:
+ input_call[parameter] = after
+ input_before[parameter] = before
+ input_after[parameter] = after
+
+ calls = [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [input_before],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ]
+
+ changed = (before != after)
+ if changed:
+ after_call = (
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [input_after],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active')
+ .expect_form_value_absent('rules[input][1][action]')
+ )
+ if parameter != 'ip_version':
+ after_call.expect_form_value('rules[input][0][ip_version]', 'ipv4')
+ if parameter != 'action':
+ after_call.expect_form_value('rules[input][0][action]', 'discard')
+ if after is not None:
+ after_call.expect_form_value('rules[input][0][{0}]'.format(parameter), after)
+ else:
+ after_call.expect_form_value_absent('rules[input][0][{0}]'.format(parameter))
+ calls.append(after_call)
+
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'rules': {
+ 'input': [input_call],
+ },
+ }, calls)
+ assert result['changed'] == changed
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert len(result['diff']['before']['rules']['input']) == 1
+ assert len(result['diff']['after']['rules']['input']) == 1
+ assert result['diff']['before']['rules']['input'][0][parameter] == before
+ assert result['diff']['after']['rules']['input'][0][parameter] == after
+ assert result['firewall']['status'] == 'active'
+ assert len(result['firewall']['rules']['input']) == 1
+ assert result['firewall']['rules']['input'][0][parameter] == after
+
+ # Idempotency checks: IP address normalization
+
+ @pytest.mark.parametrize("ip_version, parameter, before_normalized, after_normalized, after", [
+ ('ipv4', 'src_ip', '1.2.3.4/32', '1.2.3.4/32', '1.2.3.4'),
+ ('ipv6', 'src_ip', '1:2:3::4/128', '1:2:3::4/128', '1:2:3::4'),
+ ('ipv6', 'dst_ip', '1:2:3::4/128', '1:2:3::4/128', '1:2:3:0::4'),
+ ('ipv6', 'dst_ip', '::/0', '::/0', '0:0::0/0'),
+ ])
+ def test_input_rule_ip_normalization(self, mocker, ip_version, parameter, before_normalized, after_normalized, after):
+ assert ip_version in ('ipv4', 'ipv6')
+ assert parameter in ('src_ip', 'dst_ip')
+ input_call = {
+ 'ip_version': ip_version,
+ 'action': 'discard',
+ }
+ input_before = {
+ 'name': None,
+ 'ip_version': ip_version,
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ }
+ input_after = {
+ 'name': None,
+ 'ip_version': ip_version,
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ }
+ if after is not None:
+ input_call[parameter] = after
+ input_before[parameter] = before_normalized
+ input_after[parameter] = after_normalized
+
+ calls = [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': True,
+ 'port': 'main',
+ 'rules': {
+ 'input': [input_before],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ]
+
+ changed = (before_normalized != after_normalized)
+ if changed:
+ after_call = (
+ FetchUrlCall('POST', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [input_after],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
+ .expect_form_value('status', 'active')
+ .expect_form_value_absent('rules[input][1][action]')
+ )
+ after_call.expect_form_value('rules[input][0][ip_version]', ip_version)
+ after_call.expect_form_value('rules[input][0][action]', 'discard')
+ after_call.expect_form_value('rules[input][0][{0}]'.format(parameter), after_normalized)
+ calls.append(after_call)
+
+ result = self.run_module_success(mocker, hetzner_firewall, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'state': 'present',
+ 'rules': {
+ 'input': [input_call],
+ },
+ }, calls)
+ assert result['changed'] == changed
+ assert result['diff']['before']['status'] == 'active'
+ assert result['diff']['after']['status'] == 'active'
+ assert len(result['diff']['before']['rules']['input']) == 1
+ assert len(result['diff']['after']['rules']['input']) == 1
+ assert result['diff']['before']['rules']['input'][0][parameter] == before_normalized
+ assert result['diff']['after']['rules']['input'][0][parameter] == after_normalized
+ assert result['firewall']['status'] == 'active'
+ assert len(result['firewall']['rules']['input']) == 1
+ assert result['firewall']['rules']['input'][0][parameter] == after_normalized
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall_info.py
new file mode 100644
index 00000000..3d70df47
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_hetzner_firewall_info.py
@@ -0,0 +1,240 @@
+# (c) 2019 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.internal_test_tools.tests.unit.utils.fetch_url_module_framework import (
+ FetchUrlCall,
+ BaseTestModule,
+)
+
+from ansible_collections.community.general.plugins.module_utils.hetzner import BASE_URL
+from ansible_collections.community.general.plugins.modules.net_tools import hetzner_firewall_info
+
+
+class TestHetznerFirewallInfo(BaseTestModule):
+ MOCK_ANSIBLE_MODULEUTILS_BASIC_ANSIBLEMODULE = 'ansible_collections.community.general.plugins.modules.net_tools.hetzner_firewall_info.AnsibleModule'
+ MOCK_ANSIBLE_MODULEUTILS_URLS_FETCH_URL = 'ansible_collections.community.general.plugins.module_utils.hetzner.fetch_url'
+
+ # Tests for state (absent and present)
+
+ def test_absent(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'disabled',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['firewall']['status'] == 'disabled'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_present(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert len(result['firewall']['rules']['input']) == 0
+
+ def test_present_w_rules(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [
+ {
+ 'name': 'Accept HTTPS traffic',
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': '443',
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': 'tcp',
+ 'tcp_flags': None,
+ 'action': 'accept',
+ },
+ {
+ 'name': None,
+ 'ip_version': 'ipv4',
+ 'dst_ip': None,
+ 'dst_port': None,
+ 'src_ip': None,
+ 'src_port': None,
+ 'protocol': None,
+ 'tcp_flags': None,
+ 'action': 'discard',
+ }
+ ],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+ assert len(result['firewall']['rules']['input']) == 2
+ assert result['firewall']['rules']['input'][0]['name'] == 'Accept HTTPS traffic'
+ assert result['firewall']['rules']['input'][0]['dst_port'] == '443'
+ assert result['firewall']['rules']['input'][0]['action'] == 'accept'
+ assert result['firewall']['rules']['input'][1]['dst_port'] is None
+ assert result['firewall']['rules']['input'][1]['action'] == 'discard'
+
+ # Tests for wait_for_configured in getting status
+
+ def test_wait_get(self, mocker):
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_success(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'wait_for_configured': True,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'active',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['firewall']['status'] == 'active'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
+
+ def test_wait_get_timeout(self, mocker):
+ mocker.patch('time.sleep', lambda duration: None)
+ result = self.run_module_failed(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'wait_for_configured': True,
+ 'timeout': 0,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['msg'] == 'Timeout while waiting for firewall to be configured.'
+
+ def test_nowait_get(self, mocker):
+ result = self.run_module_success(mocker, hetzner_firewall_info, {
+ 'hetzner_user': '',
+ 'hetzner_password': '',
+ 'server_ip': '1.2.3.4',
+ 'wait_for_configured': False,
+ }, [
+ FetchUrlCall('GET', 200)
+ .result_json({
+ 'firewall': {
+ 'server_ip': '1.2.3.4',
+ 'server_number': 1,
+ 'status': 'in process',
+ 'whitelist_hos': False,
+ 'port': 'main',
+ 'rules': {
+ 'input': [],
+ },
+ },
+ })
+ .expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
+ ])
+ assert result['changed'] is False
+ assert result['firewall']['status'] == 'in process'
+ assert result['firewall']['server_ip'] == '1.2.3.4'
+ assert result['firewall']['server_number'] == 1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_nmcli.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_nmcli.py
new file mode 100644
index 00000000..40a67e35
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/net_tools/test_nmcli.py
@@ -0,0 +1,1210 @@
+# Copyright: (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils._text import to_text
+from ansible_collections.community.general.plugins.modules.net_tools import nmcli
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+TESTCASE_CONNECTION = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bond',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bond-slave',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bridge',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'vlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'vxlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'ipip',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'sit',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+]
+
+TESTCASE_GENERIC = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_GENERIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv6.method: auto
+"""
+
+TESTCASE_GENERIC_DNS4_SEARCH = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ 'dns4_search': 'search.redhat.com',
+ 'dns6_search': 'search6.redhat.com',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.dns-search: search.redhat.com
+ipv6.dns-search: search6.redhat.com
+ipv6.method: auto
+"""
+
+TESTCASE_BOND = [
+ {
+ 'type': 'bond',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'bond_non_existant',
+ 'mode': 'active-backup',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ 'primary': 'non_existent_primary',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BOND_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: bond_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv6.method: auto
+bond.options: mode=active-backup,primary=non_existent_primary
+"""
+
+TESTCASE_BRIDGE = [
+ {
+ 'type': 'bridge',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'br0_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'maxage': 100,
+ 'stp': True,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BRIDGE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: br0_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv6.method: auto
+bridge.stp: yes
+bridge.max-age: 100
+bridge.ageing-time: 300
+bridge.hello-time: 2
+bridge.priority: 128
+bridge.forward-delay: 15
+"""
+
+TESTCASE_BRIDGE_SLAVE = [
+ {
+ 'type': 'bridge-slave',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'br0_non_existant',
+ 'path_cost': 100,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: br0_non_existant
+connection.autoconnect: yes
+bridge-port.path-cost: 100
+bridge-port.hairpin-mode: yes
+bridge-port.priority: 32
+"""
+
+TESTCASE_VLAN = [
+ {
+ 'type': 'vlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'vlan_not_exists',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'vlanid': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_VLAN_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: vlan_not_exists
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv6.method: auto
+vlan.id: 10
+"""
+
+TESTCASE_VXLAN = [
+ {
+ 'type': 'vxlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'vxlan-existent_nw_device',
+ 'vxlan_id': 11,
+ 'vxlan_local': '192.168.225.5',
+ 'vxlan_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_VXLAN_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: vxlan-existent_nw_device
+connection.autoconnect: yes
+vxlan.id: 11
+vxlan.local: 192.168.225.5
+vxlan.remote: 192.168.225.6
+"""
+
+TESTCASE_IPIP = [
+ {
+ 'type': 'ipip',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ipip-existent_nw_device',
+ 'ip_tunnel_dev': 'non_existent_ipip_device',
+ 'ip_tunnel_local': '192.168.225.5',
+ 'ip_tunnel_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_IPIP_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ipip-existent_nw_device
+connection.autoconnect: yes
+ip-tunnel.mode: ipip
+ip-tunnel.parent: non_existent_ipip_device
+ip-tunnel.local: 192.168.225.5
+ip-tunnel.remote: 192.168.225.6
+"""
+
+TESTCASE_SIT = [
+ {
+ 'type': 'sit',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'sit-existent_nw_device',
+ 'ip_tunnel_dev': 'non_existent_sit_device',
+ 'ip_tunnel_local': '192.168.225.5',
+ 'ip_tunnel_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_SIT_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: sit-existent_nw_device
+connection.autoconnect: yes
+ip-tunnel.mode: sit
+ip-tunnel.parent: non_existent_sit_device
+ip-tunnel.local: 192.168.225.5
+ip-tunnel.remote: 192.168.225.6
+"""
+
+TESTCASE_ETHERNET_DHCP = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'dhcp_client_id': '00:11:22:AA:BB:CC:DD',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: auto
+ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD
+ipv6.method: auto
+"""
+
+TESTCASE_ETHERNET_STATIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'dns4': ['1.1.1.1', '8.8.8.8'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.method: auto
+"""
+
+
+def mocker_set(mocker,
+ connection_exists=False,
+ execute_return=(0, "", ""),
+ execute_side_effect=None,
+ changed_return=None):
+ """
+ Common mocker object
+ """
+ get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ get_bin_path.return_value = '/usr/bin/nmcli'
+ connection = mocker.patch.object(nmcli.Nmcli, 'connection_exists')
+ connection.return_value = connection_exists
+ execute_command = mocker.patch.object(nmcli.Nmcli, 'execute_command')
+ if execute_return:
+ execute_command.return_value = execute_return
+ if execute_side_effect:
+ execute_command.side_effect = execute_side_effect
+ if changed_return:
+ is_connection_changed = mocker.patch.object(nmcli.Nmcli, 'is_connection_changed')
+ is_connection_changed.return_value = changed_return
+
+
+@pytest.fixture
+def mocked_generic_connection_create(mocker):
+ mocker_set(mocker)
+
+
+@pytest.fixture
+def mocked_connection_exists(mocker):
+ mocker_set(mocker, connection_exists=True)
+
+
+@pytest.fixture
+def mocked_generic_connection_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ changed_return=(True, dict()))
+
+
+@pytest.fixture
+def mocked_generic_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_generic_connection_dns_search_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bond_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BOND_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bridge_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BRIDGE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bridge_slave_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vlan_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VLAN_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vxlan_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ipip_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_IPIP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_sit_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_SIT_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+               execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_dhcp_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_dhcp_to_static(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module'])
+def test_bond_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Bond connection created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bond'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['ipv4.gateway', 'primary', 'connection.autoconnect',
+ 'connection.interface-name', 'bond_non_existant',
+ 'mode', 'active-backup', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module'])
+def test_bond_connection_unchanged(mocked_bond_connection_unchanged, capfd):
+ """
+ Test : Bond connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'generic'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['connection.autoconnect', 'ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_modify(mocked_generic_connection_modify, capfd):
+ """
+ Test : Generic connection modify
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ for param in ['ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_unchanged(mocked_generic_connection_unchanged, capfd):
+ """
+ Test : Generic connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_create_dns_search(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection created with dns search
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dns-search' in args[0]
+ assert 'ipv6.dns-search' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_modify_dns_search(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection modified with dns search
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dns-search' in args[0]
+ assert 'ipv6.dns-search' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_dns_search_unchanged(mocked_generic_connection_dns_search_unchanged, capfd):
+ """
+ Test : Generic connection with dns search unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module'])
+def test_dns4_none(mocked_connection_exists, capfd):
+ """
+ Test if DNS4 param is None
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_create_bridge(mocked_generic_connection_create, capfd):
+ """
+ Test if Bridge created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bridge'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_mod_bridge(mocked_generic_connection_modify, capfd):
+ """
+ Test if Bridge modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_bridge_connection_unchanged(mocked_bridge_connection_unchanged, capfd):
+ """
+ Test : Bridge connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_create_bridge_slave(mocked_generic_connection_create, capfd):
+ """
+ Test if Bridge_slave created
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bridge-slave'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['bridge-port.path-cost', '100']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_mod_bridge_slave(mocked_generic_connection_modify, capfd):
+ """
+ Test if Bridge_slave modified
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['bridge-port.path-cost', '100']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd):
+ """
+ Test : Bridge-slave connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_create_vlan_con(mocked_generic_connection_create, capfd):
+ """
+ Test if VLAN created
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'vlan'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_mod_vlan_conn(mocked_generic_connection_modify, capfd):
+ """
+ Test if VLAN modified
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_vlan_connection_unchanged(mocked_vlan_connection_unchanged, capfd):
+ """
+ Test : VLAN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_create_vxlan(mocked_generic_connection_create, capfd):
+ """
+ Test if vxlan created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'vxlan'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'vxlan-existent_nw_device',
+ 'vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_vxlan_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if vxlan modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_vxlan_connection_unchanged(mocked_vxlan_connection_unchanged, capfd):
+ """
+ Test : VxLAN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_create_ipip(mocked_generic_connection_create, capfd):
+ """
+ Test if ipip created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'ip-tunnel'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'ipip-existent_nw_device',
+ 'ip-tunnel.local', '192.168.225.5',
+ 'ip-tunnel.mode', 'ipip',
+ 'ip-tunnel.parent', 'non_existent_ipip_device',
+ 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_ipip_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if ipip modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_ipip_connection_unchanged(mocked_ipip_connection_unchanged, capfd):
+ """
+ Test : IPIP connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_create_sit(mocked_generic_connection_create, capfd):
+ """
+ Test if sit created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'ip-tunnel'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'sit-existent_nw_device',
+ 'ip-tunnel.local', '192.168.225.5',
+ 'ip-tunnel.mode', 'sit',
+ 'ip-tunnel.parent', 'non_existent_sit_device',
+ 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_sit_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if sit modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_sit_connection_unchanged(mocked_sit_connection_unchanged, capfd):
+ """
+ Test : SIT connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module'])
+def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Ethernet connection created with DHCP_CLIENT_ID
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dhcp-client-id' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module'])
+def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unchanged, capfd):
+ """
+ Test : Ethernet connection with DHCP_CLIENT_ID unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_modify_ethernet_dhcp_to_static(mocked_ethernet_connection_dhcp_to_static, capfd):
+ """
+ Test : Modify ethernet connection from DHCP to static
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[1]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ for param in ['ipv4.method', 'ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_create_ethernet_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create ethernet connection with static IP configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ 'ipv4.gateway', '10.10.10.1',
+ 'ipv4.dns', '1.1.1.1,8.8.8.8']:
+ assert param in add_args_text
+
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_unchanged, capfd):
+ """
+ Test : Ethernet connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_campfire.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_campfire.py
new file mode 100644
index 00000000..72bbd579
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_campfire.py
@@ -0,0 +1,93 @@
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules.notification import campfire
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestCampfireModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestCampfireModule, self).setUp()
+ self.module = campfire
+
+ def tearDown(self):
+ super(TestCampfireModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+        return mocker.patch('ansible_collections.community.general.plugins.modules.notification.campfire.fetch_url')
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_successful_message(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
+ url = fetch_url_mock.call_args[0][1]
+ data = fetch_url_mock.call_args[1]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><body>test</body></message>'
+
+ def test_successful_message_with_notify(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test',
+ 'notify': 'bell'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 2
+ notify_call = fetch_url_mock.mock_calls[0]
+ url = notify_call[1][1]
+ data = notify_call[2]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><type>SoundMessage</type><body>bell</body></message>'
+
+ message_call = fetch_url_mock.mock_calls[1]
+ url = message_call[1][1]
+ data = message_call[2]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><body>test</body></message>'
+
+ def test_failure_message(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 403})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_slack.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_slack.py
new file mode 100644
index 00000000..85f9b100
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/notification/test_slack.py
@@ -0,0 +1,201 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch
+from ansible_collections.community.general.plugins.modules.notification import slack
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestSlackModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestSlackModule, self).setUp()
+ self.module = slack
+
+ def tearDown(self):
+ super(TestSlackModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+        return mocker.patch('ansible_collections.community.general.plugins.modules.notification.slack.fetch_url')
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_invalid_old_token(self):
+ """Failure if there is an old style token"""
+ set_module_args({
+ 'token': 'test',
+ })
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_successful_message(self):
+ """tests sending a message. This is example 1 from the docs"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['text'] == "test"
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ def test_failed_message(self):
+ """tests failing to send a message"""
+
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_message_with_thread(self):
+ """tests sending a message with a thread"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'thread_id': '100.00'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['text'] == "test"
+ assert call_data['thread_ts'] == '100.00'
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ # https://github.com/ansible-collections/community.general/issues/1097
+ def test_ts_in_message_does_not_cause_edit(self):
+ set_module_args({
+ 'token': 'xoxa-123456789abcdef',
+ 'msg': 'test with ts'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ mock_response = Mock()
+ mock_response.read.return_value = '{"fake":"data"}'
+ fetch_url_mock.return_value = (mock_response, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ self.assertEquals(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage")
+
+ def test_edit_message(self):
+ set_module_args({
+ 'token': 'xoxa-123456789abcdef',
+ 'msg': 'test2',
+ 'message_id': '12345'
+ })
+
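+ # Two mocked responses are queued: the first answers the lookup that returns the
+ # existing message's ts, the second answers the chat.update request asserted below.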
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ mock_response = Mock()
+ mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}'
+ fetch_url_mock.side_effect = [
+ (mock_response, {"status": 200}),
+ (mock_response, {"status": 200}),
+ ]
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 2)
+ self.assertEquals(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update")
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ self.assertEquals(call_data['ts'], "12345")
+
+ def test_message_with_blocks(self):
+ """tests sending a message with blocks"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'blocks': [{
+ 'type': 'section',
+ 'text': {
+ 'type': 'mrkdwn',
+ 'text': '*test*'
+ },
+ 'accessory': {
+ 'type': 'image',
+ 'image_url': 'https://www.ansible.com/favicon.ico',
+ 'alt_text': 'test'
+ }
+ }, {
+ 'type': 'section',
+ 'text': {
+ 'type': 'plain_text',
+ 'text': 'test',
+ 'emoji': True
+ }
+ }]
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['blocks'][1]['text']['text'] == "test"
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ def test_message_with_invalid_color(self):
+ """tests sending invalid color value to module"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'color': 'aa',
+ })
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ self.module.main()
+
+ msg = "Color value specified should be either one of" \
+ " ['normal', 'good', 'warning', 'danger'] or any valid" \
+ " hex value with length 3 or 6."
+ assert exec_info.exception.args[0]['msg'] == msg
+
+
+color_test = [
+ ('#111111', True),
+ ('#00aabb', True),
+ ('#abc', True),
+ ('#gghhjj', False),
+ ('#ghj', False),
+ ('#a', False),
+ ('#aaaaaaaa', False),
+ ('', False),
+ ('aaaa', False),
+ ('$00aabb', False),
+ ('$00a', False),
+]
+
+
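+# is_valid_hex_color() is expected to accept '#' followed by exactly 3 or 6 hex digits
+# and to reject everything else, matching the error message asserted above.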
+@pytest.mark.parametrize("color_value, ret_status", color_test)
+def test_is_valid_hex_color(color_value, ret_status):
+ generated_value = slack.is_valid_hex_color(color_value)
+ assert generated_value == ret_status
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_gem.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_gem.py
new file mode 100644
index 00000000..41f504f6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_gem.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2018 Antoine Catton
+# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.packaging.language import gem
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+def get_command(run_command):
+ """Generate the command line string from the patched run_command"""
+ args = run_command.call_args[0]
+ command = args[0]
+ return ' '.join(command)
+
+
+class TestGem(ModuleTestCase):
+ def setUp(self):
+ super(TestGem, self).setUp()
+ self.rubygems_path = ['/usr/bin/gem']
+ self.mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_path',
+ lambda module: copy.deepcopy(self.rubygems_path),
+ )
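+ # get_rubygems_path() is patched to return a fresh copy on every call, so the module
+ # can extend the returned command list without leaking state between invocations.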
+
+ @pytest.fixture(autouse=True)
+ def _mocker(self, mocker):
+ self.mocker = mocker
+
+ def patch_installed_versions(self, versions):
+ """Mocks the versions of the installed package"""
+
+ target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_installed_versions'
+
+ def new(module, remote=False):
+ return versions
+
+ return self.mocker.patch(target, new)
+
+ def patch_rubygems_version(self, version=None):
+ target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_version'
+
+ def new(module):
+ return version
+
+ return self.mocker.patch(target, new)
+
+ def patch_run_command(self):
+ target = 'ansible.module_utils.basic.AnsibleModule.run_command'
+ return self.mocker.patch(target)
+
+ def test_fails_when_user_install_and_install_dir_are_combined(self):
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': True,
+ 'install_dir': '/opt/dummy',
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['failed']
+ assert result['msg'] == "install_dir requires user_install=false"
+
+ def test_passes_install_dir_to_gem(self):
+ # XXX: This test is extremely fragile and makes assumptions about the module code and how
+ # functions are run.
+ # If you start modifying the code of the module, you might need to modify what this
+ # test mocks. The only thing that matters is the assertion that this 'gem install' is
+ # invoked with '--install-dir'.
+
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': False,
+ 'install_dir': '/opt/dummy',
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions([])
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['changed']
+ assert run_command.called
+
+ assert '--install-dir /opt/dummy' in get_command(run_command)
+
+ def test_passes_install_dir_and_gem_home_when_uninstall_gem(self):
+ # XXX: This test is also extremely fragile because of mocking.
+ # If this breaks, the only thing that matters is to check whether '--install-dir' is
+ # in the run command, and that GEM_HOME is passed to the command.
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': False,
+ 'install_dir': '/opt/dummy',
+ 'state': 'absent',
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions(['1.0.0'])
+
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+
+ assert result['changed']
+ assert run_command.called
+
+ assert '--install-dir /opt/dummy' in get_command(run_command)
+
+ update_environ = run_command.call_args[1].get('environ_update', {})
+ assert update_environ.get('GEM_HOME') == '/opt/dummy'
+
+ def test_passes_add_force_option(self):
+ set_module_args({
+ 'name': 'dummy',
+ 'force': True,
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions([])
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['changed']
+ assert run_command.called
+
+ assert '--force' in get_command(run_command)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_maven_artifact.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_maven_artifact.py
new file mode 100644
index 00000000..1cd7e243
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_maven_artifact.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.packaging.language import maven_artifact
+from ansible.module_utils import basic
+
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+maven_metadata_example = b"""<?xml version="1.0" encoding="UTF-8"?>
+<metadata>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <versioning>
+ <latest>4.13-beta-2</latest>
+ <release>4.13-beta-2</release>
+ <versions>
+ <version>3.7</version>
+ <version>3.8</version>
+ <version>3.8.1</version>
+ <version>3.8.2</version>
+ <version>4.0</version>
+ <version>4.1</version>
+ <version>4.2</version>
+ <version>4.3</version>
+ <version>4.3.1</version>
+ <version>4.4</version>
+ <version>4.5</version>
+ <version>4.6</version>
+ <version>4.7</version>
+ <version>4.8</version>
+ <version>4.8.1</version>
+ <version>4.8.2</version>
+ <version>4.9</version>
+ <version>4.10</version>
+ <version>4.11-beta-1</version>
+ <version>4.11</version>
+ <version>4.12-beta-1</version>
+ <version>4.12-beta-2</version>
+ <version>4.12-beta-3</version>
+ <version>4.12</version>
+ <version>4.13-beta-1</version>
+ <version>4.13-beta-2</version>
+ </versions>
+ <lastUpdated>20190202141051</lastUpdated>
+ </versioning>
+</metadata>
+"""
+
+
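+# Maven range syntax: square brackets are inclusive, parentheses exclusive, and an open
+# bound means "unbounded", e.g. '[4.10, 4.12)' excludes 4.12 and '[2.0,)' picks the latest.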
+@pytest.mark.parametrize('patch_ansible_module, version_by_spec, version_chosen', [
+ (None, "(,3.9]", "3.8.2"),
+ (None, "3.0", "3.8.2"),
+ (None, "[3.7]", "3.7"),
+ (None, "[4.10, 4.12]", "4.12"),
+ (None, "[4.10, 4.12)", "4.11"),
+ (None, "[2.0,)", "4.13-beta-2"),
+])
+def test_find_version_by_spec(mocker, version_by_spec, version_chosen):
+ _getContent = mocker.patch('ansible_collections.community.general.plugins.modules.packaging.language.maven_artifact.MavenDownloader._getContent')
+ _getContent.return_value = maven_metadata_example
+
+ artifact = maven_artifact.Artifact("junit", "junit", None, version_by_spec, "jar")
+ mvn_downloader = maven_artifact.MavenDownloader(basic.AnsibleModule, "https://repo1.maven.org/maven2")
+
+ assert mvn_downloader.find_version_by_spec(artifact) == version_chosen
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_npm.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_npm.py
new file mode 100644
index 00000000..849bfac1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/language/test_npm.py
@@ -0,0 +1,70 @@
+#
+# Copyright: (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.plugins.modules.packaging.language import npm
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args)
+
+
+class NPMModuleTestCase(ModuleTestCase):
+ module = npm
+
+ def setUp(self):
+ super(NPMModuleTestCase, self).setUp()
+ ansible_module_path = "ansible_collections.community.general.plugins.modules.packaging.language.npm.AnsibleModule"
+ self.mock_run_command = patch('%s.run_command' % ansible_module_path)
+ self.module_main_command = self.mock_run_command.start()
+ self.mock_get_bin_path = patch('%s.get_bin_path' % ansible_module_path)
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.get_bin_path.return_value = '/testbin/npm'
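+ # run_command and get_bin_path are patched, so no real npm binary is needed;
+ # each test feeds canned JSON output through side_effect.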
+
+ def tearDown(self):
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ super(NPMModuleTestCase, self).tearDown()
+
+ def module_main(self, exit_exc):
+ with self.assertRaises(exit_exc) as exc:
+ self.module.main()
+ return exc.exception.args[0]
+
+ def test_present(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ ])
+
+ def test_absent(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'absent'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/conftest.py
new file mode 100644
index 00000000..408a0237
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/conftest.py
@@ -0,0 +1,34 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils.six.moves import xmlrpc_client
+
+import pytest
+
+
+def get_method_name(request_body):
+ return xmlrpc_client.loads(request_body)[1]
+
+
+@pytest.fixture
+def mock_request(request, mocker):
+ responses = request.getfixturevalue('testcase')['calls']
+ module_name = request.module.TESTED_MODULE
+
+ def transport_request(host, handler, request_body, verbose=0):
+ """Fake request"""
+ method_name = get_method_name(request_body)
+ expected_name, response = responses.pop(0)
+ if method_name == expected_name:
+ if isinstance(response, Exception):
+ raise response
+ else:
+ return response
+ else:
+ raise Exception('Expected call: %r, called with: %r' % (expected_name, method_name))
+
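+ # Patch the XML-RPC transport used by the module under test so no network request
+ # is made; expected (method, response) pairs are consumed from the testcase in order.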
+ target = '{0}.xmlrpc_client.Transport.request'.format(module_name)
+ mocker.patch(target, side_effect=transport_request)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_apk.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_apk.py
new file mode 100644
index 00000000..9577e892
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_apk.py
@@ -0,0 +1,36 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules.packaging.os import apk
+
+
+class TestApkQueryLatest(unittest.TestCase):
+
+ def setUp(self):
+ self.module_names = [
+ 'bash',
+ 'g++',
+ ]
+
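+ # The mocked 'apk version' output marks packages with '<' when a newer version is
+ # available and with '=' when the installed version is current; query_latest() parses that.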
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.apk.AnsibleModule')
+ def test_not_latest(self, mock_module):
+ apk.APK_PATH = ""
+ for module_name in self.module_names:
+ command_output = module_name + '-2.0.0-r1 < 3.0.0-r2 '
+ mock_module.run_command.return_value = (0, command_output, None)
+ command_result = apk.query_latest(mock_module, module_name)
+ self.assertFalse(command_result)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.apk.AnsibleModule')
+ def test_latest(self, mock_module):
+ apk.APK_PATH = ""
+ for module_name in self.module_names:
+ command_output = module_name + '-2.0.0-r1 = 2.0.0-r1 '
+ mock_module.run_command.return_value = (0, command_output, None)
+ command_result = apk.query_latest(mock_module, module_name)
+ self.assertTrue(command_result)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew.py
new file mode 100644
index 00000000..c2297fd4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew.py
@@ -0,0 +1,22 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.packaging.os.homebrew import Homebrew
+
+
+class TestHomebrewModule(unittest.TestCase):
+
+ def setUp(self):
+ self.brew_app_names = [
+ 'git-ssh',
+ 'awscli@1',
+ 'bash'
+ ]
+
+ def test_valid_package_names(self):
+ for name in self.brew_app_names:
+ self.assertTrue(Homebrew.valid_package(name))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py
new file mode 100644
index 00000000..57cf225a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py
@@ -0,0 +1,21 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.packaging.os.homebrew_cask import HomebrewCask
+
+
+class TestHomebrewCaskModule(unittest.TestCase):
+
+ def setUp(self):
+ self.brew_cask_names = [
+ 'visual-studio-code',
+ 'firefox'
+ ]
+
+ def test_valid_cask_names(self):
+ for name in self.brew_cask_names:
+ self.assertTrue(HomebrewCask.valid_cask(name))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_macports.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_macports.py
new file mode 100644
index 00000000..ef7c522f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_macports.py
@@ -0,0 +1,34 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.plugins.modules.packaging.os import macports
+
+import pytest
+
+TESTED_MODULE = macports.__name__
+
+QUERY_PORT_TEST_CASES = [
+ pytest.param('', False, False, id='Not installed'),
+ pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28', True, False, id='Installed but not active'),
+ pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)', True, True, id='Installed and active'),
+ pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+ git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+''', True, False, id='2 versions installed, neither active'),
+ pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)
+ git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+''', True, True, id='2 versions installed, one active'),
+]
+
+
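+# query_port() with state="present" only needs the port to appear in the output, while
+# state="active" additionally requires the '(active)' marker, as the cases above show.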
+@pytest.mark.parametrize("run_cmd_return_val, present_expected, active_expected", QUERY_PORT_TEST_CASES)
+def test_macports_query_port(mocker, run_cmd_return_val, present_expected, active_expected):
+ module = mocker.Mock()
+ run_command = mocker.Mock()
+ run_command.return_value = (0, run_cmd_return_val, '')
+ module.run_command = run_command
+
+ assert macports.query_port(module, 'port', 'git', state="present") == present_expected
+ assert macports.query_port(module, 'port', 'git', state="active") == active_expected
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_pkgin.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_pkgin.py
new file mode 100644
index 00000000..a53cfd49
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_pkgin.py
@@ -0,0 +1,143 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules.packaging.os import pkgin
+
+
+class TestPkginQueryPackage(unittest.TestCase):
+
+ def setUp(self):
+ pkgin.PKGIN_PATH = ""
+
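+ # Each test's first mocked run_command result is the probe whose return code tells
+ # query_package() whether pkgin supports parseable output; the second is the search
+ # output that actually gets parsed.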
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_without_version_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0 = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_with_version_is_present(self, mock_module):
+ # given
+ package = 'py37-conan-1.21.0'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_found_but_not_installed(self, mock_module):
+ # given
+ package = 'cmake'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 = Cross platform make\ncmake314-3.14.6nb1 = Cross platform make\ncmake-3.14.0 Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.NOT_INSTALLED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_found_outdated(self, mock_module):
+ # given
+ package = 'cmake316'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 < Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.OUTDATED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_with_version_found_outdated(self, mock_module):
+ # given
+ package = 'cmake316-3.16.0nb1'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 < Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.OUTDATED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_package_not_found(self, mock_module):
+ # given
+ package = 'cmake320-3.20.0nb1'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (1, None, "No results found for %s" % package),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.NOT_FOUND)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_with_parseable_flag_supported_package_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_supported = 0
+ mock_module.run_command.side_effect = [
+ (parseable_flag_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0;=;C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.packaging.os.pkgin.AnsibleModule')
+ def test_with_parseable_flag_not_supported_package_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0 = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+ self.assertEquals(command_result, pkgin.PackageState.PRESENT)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py
new file mode 100644
index 00000000..ef6f28b8
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py
@@ -0,0 +1,1221 @@
+# Author: Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.plugins.modules.packaging.os import redhat_subscription
+
+import pytest
+
+TESTED_MODULE = redhat_subscription.__name__
+
+
+@pytest.fixture
+def patch_redhat_subscription(mocker):
+ """
+ Function used for mocking some parts of redhat_subscription module
+ """
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.RegistrationBase.REDHAT_REPO')
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.isfile', return_value=False)
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.unlink', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.redhat_subscription.AnsibleModule.get_bin_path',
+ return_value='/testbin/subscription-manager')
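+ # isfile/unlink are patched so no files on the test host are touched, and get_bin_path
+ # always resolves to the fake subscription-manager path used in the expected calls below.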
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters(capfd, patch_redhat_subscription):
+ """
+ Failure must occur when all parameters are missing
+ """
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'state is present but any of the following are missing' in results['msg']
+
+
+TEST_CASES = [
+ # Test the case when the system is already registered
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin'
+ },
+ {
+ 'id': 'test_already_registered_system',
+ 'run_command.calls': [
+ (
+ # The following command call will be asserted
+ ['/testbin/subscription-manager', 'identity'],
+ # Was return code checked?
+ {'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ )
+ ],
+ 'changed': False,
+ 'msg': 'System already registered.'
+ }
+ ],
+ # Test simple registration using username and password
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'satellite.company.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+ 'id': 'test_registration_username_password',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'register',
+ '--serverurl', 'satellite.company.com',
+ '--username', 'admin',
+ '--password', 'admin'],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'satellite.company.com'."
+ }
+ ],
+ # Test unregistration when the system is registered
+ [
+ {
+ 'state': 'absent',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+ 'id': 'test_unregistration',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'unsubscribe', '--all'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'unregister'],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully unregistered from subscription.rhsm.redhat.com."
+ }
+ ],
+ # Test unregistration of already unregistered system
+ [
+ {
+ 'state': 'absent',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+ 'id': 'test_unregistration_of_unregistered_system',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ )
+ ],
+ 'changed': False,
+ 'msg': "System already unregistered."
+ }
+ ],
+ # Test registration using activation key
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'satellite.company.com',
+ 'activationkey': 'some-activation-key',
+ 'org_id': 'admin'
+ },
+ {
+ 'id': 'test_registration_activation_key',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--serverurl', 'satellite.company.com',
+ '--org', 'admin',
+ '--activationkey', 'some-activation-key'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'satellite.company.com'."
+ }
+ ],
+ # Test of registration using username and password with auto-attach option
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true'
+ },
+ {
+ 'id': 'test_registration_username_password_auto_attach',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of force registration even though the system is already registered
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'force_register': 'true'
+ },
+ {
+ 'id': 'test_force_registration_username_password',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'This system already registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--force',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username, password and proxy options
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'force_register': 'true',
+ 'server_proxy_hostname': 'proxy.company.com',
+ 'server_proxy_port': '12345',
+ 'server_proxy_user': 'proxy_user',
+ 'server_proxy_password': 'secret_proxy_password'
+ },
+ {
+ 'id': 'test_registration_username_password_proxy_options',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'This system already registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'config',
+ '--server.proxy_hostname=proxy.company.com',
+ '--server.proxy_password=secret_proxy_password',
+ '--server.proxy_port=12345',
+ '--server.proxy_user=proxy_user'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--force',
+ '--org', 'admin',
+ '--proxy', 'proxy.company.com:12345',
+ '--proxyuser', 'proxy_user',
+ '--proxypassword', 'secret_proxy_password',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool': 'ff8080816b8e967f016b8e99632804a6'
+ },
+ {
+ 'id': 'test_registration_username_password_pool',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ [
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', ''),
+ ]
+ ),
+ (
+ 'subscription-manager attach --pool ff8080816b8e967f016b8e99632804a6',
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID and quantities
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}]
+ },
+ {
+ 'id': 'test_registration_username_password_pool_ids_quantities',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ [
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ]
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ '--quantity', '2'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9',
+ '--quantity', '4'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID without quantities
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': ['ff8080816b8e967f016b8e99632804a6', 'ff8080816b8e967f016b8e99747107e9']
+ },
+ {
+ 'id': 'test_registration_username_password_pool_ids',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ [
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ]
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID (one pool)
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': ['ff8080816b8e967f016b8e99632804a6']
+ },
+ {
+ 'id': 'test_registration_username_password_one_pool_id',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ [
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ]
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test attaching a different set of pool IDs
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}]
+ },
+ {
+ 'id': 'test_attaching_different_pool_ids',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', ''),
+ ),
+ (
+ 'subscription-manager list --consumed',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0, '''
++-------------------------------------------+
+ Consumed Subscriptions
++-------------------------------------------+
+Subscription Name: Multi-Attribute Stackable (4 cores, no content)
+Provides: Multi-Attribute Limited Product (no content)
+SKU: cores4-multiattr
+Contract: 1
+Account: 12331131231
+Serial: 7807912223970164816
+Pool ID: ff8080816b8e967f016b8e995f5103b5
+Provides Management: No
+Active: True
+Quantity Used: 1
+Service Type: Level 3
+Roles:
+Service Level: Premium
+Usage:
+Add-ons:
+Status Details: Subscription is current
+Subscription Type: Stackable
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'unsubscribe',
+ '--serial=7807912223970164816',
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+
+Subscription Name: Multi-Attribute Stackable (4 cores, no content)
+Provides: Multi-Attribute Limited Product (no content)
+SKU: cores4-multiattr
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e995f5103b5
+Provides Management: No
+Available: 10
+Suggested: 1
+Service Type: Level 3
+Roles:
+Service Level: Premium
+Usage:
+Add-ons:
+Subscription Type: Stackable
+Starts: 11.7.2019
+Ends: 10.7.2020
+Entitlement Type: Physical
+''', '')
+ ]
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ '--quantity', '2'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9',
+ '--quantity', '4'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ }
+ ]
+]
+
+
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, ids=TEST_CASES_IDS, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_redhat_subscription(mocker, capfd, patch_redhat_subscription, testcase):
+ """
+ Run unit tests for the test cases listed in TEST_CASES
+ """
+
+ # Mock function used for running commands first
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+ if 'msg' in results:
+ assert results['msg'] == testcase['msg']
+
+ assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls'])
+ if basic.AnsibleModule.run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ assert call_args_list == expected_call_args_list
+
+
+SYSPURPOSE_TEST_CASES = [
+ # Test setting syspurpose attributes (system is already registered)
+ # and synchronization with candlepin server
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_attributes',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+ # Test setting unsupported attributes
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'foo': 'Bar',
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_wrong_attributes',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {},
+ 'run_command.calls': [],
+ 'failed': True
+ }
+ ],
+ # Test setting addons with a value that is not a list
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': 'ADDON1',
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_addons_not_list',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1']
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+ # Test setting syspurpose attributes (system is already registered)
+ # without synchronization with candlepin server. Some syspurpose attributes were set
+ # in the past
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': False
+ }
+ },
+ {
+ 'id': 'test_changing_syspurpose_attributes',
+ 'existing_syspurpose': {
+ 'role': 'CoolOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Super',
+ 'addons': [],
+ 'foo': 'bar'
+ },
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'foo': 'bar'
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+ # Test trying to set syspurpose attributes (system is already registered)
+ # without synchronization with candlepin server. Some syspurpose attributes were set
+ # in the past. Syspurpose attributes are same as before
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': False
+ }
+ },
+ {
+ 'id': 'test_not_changing_syspurpose_attributes',
+ 'existing_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ ],
+ 'changed': False,
+ 'msg': 'System already registered.'
+ }
+ ],
+ # Test of registration using username and password with auto-attach option, when
+ # syspurpose attributes are set
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ 'sync': False
+ },
+ },
+ {
+ 'id': 'test_registration_username_password_auto_attach_syspurpose',
+ 'existing_syspurpose': None,
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+    # Test of registration using username and password with the auto-attach option, when
+    # syspurpose attributes are set. Syspurpose attributes are also synchronized
+    # in this case
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ 'sync': True
+ },
+ },
+ {
+ 'id': 'test_registeration_username_password_auto_attach_syspurpose_sync',
+ 'existing_syspurpose': None,
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+]
+
+
+SYSPURPOSE_TEST_CASES_IDS = [item[1]['id'] for item in SYSPURPOSE_TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', SYSPURPOSE_TEST_CASES, ids=SYSPURPOSE_TEST_CASES_IDS, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_redhat_subscription_syspurpose(mocker, capfd, patch_redhat_subscription, patch_ansible_module, testcase, tmpdir):
+ """
+    Run unit tests for the test cases listed in SYSPURPOSE_TEST_CASES (syspurpose-specific cases)
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ side_effect=call_results)
+
+ mock_syspurpose_file = tmpdir.mkdir("syspurpose").join("syspurpose.json")
+    # When there are some existing syspurpose attributes specified, then
+    # write them to the file first
+ if testcase['existing_syspurpose'] is not None:
+ mock_syspurpose_file.write(json.dumps(testcase['existing_syspurpose']))
+ else:
+ mock_syspurpose_file.write("{}")
+
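+    # Redirect the module's SysPurpose file path to the temporary file created above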
+ redhat_subscription.SysPurpose.SYSPURPOSE_FILE_PATH = str(mock_syspurpose_file)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ if 'failed' in testcase:
+ assert results['failed'] == testcase['failed']
+ else:
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+ if 'msg' in results:
+ assert results['msg'] == testcase['msg']
+
+ mock_file_content = mock_syspurpose_file.read_text("utf-8")
+ current_syspurpose = json.loads(mock_file_content)
+ assert current_syspurpose == testcase['expected_syspurpose']
+
+ assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls'])
+ if basic.AnsibleModule.run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ assert call_args_list == expected_call_args_list
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_channel.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_channel.py
new file mode 100644
index 00000000..548deaab
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_channel.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis@libregerbil.fr>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules.packaging.os import rhn_channel
+
+import pytest
+
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+def test_without_required_parameters(capfd):
+ with pytest.raises(SystemExit):
+ rhn_channel.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+
+TESTED_MODULE = rhn_channel.__name__
+TEST_CASES = [
+ [
+ # add channel already added, check that result isn't changed
+ {
+ 'name': 'rhel-x86_64-server-6',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': False,
+ 'msg': 'Channel rhel-x86_64-server-6 already exists',
+ }
+ ],
+ [
+ # add channel, check that result is changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('system.setChildChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': True,
+ 'msg': 'Channel rhel-x86_64-server-6-debuginfo added',
+ }
+ ],
+ [
+        # remove nonexistent channel, check that result isn't changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'state': 'absent',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': False,
+ 'msg': 'Not subscribed to channel rhel-x86_64-server-6-debuginfo.',
+ }
+ ],
+ [
+ # remove channel, check that result is changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'state': 'absent',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels', [[
+ {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
+ {'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
+ ]]),
+ ('channel.software.listSystemChannels', [[
+ {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
+ {'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
+ ]]),
+ ('system.setChildChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': True,
+ 'msg': 'Channel rhel-x86_64-server-6-debuginfo removed'
+ }
+ ]
+]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
+def test_rhn_channel(capfd, mocker, testcase, mock_request):
+ """Check 'msg' and 'changed' results"""
+
+ with pytest.raises(SystemExit):
+ rhn_channel.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['changed'] == testcase['changed']
+ assert results['msg'] == testcase['msg']
+ assert not testcase['calls'] # all calls should have been consumed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_register.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_register.py
new file mode 100644
index 00000000..ae2f44ae
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhn_register.py
@@ -0,0 +1,289 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from ansible_collections.community.general.tests.unit.compat.mock import mock_open
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_native
+import ansible.module_utils.six
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible_collections.community.general.plugins.modules.packaging.os import rhn_register
+
+import pytest
+
+
+SYSTEMID = """<?xml version="1.0"?>
+<params>
+<param>
+<value><struct>
+<member>
+<name>system_id</name>
+<value><string>ID-123456789</string></value>
+</member>
+</struct></value>
+</param>
+</params>
+"""
+
+
+def skipWhenAllModulesMissing(modules):
+ """Skip the decorated test unless one of modules is available."""
+ for module in modules:
+ try:
+ __import__(module)
+ return False
+ except ImportError:
+ continue
+
+ return True
+
+
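+# Keep a reference to the built-in __import__ so the import_libxml fixture can delegate to it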
+orig_import = __import__
+
+
+@pytest.fixture
+def import_libxml(mocker):
+ def mock_import(name, *args, **kwargs):
+ if name in ['libxml2', 'libxml']:
+ raise ImportError()
+ else:
+ return orig_import(name, *args, **kwargs)
+
+ if ansible.module_utils.six.PY3:
+ mocker.patch('builtins.__import__', side_effect=mock_import)
+ else:
+ mocker.patch('__builtin__.__import__', side_effect=mock_import)
+
+
+@pytest.fixture
+def patch_rhn(mocker):
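+    """Patch Rhn.load_config and mark the up2date client as available."""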
+ load_config_return = {
+ 'serverURL': 'https://xmlrpc.rhn.redhat.com/XMLRPC',
+ 'systemIdPath': '/etc/sysconfig/rhn/systemid'
+ }
+
+ mocker.patch.object(rhn_register.Rhn, 'load_config', return_value=load_config_return)
+ mocker.patch.object(rhn_register, 'HAS_UP2DATE_CLIENT', mocker.PropertyMock(return_value=True))
+
+
+@pytest.mark.skipif(skipWhenAllModulesMissing(['libxml2', 'libxml']), reason='none are available: libxml2, libxml')
+def test_systemid_with_requirements(capfd, mocker, patch_rhn):
+ """Check 'msg' and 'changed' results"""
+
+ mocker.patch.object(rhn_register.Rhn, 'enable')
+ mock_isfile = mocker.patch('os.path.isfile', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
+ rhn = rhn_register.Rhn()
+ assert '123456789' == to_native(rhn.systemid)
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_systemid_requirements_missing(capfd, mocker, patch_rhn, import_libxml):
+ """Check that missing dependencies are detected"""
+
+ mocker.patch('os.path.isfile', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.packaging.os.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'Missing arguments' in results['msg']
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters(capfd, patch_rhn):
+ """Failure must occurs when all parameters are missing"""
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'Missing arguments' in results['msg']
+
+
+TESTED_MODULE = rhn_register.__name__
+TEST_CASES = [
+ [
+ # Registering an unregistered host with channels
+ {
+ 'channels': 'rhel-x86_64-server-6',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('channel.software.setSystemChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 1,
+ 'systemid.call_count': 2,
+ 'changed': True,
+ 'msg': "System successfully registered to 'rhn.redhat.com'.",
+ 'run_command.call_count': 1,
+ 'run_command.call_args': '/usr/sbin/rhnreg_ks',
+ 'request_called': True,
+ 'unlink.call_count': 0,
+ }
+ ],
+ [
+ # Registering an unregistered host without channels
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 1,
+ 'systemid.call_count': 0,
+ 'changed': True,
+ 'msg': "System successfully registered to 'rhn.redhat.com'.",
+ 'run_command.call_count': 1,
+ 'run_command.call_args': '/usr/sbin/rhnreg_ks',
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ }
+ ],
+ [
+        # Register a host already registered, check that result is unchanged
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 0,
+ 'changed': False,
+ 'msg': 'System already registered.',
+ 'run_command.call_count': 0,
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ },
+ ],
+ [
+        # Unregister a host, check that result is changed
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.deleteSystems', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 1,
+ 'changed': True,
+ 'msg': 'System successfully unregistered from rhn.redhat.com.',
+ 'run_command.call_count': 0,
+ 'request_called': True,
+ 'unlink.call_count': 1,
+ }
+ ],
+ [
+        # Unregister an unregistered host (systemid missing) locally, check that result is unchanged
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 0,
+ 'changed': False,
+ 'msg': 'System already unregistered.',
+ 'run_command.call_count': 0,
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ }
+
+ ],
+ [
+        # Unregister an unknown host (a host with a systemid available locally), check that the result contains 'failed'
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.deleteSystems', xmlrpc_client.Fault(1003, 'The following systems were NOT deleted: 123456789')),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 1,
+ 'failed': True,
+ 'msg': "Failed to unregister: <Fault 1003: 'The following systems were NOT deleted: 123456789'>",
+ 'run_command.call_count': 0,
+ 'request_called': True,
+ 'unlink.call_count': 0,
+ }
+ ],
+]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_register_parameters(mocker, capfd, mock_request, patch_rhn, testcase):
+ # successful execution, no output
+ mocker.patch.object(basic.AnsibleModule, 'run_command', return_value=(0, '', ''))
+ mock_is_registered = mocker.patch.object(rhn_register.Rhn, 'is_registered', mocker.PropertyMock(return_value=testcase['is_registered']))
+ mocker.patch.object(rhn_register.Rhn, 'enable')
+ mock_systemid = mocker.patch.object(rhn_register.Rhn, 'systemid', mocker.PropertyMock(return_value=12345))
+ mocker.patch('os.unlink', return_value=True)
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+
+ assert basic.AnsibleModule.run_command.call_count == testcase['run_command.call_count']
+ if basic.AnsibleModule.run_command.call_count:
+ assert basic.AnsibleModule.run_command.call_args[0][0][0] == testcase['run_command.call_args']
+
+ assert mock_is_registered.call_count == testcase['is_registered.call_count']
+ assert rhn_register.Rhn.enable.call_count == testcase['enable.call_count']
+ assert mock_systemid.call_count == testcase['systemid.call_count']
+ assert xmlrpc_client.Transport.request.called == testcase['request_called']
+ assert os.unlink.call_count == testcase['unlink.call_count']
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('changed') == testcase.get('changed')
+ assert results.get('failed') == testcase.get('failed')
+ assert results['msg'] == testcase['msg']
+ assert not testcase['calls'] # all calls should have been consumed
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py
new file mode 100644
index 00000000..a75ec694
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/packaging/os/test_rhsm_release.py
@@ -0,0 +1,141 @@
+# (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.plugins.modules.packaging.os import rhsm_release
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args)
+
+
+class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
+ module = rhsm_release
+
+ def setUp(self):
+ super(RhsmRepositoryReleaseModuleTestCase, self).setUp()
+
+        # We are mainly interested in whether the subscription-manager calls are
+        # right for the given module args, so patch out run_command in the module.
+        # The mock returns an (rc, out, err) tuple.
+ self.mock_run_command = patch('ansible_collections.community.general.plugins.modules.packaging.os.rhsm_release.'
+ 'AnsibleModule.run_command')
+ self.module_main_command = self.mock_run_command.start()
+
+ # Module does a get_bin_path check before every run_command call
+ self.mock_get_bin_path = patch('ansible_collections.community.general.plugins.modules.packaging.os.rhsm_release.'
+ 'AnsibleModule.get_bin_path')
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.get_bin_path.return_value = '/testbin/subscription-manager'
+
+ def tearDown(self):
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ super(RhsmRepositoryReleaseModuleTestCase, self).tearDown()
+
+ def module_main(self, exit_exc):
+ with self.assertRaises(exit_exc) as exc:
+ self.module.main()
+ return exc.exception.args[0]
+
+ def test_release_set(self):
+ # test that the module attempts to change the release when the current
+        # release is not the same as the user-specified target release
+ set_module_args({'release': '7.5'})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns different version so set_release is called
+ (0, '7.4', ''),
+ # second call, set_release: just needs to exit with 0 rc
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertEqual('7.5', result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ call('/testbin/subscription-manager release --set 7.5', check_rc=True),
+ ])
+
+ def test_release_set_idempotent(self):
+ # test that the module does not attempt to change the release when
+ # the current release matches the user-specified target release
+ set_module_args({'release': '7.5'})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns same version, set_release is not called
+ (0, '7.5', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.assertEqual('7.5', result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ ])
+
+ def test_release_unset(self):
+        # test that the module attempts to unset the release when a release
+        # is currently set
+ set_module_args({'release': None})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns version so set_release is called
+ (0, '7.5', ''),
+ # second call, set_release: just needs to exit with 0 rc
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertIsNone(result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ call('/testbin/subscription-manager release --unset', check_rc=True),
+ ])
+
+ def test_release_unset_idempotent(self):
+        # test that the module does not attempt to unset the release when
+        # no release is currently set
+ set_module_args({'release': None})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns no version, set_release is not called
+ (0, 'Release not set', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.assertIsNone(result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ ])
+
+ def test_release_insane(self):
+ # test that insane values for release trigger fail_json
+ insane_value = 'this is an insane release value'
+ set_module_args({'release': insane_value})
+
+ result = self.module_main(AnsibleFailJson)
+
+ # also ensure that the fail msg includes the insane value
+ self.assertIn(insane_value, result['msg'])
+
+ def test_release_matcher(self):
+ # throw a few values at the release matcher -- only sane_values should match
+ sane_values = ['1Server', '10Server', '1.10', '10.0']
+ insane_values = [
+ '6server', # lowercase 's'
+ '100Server', # excessively long 'x' component
+ '100.0', # excessively long 'x' component
+ '6.100', # excessively long 'y' component
+ '100.100', # excessively long 'x' and 'y' components
+ ]
+
+ matches = self.module.release_matcher.findall(' '.join(sane_values + insane_values))
+
+ # matches should be returned in the same order they were parsed,
+ # so sorting shouldn't be necessary here
+ self.assertEqual(matches, sane_values)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py
new file mode 100644
index 00000000..5e825c42
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/dellemc/test_ome_device_info.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 2.0
+# Copyright (C) 2019 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, \
+ fail_json, AnsibleFailJson, AnsibleExitJson
+from ansible.module_utils import basic
+from ansible_collections.community.general.plugins.modules.remote_management.dellemc import ome_device_info
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+default_args = {'hostname': '192.168.0.1', 'username': 'username', 'password': 'password'}
+resource_basic_inventory = {"basic_inventory": "DeviceService/Devices"}
+resource_detailed_inventory = {"detailed_inventory:": {"device_id": {1234: None},
+ "device_service_tag": {1345: "MXL1234"}}}
+
+
+class TestOmeDeviceInfo(object):
+ module = ome_device_info
+
+ @pytest.fixture(autouse=True)
+ def module_mock(self, mocker):
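+        """Patch AnsibleModule.exit_json and fail_json so module results raise catchable exceptions."""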
+ return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+
+ @pytest.fixture
+ def connection_mock(self, mocker):
+ connection_class_mock = mocker.patch('ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info.RestOME')
+ return connection_class_mock.return_value
+
+ @pytest.fixture
+ def response_mock(self, mocker):
+ response_class_mock = mocker.patch('ansible_collections.community.general.plugins.module_utils.remote_management.dellemc.ome.OpenURLResponse')
+ return response_class_mock
+
+ @pytest.fixture
+ def validate_inputs_mock(self, mocker):
+ response_class_mock = mocker.patch('ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._validate_inputs')
+ response_class_mock.return_value = None
+
+ @pytest.fixture
+ def get_device_identifier_map_mock(self, mocker):
+ response_class_mock = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_device_identifier_map'
+ )
+ response_class_mock.return_value = resource_detailed_inventory
+ return response_class_mock.return_value
+
+ @pytest.fixture
+ def get_resource_parameters_mock(self, mocker):
+ response_class_mock = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_resource_parameters'
+ )
+ return response_class_mock
+
+ def test_main_basic_inventory_success_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock):
+ get_resource_parameters_mock.return_value = resource_basic_inventory
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.return_value = response_mock
+ response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]}
+ response_mock.status_code = 200
+ result = self._run_module(default_args)
+ assert result['changed'] is False
+ assert 'device_info' in result
+
+ def test_main_basic_inventory_failure_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock):
+ get_resource_parameters_mock.return_value = resource_basic_inventory
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.return_value = response_mock
+ response_mock.status_code = 500
+ result = self._run_module_with_fail_json(default_args)
+ assert result['msg'] == 'Failed to fetch the device information'
+
+ def test_main_detailed_inventory_success_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock):
+ default_args.update({"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [1234], "device_service_tag": ["MXL1234"]}})
+ detailed_inventory = {"detailed_inventory:": {"device_id": {1234: "DeviceService/Devices(1234)/InventoryDetails"},
+ "device_service_tag": {"MXL1234": "DeviceService/Devices(4321)/InventoryDetails"}}}
+ get_resource_parameters_mock.return_value = detailed_inventory
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.return_value = response_mock
+ response_mock.json_data = {"value": [{"device_id": {"1234": "details"}}, {"device_service_tag": {"MXL1234": "details"}}]}
+ response_mock.status_code = 200
+ result = self._run_module(default_args)
+ assert result['changed'] is False
+ assert 'device_info' in result
+
+ def test_main_HTTPError_error_case(self, module_mock, validate_inputs_mock, connection_mock, get_resource_parameters_mock, response_mock):
+ get_resource_parameters_mock.return_value = resource_basic_inventory
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None)
+ response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]}
+ response_mock.status_code = 400
+ result = self._run_module_with_fail_json(default_args)
+ assert 'device_info' not in result
+ assert result['failed'] is True
+
+ @pytest.mark.parametrize("fact_subset, mutually_exclusive_call", [("basic_inventory", False), ("detailed_inventory", True)])
+ def test_validate_inputs(self, fact_subset, mutually_exclusive_call, mocker):
+ module_params = {"fact_subset": fact_subset}
+ check_mutually_inclusive_arguments_mock = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._check_mutually_inclusive_arguments')
+ check_mutually_inclusive_arguments_mock.return_value = None
+ self.module._validate_inputs(module_params)
+ if mutually_exclusive_call:
+ check_mutually_inclusive_arguments_mock.assert_called()
+ else:
+ check_mutually_inclusive_arguments_mock.assert_not_called()
+ check_mutually_inclusive_arguments_mock.reset_mock()
+
+ system_query_options_params = [{"system_query_options": None}, {"system_query_options": {"device_id": None}},
+ {"system_query_options": {"device_service_tag": None}}]
+
+ @pytest.mark.parametrize("system_query_options_params", system_query_options_params)
+ def test_check_mutually_inclusive_arguments(self, system_query_options_params):
+ module_params = {"fact_subset": "subsystem_health"}
+ required_args = ["device_id", "device_service_tag"]
+ module_params.update(system_query_options_params)
+ with pytest.raises(ValueError) as ex:
+ self.module._check_mutually_inclusive_arguments(module_params["fact_subset"], module_params, ["device_id", "device_service_tag"])
+ assert "One of the following {0} is required for {1}".format(required_args, module_params["fact_subset"]) == str(ex.value)
+
+ params = [{"fact_subset": "basic_inventory", "system_query_options": {"device_id": [1234]}},
+ {"fact_subset": "subsystem_health", "system_query_options": {"device_service_tag": ["MXL1234"]}},
+ {"fact_subset": "detailed_inventory", "system_query_options": {"device_id": [1234], "inventory_type": "serverDeviceCards"}}]
+
+ @pytest.mark.parametrize("module_params", params)
+ def test_get_resource_parameters(self, module_params, connection_mock):
+ self.module._get_resource_parameters(module_params, connection_mock)
+
+ @pytest.mark.parametrize("module_params,data", [({"system_query_options": None}, None), ({"system_query_options": {"fileter": None}}, None),
+ ({"system_query_options": {"filter": "abc"}}, "$filter")])
+ def test_get_query_parameters(self, module_params, data):
+ res = self.module._get_query_parameters(module_params)
+ if data is not None:
+ assert data in res
+ else:
+ assert res is None
+
+ @pytest.mark.parametrize("module_params", params)
+ def test_get_device_identifier_map(self, module_params, connection_mock, mocker):
+ get_device_id_from_service_tags_mock = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.remote_management.dellemc.ome_device_info._get_device_id_from_service_tags'
+ )
+ get_device_id_from_service_tags_mock.return_value = None
+ res = self.module._get_device_identifier_map(module_params, connection_mock)
+ assert isinstance(res, dict)
+
+ def test_check_duplicate_device_id(self):
+ self.module._check_duplicate_device_id([1234], {1234: "MX1234"})
+ assert self.module.device_fact_error_report["MX1234"] == "Duplicate report of device_id: 1234"
+
+ @pytest.mark.parametrize("val,expected_res", [(123, True), ("abc", False)])
+ def test_is_int(self, val, expected_res):
+ actual_res = self.module.is_int(val)
+ assert actual_res == expected_res
+
+ def test_get_device_id_from_service_tags(self, connection_mock, response_mock):
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.return_value = response_mock
+ response_mock.json_data = {"value": [{"DeviceServiceTag": "MX1234", "Id": 1234}]}
+ response_mock.status_code = 200
+ response_mock.success = True
+ self.module._get_device_id_from_service_tags(["MX1234", "INVALID"], connection_mock)
+
+ def test_get_device_id_from_service_tags_error_case(self, connection_mock, response_mock):
+ connection_mock.__enter__.return_value = connection_mock
+ connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com',
+ 400, '', {}, None)
+ response_mock.json_data = {"value": [{"DeviceServiceTag": "MX1234", "Id": 1234}]}
+ response_mock.status_code = 200
+ response_mock.success = True
+ with pytest.raises(HTTPError) as ex:
+ self.module._get_device_id_from_service_tags(["INVALID"], connection_mock)
+
+ def _run_module(self, module_args):
+ set_module_args(module_args)
+ with pytest.raises(AnsibleExitJson) as ex:
+ self.module.main()
+ return ex.value.args[0]
+
+ def _run_module_with_fail_json(self, module_args):
+ set_module_args(module_args)
+ with pytest.raises(AnsibleFailJson) as exc:
+ self.module.main()
+ result = exc.value.args[0]
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py
new file mode 100644
index 00000000..ab1333de
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py
@@ -0,0 +1,99 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.plugins.modules.remote_management.lxca import lxca_cmms
+
+
+@pytest.fixture(scope='module')
+@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
+def setup_module(close_conn):
+ close_conn.return_value = True
+
+
+class TestMyModule():
+ @pytest.mark.parametrize('patch_ansible_module',
+ [
+ {},
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_user": "USERID",
+ },
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_password": "Password",
+ },
+ {
+ "login_user": "USERID",
+ "login_password": "Password",
+ },
+ ],
+ indirect=['patch_ansible_module'])
+ @pytest.mark.usefixtures('patch_ansible_module')
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_cmms.execute_module', autospec=True)
+ def test_without_required_parameters(self, _setup_conn, _execute_module,
+ mocker, capfd, setup_module):
+ """Failure must occurs when all parameters are missing"""
+ with pytest.raises(SystemExit):
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = "Fake execution"
+ lxca_cmms.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_cmms.execute_module', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_cmms.AnsibleModule', autospec=True)
+ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
+ expected_arguments_spec = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ auth_url=dict(required=True),
+ uuid=dict(default=None),
+ chassis=dict(default=None),
+ )
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = []
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "command_options": "cmms",
+ }
+ mod_obj.params = args
+ lxca_cmms.main()
+ assert(mock.call(argument_spec=expected_arguments_spec,
+ supports_check_mode=False) == ansible_mod_cls.call_args)
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_cmms._cmms_by_uuid',
+ autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_cmms.AnsibleModule',
+ autospec=True)
+ def test__cmms_empty_list(self, ansible_mod_cls, _get_cmms, _setup_conn, setup_module):
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "uuid": "3C737AA5E31640CE949B10C129A8B01F",
+ "command_options": "cmms_by_uuid",
+ }
+ mod_obj.params = args
+ _setup_conn.return_value = "Fake connection"
+ empty_nodes_list = []
+ _get_cmms.return_value = empty_nodes_list
+ ret_cmms = _get_cmms(mod_obj, args)
+ assert mock.call(mod_obj, mod_obj.params) == _get_cmms.call_args
+ assert _get_cmms.return_value == ret_cmms
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py
new file mode 100644
index 00000000..7b009ef2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py
@@ -0,0 +1,103 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.plugins.modules.remote_management.lxca import lxca_nodes
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import setup_conn
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import close_conn
+
+
+@pytest.fixture(scope='module')
+@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
+def setup_module(close_conn):
+ close_conn.return_value = True
+
+
+class TestMyModule():
+ @pytest.mark.parametrize('patch_ansible_module',
+ [
+ {},
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_user": "USERID",
+ },
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_password": "Password",
+ },
+ {
+ "login_user": "USERID",
+ "login_password": "Password",
+ },
+ ],
+ indirect=['patch_ansible_module'])
+ @pytest.mark.usefixtures('patch_ansible_module')
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_nodes.execute_module', autospec=True)
+ def test_without_required_parameters(self, _setup_conn, _execute_module,
+ mocker, capfd, setup_module):
+ """Failure must occurs when all parameters are missing"""
+ with pytest.raises(SystemExit):
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = "Fake execution"
+ lxca_nodes.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_nodes.execute_module', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_nodes.AnsibleModule', autospec=True)
+ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
+ expected_arguments_spec = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ auth_url=dict(required=True),
+ uuid=dict(default=None),
+ chassis=dict(default=None),
+ )
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = []
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "command_options": "nodes",
+ }
+ mod_obj.params = args
+ lxca_nodes.main()
+ assert(mock.call(argument_spec=expected_arguments_spec,
+ supports_check_mode=False) == ansible_mod_cls.call_args)
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_nodes._nodes_by_uuid',
+ autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.remote_management.lxca.lxca_nodes.AnsibleModule',
+ autospec=True)
+ def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "uuid": "3C737AA5E31640CE949B10C129A8B01F",
+ "command_options": "nodes_by_uuid",
+ }
+ mod_obj.params = args
+ _setup_conn.return_value = "Fake connection"
+ empty_nodes_list = []
+ _get_nodes.return_value = empty_nodes_list
+ ret_nodes = _get_nodes(mod_obj, args)
+ assert mock.call(mod_obj, mod_obj.params) == _get_nodes.call_args
+ assert _get_nodes.return_value == ret_nodes
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/conftest.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/conftest.py
new file mode 100644
index 00000000..740c7174
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/conftest.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from mock import Mock, patch
+from .oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
+from hpOneView.oneview_client import OneViewClient
+
+
+@pytest.fixture
+def mock_ov_client():
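+    """Patch OneViewClient.from_json_file and return the mocked client instance."""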
+ patcher_json_file = patch.object(OneViewClient, 'from_json_file')
+ client = patcher_json_file.start()
+ return client.return_value
+
+
+@pytest.fixture
+def mock_ansible_module():
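+    """Patch AnsibleModule in the OneView module_utils and return the Mock used as the module instance."""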
+ patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
+ patcher_ansible = patcher_ansible.start()
+ ansible_module = Mock()
+ patcher_ansible.return_value = ansible_module
+ return ansible_module
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py
new file mode 100644
index 00000000..1cd6fb78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import re
+import yaml
+
+from mock import Mock, patch
+from .oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
+from hpOneView.oneview_client import OneViewClient
+
+
+class OneViewBaseTest(object):
+ @pytest.fixture(autouse=True)
+ def setUp(self, mock_ansible_module, mock_ov_client, request):
+ marker = request.node.get_marker('resource')
+ self.resource = getattr(mock_ov_client, "%s" % (marker.args))
+ self.mock_ov_client = mock_ov_client
+ self.mock_ansible_module = mock_ansible_module
+
+ @pytest.fixture
+ def testing_module(self):
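+        # Derive the module name (e.g. 'oneview_fc_network') from the test class name,
+        # import that module and remember the class under test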
+ resource_name = type(self).__name__.replace('Test', '')
+ resource_module_path_name = resource_name.replace('Module', '')
+ resource_module_path_name = re.findall('[A-Z][^A-Z]*', resource_module_path_name)
+ resource_module_path_name = 'oneview_' + str.join('_', resource_module_path_name).lower()
+
+ ansible_collections = __import__('ansible_collections')
+ oneview_module = ansible_collections.community.general.plugins.modules.remote_management.oneview
+ resource_module = getattr(oneview_module, resource_module_path_name)
+ self.testing_class = getattr(resource_module, resource_name)
+ testing_module = self.testing_class.__module__.split('.')[-1]
+ testing_module = getattr(oneview_module, testing_module)
+ try:
+ # Load scenarios from module examples (Also checks if it is a valid yaml)
+ EXAMPLES = yaml.load(testing_module.EXAMPLES, yaml.SafeLoader)
+
+ except yaml.scanner.ScannerError:
+ message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
+ raise Exception(message)
+ return testing_module
+
+ def test_main_function_should_call_run_method(self, testing_module, mock_ansible_module):
+ mock_ansible_module.params = {'config': 'config.json'}
+
+ main_func = getattr(testing_module, 'main')
+
+ with patch.object(self.testing_class, "run") as mock_run:
+ main_func()
+ mock_run.assert_called_once()
+
+
+class FactsParamsTest(OneViewBaseTest):
+ def test_should_get_all_using_filters(self, testing_module):
+ self.resource.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None,
+ params={
+ 'start': 1,
+ 'count': 3,
+ 'sort': 'name:descending',
+ 'filter': 'purpose=General',
+ 'query': 'imported eq true'
+ })
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource.get_all.assert_called_once_with(start=1, count=3, sort='name:descending', filter='purpose=General', query='imported eq true')
+
+ def test_should_get_all_without_params(self, testing_module):
+ self.resource.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None
+ )
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource.get_all.assert_called_once_with()
+
+
+class OneViewBaseTestCase(object):
+ mock_ov_client_from_json_file = None
+ testing_class = None
+ mock_ansible_module = None
+ mock_ov_client = None
+ testing_module = None
+ EXAMPLES = None
+
+ def configure_mocks(self, test_case, testing_class):
+ """
+ Preload mocked OneViewClient instance and AnsibleModule
+ Args:
+            test_case (object): class instance (self) that inherits from OneViewBaseTestCase
+ testing_class (object): class being tested
+ """
+ self.testing_class = testing_class
+
+ # Define OneView Client Mock (FILE)
+ patcher_json_file = patch.object(OneViewClient, 'from_json_file')
+ test_case.addCleanup(patcher_json_file.stop)
+ self.mock_ov_client_from_json_file = patcher_json_file.start()
+
+ # Define OneView Client Mock
+ self.mock_ov_client = self.mock_ov_client_from_json_file.return_value
+
+ # Define Ansible Module Mock
+ patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
+ test_case.addCleanup(patcher_ansible.stop)
+ mock_ansible_module = patcher_ansible.start()
+ self.mock_ansible_module = Mock()
+ mock_ansible_module.return_value = self.mock_ansible_module
+
+ self.__set_module_examples()
+
+ def test_main_function_should_call_run_method(self):
+ self.mock_ansible_module.params = {'config': 'config.json'}
+
+ main_func = getattr(self.testing_module, 'main')
+
+ with patch.object(self.testing_class, "run") as mock_run:
+ main_func()
+ mock_run.assert_called_once()
+
+ def __set_module_examples(self):
+ # Load scenarios from module examples (Also checks if it is a valid yaml)
+ ansible_collections = __import__('ansible_collections')
+ testing_module = self.testing_class.__module__.split('.')[-1]
+ self.testing_module = getattr(ansible_collections.community.general.plugins.modules.remote_management.oneview, testing_module)
+
+ try:
+ # Load scenarios from module examples (Also checks if it is a valid yaml)
+ self.EXAMPLES = yaml.load(self.testing_module.EXAMPLES, yaml.SafeLoader)
+
+ except yaml.scanner.ScannerError:
+ message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
+ raise Exception(message)
+
+
+class FactsParamsTestCase(OneViewBaseTestCase):
+ """
+    FactsParamsTestCase has common tests for classes that support passing additional
+    parameters when retrieving all resources.
+ """
+
+    def configure_client_mock(self, resource_client):
+        """
+        Args:
+            resource_client: Resource client that is being called
+        """
+        self.resource_client = resource_client
+
+ def __validations(self):
+ if not self.testing_class:
+ raise Exception("Mocks are not configured, you must call 'configure_mocks' before running this test.")
+
+ if not self.resource_client:
+ raise Exception(
+ "Mock for the client not configured, you must call 'configure_client_mock' before running this test.")
+
+ def test_should_get_all_using_filters(self):
+ self.__validations()
+ self.resource_client.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None,
+ params={
+ 'start': 1,
+ 'count': 3,
+ 'sort': 'name:descending',
+ 'filter': 'purpose=General',
+ 'query': 'imported eq true'
+ })
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource_client.get_all.assert_called_once_with(start=1, count=3, sort='name:descending',
+ filter='purpose=General',
+ query='imported eq true')
+
+ def test_should_get_all_without_params(self):
+ self.__validations()
+ self.resource_client.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None
+ )
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource_client.get_all.assert_called_once_with()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py
new file mode 100644
index 00000000..3b41cee1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from ansible_collections.community.general.tests.unit.compat.mock import Mock
+
+# FIXME: These should be done inside of a fixture so that they're only mocked during
+# these unittests
+sys.modules['hpOneView'] = Mock()
+sys.modules['hpOneView.oneview_client'] = Mock()
+
+ONEVIEW_MODULE_UTILS_PATH = 'ansible_collections.community.general.plugins.module_utils.oneview'
+from ansible_collections.community.general.plugins.module_utils.oneview import (OneViewModuleException,
+ OneViewModuleTaskError,
+ OneViewModuleResourceNotFound,
+ OneViewModuleBase)
+
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_ethernet_network import EthernetNetworkModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_ethernet_network_info import EthernetNetworkInfoModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fc_network import FcNetworkModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fc_network_info import FcNetworkInfoModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fcoe_network import FcoeNetworkModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_fcoe_network_info import FcoeNetworkInfoModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_network_set import NetworkSetModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_network_set_info import NetworkSetInfoModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_san_manager import SanManagerModule
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_san_manager_info import SanManagerInfoModule
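The FIXME above asks for the hpOneView stubbing to happen inside a fixture rather than at import time; a rough pytest-based sketch of that idea (an assumption about how it could be done, not how the collection currently does it):

import sys
from unittest import mock

import pytest


@pytest.fixture(autouse=True)
def stub_hp_oneview(monkeypatch):
    # the SDK modules are replaced only for the duration of each test,
    # and monkeypatch restores sys.modules automatically afterwards
    monkeypatch.setitem(sys.modules, 'hpOneView', mock.Mock())
    monkeypatch.setitem(sys.modules, 'hpOneView.oneview_client', mock.Mock())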
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py
new file mode 100644
index 00000000..d694d4a3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_datacenter_info import DatacenterInfoModule
+from .hpe_test_utils import FactsParamsTest
+
+PARAMS_GET_CONNECTED = dict(
+ config='config.json',
+ name="MyDatacenter",
+ options=['visualContent']
+)
+
+
+class TestDatacenterInfoModule(FactsParamsTest):
+ @pytest.fixture(autouse=True)
+ def setUp(self, mock_ansible_module, mock_ov_client):
+ self.resource = mock_ov_client.datacenters
+ self.mock_ansible_module = mock_ansible_module
+ self.mock_ov_client = mock_ov_client
+
+ def test_should_get_all_datacenters(self):
+ self.resource.get_all.return_value = {"name": "Data Center Name"}
+
+ self.mock_ansible_module.params = dict(config='config.json',)
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenters=({"name": "Data Center Name"})
+ )
+
+ def test_should_get_datacenter_by_name(self):
+ self.resource.get_by.return_value = [{"name": "Data Center Name"}]
+
+ self.mock_ansible_module.params = dict(config='config.json', name="MyDatacenter")
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenters=([{"name": "Data Center Name"}])
+ )
+
+ def test_should_get_datacenter_visual_content(self):
+ self.resource.get_by.return_value = [{"name": "Data Center Name", "uri": "/rest/datacenter/id"}]
+
+ self.resource.get_visual_content.return_value = {
+ "name": "Visual Content"}
+
+ self.mock_ansible_module.params = PARAMS_GET_CONNECTED
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenter_visual_content={'name': 'Visual Content'},
+ datacenters=[{'name': 'Data Center Name', 'uri': '/rest/datacenter/id'}]
+ )
+
+ def test_should_get_none_datacenter_visual_content(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_GET_CONNECTED
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenter_visual_content=None,
+ datacenters=[]
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py
new file mode 100644
index 00000000..493b83ed
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_enclosure_info import EnclosureInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=[]
+)
+
+PARAMS_GET_BY_NAME_WITH_OPTIONS = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=['utilization', 'environmentalConfiguration', 'script']
+)
+
+PARAMS_GET_UTILIZATION_WITH_PARAMS = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=[dict(utilization=dict(fields='AveragePower',
+ filter=['startDate=2016-06-30T03:29:42.000Z',
+ 'endDate=2016-07-01T03:29:42.000Z'],
+ view='day',
+ refresh=True))]
+)
+
+PRESENT_ENCLOSURES = [{
+ "name": "Test-Enclosure",
+ "uri": "/rest/enclosures/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+ENCLOSURE_SCRIPT = '# script content'
+
+ENCLOSURE_UTILIZATION = {
+ "isFresh": "True"
+}
+
+ENCLOSURE_ENVIRONMENTAL_CONFIG = {
+ "calibratedMaxPower": "2500"
+}
+
+
+class EnclosureInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, EnclosureInfoModule)
+ self.enclosures = self.mock_ov_client.enclosures
+ FactsParamsTestCase.configure_client_mock(self, self.enclosures)
+
+ def test_should_get_all_enclosures(self):
+ self.enclosures.get_all.return_value = PRESENT_ENCLOSURES
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ enclosures=(PRESENT_ENCLOSURES)
+ )
+
+ def test_should_get_enclosure_by_name(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ enclosures=(PRESENT_ENCLOSURES)
+
+ )
+
+ def test_should_get_enclosure_by_name_with_options(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ enclosures=PRESENT_ENCLOSURES,
+ enclosure_script=ENCLOSURE_SCRIPT,
+ enclosure_environmental_configuration=ENCLOSURE_ENVIRONMENTAL_CONFIG,
+ enclosure_utilization=ENCLOSURE_UTILIZATION
+ )
+
+ def test_should_get_all_utilization_data(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EnclosureInfoModule().run()
+
+ self.enclosures.get_utilization.assert_called_once_with(PRESENT_ENCLOSURES[0]['uri'], fields='', filter='',
+ view='', refresh='')
+
+ def test_should_get_utilization_with_parameters(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_UTILIZATION_WITH_PARAMS
+
+ EnclosureInfoModule().run()
+
+ date_filter = ["startDate=2016-06-30T03:29:42.000Z", "endDate=2016-07-01T03:29:42.000Z"]
+
+ self.enclosures.get_utilization.assert_called_once_with(
+ PRESENT_ENCLOSURES[0]['uri'], fields='AveragePower', filter=date_filter, view='day', refresh=True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py
new file mode 100644
index 00000000..205a1854
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import yaml
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .oneview_module_loader import EthernetNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+DEFAULT_ETHERNET_NAME = 'Test Ethernet Network'
+RENAMED_ETHERNET = 'Renamed Ethernet Network'
+
+DEFAULT_ENET_TEMPLATE = dict(
+ name=DEFAULT_ETHERNET_NAME,
+ vlanId=200,
+ ethernetNetworkType="Tagged",
+ purpose="General",
+ smartLink=False,
+ privateNetwork=False,
+ connectionTemplateUri=None
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_TO_RENAME = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME,
+ newName=RENAMED_ETHERNET)
+)
+
+YAML_PARAMS_WITH_CHANGES = """
+ config: "config.json"
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ connectionTemplateUri: ~
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+"""
+
+YAML_RESET_CONNECTION_TEMPLATE = """
+ config: "{{ config }}"
+ state: default_bandwidth_reset
+ data:
+ name: 'network name'
+"""
+
+PARAMS_FOR_SCOPES_SET = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_FOR_BULK_CREATED = dict(
+ config='config.json',
+ state='present',
+ data=dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10")
+)
+
+DEFAULT_BULK_ENET_TEMPLATE = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ {'name': 'TestNetwork_5', 'vlanId': 5},
+ {'name': 'TestNetwork_9', 'vlanId': 9},
+ {'name': 'TestNetwork_10', 'vlanId': 10},
+]
+
+DICT_PARAMS_WITH_CHANGES = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)["data"]
+
+
+class EthernetNetworkModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, EthernetNetworkModule)
+ self.resource = self.mock_ov_client.ethernet_networks
+
+ def test_should_create_new_ethernet_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_CREATED,
+ ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=EthernetNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['purpose'] = 'Management'
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+ self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_update_when_only_bandwidth_has_modified_attributes(self):
+ self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
+ self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=DICT_PARAMS_WITH_CHANGES)
+ )
+
+ def test_update_when_data_has_modified_attributes_but_bandwidth_is_equal(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['purpose'] = 'Management'
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+ self.mock_ov_client.connection_templates.get.return_value = {
+ "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_update_successfully_even_when_connection_template_uri_not_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ del data_merged['connectionTemplateUri']
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_rename_when_resource_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_ETHERNET
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = params_to_rename
+
+ EthernetNetworkModule().run()
+
+ self.resource.update.assert_called_once_with(data_merged)
+
+ def test_create_with_new_name_when_resource_not_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_ETHERNET
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = params_to_rename
+
+ EthernetNetworkModule().run()
+
+ self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
+
+ def test_should_remove_ethernet_network(self):
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_ethernet_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=EthernetNetworkModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_should_create_all_ethernet_networks(self):
+ self.resource.get_range.side_effect = [[], DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.create_bulk.return_value = DEFAULT_BULK_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with(
+ dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10"))
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_create_missing_ethernet_networks(self):
+ enet_get_range_return = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ ]
+
+ self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with(
+ dict(namePrefix="TestNetwork", vlanIdRange="5,9,10"))
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True, msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_create_missing_ethernet_networks_with_just_one_difference(self):
+ enet_get_range_return = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ ]
+
+ self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with({'vlanIdRange': '5-5', 'namePrefix': 'TestNetwork'})
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_do_nothing_when_ethernet_networks_already_exist(self):
+ self.resource.get_range.return_value = DEFAULT_BULK_ENET_TEMPLATE
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False, msg=EthernetNetworkModule.MSG_BULK_ALREADY_EXIST,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_reset_successfully(self):
+ self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
+ self.mock_ov_client.connection_templates.update.return_value = {'result': 'success'}
+ self.mock_ov_client.connection_templates.get.return_value = {
+ "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
+
+ self.mock_ov_client.connection_templates.get_default.return_value = {"bandwidth": {
+ "max": 1
+ }}
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_RESET_CONNECTION_TEMPLATE)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True, msg=EthernetNetworkModule.MSG_CONNECTION_TEMPLATE_RESET,
+ ansible_facts=dict(ethernet_network_connection_template={'result': 'success'}))
+
+ def test_should_fail_when_reset_not_existing_ethernet_network(self):
+ self.resource.get_by.return_value = [None]
+
+        self.mock_ansible_module.params = yaml.safe_load(YAML_RESET_CONNECTION_TEMPLATE)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=EthernetNetworkModule.MSG_ETHERNET_NETWORK_NOT_FOUND
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_ENET_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/ethernet/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ EthernetNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/ethernet/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(ethernet_network=patch_return),
+ msg=EthernetNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_ENET_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ EthernetNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(ethernet_network=resource_data),
+ msg=EthernetNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
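Several of the tests above push their parameters through yaml.safe_load rather than building dicts directly; a small standalone illustration (using a trimmed copy of the YAML shown earlier) of the structure the module then receives:

import yaml

PARAMS_YAML = """
config: "config.json"
state: present
data:
  name: 'Test Ethernet Network'
  purpose: Management
  bandwidth:
    maximumBandwidth: 3000
    typicalBandwidth: 2000
"""

params = yaml.safe_load(PARAMS_YAML)
assert params['state'] == 'present'
assert params['data']['bandwidth']['maximumBandwidth'] == 3000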
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py
new file mode 100644
index 00000000..bc25a030
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from .oneview_module_loader import EthernetNetworkInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test Ethernet Network",
+ options=[]
+)
+
+PARAMS_GET_BY_NAME_WITH_OPTIONS = dict(
+ config='config.json',
+ name="Test Ethernet Network",
+ options=['associatedProfiles', 'associatedUplinkGroups']
+)
+
+PRESENT_ENETS = [{
+ "name": "Test Ethernet Network",
+ "uri": "/rest/ethernet-networks/d34dcf5e-0d8e-441c-b00d-e1dd6a067188"
+}]
+
+ENET_ASSOCIATED_UPLINK_GROUP_URIS = [
+ "/rest/uplink-sets/c6bf9af9-48e7-4236-b08a-77684dc258a5",
+ "/rest/uplink-sets/e2f0031b-52bd-4223-9ac1-d91cb519d548"
+]
+
+ENET_ASSOCIATED_PROFILE_URIS = [
+ "/rest/server-profiles/83e2e117-59dc-4e33-9f24-462af951cbbe",
+ "/rest/server-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d"
+]
+
+ENET_ASSOCIATED_UPLINK_GROUPS = [dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[0], name='Uplink Set 1'),
+ dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[1], name='Uplink Set 2')]
+
+ENET_ASSOCIATED_PROFILES = [dict(uri=ENET_ASSOCIATED_PROFILE_URIS[0], name='Server Profile 1'),
+ dict(uri=ENET_ASSOCIATED_PROFILE_URIS[1], name='Server Profile 2')]
+
+
+class EthernetNetworkInfoSpec(unittest.TestCase,
+ FactsParamsTestCase
+ ):
+ def setUp(self):
+ self.configure_mocks(self, EthernetNetworkInfoModule)
+ self.ethernet_networks = self.mock_ov_client.ethernet_networks
+ FactsParamsTestCase.configure_client_mock(self, self.ethernet_networks)
+
+ def test_should_get_all_enets(self):
+ self.ethernet_networks.get_all.return_value = PRESENT_ENETS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ethernet_networks=(PRESENT_ENETS)
+ )
+
+ def test_should_get_enet_by_name(self):
+ self.ethernet_networks.get_by.return_value = PRESENT_ENETS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ethernet_networks=(PRESENT_ENETS)
+ )
+
+ def test_should_get_enet_by_name_with_options(self):
+ self.ethernet_networks.get_by.return_value = PRESENT_ENETS
+ self.ethernet_networks.get_associated_profiles.return_value = ENET_ASSOCIATED_PROFILE_URIS
+ self.ethernet_networks.get_associated_uplink_groups.return_value = ENET_ASSOCIATED_UPLINK_GROUP_URIS
+ self.mock_ov_client.server_profiles.get.side_effect = ENET_ASSOCIATED_PROFILES
+ self.mock_ov_client.uplink_sets.get.side_effect = ENET_ASSOCIATED_UPLINK_GROUPS
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ethernet_networks=PRESENT_ENETS,
+ enet_associated_profiles=ENET_ASSOCIATED_PROFILES,
+ enet_associated_uplink_groups=ENET_ASSOCIATED_UPLINK_GROUPS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py
new file mode 100644
index 00000000..04bb42ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_FC_NETWORK_TEMPLATE = dict(
+ name='New FC Network 2',
+ autoLoginRedistribution=True,
+ fabricType='FabricAttach'
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'],
+ newName="New Name",
+ fabricType='DirectAttach')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
+)
+
+
+class FcNetworkModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, FcNetworkModule)
+ self.resource = self.mock_ov_client.fc_networks
+
+ def test_should_create_new_fc_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_FC_NETWORK_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_CREATED,
+ ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(fc_network=data_merged)
+ )
+
+ def test_should_remove_fc_network(self):
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_fc_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcNetworkModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/fc/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ FcNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/fc/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(fc_network=patch_return),
+ msg=FcNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ FcNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(fc_network=resource_data),
+ msg=FcNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py
new file mode 100644
index 00000000..6096aff7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcNetworkInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test FC Network"
+)
+
+PRESENT_NETWORKS = [{
+ "name": "Test FC Network",
+ "uri": "/rest/fc-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+
+class FcNetworkInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, FcNetworkInfoModule)
+ self.fc_networks = self.mock_ov_client.fc_networks
+ FactsParamsTestCase.configure_client_mock(self, self.fc_networks)
+
+ def test_should_get_all_fc_networks(self):
+ self.fc_networks.get_all.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ FcNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fc_networks=PRESENT_NETWORKS
+ )
+
+ def test_should_get_fc_network_by_name(self):
+ self.fc_networks.get_by.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ FcNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fc_networks=PRESENT_NETWORKS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py
new file mode 100644
index 00000000..af00803f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcoeNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_FCOE_NETWORK_TEMPLATE = dict(
+ name='New FCoE Network 2',
+ vlanId="201",
+ connectionTemplateUri=None
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'],
+ fabricType='DirectAttach',
+ newName='New Name')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'])
+)
+
+
+class FcoeNetworkSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, FcoeNetworkModule)
+ self.resource = self.mock_ov_client.fcoe_networks
+
+ def test_should_create_new_fcoe_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_FCOE_NETWORK_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_CREATED,
+ ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT.copy()
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcoeNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(fcoe_network=data_merged)
+ )
+
+ def test_should_remove_fcoe_network(self):
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_fcoe_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcoeNetworkModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/fcoe/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ FcoeNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/fcoe/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(fcoe_network=patch_return),
+ msg=FcoeNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ FcoeNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(fcoe_network=resource_data),
+ msg=FcoeNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py
new file mode 100644
index 00000000..7dd7309d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from .oneview_module_loader import FcoeNetworkInfoModule
+
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test FCoE Networks"
+)
+
+PRESENT_NETWORKS = [{
+ "name": "Test FCoE Networks",
+ "uri": "/rest/fcoe-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+
+class FcoeNetworkInfoSpec(unittest.TestCase,
+ FactsParamsTestCase
+ ):
+ def setUp(self):
+ self.configure_mocks(self, FcoeNetworkInfoModule)
+ self.fcoe_networks = self.mock_ov_client.fcoe_networks
+ FactsParamsTestCase.configure_client_mock(self, self.fcoe_networks)
+
+ def test_should_get_all_fcoe_network(self):
+ self.fcoe_networks.get_all.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ FcoeNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fcoe_networks=PRESENT_NETWORKS
+ )
+
+ def test_should_get_fcoe_network_by_name(self):
+ self.fcoe_networks.get_by.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ FcoeNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fcoe_networks=PRESENT_NETWORKS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py
new file mode 100644
index 00000000..be7d9662
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_logical_interconnect_group import LogicalInterconnectGroupModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_LIG_NAME = 'Test Logical Interconnect Group'
+RENAMED_LIG = 'Renamed Logical Interconnect Group'
+
+DEFAULT_LIG_TEMPLATE = dict(
+ name=DEFAULT_LIG_NAME,
+ uplinkSets=[],
+ enclosureType='C7000',
+ interconnectMapTemplate=dict(
+ interconnectMapEntryTemplates=[]
+ )
+)
+
+PARAMS_LIG_TEMPLATE_WITH_MAP = dict(
+ config='config.json',
+ state='present',
+ data=dict(
+ name=DEFAULT_LIG_NAME,
+ uplinkSets=[],
+ enclosureType='C7000',
+ interconnectMapTemplate=dict(
+ interconnectMapEntryTemplates=[
+ {
+ "logicalDownlinkUri": None,
+ "logicalLocation": {
+ "locationEntries": [
+ {
+ "relativeValue": "1",
+ "type": "Bay"
+ },
+ {
+ "relativeValue": 1,
+ "type": "Enclosure"
+ }
+ ]
+ },
+ "permittedInterconnectTypeName": "HP VC Flex-10/10D Module"
+ }]
+ )
+ ))
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME)
+)
+
+PARAMS_TO_RENAME = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME,
+ newName=RENAMED_LIG)
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME,
+ description='It is an example')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_LIG_NAME)
+)
+
+
+class LogicalInterconnectGroupGeneralSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ def setUp(self):
+ self.configure_mocks(self, LogicalInterconnectGroupModule)
+ self.resource = self.mock_ov_client.logical_interconnect_groups
+
+ def test_should_create_new_lig(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_LIG_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_CREATED,
+ ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE)
+ )
+
+ def test_should_create_new_with_named_permitted_interconnect_type(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = PARAMS_FOR_PRESENT
+
+ self.mock_ansible_module.params = deepcopy(PARAMS_LIG_TEMPLATE_WITH_MAP)
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_CREATED,
+ ansible_facts=dict(logical_interconnect_group=PARAMS_FOR_PRESENT.copy())
+ )
+
+ def test_should_fail_when_permitted_interconnect_type_name_not_exists(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = PARAMS_FOR_PRESENT
+ self.mock_ov_client.interconnect_types.get_by.return_value = []
+
+ self.mock_ansible_module.params = deepcopy(PARAMS_LIG_TEMPLATE_WITH_MAP)
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=LogicalInterconnectGroupModule.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['description'] = 'New description'
+
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_UPDATED,
+ ansible_facts=dict(logical_interconnect_group=data_merged)
+ )
+
+ def test_rename_when_resource_exists(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_LIG
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = params_to_rename
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.update.assert_called_once_with(data_merged)
+
+ def test_create_with_newName_when_resource_not_exists(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_LIG
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_LIG_TEMPLATE
+
+ self.mock_ansible_module.params = params_to_rename
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
+
+ def test_should_remove_lig(self):
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_lig_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_LIG_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/lig/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/lig/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(logical_interconnect_group=patch_return),
+ msg=LogicalInterconnectGroupModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_LIG_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ LogicalInterconnectGroupModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(logical_interconnect_group=resource_data),
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py
new file mode 100644
index 00000000..dc16031f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.remote_management.oneview.oneview_logical_interconnect_group_info import (
+ LogicalInterconnectGroupInfoModule
+)
+from .hpe_test_utils import FactsParamsTestCase
+
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test Logical Interconnect Group"
+)
+
+PRESENT_LIGS = [{
+ "name": "Test Logical Interconnect Group",
+ "uri": "/rest/logical-interconnect-groups/ebb4ada8-08df-400e-8fac-9ff987ac5140"
+}]
+
+
+class LogicalInterconnectGroupInfoSpec(unittest.TestCase, FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, LogicalInterconnectGroupInfoModule)
+ self.logical_interconnect_groups = self.mock_ov_client.logical_interconnect_groups
+ FactsParamsTestCase.configure_client_mock(self, self.logical_interconnect_groups)
+
+ def test_should_get_all_ligs(self):
+ self.logical_interconnect_groups.get_all.return_value = PRESENT_LIGS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ LogicalInterconnectGroupInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ logical_interconnect_groups=(PRESENT_LIGS)
+ )
+
+ def test_should_get_lig_by_name(self):
+ self.logical_interconnect_groups.get_by.return_value = PRESENT_LIGS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ LogicalInterconnectGroupInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ logical_interconnect_groups=(PRESENT_LIGS)
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py
new file mode 100644
index 00000000..b4106064
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .hpe_test_utils import OneViewBaseTestCase
+from .oneview_module_loader import NetworkSetModule
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+NETWORK_SET = dict(
+ name='OneViewSDK Test Network Set',
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
+)
+
+NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=NETWORK_SET['name'],
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=NETWORK_SET['name'],
+ newName=NETWORK_SET['name'] + " - Renamed",
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=NETWORK_SET['name'])
+)
+
+
+class NetworkSetModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+    OneViewBaseTestCase has common tests for the class constructor and main function,
+    and also provides the mocks used in this test case.
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, NetworkSetModule)
+ self.resource = self.mock_ov_client.network_sets
+ self.ethernet_network_client = self.mock_ov_client.ethernet_networks
+
+ def test_should_create_new_network_set(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = NETWORK_SET
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_CREATED,
+ ansible_facts=dict(network_set=NETWORK_SET)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [NETWORK_SET]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=NetworkSetModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(network_set=NETWORK_SET)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = dict(name=NETWORK_SET['name'] + " - Renamed",
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
+ '/rest/ethernet-networks/ddd-eee-fff']
+ )
+
+ self.resource.get_by.side_effect = [NETWORK_SET], []
+ self.resource.update.return_value = data_merged
+ self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_UPDATED,
+ ansible_facts=dict(network_set=data_merged)
+ )
+
+ def test_should_raise_exception_when_ethernet_network_not_found(self):
+ self.resource.get_by.side_effect = [NETWORK_SET], []
+ self.ethernet_network_client.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND + "Name of a Network"
+ )
+
+ def test_should_remove_network(self):
+ self.resource.get_by.return_value = [NETWORK_SET]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_network_set_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=NetworkSetModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = NETWORK_SET.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/network-sets/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ NetworkSetModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/network-sets/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(network_set=patch_return),
+ msg=NetworkSetModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = NETWORK_SET.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ NetworkSetModule().run()
+
+        # Mock provides no "not_been_called" helper; use assert_not_called() so the check actually runs.
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(network_set=resource_data),
+ msg=NetworkSetModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py
new file mode 100644
index 00000000..dd0a5cf4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import NetworkSetInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_ALL_WITHOUT_ETHERNET = dict(
+ config='config.json',
+ name=None,
+ options=['withoutEthernet']
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name='Network Set 1'
+)
+
+PARAMS_GET_BY_NAME_WITHOUT_ETHERNET = dict(
+ config='config.json',
+ name='Network Set 1',
+ options=['withoutEthernet']
+)
+
+
+class NetworkSetInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, NetworkSetInfoModule)
+ self.network_sets = self.mock_ov_client.network_sets
+ FactsParamsTestCase.configure_client_mock(self, self.network_sets)
+
+ def test_should_get_all_network_sets(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
+ }, {
+ "name": "Network Set 2",
+ "networkUris": ['/rest/ethernet-networks/ddd-eee-fff', '/rest/ethernet-networks/ggg-hhh-fff']
+ }]
+
+ self.network_sets.get_all.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ NetworkSetInfoModule().run()
+
+ self.network_sets.get_all.assert_called_once_with()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_all_network_sets_without_ethernet(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": []
+ }, {
+ "name": "Network Set 2",
+ "networkUris": []
+ }]
+
+ self.network_sets.get_all.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ NetworkSetInfoModule().run()
+
+ self.network_sets.get_all.assert_called_once_with()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_network_set_by_name(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
+ }]
+
+ self.network_sets.get_by.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ NetworkSetInfoModule().run()
+
+ self.network_sets.get_by.assert_called_once_with('name', 'Network Set 1')
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_network_set_by_name_without_ethernet(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": []
+ }]
+
+ self.network_sets.get_all_without_ethernet.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITHOUT_ETHERNET
+
+ NetworkSetInfoModule().run()
+
+ expected_filter = "\"'name'='Network Set 1'\""
+ self.network_sets.get_all_without_ethernet.assert_called_once_with(filter=expected_filter)
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py
new file mode 100644
index 00000000..82c26570
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .oneview_module_loader import SanManagerModule
+from .hpe_test_utils import OneViewBaseTestCase
+from copy import deepcopy
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_SAN_MANAGER_TEMPLATE = dict(
+ name='172.18.15.1',
+ providerDisplayName='Brocade Network Advisor',
+ uri='/rest/fc-sans/device-managers/UUU-AAA-BBB',
+ refreshState='OK',
+ connectionInfo=[
+ {
+ 'valueFormat': 'IPAddressOrHostname',
+ 'displayName': 'Host',
+ 'name': 'Host',
+ 'valueType': 'String',
+ 'required': False,
+ 'value': '172.18.15.1'
+ }]
+)
+
+
+class SanManagerModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=DEFAULT_SAN_MANAGER_TEMPLATE
+ )
+
+ PARAMS_FOR_CONNECTION_INFORMATION_SET = dict(
+ config='config.json',
+ state='connection_information_set',
+ data=DEFAULT_SAN_MANAGER_TEMPLATE.copy()
+ )
+
+ PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'],
+ refreshState='RefreshPending')
+ )
+
+ PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'])
+ )
+
+ def setUp(self):
+ self.configure_mocks(self, SanManagerModule)
+ self.resource = self.mock_ov_client.san_managers
+
+ def test_should_add_new_san_manager(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_CREATED,
+ ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+ )
+
+ def test_should_find_provider_uri_to_add(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ provider_display_name = DEFAULT_SAN_MANAGER_TEMPLATE['providerDisplayName']
+ self.resource.get_provider_uri.assert_called_once_with(provider_display_name)
+
+ def test_should_not_update_when_data_is_equals(self):
+ output_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ output_data.pop('connectionInfo')
+ self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=SanManagerModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(san_manager=output_data)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = data_merged
+ self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_UPDATED,
+ ansible_facts=dict(san_manager=data_merged)
+ )
+
+ def test_update_should_not_send_connection_info_when_not_informed_on_data(self):
+ merged_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ merged_data['refreshState'] = 'RefreshPending'
+ output_data = deepcopy(merged_data)
+ output_data.pop('connectionInfo')
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = merged_data
+ self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES
+
+ SanManagerModule().run()
+
+ self.resource.update.assert_called_once_with(resource=output_data, id_or_uri=output_data['uri'])
+
+ def test_should_remove_san_manager(self):
+ self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_ABSENT.copy()
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_san_manager_not_exist(self):
+ self.resource.get_by_name.return_value = []
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_ABSENT.copy()
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=SanManagerModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_should_fail_when_name_not_found(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = None
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg="The provider 'Brocade Network Advisor' was not found."
+ )
+
+ def test_should_fail_when_name_and_hosts_in_connectionInfo_missing(self):
+ bad_params = deepcopy(self.PARAMS_FOR_PRESENT)
+ bad_params['data'].pop('name')
+ bad_params['data'].pop('connectionInfo')
+
+ self.mock_ansible_module.params = bad_params
+
+ SanManagerModule().run()
+
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+ msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=msg)
+
+ def test_connection_information_set_should_set_the_connection_information(self):
+ data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = data_merged
+ self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_UPDATED,
+ ansible_facts=dict(san_manager=data_merged)
+ )
+
+ def test_should_add_new_san_manager_when_connection_information_set_called_without_resource(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_CREATED,
+ ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+ )
+
+ def test_should_fail_when_required_attribute_missing(self):
+ bad_params = deepcopy(self.PARAMS_FOR_CONNECTION_INFORMATION_SET)
+ bad_params['data'] = self.PARAMS_FOR_CONNECTION_INFORMATION_SET['data'].copy()
+ bad_params['data'].pop('connectionInfo')
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = bad_params
+
+ SanManagerModule().run()
+
+ msg = 'A connectionInfo field is required for this operation.'
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=msg)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py
new file mode 100644
index 00000000..df011b70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import SanManagerInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+
+class SanManagerInfoSpec(unittest.TestCase, FactsParamsTestCase):
+ ERROR_MSG = 'Fake message error'
+
+ PARAMS_GET_ALL = dict(
+ config='config.json',
+ provider_display_name=None
+ )
+
+ PARAMS_GET_BY_PROVIDER_DISPLAY_NAME = dict(
+ config='config.json',
+ provider_display_name="Brocade Network Advisor"
+ )
+
+ PRESENT_SAN_MANAGERS = [{
+ "providerDisplayName": "Brocade Network Advisor",
+ "uri": "/rest/fc-sans/device-managers//d60efc8a-15b8-470c-8470-738d16d6b319"
+ }]
+
+ def setUp(self):
+ self.configure_mocks(self, SanManagerInfoModule)
+ self.san_managers = self.mock_ov_client.san_managers
+
+ FactsParamsTestCase.configure_client_mock(self, self.san_managers)
+
+ def test_should_get_all(self):
+ self.san_managers.get_all.return_value = self.PRESENT_SAN_MANAGERS
+ self.mock_ansible_module.params = self.PARAMS_GET_ALL
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=self.PRESENT_SAN_MANAGERS
+ )
+
+ def test_should_get_by_display_name(self):
+ self.san_managers.get_by_provider_display_name.return_value = self.PRESENT_SAN_MANAGERS[0]
+ self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=self.PRESENT_SAN_MANAGERS
+ )
+
+ def test_should_return_empty_list_when_get_by_display_name_is_null(self):
+ self.san_managers.get_by_provider_display_name.return_value = None
+ self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=[]
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py
new file mode 100644
index 00000000..654f565c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py
@@ -0,0 +1,342 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules.source_control.bitbucket import bitbucket_access_key
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketAccessKeyModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketAccessKeyModule, self).setUp()
+ self.module = bitbucket_access_key
+
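+    # In these tests set_module_args() injects the module parameters, and the patched
+    # exit_json/fail_json raise AnsibleExitJson/AnsibleFailJson, so each run can be
+    # inspected through exec_info instead of terminating the test process.
+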
+ def test_missing_key_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'label': 'key name',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key'])
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_create_deploy_key(self, *args):
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'key': 'public_key',
+ 'label': 'key name',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_create_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'key': 'public_key',
+ 'label': 'key name',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_update_deploy_key(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 1)
+ self.assertEqual(create_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "new public key",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_dont_update_same_value(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_update_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_delete_deploy_key(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_delete_absent_deploy_key(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_delete_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py
new file mode 100644
index 00000000..6c5ee015
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py
@@ -0,0 +1,197 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules.source_control.bitbucket import bitbucket_pipeline_key_pair
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineKeyPairModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineKeyPairModule, self).setUp()
+ self.module = bitbucket_pipeline_key_pair
+
+ def test_missing_keys_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_keys'])
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_create_keys(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_create_keys_check_mode(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'unknown',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_update_keys(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_dont_update_same_key(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'unknown',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_update_keys_check_mode(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_delete_keys(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_delete_absent_keys(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_delete_keys_check_mode(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py
new file mode 100644
index 00000000..5d797f41
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py
@@ -0,0 +1,192 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules.source_control.bitbucket import bitbucket_pipeline_known_host
+from ansible_collections.community.general.plugins.modules.source_control.bitbucket.bitbucket_pipeline_known_host import HAS_PARAMIKO
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineKnownHostModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineKnownHostModule, self).setUp()
+ self.module = bitbucket_pipeline_known_host
+
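+    # The skipif markers below guard tests that exercise paramiko-based host key
+    # lookup: when no explicit 'key' parameter is supplied, the module is expected to
+    # fetch the host key itself, which requires paramiko to be importable.
+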
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(BitbucketHelper, 'request', return_value=(dict(status=201), dict()))
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host_with_key(self, *args):
+ with patch.object(self.module, 'get_host_key') as get_host_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'key': 'ssh-rsa public',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(get_host_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_dont_create_same_value(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host_check_mode(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_delete_known_host(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_delete_absent_known_host(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_delete_known_host_check_mode(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py
new file mode 100644
index 00000000..e291b09f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py
@@ -0,0 +1,295 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules.source_control.bitbucket import bitbucket_pipeline_variable
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineVariableModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineVariableModule, self).setUp()
+ self.module = bitbucket_pipeline_variable
+
+ def test_without_required_parameters(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], BitbucketHelper.error_messages['required_client_id'])
+
+ def test_missing_value_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_value'])
+
+ @patch.dict('os.environ', {
+ 'BITBUCKET_CLIENT_ID': 'ABC',
+ 'BITBUCKET_CLIENT_SECRET': 'XXX',
+ })
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_env_vars_params(self, *args):
+ with self.assertRaises(AnsibleExitJson):
+ set_module_args({
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_create_variable(self, *args):
+ with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_create_variable_check_mode(self, *args):
+ with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_update_variable(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'type': 'pipeline_variable',
+ 'secured': True,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_update_secured_variable(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'secured': True,
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_update_secured_state(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'secured': True,
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_dont_update_same_value(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_update_variable_check_mode(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_delete_variable(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_delete_absent_variable(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a- 99f3 - 564f15138127}'
+ })
+ def test_delete_variable_check_mode(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'username': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/gitlab.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/gitlab.py
new file mode 100644
index 00000000..77b1075c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/gitlab.py
@@ -0,0 +1,581 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from httmock import response # noqa
+from httmock import urlmatch # noqa
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from gitlab import Gitlab
+
+
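+# Minimal stand-in for AnsibleModule: the GitLab module-util classes exercised here only
+# need check_mode plus the fail_json()/exit_json() callbacks, which are stubbed as no-ops.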
+class FakeAnsibleModule(object):
+ def __init__(self):
+ self.check_mode = False
+
+ def fail_json(self, **args):
+ pass
+
+ def exit_json(self, **args):
+ pass
+
+
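+# Base class for the GitLab module tests: it builds a python-gitlab client pointed at a
+# fake http://localhost API. The test modules wrap each test with httmock's @with_httmock
+# decorators, so the resp_* handlers below answer the HTTP calls with canned JSON instead
+# of a real GitLab server.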
+class GitlabModuleTestCase(unittest.TestCase):
+ def setUp(self):
+ unitest_python_version_check_requirement(self)
+
+ self.mock_module = FakeAnsibleModule()
+
+ self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4)
+
+
+# Python 2.7+ is needed for python-gitlab
+GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
+
+
+# Check that the current Python version is at least GITLAB_MINIMUM_PYTHON_VERSION
+def python_version_match_requirement():
+ return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
+
+
+# Skip the unittest test case if the Python version doesn't meet the requirement
+def unitest_python_version_check_requirement(unittest_testcase):
+ if not python_version_match_requirement():
+ unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ",".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
+
+
+'''
+USER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
+def resp_find_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
+ '"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
+ '"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
+ '"web_url": "http://localhost:3000/jack_smith"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
+def resp_create_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
+ '"bio": null, "location": null, "public_email": "john@example.com", "skype": "",'
+ '"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
+def resp_get_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
+ '"state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith",'
+ '"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
+ '"public_email": "john@example.com", "skype": "", "linkedin": "",'
+ '"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
+def resp_get_missing_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_missing_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+'''
+USER SSHKEY API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
+def resp_get_user_keys(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
+ 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
+ 'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
+ '"title": "Another Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
+ 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
+ 'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2014-08-01T14:47:39.080Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
+def resp_create_user_keys(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "title": "Private key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
+ 'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
+ 'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
+ '2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
+ 'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
+ '"created_at": "2014-08-01T14:47:39.080Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+'''
+GROUP API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
+def resp_find_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
+ '"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
+ '"full_name": "BarFoo Group", "full_path": "bar-foo",'
+ '"file_template_project_id": 1, "parent_id": null, "projects": []}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
+def resp_get_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
+def resp_get_missing_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
+def resp_create_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"file_template_project_id": 1, "parent_id": null}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
+def resp_create_subgroup(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
+ '"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
+ '"file_template_project_id": 1, "parent_id": 1}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+GROUP MEMBER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
+def resp_get_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
+def resp_find_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
+ '"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 30}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
+def resp_add_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
+ '"state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 30}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
+def resp_update_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
+ '"state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 10}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+'''
+DEPLOY KEY API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
+def resp_find_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T11:12:29Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
+def resp_get_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
+def resp_create_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
+def resp_delete_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+PROJECT API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
+def resp_find_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
+def resp_get_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
+def resp_get_project_by_name(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
+def resp_find_group_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
+def resp_get_group_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
+def resp_create_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
+def resp_delete_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+HOOK API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
+def resp_find_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
+def resp_get_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
+def resp_create_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
+def resp_delete_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+RUNNER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/all", method="get")
+def resp_find_runners_all(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
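+# Unlike the /runners/all handler above, this one also returns the pagination headers
+# (X-Page, X-Next-Page, ...) that python-gitlab reads when listing runners page by page.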
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="get")
+def resp_find_runners_list(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="get")
+def resp_get_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="post")
+def resp_create_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="delete")
+def resp_delete_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py
new file mode 100644
index 00000000..7647644d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_deploy_key import GitLabDeployKey
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_project, resp_find_project_deploy_key,
+ resp_create_project_deploy_key, resp_delete_project_deploy_key)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import ProjectKey
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_project = _dummy
+ resp_find_project_deploy_key = _dummy
+ resp_create_project_deploy_key = _dummy
+ resp_delete_project_deploy_key = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabDeployKey(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabDeployKey, self).setUp()
+
+ self.moduleUtil = GitLabDeployKey(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ def test_deploy_key_exist(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ rvalue = self.moduleUtil.existsDeployKey(project, "Public key")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.existsDeployKey(project, "Private key")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_create_project_deploy_key)
+ def test_create_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ deploy_key = self.moduleUtil.createDeployKey(project, {"title": "Public key",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM"
+ "4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc"
+ "KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD"
+ "zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})
+
+ self.assertEqual(type(deploy_key), ProjectKey)
+ self.assertEqual(deploy_key.title, "Public key")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ @with_httmock(resp_create_project_deploy_key)
+ def test_update_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+ deployKey = self.moduleUtil.findDeployKey(project, "Public key")
+
+ changed, newDeploy_key = self.moduleUtil.updateDeployKey(deployKey, {"title": "Private key"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newDeploy_key), ProjectKey)
+ self.assertEqual(newDeploy_key.title, "Private key")
+
+ changed, newDeploy_key = self.moduleUtil.updateDeployKey(deployKey, {"title": "Private key"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newDeploy_key.title, "Private key")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ @with_httmock(resp_delete_project_deploy_key)
+ def test_delete_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ self.moduleUtil.existsDeployKey(project, "Public key")
+
+ rvalue = self.moduleUtil.deleteDeployKey()
+
+ self.assertEqual(rvalue, None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py
new file mode 100644
index 00000000..abf49860
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py
@@ -0,0 +1,112 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_group import GitLabGroup
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_group, resp_get_missing_group, resp_create_group,
+ resp_create_subgroup, resp_delete_group, resp_find_group_project)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Group
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_group = _dummy
+ resp_get_missing_group = _dummy
+ resp_create_group = _dummy
+ resp_create_subgroup = _dummy
+ resp_delete_group = _dummy
+ resp_find_group_project = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabGroup(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabGroup, self).setUp()
+
+ self.moduleUtil = GitLabGroup(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_group)
+ def test_exist_group(self):
+ rvalue = self.moduleUtil.existsGroup(1)
+
+ self.assertEqual(rvalue, True)
+
+ @with_httmock(resp_get_missing_group)
+ def test_exist_group_missing(self):
+ rvalue = self.moduleUtil.existsGroup(1)
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_create_group)
+ def test_create_group(self):
+ group = self.moduleUtil.createGroup({'name': "Foobar Group",
+ 'path': "foo-bar",
+ 'description': "An interesting group"})
+
+ self.assertEqual(type(group), Group)
+ self.assertEqual(group.name, "Foobar Group")
+ self.assertEqual(group.path, "foo-bar")
+ self.assertEqual(group.description, "An interesting group")
+ self.assertEqual(group.id, 1)
+
+ @with_httmock(resp_create_subgroup)
+ def test_create_subgroup(self):
+ group = self.moduleUtil.createGroup({'name': "BarFoo Group", 'path': "bar-foo", "parent_id": 1})
+
+ self.assertEqual(type(group), Group)
+ self.assertEqual(group.name, "BarFoo Group")
+ self.assertEqual(group.full_path, "foo-bar/bar-foo")
+ self.assertEqual(group.id, 2)
+ self.assertEqual(group.parent_id, 1)
+
+ @with_httmock(resp_get_group)
+ def test_update_group(self):
+ group = self.gitlab_instance.groups.get(1)
+ changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group", "visibility": "private"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(newGroup.name, "BarFoo Group")
+ self.assertEqual(newGroup.visibility, "private")
+
+ changed, newGroup = self.moduleUtil.updateGroup(group, {'name': "BarFoo Group"})
+
+ self.assertEqual(changed, False)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_find_group_project)
+ @with_httmock(resp_delete_group)
+ def test_delete_group(self):
+ self.moduleUtil.existsGroup(1)
+
+ print(self.moduleUtil.groupObject.projects)
+
+ rvalue = self.moduleUtil.deleteGroup()
+
+ self.assertEqual(rvalue, None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py
new file mode 100644
index 00000000..740f91ba
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_hook import GitLabHook
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_project, resp_find_project_hook,
+ resp_create_project_hook, resp_delete_project_hook)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import ProjectHook
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_project = _dummy
+ resp_find_project_hook = _dummy
+ resp_create_project_hook = _dummy
+ resp_delete_project_hook = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabHook(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabHook, self).setUp()
+
+ self.moduleUtil = GitLabHook(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ def test_hook_exist(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ rvalue = self.moduleUtil.existsHook(project, "http://example.com/hook")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.existsHook(project, "http://gitlab.com/hook")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_create_project_hook)
+ def test_create_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ hook = self.moduleUtil.createHook(project, {"url": "http://example.com/hook"})
+
+ self.assertEqual(type(hook), ProjectHook)
+ self.assertEqual(hook.url, "http://example.com/hook")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ def test_update_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+ hook = self.moduleUtil.findHook(project, "http://example.com/hook")
+
+ changed, newHook = self.moduleUtil.updateHook(hook, {"url": "http://gitlab.com/hook"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newHook), ProjectHook)
+ self.assertEqual(newHook.url, "http://gitlab.com/hook")
+
+ changed, newHook = self.moduleUtil.updateHook(hook, {"url": "http://gitlab.com/hook"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newHook.url, "http://gitlab.com/hook")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ @with_httmock(resp_delete_project_hook)
+ def test_delete_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ self.moduleUtil.existsHook(project, "http://example.com/hook")
+
+ rvalue = self.moduleUtil.deleteHook()
+
+ self.assertEqual(rvalue, None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py
new file mode 100644
index 00000000..dc009c60
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_project import GitLabProject
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_group, resp_get_project_by_name, resp_create_project,
+ resp_get_project, resp_delete_project, resp_get_user)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Project
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_group = _dummy
+ resp_get_project_by_name = _dummy
+ resp_create_project = _dummy
+ resp_get_project = _dummy
+ resp_delete_project = _dummy
+ resp_get_user = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabProject(GitlabModuleTestCase):
+ @with_httmock(resp_get_user)
+ def setUp(self):
+ super(TestGitlabProject, self).setUp()
+
+ self.gitlab_instance.user = self.gitlab_instance.users.get(1)
+ self.moduleUtil = GitLabProject(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_project_by_name)
+ def test_project_exist(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ rvalue = self.moduleUtil.existsProject(group, "diaspora-client")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.existsProject(group, "missing-project")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_create_project)
+ def test_create_project(self):
+ group = self.gitlab_instance.groups.get(1)
+ project = self.moduleUtil.createProject(group, {"name": "Diaspora Client", "path": "diaspora-client", "namespace_id": group.id})
+
+ self.assertEqual(type(project), Project)
+ self.assertEqual(project.name, "Diaspora Client")
+
+ @with_httmock(resp_get_project)
+ def test_update_project(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ changed, newProject = self.moduleUtil.updateProject(project, {"name": "New Name"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newProject), Project)
+ self.assertEqual(newProject.name, "New Name")
+
+ changed, newProject = self.moduleUtil.updateProject(project, {"name": "New Name"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newProject.name, "New Name")
+
+ @with_httmock(resp_get_project)
+ def test_update_project_merge_method(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ # merge_method should be 'merge' by default
+ self.assertEqual(project.merge_method, "merge")
+
+ changed, newProject = self.moduleUtil.updateProject(project, {"name": "New Name", "merge_method": "rebase_merge"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newProject), Project)
+ self.assertEqual(newProject.name, "New Name")
+ self.assertEqual(newProject.merge_method, "rebase_merge")
+
+ changed, newProject = self.moduleUtil.updateProject(project, {"name": "New Name", "merge_method": "rebase_merge"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newProject.name, "New Name")
+ self.assertEqual(newProject.merge_method, "rebase_merge")
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_project_by_name)
+ @with_httmock(resp_delete_project)
+ def test_delete_project(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ self.moduleUtil.existsProject(group, "diaspora-client")
+
+ rvalue = self.moduleUtil.deleteProject()
+
+ self.assertEqual(rvalue, None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py
new file mode 100644
index 00000000..6af406f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_runner import GitLabRunner
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_find_runners_list, resp_get_runner,
+ resp_create_runner, resp_delete_runner)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Runner
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_find_runners_list = _dummy
+ resp_get_runner = _dummy
+ resp_create_runner = _dummy
+ resp_delete_runner = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabRunner(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabRunner, self).setUp()
+
+ self.moduleUtil = GitLabRunner(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_find_runners_list)
+ @with_httmock(resp_get_runner)
+ def test_runner_exist(self):
+ rvalue = self.moduleUtil.existsRunner("test-1-20150125")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.existsRunner("test-3-00000000")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_create_runner)
+ def test_create_runner(self):
+ runner = self.moduleUtil.createRunner({"token": "token", "description": "test-1-20150125"})
+
+ self.assertEqual(type(runner), Runner)
+ self.assertEqual(runner.description, "test-1-20150125")
+
+ @with_httmock(resp_find_runners_list)
+ @with_httmock(resp_get_runner)
+ def test_update_runner(self):
+ runner = self.moduleUtil.findRunner("test-1-20150125")
+
+ changed, newRunner = self.moduleUtil.updateRunner(runner, {"description": "Runner description"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newRunner), Runner)
+ self.assertEqual(newRunner.description, "Runner description")
+
+ changed, newRunner = self.moduleUtil.updateRunner(runner, {"description": "Runner description"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newRunner.description, "Runner description")
+
+ @with_httmock(resp_find_runners_list)
+ @with_httmock(resp_get_runner)
+ @with_httmock(resp_delete_runner)
+ def test_delete_runner(self):
+ self.moduleUtil.existsRunner("test-1-20150125")
+
+ rvalue = self.moduleUtil.deleteRunner()
+
+ self.assertEqual(rvalue, None)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py
new file mode 100644
index 00000000..4a47654a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.source_control.gitlab.gitlab_user import GitLabUser
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_find_user, resp_get_user, resp_get_user_keys,
+ resp_create_user_keys, resp_create_user, resp_delete_user,
+ resp_get_member, resp_get_group, resp_add_member,
+ resp_update_member)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import User
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_find_user = _dummy
+ resp_get_user = _dummy
+ resp_get_user_keys = _dummy
+ resp_create_user_keys = _dummy
+ resp_create_user = _dummy
+ resp_delete_user = _dummy
+ resp_get_member = _dummy
+ resp_get_group = _dummy
+ resp_add_member = _dummy
+ resp_update_member = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabUser(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabUser, self).setUp()
+
+ self.moduleUtil = GitLabUser(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_find_user)
+ def test_exist_user(self):
+ rvalue = self.moduleUtil.existsUser("john_smith")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.existsUser("paul_smith")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_find_user)
+ def test_find_user(self):
+ user = self.moduleUtil.findUser("john_smith")
+
+ self.assertEqual(type(user), User)
+ self.assertEqual(user.name, "John Smith")
+ self.assertEqual(user.id, 1)
+
+ @with_httmock(resp_create_user)
+ def test_create_user(self):
+ user = self.moduleUtil.createUser({'email': 'john@example.com', 'password': 's3cur3s3cr3T',
+ 'username': 'john_smith', 'name': 'John Smith'})
+ self.assertEqual(type(user), User)
+ self.assertEqual(user.name, "John Smith")
+ self.assertEqual(user.id, 1)
+
+ @with_httmock(resp_get_user)
+ def test_update_user(self):
+ user = self.gitlab_instance.users.get(1)
+
+ changed, newUser = self.moduleUtil.updateUser(
+ user,
+ {'name': {'value': "Jack Smith"}, "is_admin": {'value': "true", 'setter': 'admin'}}, {}
+ )
+
+ self.assertEqual(changed, True)
+ self.assertEqual(newUser.name, "Jack Smith")
+ self.assertEqual(newUser.admin, "true")
+
+ changed, newUser = self.moduleUtil.updateUser(user, {'name': {'value': "Jack Smith"}}, {})
+
+ self.assertEqual(changed, False)
+
+ changed, newUser = self.moduleUtil.updateUser(
+ user,
+ {}, {
+ 'skip_reconfirmation': {'value': True},
+ 'password': {'value': 'super_secret-super_secret'},
+ }
+ )
+
+ # note: uncheckable parameters don't set the changed state
+ self.assertEqual(changed, False)
+ self.assertEqual(newUser.skip_reconfirmation, True)
+ self.assertEqual(newUser.password, 'super_secret-super_secret')
+
+ @with_httmock(resp_find_user)
+ @with_httmock(resp_delete_user)
+ def test_delete_user(self):
+ self.moduleUtil.existsUser("john_smith")
+ rvalue = self.moduleUtil.deleteUser()
+
+ self.assertEqual(rvalue, None)
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_get_user_keys)
+ def test_sshkey_exist(self):
+ user = self.gitlab_instance.users.get(1)
+
+ exist = self.moduleUtil.sshKeyExists(user, "Public key")
+ self.assertEqual(exist, True)
+
+ notExist = self.moduleUtil.sshKeyExists(user, "Private key")
+ self.assertEqual(notExist, False)
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_create_user_keys)
+ @with_httmock(resp_get_user_keys)
+ def test_create_sshkey(self):
+ user = self.gitlab_instance.users.get(1)
+
+ rvalue = self.moduleUtil.addSshKeyToUser(user, {
+ 'name': "Public key",
+ 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe"
+ "jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4"
+ "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})
+ self.assertEqual(rvalue, False)
+
+ rvalue = self.moduleUtil.addSshKeyToUser(user, {
+ 'name': "Private key",
+ 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcU"
+ "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+"
+ "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j"
+ "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x"
+ "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF"})
+ self.assertEqual(rvalue, True)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_member)
+ def test_find_member(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ user = self.moduleUtil.findMember(group, 1)
+ self.assertEqual(user.username, "raymond_smith")
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_member)
+ @with_httmock(resp_add_member)
+ @with_httmock(resp_update_member)
+ def test_assign_user_to_group(self):
+ group = self.gitlab_instance.groups.get(1)
+ user = self.gitlab_instance.users.get(1)
+
+ rvalue = self.moduleUtil.assignUserToGroup(user, group.id, "developer")
+ self.assertEqual(rvalue, False)
+
+ rvalue = self.moduleUtil.assignUserToGroup(user, group.id, "guest")
+ self.assertEqual(rvalue, True)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py
new file mode 100644
index 00000000..8e32e150
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py
@@ -0,0 +1,247 @@
+# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mock
+import sys
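+# Stub the HPE 3PAR SDK packages in sys.modules before importing ss_3par_cpg, so the
+# module under test can be imported even when hpe3par_sdk / hpe3parclient are not installed.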
+sys.modules['hpe3par_sdk'] = mock.Mock()
+sys.modules['hpe3par_sdk.client'] = mock.Mock()
+sys.modules['hpe3parclient'] = mock.Mock()
+sys.modules['hpe3parclient.exceptions'] = mock.Mock()
+from ansible_collections.community.general.plugins.modules.storage.hpe3par import ss_3par_cpg
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.create_cpg')
+def test_module_args(mock_create_cpg, mock_module, mock_client):
+ """
+ hpe3par CPG - test module arguments
+ """
+
+ PARAMS_FOR_PRESENT = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': 'test_domain',
+ 'growth_increment': 32768,
+ 'growth_increment_unit': 'MiB',
+ 'growth_limit': 32768,
+ 'growth_limit_unit': 'MiB',
+ 'growth_warning': 32768,
+ 'growth_warning_unit': 'MiB',
+ 'raid_type': 'R6',
+ 'set_size': 8,
+ 'high_availability': 'MAG',
+ 'disk_type': 'FC',
+ 'state': 'present',
+ 'secure': False
+ }
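+    # Let the AnsibleModule mock return itself so that main() reads these params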
+ mock_module.params = PARAMS_FOR_PRESENT
+ mock_module.return_value = mock_module
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_create_cpg.return_value = (True, True, "Created CPG successfully.")
+ ss_3par_cpg.main()
+ mock_module.assert_called_with(
+ argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.create_cpg')
+def test_main_exit_functionality_present_success_without_issue_attr_dict(mock_create_cpg, mock_module, mock_client):
+ """
+    hpe3par CPG - state present success check
+ """
+ PARAMS_FOR_PRESENT = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_name': '3PAR',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': 'test_domain',
+ 'growth_increment': 32768,
+ 'growth_increment_unit': 'MiB',
+ 'growth_limit': 32768,
+ 'growth_limit_unit': 'MiB',
+ 'growth_warning': 32768,
+ 'growth_warning_unit': 'MiB',
+ 'raid_type': 'R6',
+ 'set_size': 8,
+ 'high_availability': 'MAG',
+ 'disk_type': 'FC',
+ 'state': 'present',
+ 'secure': False
+ }
+    # This creates an instance of the AnsibleModule mock.
+ mock_module.params = PARAMS_FOR_PRESENT
+ mock_module.return_value = mock_module
+ instance = mock_module.return_value
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_create_cpg.return_value = (
+ True, True, "Created CPG successfully.")
+ ss_3par_cpg.main()
+ # AnsibleModule.exit_json should be called
+ instance.exit_json.assert_called_with(
+ changed=True, msg="Created CPG successfully.")
+ # AnsibleModule.fail_json should not be called
+ assert instance.fail_json.call_count == 0
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.delete_cpg')
+def test_main_exit_functionality_absent_success_without_issue_attr_dict(mock_delete_cpg, mock_module, mock_client):
+ """
+    hpe3par CPG - state absent (delete) success check
+ """
+ PARAMS_FOR_DELETE = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_name': '3PAR',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': None,
+ 'growth_increment': None,
+ 'growth_increment_unit': None,
+ 'growth_limit': None,
+ 'growth_limit_unit': None,
+ 'growth_warning': None,
+ 'growth_warning_unit': None,
+ 'raid_type': None,
+ 'set_size': None,
+ 'high_availability': None,
+ 'disk_type': None,
+ 'state': 'absent',
+ 'secure': False
+ }
+    # This creates an instance of the AnsibleModule mock.
+ mock_module.params = PARAMS_FOR_DELETE
+ mock_module.return_value = mock_module
+ instance = mock_module.return_value
+ mock_delete_cpg.return_value = (
+ True, True, "Deleted CPG test_cpg successfully.")
+ mock_client.HPE3ParClient.login.return_value = True
+ ss_3par_cpg.main()
+ # AnsibleModule.exit_json should be called
+ instance.exit_json.assert_called_with(
+ changed=True, msg="Deleted CPG test_cpg successfully.")
+ # AnsibleModule.fail_json should not be called
+ assert instance.fail_json.call_count == 0
+
+
+def test_convert_to_binary_multiple():
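+    # convert_to_binary_multiple() returns the size in MiB; None and negative sizes map to -1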
+ assert hpe3par.convert_to_binary_multiple(None) == -1
+ assert hpe3par.convert_to_binary_multiple('-1.0 MiB') == -1
+ assert hpe3par.convert_to_binary_multiple('-1.0GiB') == -1
+ assert hpe3par.convert_to_binary_multiple('1.0 MiB') == 1
+ assert hpe3par.convert_to_binary_multiple('1.5GiB') == 1.5 * 1024
+ assert hpe3par.convert_to_binary_multiple('1.5 TiB') == 1.5 * 1024 * 1024
+ assert hpe3par.convert_to_binary_multiple(' 1.5 TiB ') == 1.5 * 1024 * 1024
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+def test_validate_set_size(mock_client):
+ mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]},
+ 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]},
+ 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]},
+ 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]}
+ }
+ raid_type = 'R0'
+ set_size = 1
+ assert ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+ set_size = 2
+ assert not ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+ raid_type = None
+ assert not ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+def test_cpg_ldlayout_map(mock_client):
+ mock_client.HPE3ParClient.PORT = 1
+ mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]},
+ 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]},
+ 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]},
+ 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]}
+ }
+ ldlayout_dict = {'RAIDType': 'R6', 'HA': 'PORT'}
+ assert ss_3par_cpg.cpg_ldlayout_map(ldlayout_dict) == {
+ 'RAIDType': 4, 'HA': 1}
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+def test_create_cpg(mock_client):
+ ss_3par_cpg.validate_set_size = mock.Mock(return_value=True)
+ ss_3par_cpg.cpg_ldlayout_map = mock.Mock(
+ return_value={'RAIDType': 4, 'HA': 1})
+
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_client.HPE3ParClient.cpgExists.return_value = False
+ mock_client.HPE3ParClient.FC = 1
+ mock_client.HPE3ParClient.createCPG.return_value = True
+
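+    # A CPG that does not exist yet is created and a change is reported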
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768 MiB',
+ '32768 MiB',
+ '32768 MiB',
+ 'R6',
+ 8,
+ 'MAG',
+ 'FC'
+ ) == (True, True, "Created CPG %s successfully." % 'test_cpg')
+
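+    # An existing CPG is left untouched and reported as already present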
+ mock_client.HPE3ParClient.cpgExists.return_value = True
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768.0 MiB',
+ '32768.0 MiB',
+ '32768.0 MiB',
+ 'R6',
+ 8,
+ 'MAG',
+ 'FC'
+ ) == (True, False, 'CPG already present')
+
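+    # A set size that is invalid for the RAID type is rejected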
+ ss_3par_cpg.validate_set_size = mock.Mock(return_value=False)
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768.0 MiB',
+ '32768 MiB',
+ '32768.0 MiB',
+ 'R6',
+ 3,
+ 'MAG',
+ 'FC'
+ ) == (False, False, 'Set size 3 not part of RAID set R6')
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.storage.hpe3par.ss_3par_cpg.client')
+def test_delete_cpg(mock_client):
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_client.HPE3ParClient.cpgExists.return_value = True
+ mock_client.HPE3ParClient.FC = 1
+ mock_client.HPE3ParClient.deleteCPG.return_value = True
+
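+    # Deleting an existing CPG reports a change; a missing or unnamed CPG reports no change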
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ 'test_cpg'
+ ) == (True, True, "Deleted CPG %s successfully." % 'test_cpg')
+
+ mock_client.HPE3ParClient.cpgExists.return_value = False
+
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ 'test_cpg'
+ ) == (True, False, "CPG does not exist")
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ None
+ ) == (True, False, "CPG does not exist")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt
new file mode 100644
index 00000000..8d223b04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 00000000..1c9adbd9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4
new file mode 100644
index 00000000..9a2f5b05
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.42
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up
new file mode 100644
index 00000000..5077e3a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+ post-up XXXX_ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up
new file mode 100644
index 00000000..5c0f6973
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+ pre-up XXXX_ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6
new file mode 100644
index 00000000..afaaac96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::42
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up
new file mode 100644
index 00000000..cb3e98b7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ post-up XXXX_ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up
new file mode 100644
index 00000000..149da568
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ pre-up XXXX_ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt
new file mode 100644
index 00000000..050a9839
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu
new file mode 100644
index 00000000..40331271
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ mtu 1350
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 00000000..3f0da8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt
new file mode 100644
index 00000000..0af87750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "slaves",
+ "state": "present",
+ "value": "int1 int3"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json
new file mode 100644
index 00000000..ee632bd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt
new file mode 100644
index 00000000..8d223b04
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 00000000..1c9adbd9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4
new file mode 100644
index 00000000..69629374
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ address 192.168.0.42
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up
new file mode 100644
index 00000000..998f4844
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ post-up XXXX_ipv4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up
new file mode 100644
index 00000000..5e6af40a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ pre-up XXXX_ipv4
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt
new file mode 100644
index 00000000..04c20891
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 00000000..48cb29b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 00000000..fbfed6be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt
new file mode 100644
index 00000000..050a9839
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu
new file mode 100644
index 00000000..7bbad22a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ mtu 1350
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 00000000..3f0da8b1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt
new file mode 100644
index 00000000..0af87750
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "slaves",
+ "state": "present",
+ "value": "int1 int3"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json
new file mode 100644
index 00000000..bffc17a9
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up
new file mode 100644
index 00000000..e86b2578
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up
@@ -0,0 +1,62 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt
new file mode 100644
index 00000000..a1600d9a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "192.168.0.42"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 00000000..e1e01523
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 00000000..9e510654
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6 b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt
new file mode 100644
index 00000000..04c20891
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 00000000..48cb29b0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
\ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 00000000..fbfed6be
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method
new file mode 100644
index 00000000..065bf0f0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet dhcp
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json
new file mode 100644
index 00000000..8e9863b2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt
new file mode 100644
index 00000000..fddf3b3b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "absent",
+ "value": "1350"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu
new file mode 100644
index 00000000..5218eed1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1350
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 00000000..764c9cb0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[1] fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves
new file mode 100644
index 00000000..e2b78e93
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int3
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json
new file mode 100644
index 00000000..9e97da32
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+} \ No newline at end of file
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family
new file mode 100644
index 00000000..bc4ecea7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
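Note: this address_family fixture carries both an inet and an inet6 stanza for eth0, which is what the change_ipv4/change_ipv6 golden outputs exercise; for fixtures without an eth0 stanza (such as servers.com), the same test cases produce the "interface eth0 not found" *.exceptions.txt files above. A minimal sketch (not part of the diff; FakeModule is a hypothetical stand-in for the mocked module object used by the tests added below) of an inet6-scoped edit against this fixture:

from ansible_collections.community.general.plugins.modules.system import interfaces_file


class FakeModule(object):
    # Hypothetical stand-in; the real tests use a ModuleMocked helper providing fail_json/backup_local.
    def fail_json(self, msg):
        raise Exception(msg)


module = FakeModule()
lines, ifaces = interfaces_file.read_interfaces_file(module, 'fixtures/input/address_family')
# The trailing 'inet6' argument scopes the change to the "iface eth0 inet6" stanza only,
# mirroring the call made in test_inet_inet6 below.
changed, lines = interfaces_file.setInterfaceOption(
    module, lines, 'eth0', 'post-up', 'XXXX_ipv6', 'present', 'inet6')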
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp
new file mode 100644
index 00000000..bd4522ec
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com
new file mode 100644
index 00000000..c826bbe7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
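Note: the servers.com fixture above is the input from which every servers.com_* golden output in this changeset is derived. As a minimal sketch (not part of the diff, and assuming it is run from the test directory with a hypothetical FakeModule stand-in for the mocked module), the *.json golden outputs are simply this file parsed by read_interfaces_file and serialized with the same json.dumps settings the test helpers below use:

import json

from ansible_collections.community.general.plugins.modules.system import interfaces_file


class FakeModule(object):
    # Hypothetical stand-in; only fail_json is needed for parsing.
    def fail_json(self, msg):
        raise Exception(msg)


lines, ifaces = interfaces_file.read_interfaces_file(FakeModule(), 'fixtures/input/servers.com')
# Reproduces the *.json golden-output format used in this changeset (sorted keys, 4-space indent).
print(json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': ')))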
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py
new file mode 100644
index 00000000..a96737c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py
@@ -0,0 +1,320 @@
+# (c) 2017, Roman Belyakovsky <ihryamzik () gmail.com>
+#
+# This file is part of Ansible
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.system import interfaces_file
+from shutil import copyfile, move
+import difflib
+import inspect
+import io
+import json
+import os
+import re
+import shutil
+import tempfile
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+class ModuleMocked():
+ def atomic_move(self, src, dst):
+ move(src, dst)
+
+ def backup_local(self, path):
+ backupp = os.path.join("/tmp", os.path.basename(path) + ".bak")
+ copyfile(path, backupp)
+ return backupp
+
+ def fail_json(self, msg):
+ raise AnsibleFailJson(msg)
+
+
+module = ModuleMocked()
+fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'input')
+golden_output_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'golden_output')
+
+
+class TestInterfacesFileModule(unittest.TestCase):
+ def getTestFiles(self, include_filter=None, exclude_filter=None):
+ flist = next(os.walk(fixture_path))[2]
+ if include_filter:
+ flist = filter(lambda x: re.match(include_filter, x), flist)
+ if exclude_filter:
+ flist = filter(lambda x: not re.match(exclude_filter, x), flist)
+ return flist
+
+ def compareFileToBackup(self, path, backup):
+ with open(path) as f1:
+ with open(backup) as f2:
+ diffs = difflib.context_diff(f1.readlines(),
+ f2.readlines(),
+ fromfile=os.path.basename(path),
+ tofile=os.path.basename(backup))
+ # Restore backup
+ move(backup, path)
+ deltas = [d for d in diffs]
+ self.assertTrue(len(deltas) == 0)
+
+ def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None):
+ if not testname:
+ testname = "%s.%s" % (path, inspect.stack()[1][3])
+ self.compareStringWithFile("".join([d['line'] for d in interfaces_lines if 'line' in d]), testname)
+
+ def compareInterfacesToFile(self, ifaces, path, testname=None):
+ if not testname:
+ testname = "%s.%s.json" % (path, inspect.stack()[1][3])
+ self.compareStringWithFile(json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': ')), testname)
+
+ def compareStringWithFile(self, string, path):
+ # self.assertEqual("","_",msg=path)
+ testfilepath = os.path.join(golden_output_path, path)
+ goldenstring = string
+        if not os.path.isfile(testfilepath):
+            # Record the golden file on first run; open in text mode so writing a str works on Python 3.
+            with open(testfilepath, 'w') as f:
+                f.write(string)
+ else:
+ with open(testfilepath, 'r') as goldenfile:
+ goldenstring = goldenfile.read()
+ goldenfile.close()
+ self.assertEqual(string, goldenstring)
+
+ def test_no_changes(self):
+ for testfile in self.getTestFiles():
+ path = os.path.join(fixture_path, testfile)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ self.compareInterfacesLinesToFile(lines, testfile)
+ self.compareInterfacesToFile(ifaces, testfile)
+
+ def test_add_up_aoption_to_aggi(self):
+ testcases = {
+ "add_aggi_up": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ }
+ ],
+ "add_and_delete_aggi_up": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ },
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': None,
+ 'state': 'absent',
+ },
+ ],
+ "set_aggi_slaves": [
+ {
+ 'iface': 'aggi',
+ 'option': 'slaves',
+ 'value': 'int1 int3',
+ 'state': 'present',
+ },
+ ],
+ "set_aggi_and_eth0_mtu": [
+ {
+ 'iface': 'aggi',
+ 'option': 'mtu',
+ 'value': '1350',
+ 'state': 'present',
+ },
+ {
+ 'iface': 'eth0',
+ 'option': 'mtu',
+ 'value': '1350',
+ 'state': 'present',
+ },
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ path = os.path.join(fixture_path, testfile)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ fail_json_iterations = []
+ for i, options in enumerate(options_list):
+ try:
+ dummy, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'],
+ options['value'], options['state'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("[%d] fail_json message: %s\noptions:\n%s" %
+ (i, str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+
+ def test_revert(self):
+ testcases = {
+ "revert": [
+ {
+ 'iface': 'eth0',
+ 'option': 'mtu',
+ 'value': '1350',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ for state in ['present', 'absent']:
+ fail_json_iterations = []
+ options['state'] = state
+ try:
+ dummy, lines = interfaces_file.setInterfaceOption(module, lines,
+ options['iface'], options['option'], options['value'], options['state'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ self.compareFileToBackup(path, backupp)
+
+ def test_change_method(self):
+ testcases = {
+ "change_method": [
+ {
+ 'iface': 'eth1',
+ 'option': 'method',
+ 'value': 'dhcp',
+ 'state': 'present',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ fail_json_iterations = []
+ try:
+ changed, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'],
+ options['value'], options['state'])
+                        # When a change is made, try running it again to verify idempotency
+ if changed:
+ changed_again, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'],
+ options['option'], options['value'], options['state'])
+ self.assertFalse(changed_again,
+ msg='Second request for change should return false for {0} running on {1}'.format(testname,
+ testfile))
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ # Restore backup
+ move(backupp, path)
+
+ def test_inet_inet6(self):
+ testcases = {
+ "change_ipv4": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'address',
+ 'value': '192.168.0.42',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'address',
+ 'value': 'fc00::42',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv4_pre_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'pre-up',
+ 'value': 'XXXX_ipv4',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6_pre_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'pre-up',
+ 'value': 'XXXX_ipv6',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv4_post_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'post-up',
+ 'value': 'XXXX_ipv4',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6_post_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'post-up',
+ 'value': 'XXXX_ipv6',
+ 'state': 'present',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ fail_json_iterations = []
+ try:
+ dummy, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'],
+ options['value'], options['state'], options['address_family'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ # Restore backup
+ move(backupp, path)
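Note: compareStringWithFile writes the golden file when it does not yet exist and compares strictly on later runs, so a new test case or fixture presumably records its own golden outputs on first execution; that appears to be how the interfaces, *.json and *.exceptions.txt files in this changeset were generated.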
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_java_keystore.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_java_keystore.py
new file mode 100644
index 00000000..c2f3421c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_java_keystore.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat.mock import Mock
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules.system.java_keystore import create_jks, cert_changed, ArgumentSpec
+
+
+class TestCreateJavaKeystore(ModuleTestCase):
+ """Test the creation of a Java keystore."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestCreateJavaKeystore, self).setUp()
+
+ orig_exists = os.path.exists
+ self.spec = ArgumentSpec()
+ self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
+ side_effect=lambda path, content: path)
+ self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
+ self.mock_os_path_exists = patch('os.path.exists',
+ side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path))
+ self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context',
+ side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])
+ self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path',
+ side_effect=lambda path: (False, None))
+ self.run_commands = self.mock_run_commands.start()
+ self.create_file = self.mock_create_file.start()
+ self.selinux_context = self.mock_selinux_context.start()
+ self.is_special_selinux_path = self.mock_is_special_selinux_path.start()
+ self.os_path_exists = self.mock_os_path_exists.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestCreateJavaKeystore, self).tearDown()
+ self.mock_create_file.stop()
+ self.mock_run_commands.stop()
+ self.mock_selinux_context.stop()
+ self.mock_is_special_selinux_path.stop()
+ self.mock_os_path_exists.stop()
+
+ def test_create_jks_success(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.exit_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = lambda module, cmd, data: (0, '', '')
+ create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
+ module.exit_json.assert_called_once_with(
+ changed=True,
+ cmd=["keytool", "-importkeystore",
+ "-destkeystore", "/path/to/keystore.jks",
+ "-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test",
+ "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"],
+ msg='',
+ rc=0,
+ stdout_lines=''
+ )
+
+ def test_create_jks_keypass_fail_export_pkcs12(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ private_key_passphrase='passphrase-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(1, '', ''), (0, '', '')]
+ create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "passphrase-foo")
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "pkcs12", "-export", "-name", "test",
+ "-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key",
+ "-out", "/tmp/keystore.p12",
+ "-passout", "stdin",
+ "-passin", "stdin"],
+ msg='',
+ rc=1
+ )
+
+ def test_create_jks_fail_export_pkcs12(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(1, '', ''), (0, '', '')]
+ create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "pkcs12", "-export", "-name", "test",
+ "-in", "/tmp/foo.crt", "-inkey", "/tmp/foo.key",
+ "-out", "/tmp/keystore.p12",
+ "-passout", "stdin"],
+ msg='',
+ rc=1
+ )
+
+ def test_create_jks_fail_import_key(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(0, '', ''), (1, '', '')]
+ create_jks(module, "test", "openssl", "keytool", "/path/to/keystore.jks", "changeit", "")
+ module.fail_json.assert_called_once_with(
+ cmd=["keytool", "-importkeystore",
+ "-destkeystore", "/path/to/keystore.jks",
+ "-srckeystore", "/tmp/keystore.p12", "-srcstoretype", "pkcs12", "-alias", "test",
+ "-deststorepass", "changeit", "-srcstorepass", "changeit", "-noprompt"],
+ msg='',
+ rc=1
+ )
+
+
+class TestCertChanged(ModuleTestCase):
+ """Test if the cert has changed."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestCertChanged, self).setUp()
+ self.spec = ArgumentSpec()
+ self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.create_file',
+ side_effect=lambda path, content: path)
+ self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.system.java_keystore.run_commands')
+ self.run_commands = self.mock_run_commands.start()
+ self.create_file = self.mock_create_file.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestCertChanged, self).tearDown()
+ self.mock_create_file.stop()
+ self.mock_run_commands.stop()
+
+ def test_cert_unchanged_same_fingerprint(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')]
+ result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
+ self.assertFalse(result, 'Fingerprint is identical')
+
+ def test_cert_changed_fingerprint_mismatch(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')]
+ result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
+ self.assertTrue(result, 'Fingerprint mismatch')
+
+ def test_cert_changed_alias_does_not_exist(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(0, 'foo=abcd:1234:efgh', ''),
+ (1, 'keytool error: java.lang.Exception: Alias <foo> does not exist', '')]
+ result = cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
+ self.assertTrue(result, 'Certificate does not exist')
+
+ def test_cert_changed_fail_read_cert(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')]
+ cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "x509", "-noout", "-in", "/tmp/foo.crt", "-fingerprint", "-sha256"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
+
+ def test_cert_changed_fail_read_keystore(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+
+ module.fail_json = Mock(return_value=True)
+
+ with patch('os.remove', return_value=True):
+ self.run_commands.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')]
+ cert_changed(module, "openssl", "keytool", "/path/to/keystore.jks", "changeit", 'foo')
+ module.fail_json.assert_called_with(
+ cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-storepass", "changeit", "-v"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_pamd.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_pamd.py
new file mode 100644
index 00000000..e7a68835
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_pamd.py
@@ -0,0 +1,376 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules.system.pamd import PamdRule
+from ansible_collections.community.general.plugins.modules.system.pamd import PamdLine
+from ansible_collections.community.general.plugins.modules.system.pamd import PamdComment
+from ansible_collections.community.general.plugins.modules.system.pamd import PamdInclude
+from ansible_collections.community.general.plugins.modules.system.pamd import PamdService
+
+
+class PamdLineTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.pamd_line = PamdLine("This is a test")
+
+ def test_line(self):
+ self.assertEqual("This is a test", str(self.pamd_line))
+
+ def test_matches(self):
+ self.assertFalse(self.pamd_line.matches("test", "matches", "foo", "bar"))
+
+
+class PamdIncludeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.good_include = PamdInclude("@include foobar")
+ self.bad_include = PamdInclude("include foobar")
+
+ def test_line(self):
+ self.assertEqual("@include foobar", str(self.good_include))
+
+ def test_matches(self):
+ self.assertFalse(self.good_include.matches("something", "something", "dark", "side"))
+
+ def test_valid(self):
+ self.assertTrue(self.good_include.is_valid)
+ self.assertFalse(self.bad_include.is_valid)
+
+
+class PamdCommentTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.good_comment = PamdComment("# This is a test comment")
+ self.bad_comment = PamdComment("This is a bad test comment")
+
+ def test_line(self):
+ self.assertEqual("# This is a test comment", str(self.good_comment))
+
+ def test_matches(self):
+ self.assertFalse(self.good_comment.matches("test", "matches", "foo", "bar"))
+
+ def test_valid(self):
+ self.assertTrue(self.good_comment.is_valid)
+ self.assertFalse(self.bad_comment.is_valid)
+
+
+class PamdRuleTestCase(unittest.TestCase):
+ def setUp(self):
+ self.rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke')
+
+ def test_type(self):
+ self.assertEqual(self.rule.rule_type, 'account')
+
+ def test_control(self):
+ self.assertEqual(self.rule.rule_control, 'optional')
+ self.assertEqual(self.rule._control, 'optional')
+
+ def test_path(self):
+ self.assertEqual(self.rule.rule_path, 'pam_keyinit.so')
+
+ def test_args(self):
+ self.assertEqual(self.rule.rule_args, ['revoke'])
+
+ def test_valid(self):
+ self.assertTrue(self.rule.validate()[0])
+
+
+class PamdRuleBadValidationTestCase(unittest.TestCase):
+ def setUp(self):
+ self.bad_type = PamdRule('foobar', 'optional', 'pam_keyinit.so', 'revoke')
+ self.bad_control_simple = PamdRule('account', 'foobar', 'pam_keyinit.so', 'revoke')
+ self.bad_control_value = PamdRule('account', '[foobar=1 default=ignore]', 'pam_keyinit.so', 'revoke')
+ self.bad_control_action = PamdRule('account', '[success=1 default=foobar]', 'pam_keyinit.so', 'revoke')
+
+ def test_validate_bad_type(self):
+ self.assertFalse(self.bad_type.validate()[0])
+
+ def test_validate_bad_control_simple(self):
+ self.assertFalse(self.bad_control_simple.validate()[0])
+
+ def test_validate_bad_control_value(self):
+ self.assertFalse(self.bad_control_value.validate()[0])
+
+ def test_validate_bad_control_action(self):
+ self.assertFalse(self.bad_control_action.validate()[0])
+
+
+class PamdServiceTestCase(unittest.TestCase):
+ def setUp(self):
+ self.system_auth_string = """#%PAM-1.0
+# This file is auto-generated.
+# User changes will be destroyed the next time authconfig is run.
+@include common-auth
+@include common-account
+@include common-session
+auth required pam_env.so
+auth sufficient pam_unix.so nullok try_first_pass
+auth requisite pam_succeed_if.so uid
+auth required pam_deny.so
+# Test comment
+auth sufficient pam_rootok.so
+
+account required pam_unix.so
+account sufficient pam_localuser.so
+account sufficient pam_succeed_if.so uid
+account [success=1 default=ignore] \
+ pam_succeed_if.so user = vagrant use_uid quiet
+account required pam_permit.so
+account required pam_access.so listsep=,
+session include system-auth
+
+password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
+password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok
+password required pam_deny.so
+
+session optional pam_keyinit.so revoke
+session required pam_limits.so
+-session optional pam_systemd.so
+session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
+session [success=1 test=me default=ignore] pam_succeed_if.so service in crond quiet use_uid
+session required pam_unix.so"""
+
+ self.simple_system_auth_string = """#%PAM-1.0
+ auth required pam_env.so
+"""
+
+ self.no_header_system_auth_string = """auth required pam_env.so
+auth sufficient pam_unix.so nullok try_first_pass
+auth requisite pam_succeed_if.so uid
+auth required pam_deny.so
+"""
+
+ self.pamd = PamdService(self.system_auth_string)
+
+ def test_properly_parsed(self):
+ num_lines = len(self.system_auth_string.splitlines()) + 1
+ num_lines_processed = len(str(self.pamd).splitlines())
+ self.assertEqual(num_lines, num_lines_processed)
+
+ def test_has_rule(self):
+ self.assertTrue(self.pamd.has_rule('account', 'required', 'pam_permit.so'))
+ self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so'))
+
+ def test_doesnt_have_rule(self):
+ self.assertFalse(self.pamd.has_rule('account', 'requisite', 'pam_permit.so'))
+
+ # Test Update
+ def test_update_rule_type(self):
+ self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_type='account'))
+ self.assertTrue(self.pamd.has_rule('account', 'optional', 'pam_keyinit.so'))
+ test_rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_that_doesnt_exist(self):
+ self.assertFalse(self.pamd.update_rule('blah', 'blah', 'blah', new_type='account'))
+ self.assertFalse(self.pamd.has_rule('blah', 'blah', 'blah'))
+ test_rule = PamdRule('blah', 'blah', 'blah', 'account')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_type_two(self):
+ self.assertTrue(self.pamd.update_rule('session', '[success=1 default=ignore]', 'pam_succeed_if.so', new_type='account'))
+ self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('account', '[success=1 default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_simple(self):
+ self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_control='required'))
+ self.assertTrue(self.pamd.has_rule('session', 'required', 'pam_keyinit.so'))
+ test_rule = PamdRule('session', 'required', 'pam_keyinit.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_complex(self):
+ self.assertTrue(self.pamd.update_rule('session',
+ '[success=1 default=ignore]',
+ 'pam_succeed_if.so',
+ new_control='[success=2 test=me default=ignore]'))
+ self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_more_complex(self):
+
+ self.assertTrue(self.pamd.update_rule('session',
+ '[success=1 test=me default=ignore]',
+ 'pam_succeed_if.so',
+ new_control='[success=2 test=me default=ignore]'))
+ self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_module_path(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_path='pam_limits.so'))
+ self.assertTrue(self.pamd.has_rule('auth', 'required', 'pam_limits.so'))
+
+ def test_update_rule_module_path_slash(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_path='/lib64/security/pam_duo.so'))
+ self.assertTrue(self.pamd.has_rule('auth', 'required', '/lib64/security/pam_duo.so'))
+
+ def test_update_rule_module_args(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='uid uid'))
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'uid uid')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_first_three(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so',
+ new_type='one', new_control='two', new_path='three'))
+ self.assertTrue(self.pamd.has_rule('one', 'two', 'three'))
+
+ def test_update_first_three_with_module_args(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so',
+ new_type='one', new_control='two', new_path='three'))
+ self.assertTrue(self.pamd.has_rule('one', 'two', 'three'))
+ test_rule = PamdRule('one', 'two', 'three')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_all_four(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so',
+ new_type='one', new_control='two', new_path='three',
+ new_args='four five'))
+ test_rule = PamdRule('one', 'two', 'three', 'four five')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_with_slash(self):
+ self.assertTrue(self.pamd.update_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so',
+ new_type='session', new_path='pam_access.so'))
+ test_rule = PamdRule('session', '[success=1 default=ignore]', 'pam_access.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ # Insert Before
+ def test_insert_before_rule(self):
+
+ count = self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+ self.assertEqual(count, 1)
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so"))
+
+ def test_insert_before_rule_where_rule_doesnt_exist(self):
+
+ count = self.pamd.insert_before('account', 'sufficient', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+ self.assertFalse(count)
+
+ def test_insert_before_rule_with_args(self):
+ self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so',
+ new_args='uid'))
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so", 'uid'))
+
+ def test_insert_before_rule_test_duplicates(self):
+ self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ previous_rule = current_rule.prev
+ self.assertTrue(previous_rule.matches("account", "required", "pam_limits.so"))
+ self.assertFalse(previous_rule.prev.matches("account", "required", "pam_limits.so"))
+
+ def test_insert_before_first_rule(self):
+ self.assertTrue(self.pamd.insert_before('auth', 'required', 'pam_env.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ def test_insert_before_first_rule_simple(self):
+ simple_service = PamdService(self.simple_system_auth_string)
+ self.assertTrue(simple_service.insert_before('auth', 'required', 'pam_env.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ # Insert After
+ def test_insert_after_rule(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_unix.so',
+ new_type='account', new_control='required', new_path='pam_permit.so'))
+ rules = self.pamd.get("account", "required", "pam_unix.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so"))
+
+ def test_insert_after_rule_with_args(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ def test_insert_after_test_duplicates(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ self.assertFalse(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+ self.assertFalse(current_rule.next.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ def test_insert_after_rule_last_rule(self):
+ self.assertTrue(self.pamd.insert_after('session', 'required', 'pam_unix.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ rules = self.pamd.get("session", "required", "pam_unix.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ # Remove Module Arguments
+ def test_remove_module_arguments_one(self):
+ self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', 'nullok'))
+
+ def test_remove_module_arguments_one_list(self):
+ self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', ['nullok']))
+
+ def test_remove_module_arguments_two(self):
+ self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', 'service crond'))
+
+ def test_remove_module_arguments_two_list(self):
+ self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', ['service', 'crond']))
+
+ def test_remove_module_arguments_where_none_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('session', 'required', 'pam_limits.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_add_module_arguments_where_none_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_add_module_arguments_where_none_existed_list(self):
+ self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', ['arg1', 'arg2=', 'arg3=arg3']))
+
+ def test_add_module_arguments_where_some_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('auth', 'sufficient', 'pam_unix.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_remove_rule(self):
+ self.assertTrue(self.pamd.remove('account', 'required', 'pam_unix.so'))
+ # Second run should not change anything
+ self.assertFalse(self.pamd.remove('account', 'required', 'pam_unix.so'))
+ test_rule = PamdRule('account', 'required', 'pam_unix.so')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_remove_first_rule(self):
+ no_header_service = PamdService(self.no_header_system_auth_string)
+ self.assertTrue(no_header_service.remove('auth', 'required', 'pam_env.so'))
+ test_rule = PamdRule('auth', 'required', 'pam_env.so')
+ self.assertNotIn(str(test_rule), str(no_header_service))
+
+ def test_remove_last_rule(self):
+ self.assertTrue(self.pamd.remove('session', 'required', 'pam_unix.so'))
+ test_rule = PamdRule('session', 'required', 'pam_unix.so')
+ self.assertNotIn(str(test_rule), str(self.pamd))
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_parted.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_parted.py
new file mode 100644
index 00000000..18faf6a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_parted.py
@@ -0,0 +1,345 @@
+# (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch, call
+from ansible_collections.community.general.plugins.modules.system import parted as parted_module
+from ansible_collections.community.general.plugins.modules.system.parted import parse_parted_version
+from ansible_collections.community.general.plugins.modules.system.parted import parse_partition_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+# Example of output : parted -s -m /dev/sdb -- unit 'MB' print
+parted_output1 = """
+BYT;
+/dev/sdb:286061MB:scsi:512:512:msdos:ATA TOSHIBA THNSFJ25:;
+1:1.05MB:106MB:105MB:fat32::esp;
+2:106MB:368MB:262MB:ext2::;
+3:368MB:256061MB:255692MB:::;"""
+
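+# Raw output of `parted --version` mapped to the version tuple that
+# parse_parted_version() is expected to return (see test_version_info below).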
+parted_version_info = {"""
+ parted (GNU parted) 3.3
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 3, 0), """
+ parted (GNU parted) 3.4.5
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 4, 5), """
+ parted (GNU parted) 3.3.14-dfc61
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 3, 14)}
+
+# corresponding dictionary after parsing by parse_partition_info
+parted_dict1 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": [{
+ "num": 1,
+ "begin": 1.05,
+ "end": 106.0,
+ "size": 105.0,
+ "fstype": "fat32",
+ "name": '',
+ "flags": ["esp"],
+ "unit": "mb"
+ }, {
+ "num": 2,
+ "begin": 106.0,
+ "end": 368.0,
+ "size": 262.0,
+ "fstype": "ext2",
+ "name": '',
+ "flags": [],
+ "unit": "mb"
+ }, {
+ "num": 3,
+ "begin": 368.0,
+ "end": 256061.0,
+ "size": 255692.0,
+ "fstype": "",
+ "name": '',
+ "flags": [],
+ "unit": "mb"
+ }]
+}
+
+parted_output2 = """
+BYT;
+/dev/sdb:286061MB:scsi:512:512:msdos:ATA TOSHIBA THNSFJ25:;"""
+
+# corresponding dictionary after parsing by parse_partition_info
+parted_dict2 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": []
+}
+
+# fake some_flag exists
+parted_dict3 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": [{
+ "num": 1,
+ "begin": 1.05,
+ "end": 106.0,
+ "size": 105.0,
+ "fstype": "fat32",
+ "name": '',
+ "flags": ["some_flag"],
+ "unit": "mb"
+ }]
+}
+
+
+class TestParted(ModuleTestCase):
+ def setUp(self):
+ super(TestParted, self).setUp()
+
+ self.module = parted_module
+ self.mock_check_parted_label = (patch('ansible_collections.community.general.plugins.modules.system.parted.check_parted_label', return_value=False))
+ self.check_parted_label = self.mock_check_parted_label.start()
+
+ self.mock_parted = (patch('ansible_collections.community.general.plugins.modules.system.parted.parted'))
+ self.parted = self.mock_parted.start()
+
+ self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command'))
+ self.run_command = self.mock_run_command.start()
+
+ self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ super(TestParted, self).tearDown()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ self.mock_parted.stop()
+ self.mock_check_parted_label.stop()
+
+ def execute_module(self, failed=False, changed=False, script=None):
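+ """Run the module and assert the expected failed/changed outcome; optionally check the generated parted script."""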
+ if failed:
+ result = self.failed()
+ self.assertTrue(result['failed'], result)
+ else:
+ result = self.changed(changed)
+ self.assertEqual(result['changed'], changed, result)
+
+ if script:
+ self.assertEqual(script, result['script'], result['script'])
+
+ return result
+
+ def failed(self):
+ with self.assertRaises(AnsibleFailJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertTrue(result['failed'], result)
+ return result
+
+ def changed(self, changed=False):
+ with self.assertRaises(AnsibleExitJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertEqual(result['changed'], changed, result)
+ return result
+
+ def test_parse_partition_info(self):
+ """Test that the parse_partition_info returns the expected dictionary"""
+ self.assertEqual(parse_partition_info(parted_output1, 'MB'), parted_dict1)
+ self.assertEqual(parse_partition_info(parted_output2, 'MB'), parted_dict2)
+
+ def test_partition_already_exists(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=False)
+
+ def test_create_new_partition(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary 0% 100%')
+
+ def test_create_new_partition_1G(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ 'part_end': '1GiB',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary 0% 1GiB')
+
+ def test_create_new_partition_minus_1G(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ 'fs_type': 'ext2',
+ 'part_start': '-1GiB',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary ext2 -1GiB 100%')
+
+ def test_remove_partition_number_1(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'absent',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='rm 1')
+
+ def test_resize_partition(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 3,
+ 'state': 'present',
+ 'part_end': '100%',
+ 'resize': True
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='resizepart 3 100%')
+
+ def test_change_flag(self):
+ # Flags are set in a second run of parted().
+ # Between the two runs, the partition dict is updated.
+ # Using check mode here allows us to continue even if the dictionary is
+ # not updated.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 3,
+ 'state': 'present',
+ 'flags': ['lvm', 'boot'],
+ '_ansible_check_mode': True,
+ })
+
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.parted.reset_mock()
+ self.execute_module(changed=True)
+ # When using multiple flags, the order of execution is non-deterministic,
+ # because the current implementation uses set() operations.
+ expected_calls_order1 = [call('unit KiB set 3 lvm on set 3 boot on ',
+ '/dev/sdb', 'optimal')]
+ expected_calls_order2 = [call('unit KiB set 3 boot on set 3 lvm on ',
+ '/dev/sdb', 'optimal')]
+ self.assertTrue(self.parted.mock_calls == expected_calls_order1 or
+ self.parted.mock_calls == expected_calls_order2)
+
+ def test_create_new_primary_lvm_partition(self):
+ # use check_mode, see previous test comment
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'flags': ["boot"],
+ 'state': 'present',
+ 'part_start': '257GiB',
+ 'fs_type': 'ext3',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary ext3 257GiB 100% unit KiB set 4 boot on')
+
+ def test_create_label_gpt(self):
+ # Like the previous test: the current implementation uses parted to create the
+ # partition and then retrieves and updates the dictionary. Use check_mode to
+ # continue even if the dictionary is not updated.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'flags': ["lvm"],
+ 'label': 'gpt',
+ 'name': 'lvmpartition',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict2):
+ self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100% unit KiB name 1 \'"lvmpartition"\' set 1 lvm on')
+
+ def test_change_label_gpt(self):
+ # When the partition already exists and the label is changed, mkpart should still
+ # be called, because a new empty label will be created anyway.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'label': 'gpt',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100%')
+
+ def test_check_mode_unchanged(self):
+ # Test that get_device_info result is checked in check mode too
+ # No change on partition 1
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'flags': ['some_flag'],
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict3):
+ self.execute_module(changed=False)
+
+ def test_check_mode_changed(self):
+ # Test that get_device_info result is checked in check mode too
+ # Flag change on partition 1
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'flags': ['other_flag'],
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.system.parted.get_device_info', return_value=parted_dict3):
+ self.execute_module(changed=True)
+
+ def test_version_info(self):
+ """Test that the parse_parted_version returns the expected tuple"""
+ for key, value in parted_version_info.items():
+ self.assertEqual(parse_parted_version(key), value)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_solaris_zone.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_solaris_zone.py
new file mode 100644
index 00000000..4cf5c5ff
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_solaris_zone.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2020 Justin Bronn <jbronn@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import platform
+
+import pytest
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules.system import (
+ solaris_zone
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ set_module_args,
+)
+
+
+ZONEADM = "/usr/sbin/zoneadm"
+
+
+def mocker_zone_set(mocker, rc=0, out="", err="", zone_exists=False, zone_status=None):
+ """
+ Configure common mocker object for Solaris Zone tests
+ """
+ exists = mocker.patch.object(solaris_zone.Zone, "exists")
+ exists.return_value = zone_exists
+ get_bin_path = mocker.patch.object(AnsibleModule, "get_bin_path")
+ get_bin_path.return_value = ZONEADM
+ run_command = mocker.patch.object(AnsibleModule, "run_command")
+ run_command.return_value = (rc, out, err)
+ platform_release = mocker.patch.object(platform, "release")
+ platform_release.return_value = "5.11"
+ platform_system = mocker.patch.object(platform, "system")
+ platform_system.return_value = "SunOS"
+ if zone_status is not None:
+ status = mocker.patch.object(solaris_zone.Zone, "status")
+ status.return_value = zone_status
+
+
+@pytest.fixture
+def mocked_zone_create(mocker):
+ mocker_zone_set(mocker)
+
+
+@pytest.fixture
+def mocked_zone_delete(mocker):
+ mocker_zone_set(mocker, zone_exists=True, zone_status="running")
+
+
+def test_zone_create(mocked_zone_create, capfd):
+ """
+ test zone creation
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "installed",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_delete(mocked_zone_delete, capfd):
+ """
+ test zone deletion
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "absent",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_create_invalid_names(mocked_zone_create, capfd):
+ """
+ test zone creation with invalid names
+ """
+ # 1. Invalid character ('!').
+ # 2. Zone name > 64 characters.
+ # 3. Zone name beginning with non-alphanumeric character.
+ for invalid_name in ('foo!bar', 'z' * 65, '_zone'):
+ set_module_args(
+ {
+ "name": invalid_name,
+ "state": "installed",
+ "path": "/zones/" + invalid_name,
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get("failed")
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_sysupgrade.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_sysupgrade.py
new file mode 100644
index 00000000..1ea8bf20
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_sysupgrade.py
@@ -0,0 +1,67 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
+from ansible_collections.community.general.plugins.modules.system import sysupgrade
+
+
+class TestSysupgradeModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestSysupgradeModule, self).setUp()
+ self.module = sysupgrade
+ self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ super(TestSysupgradeModule, self).tearDown()
+ self.mock_get_bin_path.stop()
+
+ def test_upgrade_success(self):
+ """ Upgrade was successful """
+
+ rc = 0
+ stdout = """
+ SHA256.sig 100% |*************************************| 2141 00:00
+ Signature Verified
+ INSTALL.amd64 100% |************************************| 43512 00:00
+ base67.tgz 100% |*************************************| 238 MB 02:16
+ bsd 100% |*************************************| 18117 KB 00:24
+ bsd.mp 100% |*************************************| 18195 KB 00:17
+ bsd.rd 100% |*************************************| 10109 KB 00:14
+ comp67.tgz 100% |*************************************| 74451 KB 00:53
+ game67.tgz 100% |*************************************| 2745 KB 00:03
+ man67.tgz 100% |*************************************| 7464 KB 00:04
+ xbase67.tgz 100% |*************************************| 22912 KB 00:30
+ xfont67.tgz 100% |*************************************| 39342 KB 00:28
+ xserv67.tgz 100% |*************************************| 16767 KB 00:24
+ xshare67.tgz 100% |*************************************| 4499 KB 00:06
+ Verifying sets.
+ Fetching updated firmware.
+ Will upgrade on next reboot
+ """
+ stderr = ""
+
+ with patch.object(basic.AnsibleModule, "run_command") as run_command:
+ run_command.return_value = (rc, stdout, stderr)
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_upgrade_failed(self):
+ """ Upgrade failed """
+
+ rc = 1
+ stdout = ""
+ stderr = "sysupgrade: need root privileges"
+
+ with patch.object(basic.AnsibleModule, "run_command") as run_command_mock:
+ run_command_mock.return_value = (rc, stdout, stderr)
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['failed'])
+ self.assertIn('need root', result.exception.args[0]['msg'])
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_ufw.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_ufw.py
new file mode 100644
index 00000000..3374c493
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_ufw.py
@@ -0,0 +1,438 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+import ansible_collections.community.general.plugins.modules.system.ufw as module
+
+import json
+
+
+# mock ufw messages
+
+ufw_version_35 = """ufw 0.35\nCopyright 2008-2015 Canonical Ltd.\n"""
+
+ufw_verbose_header = """Status: active
+Logging: on (low)
+Default: deny (incoming), allow (outgoing), deny (routed)
+New profiles: skip
+
+To Action From
+-- ------ ----"""
+
+
+ufw_status_verbose_with_port_7000 = ufw_verbose_header + """
+7000/tcp ALLOW IN Anywhere
+7000/tcp (v6) ALLOW IN Anywhere (v6)
+"""
+
+user_rules_with_port_7000 = """### tuple ### allow tcp 7000 0.0.0.0/0 any 0.0.0.0/0 in
+### tuple ### allow tcp 7000 ::/0 any ::/0 in
+"""
+
+user_rules_with_ipv6 = """### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in
+### tuple ### allow udp 5353 ::/0 any ff02::fb in
+"""
+
+ufw_status_verbose_with_ipv6 = ufw_verbose_header + """
+5353/udp ALLOW IN 224.0.0.251
+5353/udp ALLOW IN ff02::fb
+"""
+
+ufw_status_verbose_nothing = ufw_verbose_header
+
+skipping_adding_existing_rules = "Skipping adding existing rule\nSkipping adding existing rule (v6)\n"
+
+grep_config_cli = "grep -h '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules "
+grep_config_cli += "/var/lib/ufw/user.rules /var/lib/ufw/user6.rules"
+
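+# The dry_mode_cmd_* dicts below map the exact command string the module runs
+# to the stdout that the mocked run_command returns; the do_nothing_func_*
+# helpers further down look each command up by its first argument.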
+dry_mode_cmd_with_port_700 = {
+ "ufw status verbose": ufw_status_verbose_with_port_7000,
+ "ufw --version": ufw_version_35,
+ "ufw --dry-run allow from any to any port 7000 proto tcp": skippg_adding_existing_rules,
+ "ufw --dry-run delete allow from any to any port 7000 proto tcp": "",
+ "ufw --dry-run delete allow from any to any port 7001 proto tcp": user_rules_with_port_7000,
+ "ufw --dry-run route allow in on foo out on bar from 1.1.1.1 port 7000 to 8.8.8.8 port 7001 proto tcp": "",
+ "ufw --dry-run allow in on foo from any to any port 7003 proto tcp": "",
+ "ufw --dry-run allow in on foo from 1.1.1.1 port 7002 to 8.8.8.8 port 7003 proto tcp": "",
+ "ufw --dry-run allow out on foo from any to any port 7004 proto tcp": "",
+ "ufw --dry-run allow out on foo from 1.1.1.1 port 7003 to 8.8.8.8 port 7004 proto tcp": "",
+ grep_config_cli: user_rules_with_port_7000
+}
+
+# Setup configuration:
+# ufw reset
+# ufw enable
+# ufw allow proto udp to any port 5353 from 224.0.0.251
+# ufw allow proto udp to any port 5353 from ff02::fb
+dry_mode_cmd_with_ipv6 = {
+ "ufw status verbose": ufw_status_verbose_with_ipv6,
+ "ufw --version": ufw_version_35,
+ # Output of: sudo ufw --dry-run delete allow in from ff02::fb port 5353 proto udp | grep -E "^### tuple"
+ "ufw --dry-run delete allow from ff02::fb to any port 5353 proto udp": "### tuple ### allow udp any ::/0 5353 ff02::fb in",
+ grep_config_cli: user_rules_with_ipv6,
+ "ufw --dry-run allow from ff02::fb to any port 5353 proto udp": skippg_adding_existing_rules,
+ "ufw --dry-run allow from 224.0.0.252 to any port 5353 proto udp": """### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in
+### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.252 in
+""",
+ "ufw --dry-run allow from 10.0.0.0/24 to any port 1577 proto udp": "### tuple ### allow udp 1577 0.0.0.0/0 any 10.0.0.0/24 in"
+}
+
+dry_mode_cmd_nothing = {
+ "ufw status verbose": ufw_status_verbose_nothing,
+ "ufw --version": ufw_version_35,
+ grep_config_cli: "",
+ "ufw --dry-run allow from any to :: port 23": "### tuple ### allow any 23 :: any ::/0 in"
+}
+
+
+def do_nothing_func_nothing(*args, **kwarg):
+ return 0, dry_mode_cmd_nothing[args[0]], ""
+
+
+def do_nothing_func_ipv6(*args, **kwarg):
+ return 0, dry_mode_cmd_with_ipv6[args[0]], ""
+
+
+def do_nothing_func_port_7000(*args, **kwarg):
+ return 0, dry_mode_cmd_with_port_700[args[0]], ""
+
+
+def set_module_args(args):
+ """Prepare arguments so that they will be picked up during module creation."""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+class TestUFW(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_filter_line_that_contains_ipv4(self):
+ reg = module.compile_ipv4_regexp()
+
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 ::/0 any ff02::fb in") is None)
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in") is not None)
+
+ self.assertTrue(reg.match("ff02::fb") is None)
+ self.assertTrue(reg.match("224.0.0.251") is not None)
+ self.assertTrue(reg.match("10.0.0.0/8") is not None)
+ self.assertTrue(reg.match("somethingElse") is None)
+ self.assertTrue(reg.match("::") is None)
+ self.assertTrue(reg.match("any") is None)
+
+ def test_filter_line_that_contains_ipv6(self):
+ reg = module.compile_ipv6_regexp()
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 ::/0 any ff02::fb in") is not None)
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in") is None)
+ self.assertTrue(reg.search("### tuple ### allow any 23 :: any ::/0 in") is not None)
+ self.assertTrue(reg.match("ff02::fb") is not None)
+ self.assertTrue(reg.match("224.0.0.251") is None)
+ self.assertTrue(reg.match("::") is not None)
+
+ def test_check_mode_add_rules(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertFalse(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_detailed_route(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'route': 'yes',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'to_ip': '8.8.8.8',
+ 'from_port': '7000',
+ 'to_port': '7001',
+ '_ansible_check_mode': True
+ })
+
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_ambiguous_route(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'route': 'yes',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ 'direction': 'in',
+ 'interface': 'baz',
+ '_ansible_check_mode': True
+ })
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.__getResult(do_nothing_func_port_7000)
+
+ exc = result.exception.args[0]
+ self.assertTrue(exc['failed'])
+ self.assertIn('mutually exclusive', exc['msg'])
+
+ def test_check_mode_add_interface_in(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7003',
+ 'interface_in': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_interface_out(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'interface_out': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_non_route_interface_both(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ '_ansible_check_mode': True
+ })
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.__getResult(do_nothing_func_port_7000)
+
+ exc = result.exception.args[0]
+ self.assertTrue(exc['failed'])
+ self.assertIn('combine', exc['msg'])
+
+ def test_check_mode_add_direction_in(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7003',
+ 'direction': 'in',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_in_with_ip(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'from_port': '7002',
+ 'to_ip': '8.8.8.8',
+ 'to_port': '7003',
+ 'direction': 'in',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_out(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'direction': 'out',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_out_with_ip(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'from_port': '7003',
+ 'to_ip': '8.8.8.8',
+ 'to_port': '7004',
+ 'direction': 'out',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_delete_existing_rules(self):
+
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_check_mode_delete_not_existing_rules(self):
+
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7001',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_enable_mode(self):
+ set_module_args({
+ 'state': 'enabled',
+ '_ansible_check_mode': True
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_disable_mode(self):
+ set_module_args({
+ 'state': 'disabled',
+ '_ansible_check_mode': True
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_logging_off(self):
+ set_module_args({
+ 'logging': 'off',
+ '_ansible_check_mode': True
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_logging_on(self):
+ set_module_args({
+ 'logging': 'on',
+ '_ansible_check_mode': True
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_default_changed(self):
+ set_module_args({
+ 'default': 'allow',
+ "direction": "incoming",
+ '_ansible_check_mode': True
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_default_not_changed(self):
+ set_module_args({
+ 'default': 'deny',
+ "direction": "incoming",
+ '_ansible_check_mode': True
+ })
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_ipv6_remove(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': 'ff02::fb',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv6_add_existing(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': 'ff02::fb',
+ '_ansible_check_mode': True,
+ })
+ self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_add_not_existing_ipv4_submask(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '1577',
+ 'from': '10.0.0.0/24',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv4_add_with_existing_ipv6(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': '224.0.0.252',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv6_add_from_nothing(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'port': '23',
+ 'to': '::',
+ '_ansible_check_mode': True,
+ })
+ result = self.__getResult(do_nothing_func_nothing).exception.args[0]
+ print(result)
+ self.assertTrue(result['changed'])
+
+ def __getResult(self, cmd_fun):
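+ """Patch AnsibleModule.run_command with cmd_fun as side effect, run the module and return the AnsibleExitJson assertion context."""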
+ with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
+ mock_run_command.side_effect = cmd_fun
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+ return result
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_xfconf.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_xfconf.py
new file mode 100644
index 00000000..1002952c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/system/test_xfconf.py
@@ -0,0 +1,367 @@
+# Author: Alexei Znamensky (russoz@gmail.com)
+# Largely adapted from test_redhat_subscription by
+# Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules.system import xfconf
+
+import pytest
+
+TESTED_MODULE = xfconf.__name__
+
+
+@pytest.fixture
+def patch_xfconf(mocker):
+ """
+ Fixture used for mocking the get_bin_path lookup needed by the xfconf module
+ """
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.get_bin_path',
+ return_value='/testbin/xfconf-query')
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters(capfd, patch_xfconf):
+ """
+ Failure must occur when all required parameters are missing
+ """
+ with pytest.raises(SystemExit):
+ xfconf.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+
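+# Each TEST_CASES entry is a pair: the module arguments (fed in through the
+# patch_ansible_module fixture) and a dict of expectations, i.e. the mocked
+# run_command calls plus the values the module result should contain.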
+TEST_CASES = [
+ [
+ {'channel': 'xfwm4', 'property': '/general/inactive_opacity', 'state': 'get'},
+ {
+ 'id': 'test_simple_property_get',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '100\n', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': None,
+ 'value_type': None,
+ 'value': '100',
+ }
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/i_dont_exist', 'state': 'get'},
+ {
+ 'id': 'test_simple_property_get_nonexistent',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': None,
+ 'value_type': None,
+ 'value': None,
+ }
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/workspace_names', 'state': 'get'},
+ {
+ 'id': 'test_property_get_array',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': None,
+ 'value_type': None,
+ 'value': ['Main', 'Work', 'Tmp'],
+ },
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/use_compositing', 'state': 'get'},
+ {
+ 'id': 'test_property_get_bool',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'true', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': None,
+ 'value_type': None,
+ 'value': 'true',
+ },
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/use_compositing', 'state': 'get'},
+ {
+ 'id': 'test_property_get_bool_false',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/use_compositing'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'false', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': None,
+ 'value_type': None,
+ 'value': 'false',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/inactive_opacity',
+ 'state': 'present',
+ 'value_type': 'int',
+ 'value': 90,
+ },
+ {
+ 'id': 'test_property_set_property',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '100\n', '',),
+ ),
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity',
+ '--create', '--type', 'int', '--set', '90'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': '100',
+ 'value_type': 'int',
+ 'value': '90',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/inactive_opacity',
+ 'state': 'present',
+ 'value_type': 'int',
+ 'value': 90,
+ },
+ {
+ 'id': 'test_property_set_property_same_value',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '90\n', '',),
+ ),
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity',
+ '--create', '--type', 'int', '--set', '90'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': '90',
+ 'value_type': 'int',
+ 'value': '90',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'present',
+ 'value_type': 'string',
+ 'value': ['A', 'B', 'C'],
+ },
+ {
+ 'id': 'test_property_set_array',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',),
+ ),
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B',
+ '--type', 'string', '--set', 'C'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': ['Main', 'Work', 'Tmp'],
+ 'value_type': ['str', 'str', 'str'],
+ 'value': ['A', 'B', 'C'],
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'present',
+ 'value_type': 'string',
+ 'value': ['A', 'B', 'C'],
+ },
+ {
+ 'id': 'test_property_set_array_to_same_value',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',),
+ ),
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B',
+ '--type', 'string', '--set', 'C'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': ['A', 'B', 'C'],
+ 'value_type': ['str', 'str', 'str'],
+ 'value': ['A', 'B', 'C'],
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'absent',
+ },
+ {
+ 'id': 'test_property_reset_value',
+ 'run_command.calls': [
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',),
+ ),
+ (
+ # Calling of following command will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--reset'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': ['A', 'B', 'C'],
+ 'value_type': None,
+ 'value': None,
+ },
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_xfconf(mocker, capfd, patch_xfconf, testcase):
+ """
+ Run unit tests for the test cases listed in TEST_CASES
+ """
+
+ # Mock function used for running commands first
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ xfconf.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % testcase)
+ print("results =\n%s" % results)
+
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+
+ for test_result in ('channel', 'property'):
+ assert test_result in results, "'{0}' not found in {1}".format(test_result, results)
+ assert results[test_result] == results['invocation']['module_args'][test_result], \
+ "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], results['invocation']['module_args'][test_result])
+
+ for conditional_test_result in ('msg', 'value', 'previous_value'):
+ if conditional_test_result in testcase:
+ assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results)
+ assert results[conditional_test_result] == testcase[conditional_test_result], \
+ "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result])
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/utils.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/utils.py
new file mode 100644
index 00000000..1a28072b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/utils.py
@@ -0,0 +1,52 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+
+def set_module_args(args):
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class ModuleTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
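
These helpers are meant to be imported by individual module tests: set_module_args() injects the module parameters, while ModuleTestCase patches exit_json/fail_json so they raise AnsibleExitJson/AnsibleFailJson instead of terminating the process. A short, hypothetical usage example (example_module is a placeholder, not a module shipped in this patch):

    from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
        AnsibleFailJson, ModuleTestCase, set_module_args)


    class TestExampleModule(ModuleTestCase):
        def test_without_required_parameters(self):
            # With empty arguments AnsibleModule calls fail_json, which is patched to raise.
            set_module_args({})
            with self.assertRaises(AnsibleFailJson):
                example_module.main()  # placeholder for the module under test
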
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/__init__.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py
new file mode 100644
index 00000000..db3f04dd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py
@@ -0,0 +1,22 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.web_infrastructure.apache2_module import create_apache_identifier
+
+REPLACEMENTS = [
+ ('php7.1', 'php7_module'),
+ ('php5.6', 'php5_module'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ('thismoduledoesnotexist', 'thismoduledoesnotexist_module'), # the default
+]
+
+
+@pytest.mark.parametrize("replacement", REPLACEMENTS, ids=lambda x: x[0])
+def test_apache_identifier(replacement):
+ "test the correct replacement of an a2enmod name with an apache2ctl name"
+ assert create_apache_identifier(replacement[0]) == replacement[1]
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py
new file mode 100644
index 00000000..ccfeb245
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py
@@ -0,0 +1,153 @@
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import BytesIO
+
+from ansible_collections.community.general.plugins.modules.web_infrastructure.jenkins_plugin import JenkinsPlugin
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+def pass_function(*args, **kwargs):
+ pass
+
+
+GITHUB_DATA = {"url": u'https://api.github.com/repos/ansible/ansible',
+ "response": b"""
+{
+ "id": 3638964,
+ "name": "ansible",
+ "full_name": "ansible/ansible",
+ "owner": {
+ "login": "ansible",
+ "id": 1507452,
+ "avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/ansible",
+ "html_url": "https://github.com/ansible",
+ "followers_url": "https://api.github.com/users/ansible/followers",
+ "following_url": "https://api.github.com/users/ansible/following{/other_user}",
+ "gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
+ "organizations_url": "https://api.github.com/users/ansible/orgs",
+ "repos_url": "https://api.github.com/users/ansible/repos",
+ "events_url": "https://api.github.com/users/ansible/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/ansible/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/ansible/ansible",
+ "description": "Ansible is a radically simple IT automation platform that makes your applications and systems easier to deploy.",
+ "fork": false,
+ "url": "https://api.github.com/repos/ansible/ansible",
+ "forks_url": "https://api.github.com/repos/ansible/ansible/forks",
+ "keys_url": "https://api.github.com/repos/ansible/ansible/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/ansible/ansible/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/ansible/ansible/teams",
+ "hooks_url": "https://api.github.com/repos/ansible/ansible/hooks",
+ "issue_events_url": "https://api.github.com/repos/ansible/ansible/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/ansible/ansible/events",
+ "assignees_url": "https://api.github.com/repos/ansible/ansible/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/ansible/ansible/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/ansible/ansible/tags",
+ "blobs_url": "https://api.github.com/repos/ansible/ansible/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/ansible/ansible/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/ansible/ansible/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/ansible/ansible/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/ansible/ansible/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/ansible/ansible/languages",
+ "stargazers_url": "https://api.github.com/repos/ansible/ansible/stargazers",
+ "contributors_url": "https://api.github.com/repos/ansible/ansible/contributors",
+ "subscribers_url": "https://api.github.com/repos/ansible/ansible/subscribers",
+ "subscription_url": "https://api.github.com/repos/ansible/ansible/subscription",
+ "commits_url": "https://api.github.com/repos/ansible/ansible/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/ansible/ansible/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/ansible/ansible/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/ansible/ansible/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/ansible/ansible/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/ansible/ansible/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/ansible/ansible/merges",
+ "archive_url": "https://api.github.com/repos/ansible/ansible/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/ansible/ansible/downloads",
+ "issues_url": "https://api.github.com/repos/ansible/ansible/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/ansible/ansible/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/ansible/ansible/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/ansible/ansible/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/ansible/ansible/labels{/name}",
+ "releases_url": "https://api.github.com/repos/ansible/ansible/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/ansible/ansible/deployments",
+ "created_at": "2012-03-06T14:58:02Z",
+ "updated_at": "2017-09-19T18:10:54Z",
+ "pushed_at": "2017-09-19T18:04:51Z",
+ "git_url": "git://github.com/ansible/ansible.git",
+ "ssh_url": "git@github.com:ansible/ansible.git",
+ "clone_url": "https://github.com/ansible/ansible.git",
+ "svn_url": "https://github.com/ansible/ansible",
+ "homepage": "https://www.ansible.com/",
+ "size": 91174,
+ "stargazers_count": 25552,
+ "watchers_count": 25552,
+ "language": "Python",
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": false,
+ "has_pages": false,
+ "forks_count": 8893,
+ "mirror_url": null,
+ "open_issues_count": 4283,
+ "forks": 8893,
+ "open_issues": 4283,
+ "watchers": 25552,
+ "default_branch": "devel",
+ "organization": {
+ "login": "ansible",
+ "id": 1507452,
+ "avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/ansible",
+ "html_url": "https://github.com/ansible",
+ "followers_url": "https://api.github.com/users/ansible/followers",
+ "following_url": "https://api.github.com/users/ansible/following{/other_user}",
+ "gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
+ "organizations_url": "https://api.github.com/users/ansible/orgs",
+ "repos_url": "https://api.github.com/users/ansible/repos",
+ "events_url": "https://api.github.com/users/ansible/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/ansible/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "network_count": 8893,
+ "subscribers_count": 1733
+}
+"""
+ }
+
+
+def test__get_json_data(mocker):
+    "test the json conversion performed by _get_json_data on mocked _get_url_data output"
+
+ timeout = 30
+ params = {
+ 'url': GITHUB_DATA['url'],
+ 'timeout': timeout
+ }
+ module = mocker.Mock()
+ module.params = params
+
+ JenkinsPlugin._csrf_enabled = pass_function
+ JenkinsPlugin._get_installed_plugins = pass_function
+ JenkinsPlugin._get_url_data = mocker.Mock()
+ JenkinsPlugin._get_url_data.return_value = BytesIO(GITHUB_DATA['response'])
+ jenkins_plugin = JenkinsPlugin(module)
+
+ json_data = jenkins_plugin._get_json_data(
+ "{url}".format(url=GITHUB_DATA['url']),
+ 'CSRF')
+
+ assert isinstance(json_data, Mapping)
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/unit/requirements.txt b/collections-debian-merged/ansible_collections/community/general/tests/unit/requirements.txt
new file mode 100644
index 00000000..4cd5fe4d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/unit/requirements.txt
@@ -0,0 +1,23 @@
+unittest2 ; python_version < '2.7'
+importlib ; python_version < '2.7'
+
+# requirement for the memcached cache plugin
+python-memcached
+
+# requirement for the redis cache plugin
+redis
+
+# requirement for the linode module
+linode-python # APIv3
+linode_api4 ; python_version > '2.6' # APIv4
+
+# requirement for the gitlab module
+python-gitlab < 2.3.0 # version 2.3.0 makes gitlab_runner tests fail
+httmock
+
+# requirement for kubevirt modules
+openshift ; python_version >= '2.7'
+
+# requirement for maven_artifact module
+lxml
+semantic_version
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/constraints.txt b/collections-debian-merged/ansible_collections/community/general/tests/utils/constraints.txt
new file mode 100644
index 00000000..c5db3156
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/constraints.txt
@@ -0,0 +1,54 @@
+coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+cryptography >= 3.0, < 3.4 ; python_version < '3.6' # cryptography 3.4 drops support for python 2.7
+cryptography >= 3.3, < 3.4 # FIXME: the upper limit is needed for RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15
+deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+pywinrm >= 0.3.0 # message encryption support
+sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
+sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
+pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
+pycrypto >= 2.6 # Need features found in 2.6 and greater
+ncclient >= 0.5.2 # Need features added in 0.5.2 and greater
+idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
+pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
+pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+voluptuous >= 0.11.0 # Schema recursion via Self
+openshift >= 0.6.2, < 0.9.0 # merge_type support
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+pyfmg == 0.6.1 # newer versions do not pass current unit tests
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later
+pyone == 1.1.9 # newer versions do not pass current integration tests
+boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support
+botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support
+botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca
+setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later
+cffi >= 1.14.2, != 1.14.3 # Yanked version which older versions of pip will still install:
+
+# freeze pylint and its requirements for consistent test results
+astroid == 2.2.5
+isort == 4.3.15
+lazy-object-proxy == 1.3.1
+mccabe == 0.6.1
+pylint == 2.3.1
+typed-ast == 1.4.0 # 1.4.0 is required to compile on Python 3.8
+wrapt == 1.11.1
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/aix.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/aix.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/aix.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
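
Like the other platform wrappers below, aix.sh splits its single positional argument on '/' and ':' to obtain the remote platform, version and optional test group. Purely as an illustration of that parsing:

    IFS='/:' read -ra args <<< "aix/7.2/1"
    echo "${args[0]} ${args[1]} ${args[2]}"    # prints: aix 7.2 1
    # with a third component present, the target becomes shippable/posix/group1/
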
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/check_matrix.py b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/check_matrix.py
new file mode 100755
index 00000000..ca56c4db
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/check_matrix.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+"""Verify the currently executing Shippable test matrix matches the one defined in the "shippable.yml" file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import os
+import re
+import sys
+import time
+
+try:
+ from typing import NoReturn
+except ImportError:
+ NoReturn = None
+
+try:
+ # noinspection PyCompatibility
+ from urllib2 import urlopen # pylint: disable=ansible-bad-import-from
+except ImportError:
+ # noinspection PyCompatibility
+ from urllib.request import urlopen
+
+
+def main(): # type: () -> None
+ """Main entry point."""
+ repo_full_name = os.environ['REPO_FULL_NAME']
+ required_repo_full_name = 'ansible-collections/community.general'
+
+ if repo_full_name != required_repo_full_name:
+ sys.stderr.write('Skipping matrix check on repo "%s" which is not "%s".\n' % (repo_full_name, required_repo_full_name))
+ return
+
+ with open('shippable.yml', 'rb') as yaml_file:
+ yaml = yaml_file.read().decode('utf-8').splitlines()
+
+ defined_matrix = [match.group(1) for match in [re.search(r'^ *- env: T=(.*)$', line) for line in yaml] if match and match.group(1) != 'none']
+
+ if not defined_matrix:
+ fail('No matrix entries found in the "shippable.yml" file.',
+ 'Did you modify the "shippable.yml" file?')
+
+ run_id = os.environ['SHIPPABLE_BUILD_ID']
+ sleep = 1
+ jobs = []
+
+ for attempts_remaining in range(4, -1, -1):
+ try:
+ jobs = json.loads(urlopen('https://api.shippable.com/jobs?runIds=%s' % run_id).read())
+
+ if not isinstance(jobs, list):
+ raise Exception('Shippable run %s data is not a list.' % run_id)
+
+ break
+ except Exception as ex:
+ if not attempts_remaining:
+ fail('Unable to retrieve Shippable run %s matrix.' % run_id,
+ str(ex))
+
+ sys.stderr.write('Unable to retrieve Shippable run %s matrix: %s\n' % (run_id, ex))
+ sys.stderr.write('Trying again in %d seconds...\n' % sleep)
+ time.sleep(sleep)
+ sleep *= 2
+
+ if len(jobs) != len(defined_matrix):
+ if len(jobs) == 1:
+ hint = '\n\nMake sure you do not use the "Rebuild with SSH" option.'
+ else:
+ hint = ''
+
+ fail('Shippable run %s has %d jobs instead of the expected %d jobs.' % (run_id, len(jobs), len(defined_matrix)),
+ 'Try re-running the entire matrix.%s' % hint)
+
+ actual_matrix = dict((job.get('jobNumber'), dict(tuple(line.split('=', 1)) for line in job.get('env', [])).get('T', '')) for job in jobs)
+ errors = [(job_number, test, actual_matrix.get(job_number)) for job_number, test in enumerate(defined_matrix, 1) if actual_matrix.get(job_number) != test]
+
+ if len(errors):
+ error_summary = '\n'.join('Job %s expected "%s" but found "%s" instead.' % (job_number, expected, actual) for job_number, expected, actual in errors)
+
+ fail('Shippable run %s has a job matrix mismatch.' % run_id,
+ 'Try re-running the entire matrix.\n\n%s' % error_summary)
+
+
+def fail(message, output): # type: (str, str) -> NoReturn
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '\n' + output.strip()
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which isn't pre-installed on Shippable outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ path = 'shippable/testresults/check-matrix.xml'
+ dir_path = os.path.dirname(path)
+
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ with open(path, 'w') as junit_fd:
+ junit_fd.write(xml.lstrip())
+
+ sys.stderr.write(message + '\n')
+ sys.stderr.write(output + '\n')
+
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
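
The comparison above pairs the ordered T= entries from shippable.yml with the T environment value of each job returned by the Shippable API, keyed by 1-based job number. A minimal sketch with made-up data:

    defined_matrix = ['devel/sanity/1', 'devel/units/2.7/1']
    actual_matrix = {1: 'devel/sanity/1', 2: 'devel/units/2.6/1'}
    errors = [(n, t, actual_matrix.get(n))
              for n, t in enumerate(defined_matrix, 1)
              if actual_matrix.get(n) != t]
    # -> [(2, 'devel/units/2.7/1', 'devel/units/2.6/1')]
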
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/cloud.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/cloud.sh
new file mode 100755
index 00000000..d76c3228
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/cloud.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+cloud="${args[0]}"
+python="${args[1]}"
+group="${args[2]}"
+
+target="shippable/${cloud}/group${group}/"
+
+stage="${S:-prod}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote-terminate always --remote-stage "${stage}" \
+ --docker --python "${python}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/freebsd.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/freebsd.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/freebsd.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/linux.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/linux.sh
new file mode 100755
index 00000000..9cc2f966
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/linux.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "${image}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/macos.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/macos.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/macos.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/osx.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/osx.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/osx.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/remote.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/remote.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/remote.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/rhel.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/rhel.sh
new file mode 100755
index 00000000..cd3014cc
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/rhel.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="shippable/posix/group${args[2]}/"
+else
+ target="shippable/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/sanity.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/sanity.sh
new file mode 100755
index 00000000..18710540
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/sanity.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [ "${BASE_BRANCH:-}" ]; then
+ base_branch="origin/${BASE_BRANCH}"
+else
+ base_branch=""
+fi
+
+if [ "${group}" == "extra" ]; then
+ ../internal_test_tools/tools/run.py --color
+ exit
+fi
+
+case "${group}" in
+ 1) options=(--skip-test pylint --skip-test ansible-doc --skip-test validate-modules) ;;
+    2) options=(--test ansible-doc --test validate-modules) ;;
+ 3) options=(--test pylint plugins/modules/) ;;
+ 4) options=(--test pylint --exclude plugins/modules/) ;;
+esac
+
+# allow collection migration sanity tests for groups 3 and 4 to pass without updating this script during migration
+network_path="lib/ansible/modules/network/"
+
+if [ -d "${network_path}" ]; then
+ if [ "${group}" -eq 3 ]; then
+ options+=(--exclude "${network_path}")
+ elif [ "${group}" -eq 4 ]; then
+ options+=("${network_path}")
+ fi
+fi
+
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ --docker --base-branch "${base_branch}" \
+ "${options[@]}" --allow-disabled
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/shippable.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/shippable.sh
new file mode 100755
index 00000000..5547c0c7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/shippable.sh
@@ -0,0 +1,223 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+ansible_version="${args[0]}"
+script="${args[1]}"
+
+function join {
+ local IFS="$1";
+ shift;
+ echo "$*";
+}
+
+# Ensure we can write other collections to this dir
+sudo chown "$(whoami)" "${PWD}/../../"
+
+test="$(join / "${args[@]:1}")"
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do
+ docker rm -f "${container}" || true # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+ ls -la /home/shippable/cache/
+fi
+
+command -v python
+python -V
+
+function retry
+{
+ # shellcheck disable=SC2034
+ for repetition in 1 2 3; do
+ set +e
+ "$@"
+ result=$?
+ set -e
+ if [ ${result} == 0 ]; then
+ return ${result}
+ fi
+        echo "$* -> ${result}"
+    done
+    echo "Command '$*' failed 3 times!"
+    exit 1
+}
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+if [ "${ansible_version}" == "devel" ]; then
+ retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+else
+ retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check
+fi
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then
+ export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible"
+ SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
+ TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/general"
+ mkdir -p "${TEST_DIR}"
+ cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+ cd "${TEST_DIR}"
+else
+ export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../"
+fi
+
+# START: HACK install dependencies
+retry ansible-galaxy -vvv collection install ansible.netcommon
+retry ansible-galaxy -vvv collection install community.kubernetes
+retry ansible-galaxy -vvv collection install google.cloud
+
+if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then
+ # Nothing further should be added to this list.
+ # This is to prevent modules or plugins in this collection having a runtime dependency on other collections.
+ retry ansible-galaxy -vvv collection install community.internal_test_tools
+fi
+
+if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then
+ # To prevent Python dependencies on other collections only install other collections for integration tests
+ retry ansible-galaxy -vvv collection install ansible.posix
+ retry ansible-galaxy -vvv collection install community.crypto
+fi
+
+# END: HACK
+
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+ COVERAGE=yes
+ COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+ # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+ export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+ # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+ export COVERAGE="--coverage"
+else
+ # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+ export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+ # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+ export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+ # disable change detection triggered by having 'ci_complete' in the latest commit message
+ export CHANGED=""
+else
+ # enable change detection (default behavior)
+ export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+ # run unstable tests which are targeted by focused changes on PRs
+ export UNSTABLE="--allow-unstable-changed"
+else
+ # do not run unstable tests outside PRs
+ export UNSTABLE=""
+fi
+
+# remove empty core/extras module directories from PRs created prior to the repo-merge
+find plugins -type d -empty -print -delete
+
+function cleanup
+{
+ # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then
+ stub="--stub"
+ # trigger coverage reporting for stubs even if no other coverage data exists
+ mkdir -p tests/output/coverage/
+ else
+ stub=""
+ fi
+
+ if [ -d tests/output/coverage/ ]; then
+ if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+ process_coverage='yes' # process existing coverage files
+ elif [ "${stub}" ]; then
+ process_coverage='yes' # process coverage when stubs are enabled
+ else
+ process_coverage=''
+ fi
+
+ if [ "${process_coverage}" ]; then
+ # use python 3.7 for coverage to avoid running out of memory during coverage xml processing
+ # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job
+ virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+ set +ux
+ . ~/ansible-venv/bin/activate
+ set -ux
+
+ # shellcheck disable=SC2086
+ ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+ cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+ if [ "${ansible_version}" != "2.9" ]; then
+ # analyze and capture code coverage aggregated by integration test target
+ ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+ fi
+
+ # upload coverage report to codecov.io only when using complete on-demand coverage
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+ for file in tests/output/reports/coverage=*.xml; do
+ flags="${file##*/coverage=}"
+ flags="${flags%-powershell.xml}"
+ flags="${flags%.xml}"
+ # remove numbered component from stub files when converting to tags
+ flags="${flags//stub-[0-9]*/stub}"
+ flags="${flags//=/,}"
+ flags="${flags//[^a-zA-Z0-9_,]/_}"
+
+ bash <(curl -s https://codecov.io/bash) \
+ -f "${file}" \
+ -F "${flags}" \
+ -n "${test}" \
+ -t 20636cf5-4d6a-4b9a-8d2d-6f22ebbaa752 \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+ done
+ fi
+ fi
+ fi
+
+ if [ -d tests/output/junit/ ]; then
+ cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/data/ ]; then
+ cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/bot/ ]; then
+ cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+}
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=60
+else
+ timeout=50
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi
+"tests/utils/shippable/${script}.sh" "${test}"
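
In the cleanup handler above, the codecov.io flags are derived from each coverage report's file name. Tracing the substitutions with an illustrative name (the powershell and stub rewrites are no-ops for it):

    file="tests/output/reports/coverage=units=python-3.6.xml"
    flags="${file##*/coverage=}"           # units=python-3.6.xml
    flags="${flags%.xml}"                  # units=python-3.6
    flags="${flags//=/,}"                  # units,python-3.6
    flags="${flags//[^a-zA-Z0-9_,]/_}"     # units,python_3_6
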
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.py b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.py
new file mode 100755
index 00000000..fb538271
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3.7
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+start = time.time()
+
+sys.stdin.reconfigure(errors='surrogateescape')
+sys.stdout.reconfigure(errors='surrogateescape')
+
+for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.sh
new file mode 100755
index 00000000..77e25783
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/timing.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eu
+
+"$@" 2>&1 | "$(dirname "$0")/timing.py"
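
timing.sh pipes a command's combined stdout and stderr through timing.py, which prefixes every line with the minutes:seconds elapsed since the wrapper started. For example (illustrative):

    ./tests/utils/shippable/timing.sh echo hello
    # output: 00:00 hello
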
diff --git a/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/units.sh b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/units.sh
new file mode 100755
index 00000000..38e79935
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/general/tests/utils/shippable/units.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+version="${args[1]}"
+group="${args[2]}"
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=90
+else
+ timeout=30
+fi
+
+group1=()
+
+case "${group}" in
+ 1) options=("${group1[@]:+${group1[@]}}") ;;
+esac
+
+ansible-test env --timeout "${timeout}" --color -v
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default --python "${version}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ "${options[@]:+${options[@]}}" \